content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Plot 2: line chart of Global Active Power over time.
# NOTE(review): relies on `timestamp` and `power` existing in the calling
# environment (created by a preprocessing step not shown here) -- confirm.
plot2 <- function() {
  # Draw to the active (screen) device first.
  plot(
    timestamp,
    power$GlobalActivePower,
    type = "l",
    xlab = "",
    ylab = "Global Active Power (kilowatts)"
  )
  # Copy the screen plot into a 480x480 PNG file, then close the PNG device.
  dev.copy(png, file = "plot2.png", width = 480, height = 480)
  dev.off()
}
plot2()
|
/plot2.R
|
no_license
|
smcnish/ExData_Plotting1
|
R
| false
| false
| 216
|
r
|
# Plot 2: line chart of Global Active Power over time.
# NOTE(review): relies on `timestamp` and `power` existing in the calling
# environment (created by a preprocessing step not shown here) -- confirm.
plot2 <- function() {
  # Draw to the active (screen) device first.
  plot(
    timestamp,
    power$GlobalActivePower,
    type = "l",
    xlab = "",
    ylab = "Global Active Power (kilowatts)"
  )
  # Copy the screen plot into a 480x480 PNG file, then close the PNG device.
  dev.copy(png, file = "plot2.png", width = 480, height = 480)
  dev.off()
}
plot2()
|
/eCE Direct Method, TC (直接法)/拉伸曲线统计卸载功&超弹性应变&加载功及内耗【v19.11.6】(使用cubintegrate积分).R
|
no_license
|
laye-d/statistics-in-eCE-by-R
|
R
| false
| false
| 8,855
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{division}
\alias{division}
\title{Divisiones de actividad}
\format{Un data-frame con dos variables: \code{division} y \code{desc_division}.
(\code{division} está desagregada en las 88 divisiones de actividad, cada una con su respectiva
descripción)}
\usage{
division
}
\description{
A partir de codiguera del INE.
}
\examples{
division
}
\keyword{datasets}
|
/man/division.Rd
|
no_license
|
transformauy/codigueras
|
R
| false
| true
| 467
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{division}
\alias{division}
\title{Divisiones de actividad}
\format{Un data-frame con dos variables: \code{division} y \code{desc_division}.
(\code{division} está desagregada en las 88 divisiones de actividad, cada una con su respectiva
descripción)}
\usage{
division
}
\description{
A partir de codiguera del INE.
}
\examples{
division
}
\keyword{datasets}
|
# Lab session (TP): Bayesian networks on bnlearn's `alarm` dataset.
# Part 1 explores the data and conditional-independence tests, part 2 fits a
# network by hill climbing and answers queries, part 3 hand-implements a
# PC-style structure learning algorithm (skeleton, then v-structure
# orientation).
setwd("~/Downloads/TP")
# install.packages("bnlearn")
source("http://bioconductor.org/biocLite.R")
biocLite(c("graph", "Rgraphviz"))
library("bnlearn")
# load the alarm dataset
data("alarm")
# column info (our variables)
ncol(alarm)
colnames(alarm)
# row info (our observations)
nrow(alarm)
rownames(alarm)
# first ten rows, first 5 columns
alarm[1:10, 1:5]
# rows 3, 5, 1, columns "ANES", "HIST" and "MINV"
alarm[c(3, 5, 1), c("ANES", "HIST", "MINV")]
# mutual-information independence test; z = empty set means a marginal test
ci.test(x = "PAP", y = "SHNT", z = as.character(NULL), data = alarm, test = "mi")
res = ci.test(x = "PAP", y = "SHNT", z = "PMB", data = alarm, test = "mi")
res$statistic
res$p.value
# marginal distributions of PAP and SHNT
table(alarm[, "PAP"])
plot(alarm[, "PAP"])
prop.table(table(alarm[, "PAP"]))
table(alarm[, "SHNT"])
plot(alarm[, "SHNT"])
prop.table(table(alarm[, "SHNT"]))
# joint table, then joint / row-conditional / column-conditional proportions
ct = table(alarm[, c("PAP", "SHNT")])
prop.table(ct)
prop.table(ct, margin = 1)
prop.table(ct, margin = 2)
# pairwise tests among STKV, HR, CO, with and without conditioning on the third
ci.test(x = "STKV", y = "HR", data = alarm, test = "mi")
ci.test(x = "STKV", y = "HR", z = "CO", data = alarm, test = "mi")
ci.test(x = "HR", y = "CO", data = alarm, test = "mi")
ci.test(x = "HR", y = "CO", z = "STKV", data = alarm, test = "mi")
ci.test(x = "CO", y = "STKV", data = alarm, test = "mi")
ci.test(x = "CO", y = "STKV", z = "HR", data = alarm, test = "mi")
# visualize p(HR | STKV) on all rows vs. on the slice CO == "HIGH"
mask = rep(TRUE, nrow(alarm))
p = prop.table(table(alarm[mask, c("STKV", "HR")]), margin = 1)
plot(p, main="p(y|x)")
mask = alarm[, "CO"] == "HIGH"
p = prop.table(table(alarm[mask, c("STKV", "HR")]), margin = 1)
plot(p, main="p(y|x,z=HIGH)")
# structure: score-based learning by hill climbing
bn = hc(alarm)
graphviz.plot(bn)
# parameters: fit the CPTs with Bayesian (posterior) estimation
bn = bn.fit(bn, data = alarm, method = "bayes")
bn[["CO"]]
# approximate conditional-probability queries by sampling
cpquery(bn, event = (STKV == "HIGH"), evidence = (HR == "LOW"))
cpquery(bn, event = (STKV == "HIGH"), evidence = (HR == "LOW" & CO == "LOW"))
# NOTE(review): includes.R (not shown here) presumably defines exact.dist(),
# which returns the exact joint distribution over `event` variables -- confirm.
source("includes.R")
p = exact.dist(bn, event = c("STKV", "HR", "CO"), evidence = TRUE)
# exact P(STKV=HIGH | HR=LOW), then additionally conditioning on CO=LOW
sum(p["HIGH", "LOW", ]) / sum(p[, "LOW", ])
sum(p["HIGH", "LOW", "LOW"]) / sum(p[, "LOW", "LOW"])
p = exact.dist(bn, event = c("INT", "APL"), evidence = TRUE)
p = exact.dist(bn, event = c("HYP", "STKV"), evidence = TRUE)
# observational conditional p(y | x): normalize over margin 2 (STKV)
p.y.x = prop.table(p, margin = 2)
p = exact.dist(bn, event = c("HYP", "STKV", "LVV"), evidence = TRUE)
# back-door adjustment: p(y | do(x)) = sum_z p(y | x, z) p(z), with z = LVV
p.z = margin.table(p, margin = 3)
p.y.xz = prop.table(p, margin = c(2, 3))
p.y.do.x = margin.table(p.y.xz * rep(p.z, each=prod(dim(p.y.xz)[-3])), margin = c(1, 2))
# compare the observational and interventional distributions
p.y.x
p.y.do.x
# PC-style structure learning: start from the complete undirected graph
vars = colnames(alarm)
g = empty.graph(vars)
for (x in vars) {
for (y in setdiff(vars, x)) {
g = set.edge(g, from = x, to = y)
}
}
graphviz.plot(g)
alpha = 0.001
# z.xy[[x]][[y]] records the separating set that removed edge x--y
z.xy = sapply(vars, function(v) {list()})
# skeleton phase: for growing conditioning-set size m, drop every edge x--y
# for which some size-m subset of a neighborhood renders x and y independent
for(m in 0:length(vars)) {
x.done = NULL
for (x in vars) {
x.done = c(x.done, x)
x.nbrs = g$nodes[[x]]$nbr
for (y in setdiff(x.nbrs, x.done)) {
y.nbrs = g$nodes[[y]]$nbr
# candidate conditioning sets: one column per size-m subset of either
# neighborhood (excluding the endpoint itself)
z.cands = array(NA, dim = c(m, 0))
if(length(x.nbrs) > m) {
z.cands = cbind(z.cands, combn(setdiff(x.nbrs, y), m))
}
if(length(y.nbrs) > m) {
z.cands = cbind(z.cands, combn(setdiff(y.nbrs, x), m))
}
# (0:ncol)[-1] is 1:ncol but yields an empty index when ncol == 0
for (i in (0:ncol(z.cands))[-1]) {
z = z.cands[, i]
## cat("testing", x, "indep", y, "given", paste(z, collapse = ", "), "\n")
res = ci.test(x = x, y = y, z = z, data = alarm, test = "mi")
if(res$p.value > alpha) {
cat("dropping edge\n")
g = drop.edge(g, from = x, to = y)
x.nbrs = setdiff(x.nbrs, y)
z.xy[[x]][[y]] = z
break
}
}
}
}
}
skeleton = g
graphviz.plot(g)
# (almost) directed graph: orient v-structures x -> w <- y for every
# unshielded triple x--w--y whose separating set does not contain w
g = skeleton
x.done = NULL
for (x in vars) {
x.done = c(x.done, x)
x.nbrs = g$nodes[[x]]$nbr
for (w in setdiff(x.nbrs, x.done)) {
w.nbrs = g$nodes[[w]]$nbr
for (y in setdiff(w.nbrs, c(x.done, x.nbrs))) {
if (!(w %in% z.xy[[x]][[y]])) {
# warn when a previous orientation already points the other way
if (w %in% g$nodes[[x]]$parents) {
cat("warning, inconsistent arc", x, "<->", w, "\n")
}
if (w %in% g$nodes[[y]]$parents) {
cat("warning, inconsistent arc", y, "<->", w, "\n")
}
g = set.arc(g, from = x, to = w)
g = set.arc(g, from = y, to = w)
}
}
}
}
graphviz.plot(g)
|
/code_R_bnlearn.R
|
no_license
|
ppmdatix/TP_pytorch
|
R
| false
| false
| 4,282
|
r
|
# Lab session (TP): Bayesian networks on bnlearn's `alarm` dataset.
# Part 1 explores the data and conditional-independence tests, part 2 fits a
# network by hill climbing and answers queries, part 3 hand-implements a
# PC-style structure learning algorithm (skeleton, then v-structure
# orientation).
setwd("~/Downloads/TP")
# install.packages("bnlearn")
source("http://bioconductor.org/biocLite.R")
biocLite(c("graph", "Rgraphviz"))
library("bnlearn")
# load the alarm dataset
data("alarm")
# column info (our variables)
ncol(alarm)
colnames(alarm)
# row info (our observations)
nrow(alarm)
rownames(alarm)
# first ten rows, first 5 columns
alarm[1:10, 1:5]
# rows 3, 5, 1, columns "ANES", "HIST" and "MINV"
alarm[c(3, 5, 1), c("ANES", "HIST", "MINV")]
# mutual-information independence test; z = empty set means a marginal test
ci.test(x = "PAP", y = "SHNT", z = as.character(NULL), data = alarm, test = "mi")
res = ci.test(x = "PAP", y = "SHNT", z = "PMB", data = alarm, test = "mi")
res$statistic
res$p.value
# marginal distributions of PAP and SHNT
table(alarm[, "PAP"])
plot(alarm[, "PAP"])
prop.table(table(alarm[, "PAP"]))
table(alarm[, "SHNT"])
plot(alarm[, "SHNT"])
prop.table(table(alarm[, "SHNT"]))
# joint table, then joint / row-conditional / column-conditional proportions
ct = table(alarm[, c("PAP", "SHNT")])
prop.table(ct)
prop.table(ct, margin = 1)
prop.table(ct, margin = 2)
# pairwise tests among STKV, HR, CO, with and without conditioning on the third
ci.test(x = "STKV", y = "HR", data = alarm, test = "mi")
ci.test(x = "STKV", y = "HR", z = "CO", data = alarm, test = "mi")
ci.test(x = "HR", y = "CO", data = alarm, test = "mi")
ci.test(x = "HR", y = "CO", z = "STKV", data = alarm, test = "mi")
ci.test(x = "CO", y = "STKV", data = alarm, test = "mi")
ci.test(x = "CO", y = "STKV", z = "HR", data = alarm, test = "mi")
# visualize p(HR | STKV) on all rows vs. on the slice CO == "HIGH"
mask = rep(TRUE, nrow(alarm))
p = prop.table(table(alarm[mask, c("STKV", "HR")]), margin = 1)
plot(p, main="p(y|x)")
mask = alarm[, "CO"] == "HIGH"
p = prop.table(table(alarm[mask, c("STKV", "HR")]), margin = 1)
plot(p, main="p(y|x,z=HIGH)")
# structure: score-based learning by hill climbing
bn = hc(alarm)
graphviz.plot(bn)
# parameters: fit the CPTs with Bayesian (posterior) estimation
bn = bn.fit(bn, data = alarm, method = "bayes")
bn[["CO"]]
# approximate conditional-probability queries by sampling
cpquery(bn, event = (STKV == "HIGH"), evidence = (HR == "LOW"))
cpquery(bn, event = (STKV == "HIGH"), evidence = (HR == "LOW" & CO == "LOW"))
# NOTE(review): includes.R (not shown here) presumably defines exact.dist(),
# which returns the exact joint distribution over `event` variables -- confirm.
source("includes.R")
p = exact.dist(bn, event = c("STKV", "HR", "CO"), evidence = TRUE)
# exact P(STKV=HIGH | HR=LOW), then additionally conditioning on CO=LOW
sum(p["HIGH", "LOW", ]) / sum(p[, "LOW", ])
sum(p["HIGH", "LOW", "LOW"]) / sum(p[, "LOW", "LOW"])
p = exact.dist(bn, event = c("INT", "APL"), evidence = TRUE)
p = exact.dist(bn, event = c("HYP", "STKV"), evidence = TRUE)
# observational conditional p(y | x): normalize over margin 2 (STKV)
p.y.x = prop.table(p, margin = 2)
p = exact.dist(bn, event = c("HYP", "STKV", "LVV"), evidence = TRUE)
# back-door adjustment: p(y | do(x)) = sum_z p(y | x, z) p(z), with z = LVV
p.z = margin.table(p, margin = 3)
p.y.xz = prop.table(p, margin = c(2, 3))
p.y.do.x = margin.table(p.y.xz * rep(p.z, each=prod(dim(p.y.xz)[-3])), margin = c(1, 2))
# compare the observational and interventional distributions
p.y.x
p.y.do.x
# PC-style structure learning: start from the complete undirected graph
vars = colnames(alarm)
g = empty.graph(vars)
for (x in vars) {
for (y in setdiff(vars, x)) {
g = set.edge(g, from = x, to = y)
}
}
graphviz.plot(g)
alpha = 0.001
# z.xy[[x]][[y]] records the separating set that removed edge x--y
z.xy = sapply(vars, function(v) {list()})
# skeleton phase: for growing conditioning-set size m, drop every edge x--y
# for which some size-m subset of a neighborhood renders x and y independent
for(m in 0:length(vars)) {
x.done = NULL
for (x in vars) {
x.done = c(x.done, x)
x.nbrs = g$nodes[[x]]$nbr
for (y in setdiff(x.nbrs, x.done)) {
y.nbrs = g$nodes[[y]]$nbr
# candidate conditioning sets: one column per size-m subset of either
# neighborhood (excluding the endpoint itself)
z.cands = array(NA, dim = c(m, 0))
if(length(x.nbrs) > m) {
z.cands = cbind(z.cands, combn(setdiff(x.nbrs, y), m))
}
if(length(y.nbrs) > m) {
z.cands = cbind(z.cands, combn(setdiff(y.nbrs, x), m))
}
# (0:ncol)[-1] is 1:ncol but yields an empty index when ncol == 0
for (i in (0:ncol(z.cands))[-1]) {
z = z.cands[, i]
## cat("testing", x, "indep", y, "given", paste(z, collapse = ", "), "\n")
res = ci.test(x = x, y = y, z = z, data = alarm, test = "mi")
if(res$p.value > alpha) {
cat("dropping edge\n")
g = drop.edge(g, from = x, to = y)
x.nbrs = setdiff(x.nbrs, y)
z.xy[[x]][[y]] = z
break
}
}
}
}
}
skeleton = g
graphviz.plot(g)
# (almost) directed graph: orient v-structures x -> w <- y for every
# unshielded triple x--w--y whose separating set does not contain w
g = skeleton
x.done = NULL
for (x in vars) {
x.done = c(x.done, x)
x.nbrs = g$nodes[[x]]$nbr
for (w in setdiff(x.nbrs, x.done)) {
w.nbrs = g$nodes[[w]]$nbr
for (y in setdiff(w.nbrs, c(x.done, x.nbrs))) {
if (!(w %in% z.xy[[x]][[y]])) {
# warn when a previous orientation already points the other way
if (w %in% g$nodes[[x]]$parents) {
cat("warning, inconsistent arc", x, "<->", w, "\n")
}
if (w %in% g$nodes[[y]]$parents) {
cat("warning, inconsistent arc", y, "<->", w, "\n")
}
g = set.arc(g, from = x, to = w)
g = set.arc(g, from = y, to = w)
}
}
}
}
graphviz.plot(g)
|
###### Pie Platform ####
# Draw a pie chart of the number of titles per OTT platform.
library(billboarder)  # htmlwidget pie chart (bb_piechart)
library(dplyr)        # provides %>%
# NOTE(review): hard-coded absolute path; consider relative paths or here::here().
setwd("C:\\College\\R Project\\R Case Study")
# One row per title; the Platform column names the streaming service.
# (Renamed from `data`, which shadowed utils::data().)
ott_data <- read.csv("ott data.csv")
# Count titles per platform.
# (Renamed from `c`, which shadowed base::c() for the rest of the session.)
platform_counts <- table(ott_data$Platform)
billboarder() %>% bb_piechart(platform_counts)
|
/Analysis-on-OTT-Platforms/Pie Platform.R
|
no_license
|
Nemwos/Analysis-on-OTT-Platforms
|
R
| false
| false
| 217
|
r
|
###### Pie Platform ####
# Draw a pie chart of the number of titles per OTT platform.
library(billboarder)  # htmlwidget pie chart (bb_piechart)
library(dplyr)        # provides %>%
# NOTE(review): hard-coded absolute path; consider relative paths or here::here().
setwd("C:\\College\\R Project\\R Case Study")
# One row per title; the Platform column names the streaming service.
# (Renamed from `data`, which shadowed utils::data().)
ott_data <- read.csv("ott data.csv")
# Count titles per platform.
# (Renamed from `c`, which shadowed base::c() for the rest of the session.)
platform_counts <- table(ott_data$Platform)
billboarder() %>% bb_piechart(platform_counts)
|
\name{Test.Paired}
\alias{Test.Paired}
\title{Test Paired Data Sets}
\description{Tests two paired data sets for similarity.}
\usage{Test.Paired(group.data, numPerms = 1000, parallel = FALSE, cores = 3)}
\arguments{
\item{group.data}{A list of 2 matrices of taxonomic counts (columns) for each sample (rows).}
\item{numPerms}{Number of permutations. In practice this should be at least 1,000.}
\item{parallel}{When this is 'TRUE' it allows for parallel calculation of the permutations. Requires the package \code{doParallel}.}
\item{cores}{The number of parallel processes to run if parallel is 'TRUE'.}
}
\value{A pvalue.}
\examples{
data(saliva)
data(throat)
### Since saliva and throat come from same subjects, the data is paired
saliva1 <- saliva[-24,] # Make saliva 23 subjects to match throat
group.data <- list(throat, saliva1)
### We use 1 for speed, should be at least 1,000
numPerms <- 1
pval <- Test.Paired(group.data, numPerms)
pval
}
|
/man/Test.Paired.Rd
|
no_license
|
cran/HMP
|
R
| false
| false
| 1,005
|
rd
|
\name{Test.Paired}
\alias{Test.Paired}
\title{Test Paired Data Sets}
\description{Tests two paired data sets for similarity.}
\usage{Test.Paired(group.data, numPerms = 1000, parallel = FALSE, cores = 3)}
\arguments{
\item{group.data}{A list of 2 matrices of taxonomic counts (columns) for each sample (rows).}
\item{numPerms}{Number of permutations. In practice this should be at least 1,000.}
\item{parallel}{When this is 'TRUE' it allows for parallel calculation of the permutations. Requires the package \code{doParallel}.}
\item{cores}{The number of parallel processes to run if parallel is 'TRUE'.}
}
\value{A pvalue.}
\examples{
data(saliva)
data(throat)
### Since saliva and throat come from same subjects, the data is paired
saliva1 <- saliva[-24,] # Make saliva 23 subjects to match throat
group.data <- list(throat, saliva1)
### We use 1 for speed, should be at least 1,000
numPerms <- 1
pval <- Test.Paired(group.data, numPerms)
pval
}
|
# disperseR workflow: run HYSPLIT dispersion for the largest SOx-emitting
# power-plant units of 2005-2006, link the resulting plumes to ZIP codes,
# counties and grids, weight by emissions (HyADS), plot, and rank facilities.
# NOTE(review): several objects used below (pblheight, proc_dir, crosswalk,
# zcta_dataset, graph_dir, rdata_dir, exp_dir, PP.units.monthly1995_2017) are
# never defined in this script; presumably disperseR::create_dirs() and
# disperseR::get_data() create them in the global environment -- confirm
# against the disperseR vignette.
#rm(list=ls())
#installing packages
install.packages(c("Rcpp", "downloader", "rgdal"))
#installing SplitR package
devtools::install_github("lhenneman/SplitR", force = TRUE)
# devtools::install_github("lhenneman/hyspdisp", force = TRUE)
install.packages("USAboundariesData", repos = "http://packages.ropensci.org", type = "source")
####### problem installing package (vignette building failed) ######
devtools::install_github("lhenneman/disperseR@dev", force = TRUE, build_vignettes = TRUE)
# package install without vignette
devtools::install_github("lhenneman/disperseR@dev", force = TRUE, build_vignettes = FALSE)
#setting R directory
setwd("/Users/munshirasel/hello-world/munshimdrasel/mello/mello2")
#loading libraries
library(disperseR) # our package
library(ncdf4)
library(data.table)
library(tidyverse)
library(parallel)
library(sf)
library(viridis)
library(ggplot2)
library(scales)
library(ggsn)
library(gridExtra)
library(ggmap)
library(ggrepel)
library(fst)
#creating directory for disperseR
disperseR::create_dirs(location = "/Users/munshirasel/hello-world/munshimdrasel/mello/mello2")
# download data
disperseR::get_data(data = "all",
start.year = "2005",
start.month = "01",
end.year = "2006",
end.month = "05")
# view units data
view(disperseR::units)
# pick out units to run, top two SOx emitters in 2006 & 2005
unitsrun2005 <- disperseR::units %>%
dplyr::filter(year == 2005) %>% # only get data for 2005
dplyr::top_n(2, SOx) # sort and take the two rows with the biggest value for SOx
unitsrun2006 <- disperseR::units %>%
dplyr::filter(year == 2006) %>% # only get data for 2006
dplyr::top_n(2, SOx) # sort and take the two rows with the biggest value for SOx
# append together and transform to data table
unitsrun<-data.table::data.table(rbind(unitsrun2005, unitsrun2006))
# find unique combos of Latitude, Longitude, and Height
unitslatlonh <- unique( unitsrun[ ,.( Latitude, Longitude, Height, year)] )
# tag each unique location with a run reference id (data.table in-place :=)
unitslatlonh[, unit_run_ref:=1:nrow( unitslatlonh)]
# keep one unit per unique location/year combination
unitsrun_trim <- merge( unitsrun, unitslatlonh)[ !duplicated( unit_run_ref)]
# define data.table with all emissions events
input_refs <- disperseR::define_inputs(units = unitsrun,
startday = '2005-11-01',
endday = '2006-02-28',
start.hours = c(0, 6, 12, 18),
duration = 120)
head(input_refs, 10)
# subset the input refs: keep only runs starting on the 1st of a month at hour 0
input_refs_subset <- input_refs[format(as.Date(input_refs$start_day,
format = "%Y-%m-%d"),
format = "%d") == "01" & start_hour == 0]
head (input_refs_subset, 10)
# run disperser (HYSPLIT) in parallel over the selected emission events
hysp_raw <- disperseR::run_disperser_parallel(input.refs = input_refs_subset,
pbl.height = pblheight,
species = 'so2',
proc_dir = proc_dir,
overwrite = FALSE, ## FALSE BY DEFAULT
npart = 100,
keep.hysplit.files = FALSE, ## FALSE BY DEFAULT
mc.cores = parallel::detectCores())
# Link results to spatial domains
yearmons <- disperseR::get_yearmon(start.year = "2005",
start.month = "07",
end.year = "2006",
end.month = "06")
unitsrun
# link all units to ZIP codes (ZCTAs)
linked_zips <- disperseR::link_all_units(
units.run = unitsrun,
link.to = 'zips',
mc.cores = parallel::detectCores(),
year.mons = yearmons,
pbl.height = pblheight,
crosswalk. = crosswalk,
duration.run.hours = 240,
res.link = 12000,
overwrite = FALSE)
#> processed unit 3136-1
#> processed unit 3149-1
#> processed unit 3136-2
# link all units to counties
linked_counties <- disperseR::link_all_units(
units.run=unitsrun,
link.to = 'counties',
mc.cores = parallel::detectCores(),
year.mons = yearmons,
pbl.height = pblheight,
counties. = USAboundaries::us_counties( ),
crosswalk. = NULL,
duration.run.hours = 240,
overwrite = FALSE)
#> processed unit 3136-1
#> processed unit 3149-1
#> processed unit 3136-2
####problem######
# link all units to grids
linked_grids <- disperseR::link_all_units(
units.run=unitsrun,
link.to = 'grids',
mc.cores = parallel::detectCores(),
year.mons = yearmons,
pbl.height = pblheight,
crosswalk. = NULL,
duration.run.hours = 240,
overwrite = FALSE)
#> processed unit 3136-1
#> processed unit 3149-1
#> processed unit 3136-2
head(linked_zips)
head(linked_counties)
head(linked_grids)
unique(linked_zips$comb)
# Visualization of the results.
# single-unit / single-month impact tables for each spatial domain
impact_table_zip_single <- disperseR::create_impact_table_single(
data.linked=linked_zips,
link.to = 'zips',
data.units = unitsrun,
zcta.dataset = zcta_dataset,
map.unitID = "3136-1",
map.month = "200511",
metric = 'N')
impact_table_county_single <- disperseR::create_impact_table_single(
data.linked=linked_counties,
link.to = 'counties',
data.units = unitsrun,
counties. = USAboundaries::us_counties( ),
map.unitID = "3136-1",
map.month = "200511",
metric = 'N')
impact_table_grid_single <- disperseR::create_impact_table_single(
data.linked=linked_grids,
link.to = 'grids',
data.units = unitsrun,
map.unitID = "3136-1",
map.month = "200511",
metric = 'N')
head(impact_table_zip_single)
# single-unit / single-month impact maps for each spatial domain
link_plot_zips <- disperseR::plot_impact_single(
data.linked = linked_zips,
link.to = 'zips',
map.unitID = "3136-1",
map.month = "20061",
data.units = unitsrun,
zcta.dataset = zcta_dataset,
metric = 'N',
graph.dir = graph_dir,
zoom = T, # TRUE by default
legend.name = 'HyADS raw exposure',
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15))
link_plot_grids <- disperseR::plot_impact_single(
data.linked = linked_grids,
link.to = 'grids',
map.unitID = "3136-1",
map.month = "20061",
data.units = unitsrun,
metric = 'N',
graph.dir = graph_dir,
zoom = F, # TRUE by default (false meaning to show the whole country)
legend.name = 'HyADS raw exposure',
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15))
link_plot_counties <- disperseR::plot_impact_single(
data.linked = linked_counties,
link.to = 'counties',
map.unitID = "3136-1",
map.month = "20061",
counties. = USAboundaries::us_counties( ),
data.units = unitsrun,
metric = 'N',
graph.dir = graph_dir,
zoom = T, # TRUE by default (true means to show that location area only)
legend.name = 'HyADS raw exposure',
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15))
link_plot_zips
link_plot_grids
link_plot_counties
# Combine all results into RData file.
combined_ziplinks <- disperseR::combine_monthly_links(
month_YYYYMMs = yearmons,
link.to = 'zips',
filename = 'hyads_vig_unwgted_zips.RData')
combined_countylinks <- disperseR::combine_monthly_links(
month_YYYYMMs = yearmons,
link.to = 'counties',
filename = 'hyads_vig_unwgted_counties.RData')
combined_gridlinks <- disperseR::combine_monthly_links(
month_YYYYMMs = yearmons,
link.to = 'grids',
filename = 'hyads_vig_unwgted_grids.RData')
names(combined_ziplinks)
# Calculate and extract useful information from the results
# Weight the results by emissions
exp_ann_unit_zip <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'zips',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
exp_ann_unit_grids <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'grids',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_grids.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
exp_ann_unit_counties <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'counties',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_counties.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
# annual emission-weighted exposure maps per spatial domain
zip_exp_ann_plot <- disperseR::plot_impact_weighted(
data.linked = exp_ann_unit_zip,
data.units = unitsrun,
link.to = 'zips',
zcta.dataset = zcta_dataset,
time.agg = 'year',
metric = 'hyads',
legend.name = 'Aggregate HyADS exposure',
zoom = T, # TRUE by default
graph.dir = graph_dir,
map.month = NULL, # NULL by default change if time.agg = 'month'
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15)) # 0 by default
counties_exp_ann_plot <- disperseR::plot_impact_weighted(
data.linked = exp_ann_unit_counties,
data.units = unitsrun,
link.to = 'counties',
counties. = USAboundaries::us_counties( ),
time.agg = 'year',
metric = 'hyads',
legend.name = 'Aggregate HyADS exposure',
zoom = T, # TRUE by default
graph.dir = graph_dir,
map.month = NULL, # NULL by default change if time.agg = 'month'
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15)) # 0 by default
grids_exp_ann_plot <- disperseR::plot_impact_weighted(
data.linked = exp_ann_unit_grids,
data.units = unitsrun,
link.to = 'grids',
time.agg = 'year',
metric = 'hyads',
legend.name = 'Aggregate HyADS exposure',
zoom = T, # TRUE by default
graph.dir = graph_dir,
map.month = NULL, # NULL by default change if time.agg = 'month'
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15)) # 0 by default
zip_exp_ann_plot
counties_exp_ann_plot
grids_exp_ann_plot
#plotting monthly exposure
exp_mon_unit_zip <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'zips',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
exp_mon_unit_grids <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'grids',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_grids.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
exp_mon_unit_counties <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'counties',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_counties.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
zip_exp_mon_plot <- disperseR::plot_impact_weighted(
data.linked = exp_mon_unit_zip,
data.units = unitsrun,
zcta.dataset = zcta_dataset,
time.agg = 'month',
map.month = "200511",
metric = 'hyads',
legend.name = 'Montly HyADS exposure',
zoom = T, # TRUE by default
graph.dir = graph_dir)
zip_exp_mon_plot
# Plot unit-specific impacts over time
zip_exp_unit_mon2005 <- disperseR::calculate_exposure(
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
units.mo = PP.units.monthly1995_2017,
link.to = 'zips',
year.E = 2005,
year.D = 2005,
pollutant = 'SO2.tons',
source.agg = 'unit', # note!
time.agg = 'month',
exp_dir = exp_dir,
return.monthly.data = T)
zip_exp_unit_mon2006 <- disperseR::calculate_exposure(
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
units.mo = PP.units.monthly1995_2017,
link.to = 'zips',
year.E = 2006,
year.D = 2006,
pollutant = 'SO2.tons',
source.agg = 'unit', # note!
time.agg = 'month',
exp_dir = exp_dir,
return.monthly.data = T)
# stack both years, then plot selected ZIP codes over time
zip_exp_unit_mon <- rbind(zip_exp_unit_mon2005, zip_exp_unit_mon2006)
zipcodes <- c("13039","21798", "03804")
###????
zip_exp_unit <- disperseR::plot_impact_unit(
data.linked = zip_exp_unit_mon,
zip.codes = zipcodes,
graph.dir = graph_dir)
#> geom_path: Each group consists of only one observation. Do you need to
#> adjust the group aesthetic?
# Rank facilities.
zip_exp_ann_unit <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'zips',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'year')
zip_exp_ann_unit[, year := 2005]
# rank units by population-weighted HyADS exposure in Pennsylvania, 2005
unitRanks2005 <- disperseR::rankfacs_by_popwgt_location(
data.linked = zip_exp_ann_unit,
crosswalk. = crosswalk,
rank.by = c('hyads'),
state.value = 'PA',
year = 2005)
unitRanks2005
# Plot ranked facilities.
plotUnitsRanked <- disperseR::plot_units_ranked(
data.units = unitsrun,
data.ranked = unitRanks2005,
year = 2005,
graph.dir = graph_dir)
plotUnitsRanked
#> $ggbar
#>
#> $ggmap
##End of code
# not uploading main folders due to large size
|
/disperser.R
|
no_license
|
mmrasel/mello
|
R
| false
| false
| 13,543
|
r
|
#rm(list=ls())
#installing packages
install.packages(c("Rcpp", "downloader", "rgdal"))
#installing SplitR package
devtools::install_github("lhenneman/SplitR", force = TRUE)
# devtools::install_github("lhenneman/hyspdisp", force = TRUE)
install.packages("USAboundariesData", repos = "http://packages.ropensci.org", type = "source")
####### problem installing package (vignette building failed) ######
devtools::install_github("lhenneman/disperseR@dev", force = TRUE, build_vignettes = TRUE)
# package install without vignette
devtools::install_github("lhenneman/disperseR@dev", force = TRUE, build_vignettes = FALSE)
#setting R directory
setwd("/Users/munshirasel/hello-world/munshimdrasel/mello/mello2")
#loading libraries
library(disperseR) # our package
library(ncdf4)
library(data.table)
library(tidyverse)
library(parallel)
library(sf)
library(viridis)
library(ggplot2)
library(scales)
library(ggsn)
library(gridExtra)
library(ggmap)
library(ggrepel)
library(fst)
#creating directory for disperseR
disperseR::create_dirs(location = "/Users/munshirasel/hello-world/munshimdrasel/mello/mello2")
# download data
disperseR::get_data(data = "all",
start.year = "2005",
start.month = "01",
end.year = "2006",
end.month = "05")
# view units data
view(disperseR::units)
# pick out units to run, top two SOx emitters in 2006 & 2005
unitsrun2005 <- disperseR::units %>%
dplyr::filter(year == 2005) %>% # only get data for 2005
dplyr::top_n(2, SOx) # sort and take the two rows with the biggest value for SOx
unitsrun2006 <- disperseR::units %>%
dplyr::filter(year == 2006) %>% # only get data for 2006
dplyr::top_n(2, SOx) # sort and take the two rows with the biggest value for SOx
# append together and transform to data table
unitsrun<-data.table::data.table(rbind(unitsrun2005, unitsrun2006))
# find unique combos of Latitude, Longitude, and Height
unitslatlonh <- unique( unitsrun[ ,.( Latitude, Longitude, Height, year)] )
unitslatlonh[, unit_run_ref:=1:nrow( unitslatlonh)]
unitsrun_trim <- merge( unitsrun, unitslatlonh)[ !duplicated( unit_run_ref)]
# define data.table with all emissions events
input_refs <- disperseR::define_inputs(units = unitsrun,
startday = '2005-11-01',
endday = '2006-02-28',
start.hours = c(0, 6, 12, 18),
duration = 120)
head(input_refs, 10)
# subset the input refs
input_refs_subset <- input_refs[format(as.Date(input_refs$start_day,
format = "%Y-%m-%d"),
format = "%d") == "01" & start_hour == 0]
head (input_refs_subset, 10)
# run disperser
hysp_raw <- disperseR::run_disperser_parallel(input.refs = input_refs_subset,
pbl.height = pblheight,
species = 'so2',
proc_dir = proc_dir,
overwrite = FALSE, ## FALSE BY DEFAULT
npart = 100,
keep.hysplit.files = FALSE, ## FALSE BY DEFAULT
mc.cores = parallel::detectCores())
# Link results to spatial domains
yearmons <- disperseR::get_yearmon(start.year = "2005",
start.month = "07",
end.year = "2006",
end.month = "06")
unitsrun
linked_zips <- disperseR::link_all_units(
units.run = unitsrun,
link.to = 'zips',
mc.cores = parallel::detectCores(),
year.mons = yearmons,
pbl.height = pblheight,
crosswalk. = crosswalk,
duration.run.hours = 240,
res.link = 12000,
overwrite = FALSE)
#> processed unit 3136-1
#> processed unit 3149-1
#> processed unit 3136-2
# link all units to counties
linked_counties <- disperseR::link_all_units(
units.run=unitsrun,
link.to = 'counties',
mc.cores = parallel::detectCores(),
year.mons = yearmons,
pbl.height = pblheight,
counties. = USAboundaries::us_counties( ),
crosswalk. = NULL,
duration.run.hours = 240,
overwrite = FALSE)
#> processed unit 3136-1
#> processed unit 3149-1
#> processed unit 3136-2
####problem######
# link all units to grids
linked_grids <- disperseR::link_all_units(
units.run=unitsrun,
link.to = 'grids',
mc.cores = parallel::detectCores(),
year.mons = yearmons,
pbl.height = pblheight,
crosswalk. = NULL,
duration.run.hours = 240,
overwrite = FALSE)
#> processed unit 3136-1
#> processed unit 3149-1
#> processed unit 3136-2
head(linked_zips)
head(linked_counties)
head(linked_grids)
unique(linked_zips$comb)
# Visualization of the results.
impact_table_zip_single <- disperseR::create_impact_table_single(
data.linked=linked_zips,
link.to = 'zips',
data.units = unitsrun,
zcta.dataset = zcta_dataset,
map.unitID = "3136-1",
map.month = "200511",
metric = 'N')
impact_table_county_single <- disperseR::create_impact_table_single(
data.linked=linked_counties,
link.to = 'counties',
data.units = unitsrun,
counties. = USAboundaries::us_counties( ),
map.unitID = "3136-1",
map.month = "200511",
metric = 'N')
impact_table_grid_single <- disperseR::create_impact_table_single(
data.linked=linked_grids,
link.to = 'grids',
data.units = unitsrun,
map.unitID = "3136-1",
map.month = "200511",
metric = 'N')
head(impact_table_zip_single)
link_plot_zips <- disperseR::plot_impact_single(
data.linked = linked_zips,
link.to = 'zips',
map.unitID = "3136-1",
map.month = "20061",
data.units = unitsrun,
zcta.dataset = zcta_dataset,
metric = 'N',
graph.dir = graph_dir,
zoom = T, # TRUE by default
legend.name = 'HyADS raw exposure',
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15))
link_plot_grids <- disperseR::plot_impact_single(
data.linked = linked_grids,
link.to = 'grids',
map.unitID = "3136-1",
map.month = "20061",
data.units = unitsrun,
metric = 'N',
graph.dir = graph_dir,
zoom = F, # TRUE by default (false meaning to show the whole country)
legend.name = 'HyADS raw exposure',
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15))
link_plot_counties <- disperseR::plot_impact_single(
data.linked = linked_counties,
link.to = 'counties',
map.unitID = "3136-1",
map.month = "20061",
counties. = USAboundaries::us_counties( ),
data.units = unitsrun,
metric = 'N',
graph.dir = graph_dir,
zoom = T, # TRUE by default (true means to show that location area only)
legend.name = 'HyADS raw exposure',
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15))
link_plot_zips
link_plot_grids
link_plot_counties
# Combine all results into RData file.
combined_ziplinks <- disperseR::combine_monthly_links(
month_YYYYMMs = yearmons,
link.to = 'zips',
filename = 'hyads_vig_unwgted_zips.RData')
combined_countylinks <- disperseR::combine_monthly_links(
month_YYYYMMs = yearmons,
link.to = 'counties',
filename = 'hyads_vig_unwgted_counties.RData')
combined_gridlinks <- disperseR::combine_monthly_links(
month_YYYYMMs = yearmons,
link.to = 'grids',
filename = 'hyads_vig_unwgted_grids.RData')
names(combined_ziplinks)
# Calculate and extract useful information from the results
# Weight the results by emissions
exp_ann_unit_zip <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'zips',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
exp_ann_unit_grids <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'grids',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_grids.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
exp_ann_unit_counties <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'counties',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_counties.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'month',
return.monthly.data = T)
zip_exp_ann_plot <- disperseR::plot_impact_weighted(
data.linked = exp_ann_unit_zip,
data.units = unitsrun,
link.to = 'zips',
zcta.dataset = zcta_dataset,
time.agg = 'year',
metric = 'hyads',
legend.name = 'Aggregate HyADS exposure',
zoom = T, # TRUE by default
graph.dir = graph_dir,
map.month = NULL, # NULL by default change if time.agg = 'month'
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15)) # 0 by default
counties_exp_ann_plot <- disperseR::plot_impact_weighted(
data.linked = exp_ann_unit_counties,
data.units = unitsrun,
link.to = 'counties',
counties. = USAboundaries::us_counties( ),
time.agg = 'year',
metric = 'hyads',
legend.name = 'Aggregate HyADS exposure',
zoom = T, # TRUE by default
graph.dir = graph_dir,
map.month = NULL, # NULL by default change if time.agg = 'month'
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15)) # 0 by default
grids_exp_ann_plot <- disperseR::plot_impact_weighted(
data.linked = exp_ann_unit_grids,
data.units = unitsrun,
link.to = 'grids',
time.agg = 'year',
metric = 'hyads',
legend.name = 'Aggregate HyADS exposure',
zoom = T, # TRUE by default
graph.dir = graph_dir,
map.month = NULL, # NULL by default change if time.agg = 'month'
# other parameters passed to ggplot2::theme()
axis.text = element_blank(),
legend.position = c( .75, .15)) # 0 by default
zip_exp_ann_plot
counties_exp_ann_plot
grids_exp_ann_plot
# ---- Monthly (2005) HyADS exposure for ZIP, grid and county linkages ----
# Same setup as the annual runs, but results are kept per month so a single
# month can be mapped below.  (T replaced by TRUE throughout.)
exp_mon_unit_zip <- disperseR::calculate_exposure(
  year.E = 2005,
  year.D = 2005,
  link.to = 'zips',
  pollutant = 'SO2.tons',
  rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
  exp_dir = exp_dir,
  units.mo = PP.units.monthly1995_2017,
  source.agg = 'unit',
  time.agg = 'month',
  return.monthly.data = TRUE)
exp_mon_unit_grids <- disperseR::calculate_exposure(
  year.E = 2005,
  year.D = 2005,
  link.to = 'grids',
  pollutant = 'SO2.tons',
  rda_file = file.path(rdata_dir, "hyads_vig_unwgted_grids.RData"),
  exp_dir = exp_dir,
  units.mo = PP.units.monthly1995_2017,
  source.agg = 'unit',
  time.agg = 'month',
  return.monthly.data = TRUE)
exp_mon_unit_counties <- disperseR::calculate_exposure(
  year.E = 2005,
  year.D = 2005,
  link.to = 'counties',
  rda_file = file.path(rdata_dir, "hyads_vig_unwgted_counties.RData"),
  pollutant = 'SO2.tons',
  exp_dir = exp_dir,
  units.mo = PP.units.monthly1995_2017,
  source.agg = 'unit',
  time.agg = 'month',
  return.monthly.data = TRUE)
# Map the November 2005 ZIP-level exposure.
# (legend typo fixed: "Montly" -> "Monthly")
zip_exp_mon_plot <- disperseR::plot_impact_weighted(
  data.linked = exp_mon_unit_zip,
  data.units = unitsrun,
  zcta.dataset = zcta_dataset,
  time.agg = 'month',
  map.month = "200511",
  metric = 'hyads',
  legend.name = 'Monthly HyADS exposure',
  zoom = TRUE, # TRUE by default
  graph.dir = graph_dir)
zip_exp_mon_plot
# ---- Unit-specific impacts over time ----
# Compute monthly, unit-level ZIP exposure for 2005 and 2006, stack the two
# years, and plot the series at three example ZIP codes.
zip_exp_unit_mon2005 <- disperseR::calculate_exposure(
  rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
  units.mo = PP.units.monthly1995_2017,
  link.to = 'zips',
  year.E = 2005,
  year.D = 2005,
  pollutant = 'SO2.tons',
  source.agg = 'unit', # note!
  time.agg = 'month',
  exp_dir = exp_dir,
  return.monthly.data = TRUE)
zip_exp_unit_mon2006 <- disperseR::calculate_exposure(
  rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
  units.mo = PP.units.monthly1995_2017,
  link.to = 'zips',
  year.E = 2006,
  year.D = 2006,
  pollutant = 'SO2.tons',
  source.agg = 'unit', # note!
  time.agg = 'month',
  exp_dir = exp_dir,
  return.monthly.data = TRUE)
# Combine both years into one table for plotting.
zip_exp_unit_mon <- rbind(zip_exp_unit_mon2005, zip_exp_unit_mon2006)
# Example ZIP codes to highlight in the time-series plot.
zipcodes <- c("13039","21798", "03804")
# Plot monthly exposure at the selected ZIP codes, one line per unit.
zip_exp_unit <- disperseR::plot_impact_unit(
  data.linked = zip_exp_unit_mon,
  zip.codes = zipcodes,
  graph.dir = graph_dir)
#> geom_path: Each group consists of only one observation. Do you need to
#> adjust the group aesthetic?
# Rank facilities.
# Annual (2005), unit-level ZIP exposure used as the ranking input.
zip_exp_ann_unit <- disperseR::calculate_exposure(
year.E = 2005,
year.D = 2005,
link.to = 'zips',
pollutant = 'SO2.tons',
rda_file = file.path(rdata_dir, "hyads_vig_unwgted_zips.RData"),
exp_dir = exp_dir,
units.mo = PP.units.monthly1995_2017,
source.agg = 'unit',
time.agg = 'year')
# data.table in-place assignment: tag every row with the analysis year.
zip_exp_ann_unit[, year := 2005]
# Rank units by population-weighted HyADS exposure over Pennsylvania.
unitRanks2005 <- disperseR::rankfacs_by_popwgt_location(
data.linked = zip_exp_ann_unit,
crosswalk. = crosswalk,
rank.by = c('hyads'),
state.value = 'PA',
year = 2005)
unitRanks2005
# Plot ranked facilities.
# Returns a list with a bar chart ($ggbar) and a map ($ggmap).
plotUnitsRanked <- disperseR::plot_units_ranked(
data.units = unitsrun,
data.ranked = unitRanks2005,
year = 2005,
graph.dir = graph_dir)
plotUnitsRanked
#> $ggbar
#>
#> $ggmap
##End of code
# not uploading main folders due to large size
|
# plot4.R: draw a 2x2 panel of household power consumption plots for
# 2007-02-01 and 2007-02-02 and save it to plot4.png (480x480 px).
dataFile <- "./data/household_power_consumption.txt"
# Reading the data file.  The raw file encodes missing values as "?";
# declaring that via na.strings avoids coercion warnings from as.numeric()
# below and makes the intent explicit.
consumpData <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors = FALSE, dec=".", na.strings="?")
# Selecting data only from the dates 2007-02-01 and 2007-02-02
# (dates are stored d/m/Y in this file).
subsetConsumpData <- consumpData[consumpData$Date %in% c("1/2/2007","2/2/2007"),]
# Combine the Date and Time columns into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(subsetConsumpData$Date, subsetConsumpData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Convert the measurement columns (read as character) to numeric vectors.
globalActivePower <- as.numeric(subsetConsumpData$Global_active_power)
globalReactivePower <- as.numeric(subsetConsumpData$Global_reactive_power)
voltage <- as.numeric(subsetConsumpData$Voltage)
submetering1 <- as.numeric(subsetConsumpData$Sub_metering_1)
submetering2 <- as.numeric(subsetConsumpData$Sub_metering_2)
submetering3 <- as.numeric(subsetConsumpData$Sub_metering_3)
# Open the PNG device and lay the four panels out in a 2x2 grid.
png("plot4.png", width=480, height=480)
par(mfrow = c(2,2))
# Panel 1: global active power over time.
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power")
# Panel 2: voltage over time.
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
# Panel 3: the three sub-metering series overlaid on one axis.
plot(datetime, submetering1, type="l", xlab="", ylab="Energy sub metering" )
lines(datetime, submetering2, type="l", col="red")
lines(datetime, submetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, lwd=2.5, col=c("black","red","blue"))
# Panel 4: global reactive power over time.
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
ParamitaBasu/ExData_Plotting1
|
R
| false
| false
| 1,679
|
r
|
# Rebuild the 2x2 household power consumption figure (plot4.png) for the
# two days 2007-02-01 and 2007-02-02.
power_file <- "./data/household_power_consumption.txt"
# Load the complete data set; character columns are converted below.
power_all <- read.table(power_file, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
# Restrict to the two days of interest (dates stored as d/m/Y).
power_feb <- power_all[power_all$Date %in% c("1/2/2007", "2/2/2007"), ]
# Build POSIXlt timestamps from the Date and Time columns.
ts_feb <- strptime(paste(power_feb$Date, power_feb$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Numeric vectors for each measurement of interest.
active_power   <- as.numeric(power_feb$Global_active_power)
reactive_power <- as.numeric(power_feb$Global_reactive_power)
volts          <- as.numeric(power_feb$Voltage)
meter1         <- as.numeric(power_feb$Sub_metering_1)
meter2         <- as.numeric(power_feb$Sub_metering_2)
meter3         <- as.numeric(power_feb$Sub_metering_3)
# Open the PNG device and arrange four panels in a 2x2 grid.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Top-left: global active power.
plot(ts_feb, active_power, type = "l", xlab = "", ylab = "Global Active Power")
# Top-right: voltage.
plot(ts_feb, volts, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom-left: the three sub-metering series on one axis.
plot(ts_feb, meter1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(ts_feb, meter2, type = "l", col = "red")
lines(ts_feb, meter3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
# Bottom-right: global reactive power.
plot(ts_feb, reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 20611
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 20611
c
c Input Parameter (command line, file):
c input filename QBFLIB/Faber-Leone-Maratea-Ricca/Strategic_Companies/x220.0.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6824
c no.of clauses 20611
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 20611
c
c QBFLIB/Faber-Leone-Maratea-Ricca/Strategic_Companies/x220.0.qdimacs 6824 20611 E1 [] 0 220 6604 20611 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Faber-Leone-Maratea-Ricca/Strategic_Companies/x220.0/x220.0.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 672
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 20611
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 20611
c
c Input Parameter (command line, file):
c input filename QBFLIB/Faber-Leone-Maratea-Ricca/Strategic_Companies/x220.0.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6824
c no.of clauses 20611
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 20611
c
c QBFLIB/Faber-Leone-Maratea-Ricca/Strategic_Companies/x220.0.qdimacs 6824 20611 E1 [] 0 220 6604 20611 NONE
|
#' @export
#' @title add section
#' @description add a section in a Word document. A section affects
#' preceding paragraphs or tables.
#'
#' @details
#' A section starts at the end of the previous section (or the beginning of
#' the document if no preceding section exists), and stops where the section is declared.
#' The function \code{body_end_section()} is reflecting that Word concept.
#' The function \code{body_default_section()} is only modifying the default section of
#' the document.
#' @importFrom xml2 xml_remove
#' @param x an rdocx object
#' @param landscape landscape orientation
#' @param margins a named vector of margin settings in inches, margins not set remain at their default setting
#' @param colwidths columns widths as percentages, summing to 1. If 3 values, 3 columns
#' will be produced.
#' @param space space in percent between columns.
#' @param sep if TRUE a line is separating columns.
#' @param continuous TRUE for a continuous section break.
#' @note
#' This function is deprecated, use body_end_section_continuous,
#' body_end_section_landscape, body_end_section_portrait,
#' body_end_section_columns or body_end_section_columns_landscape
#' instead.
#' @examples
#' library(magrittr)
#'
#' str1 <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " %>%
#' rep(10) %>% paste(collapse = "")
#'
#' my_doc <- read_docx() %>%
#' # add a paragraph
#' body_add_par(value = str1, style = "Normal") %>%
#' # add a continuous section
#' body_end_section(continuous = TRUE) %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' # preceding paragraph is on a new column
#' slip_in_column_break(pos = "before") %>%
#' # add a two columns continous section
#' body_end_section(colwidths = c(.6, .4),
#' space = .05, sep = FALSE, continuous = TRUE) %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' # add a continuous section ... so far there is no break page
#' body_end_section(continuous = TRUE) %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' body_default_section(landscape = TRUE, margins = c(top = 0.5, bottom = 0.5))
#'
#' print(my_doc, target = "section.docx")
# Deprecated: end a Word section here (a section mark applies to the
# content that precedes it).  Excluded from coverage (nocov).
body_end_section <- function(x, landscape = FALSE, margins = c(top = NA, bottom = NA, left = NA, right = NA),
colwidths = c(1), space = .05, sep = FALSE, continuous = FALSE){# nocov start
.Deprecated(msg = "body_end_section is deprecated. See ?sections for replacement functions.")
# column widths are proportions of the text width and must sum to 1
stopifnot(all.equal( sum(colwidths), 1 ) )
if( landscape && continuous ){
stop("using landscape=TRUE and continuous=TRUE is not possible as changing orientation require a new page.")
}
# page and margin dimensions of the reference (default) section, in twips
sdim <- x$sect_dim
h_ref <- sdim$page["height"];w_ref <- sdim$page["width"]
mar_t <- sdim$margins["top"];mar_b <- sdim$margins["bottom"]
mar_r <- sdim$margins["right"];mar_l <- sdim$margins["left"]
mar_h <- sdim$margins["header"];mar_f <- sdim$margins["footer"]
# landscape swaps width/height and rotates the margins accordingly
if( !landscape ){
h <- h_ref
w <- w_ref
mar_top <- mar_t
mar_bottom <- mar_b
mar_right <- mar_r
mar_left <- mar_l
} else {
h <- w_ref
w <- h_ref
mar_top <- mar_r
mar_bottom <- mar_l
mar_right <- mar_t
mar_left <- mar_b
}
# user-supplied margins (inches) override the defaults;
# 20 * 72 converts inches to twips (twentieths of a point)
if (!is.na(margins["top"]) & is.numeric(margins["top"])) { mar_top = margins["top"] * 20 * 72}
if (!is.na(margins["bottom"]) & is.numeric(margins["bottom"])) { mar_bottom = margins["bottom"] * 20 * 72}
if (!is.na(margins["left"]) & is.numeric(margins["left"])) { mar_left = margins["left"] * 20 * 72}
if (!is.na(margins["right"]) & is.numeric(margins["right"])) { mar_right = margins["right"] * 20 * 72}
# assemble the w:pgSz and w:pgMar XML fragments
pgsz_str <- "<w:pgSz %sw:w=\"%.0f\" w:h=\"%.0f\"/>"
pgsz_str <- sprintf(pgsz_str, ifelse( landscape, "w:orient=\"landscape\" ", ""), w, h )
mar_str <- "<w:pgMar w:top=\"%.0f\" w:right=\"%.0f\" w:bottom=\"%.0f\" w:left=\"%.0f\" w:header=\"%.0f\" w:footer=\"%.0f\" w:gutter=\"0\"/>"
mar_str <- sprintf(mar_str, mar_top, mar_right, mar_bottom, mar_left, mar_h, mar_f )
# usable text width; column widths/spacing are fractions of it
width_ <- w - mar_right - mar_left
column_values <- colwidths - space
# every column but the last carries the inter-column spacing
columns_str_all_but_last <- sprintf("<w:col w:w=\"%.0f\" w:space=\"%.0f\"/>",
column_values[-length(column_values)] * width_,
space * width_)
columns_str_last <- sprintf("<w:col w:w=\"%.0f\"/>",
column_values[length(column_values)] * width_)
columns_str <- c(columns_str_all_but_last, columns_str_last)
# multi-column layouts carry w:num and the optional separator line
if( length(colwidths) > 1 )
columns_str <- sprintf("<w:cols w:num=\"%.0f\" w:sep=\"%.0f\" w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
length(colwidths), as.integer(sep), space * w, paste0(columns_str, collapse = "") )
else columns_str <- sprintf("<w:cols w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
space * w, paste0(columns_str, collapse = "") )
# an empty paragraph whose pPr/sectPr closes the section
str <- paste0( wml_with_ns("w:p"),
"<w:pPr><w:sectPr>",
ifelse( continuous, "<w:type w:val=\"continuous\"/>", "" ),
pgsz_str, mar_str, columns_str, "</w:sectPr></w:pPr></w:p>")
body_add_xml(x, str = str, pos = "after")
}# nocov end
#' @export
#' @rdname body_end_section
# Deprecated: overwrite the document's final (default) sectPr with the
# requested orientation/margins.  Excluded from coverage (nocov).
body_default_section <- function(x, landscape = FALSE, margins = c(top = NA, bottom = NA, left = NA, right = NA)){# nocov start
.Deprecated(msg = "body_default_section is deprecated. See ?sections for replacement functions.")
# page and margin dimensions of the current default section, in twips
sdim <- x$sect_dim
h_ref <- sdim$page["height"];w_ref <- sdim$page["width"]
mar_t <- sdim$margins["top"];mar_b <- sdim$margins["bottom"]
mar_r <- sdim$margins["right"];mar_l <- sdim$margins["left"]
mar_h <- sdim$margins["header"];mar_f <- sdim$margins["footer"]
# landscape swaps width/height and rotates the margins accordingly
if( !landscape ){
h <- h_ref
w <- w_ref
mar_top <- mar_t
mar_bottom <- mar_b
mar_right <- mar_r
mar_left <- mar_l
} else {
h <- w_ref
w <- h_ref
mar_top <- mar_r
mar_bottom <- mar_l
mar_right <- mar_t
mar_left <- mar_b
}
# user-supplied margins (inches) override the defaults;
# 20 * 72 converts inches to twips (twentieths of a point)
if (!is.na(margins["top"]) & is.numeric(margins["top"])) { mar_top = margins["top"] * 20 * 72}
if (!is.na(margins["bottom"]) & is.numeric(margins["bottom"])) { mar_bottom = margins["bottom"] * 20 * 72}
if (!is.na(margins["left"]) & is.numeric(margins["left"])) { mar_left = margins["left"] * 20 * 72}
if (!is.na(margins["right"]) & is.numeric(margins["right"])) { mar_right = margins["right"] * 20 * 72}
# assemble the w:pgSz and w:pgMar XML fragments
pgsz_str <- "<w:pgSz %sw:w=\"%.0f\" w:h=\"%.0f\"/>"
pgsz_str <- sprintf(pgsz_str, ifelse( landscape, "w:orient=\"landscape\" ", ""), w, h )
mar_str <- "<w:pgMar w:top=\"%.0f\" w:right=\"%.0f\" w:bottom=\"%.0f\" w:left=\"%.0f\" w:header=\"%.0f\" w:footer=\"%.0f\" w:gutter=\"0\"/>"
mar_str <- sprintf(mar_str, mar_top, mar_right, mar_bottom, mar_left, mar_h, mar_f )
str <- paste0( wml_with_ns("w:sectPr"),
"<w:type w:val=\"continuous\"/>",
pgsz_str, mar_str, "</w:sectPr>")
# replace the trailing sectPr (the document's default section) in place
last_sect <- xml_find_first(x$doc_obj$get(), "/w:document/w:body/w:sectPr[last()]")
xml_replace(last_sect, as_xml_document(str) )
# return the (modified in place) rdocx object for chaining
x
}# nocov end
#' @export
#' @rdname slip_in_column_break
# Deprecated alias: insert a column break before the cursor.
break_column_before <- function( x ){ # nocov start
  .Deprecated(new = "slip_in_column_break")
  # a <w:br w:type="column"/> run pushes following content to a new column
  run_xml <- sprintf("%s<w:br w:type=\"column\"/></w:r>", wml_with_ns("w:r"))
  slip_in_xml(x = x, str = run_xml, pos = "before")
} # nocov end
#' @export
#' @title add a column break
#' @description add a column break into a Word document. A column break
#' is used to add a break in a multi columns section in a Word
#' Document.
#' @param x an rdocx object
#' @param pos where to add the new element relative to the cursor,
#' "after" or "before".
slip_in_column_break <- function( x, pos = "before" ){
  # a <w:br w:type="column"/> run pushes following content to a new column
  break_run <- sprintf("%s<w:br w:type=\"column\"/></w:r>", wml_with_ns("w:r"))
  slip_in_xml(x = x, str = break_run, pos = pos)
}
# new functions ----
#' @title sections
#'
#' @description Add sections in a Word document.
#'
#' @details
#' A section starts at the end of the previous section (or the beginning of
#' the document if no preceding section exists), and stops where the section is declared.
#' @param x an rdocx object
#' @param w,h width and height in inches of the section page. This will
#' be ignored if the default section (of the \code{reference_docx} file)
#' already has a width and a height.
#' @export
#' @rdname sections
#' @name sections
#' @examples
#' library(magrittr)
#'
#' str1 <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " %>%
#' rep(5) %>% paste(collapse = "")
#' str2 <- "Aenean venenatis varius elit et fermentum vivamus vehicula. " %>%
#' rep(5) %>% paste(collapse = "")
#'
#' my_doc <- read_docx() %>%
#' body_add_par(value = "Default section", style = "heading 1") %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_add_par(value = str2, style = "centered") %>%
#'
#' body_end_section_continuous() %>%
#' body_add_par(value = "Landscape section", style = "heading 1") %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_add_par(value = str2, style = "centered") %>%
#' body_end_section_landscape() %>%
#'
#' body_add_par(value = "Columns", style = "heading 1") %>%
#' body_end_section_continuous() %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_add_par(value = str2, style = "centered") %>%
#' slip_in_column_break() %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_end_section_columns(widths = c(2,2), sep = TRUE, space = 1) %>%
#'
#' body_add_par(value = str1, style = "Normal") %>%
#' body_add_par(value = str2, style = "Normal") %>%
#' slip_in_column_break() %>%
#' body_end_section_columns_landscape(widths = c(3,3), sep = TRUE, space = 1)
#'
#' print(my_doc, target = "section.docx")
body_end_section_continuous <- function( x ){
  # Empty paragraph ending a continuous section (no page break).  The
  # <w:officersection/> sentinel is resolved later by process_sections().
  sect_pr <- "<w:pPr><w:sectPr><w:officersection/><w:type w:val=\"continuous\"/></w:sectPr></w:pPr>"
  par_xml <- paste0(wml_with_ns("w:p"), sect_pr, "</w:p>")
  body_add_xml(x, str = par_xml, pos = "after")
}
#' @export
#' @rdname sections
body_end_section_landscape <- function( x, w = 21 / 2.54, h = 29.7 / 2.54 ){
  # Convert inches to twips (1 inch = 72 pt = 1440 twips).
  w = w * 20 * 72
  h = h * 20 * 72
  # Landscape orientation: the page width attribute receives h and the
  # height attribute receives w (dimensions swapped on purpose).
  pgsz_str <- "<w:pgSz w:orient=\"landscape\" w:w=\"%.0f\" w:h=\"%.0f\"/>"
  pgsz_str <- sprintf(pgsz_str, h, w )
  # Empty paragraph closing the section; the <w:officersection/> sentinel
  # is resolved later by process_sections().
  str <- sprintf( "<w:pPr><w:sectPr><w:officersection/>%s</w:sectPr></w:pPr>", pgsz_str)
  str <- paste0( wml_with_ns("w:p"), str, "</w:p>")
  # (fix: removed a leftover `as_xml_document(str)` call whose result was
  # discarded -- it only parsed the XML and threw the document away)
  body_add_xml(x, str = str, pos = "after")
}
#' @export
#' @rdname sections
body_end_section_portrait <- function( x, w = 21 / 2.54, h = 29.7 / 2.54 ){
  # inches -> twips (1 inch = 72 pt = 1440 twips)
  page_w <- w * 20 * 72
  page_h <- h * 20 * 72
  page_size <- sprintf("<w:pgSz w:orient=\"portrait\" w:w=\"%.0f\" w:h=\"%.0f\"/>", page_w, page_h)
  # empty paragraph closing the section; the <w:officersection/> sentinel
  # is resolved later by process_sections()
  sect_pr <- sprintf( "<w:pPr><w:sectPr><w:officersection/>%s</w:sectPr></w:pPr>", page_size)
  body_add_xml(x, str = paste0(wml_with_ns("w:p"), sect_pr, "</w:p>"), pos = "after")
}
#' @export
#' @param widths columns widths in inches. If 3 values, 3 columns
#' will be produced.
#' @param space space in inches between columns.
#' @param sep if TRUE a line is separating columns.
#' @rdname sections
body_end_section_columns <- function(x, widths = c(2.5,2.5), space = .25, sep = FALSE){
  # Fail fast, before doing any work: a multi-column section needs at
  # least two columns (original code validated only after building XML).
  if( length(widths) < 2 )
    stop("length of widths should be at least 2", call. = FALSE)
  # Convert inches to twips (1 inch = 72 pt = 1440 twips).
  widths <- widths * 20 * 72
  space <- space * 20 * 72
  # Every column but the last declares the spacing that follows it.
  columns_str_all_but_last <- sprintf("<w:col w:w=\"%.0f\" w:space=\"%.0f\"/>",
                                      widths[-length(widths)], space)
  columns_str_last <- sprintf("<w:col w:w=\"%.0f\"/>",
                              widths[length(widths)])
  columns_str <- c(columns_str_all_but_last, columns_str_last)
  # Explicit column layout (equalWidth="0"), optional separator line.
  columns_str <- sprintf("<w:cols w:num=\"%.0f\" w:sep=\"%.0f\" w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
                         length(widths), as.integer(sep), space, paste0(columns_str, collapse = "") )
  # Continuous break: no page break; the <w:officersection/> sentinel is
  # resolved later by process_sections().
  str <- paste0( "<w:pPr><w:sectPr><w:officersection/>",
                 "<w:type w:val=\"continuous\"/>",
                 columns_str, "</w:sectPr></w:pPr>")
  str <- paste0( wml_with_ns("w:p"), str, "</w:p>")
  body_add_xml(x, str = str, pos = "after")
}
#' @export
#' @rdname sections
body_end_section_columns_landscape <- function(x, widths = c(2.5,2.5), space = .25, sep = FALSE, w = 21 / 2.54, h = 29.7 / 2.54){
  # Fail fast, before doing any work: a multi-column section needs at
  # least two columns (original code validated only after building XML).
  if( length(widths) < 2 )
    stop("length of widths should be at least 2", call. = FALSE)
  # Convert inches to twips (1 inch = 72 pt = 1440 twips).
  widths <- widths * 20 * 72
  space <- space * 20 * 72
  # Every column but the last declares the spacing that follows it.
  columns_str_all_but_last <- sprintf("<w:col w:w=\"%.0f\" w:space=\"%.0f\"/>",
                                      widths[-length(widths)], space)
  columns_str_last <- sprintf("<w:col w:w=\"%.0f\"/>",
                              widths[length(widths)])
  columns_str <- c(columns_str_all_but_last, columns_str_last)
  columns_str <- sprintf("<w:cols w:num=\"%.0f\" w:sep=\"%.0f\" w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
                         length(widths), as.integer(sep), space, paste0(columns_str, collapse = "") )
  w = w * 20 * 72
  h = h * 20 * 72
  # Landscape: the width attribute receives h and the height receives w.
  pgsz_str <- "<w:pgSz w:orient=\"landscape\" w:w=\"%.0f\" w:h=\"%.0f\"/>"
  pgsz_str <- sprintf(pgsz_str, h, w )
  # The <w:officersection/> sentinel is resolved later by process_sections().
  str <- paste0( "<w:pPr><w:sectPr><w:officersection/>",
                 pgsz_str,
                 columns_str, "</w:sectPr></w:pPr>")
  str <- paste0( wml_with_ns("w:p"), str, "</w:p>")
  body_add_xml(x, str = str, pos = "after")
}
# utils ----
# Internal helper: replace every placeholder section (a w:sectPr holding
# the sentinel <w:officersection/>) with a copy of the document's default
# sectPr, patched with the placeholder's own w:type, w:cols and w:pgSz.
process_sections <- function( x ){
all_nodes <- xml_find_all(x$doc_obj$get(), "//w:sectPr[w:officersection]")
main_sect <- xml_find_first(x$doc_obj$get(), "w:body/w:sectPr")
for(node_id in seq_along(all_nodes) ){
# work on copies so the reference section is never mutated while patching
current_node <- as_xml_document(all_nodes[[node_id]])
new_node <- as_xml_document(main_sect)
# correct type ---
type <- xml_child(current_node, "w:type")
type_ref <- xml_child(new_node, "w:type")
if( !inherits(type, "xml_missing") ){
if( !inherits(type_ref, "xml_missing") )
type_ref <- xml_replace(type_ref, type)
else xml_add_child(new_node, type)
}
# correct cols ---
cols <- xml_child(current_node, "w:cols")
cols_ref <- xml_child(new_node, "w:cols")
if( !inherits(cols, "xml_missing") ){
if( !inherits(cols_ref, "xml_missing") )
cols_ref <- xml_replace(cols_ref, cols)
else xml_add_child(new_node, cols)
}
# correct pgSz ---
# keep the reference page dimensions but re-orient them to match the
# placeholder's w:orient attribute
pgSz <- xml_child(current_node, "w:pgSz")
pgSz_ref <- xml_child(new_node, "w:pgSz")
if( !inherits(pgSz, "xml_missing") ){
if( !inherits(pgSz_ref, "xml_missing") ){
xml_attr(pgSz_ref, "w:orient") <- xml_attr(pgSz, "orient")
wref <- as.integer( xml_attr(pgSz_ref, "w") )
href <- as.integer( xml_attr(pgSz_ref, "h") )
# NOTE(review): if the placeholder pgSz carries no orient attribute,
# neither branch below runs and `w`/`h` are undefined at the
# assignments that follow -- presumably every internal generator sets
# orient; confirm before relying on that path.
if( xml_attr(pgSz, "orient") %in% "portrait" ){
h <- ifelse( wref < href, href, wref )
w <- ifelse( wref < href, wref, href )
} else if( xml_attr(pgSz, "orient") %in% "landscape" ){
w <- ifelse( wref < href, href, wref )
h <- ifelse( wref < href, wref, href )
}
xml_attr(pgSz_ref, "w:w") <- w
xml_attr(pgSz_ref, "w:h") <- h
} else {
xml_add_child(new_node, pgSz)
}
}
node <- xml_replace(all_nodes[[node_id]], new_node)
}
x
}
# Extract page size, orientation and margins (all in twips) from a
# w:sectPr XML node.
section_dimensions <- function(node){
  sect <- as_list(node)
  # small accessor: read an attribute and coerce it to integer twips
  int_attr <- function(el, name) as.integer(attr(el, name))
  orient <- attr(sect$pgSz, "orient")
  is_landscape <- !is.null(orient) && orient == "landscape"
  list(
    page = c("width" = int_attr(sect$pgSz, "w"),
             "height" = int_attr(sect$pgSz, "h")),
    landscape = is_landscape,
    margins = c(top = int_attr(sect$pgMar, "top"),
                bottom = int_attr(sect$pgMar, "bottom"),
                left = int_attr(sect$pgMar, "left"),
                right = int_attr(sect$pgMar, "right"),
                header = int_attr(sect$pgMar, "header"),
                footer = int_attr(sect$pgMar, "footer")))
}
|
/R/docx_section.R
|
no_license
|
fredguinog/officer
|
R
| false
| false
| 16,303
|
r
|
#' @export
#' @title add section
#' @description add a section in a Word document. A section affects
#' preceding paragraphs or tables.
#'
#' @details
#' A section starts at the end of the previous section (or the beginning of
#' the document if no preceding section exists), and stops where the section is declared.
#' The function \code{body_end_section()} is reflecting that Word concept.
#' The function \code{body_default_section()} is only modifying the default section of
#' the document.
#' @importFrom xml2 xml_remove
#' @param x an rdocx object
#' @param landscape landscape orientation
#' @param margins a named vector of margin settings in inches, margins not set remain at their default setting
#' @param colwidths columns widths as percentages, summing to 1. If 3 values, 3 columns
#' will be produced.
#' @param space space in percent between columns.
#' @param sep if TRUE a line is separating columns.
#' @param continuous TRUE for a continuous section break.
#' @note
#' This function is deprecated, use body_end_section_continuous,
#' body_end_section_landscape, body_end_section_portrait,
#' body_end_section_columns or body_end_section_columns_landscape
#' instead.
#' @examples
#' library(magrittr)
#'
#' str1 <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " %>%
#' rep(10) %>% paste(collapse = "")
#'
#' my_doc <- read_docx() %>%
#' # add a paragraph
#' body_add_par(value = str1, style = "Normal") %>%
#' # add a continuous section
#' body_end_section(continuous = TRUE) %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' # preceding paragraph is on a new column
#' slip_in_column_break(pos = "before") %>%
#' # add a two columns continous section
#' body_end_section(colwidths = c(.6, .4),
#' space = .05, sep = FALSE, continuous = TRUE) %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' # add a continuous section ... so far there is no break page
#' body_end_section(continuous = TRUE) %>%
#' body_add_par(value = str1, style = "Normal") %>%
#' body_default_section(landscape = TRUE, margins = c(top = 0.5, bottom = 0.5))
#'
#' print(my_doc, target = "section.docx")
# Deprecated: end a Word section here (a section mark applies to the
# content that precedes it).  Excluded from coverage (nocov).
body_end_section <- function(x, landscape = FALSE, margins = c(top = NA, bottom = NA, left = NA, right = NA),
colwidths = c(1), space = .05, sep = FALSE, continuous = FALSE){# nocov start
.Deprecated(msg = "body_end_section is deprecated. See ?sections for replacement functions.")
# column widths are proportions of the text width and must sum to 1
stopifnot(all.equal( sum(colwidths), 1 ) )
if( landscape && continuous ){
stop("using landscape=TRUE and continuous=TRUE is not possible as changing orientation require a new page.")
}
# page and margin dimensions of the reference (default) section, in twips
sdim <- x$sect_dim
h_ref <- sdim$page["height"];w_ref <- sdim$page["width"]
mar_t <- sdim$margins["top"];mar_b <- sdim$margins["bottom"]
mar_r <- sdim$margins["right"];mar_l <- sdim$margins["left"]
mar_h <- sdim$margins["header"];mar_f <- sdim$margins["footer"]
# landscape swaps width/height and rotates the margins accordingly
if( !landscape ){
h <- h_ref
w <- w_ref
mar_top <- mar_t
mar_bottom <- mar_b
mar_right <- mar_r
mar_left <- mar_l
} else {
h <- w_ref
w <- h_ref
mar_top <- mar_r
mar_bottom <- mar_l
mar_right <- mar_t
mar_left <- mar_b
}
# user-supplied margins (inches) override the defaults;
# 20 * 72 converts inches to twips (twentieths of a point)
if (!is.na(margins["top"]) & is.numeric(margins["top"])) { mar_top = margins["top"] * 20 * 72}
if (!is.na(margins["bottom"]) & is.numeric(margins["bottom"])) { mar_bottom = margins["bottom"] * 20 * 72}
if (!is.na(margins["left"]) & is.numeric(margins["left"])) { mar_left = margins["left"] * 20 * 72}
if (!is.na(margins["right"]) & is.numeric(margins["right"])) { mar_right = margins["right"] * 20 * 72}
# assemble the w:pgSz and w:pgMar XML fragments
pgsz_str <- "<w:pgSz %sw:w=\"%.0f\" w:h=\"%.0f\"/>"
pgsz_str <- sprintf(pgsz_str, ifelse( landscape, "w:orient=\"landscape\" ", ""), w, h )
mar_str <- "<w:pgMar w:top=\"%.0f\" w:right=\"%.0f\" w:bottom=\"%.0f\" w:left=\"%.0f\" w:header=\"%.0f\" w:footer=\"%.0f\" w:gutter=\"0\"/>"
mar_str <- sprintf(mar_str, mar_top, mar_right, mar_bottom, mar_left, mar_h, mar_f )
# usable text width; column widths/spacing are fractions of it
width_ <- w - mar_right - mar_left
column_values <- colwidths - space
# every column but the last carries the inter-column spacing
columns_str_all_but_last <- sprintf("<w:col w:w=\"%.0f\" w:space=\"%.0f\"/>",
column_values[-length(column_values)] * width_,
space * width_)
columns_str_last <- sprintf("<w:col w:w=\"%.0f\"/>",
column_values[length(column_values)] * width_)
columns_str <- c(columns_str_all_but_last, columns_str_last)
# multi-column layouts carry w:num and the optional separator line
if( length(colwidths) > 1 )
columns_str <- sprintf("<w:cols w:num=\"%.0f\" w:sep=\"%.0f\" w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
length(colwidths), as.integer(sep), space * w, paste0(columns_str, collapse = "") )
else columns_str <- sprintf("<w:cols w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
space * w, paste0(columns_str, collapse = "") )
# an empty paragraph whose pPr/sectPr closes the section
str <- paste0( wml_with_ns("w:p"),
"<w:pPr><w:sectPr>",
ifelse( continuous, "<w:type w:val=\"continuous\"/>", "" ),
pgsz_str, mar_str, columns_str, "</w:sectPr></w:pPr></w:p>")
body_add_xml(x, str = str, pos = "after")
}# nocov end
#' @export
#' @rdname body_end_section
# Deprecated: overwrite the document's final (default) sectPr with the
# requested orientation/margins.  Excluded from coverage (nocov).
body_default_section <- function(x, landscape = FALSE, margins = c(top = NA, bottom = NA, left = NA, right = NA)){# nocov start
.Deprecated(msg = "body_default_section is deprecated. See ?sections for replacement functions.")
# page and margin dimensions of the current default section, in twips
sdim <- x$sect_dim
h_ref <- sdim$page["height"];w_ref <- sdim$page["width"]
mar_t <- sdim$margins["top"];mar_b <- sdim$margins["bottom"]
mar_r <- sdim$margins["right"];mar_l <- sdim$margins["left"]
mar_h <- sdim$margins["header"];mar_f <- sdim$margins["footer"]
# landscape swaps width/height and rotates the margins accordingly
if( !landscape ){
h <- h_ref
w <- w_ref
mar_top <- mar_t
mar_bottom <- mar_b
mar_right <- mar_r
mar_left <- mar_l
} else {
h <- w_ref
w <- h_ref
mar_top <- mar_r
mar_bottom <- mar_l
mar_right <- mar_t
mar_left <- mar_b
}
# user-supplied margins (inches) override the defaults;
# 20 * 72 converts inches to twips (twentieths of a point)
if (!is.na(margins["top"]) & is.numeric(margins["top"])) { mar_top = margins["top"] * 20 * 72}
if (!is.na(margins["bottom"]) & is.numeric(margins["bottom"])) { mar_bottom = margins["bottom"] * 20 * 72}
if (!is.na(margins["left"]) & is.numeric(margins["left"])) { mar_left = margins["left"] * 20 * 72}
if (!is.na(margins["right"]) & is.numeric(margins["right"])) { mar_right = margins["right"] * 20 * 72}
# assemble the w:pgSz and w:pgMar XML fragments
pgsz_str <- "<w:pgSz %sw:w=\"%.0f\" w:h=\"%.0f\"/>"
pgsz_str <- sprintf(pgsz_str, ifelse( landscape, "w:orient=\"landscape\" ", ""), w, h )
mar_str <- "<w:pgMar w:top=\"%.0f\" w:right=\"%.0f\" w:bottom=\"%.0f\" w:left=\"%.0f\" w:header=\"%.0f\" w:footer=\"%.0f\" w:gutter=\"0\"/>"
mar_str <- sprintf(mar_str, mar_top, mar_right, mar_bottom, mar_left, mar_h, mar_f )
str <- paste0( wml_with_ns("w:sectPr"),
"<w:type w:val=\"continuous\"/>",
pgsz_str, mar_str, "</w:sectPr>")
# replace the trailing sectPr (the document's default section) in place
last_sect <- xml_find_first(x$doc_obj$get(), "/w:document/w:body/w:sectPr[last()]")
xml_replace(last_sect, as_xml_document(str) )
# return the (modified in place) rdocx object for chaining
x
}# nocov end
#' @export
#' @rdname slip_in_column_break
# Deprecated alias: insert a column break before the cursor.
break_column_before <- function( x ){ # nocov start
  .Deprecated(new = "slip_in_column_break")
  # a <w:br w:type="column"/> run pushes following content to a new column
  run_xml <- sprintf("%s<w:br w:type=\"column\"/></w:r>", wml_with_ns("w:r"))
  slip_in_xml(x = x, str = run_xml, pos = "before")
} # nocov end
#' @export
#' @title add a column break
#' @description add a column break into a Word document. A column break
#' is used to add a break in a multi columns section in a Word
#' Document.
#' @param x an rdocx object
#' @param pos where to add the new element relative to the cursor,
#' "after" or "before".
slip_in_column_break <- function( x, pos = "before" ){
  # a <w:br w:type="column"/> run pushes following content to a new column
  break_run <- sprintf("%s<w:br w:type=\"column\"/></w:r>", wml_with_ns("w:r"))
  slip_in_xml(x = x, str = break_run, pos = pos)
}
# new functions ----
#' @title sections
#'
#' @description Add sections in a Word document.
#'
#' @details
#' A section starts at the end of the previous section (or the beginning of
#' the document if no preceding section exists), and stops where the section is declared.
#' @param x an rdocx object
#' @param w,h width and height in inches of the section page. This will
#' be ignored if the default section (of the \code{reference_docx} file)
#' already has a width and a height.
#' @export
#' @rdname sections
#' @name sections
#' @examples
#' library(magrittr)
#'
#' str1 <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " %>%
#' rep(5) %>% paste(collapse = "")
#' str2 <- "Aenean venenatis varius elit et fermentum vivamus vehicula. " %>%
#' rep(5) %>% paste(collapse = "")
#'
#' my_doc <- read_docx() %>%
#' body_add_par(value = "Default section", style = "heading 1") %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_add_par(value = str2, style = "centered") %>%
#'
#' body_end_section_continuous() %>%
#' body_add_par(value = "Landscape section", style = "heading 1") %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_add_par(value = str2, style = "centered") %>%
#' body_end_section_landscape() %>%
#'
#' body_add_par(value = "Columns", style = "heading 1") %>%
#' body_end_section_continuous() %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_add_par(value = str2, style = "centered") %>%
#' slip_in_column_break() %>%
#' body_add_par(value = str1, style = "centered") %>%
#' body_end_section_columns(widths = c(2,2), sep = TRUE, space = 1) %>%
#'
#' body_add_par(value = str1, style = "Normal") %>%
#' body_add_par(value = str2, style = "Normal") %>%
#' slip_in_column_break() %>%
#' body_end_section_columns_landscape(widths = c(3,3), sep = TRUE, space = 1)
#'
#' print(my_doc, target = "section.docx")
body_end_section_continuous <- function( x ){
  # Empty paragraph ending a continuous section (no page break).  The
  # <w:officersection/> sentinel is resolved later by process_sections().
  sect_pr <- "<w:pPr><w:sectPr><w:officersection/><w:type w:val=\"continuous\"/></w:sectPr></w:pPr>"
  par_xml <- paste0(wml_with_ns("w:p"), sect_pr, "</w:p>")
  body_add_xml(x, str = par_xml, pos = "after")
}
#' @export
#' @rdname sections
body_end_section_landscape <- function( x, w = 21 / 2.54, h = 29.7 / 2.54 ){
  # Convert inches to twips (1 inch = 72 pt = 1440 twips).
  w = w * 20 * 72
  h = h * 20 * 72
  # Landscape orientation: the page width attribute receives h and the
  # height attribute receives w (dimensions swapped on purpose).
  pgsz_str <- "<w:pgSz w:orient=\"landscape\" w:w=\"%.0f\" w:h=\"%.0f\"/>"
  pgsz_str <- sprintf(pgsz_str, h, w )
  # Empty paragraph closing the section; the <w:officersection/> sentinel
  # is resolved later by process_sections().
  str <- sprintf( "<w:pPr><w:sectPr><w:officersection/>%s</w:sectPr></w:pPr>", pgsz_str)
  str <- paste0( wml_with_ns("w:p"), str, "</w:p>")
  # (fix: removed a leftover `as_xml_document(str)` call whose result was
  # discarded -- it only parsed the XML and threw the document away)
  body_add_xml(x, str = str, pos = "after")
}
#' @export
#' @rdname sections
body_end_section_portrait <- function( x, w = 21 / 2.54, h = 29.7 / 2.54 ){
  # inches -> twips (1 inch = 72 pt = 1440 twips)
  page_w <- w * 20 * 72
  page_h <- h * 20 * 72
  page_size <- sprintf("<w:pgSz w:orient=\"portrait\" w:w=\"%.0f\" w:h=\"%.0f\"/>", page_w, page_h)
  # empty paragraph closing the section; the <w:officersection/> sentinel
  # is resolved later by process_sections()
  sect_pr <- sprintf( "<w:pPr><w:sectPr><w:officersection/>%s</w:sectPr></w:pPr>", page_size)
  body_add_xml(x, str = paste0(wml_with_ns("w:p"), sect_pr, "</w:p>"), pos = "after")
}
#' @export
#' @param widths columns widths in inches. If 3 values, 3 columns
#' will be produced.
#' @param space space in inches between columns.
#' @param sep if TRUE a line is separating columns.
#' @rdname sections
body_end_section_columns <- function(x, widths = c(2.5,2.5), space = .25, sep = FALSE){
  # Fail fast, before doing any work: a multi-column section needs at
  # least two columns (original code validated only after building XML).
  if( length(widths) < 2 )
    stop("length of widths should be at least 2", call. = FALSE)
  # Convert inches to twips (1 inch = 72 pt = 1440 twips).
  widths <- widths * 20 * 72
  space <- space * 20 * 72
  # Every column but the last declares the spacing that follows it.
  columns_str_all_but_last <- sprintf("<w:col w:w=\"%.0f\" w:space=\"%.0f\"/>",
                                      widths[-length(widths)], space)
  columns_str_last <- sprintf("<w:col w:w=\"%.0f\"/>",
                              widths[length(widths)])
  columns_str <- c(columns_str_all_but_last, columns_str_last)
  # Explicit column layout (equalWidth="0"), optional separator line.
  columns_str <- sprintf("<w:cols w:num=\"%.0f\" w:sep=\"%.0f\" w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
                         length(widths), as.integer(sep), space, paste0(columns_str, collapse = "") )
  # Continuous break: no page break; the <w:officersection/> sentinel is
  # resolved later by process_sections().
  str <- paste0( "<w:pPr><w:sectPr><w:officersection/>",
                 "<w:type w:val=\"continuous\"/>",
                 columns_str, "</w:sectPr></w:pPr>")
  str <- paste0( wml_with_ns("w:p"), str, "</w:p>")
  body_add_xml(x, str = str, pos = "after")
}
#' @export
#' @rdname sections
body_end_section_columns_landscape <- function(x, widths = c(2.5,2.5), space = .25, sep = FALSE, w = 21 / 2.54, h = 29.7 / 2.54){
  # Fail fast, before doing any work: a multi-column section needs at
  # least two columns (original code validated only after building XML).
  if( length(widths) < 2 )
    stop("length of widths should be at least 2", call. = FALSE)
  # Convert inches to twips (1 inch = 72 pt = 1440 twips).
  widths <- widths * 20 * 72
  space <- space * 20 * 72
  # Every column but the last declares the spacing that follows it.
  columns_str_all_but_last <- sprintf("<w:col w:w=\"%.0f\" w:space=\"%.0f\"/>",
                                      widths[-length(widths)], space)
  columns_str_last <- sprintf("<w:col w:w=\"%.0f\"/>",
                              widths[length(widths)])
  columns_str <- c(columns_str_all_but_last, columns_str_last)
  columns_str <- sprintf("<w:cols w:num=\"%.0f\" w:sep=\"%.0f\" w:space=\"%.0f\" w:equalWidth=\"0\">%s</w:cols>",
                         length(widths), as.integer(sep), space, paste0(columns_str, collapse = "") )
  w = w * 20 * 72
  h = h * 20 * 72
  # Landscape: the width attribute receives h and the height receives w.
  pgsz_str <- "<w:pgSz w:orient=\"landscape\" w:w=\"%.0f\" w:h=\"%.0f\"/>"
  pgsz_str <- sprintf(pgsz_str, h, w )
  # The <w:officersection/> sentinel is resolved later by process_sections().
  str <- paste0( "<w:pPr><w:sectPr><w:officersection/>",
                 pgsz_str,
                 columns_str, "</w:sectPr></w:pPr>")
  str <- paste0( wml_with_ns("w:p"), str, "</w:p>")
  body_add_xml(x, str = str, pos = "after")
}
# utils ----
# Internal helper: replace every placeholder section (a w:sectPr holding
# the sentinel <w:officersection/>) with a copy of the document's default
# sectPr, patched with the placeholder's own w:type, w:cols and w:pgSz.
process_sections <- function( x ){
all_nodes <- xml_find_all(x$doc_obj$get(), "//w:sectPr[w:officersection]")
main_sect <- xml_find_first(x$doc_obj$get(), "w:body/w:sectPr")
for(node_id in seq_along(all_nodes) ){
# work on copies so the reference section is never mutated while patching
current_node <- as_xml_document(all_nodes[[node_id]])
new_node <- as_xml_document(main_sect)
# correct type ---
type <- xml_child(current_node, "w:type")
type_ref <- xml_child(new_node, "w:type")
if( !inherits(type, "xml_missing") ){
if( !inherits(type_ref, "xml_missing") )
type_ref <- xml_replace(type_ref, type)
else xml_add_child(new_node, type)
}
# correct cols ---
cols <- xml_child(current_node, "w:cols")
cols_ref <- xml_child(new_node, "w:cols")
if( !inherits(cols, "xml_missing") ){
if( !inherits(cols_ref, "xml_missing") )
cols_ref <- xml_replace(cols_ref, cols)
else xml_add_child(new_node, cols)
}
# correct pgSz ---
# keep the reference page dimensions but re-orient them to match the
# placeholder's w:orient attribute
pgSz <- xml_child(current_node, "w:pgSz")
pgSz_ref <- xml_child(new_node, "w:pgSz")
if( !inherits(pgSz, "xml_missing") ){
if( !inherits(pgSz_ref, "xml_missing") ){
xml_attr(pgSz_ref, "w:orient") <- xml_attr(pgSz, "orient")
wref <- as.integer( xml_attr(pgSz_ref, "w") )
href <- as.integer( xml_attr(pgSz_ref, "h") )
# NOTE(review): if the placeholder pgSz carries no orient attribute,
# neither branch below runs and `w`/`h` are undefined at the
# assignments that follow -- presumably every internal generator sets
# orient; confirm before relying on that path.
if( xml_attr(pgSz, "orient") %in% "portrait" ){
h <- ifelse( wref < href, href, wref )
w <- ifelse( wref < href, wref, href )
} else if( xml_attr(pgSz, "orient") %in% "landscape" ){
w <- ifelse( wref < href, href, wref )
h <- ifelse( wref < href, wref, href )
}
xml_attr(pgSz_ref, "w:w") <- w
xml_attr(pgSz_ref, "w:h") <- h
} else {
xml_add_child(new_node, pgSz)
}
}
node <- xml_replace(all_nodes[[node_id]], new_node)
}
x
}
# Extract page geometry from a <w:sectPr> section-properties node.
#
# node : an xml node holding section properties.
# Returns a list with `page` (named width/height), `landscape` (logical)
# and `margins` (named vector: top, bottom, left, right, header, footer).
# All lengths are reported in the document's native units (twips).
section_dimensions <- function(node){
  props <- as_list(node)
  pg_sz <- props$pgSz
  pg_mar <- props$pgMar

  # Orientation defaults to portrait when the attribute is absent.
  orient <- attr(pg_sz, "orient")
  is_landscape <- !is.null(orient) && orient == "landscape"

  page_w <- as.integer(attr(pg_sz, "w"))
  page_h <- as.integer(attr(pg_sz, "h"))

  list( page = c("width" = page_w, "height" = page_h),
        landscape = is_landscape,
        margins = c(top = as.integer(attr(pg_mar, "top")),
                    bottom = as.integer(attr(pg_mar, "bottom")),
                    left = as.integer(attr(pg_mar, "left")),
                    right = as.integer(attr(pg_mar, "right")),
                    header = as.integer(attr(pg_mar, "header")),
                    footer = as.integer(attr(pg_mar, "footer"))) )
}
|
# Manipulating data with dplyr and tidyr
# library packages
library(dplyr)    # data manipulation functions, akin to manual filtering, reordering, calculation
library(tidyr)    # reshaping data functions
library(readr)    # reading and writing csvs
library(udunits2) # unit conversions

# read in data
# read_csv (readr) is: 1) faster than read.csv 2) leaves strings as
# character -- it does NOT convert them to factors (comment corrected)
surveys <- read.csv("data_raw/portal_data_joined.csv")

# inspect structure and dimensions
str(surveys)
head(surveys)
nrow(surveys); ncol(surveys)
View(surveys)

# Subsetting by rows (filter) and column (select)
filter(surveys, year == 1995)
select(surveys, month, species, genus)
select(surveys, -record_id, -day)

surveys_sml <- surveys %>%
  filter(weight < 5) %>%
  select(species_id, sex, weight)

# Adding a calculated column (mutate)
surveys %>%
  mutate(weight_kg = weight/1000) # original units g

surveys %>%   # FIX: was `survey`, an undefined object
  filter(!is.na(weight)) %>%
  select(weight) %>%
  mutate(weight_kg = ud.convert(weight, "g", "kg")) # original units g

# split/apply/combine paradigm: per sex-by-species summary statistics
surveys %>% group_by(sex, species_id) %>%
  filter(!is.na(weight), !is.na(sex)) %>%
  summarize(mean_weight = mean(weight),
            sd_weight = sd(weight),   # FIX: missing comma here was a syntax error
            n = n())

# Counting, count(), n()
count(surveys, species, sex)
|
/scripts/manipulating_data.R
|
no_license
|
fanelson/SEEDS-Critical-Skills
|
R
| false
| false
| 1,229
|
r
|
# Manipulating data with dplyr and tidyr
# library packages
library(dplyr)    # data manipulation functions, akin to manual filtering, reordering, calculation
library(tidyr)    # reshaping data functions
library(readr)    # reading and writing csvs
library(udunits2) # unit conversions

# read in data
# read_csv (readr) is: 1) faster than read.csv 2) leaves strings as
# character -- it does NOT convert them to factors (comment corrected)
surveys <- read.csv("data_raw/portal_data_joined.csv")

# inspect structure and dimensions
str(surveys)
head(surveys)
nrow(surveys); ncol(surveys)
View(surveys)

# Subsetting by rows (filter) and column (select)
filter(surveys, year == 1995)
select(surveys, month, species, genus)
select(surveys, -record_id, -day)

surveys_sml <- surveys %>%
  filter(weight < 5) %>%
  select(species_id, sex, weight)

# Adding a calculated column (mutate)
surveys %>%
  mutate(weight_kg = weight/1000) # original units g

surveys %>%   # FIX: was `survey`, an undefined object
  filter(!is.na(weight)) %>%
  select(weight) %>%
  mutate(weight_kg = ud.convert(weight, "g", "kg")) # original units g

# split/apply/combine paradigm: per sex-by-species summary statistics
surveys %>% group_by(sex, species_id) %>%
  filter(!is.na(weight), !is.na(sex)) %>%
  summarize(mean_weight = mean(weight),
            sd_weight = sd(weight),   # FIX: missing comma here was a syntax error
            n = n())

# Counting, count(), n()
count(surveys, species, sex)
|
library(readr)
# Linearly interpolate cumulative risk at whole-number ages from a
# digitized (scanned) risk curve.
#
# input_filename  : CSV with two unnamed columns; X1 = age (possibly
#                   fractional, assumed sorted increasing), X2 = risk.
# output_filename : destination CSV with columns `age` and `risk`.
#
# For each unique integer age, the risk is taken directly at an exact
# match, otherwise interpolated linearly between the two surrounding
# scanned points.
extract_value <- function(input_filename, output_filename){
  scanned_risk <- read_csv(input_filename, col_names = F)
  age <- unique(round(scanned_risk$X1))
  risk_1_year <- data.frame(age = age, risk = 0)
  # Round scanned ages to 2 decimals so exact equality tests are meaningful.
  scanned_risk$X1 <- as.numeric(format(round(scanned_risk$X1, 2), nsmall = 2))
  for (i in risk_1_year$age){
    for (j in seq_len(nrow(scanned_risk))){
      if (i > scanned_risk$X1[j]){
        next
      }
      else if (i == scanned_risk$X1[j]){
        risk_1_year$risk[risk_1_year$age == i] <- scanned_risk$X2[j]
        break
      }
      else if (j == 1){
        # FIX: an age that rounds below the first scanned point used to
        # index row 0 and error out; clamp to the first observed risk.
        risk_1_year$risk[risk_1_year$age == i] <- scanned_risk$X2[1]
        break
      }
      else {
        # Linear interpolation between scanned points j-1 and j.
        risk_1_year$risk[risk_1_year$age == i] <- (scanned_risk$X2[j] - scanned_risk$X2[j-1]) /
          (scanned_risk$X1[j] - scanned_risk$X1[j-1]) * (i - scanned_risk$X1[j-1]) +
          scanned_risk$X2[j-1]
        break
      }
    }
  }
  write_csv(risk_1_year, output_filename, col_names = T)
}
|
/LiteratureReview/MSH2/colorectal/Final Paper/extract_value.R
|
no_license
|
joseivm/NLP_Project
|
R
| false
| false
| 894
|
r
|
library(readr)
# Linearly interpolate cumulative risk at whole-number ages from a
# digitized (scanned) risk curve.
#
# input_filename  : CSV with two unnamed columns; X1 = age (possibly
#                   fractional, assumed sorted increasing), X2 = risk.
# output_filename : destination CSV with columns `age` and `risk`.
#
# For each unique integer age, the risk is taken directly at an exact
# match, otherwise interpolated linearly between the two surrounding
# scanned points.
extract_value <- function(input_filename, output_filename){
  scanned_risk <- read_csv(input_filename, col_names = F)
  age <- unique(round(scanned_risk$X1))
  risk_1_year <- data.frame(age = age, risk = 0)
  # Round scanned ages to 2 decimals so exact equality tests are meaningful.
  scanned_risk$X1 <- as.numeric(format(round(scanned_risk$X1, 2), nsmall = 2))
  for (i in risk_1_year$age){
    for (j in seq_len(nrow(scanned_risk))){
      if (i > scanned_risk$X1[j]){
        next
      }
      else if (i == scanned_risk$X1[j]){
        risk_1_year$risk[risk_1_year$age == i] <- scanned_risk$X2[j]
        break
      }
      else if (j == 1){
        # FIX: an age that rounds below the first scanned point used to
        # index row 0 and error out; clamp to the first observed risk.
        risk_1_year$risk[risk_1_year$age == i] <- scanned_risk$X2[1]
        break
      }
      else {
        # Linear interpolation between scanned points j-1 and j.
        risk_1_year$risk[risk_1_year$age == i] <- (scanned_risk$X2[j] - scanned_risk$X2[j-1]) /
          (scanned_risk$X1[j] - scanned_risk$X1[j-1]) * (i - scanned_risk$X1[j-1]) +
          scanned_risk$X2[j-1]
        break
      }
    }
  }
  write_csv(risk_1_year, output_filename, col_names = T)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qualitycheck_lotes.R
\name{qualitycheck_lotes}
\alias{qualitycheck_lotes}
\title{qualitycheck_lotes}
\usage{
qualitycheck_lotes(Producto, Lista_lotes, Lista_fechas)
}
\arguments{
\item{Producto}{A character of the name of the product. Ej: "Aspirin"}
\item{Lista_lotes}{A character vector with the ID of the Lote}
\item{Lista_fechas}{A character vector of dates in numeric day-month-year format; e.g.: "01-08-2019"}
}
\value{
A .csv document with the dates of all lots
}
\description{
qualitycheck_lotes
}
\examples{
b <- qualitycheck_lotes("Aspirina", c("1a","2b","3c","4c"), c("10-09-2019", "20-03-2019","30-03-2019", "30-11-2019"))
}
|
/man/qualitycheck_lotes.Rd
|
no_license
|
Erickcufe/qualitycheck
|
R
| false
| true
| 723
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qualitycheck_lotes.R
\name{qualitycheck_lotes}
\alias{qualitycheck_lotes}
\title{qualitycheck_lotes}
\usage{
qualitycheck_lotes(Producto, Lista_lotes, Lista_fechas)
}
\arguments{
\item{Producto}{A character of the name of the product. Ej: "Aspirin"}
\item{Lista_lotes}{A character vector with the ID of the Lote}
\item{Lista_fechas}{A character vector of dates in numeric day-month-year format; e.g.: "01-08-2019"}
}
\value{
A .csv document with the dates of all lots
}
\description{
qualitycheck_lotes
}
\examples{
b <- qualitycheck_lotes("Aspirina", c("1a","2b","3c","4c"), c("10-09-2019", "20-03-2019","30-03-2019", "30-11-2019"))
}
|
# Exploratory analysis of the movie_profit TidyTuesday dataset
# (2018-10-23): distributor/genre counts, release-time trends, and
# budget-vs-gross scatter plots.  The script deliberately shows
# intermediate "wrong" plots before fixing them (teaching script).
library(tidyverse)
data <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2018-10-23/movie_profit.csv")
glimpse(data)
# share of movies per genre
data %>%
  group_by(genre) %>%
  summarise(numero = n()) %>%
  mutate(p = numero/sum(numero))
data %>%
  count(genre)
data %>%
  count(distributor, sort = TRUE) %>%
  mutate(
    p = n/sum(n),
    pcum = cumsum(p)
  ) %>%
  View()
# keep the 12 most frequent distributors, lump the rest into "Other"
data <- data %>%
  mutate(
    distributor2 = fct_lump(distributor, n = 12)
  )
data %>%
  count(distributor2, sort = TRUE) %>%
  mutate(
    p = n/sum(n),
    pcum = cumsum(p)
  ) %>%
  View()
# raw counts per distributor/genre combination
data %>%
  count(distributor2, genre) %>%
  ggplot(aes(genre, distributor2, fill = n)) +
  geom_tile()
# same heatmap but with within-distributor proportions
data %>%
  count(distributor2, genre) %>%
  group_by(distributor2) %>%
  mutate(p = n/sum(n), pcum = cumsum(p)) %>%
  ggplot(aes(genre, distributor2, fill = p)) +
  geom_tile()
data %>%
  filter(str_detect(distributor, "Disney")) %>%
  filter(genre == "Horror") %>%
  View()
data
# time series ------------------------------------------------------------
# counts by genre over time
dgt <- data %>%
  count(release_date, genre)
# NOTE(review): release_date is still character here and group = 1 draws
# a single zigzag line -- intentionally wrong, fixed after mdy() below.
ggplot(dgt) +
  geom_line(aes(release_date, n, color = genre, group = 1))
library(lubridate)
# parse month/day/year strings into proper Date values
data <- data %>%
  mutate(
    release_date = mdy(release_date)
  )
data
dgt <- data %>%
  count(release_date, genre)
ggplot(dgt) +
  geom_line(aes(release_date, n, color = genre, group = 1))
data <- data %>%
  mutate(release_year = year(release_date))
dgt <- data %>%
  count(release_year, genre)
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = 1))
# one line per genre once group = genre is used
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = genre))
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B")
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B") +
  scale_x_continuous(limits = c(1985, NA))
# NOTE(review): lubridate's rollback() returns the last day of the
# *previous* month by default -- confirm this monthly bucketing is
# what was intended.
data <- data %>%
  mutate(release_month = rollback(release_date))
dgt <- data %>%
  count(release_month, genre)
ggplot(dgt) +
  geom_line(aes(release_month, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B")
# seasonality: labelled month factor (Jan..Dec)
data <- data %>%
  mutate(month = month(release_date, label = TRUE))
data
dgt <- data %>%
  count(month, genre)
ggplot(dgt) +
  geom_line(aes(month, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B")
# scatter -----------------------------------------------------------------
data
# scales provides the percent()/dollar() axis-label formatters
library(scales)
percent(0.45)
dollar(2450003)
ggplot(data) +
  geom_point(aes(production_budget, domestic_gross))
# identity line: points above it grossed more worldwide than their budget
ggplot(data) +
  geom_point(aes(production_budget, worldwide_gross)) +
  geom_abline(slope = 1, intercept = 0, size = 2, color = "red")
# quick interactive exploration?  (`label` triggers an "unknown
# aesthetic" warning in ggplot but feeds the plotly tooltip)
ggplot(data) +
  geom_point(aes(production_budget, worldwide_gross, label = movie)) +
  geom_abline(slope = 1, intercept = 0, size = 2, color = "red")
plotly::ggplotly()
ggplot(data) +
  geom_point(aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_sqrt(labels = dollar, limits = c(0, NA)) +
  scale_y_sqrt(labels = dollar, limits = c(0, NA))
p <- ggplot(data) +
  geom_point(aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA))
p
# highlight-worthy movies: very high gross or very high budget
datarel <- data %>%
  filter(domestic_gross >= 4e8 | production_budget >= 175e6)
datarel
p +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred")
# NOTE(review): size = 3.53333333 looks accidental -- consider rounding
p +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text(aes(production_budget, domestic_gross, label = movie),
            data = datarel, size = 3)
library(ggrepel)
# NOTE(review): this bare call just prints an empty layer object;
# likely leftover from exploring the help page.
geom_text_repel()
# ggrepel demo on mtcars (from the package examples)
p <- ggplot(mtcars,
            aes(wt, mpg, label = rownames(mtcars), colour = factor(cyl))) +
  geom_point()
p
p + geom_text()
# Avoid overlaps by repelling text labels
p + geom_text_repel()
# Labels with background
p + geom_label_repel()
ggplot() +
  geom_point(data = data, aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA)) +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text_repel(aes(production_budget, domestic_gross, label = movie),
                  data = datarel) +
  theme_minimal()
ggplot() +
  geom_point(data = data, aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA)) +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text_repel(aes(production_budget, domestic_gross, label = movie),
                  data = datarel) +
  theme_minimal() +
  facet_wrap(vars(genre))
# top 5 domestic grosses within each genre
datarel <- data %>%
  arrange(desc(domestic_gross)) %>%
  group_by(genre) %>%
  filter(row_number()<=5)
datarel
ggplot() +
  geom_point(data = data, aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA)) +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text_repel(aes(production_budget, domestic_gross, label = movie),
                  data = datarel) +
  theme_minimal() +
  facet_wrap(vars(genre))
|
/clase-17/script.R
|
no_license
|
jbkunst/usach-ingemat-intro-elementos-ds-201802
|
R
| false
| false
| 5,726
|
r
|
# Exploratory analysis of the movie_profit TidyTuesday dataset
# (2018-10-23): distributor/genre counts, release-time trends, and
# budget-vs-gross scatter plots.  The script deliberately shows
# intermediate "wrong" plots before fixing them (teaching script).
library(tidyverse)
data <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2018-10-23/movie_profit.csv")
glimpse(data)
# share of movies per genre
data %>%
  group_by(genre) %>%
  summarise(numero = n()) %>%
  mutate(p = numero/sum(numero))
data %>%
  count(genre)
data %>%
  count(distributor, sort = TRUE) %>%
  mutate(
    p = n/sum(n),
    pcum = cumsum(p)
  ) %>%
  View()
# keep the 12 most frequent distributors, lump the rest into "Other"
data <- data %>%
  mutate(
    distributor2 = fct_lump(distributor, n = 12)
  )
data %>%
  count(distributor2, sort = TRUE) %>%
  mutate(
    p = n/sum(n),
    pcum = cumsum(p)
  ) %>%
  View()
# raw counts per distributor/genre combination
data %>%
  count(distributor2, genre) %>%
  ggplot(aes(genre, distributor2, fill = n)) +
  geom_tile()
# same heatmap but with within-distributor proportions
data %>%
  count(distributor2, genre) %>%
  group_by(distributor2) %>%
  mutate(p = n/sum(n), pcum = cumsum(p)) %>%
  ggplot(aes(genre, distributor2, fill = p)) +
  geom_tile()
data %>%
  filter(str_detect(distributor, "Disney")) %>%
  filter(genre == "Horror") %>%
  View()
data
# time series ------------------------------------------------------------
# counts by genre over time
dgt <- data %>%
  count(release_date, genre)
# NOTE(review): release_date is still character here and group = 1 draws
# a single zigzag line -- intentionally wrong, fixed after mdy() below.
ggplot(dgt) +
  geom_line(aes(release_date, n, color = genre, group = 1))
library(lubridate)
# parse month/day/year strings into proper Date values
data <- data %>%
  mutate(
    release_date = mdy(release_date)
  )
data
dgt <- data %>%
  count(release_date, genre)
ggplot(dgt) +
  geom_line(aes(release_date, n, color = genre, group = 1))
data <- data %>%
  mutate(release_year = year(release_date))
dgt <- data %>%
  count(release_year, genre)
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = 1))
# one line per genre once group = genre is used
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = genre))
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B")
ggplot(dgt) +
  geom_line(aes(release_year, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B") +
  scale_x_continuous(limits = c(1985, NA))
# NOTE(review): lubridate's rollback() returns the last day of the
# *previous* month by default -- confirm this monthly bucketing is
# what was intended.
data <- data %>%
  mutate(release_month = rollback(release_date))
dgt <- data %>%
  count(release_month, genre)
ggplot(dgt) +
  geom_line(aes(release_month, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B")
# seasonality: labelled month factor (Jan..Dec)
data <- data %>%
  mutate(month = month(release_date, label = TRUE))
data
dgt <- data %>%
  count(month, genre)
ggplot(dgt) +
  geom_line(aes(month, n, color = genre, group = genre), size = 1.2) +
  scale_color_viridis_d(option = "B")
# scatter -----------------------------------------------------------------
data
# scales provides the percent()/dollar() axis-label formatters
library(scales)
percent(0.45)
dollar(2450003)
ggplot(data) +
  geom_point(aes(production_budget, domestic_gross))
# identity line: points above it grossed more worldwide than their budget
ggplot(data) +
  geom_point(aes(production_budget, worldwide_gross)) +
  geom_abline(slope = 1, intercept = 0, size = 2, color = "red")
# quick interactive exploration?  (`label` triggers an "unknown
# aesthetic" warning in ggplot but feeds the plotly tooltip)
ggplot(data) +
  geom_point(aes(production_budget, worldwide_gross, label = movie)) +
  geom_abline(slope = 1, intercept = 0, size = 2, color = "red")
plotly::ggplotly()
ggplot(data) +
  geom_point(aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_sqrt(labels = dollar, limits = c(0, NA)) +
  scale_y_sqrt(labels = dollar, limits = c(0, NA))
p <- ggplot(data) +
  geom_point(aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA))
p
# highlight-worthy movies: very high gross or very high budget
datarel <- data %>%
  filter(domestic_gross >= 4e8 | production_budget >= 175e6)
datarel
p +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred")
# NOTE(review): size = 3.53333333 looks accidental -- consider rounding
p +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text(aes(production_budget, domestic_gross, label = movie),
            data = datarel, size = 3)
library(ggrepel)
# NOTE(review): this bare call just prints an empty layer object;
# likely leftover from exploring the help page.
geom_text_repel()
# ggrepel demo on mtcars (from the package examples)
p <- ggplot(mtcars,
            aes(wt, mpg, label = rownames(mtcars), colour = factor(cyl))) +
  geom_point()
p
p + geom_text()
# Avoid overlaps by repelling text labels
p + geom_text_repel()
# Labels with background
p + geom_label_repel()
ggplot() +
  geom_point(data = data, aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA)) +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text_repel(aes(production_budget, domestic_gross, label = movie),
                  data = datarel) +
  theme_minimal()
ggplot() +
  geom_point(data = data, aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA)) +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text_repel(aes(production_budget, domestic_gross, label = movie),
                  data = datarel) +
  theme_minimal() +
  facet_wrap(vars(genre))
# top 5 domestic grosses within each genre
datarel <- data %>%
  arrange(desc(domestic_gross)) %>%
  group_by(genre) %>%
  filter(row_number()<=5)
datarel
ggplot() +
  geom_point(data = data, aes(production_budget, domestic_gross), alpha = 0.3) +
  scale_x_continuous(labels = dollar, limits = c(0, NA)) +
  scale_y_continuous(labels = dollar, limits = c(0, NA)) +
  geom_point(aes(production_budget, domestic_gross),
             color = "darkred", size = 3.53333333,
             data = datarel) +
  geom_text_repel(aes(production_budget, domestic_gross, label = movie),
                  data = datarel) +
  theme_minimal() +
  facet_wrap(vars(genre))
|
library(scater)
library(DropletUtils)
library(Matrix)
library(Seurat)
library(stringr)
library(SingleCellExperiment)
library(mvoutlier)
library(limma)
library(ggplot2)
library(ggpmisc)

# QC pipeline for a 10x single-cell run: load the filtered .h5 count
# matrix, attach per-library metadata from sidecar CSV files, flag
# outlier cells (total counts, detected features, mitochondrial
# fraction), and save the filtered SingleCellExperiment object.

#load the dataset
raw_folder_address <- readRDS(file="10x_raw_folder_address.rds")
keyword <- readRDS(file = "keyword.rds")
scater_file_address <- readRDS(file = "scater_file_address.rds")
#convert 10x output folder into a scater object, by inputting the folder that contains the matrix.mtx file and two .tsv file.
raw_h5 <- list.files(path = raw_folder_address, pattern = "filtered.h5")
data <- Read10X_h5(paste0(raw_folder_address, raw_h5))
so_obj <- CreateSeuratObject(counts = data, min.cells = 3, min.features = 1000)
scater_object <- as.SingleCellExperiment(so_obj)
# raw_h5 <- list.files(path = raw_folder_address, pattern = "filtered.h5")
# scater_object_h5 <- read10xCounts(samples = paste0(raw_folder_address, raw_h5), type = "auto")
scater_object <- calculateQCMetrics(scater_object)
#keep_cells <- scater_object$total_counts > 1000
#keep_cells <- colSums(counts(scater_object))>=1000
#scater_object<-scater_object[,keep_cells]
scater_object_colnames <- colnames(scater_object)
# prefix barcodes with the library keyword so merged objects stay unique
colnames(scater_object) <- paste(keyword,colnames(scater_object),sep="_")
rownames(scater_object)<-make.unique(rownames(scater_object),sep="_")
#extract the library id attached to cell names
#offset to the library_id is calculated based on the length of the keyword, as
library_id <- as.integer(str_extract(scater_object_colnames,"[0-9]+"))
# load csv files in the folder.
# FIX: list.files(pattern=) expects a regular expression, not a glob;
# the old "*.csv" also matched any file name merely containing "csv".
files <- list.files(path=raw_folder_address, pattern="\\.csv$", full.names=F, recursive=FALSE)
#for each csv files, load the information and add it to the colData of the scater_object
for(i in files){
  # FIX: strip the ".csv" extension with an anchored regex; the old
  # pattern "^([^.]*).csv" failed on file names containing extra dots.
  name <- sub("\\.csv$", "", i)
  file <- as.character(read.csv(paste(raw_folder_address,i,sep=""))[,1])
  colData(scater_object)[,name] <- file[library_id]
}
# colData(scater_object)$condition <- sub("_r[1-2]", "", keyword)
# colData(scater_object)$library_id <- keyword
# colData(scater_object)$sample_id <- sub("_r[1-2].*", "", keyword)
# keep_feature <- rowSums(counts(scater_object) > 0)> 0
# keep_feature <- nexprs(scater_object, byrow=T) > 0
# scater_object_filtered <- scater_object[keep_feature, ]
# treat mitochondrial genes as control features for the QC metrics
isSpike(scater_object, "MT") <- grepl("^MT-", rownames(scater_object))
#calculate automatic QC metrics to filter out ones with an Median Absolute Deviation greater than 3
scater_object <- scater::calculateQCMetrics(scater_object, use_spikes = T, exprs_value = 'counts')
#colData(scater_object)$total_features_by_counts<-colData(scater_object)$total_features_by_counts
#Current version of calculateQCMetrics does not generate automatic filter for total_counts and total_features_by_counts, use isOutlier to create these filter_ fields
scater_object$filter_on_total_counts <- isOutlier(scater_object$total_counts, nmads=3,
                                                  type="both", log=T)
scater_object$filter_on_total_features <- isOutlier(scater_object$total_features_by_counts, nmads=3,
                                                    type="lower", log=F)
scater_object$filter_on_pct_counts_MT<- isOutlier(scater_object$pct_counts_MT, nmads=3,
                                                  type="higher", log=F)
#setup manual filters of 3MAD on genes and counts: keep cells passing all
scater_object$use <- (!scater_object$filter_on_total_features & !scater_object$filter_on_total_counts&!scater_object$filter_on_pct_counts_MT)
# NOTE(review): `use` here holds row *names* (all genes kept), so the
# final subsetting below keeps every gene -- confirm this is intended.
rowData(scater_object)$use <- row.names(scater_object)
#summary(scater_object$use)
#output use filter
scater_filter_use <- summary(scater_object$use)
#run automatic filters based on default parameters without total_features_feature_control
parameters<-c("pct_counts_in_top_100_features","total_features_by_counts","log10_total_counts_endogenous","log10_total_counts_feature_control","pct_counts_feature_control")
scater_object<-normalize(scater_object)
#scater_object<-runPCA(scater_object,exprs_values = "counts",detect_outliers = T,use_coldata=T)
#scater_filter_outlier <- summary(scater_object$outlier)
#scater_object_filters<-cbind(scater_object$outlier,!scater_object$use)
#merge both manual and automatic filters, and remove genes that are expressed by less than 3 cells
#merge_manual_automatic_filters <- apply(counts(scater_object[ , colData(scater_object)$use & !colData(scater_object)$outlier]), 1, function(x) length(x[x > 1]) >= 3)
#add the merged filters to the $use column dataset
#rowData(scater_object)$use <- scater_object$use #merge_manual_automatic_filters
#output filtered genes by manual and auto outlier
#scater_filter_merged <- summary(scater_object$use & !scater_object$outlier)
#saves the above file
#saveRDS(scater_object, file=paste(scater_file_address,"scater_object.rds",sep=""))
#apply the filter with the $use settings
scater_object_qc <- scater_object[rowData(scater_object)$use,
                                  colData(scater_object)$use]
saveRDS(scater_object_qc, file=paste(scater_file_address,"scater_object_qc.rds",sep=""))
|
/20191510_scater_hislet_h5.R
|
no_license
|
whelena/human_islet_cellbender
|
R
| false
| false
| 5,087
|
r
|
library(scater)
library(DropletUtils)
library(Matrix)
library(Seurat)
library(stringr)
library(SingleCellExperiment)
library(mvoutlier)
library(limma)
library(ggplot2)
library(ggpmisc)

# QC pipeline for a 10x single-cell run: load the filtered .h5 count
# matrix, attach per-library metadata from sidecar CSV files, flag
# outlier cells (total counts, detected features, mitochondrial
# fraction), and save the filtered SingleCellExperiment object.

#load the dataset
raw_folder_address <- readRDS(file="10x_raw_folder_address.rds")
keyword <- readRDS(file = "keyword.rds")
scater_file_address <- readRDS(file = "scater_file_address.rds")
#convert 10x output folder into a scater object, by inputting the folder that contains the matrix.mtx file and two .tsv file.
raw_h5 <- list.files(path = raw_folder_address, pattern = "filtered.h5")
data <- Read10X_h5(paste0(raw_folder_address, raw_h5))
so_obj <- CreateSeuratObject(counts = data, min.cells = 3, min.features = 1000)
scater_object <- as.SingleCellExperiment(so_obj)
# raw_h5 <- list.files(path = raw_folder_address, pattern = "filtered.h5")
# scater_object_h5 <- read10xCounts(samples = paste0(raw_folder_address, raw_h5), type = "auto")
scater_object <- calculateQCMetrics(scater_object)
#keep_cells <- scater_object$total_counts > 1000
#keep_cells <- colSums(counts(scater_object))>=1000
#scater_object<-scater_object[,keep_cells]
scater_object_colnames <- colnames(scater_object)
# prefix barcodes with the library keyword so merged objects stay unique
colnames(scater_object) <- paste(keyword,colnames(scater_object),sep="_")
rownames(scater_object)<-make.unique(rownames(scater_object),sep="_")
#extract the library id attached to cell names
#offset to the library_id is calculated based on the length of the keyword, as
library_id <- as.integer(str_extract(scater_object_colnames,"[0-9]+"))
# load csv files in the folder.
# FIX: list.files(pattern=) expects a regular expression, not a glob;
# the old "*.csv" also matched any file name merely containing "csv".
files <- list.files(path=raw_folder_address, pattern="\\.csv$", full.names=F, recursive=FALSE)
#for each csv files, load the information and add it to the colData of the scater_object
for(i in files){
  # FIX: strip the ".csv" extension with an anchored regex; the old
  # pattern "^([^.]*).csv" failed on file names containing extra dots.
  name <- sub("\\.csv$", "", i)
  file <- as.character(read.csv(paste(raw_folder_address,i,sep=""))[,1])
  colData(scater_object)[,name] <- file[library_id]
}
# colData(scater_object)$condition <- sub("_r[1-2]", "", keyword)
# colData(scater_object)$library_id <- keyword
# colData(scater_object)$sample_id <- sub("_r[1-2].*", "", keyword)
# keep_feature <- rowSums(counts(scater_object) > 0)> 0
# keep_feature <- nexprs(scater_object, byrow=T) > 0
# scater_object_filtered <- scater_object[keep_feature, ]
# treat mitochondrial genes as control features for the QC metrics
isSpike(scater_object, "MT") <- grepl("^MT-", rownames(scater_object))
#calculate automatic QC metrics to filter out ones with an Median Absolute Deviation greater than 3
scater_object <- scater::calculateQCMetrics(scater_object, use_spikes = T, exprs_value = 'counts')
#colData(scater_object)$total_features_by_counts<-colData(scater_object)$total_features_by_counts
#Current version of calculateQCMetrics does not generate automatic filter for total_counts and total_features_by_counts, use isOutlier to create these filter_ fields
scater_object$filter_on_total_counts <- isOutlier(scater_object$total_counts, nmads=3,
                                                  type="both", log=T)
scater_object$filter_on_total_features <- isOutlier(scater_object$total_features_by_counts, nmads=3,
                                                    type="lower", log=F)
scater_object$filter_on_pct_counts_MT<- isOutlier(scater_object$pct_counts_MT, nmads=3,
                                                  type="higher", log=F)
#setup manual filters of 3MAD on genes and counts: keep cells passing all
scater_object$use <- (!scater_object$filter_on_total_features & !scater_object$filter_on_total_counts&!scater_object$filter_on_pct_counts_MT)
# NOTE(review): `use` here holds row *names* (all genes kept), so the
# final subsetting below keeps every gene -- confirm this is intended.
rowData(scater_object)$use <- row.names(scater_object)
#summary(scater_object$use)
#output use filter
scater_filter_use <- summary(scater_object$use)
#run automatic filters based on default parameters without total_features_feature_control
parameters<-c("pct_counts_in_top_100_features","total_features_by_counts","log10_total_counts_endogenous","log10_total_counts_feature_control","pct_counts_feature_control")
scater_object<-normalize(scater_object)
#scater_object<-runPCA(scater_object,exprs_values = "counts",detect_outliers = T,use_coldata=T)
#scater_filter_outlier <- summary(scater_object$outlier)
#scater_object_filters<-cbind(scater_object$outlier,!scater_object$use)
#merge both manual and automatic filters, and remove genes that are expressed by less than 3 cells
#merge_manual_automatic_filters <- apply(counts(scater_object[ , colData(scater_object)$use & !colData(scater_object)$outlier]), 1, function(x) length(x[x > 1]) >= 3)
#add the merged filters to the $use column dataset
#rowData(scater_object)$use <- scater_object$use #merge_manual_automatic_filters
#output filtered genes by manual and auto outlier
#scater_filter_merged <- summary(scater_object$use & !scater_object$outlier)
#saves the above file
#saveRDS(scater_object, file=paste(scater_file_address,"scater_object.rds",sep=""))
#apply the filter with the $use settings
scater_object_qc <- scater_object[rowData(scater_object)$use,
                                  colData(scater_object)$use]
saveRDS(scater_object_qc, file=paste(scater_file_address,"scater_object_qc.rds",sep=""))
|
library(dplyr)
library(lubridate)
# NOTE(review): a hard-coded setwd() makes the script machine-specific;
# kept to preserve the original assignment layout.
setwd("C:/Users/ldewit/Documents/coursera_local/Exploratory_Data_Analysis_Coursera/assignment 1")

## Loading data ("?" encodes missing values in this dataset)
df <- read.csv("../../Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
               nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')

## Manipulating and subsetting data: keep only 2007-02-01 and 2007-02-02
df$Date <- as.Date(df$Date, format="%d/%m/%Y")
df <- filter(df, Date == as.Date('2007-02-01') | Date == as.Date('2007-02-02'))
datetime <- paste(as.Date(df$Date), df$Time)
df$Datetime <- as.POSIXct(datetime)

## Plot 3: the three sub-metering series over time
with(df, {
    plot(Sub_metering_1~Datetime, type="l",
         ylab="Energy sub metering", xlab="")  # FIX: y label said "Global Active Power (kilowatts)" although sub-metering values are plotted
    lines(Sub_metering_2~Datetime,col='Red')
    lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

## Saving to file
dev.copy(png, file="plot3.png", height=500, width=500)
dev.off()
|
/assignment 1/plot3.R
|
no_license
|
laurensdw/Exploratory_Data_Analysis
|
R
| false
| false
| 1,015
|
r
|
library(dplyr)
library(lubridate)
# NOTE(review): a hard-coded setwd() makes the script machine-specific;
# kept to preserve the original assignment layout.
setwd("C:/Users/ldewit/Documents/coursera_local/Exploratory_Data_Analysis_Coursera/assignment 1")

## Loading data ("?" encodes missing values in this dataset)
df <- read.csv("../../Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
               nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')

## Manipulating and subsetting data: keep only 2007-02-01 and 2007-02-02
df$Date <- as.Date(df$Date, format="%d/%m/%Y")
df <- filter(df, Date == as.Date('2007-02-01') | Date == as.Date('2007-02-02'))
datetime <- paste(as.Date(df$Date), df$Time)
df$Datetime <- as.POSIXct(datetime)

## Plot 3: the three sub-metering series over time
with(df, {
    plot(Sub_metering_1~Datetime, type="l",
         ylab="Energy sub metering", xlab="")  # FIX: y label said "Global Active Power (kilowatts)" although sub-metering values are plotted
    lines(Sub_metering_2~Datetime,col='Red')
    lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

## Saving to file
dev.copy(png, file="plot3.png", height=500, width=500)
dev.off()
|
### Flag exceptional (outlying) values in one or more numeric variables.
###
### dat          : data frame (or single numeric vector) to screen.
### items        : names of the variables to check; NULL means all columns.
### exception    : tail probability forwarded to exceptionalScore() as `prob`.
### totalOnly    : if TRUE, compute only the per-row count of exceptional
###                values; otherwise keep per-variable logical indicators.
### append       : if TRUE, return `dat` with the result column(s) appended;
###                otherwise return only the result.
### both         : forwarded to exceptionalScore() (check both tails).
### silent       : suppress informational messages.
### suffix       : suffix used to name the per-variable indicator columns.
### totalVarName : name of the appended per-row count column.
###
### NOTE(review): depends on exceptionalScore() and cat0(), defined
### elsewhere in the package.
exceptionalScores <- function(dat, items=NULL,
                              exception=.025, totalOnly=TRUE, append=TRUE,
                              both=TRUE, silent=FALSE, suffix = "_isExceptional",
                              totalVarName = "exceptionalScores") {
  if (is.data.frame(dat)) {
    if (is.null(items)) {
      items <- names(dat);
      if (!silent) {
        cat("No items specified: extracting all variable names in dataframe.\n");
      }
    }
    exceptionalScores <- dat[, items];
  } else {
    ### Vector provided; store in dataframe.
    exceptionalScores <- data.frame(dat);
    names(exceptionalScores) <- deparse(substitute(dat));
  }
  originalCols <- ncol(exceptionalScores);
  ### Keep only numeric columns; non-numeric ones cannot be screened.
  ### NOTE(review): when exactly one numeric column survives, this
  ### data.frame() call renames it after the subsetting expression --
  ### confirm downstream code tolerates that.
  exceptionalScores <- data.frame(exceptionalScores[, unlist(lapply(exceptionalScores, is.numeric))]);
  if ((originalCols > ncol(exceptionalScores) & !silent)) {
    cat0("Note: ", originalCols - ncol(exceptionalScores), " variables ",
         "were not numeric and will not be checked for exceptional values.\n");
  }
  namesToUse <- paste0(colnames(exceptionalScores), suffix);
  ### Apply exceptionalScore() column-wise; all columns are numeric here,
  ### so apply()'s matrix coercion is safe.  Result: logical matrix.
  exceptionalScores <- apply(exceptionalScores, 2,
                             exceptionalScore, prob = exception, both=both, silent=silent);
  colnames(exceptionalScores) <- namesToUse;
  if (totalOnly) {
    ### Count how many variables are exceptional for each row.
    totalTrues <- rowSums(exceptionalScores, na.rm=TRUE);
    if (append) {
      dat[, totalVarName] <- totalTrues;
      return(dat);
    } else {
      return(totalTrues);
    }
  } else {
    if (append) {
      return(data.frame(dat,
                        exceptionalScores));
    } else {
      return(exceptionalScores);
    }
  }
}
|
/userfriendlyscience/R/exceptionalScores.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,719
|
r
|
### Flag exceptional (outlying) values in one or more numeric variables.
###
### dat          : data frame (or single numeric vector) to screen.
### items        : names of the variables to check; NULL means all columns.
### exception    : tail probability forwarded to exceptionalScore() as `prob`.
### totalOnly    : if TRUE, compute only the per-row count of exceptional
###                values; otherwise keep per-variable logical indicators.
### append       : if TRUE, return `dat` with the result column(s) appended;
###                otherwise return only the result.
### both         : forwarded to exceptionalScore() (check both tails).
### silent       : suppress informational messages.
### suffix       : suffix used to name the per-variable indicator columns.
### totalVarName : name of the appended per-row count column.
###
### NOTE(review): depends on exceptionalScore() and cat0(), defined
### elsewhere in the package.
exceptionalScores <- function(dat, items=NULL,
                              exception=.025, totalOnly=TRUE, append=TRUE,
                              both=TRUE, silent=FALSE, suffix = "_isExceptional",
                              totalVarName = "exceptionalScores") {
  if (is.data.frame(dat)) {
    if (is.null(items)) {
      items <- names(dat);
      if (!silent) {
        cat("No items specified: extracting all variable names in dataframe.\n");
      }
    }
    exceptionalScores <- dat[, items];
  } else {
    ### Vector provided; store in dataframe.
    exceptionalScores <- data.frame(dat);
    names(exceptionalScores) <- deparse(substitute(dat));
  }
  originalCols <- ncol(exceptionalScores);
  ### Keep only numeric columns; non-numeric ones cannot be screened.
  ### NOTE(review): when exactly one numeric column survives, this
  ### data.frame() call renames it after the subsetting expression --
  ### confirm downstream code tolerates that.
  exceptionalScores <- data.frame(exceptionalScores[, unlist(lapply(exceptionalScores, is.numeric))]);
  if ((originalCols > ncol(exceptionalScores) & !silent)) {
    cat0("Note: ", originalCols - ncol(exceptionalScores), " variables ",
         "were not numeric and will not be checked for exceptional values.\n");
  }
  namesToUse <- paste0(colnames(exceptionalScores), suffix);
  ### Apply exceptionalScore() column-wise; all columns are numeric here,
  ### so apply()'s matrix coercion is safe.  Result: logical matrix.
  exceptionalScores <- apply(exceptionalScores, 2,
                             exceptionalScore, prob = exception, both=both, silent=silent);
  colnames(exceptionalScores) <- namesToUse;
  if (totalOnly) {
    ### Count how many variables are exceptional for each row.
    totalTrues <- rowSums(exceptionalScores, na.rm=TRUE);
    if (append) {
      dat[, totalVarName] <- totalTrues;
      return(dat);
    } else {
      return(totalTrues);
    }
  } else {
    if (append) {
      return(data.frame(dat,
                        exceptionalScores));
    } else {
      return(exceptionalScores);
    }
  }
}
|
## -----------------------------------------------------------------------
## delete db.data.frame objects
## -----------------------------------------------------------------------
## Generic for dropping database objects.  Each method returns a list
## with `res` (logical, TRUE when the object was dropped) and `conn.id`
## (the database connection used); only `res` is returned to the caller.
setGeneric (
    "delete",
    def = function (x, ...) {
        rst <- standardGeneric("delete")
        res <- rst$res
        conn.id <- rst$conn.id
        ## When deletion succeeded and `x` was passed as a symbol bound
        ## to a db object (not as a plain table-name string), also remove
        ## that symbol from the calling environment so it cannot be used
        ## after the underlying table is gone.
        if (res && (!is.character(x) ||
                    is.character(x) &&
                    .strip(deparse(substitute(x)), "\"") != x)) {
            ## NOTE(review): this nested parent.env() chain is a fragile
            ## way of reaching the caller's environment -- confirm it
            ## still resolves correctly if the call stack changes.
            envir <- parent.env(parent.env(parent.env(parent.env(
                parent.env(as.environment(-1))))))
            ## Temporarily silence warnings in case the symbol is absent.
            warn.r <- getOption("warn")
            options(warn = -1)
            rm(list=deparse(substitute(x)), envir=envir)
            options(warn = warn.r)
        }
        res
    },
    signature = "x")
## -----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "db.data.frame"),
    def = function (x, cascade = FALSE) {
        ## Resolve the table name to drop; for a session-local temporary
        ## table the leading quoted schema qualifier must be stripped
        ## before it can be dropped.
        tbl <- if (x@.table.type == "LOCAL TEMPORARY") {
            gsub("^\\\"[^\"]*\\\"\\.", "", content(x))
        } else {
            content(x)
        }
        ## Delegate to the character method, which issues the actual DROP.
        dropped <- delete(tbl, conn.id(x),
                          x@.table.type == "LOCAL TEMPORARY", cascade)
        list(res = dropped, conn.id = conn.id(x))
    })
## -----------------------------------------------------------------------
## A db.Rquery represents a query expression that is not materialized in
## the database, so there is no table or view to drop; report success.
setMethod (
    "delete",
    signature (x = "db.Rquery"),
    def = function (x) {
        list(res=TRUE, conn.id=conn.id(x))
    })
## -----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "character"),
    ## Drop the table or view named by the string x.
    ##
    ## Args:
    ##   x       : table name, optionally schema-qualified ("schema.table").
    ##   conn.id : database connection id to operate on.
    ##   is.temp : is the object a session-local temporary table?
    ##   cascade : add CASCADE to the DROP statement?
    ##
    ## Returns list(res = <logical success>, conn.id = conn.id).  Success
    ## is determined by re-checking existence AFTER the DROP rather than
    ## by the DROP's own return value.
    def = function (x, conn.id = 1, is.temp = FALSE, cascade = FALSE) {
        ## Normalize the name into quoted, dot-separated parts.
        x <- paste("\"", .strip(strsplit(x, "\\.")[[1]], "\""), "\"",
                   collapse = ".", sep = "")
        if (is.temp)
            x <- gsub("^\\\"[^\"]*\\\"\\.", "", x)  # temp tables carry no schema
        origin.x <- x  # keep the normalized name for the final re-check
        warn.r <- getOption("warn")
        options(warn = -1)
        exists <- db.existsObject(x, conn.id, is.temp)
        if (length(exists) == 2)
            ## Two-element result: a flag plus the resolved name parts.
            if (! exists[[1]]) {
                options(warn = warn.r) # reset R warning level
                return (list(res=FALSE, conn.id=conn.id))
            } else
                x <- exists[[2]]
        else
            if (! exists) {
                options(warn = warn.r) # reset R warning level
                return (list(res=FALSE, conn.id=conn.id))
            } else {
                ## Object exists but was not resolved into schema + table;
                ## search the current schema path for a non-temp schema
                ## that contains it.
                if (length(x) == 1) x <- strsplit(x, "\\.")[[1]]
                if (length(x) != 2) {
                    schemas <- arraydb.to.arrayr(
                        .db.getQuery("select current_schemas(True)", conn.id),
                        type = "character")
                    table_schema <- character(0)
                    for (schema in schemas)
                        if (.db.existsTable(c(schema, x), conn.id))
                            table_schema <- c(table_schema, schema)
                    if (identical(table_schema, character(0))) {
                        options(warn = warn.r) # reset R warning level
                        return (list(res=FALSE, conn.id=conn.id))
                    }
                    ## Skip pg_temp_* schemas; take the first real one.
                    schema.str <- strsplit(table_schema, "_")
                    for (i in seq_len(length(schema.str))) {
                        str <- schema.str[[i]]
                        if (str[1] != "pg" || str[2] != "temp") {
                            x <- c(table_schema[i], x)
                            break
                        }
                    }
                }
                if (length(x) == 1) {
                    options(warn = warn.r) # reset R warning level
                    return (list(res=FALSE, conn.id=conn.id))
                }
            }
        ## Build the fully quoted "schema"."table" identifier.
        table <- paste("\"", .strip(x[1], "\""),
                       "\".\"", .strip(x[2], "\""), "\"", sep = "")
        if (cascade) cascade.str <- " cascade"
        else cascade.str <- ""
        if (.is.view(x, conn.id))
            type.str <- "view "
        else
            type.str <- "table "
        sql <- paste("drop ", type.str, table, cascade.str, sep = "")
        ## Issue the DROP; on error we simply fall through to the
        ## existence re-check below.
        ## Bug fix: the old handler ran `success <<- FALSE`, where
        ## `success` was never defined, silently creating/overwriting a
        ## global variable as a side effect; its value was never used.
        res <- tryCatch(.db.getQuery(sql, conn.id),
                        error = function(e) NULL)
        ## Success == the object no longer exists.
        exists <- db.existsObject(origin.x, conn.id, is.temp)
        options(warn = warn.r) # reset R warning level
        if (length(exists) == 2)
            if (! exists[[1]])
                return (list(res=TRUE, conn.id=conn.id))
            else
                return (list(res=FALSE, conn.id=conn.id))
        else
            if (! exists)
                return (list(res=TRUE, conn.id=conn.id))
            else
                return (list(res=FALSE, conn.id=conn.id))
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "arima.css.madlib"),
    def = function (x) {
        ## Drop every table backing a CSS-ARIMA fit: the model itself, its
        ## residuals and statistics tables, and -- when the source data
        ## was materialized into a temporary table -- that table as well.
        conn.id <- conn.id(x$model)
        d1 <- delete(x$model)
        d2 <- delete(x$residuals)
        d3 <- delete(x$statistics)
        if (x$temp.source) d4 <- delete(x$series)
        else d4 <- TRUE
        ## Bug fix: d4 was previously computed but omitted from the
        ## overall result, so a failed drop of x$series went unreported.
        list(res=all(c(d1, d2, d3, d4)), conn.id=conn.id)
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "summary.madlib"),
    def = function (x) {
        ## The summary object keeps its backing table in the "summary"
        ## attribute; drop that table and report the outcome.
        summary.tbl <- attr(x, "summary")
        conn <- conn.id(summary.tbl)
        dropped <- delete(summary.tbl)
        list(res = dropped, conn.id = conn)
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "lm.madlib"),
    def = function (x) {
        ## Nothing to drop when the fit carries no model table.
        if (is.null(x$model)) {
            return (list(res = TRUE, conn.id = NULL))
        }
        conn <- conn.id(x$model)
        dropped <- delete(x$model)
        list(res = dropped, conn.id = conn)
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "lm.madlib.grps"),
    def = function (x) {
        ## Grouped fits reference their backing table from the first
        ## group's entry; nothing to drop when that entry is absent.
        first.fit <- x[[1]]
        if (is.null(first.fit$model)) {
            return (list(res = TRUE, conn.id = NULL))
        }
        conn <- conn.id(first.fit$model)
        dropped <- delete(first.fit$model)
        list(res = dropped, conn.id = conn)
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "logregr.madlib"),
    def = function (x) {
        ## Nothing to drop when the fit carries no model table.
        if (is.null(x$model)) {
            return (list(res = TRUE, conn.id = NULL))
        }
        conn <- conn.id(x$model)
        dropped <- delete(x$model)
        list(res = dropped, conn.id = conn)
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "logregr.madlib.grps"),
    def = function (x) {
        ## Grouped fits reference their backing table from the first
        ## group's entry; nothing to drop when that entry is absent.
        first.fit <- x[[1]]
        if (is.null(first.fit$model)) {
            return (list(res = TRUE, conn.id = NULL))
        }
        conn <- conn.id(first.fit$model)
        dropped <- delete(first.fit$model)
        list(res = dropped, conn.id = conn)
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "bagging.model"),
    def = function (x) {
        ## A bagging model is a list of component fits; drop the tables
        ## behind every component and succeed only if all drops succeeded.
        conn <- conn.id(x[[1]]$model)
        outcomes <- lapply(x, delete)
        list(res = all(unlist(outcomes)), conn.id = conn)
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "elnet.madlib"),
    def = function (x) {
        ## An elastic-net fit only has a table to drop when its model slot
        ## is a database object; otherwise there is nothing to clean up.
        if (is(x$model, "db.obj")) {
            conn <- conn.id(x$model)
            dropped <- delete(x$model)
        } else {
            conn <- NA
            dropped <- TRUE
        }
        list(res = dropped, conn.id = conn)
    })
## ------------------------------------------------------------
setMethod("delete",
          signature(x = "dt.madlib"),
          def = function (x) {
              ## Drop the decision-tree model table, then its summary
              ## table.  && short-circuits, so the summary table is only
              ## dropped when the model table was dropped successfully.
              conn <- conn.id(x$model)
              dropped <- delete(x$model) && delete(x$model.summary)
              list(res = dropped, conn.id = conn)
          })
setMethod("delete",
          signature(x = "dt.madlib.grps"),
          def = function (x) {
              ## Grouped trees keep their tables on the first group's
              ## entry.  && short-circuits, so the summary table is only
              ## dropped when the model table was dropped successfully.
              conn <- conn.id(x[[1]]$model)
              dropped <- delete(x[[1]]$model) && delete(x[[1]]$model.summary)
              list(res = dropped, conn.id = conn)
          })
|
/PivotalR/R/method-delete_.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 8,326
|
r
|
## -----------------------------------------------------------------------
## delete db.data.frame objects
## -----------------------------------------------------------------------
setGeneric (
"delete",
def = function (x, ...) {
rst <- standardGeneric("delete")
res <- rst$res
conn.id <- rst$conn.id
if (res && (!is.character(x) ||
is.character(x) &&
.strip(deparse(substitute(x)), "\"") != x)) {
envir <- parent.env(parent.env(parent.env(parent.env(
parent.env(as.environment(-1))))))
warn.r <- getOption("warn")
options(warn = -1)
rm(list=deparse(substitute(x)), envir=envir)
options(warn = warn.r)
}
res
},
signature = "x")
## -----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "db.data.frame"),
def = function (x, cascade = FALSE) {
## .db.removeTable(content(x), conn.id(x))
if (x@.table.type == "LOCAL TEMPORARY")
tbl <- gsub("^\\\"[^\"]*\\\"\\.", "", content(x))
else
tbl <- content(x)
s <- delete(tbl, conn.id(x), x@.table.type == "LOCAL TEMPORARY",
cascade)
list(res=s, conn.id=conn.id(x))
})
## -----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "db.Rquery"),
def = function (x) {
list(res=TRUE, conn.id=conn.id(x))
})
## -----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "character"),
    ## Drop the table or view named by the string x.
    ##
    ## Args:
    ##   x       : table name, optionally schema-qualified ("schema.table").
    ##   conn.id : database connection id to operate on.
    ##   is.temp : is the object a session-local temporary table?
    ##   cascade : add CASCADE to the DROP statement?
    ##
    ## Returns list(res = <logical success>, conn.id = conn.id).  Success
    ## is determined by re-checking existence AFTER the DROP rather than
    ## by the DROP's own return value.
    def = function (x, conn.id = 1, is.temp = FALSE, cascade = FALSE) {
        ## Normalize the name into quoted, dot-separated parts.
        x <- paste("\"", .strip(strsplit(x, "\\.")[[1]], "\""), "\"",
                   collapse = ".", sep = "")
        if (is.temp)
            x <- gsub("^\\\"[^\"]*\\\"\\.", "", x)  # temp tables carry no schema
        origin.x <- x  # keep the normalized name for the final re-check
        warn.r <- getOption("warn")
        options(warn = -1)
        exists <- db.existsObject(x, conn.id, is.temp)
        if (length(exists) == 2)
            ## Two-element result: a flag plus the resolved name parts.
            if (! exists[[1]]) {
                options(warn = warn.r) # reset R warning level
                return (list(res=FALSE, conn.id=conn.id))
            } else
                x <- exists[[2]]
        else
            if (! exists) {
                options(warn = warn.r) # reset R warning level
                return (list(res=FALSE, conn.id=conn.id))
            } else {
                ## Object exists but was not resolved into schema + table;
                ## search the current schema path for a non-temp schema
                ## that contains it.
                if (length(x) == 1) x <- strsplit(x, "\\.")[[1]]
                if (length(x) != 2) {
                    schemas <- arraydb.to.arrayr(
                        .db.getQuery("select current_schemas(True)", conn.id),
                        type = "character")
                    table_schema <- character(0)
                    for (schema in schemas)
                        if (.db.existsTable(c(schema, x), conn.id))
                            table_schema <- c(table_schema, schema)
                    if (identical(table_schema, character(0))) {
                        options(warn = warn.r) # reset R warning level
                        return (list(res=FALSE, conn.id=conn.id))
                    }
                    ## Skip pg_temp_* schemas; take the first real one.
                    schema.str <- strsplit(table_schema, "_")
                    for (i in seq_len(length(schema.str))) {
                        str <- schema.str[[i]]
                        if (str[1] != "pg" || str[2] != "temp") {
                            x <- c(table_schema[i], x)
                            break
                        }
                    }
                }
                if (length(x) == 1) {
                    options(warn = warn.r) # reset R warning level
                    return (list(res=FALSE, conn.id=conn.id))
                }
            }
        ## Build the fully quoted "schema"."table" identifier.
        table <- paste("\"", .strip(x[1], "\""),
                       "\".\"", .strip(x[2], "\""), "\"", sep = "")
        if (cascade) cascade.str <- " cascade"
        else cascade.str <- ""
        if (.is.view(x, conn.id))
            type.str <- "view "
        else
            type.str <- "table "
        sql <- paste("drop ", type.str, table, cascade.str, sep = "")
        ## Issue the DROP; on error we simply fall through to the
        ## existence re-check below.
        ## Bug fix: the old handler ran `success <<- FALSE`, where
        ## `success` was never defined, silently creating/overwriting a
        ## global variable as a side effect; its value was never used.
        res <- tryCatch(.db.getQuery(sql, conn.id),
                        error = function(e) NULL)
        ## Success == the object no longer exists.
        exists <- db.existsObject(origin.x, conn.id, is.temp)
        options(warn = warn.r) # reset R warning level
        if (length(exists) == 2)
            if (! exists[[1]])
                return (list(res=TRUE, conn.id=conn.id))
            else
                return (list(res=FALSE, conn.id=conn.id))
        else
            if (! exists)
                return (list(res=TRUE, conn.id=conn.id))
            else
                return (list(res=FALSE, conn.id=conn.id))
    })
## ----------------------------------------------------------------------
setMethod (
    "delete",
    signature (x = "arima.css.madlib"),
    def = function (x) {
        ## Drop every table backing a CSS-ARIMA fit: the model itself, its
        ## residuals and statistics tables, and -- when the source data
        ## was materialized into a temporary table -- that table as well.
        conn.id <- conn.id(x$model)
        d1 <- delete(x$model)
        d2 <- delete(x$residuals)
        d3 <- delete(x$statistics)
        if (x$temp.source) d4 <- delete(x$series)
        else d4 <- TRUE
        ## Bug fix: d4 was previously computed but omitted from the
        ## overall result, so a failed drop of x$series went unreported.
        list(res=all(c(d1, d2, d3, d4)), conn.id=conn.id)
    })
## ----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "summary.madlib"),
def = function (x) {
tbl <- attr(x, "summary")
conn.id <- conn.id(tbl)
d1 <- delete(tbl)
list(res=d1, conn.id=conn.id)
})
## ----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "lm.madlib"),
def = function (x) {
if (is.null(x$model)) return (list(res=TRUE, conn.id=NULL))
conn.id <- conn.id(x$model)
d1 <- delete(x$model)
list(res=d1, conn.id=conn.id)
})
## ----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "lm.madlib.grps"),
def = function (x) {
if (is.null(x[[1]]$model)) return (list(res=TRUE, conn.id=NULL))
conn.id <- conn.id(x[[1]]$model)
d1 <- delete(x[[1]]$model)
list(res=d1, conn.id=conn.id)
})
## ----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "logregr.madlib"),
def = function (x) {
if (is.null(x$model)) return (list(res=TRUE, conn.id=NULL))
conn.id <- conn.id(x$model)
d1 <- delete(x$model)
list(res=d1, conn.id=conn.id)
})
## ----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "logregr.madlib.grps"),
def = function (x) {
if (is.null(x[[1]]$model)) return (list(res=TRUE, conn.id=NULL))
conn.id <- conn.id(x[[1]]$model)
d1 <- delete(x[[1]]$model)
list(res=d1, conn.id=conn.id)
})
## ----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "bagging.model"),
def = function (x) {
conn.id <- conn.id(x[[1]]$model)
res <- lapply(x, delete)
list(res = all(unlist(res)), conn.id = conn.id)
})
## ----------------------------------------------------------------------
setMethod (
"delete",
signature (x = "elnet.madlib"),
def = function (x) {
if (is(x$model, "db.obj")) {
conn.id <- conn.id(x$model)
d1 <- delete(x$model)
} else {
conn.id <- NA
d1 <- TRUE
}
list(res = d1, conn.id = conn.id)
})
## ------------------------------------------------------------
setMethod("delete",
signature(x = "dt.madlib"),
def = function (x) {
conn.id <- conn.id(x$model)
success <- delete(x$model) && delete(x$model.summary)
list(res = success, conn.id = conn.id)
})
setMethod("delete",
signature(x = "dt.madlib.grps"),
def = function (x) {
conn.id <- conn.id(x[[1]]$model)
success <- delete(x[[1]]$model) && delete(x[[1]]$model.summary)
list(res = success, conn.id = conn.id)
})
|
#' Remove PCR duplicates using bwa and Picard
#' @param idxbase name of bwa index
#' @param in1_fq left reads or 1 reads (of pair-end reads) fastq file
#' @param in2_fq right or 2 reads (of pair-end reads) fastq file; pass NA for single-end data
#' @param threads number of threads to run with (depends on server resources)
#' @param output_filename output filename (after removing PCR duplicates)
#' @return NULL
#' @export
### Remove PCR duplicates using SAMtools and Picard ###
map_with_bwa_and_remove_PCR_duplicates<-function(idxbase, in1_fq,in2_fq, threads, output_filename)
{
  ## Resolve the full paths of the external tools.
  ## Bug fix: Sys.which() locates an executable on PATH; base::which()
  ## expects a logical vector and errors on a character argument.
  bwa<-Sys.which("bwa")
  samtools<-Sys.which("samtools")
  ## Build the bwa index for the reference.
  system(command = paste(bwa," index ",idxbase, sep=""))
  ## Map reads (single-end when in2_fq is NA, otherwise paired-end),
  ## convert SAM to BAM and coordinate-sort.  NOTE(review): the
  ## "samtools sort <out-prefix> -" form is the legacy (pre-1.3)
  ## samtools syntax -- confirm the installed samtools version.
  if(is.na(in2_fq)){
    system(paste("bwa mem ",idxbase," ",in1_fq," "," -t ",threads, " | samtools view -S -b - | samtools sort sorted_",output_filename," - ",sep=""), intern=TRUE)}else{
    system(paste("bwa mem ",idxbase," ",in1_fq," ",in2_fq," -t ",threads, " | samtools view -S -b - | samtools sort sorted_",output_filename," - ",sep=""), intern=TRUE)
  }
  ## Mark and remove PCR duplicates with Picard.
  ## Bug fix: a space was missing before METRICS_FILE, which fused it
  ## onto the OUTPUT filename and broke the Picard command line.
  ## NOTE(review): the picard.jar path is hard-coded to one user's home
  ## directory -- consider making it a function argument.
  system(paste("java -Xmx4g -jar /home/abhijeet/bin/picard.jar MarkDuplicates INPUT=sorted_",output_filename," OUTPUT=dedup_",output_filename," METRICS_FILE=dups_",output_filename," VALIDATION_STRINGENCY=LENIENT REMOVE_DUPLICATES=TRUE TMP_DIR=/tmp"), intern=TRUE)
  return(NULL)
}
|
/R/map_with_bwa_and_remove_PCR_duplicates.R
|
no_license
|
abshah/RADseqR
|
R
| false
| false
| 1,336
|
r
|
#' Remove PCR duplicates using bwa and Picard
#' @param idxbase name of bwa index
#' @param in1_fq left reads or 1 reads (of pair-end reads) fastq file
#' @param in2_fq right or 2 reads (of pair-end reads) fastq file; pass NA for single-end data
#' @param threads number of threads to run with (depends on server resources)
#' @param output_filename output filename (after removing PCR duplicates)
#' @return NULL
#' @export
### Remove PCR duplicates using SAMtools and Picard ###
map_with_bwa_and_remove_PCR_duplicates<-function(idxbase, in1_fq,in2_fq, threads, output_filename)
{
  ## Resolve the full paths of the external tools.
  ## Bug fix: Sys.which() locates an executable on PATH; base::which()
  ## expects a logical vector and errors on a character argument.
  bwa<-Sys.which("bwa")
  samtools<-Sys.which("samtools")
  ## Build the bwa index for the reference.
  system(command = paste(bwa," index ",idxbase, sep=""))
  ## Map reads (single-end when in2_fq is NA, otherwise paired-end),
  ## convert SAM to BAM and coordinate-sort.  NOTE(review): the
  ## "samtools sort <out-prefix> -" form is the legacy (pre-1.3)
  ## samtools syntax -- confirm the installed samtools version.
  if(is.na(in2_fq)){
    system(paste("bwa mem ",idxbase," ",in1_fq," "," -t ",threads, " | samtools view -S -b - | samtools sort sorted_",output_filename," - ",sep=""), intern=TRUE)}else{
    system(paste("bwa mem ",idxbase," ",in1_fq," ",in2_fq," -t ",threads, " | samtools view -S -b - | samtools sort sorted_",output_filename," - ",sep=""), intern=TRUE)
  }
  ## Mark and remove PCR duplicates with Picard.
  ## Bug fix: a space was missing before METRICS_FILE, which fused it
  ## onto the OUTPUT filename and broke the Picard command line.
  ## NOTE(review): the picard.jar path is hard-coded to one user's home
  ## directory -- consider making it a function argument.
  system(paste("java -Xmx4g -jar /home/abhijeet/bin/picard.jar MarkDuplicates INPUT=sorted_",output_filename," OUTPUT=dedup_",output_filename," METRICS_FILE=dups_",output_filename," VALIDATION_STRINGENCY=LENIENT REMOVE_DUPLICATES=TRUE TMP_DIR=/tmp"), intern=TRUE)
  return(NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hanlp.R
\name{hanlp.updateWord}
\alias{hanlp.updateWord}
\title{Add, remove, or get a word}
\usage{
hanlp.updateWord(x, mode = "insert")
}
\arguments{
\item{x}{a word like \code{c('word')} or \code{c('words','nz',freq)} .}
\item{mode}{\code{insert}, \code{get} or \code{remove} a word; the dictionary is updated dynamically.}
}
\value{
TRUE, FALSE or a character.
}
\description{
Dynamically updated dictionary
}
\examples{
\dontrun{
hanlp.updateWord('newword')
hanlp.updateWord(c('newword','nz',1000))
hanlp.updateWord(x=c('newword'),mode='get')
hanlp.updateWord(x=c('newword'),mode='remove')
}
}
\author{
\url{https://github.com/qxde01/RHanLP}
}
|
/man/hanlp.updateWord.Rd
|
no_license
|
SimmsJeason/RHanLP
|
R
| false
| true
| 718
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hanlp.R
\name{hanlp.updateWord}
\alias{hanlp.updateWord}
\title{Add,remove,get a word.}
\usage{
hanlp.updateWord(x, mode = "insert")
}
\arguments{
\item{x}{a word like \code{c('word')} or \code{c('words','nz',freq)} .}
\item{mode}{\code{insert} ,\code{get} or \code{remove} a word, dynamically updated dictionary.}
}
\value{
TRUE, FALSE or a character.
}
\description{
Dynamically updated dictionary
}
\examples{
\dontrun{
hanlp.updateWord('newword')
hanlp.updateWord(c('newword','nz',1000))
hanlp.updateWord(x=c('newword'),mode='get')
hanlp.updateWord(x=c('newword'),mode='remove')
}
}
\author{
\link{https://github.com/qxde01/RHanLP}
}
|
library(tidyverse)
library(tuneR)
## clean.R presumably defines midi_melodies, get_binCI() and multiplot()
## used below -- TODO confirm.
source("clean.R")
######
library(corrplot)
## Pearson correlation matrix over all (coerced-to-numeric) features of
## the MIDI-track dataframe, visualized as a correlogram.
x <- midi_melodies %>%
  mutate_all(as.numeric) %>%
  cor(method = "pearson")
corr.p <- corrplot(x)
## Correlations of every feature with the ME (melody) label, strongest
## first -- printed for interactive inspection.
x["ME",] %>% sort(decreasing = TRUE)
######Density multiplot
## One density plot per feature, split by the melody flag ME (fill),
## legends suppressed; arranged into a 3x4 grid at the end.
p1 <- ggplot(midi_melodies, aes(x = channel, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  theme(legend.position = "none")
p2 <- ggplot(midi_melodies, aes(x = track, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
p3 <- ggplot(midi_melodies, aes(x = track_occ, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
p4 <- ggplot(midi_melodies, aes(x = frac_poly, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
## events is long-tailed, so the x axis is clipped at 1000 for legibility.
p5 <- ggplot(midi_melodies, aes(x = events, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  scale_x_continuous(limits = c(0, 1000)) +
  theme(legend.position = "none")
p6 <- ggplot(midi_melodies, aes(x = mad_int, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
p7 <- ggplot(midi_melodies, aes(x = IOI_entropy, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
p8 <- ggplot(midi_melodies, aes(x = pc_entropy, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
p9 <- ggplot(midi_melodies, aes(x = int_entropy, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  theme(legend.position = "none")
p10 <- ggplot(midi_melodies, aes(x = longpr, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
p11 <- ggplot(midi_melodies, aes(x = top_rate, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
p12 <- ggplot(midi_melodies, aes(x = med_note, fill = as.factor(ME))) +
  geom_density(alpha = 0.5) +
  ylab(NULL) +
  theme(legend.position = "none")
## multiplot() is presumably the cookbook grid helper from clean.R --
## TODO confirm.
layout <- matrix(1:12,3,4,byrow=TRUE)
multi.p1 <- multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10,
                      p11, p12, layout=layout)
####Melody rates for categorical features
#chance of being a melody track
## Base rate: percentage of rows flagged as melody (ME == 1).
frac_melody <- sum(midi_melodies$ME == 1)/nrow(midi_melodies)*100
#confidence interval
## get_binCI() presumably returns a binomial CI with $lwr/$upr components
## (defined in clean.R -- TODO confirm).
frac_melody_UL <- get_binCI(sum(midi_melodies$ME == 1),
                            nrow(midi_melodies))$upr * 100
frac_melody_LL <- get_binCI(sum(midi_melodies$ME == 1),
                            nrow(midi_melodies))$lwr * 100
#refactor name column to random forest limit of 52 categories per variable
midi_melodies <- midi_melodies %>%
  mutate(name = fct_lump(name, n = 52, ties.method = "random"))
##Plot fraction melody by instrument name
## Per-name melody percentage with binomial error bars; the dashed line
## marks the overall base rate.
p13 <- midi_melodies %>%
  group_by(name, ME) %>%
  count() %>%
  spread(ME, n) %>%
  replace_na(list('0' = 0,
                  '1' = 0)) %>%
  mutate(frac_claim = `1`/(`1`+`0`)*100,
         lwr = get_binCI(`1`,(`1`+`0`))[[1]]*100,
         upr = get_binCI(`1`,(`1`+`0`))[[2]]*100
  ) %>%
  ggplot(aes(reorder(name, -frac_claim, FUN = max), frac_claim, fill = name)) +
  geom_col() +
  geom_errorbar(aes(ymin = lwr, ymax = upr),
                width = 0.5, size = 0.7, color = "gray30") +
  theme(legend.position = "none") +
  labs(x = "name", y = "Melody [%]") +
  theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  geom_hline(yintercept = frac_melody, linetype = "dashed") #+
  #geom_hline(yintercept = frac_melody_UL, linetype = "dotted") +
  #geom_hline(yintercept = frac_melody_LL, linetype = "dotted")
##Plot fraction melody by instrument family
## Same plot aggregated at the instrument-family level.
p14 <- midi_melodies %>%
  group_by(family, ME) %>%
  count() %>%
  spread(ME, n) %>%
  replace_na(list('0' = 0,
                  '1' = 0)) %>%
  mutate(frac_claim = `1`/(`1`+`0`)*100,
         lwr = get_binCI(`1`,(`1`+`0`))[[1]]*100,
         upr = get_binCI(`1`,(`1`+`0`))[[2]]*100
  ) %>%
  ggplot(aes(reorder(family, -frac_claim, FUN = max), frac_claim, fill = family)) +
  geom_col() +
  geom_errorbar(aes(ymin = lwr, ymax = upr),
                width = 0.5, size = 0.7, color = "gray30") +
  theme(legend.position = "none") +
  labs(x = "family", y = "Melody [%]") +
  theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  geom_hline(yintercept = frac_melody, linetype = "dotted")
####Dotplots
library('ggridges')
##multiplot
## Pairwise jitter plots of selected features, colored by the melody
## flag.  NOTE(review): alpha = 0.5 is mapped inside aes() rather than
## set as a fixed aesthetic -- works here because legends are hidden,
## but aes() mapping of a constant is unconventional.
p15 <- ggplot(midi_melodies, aes(x=med_note, y=top_rate)) +
  geom_jitter(aes(alpha = 0.5,
                  colour = as.factor(ME)))+
  theme(legend.position = "none")
p16 <- ggplot(midi_melodies, aes(x=channel, y=med_note)) +
  geom_jitter(aes(alpha = 0.5,
                  colour = as.factor(ME)))+
  theme(legend.position = "none")
p17 <- ggplot(midi_melodies, aes(x=channel, y=top_rate)) +
  geom_jitter(aes(alpha = 0.5,
                  colour = as.factor(ME)))+
  theme(legend.position = "none")
p18 <- ggplot(midi_melodies, aes(x=channel, y=pc_entropy)) +
  geom_jitter(aes(alpha = 0.5,
                  colour = as.factor(ME)))+
  theme(legend.position = "none")
layout <- matrix(1:4,2,2,byrow=TRUE)
multi.p2 <- multiplot(p15, p16, p17, p18, layout=layout)
#categorical ridgeplots
## Ridgeline densities of med_note per instrument family / name, split
## by the melody flag.
p19 <- ggplot(midi_melodies,
              aes(x = med_note, y = factor(family), fill = factor(ME))) +
  geom_density_ridges(scale = 2,alpha = .5,rel_min_height = 0.01) +
  theme_ridges()
p20 <- ggplot(midi_melodies,
              aes(x = med_note, y = factor(name), fill = factor(ME))) +
  geom_density_ridges(scale = 2,alpha = .5,rel_min_height = 0.01) +
  theme_ridges(font_size = 9)
## Write all figures to a PDF.
## Bug fix: pdf()'s `title` argument only sets the document's metadata
## title; without `file =` the device wrote to the default "Rplots.pdf".
pdf(file = "_midi_plots.pdf", title = "_midi_plots.pdf")
corrplot(x)
layout <- matrix(1:12,3,4,byrow=TRUE)
multi.p1 <- multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10,
                      p11, p12, layout=layout)
p13
p14
layout <- matrix(1:4,2,2,byrow=TRUE)
multi.p2 <- multiplot(p15, p16, p17, p18, layout=layout)
p19
p20
dev.off()
|
/Visualize.r
|
no_license
|
areeves87/midi-analysis
|
R
| false
| false
| 6,539
|
r
|
library(tidyverse)
library(tuneR)
source("clean.R")
######
library(corrplot)
x <- midi_melodies %>%
mutate_all(as.numeric) %>%
cor(method = "pearson")
corr.p <- corrplot(x)
x["ME",] %>% sort(decreasing = TRUE)
######Density multiplot
p1 <- ggplot(midi_melodies, aes(x = channel, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
theme(legend.position = "none")
p2 <- ggplot(midi_melodies, aes(x = track, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p3 <- ggplot(midi_melodies, aes(x = track_occ, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p4 <- ggplot(midi_melodies, aes(x = frac_poly, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p5 <- ggplot(midi_melodies, aes(x = events, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
scale_x_continuous(limits = c(0, 1000)) +
theme(legend.position = "none")
p6 <- ggplot(midi_melodies, aes(x = mad_int, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p7 <- ggplot(midi_melodies, aes(x = IOI_entropy, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p8 <- ggplot(midi_melodies, aes(x = pc_entropy, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p9 <- ggplot(midi_melodies, aes(x = int_entropy, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
theme(legend.position = "none")
p10 <- ggplot(midi_melodies, aes(x = longpr, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p11 <- ggplot(midi_melodies, aes(x = top_rate, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
p12 <- ggplot(midi_melodies, aes(x = med_note, fill = as.factor(ME))) +
geom_density(alpha = 0.5) +
ylab(NULL) +
theme(legend.position = "none")
layout <- matrix(1:12,3,4,byrow=TRUE)
multi.p1 <- multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10,
p11, p12, layout=layout)
####Melody rates for categorical features
#chance of being a melody track
frac_melody <- sum(midi_melodies$ME == 1)/nrow(midi_melodies)*100
#confidence interval
frac_melody_UL <- get_binCI(sum(midi_melodies$ME == 1),
nrow(midi_melodies))$upr * 100
frac_melody_LL <- get_binCI(sum(midi_melodies$ME == 1),
nrow(midi_melodies))$lwr * 100
#refactor name column to random forest limit of 52 categories per variable
midi_melodies <- midi_melodies %>%
mutate(name = fct_lump(name, n = 52, ties.method = "random"))
##Plot fraction melody by instrument name
p13 <- midi_melodies %>%
group_by(name, ME) %>%
count() %>%
spread(ME, n) %>%
replace_na(list('0' = 0,
'1' = 0)) %>%
mutate(frac_claim = `1`/(`1`+`0`)*100,
lwr = get_binCI(`1`,(`1`+`0`))[[1]]*100,
upr = get_binCI(`1`,(`1`+`0`))[[2]]*100
) %>%
ggplot(aes(reorder(name, -frac_claim, FUN = max), frac_claim, fill = name)) +
geom_col() +
geom_errorbar(aes(ymin = lwr, ymax = upr),
width = 0.5, size = 0.7, color = "gray30") +
theme(legend.position = "none") +
labs(x = "name", y = "Melody [%]") +
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5)) +
geom_hline(yintercept = frac_melody, linetype = "dashed") #+
#geom_hline(yintercept = frac_melody_UL, linetype = "dotted") +
#geom_hline(yintercept = frac_melody_LL, linetype = "dotted")
##Plot fraction melody by instrument family
p14 <- midi_melodies %>%
group_by(family, ME) %>%
count() %>%
spread(ME, n) %>%
replace_na(list('0' = 0,
'1' = 0)) %>%
mutate(frac_claim = `1`/(`1`+`0`)*100,
lwr = get_binCI(`1`,(`1`+`0`))[[1]]*100,
upr = get_binCI(`1`,(`1`+`0`))[[2]]*100
) %>%
ggplot(aes(reorder(family, -frac_claim, FUN = max), frac_claim, fill = family)) +
geom_col() +
geom_errorbar(aes(ymin = lwr, ymax = upr),
width = 0.5, size = 0.7, color = "gray30") +
theme(legend.position = "none") +
labs(x = "family", y = "Melody [%]") +
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5)) +
geom_hline(yintercept = frac_melody, linetype = "dotted")
####Dotplots
library('ggridges')
##multiplot
p15 <- ggplot(midi_melodies, aes(x=med_note, y=top_rate)) +
geom_jitter(aes(alpha = 0.5,
colour = as.factor(ME)))+
theme(legend.position = "none")
p16 <- ggplot(midi_melodies, aes(x=channel, y=med_note)) +
geom_jitter(aes(alpha = 0.5,
colour = as.factor(ME)))+
theme(legend.position = "none")
p17 <- ggplot(midi_melodies, aes(x=channel, y=top_rate)) +
geom_jitter(aes(alpha = 0.5,
colour = as.factor(ME)))+
theme(legend.position = "none")
p18 <- ggplot(midi_melodies, aes(x=channel, y=pc_entropy)) +
geom_jitter(aes(alpha = 0.5,
colour = as.factor(ME)))+
theme(legend.position = "none")
layout <- matrix(1:4,2,2,byrow=TRUE)
multi.p2 <- multiplot(p15, p16, p17, p18, layout=layout)
# Categorical ridgeline plots: density of med_note per instrument family /
# track name, filled by melody status (ME).
p19 <- midi_melodies %>%
  ggplot(aes(x = med_note, y = factor(family), fill = factor(ME))) +
  geom_density_ridges(scale = 2, alpha = .5, rel_min_height = 0.01) +
  theme_ridges()

p20 <- midi_melodies %>%
  ggplot(aes(x = med_note, y = factor(name), fill = factor(ME))) +
  geom_density_ridges(scale = 2, alpha = .5, rel_min_height = 0.01) +
  theme_ridges(font_size = 9)
# Write all plots to a single PDF device.
# BUG FIX: the original called pdf(title = "_midi_plots.pdf"); `title` only
# sets the PDF metadata title, so output silently went to the default
# "Rplots.pdf". The file name must be passed via `file`.
pdf(file = "_midi_plots.pdf")
corrplot(x)
layout <- matrix(1:12, 3, 4, byrow = TRUE)
multi.p1 <- multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10,
                      p11, p12, layout = layout)
p13
p14
layout <- matrix(1:4, 2, 2, byrow = TRUE)
multi.p2 <- multiplot(p15, p16, p17, p18, layout = layout)
p19
p20
dev.off()
|
# Shiny UI for a simple stock-market analysis dashboard: the user selects a
# company, a year range, a technical indicator and a trading strategy; the
# server renders the corresponding charts into the output slots referenced
# below (symbol, year1, year2, ta, ts, chart1-chart3).
library(shiny)
library(tidyverse)
library(quantmod)
library(PerformanceAnalytics)
library(RColorBrewer)
library(tseries)
library(lubridate)
library(Quandl)

# SECURITY: a live Quandl API key is hard-coded and committed to source.
# It should be revoked and read from the environment instead, e.g.
# Quandl.api_key(Sys.getenv("QUANDL_API_KEY")).
Quandl.api_key("zrcB2Ejv9UmvhPCUsy2_")

# Silence getSymbols() warning chatter from quantmod.
options("getSymbols.yahoo.warning" = FALSE)
options("getSymbols.warning4.0" = FALSE)

shinyUI(fluidPage(
  titlePanel("Stock Market Analysis"),
  sidebarLayout(
    sidebarPanel(
      selectInput("name", "Select Company:",
                  c("Microsoft", "IBM", "Netflix", "Apple Inc.")),
      sliderInput("year", "Select Time Range:",
                  min = 2008, max = 2021, value = c(2012, 2016),
                  sep = "", pre = "Year "),
      selectInput("tech_ind", "Select Technical Indicators:",
                  c("Bollinger Bands", "Relative Strength Index",
                    "Exponential Moving Averages",
                    "Moving Averages Convergence Divergence")),
      selectInput("trade_strat", "Select Trading Strategy:",
                  c("Simple Buy Filter", "Simple Buy and Sell Filter",
                    "Relative Strength Index Buy Filter",
                    "Relative Strength Index Buy and Sell Filter")),
      # Inputs are only sent to the server when the user presses the button.
      submitButton("Chart!")
    ),
    mainPanel(
      tabsetPanel(type = "tabs",
                  tabPanel("Documentation",
                           br(), strong("Select Company: "),
                           "Select a company from the given list of 4 to chart. The default is Microsoft.",
                           br(), strong("Select Time Range: "),
                           "Choose for which years [from 2008 to 2021] should the charts be plotted. The default is from 2012 to 2016.",
                           br(), strong("Select Technical Indicators: "),
                           "Choose which Technical Indicators should be added to the plot. The default is Bollinger Bands.",
                           br(), strong("Select Trading Strategy: "),
                           "Select which Trading Strategy should be used. The default is the Simple Buy Filter",
                           br(), "The charts are in the 'Charts' tab, while the trading strategies are in the 'Trading Strategies' tab",
                           # Typo fix in user-facing text: "accomapanying" -> "accompanying".
                           br(), "For more information, please see the accompanying RStudio Presentation."),
                  tabPanel("Charts",
                           br(), "The selected company is: ", strong(textOutput("symbol")),
                           br(), "The Selected Time Range is from Year ", textOutput("year1"),
                           " to Year ", textOutput("year2"),
                           br(), "The Selected Technical Indicator is: ", textOutput("ta"),
                           br(), plotOutput("chart1"),
                           br(), plotOutput("chart2")),
                  tabPanel("Trading Strategies",
                           br(), "The Selected Trading Strategy is: ", textOutput("ts"),
                           br(), plotOutput("chart3"))
      )
    )
  )
))
|
/ui.R
|
no_license
|
World-of-Python-and-R/Stock-Market-Analysis-Project
|
R
| false
| false
| 3,117
|
r
|
# Shiny UI for a simple stock-market analysis dashboard: the user selects a
# company, a year range, a technical indicator and a trading strategy; the
# server renders the corresponding charts into the output slots referenced
# below (symbol, year1, year2, ta, ts, chart1-chart3).
library(shiny)
library(tidyverse)
library(quantmod)
library(PerformanceAnalytics)
library(RColorBrewer)
library(tseries)
library(lubridate)
library(Quandl)

# SECURITY: a live Quandl API key is hard-coded and committed to source.
# It should be revoked and read from the environment instead, e.g.
# Quandl.api_key(Sys.getenv("QUANDL_API_KEY")).
Quandl.api_key("zrcB2Ejv9UmvhPCUsy2_")

# Silence getSymbols() warning chatter from quantmod.
options("getSymbols.yahoo.warning" = FALSE)
options("getSymbols.warning4.0" = FALSE)

shinyUI(fluidPage(
  titlePanel("Stock Market Analysis"),
  sidebarLayout(
    sidebarPanel(
      selectInput("name", "Select Company:",
                  c("Microsoft", "IBM", "Netflix", "Apple Inc.")),
      sliderInput("year", "Select Time Range:",
                  min = 2008, max = 2021, value = c(2012, 2016),
                  sep = "", pre = "Year "),
      selectInput("tech_ind", "Select Technical Indicators:",
                  c("Bollinger Bands", "Relative Strength Index",
                    "Exponential Moving Averages",
                    "Moving Averages Convergence Divergence")),
      selectInput("trade_strat", "Select Trading Strategy:",
                  c("Simple Buy Filter", "Simple Buy and Sell Filter",
                    "Relative Strength Index Buy Filter",
                    "Relative Strength Index Buy and Sell Filter")),
      # Inputs are only sent to the server when the user presses the button.
      submitButton("Chart!")
    ),
    mainPanel(
      tabsetPanel(type = "tabs",
                  tabPanel("Documentation",
                           br(), strong("Select Company: "),
                           "Select a company from the given list of 4 to chart. The default is Microsoft.",
                           br(), strong("Select Time Range: "),
                           "Choose for which years [from 2008 to 2021] should the charts be plotted. The default is from 2012 to 2016.",
                           br(), strong("Select Technical Indicators: "),
                           "Choose which Technical Indicators should be added to the plot. The default is Bollinger Bands.",
                           br(), strong("Select Trading Strategy: "),
                           "Select which Trading Strategy should be used. The default is the Simple Buy Filter",
                           br(), "The charts are in the 'Charts' tab, while the trading strategies are in the 'Trading Strategies' tab",
                           # Typo fix in user-facing text: "accomapanying" -> "accompanying".
                           br(), "For more information, please see the accompanying RStudio Presentation."),
                  tabPanel("Charts",
                           br(), "The selected company is: ", strong(textOutput("symbol")),
                           br(), "The Selected Time Range is from Year ", textOutput("year1"),
                           " to Year ", textOutput("year2"),
                           br(), "The Selected Technical Indicator is: ", textOutput("ta"),
                           br(), plotOutput("chart1"),
                           br(), plotOutput("chart2")),
                  tabPanel("Trading Strategies",
                           br(), "The Selected Trading Strategy is: ", textOutput("ts"),
                           br(), plotOutput("chart3"))
      )
    )
  )
))
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.2, gaussian family,
# no standardization) predicting column 1 of the stomach correlation
# training set from columns 4..ncol, and append the fitted path summary to
# a log file.
# Fixes: partial argument match `head=T` -> `header = TRUE`; `T` -> `TRUE`;
# `=` assignment -> `<-`; local name `glm` shadowed stats::glm.
library(glmnet)

mydata <- read.table("./TrainingSet/Correlation/stomach.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible CV fold assignment
glm_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.2,
                     family = "gaussian", standardize = FALSE)

sink('./Model/EN/Correlation/stomach/stomach_034.txt', append = TRUE)
print(glm_fit$glmnet.fit)
sink()
|
/Model/EN/Correlation/stomach/stomach_034.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 363
|
r
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.2, gaussian family,
# no standardization) predicting column 1 of the stomach correlation
# training set from columns 4..ncol, and append the fitted path summary to
# a log file.
# Fixes: partial argument match `head=T` -> `header = TRUE`; `T` -> `TRUE`;
# `=` assignment -> `<-`; local name `glm` shadowed stats::glm.
library(glmnet)

mydata <- read.table("./TrainingSet/Correlation/stomach.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible CV fold assignment
glm_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.2,
                     family = "gaussian", standardize = FALSE)

sink('./Model/EN/Correlation/stomach/stomach_034.txt', append = TRUE)
print(glm_fit$glmnet.fit)
sink()
|
#' Print Method for S3 \code{spv} classes
#'
#' Simple print methods for S3 classes \code{spv}, \code{spvlist}, \code{spvforlist} and \code{spvlistforlist}. See
#' \code{\link{plot.spv}} for examples.
#'
#' @aliases print.spv print.spvlist print.spvforlist print.spvlistforlist
#' @param x Object of class \code{spv} or \code{spvlist}
#' @param \dots Unimplemented
#' @author Pieter C. Schoonees
#' @export
#' @keywords print
print.spv <- function(x, ...){
  cat("\nObject of class 'spv'\n")
  cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "")
  # BUG FIX: nrow() counts rows and ncol() counts columns, but the original
  # message printed them with the labels swapped.
  cat("Sample dimensions:\n", nrow(x$sample), " rows and ", ncol(x$sample), " columns\n\n", sep = "")
  if (!is.null(as.list(x$call)$type)) {
    # A spherical design space is requested via type = "s"/"S"/"sphere";
    # anything else is reported as cuboidal.
    if (as.list(x$call)$type %in% c("s", "S", "sphere")) {
      stype <- "Spherical"
    } else {
      stype <- "Cuboidal"
    }
    cat("Design space type:\n", stype, "\n\n", sep = "")
  }
  cat("Summary of", ifelse(x$unscaled, "Unscaled Prediction Variance (UPV):\n", "Scaled Prediction Variance (SPV):\n"))
  print(summary(x$spv))
}
|
/vdg/R/print.spv.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,042
|
r
|
#' Print Method for S3 \code{spv} classes
#'
#' Simple print methods for S3 classes \code{spv}, \code{spvlist}, \code{spvforlist} and \code{spvlistforlist}. See
#' \code{\link{plot.spv}} for examples.
#'
#' @aliases print.spv print.spvlist print.spvforlist print.spvlistforlist
#' @param x Object of class \code{spv} or \code{spvlist}
#' @param \dots Unimplemented
#' @author Pieter C. Schoonees
#' @export
#' @keywords print
print.spv <- function(x, ...){
  cat("\nObject of class 'spv'\n")
  cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "")
  # BUG FIX: nrow() counts rows and ncol() counts columns, but the original
  # message printed them with the labels swapped.
  cat("Sample dimensions:\n", nrow(x$sample), " rows and ", ncol(x$sample), " columns\n\n", sep = "")
  if (!is.null(as.list(x$call)$type)) {
    # A spherical design space is requested via type = "s"/"S"/"sphere";
    # anything else is reported as cuboidal.
    if (as.list(x$call)$type %in% c("s", "S", "sphere")) {
      stype <- "Spherical"
    } else {
      stype <- "Cuboidal"
    }
    cat("Design space type:\n", stype, "\n\n", sep = "")
  }
  cat("Summary of", ifelse(x$unscaled, "Unscaled Prediction Variance (UPV):\n", "Scaled Prediction Variance (SPV):\n"))
  print(summary(x$spv))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/oauth-endpoint.r
\name{oauth_endpoints}
\alias{oauth_endpoints}
\title{Popular oauth endpoints.}
\usage{
oauth_endpoints(name)
}
\arguments{
\item{name}{One of the following endpoints: linkedin, twitter,
vimeo, google, facebook, github.}
}
\description{
Provides some common OAuth endpoints.
}
\examples{
oauth_endpoints("twitter")
}
|
/Data Science/Miscellaenous/Misc - httr info/man/oauth_endpoints.Rd
|
no_license
|
Mike-Kuklinski/Coursera
|
R
| false
| false
| 421
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/oauth-endpoint.r
\name{oauth_endpoints}
\alias{oauth_endpoints}
\title{Popular oauth endpoints.}
\usage{
oauth_endpoints(name)
}
\arguments{
\item{name}{One of the following endpoints: linkedin, twitter,
vimeo, google, facebook, github.}
}
\description{
Provides some common OAuth endpoints.
}
\examples{
oauth_endpoints("twitter")
}
|
# Author: lsalas
###############################################################################
## This file sets the BodyConditionHistory object for the simple WESE simulation

#' Abstract class for BodyConditionHistory
#'
#' Holds the current and historical distribution (mean, sd) of WESE body
#' condition, plus the model linking weight to body condition.
#'
#' @slot CurrentBCH Numeric, a vector with the mean and sd (in that order) of WESE body condition
#' @slot HistoryBCH A list object that holds the history of mean and sd values by timestep (i.e., the body condition distribution's trend data)
#' @slot WeightToBC A list object containing the model that describes the link between weight and body condition.
#' @exportClass BodyConditionHistory
setClass(Class = "BodyConditionHistory",
         # `slots =` is the modern replacement for the deprecated
         # representation() form; the class definition is otherwise identical.
         slots = c(
             CurrentBCH = "numeric",
             HistoryBCH = "list",
             WeightToBC = "list"
         ))
############################################ SLOT METHODS #######################################
########################## Set CurrentBCH slot
#' Generic for the replacement method of the CurrentBCH slot.
#'
#' @name setCurrentBCH
#' @param object A BodyConditionHistory object
#' @param value The value to store in the CurrentBCH slot
#' @nord
setGeneric("CurrentBCH<-", function(object, value) standardGeneric("CurrentBCH<-"))

#' Replace the CurrentBCH slot (current mean and sd of WESE body condition).
#'
#' @name setCurrentBCH
#' @param object A BodyConditionHistory object
#' @param value Numeric vector: the current mean and sd, in that order
setReplaceMethod("CurrentBCH", signature(object = "BodyConditionHistory"), function(object, value) {
    slot(object, "CurrentBCH") <- value
    validObject(object)
    object
})

#' Generic for the accessor of the CurrentBCH slot.
#'
#' @name CurrentBCH
#' @param object A BodyConditionHistory object
#' @nord
setGeneric("CurrentBCH", function(object) standardGeneric("CurrentBCH"))

#' Read the CurrentBCH slot of a BodyConditionHistory object.
#'
#' @name CurrentBCH
#' @param object A BodyConditionHistory object
setMethod("CurrentBCH", signature(object = "BodyConditionHistory"),
          function(object) slot(object, "CurrentBCH"))
##########################
########################## Set HistoryBCH slot
#' Generic for the replacement method of the HistoryBCH slot.
#'
#' @name setHistoryBCH
#' @param object A BodyConditionHistory object
#' @param value A list object
#' @nord
setGeneric("HistoryBCH<-", function(object, value) standardGeneric("HistoryBCH<-"))

#' Replace the HistoryBCH slot (per-timestep history of mean and sd values,
#' i.e. the body condition distribution's trend data).
#'
#' @name setHistoryBCH
#' @param object A BodyConditionHistory object
#' @param value A list holding the mean/sd pair for each timestep
setReplaceMethod("HistoryBCH", signature(object = "BodyConditionHistory"), function(object, value) {
    slot(object, "HistoryBCH") <- value
    validObject(object)
    object
})

#' Generic for the accessor of the HistoryBCH slot.
#'
#' @name HistoryBCH
#' @param object A BodyConditionHistory object
#' @nord
setGeneric("HistoryBCH", function(object) standardGeneric("HistoryBCH"))

#' Read the HistoryBCH slot of a BodyConditionHistory object.
#'
#' @name HistoryBCH
#' @param object A BodyConditionHistory object
setMethod("HistoryBCH", signature(object = "BodyConditionHistory"),
          function(object) slot(object, "HistoryBCH"))
##########################
########################## Set WeightToBC slot
#' Generic for the replacement method of the WeightToBC slot.
#'
#' @name setWeightToBC
#' @param object A BodyConditionHistory object
#' @param value A list object
#' @nord
setGeneric("WeightToBC<-", function(object, value) standardGeneric("WeightToBC<-"))

#' Replace the WeightToBC slot (the model that relates weight to body condition).
#'
#' @name setWeightToBC
#' @param object A BodyConditionHistory object
#' @param value A list holding the weight-to-body-condition model
setReplaceMethod("WeightToBC", signature(object = "BodyConditionHistory"), function(object, value) {
    slot(object, "WeightToBC") <- value
    validObject(object)
    object
})

#' Generic for the accessor of the WeightToBC slot.
#'
#' @name WeightToBC
#' @param object A BodyConditionHistory object
#' @nord
setGeneric("WeightToBC", function(object) standardGeneric("WeightToBC"))

#' Read the WeightToBC slot of a BodyConditionHistory object.
#'
#' @name WeightToBC
#' @param object A BodyConditionHistory object
setMethod("WeightToBC", signature(object = "BodyConditionHistory"),
          function(object) slot(object, "WeightToBC"))
##########################
############################################ INITIALIZE ####################################################
#' Instantiate a new BodyConditionHistory object with empty slots.
#'
#' @name initialize
#' @nord
#' @exportMethod initialize
setMethod("initialize", signature(.Object = "BodyConditionHistory"), function(.Object, ...) {
    # Start every new object with empty current/history/model slots.
    .Object@CurrentBCH <- numeric()
    .Object@HistoryBCH <- list()
    .Object@WeightToBC <- list()
    .Object
})
############################################ BODYCONDITIONHISTORY METHODS #######################################
########################## Fit new bodyCondition, update history of distBodyCondition
#' Set generic to method that creates the current value of a BodyConditionHistory object. The CurrentBCH slot is updated by requesting the mean and SD of weight gain from the
#' WeightTrend object, then converting these to body condition values using the function in the WeightToBC slot.
#'
#' @name UpdateBCH
#' @param object A BodyConditionHistory object
setGeneric("UpdateBCH",
    function(object, ...) standardGeneric("UpdateBCH"))

#' Update the current body condition by converting weight-gain statistics to body condition values
#'
#' @param object A BodyConditionHistory object
#' @param wtMean Numeric. The current mean seal weight
#' @param wtStdev Numeric. The current standard deviation of seal weight - should match the value in Garrott et al. 2013
#' @param timestep Integer, a value for the current timestep
setMethod("UpdateBCH", signature(object = "BodyConditionHistory"),
    function(object, wtMean, wtStdev, timestep) {
        # Validate inputs early; timestep must be a true integer so it can
        # index the history list unambiguously.
        if (is.null(object)) stop("A BodyConditionHistory object is required.")
        if (is.null(wtMean)) stop("A value of mean seal weight is required.")
        if (is.null(wtStdev)) stop("A value of standard deviation of seal weight is required.")
        if (is.null(timestep) || !is.integer(timestep)) stop("A valid timestep value is required.")
        histLst <- HistoryBCH(object)
        # NOTE: The first body condition value is provided by direct assignment to
        # the slot - all other updates go through this method. Thus HistoryBCH
        # starts with length 1, not 0, so its length must equal timestep - 1 here.
        # (Typo "lenght" in the original error message fixed.)
        if (length(histLst) != timestep - 1) stop("The timestep value does not match the current length of the HistoryBCH list.")
        wgtbcmodel <- WeightToBC(object)[[1]]
        # Predict body condition at the mean weight and at mean + 1 SD; the
        # spread between the two predictions becomes the new SD.
        newdata <- data.frame(weightGain = c(wtMean, wtMean + wtStdev))
        pred <- try(predict(wgtbcmodel, newdata = newdata))
        if (inherits(pred, "try-error")) {
            stop("Failed to generate parameters for the distribution of body condition")
        }
        bcmean <- if (pred[1] > 0) pred[1] else 0  # body condition cannot go negative
        bcsd <- abs(pred[2] - pred[1])             # replaces the manual sign flip
        newBodyCond <- c(bcmean, bcsd)
        histLst[[timestep]] <- newBodyCond
        HistoryBCH(object) <- histLst
        CurrentBCH(object) <- newBodyCond
        object
    }
)
##########################
########################## Create trend table of Body Condition data
#' Set generic to a method that creates output tables from the HistoryBCH of a BodyConditionHistory object.
#'
#' @name SummarizeBCH
#' @param object A BodyConditionHistory object
setGeneric("SummarizeBCH",
    function(object, ...) standardGeneric("SummarizeBCH"))

#' Create a trend table (one row per timestep) from the HistoryBCH slot
#'
#' @param object A BodyConditionHistory object
#' @return A data.frame with columns time, bc.mean and bc.se
setMethod("SummarizeBCH", signature(object = "BodyConditionHistory"),
    function(object) {
        if (is.null(object)) stop("A BodyConditionHistory object is required.")
        histLst <- HistoryBCH(object)
        # Fewer than 5 timesteps is treated as "no simulation has been run";
        # threshold kept from the original implementation.
        if (length(histLst) < 5) stop("The HistoryBCH list has very little or no data. Check to see that a simulation has been run.")
        # Build one row per timestep and bind once at the end, instead of
        # growing a data.frame with rbind() inside a loop (O(n^2) copying).
        rows <- lapply(seq_along(histLst), function(ii) {
            tmp.mx <- histLst[[ii]]
            data.frame(time = ii, bc.mean = tmp.mx[1], bc.se = tmp.mx[2])
        })
        do.call(rbind, rows)
    }
)
##########################
|
/DemogObjects/BodyConditionHistory.R
|
permissive
|
pointblue/weddell-seal-toothfish-model
|
R
| false
| false
| 9,180
|
r
|
# TODO: Add comment
#
# Author: lsalas
###############################################################################
## This file sets the BodyConditionHistory object for the simple WESE simulation
#' Abstract class for BodyConditionHistory
#'
#' Abstract class for BodyConditionHistory
#'
#' @slot CurrentBCH Numeric, a vector with the mean and sd (in that order) of WESE body condition
#' @slot HistoryBCH A list object that holds the history of mean and sd values by timestep (i.e., the body condition distribution's trend data)
#' @slot WeightToBC A list object containing the model that describes the link between weight and body condition.
#' @exportClass BodyConditionHistory
setClass(Class="BodyConditionHistory", representation(
CurrentBCH = "numeric",
HistoryBCH = "list",
WeightToBC = "list"
))
############################################ SLOT METHODS #######################################
########################## Set CurrentBCH slot
#' Set generic to method that sets the CurrentBCH slot of a BodyConditionHistory object.
#'
#' @name setCurrentBCH
#' @param object A BodyConditionHistory object
#' @param value The current value to place in the CurrentBCH slot of the object
#' @nord
setGeneric("CurrentBCH<-",
function(object, value) standardGeneric("CurrentBCH<-"))
#' Set the CurrentBCH slot of a BodyConditionHistory object.
#'
#' @name setCurrentBCH
#' @param object A BodyConditionHistory object
#' @param value The current mean and sd (in that order) of WESE body condition
setReplaceMethod("CurrentBCH",signature(object="BodyConditionHistory"),
function(object,value) {
slot(object,"CurrentBCH")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the CurrentBCH slot of a BodyConditionHistory object.
#'
#' @name CurrentBCH
#' @param object A BodyConditionHistory object
#' @nord
setGeneric("CurrentBCH",
function(object) standardGeneric("CurrentBCH"))
#' Retrieve the CurrentBCH slot value of a BodyConditionHistory object.
#'
#' @name CurrentBCH
#' @param object A BodyConditionHistory object
setMethod("CurrentBCH", signature(object="BodyConditionHistory"),
function(object) slot(object,"CurrentBCH"))
##########################
########################## Set HistoryBCH slot
#' Set generic to method that sets the HistoryBCH slot of a BodyConditionHistory object.
#'
#' @name setHistoryBCH
#' @param object A BodyConditionHistory object
#' @param value A list object
#' @nord
setGeneric("HistoryBCH<-",
function(object, value) standardGeneric("HistoryBCH<-"))
#' Set the HistoryBCH slot of a BodyConditionHistory object.
#'
#' @name setHistoryBCH
#' @param object A BodyConditionHistory object
#' @param value A list object that holds the history of mean and sd values by timestep (i.e., the body condition distribution's trend data)
setReplaceMethod("HistoryBCH",signature(object="BodyConditionHistory"),
function(object,value) {
slot(object,"HistoryBCH")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the HistoryBCH slot value of a BodyConditionHistory object.
#'
#' @name HistoryBCH
#' @param object A BodyConditionHistory object
#' @nord
setGeneric("HistoryBCH",
function(object) standardGeneric("HistoryBCH"))
#' Retrieve the HistoryBCH slot value of a BodyConditionHistory object.
#'
#' @name HistoryBCH
#' @param object A BodyConditionHistory object
setMethod("HistoryBCH", signature(object="BodyConditionHistory"),
function(object) slot(object,"HistoryBCH"))
##########################
########################## Set WeightToBC slot
#' Set generic to method that sets the WeightToBC slot of a BodyConditionHistory object.
#'
#' @name setWeightToBC
#' @param object A BodyConditionHistory object
#' @param value A list object
#' @nord
setGeneric("WeightToBC<-",
function(object, value) standardGeneric("WeightToBC<-"))
#' Set the WeightToBC slot of a BodyConditionHistory object.
#'
#' @name setWeightToBC
#' @param object A BodyConditionHistory object
#' @param value A list object that holds the model that relates weight to body condition
setReplaceMethod("WeightToBC",signature(object="BodyConditionHistory"),
function(object,value) {
slot(object,"WeightToBC")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the WeightToBC slot value of a BodyConditionHistory object.
#'
#' @name WeightToBC
#' @param object A BodyConditionHistory object
#' @nord
setGeneric("WeightToBC",
function(object) standardGeneric("WeightToBC"))
#' Retrieve the WeightToBC slot value of a BodyConditionHistory object.
#'
#' @name WeightToBC
#' @param object A BodyConditionHistory object
setMethod("WeightToBC", signature(object="BodyConditionHistory"),
function(object) slot(object,"WeightToBC"))
##########################
############################################ INITIALIZE ####################################################
#' Instantiate a new BodyConditionHistory object
#'
#' @name initialize
#' @nord
#' @exportMethod initialize
setMethod("initialize",
signature(.Object = "BodyConditionHistory"),
function (.Object, ...)
{
.Object@CurrentBCH<-numeric()
.Object@HistoryBCH<-list()
.Object@WeightToBC<-list()
.Object
}
)
############################################ BODYCONDITIONHISTORY METHODS #######################################
########################## Fit new bodyCondition, update history of distBodyCondition
#' Set generic to method that creates the current value of a BodyConditionHistory object. The CurrentBCH slot is updated by requesting the mean and SD of weight gain from the
#' WeightTrend object, then converting these to body condition values using the function in the WeightToBC slot.
#'
#' @name UpdateBCH
#' @param object A BodyConditionHistory object
setGeneric("UpdateBCH",
    function(object, ...) standardGeneric("UpdateBCH"))

#' Update the current body condition by converting weight-gain statistics to body condition values
#'
#' @param object A BodyConditionHistory object
#' @param wtMean Numeric. The current mean seal weight
#' @param wtStdev Numeric. The current standard deviation of seal weight - should match the value in Garrott et al. 2013
#' @param timestep Integer, a value for the current timestep
setMethod("UpdateBCH", signature(object = "BodyConditionHistory"),
    function(object, wtMean, wtStdev, timestep) {
        # Validate inputs early; timestep must be a true integer so it can
        # index the history list unambiguously.
        if (is.null(object)) stop("A BodyConditionHistory object is required.")
        if (is.null(wtMean)) stop("A value of mean seal weight is required.")
        if (is.null(wtStdev)) stop("A value of standard deviation of seal weight is required.")
        if (is.null(timestep) || !is.integer(timestep)) stop("A valid timestep value is required.")
        histLst <- HistoryBCH(object)
        # NOTE: The first body condition value is provided by direct assignment to
        # the slot - all other updates go through this method. Thus HistoryBCH
        # starts with length 1, not 0, so its length must equal timestep - 1 here.
        # (Typo "lenght" in the original error message fixed.)
        if (length(histLst) != timestep - 1) stop("The timestep value does not match the current length of the HistoryBCH list.")
        wgtbcmodel <- WeightToBC(object)[[1]]
        # Predict body condition at the mean weight and at mean + 1 SD; the
        # spread between the two predictions becomes the new SD.
        newdata <- data.frame(weightGain = c(wtMean, wtMean + wtStdev))
        pred <- try(predict(wgtbcmodel, newdata = newdata))
        if (inherits(pred, "try-error")) {
            stop("Failed to generate parameters for the distribution of body condition")
        }
        bcmean <- if (pred[1] > 0) pred[1] else 0  # body condition cannot go negative
        bcsd <- abs(pred[2] - pred[1])             # replaces the manual sign flip
        newBodyCond <- c(bcmean, bcsd)
        histLst[[timestep]] <- newBodyCond
        HistoryBCH(object) <- histLst
        CurrentBCH(object) <- newBodyCond
        object
    }
)
##########################
########################## Create trend table of Body Condition data
#' Set generic to a method that creates output tables from the HistoryBCH of a BodyConditionHistory object.
#'
#' @name SummarizeBCH
#' @param object A BodyConditionHistory object
setGeneric("SummarizeBCH",
    function(object, ...) standardGeneric("SummarizeBCH"))

#' Create a trend table (one row per timestep) from the HistoryBCH slot
#'
#' @param object A BodyConditionHistory object
#' @return A data.frame with columns time, bc.mean and bc.se
setMethod("SummarizeBCH", signature(object = "BodyConditionHistory"),
    function(object) {
        if (is.null(object)) stop("A BodyConditionHistory object is required.")
        histLst <- HistoryBCH(object)
        # Fewer than 5 timesteps is treated as "no simulation has been run";
        # threshold kept from the original implementation.
        if (length(histLst) < 5) stop("The HistoryBCH list has very little or no data. Check to see that a simulation has been run.")
        # Build one row per timestep and bind once at the end, instead of
        # growing a data.frame with rbind() inside a loop (O(n^2) copying).
        rows <- lapply(seq_along(histLst), function(ii) {
            tmp.mx <- histLst[[ii]]
            data.frame(time = ii, bc.mean = tmp.mx[1], bc.se = tmp.mx[2])
        })
        do.call(rbind, rows)
    }
)
##########################
|
# Capture the formal argument names of MplusAutomation's mplusObject() and
# mplusModeler() once at load time, so estimate_profiles_mplus2() can check
# which user-supplied arguments are legal to forward to each function.
.mplusObjectArgNames <- formalArgs(mplusObject)
.mplusModelerArgNames <- formalArgs(mplusModeler)
#' Estimate latent profiles using Mplus
#'
#' Estimates latent profiles (finite mixture models) using the commercial
#' program Mplus, through the R-interface of
#' \code{\link[MplusAutomation:mplusModeler]{MplusAutomation}}.
#' @param df data.frame with two or more columns with continuous variables
#' @param n_profiles Numeric vector. The number of profiles (or mixture
#' components) to be estimated. Each number in the vector corresponds to an
#' analysis with that many mixture components.
#' @param model_numbers Numeric vector. Numbers of the models to be estimated.
#' See \code{\link{estimate_profiles}} for a description of the models available
#' in tidyLPA.
#' @param select_vars Character. Optional vector of variable names in \code{df},
#' to be used for model estimation. Defaults to \code{NULL}, which means all
#' variables in \code{df} are used.
#' @param ... Parameters passed directly to
#' \code{\link[MplusAutomation]{mplusModeler}}. See the documentation of
#' \code{\link[MplusAutomation]{mplusModeler}}.
#' @param keepfiles Logical. Whether to retain the files created by
#' \code{mplusModeler} (e.g., for future reference, or to manually edit them).
#' @author Caspar J. van Lissa
#' @return An object of class 'tidyLPA' and 'list'
#' @importFrom methods hasArg
#' @import MplusAutomation
estimate_profiles_mplus2 <-
function(df, n_profiles, model_numbers, select_vars, ..., keepfiles = FALSE) {
arg_list <- as.list(match.call())[-1]
df_full <- df
df <- df[, select_vars, drop = FALSE]
all_na_rows <- rowSums(is.na(df)) == ncol(df)
if(any(all_na_rows)){
warning("Data set contains cases with missing on all variables. These cases were not included in the analysis.\n")
df <- df[!all_na_rows, , drop = FALSE]
}
# Always rename all variables for Mplus; simpler than handling
# restrictions on variable name length and characters used:
original_names <- selected_variables <- names(df)
names(df) <- param_names <- paste0("X", 1:ncol(df))
# Check which arguments the user tried to pass.
# First, check for arguments that cannot be used at all
.estimate_profiles_mplus2ArgNames <- c("df", "n_profiles", "model_numbers", "select_vars", "keepfiles")
if(any(!(names(arg_list) %in% c(.estimate_profiles_mplus2ArgNames, .mplusModelerArgNames, .mplusObjectArgNames)))){
illegal_args <- names(arg_list)[which(!(names(arg_list) %in% c(.estimate_profiles_mplus2ArgNames, .mplusModelerArgNames, .mplusObjectArgNames)))]
stop("The following illegal arguments were detected in the call to estimate_profiles:\n", paste0(" ", illegal_args, collapse = "\n"), "\nThese are not valid arguments to estimate_profiles(), mplusObject(), or mplusModeler(). Drop these arguments, and try again.", sep = "", call. = FALSE)
}
# Next, check for arguments that are constructed by estimate_profiles
Args <- list(...)
if(any(c("rdata", "usevariables", "MODEL") %in% names(Args))){
illegal_args <- c("rdata", "usevariables", "MODEL")[which(c("rdata", "usevariables", "MODEL") %in% names(Args))]
stop("The following illegal arguments were detected in the call to estimate_profiles:\n", paste0(" ", illegal_args, collapse = "\n"), "\nThese arguments are constructed by estimate_profiles(), and cannot be passed by users. Drop these arguments, and try again.", sep = "", call. = FALSE)
}
Args <- c(list(rdata = df, usevariables = param_names), Args)
# Create necessary mplusObject arguments for mixture model, taking
# into account any existing arguments passed by the user
if("model_overall" %in% names(arg_list)){
model_overall <- arg_list[["model_overall"]]
for(i in 1:length(original_names)){
model_overall <- gsub(original_names[i], param_names[i], model_overall)
}
} else {
model_overall <- ""
}
filename_stem <- NULL
if("filename_stem" %in% names(arg_list)) filename_stem <- arg_list[["filename_stem"]]
if (hasArg("OUTPUT")) {
Args[["OUTPUT"]] <- paste0("TECH14;\n", Args[["OUTPUT"]])
} else {
Args[["OUTPUT"]] <- "TECH14;\n"
}
if (hasArg("ANALYSIS")) {
Args[["ANALYSIS"]] <-
paste0("TYPE = mixture;\n", Args[["ANALYSIS"]])
} else {
Args[["ANALYSIS"]] <- "TYPE = mixture;\n"
}
char_args <- which(sapply(Args, is.character))
Args[char_args] <-
lapply(Args[char_args], function(x) {
gsub(" ", "\n", x)
})
# Separate arguments for mplusObject and mplusModeler
mplusObjectArgs <- Args[which(names(Args) %in% .mplusObjectArgNames)]
mplusModelerArgs <- Args[which(names(Args) %in% .mplusModelerArgNames)]
# Create mplusObject template for all models
base_object <- invisible(suppressMessages(do.call(mplusObject, mplusObjectArgs)))
if(ncol(df) == 1){
base_object$VARIABLE <- paste0("NAMES = ", names(df), ";\n")
}
run_models <-
expand.grid(prof = n_profiles, mod = model_numbers)
out_list <- mapply(
FUN = function(this_class, this_model) {
# Generate specific Mplus object for each individual model
base_object$VARIABLE <-
paste0(base_object$VARIABLE, paste(c(
"CLASSES = ",
paste(
"c1(",
this_class,
")",
sep = "",
collapse = " "
),
";\n"
), collapse = ""))
model_class_specific <-
gsub(" ",
"\n",
syntax_class_specific(this_model, param_names))
expand_class_specific <- ""
for (this_class in 1:this_class) {
expand_class_specific <-
paste0(expand_class_specific,
gsub("\\{C\\}", this_class, paste(
c(
"%c1#",
this_class,
"%\n",
model_class_specific,
"\n\n"
),
collapse = ""
)))
}
base_object$MODEL <-
paste0(base_object$MODEL,
expand_class_specific)
base_object$SAVEDATA <-
paste0(
"FILE IS ",
paste0(
ifelse(
!is.null(filename_stem),
paste0(filename_stem, "_"),
""
),
"model_",
this_model,
"_class_",
this_class
),
".dat;\nSAVE = cprobabilities;"
)
base_object$TITLE <-
trimws(paste(
ifelse(!is.null(filename_stem), filename_stem, ""),
"model",
this_model,
"with",
this_class,
"classes"
))
# Run analysis ------------------------------------------------------------
filename = c(inp = ifelse(
!is.null(filename_stem),
paste0(
paste(
filename_stem,
"model",
this_model,
"class",
this_class,
sep = "_"
),
".inp"
),
paste0(
paste("model", this_model, "class", this_class, sep = "_"),
".inp"
)
))
out <- list(model = quiet(suppressMessages(
do.call(what = mplusModeler,
args = c(
list(
object = base_object,
dataout = ifelse(
!is.null(filename_stem),
paste0("data_", filename_stem, ".dat"),
"data.dat"
),
modelout = filename["inp"],
run = 1L,
check = FALSE,
varwarnings = TRUE,
writeData = "ifmissing",
hashfilename = TRUE
),
mplusModelerArgs
))
))$results)
warnings <- NULL
if(!is.null(out$model$summaries$LL)){
out$fit <-
c(Model = this_model,
Classes = this_class,
calc_fitindices(out$model))
estimates <- estimates(out$model)
if(!is.null(estimates)){
estimates$Model <- this_model
estimates$Classes <- this_class
for(which_name in 1:length(param_names)){
estimates$Parameter <- gsub(toupper(param_names[which_name]), original_names[which_name], estimates$Parameter)
}
}
out$estimates <- estimates
if(!is.null(out$model[["savedata"]])){
#dff <- out$model$savedata
outdat <- as.matrix(out$model$savedata[, grep("CPROB1", names(out$model$savedata)):ncol(out$model$savedata)])
dff <- matrix(NA_real_, dim(df_full)[1], dim(outdat)[2])
dff[!all_na_rows, ] <- outdat
colnames(dff) <- c(paste0("CPROB", 1:(ncol(dff)-1)), "Class")
out$dff <- as_tibble(cbind(df_full, dff))
out$dff$model_number <- this_model
out$dff$classes_number <- this_class
out$dff <- out$dff[, c((ncol(out$dff)-1), ncol(out$dff), 1:(ncol(out$dff)-2))]
attr(out$dff, "selected") <- selected_variables
}
# Check for warnings ------------------------------------------------------
if(!is.na(out$fit[["prob_min"]])){
if(out$fit[["prob_min"]]< .001) warnings <- c(warnings, "Some classes were not assigned any cases with more than .1% probability. Consequently, these solutions are effectively identical to a solution with one class less.")
}
if(!is.na(out$fit[["n_min"]])){
if(out$fit[["n_min"]] < .01) warnings <- c(warnings, "Less than 1% of cases were assigned to one of the profiles. Interpret this solution with caution and consider other models.")
}
} else {
out$fit <-
c(Model = this_model,
Classes = this_class,
"LogLik" = NA, "AIC" = NA, "AWE" = NA, "BIC" = NA, "CAIC" = NA, "CLC" = NA, "KIC" = NA, "SABIC" = NA, "ICL" = NA, "Entropy" = NA, "prob_min" = NA, "prob_max" = NA, "n_min" = NA, "n_max" = NA, "BLRT_val" = NA, "BLRT_p" = NA)
out$estimates <- NULL
}
warnings <- unlist(c(warnings, sapply(out$model$warnings, paste, collapse = " "), sapply(out$model$errors, paste, collapse = " ")))
if(this_class == 1){
warnings <- warnings[!sapply(warnings, grepl, pattern = "TECH14 option is not available for TYPE=MIXTURE with only one class.")]
}
if(this_model %in% c(1, 2)){
warnings <- warnings[!sapply(warnings, grepl, pattern = "All variables are uncorrelated with all other variables within class.")]
}
if(length(warnings)) out$warnings <- warnings
class(out) <- c("tidyProfile.mplus", "tidyProfile", "list")
out
},
this_class = run_models$prof,
this_model = run_models$mod,
SIMPLIFY = FALSE
)
all_files <-
paste0(
ifelse(!is.null(filename_stem), paste0(filename_stem, "_"), ""),
paste("model_", run_models$mod, "_class_", run_models$prof, sep = "")
)
if (!keepfiles) {
remove_files <-
c(
out_list[[1]]$model$input$data$file,
paste0(all_files, ".inp"),
paste0(all_files, ".out"),
paste0(all_files, ".dat")
)
remove_files <- remove_files[which(remove_files %in% list.files())]
if(length(remove_files) > 0){
invisible(file.remove(remove_files))
}
}
names(out_list) <-
paste("model_", run_models$mod, "_class_", run_models$prof, sep = "")
out_list
}
|
/R/estimate-profiles-mplus.R
|
no_license
|
bretsw/tidyLPA
|
R
| false
| false
| 14,204
|
r
|
# Cache the formal argument names of the two MplusAutomation entry points so
# estimate_profiles_mplus2() can validate user-supplied `...` arguments and
# route them to the right call. `names(formals(f))` is equivalent to
# `methods::formalArgs(f)`.
.mplusObjectArgNames <- names(formals(mplusObject))
.mplusModelerArgNames <- names(formals(mplusModeler))
#' Estimate latent profiles using Mplus
#'
#' Estimates latent profiles (finite mixture models) using the commercial
#' program Mplus, through the R-interface of
#' \code{\link[MplusAutomation:mplusModeler]{MplusAutomation}}.
#' @param df data.frame with two or more columns with continuous variables
#' @param n_profiles Numeric vector. The number of profiles (or mixture
#' components) to be estimated. Each number in the vector corresponds to an
#' analysis with that many mixture components.
#' @param model_numbers Numeric vector. Numbers of the models to be estimated.
#' See \code{\link{estimate_profiles}} for a description of the models available
#' in tidyLPA.
#' @param select_vars Character. Optional vector of variable names in \code{df},
#' to be used for model estimation. Defaults to \code{NULL}, which means all
#' variables in \code{df} are used.
#' @param ... Parameters passed directly to
#' \code{\link[MplusAutomation]{mplusModeler}}. See the documentation of
#' \code{\link[MplusAutomation]{mplusModeler}}.
#' @param keepfiles Logical. Whether to retain the files created by
#' \code{mplusModeler} (e.g., for future reference, or to manually edit them).
#' @author Caspar J. van Lissa
#' @return An object of class 'tidyLPA' and 'list'
#' @importFrom methods hasArg
#' @import MplusAutomation
estimate_profiles_mplus2 <-
  function(df, n_profiles, model_numbers, select_vars, ..., keepfiles = FALSE) {
    # Capture the user's call so argument names can be validated below.
    arg_list <- as.list(match.call())[-1]
    # Keep the complete input data: class probabilities are merged back onto
    # it (including rows dropped for all-missing) at the end of each run.
    df_full <- df
    df <- df[, select_vars, drop = FALSE]
    # Rows missing on every selected variable cannot be used by Mplus; they
    # are removed here and re-inserted as NA rows in the output.
    all_na_rows <- rowSums(is.na(df)) == ncol(df)
    if(any(all_na_rows)){
      warning("Data set contains cases with missing on all variables. These cases were not included in the analysis.\n")
      df <- df[!all_na_rows, , drop = FALSE]
    }
    # Always rename all variables for Mplus; simpler than handling
    # restrictions on variable name length and characters used:
    original_names <- selected_variables <- names(df)
    names(df) <- param_names <- paste0("X", 1:ncol(df))
    # Check which arguments the user tried to pass.
    # First, check for arguments that cannot be used at all
    .estimate_profiles_mplus2ArgNames <- c("df", "n_profiles", "model_numbers", "select_vars", "keepfiles")
    if(any(!(names(arg_list) %in% c(.estimate_profiles_mplus2ArgNames, .mplusModelerArgNames, .mplusObjectArgNames)))){
      illegal_args <- names(arg_list)[which(!(names(arg_list) %in% c(.estimate_profiles_mplus2ArgNames, .mplusModelerArgNames, .mplusObjectArgNames)))]
      stop("The following illegal arguments were detected in the call to estimate_profiles:\n", paste0("  ", illegal_args, collapse = "\n"), "\nThese are not valid arguments to estimate_profiles(), mplusObject(), or mplusModeler(). Drop these arguments, and try again.", sep = "", call. = FALSE)
    }
    # Next, check for arguments that are constructed by estimate_profiles
    Args <- list(...)
    if(any(c("rdata", "usevariables", "MODEL") %in% names(Args))){
      illegal_args <- c("rdata", "usevariables", "MODEL")[which(c("rdata", "usevariables", "MODEL") %in% names(Args))]
      stop("The following illegal arguments were detected in the call to estimate_profiles:\n", paste0("  ", illegal_args, collapse = "\n"), "\nThese arguments are constructed by estimate_profiles(), and cannot be passed by users. Drop these arguments, and try again.", sep = "", call. = FALSE)
    }
    Args <- c(list(rdata = df, usevariables = param_names), Args)
    # Create necessary mplusObject arguments for mixture model, taking
    # into account any existing arguments passed by the user
    if("model_overall" %in% names(arg_list)){
      model_overall <- arg_list[["model_overall"]]
      # Translate user-facing variable names into the X1..Xk names used in
      # the generated Mplus syntax.
      for(i in 1:length(original_names)){
        model_overall <- gsub(original_names[i], param_names[i], model_overall)
      }
    } else {
      model_overall <- ""
    }
    filename_stem <- NULL
    if("filename_stem" %in% names(arg_list)) filename_stem <- arg_list[["filename_stem"]]
    # TECH14 output (BLRT) and mixture analysis type are always requested,
    # prepended to anything the user supplied.
    if (hasArg("OUTPUT")) {
      Args[["OUTPUT"]] <- paste0("TECH14;\n", Args[["OUTPUT"]])
    } else {
      Args[["OUTPUT"]] <- "TECH14;\n"
    }
    if (hasArg("ANALYSIS")) {
      Args[["ANALYSIS"]] <-
        paste0("TYPE = mixture;\n", Args[["ANALYSIS"]])
    } else {
      Args[["ANALYSIS"]] <- "TYPE = mixture;\n"
    }
    char_args <- which(sapply(Args, is.character))
    Args[char_args] <-
      lapply(Args[char_args], function(x) {
        gsub(" ", "\n", x)
      })
    # Separate arguments for mplusObject and mplusModeler
    mplusObjectArgs <- Args[which(names(Args) %in% .mplusObjectArgNames)]
    mplusModelerArgs <- Args[which(names(Args) %in% .mplusModelerArgNames)]
    # Create mplusObject template for all models
    base_object <- invisible(suppressMessages(do.call(mplusObject, mplusObjectArgs)))
    if(ncol(df) == 1){
      base_object$VARIABLE <- paste0("NAMES = ", names(df), ";\n")
    }
    # One row per (number of profiles, model number) combination; each row
    # becomes one Mplus run below.
    run_models <-
      expand.grid(prof = n_profiles, mod = model_numbers)
    out_list <- mapply(
      FUN = function(this_class, this_model) {
        # Generate specific Mplus object for each individual model
        base_object$VARIABLE <-
          paste0(base_object$VARIABLE, paste(c(
            "CLASSES = ",
            paste(
              "c1(",
              this_class,
              ")",
              sep = "",
              collapse = " "
            ),
            ";\n"
          ), collapse = ""))
        model_class_specific <-
          gsub(" ",
               "\n",
               syntax_class_specific(this_model, param_names))
        expand_class_specific <- ""
        # NOTE(review): the loop index deliberately reuses `this_class`;
        # after iterating 1..this_class it ends with its original value,
        # which the SAVEDATA/TITLE code below still relies on.
        for (this_class in 1:this_class) {
          expand_class_specific <-
            paste0(expand_class_specific,
                   gsub("\\{C\\}", this_class, paste(
                     c(
                       "%c1#",
                       this_class,
                       "%\n",
                       model_class_specific,
                       "\n\n"
                     ),
                     collapse = ""
                   )))
        }
        base_object$MODEL <-
          paste0(base_object$MODEL,
                 expand_class_specific)
        # Save posterior class probabilities so they can be merged back
        # onto the input data after the run.
        base_object$SAVEDATA <-
          paste0(
            "FILE IS ",
            paste0(
              ifelse(
                !is.null(filename_stem),
                paste0(filename_stem, "_"),
                ""
              ),
              "model_",
              this_model,
              "_class_",
              this_class
            ),
            ".dat;\nSAVE = cprobabilities;"
          )
        base_object$TITLE <-
          trimws(paste(
            ifelse(!is.null(filename_stem), filename_stem, ""),
            "model",
            this_model,
            "with",
            this_class,
            "classes"
          ))
        # Run analysis ------------------------------------------------------------
        filename = c(inp = ifelse(
          !is.null(filename_stem),
          paste0(
            paste(
              filename_stem,
              "model",
              this_model,
              "class",
              this_class,
              sep = "_"
            ),
            ".inp"
          ),
          paste0(
            paste("model", this_model, "class", this_class, sep = "_"),
            ".inp"
          )
        ))
        # Hand the assembled object to Mplus via mplusModeler; output is
        # silenced and only the parsed results are retained.
        out <- list(model = quiet(suppressMessages(
          do.call(what = mplusModeler,
                  args = c(
                    list(
                      object = base_object,
                      dataout = ifelse(
                        !is.null(filename_stem),
                        paste0("data_", filename_stem, ".dat"),
                        "data.dat"
                      ),
                      modelout = filename["inp"],
                      run = 1L,
                      check = FALSE,
                      varwarnings = TRUE,
                      writeData = "ifmissing",
                      hashfilename = TRUE
                    ),
                    mplusModelerArgs
                  ))
        ))$results)
        warnings <- NULL
        # A non-NULL log-likelihood signals that estimation converged and
        # produced usable summaries.
        if(!is.null(out$model$summaries$LL)){
          out$fit <-
            c(Model = this_model,
              Classes = this_class,
              calc_fitindices(out$model))
          estimates <- estimates(out$model)
          if(!is.null(estimates)){
            estimates$Model <- this_model
            estimates$Classes <- this_class
            # Map the temporary X1..Xk names back to the user's names.
            for(which_name in 1:length(param_names)){
              estimates$Parameter <- gsub(toupper(param_names[which_name]), original_names[which_name], estimates$Parameter)
            }
          }
          out$estimates <- estimates
          if(!is.null(out$model[["savedata"]])){
            #dff <- out$model$savedata
            # Re-align saved class probabilities with the full data set,
            # leaving NA rows for listwise-missing cases dropped earlier.
            outdat <- as.matrix(out$model$savedata[, grep("CPROB1", names(out$model$savedata)):ncol(out$model$savedata)])
            dff <- matrix(NA_real_, dim(df_full)[1], dim(outdat)[2])
            dff[!all_na_rows, ] <- outdat
            colnames(dff) <- c(paste0("CPROB", 1:(ncol(dff)-1)), "Class")
            out$dff <- as_tibble(cbind(df_full, dff))
            out$dff$model_number <- this_model
            out$dff$classes_number <- this_class
            # Move the model/classes identifier columns to the front.
            out$dff <- out$dff[, c((ncol(out$dff)-1), ncol(out$dff), 1:(ncol(out$dff)-2))]
            attr(out$dff, "selected") <- selected_variables
          }
          # Check for warnings ------------------------------------------------------
          if(!is.na(out$fit[["prob_min"]])){
            if(out$fit[["prob_min"]]< .001) warnings <- c(warnings, "Some classes were not assigned any cases with more than .1% probability. Consequently, these solutions are effectively identical to a solution with one class less.")
          }
          if(!is.na(out$fit[["n_min"]])){
            if(out$fit[["n_min"]] < .01) warnings <- c(warnings, "Less than 1% of cases were assigned to one of the profiles. Interpret this solution with caution and consider other models.")
          }
        } else {
          # Estimation failed: return an all-NA fit vector with the same
          # element names as the success path so results can be row-bound.
          out$fit <-
            c(Model = this_model,
              Classes = this_class,
              "LogLik" = NA, "AIC" = NA, "AWE" = NA, "BIC" = NA, "CAIC" = NA, "CLC" = NA, "KIC" = NA, "SABIC" = NA, "ICL" = NA, "Entropy" = NA, "prob_min" = NA, "prob_max" = NA, "n_min" = NA, "n_max" = NA, "BLRT_val" = NA, "BLRT_p" = NA)
          out$estimates <- NULL
        }
        # Collect Mplus warnings/errors, dropping ones that are expected
        # for particular model/class combinations.
        warnings <- unlist(c(warnings, sapply(out$model$warnings, paste, collapse = " "), sapply(out$model$errors, paste, collapse = " ")))
        if(this_class == 1){
          warnings <- warnings[!sapply(warnings, grepl, pattern = "TECH14 option is not available for TYPE=MIXTURE with only one class.")]
        }
        if(this_model %in% c(1, 2)){
          warnings <- warnings[!sapply(warnings, grepl, pattern = "All variables are uncorrelated with all other variables within class.")]
        }
        if(length(warnings)) out$warnings <- warnings
        class(out) <- c("tidyProfile.mplus", "tidyProfile", "list")
        out
      },
      this_class = run_models$prof,
      this_model = run_models$mod,
      SIMPLIFY = FALSE
    )
    # Names of all Mplus input/output files generated above.
    all_files <-
      paste0(
        ifelse(!is.null(filename_stem), paste0(filename_stem, "_"), ""),
        paste("model_", run_models$mod, "_class_", run_models$prof, sep = "")
      )
    if (!keepfiles) {
      # Clean up the temporary Mplus artifacts unless the user asked to
      # keep them (only files actually present are removed).
      remove_files <-
        c(
          out_list[[1]]$model$input$data$file,
          paste0(all_files, ".inp"),
          paste0(all_files, ".out"),
          paste0(all_files, ".dat")
        )
      remove_files <- remove_files[which(remove_files %in% list.files())]
      if(length(remove_files) > 0){
        invisible(file.remove(remove_files))
      }
    }
    names(out_list) <-
      paste("model_", run_models$mod, "_class_", run_models$prof, sep = "")
    out_list
  }
|
# Data import and processing - Cosumnes
# All data is reprojected (if necessary) to lat/long

source("R/0_utilities.R")

# ---------------------------------------------------------------------
# Import VIC data

# Import basin-averaged daily VIC data
# One CSV per GCM x scenario: six GCMs (CanESM2, CCSM4, CNRM-CM5,
# HadGEM2-CC365, HadGEM2-ES365, MIROC5) crossed with historical (1950-2005)
# and, for 2006-2099, RCP4.5, RCP8.5, and an RCP8.5 variant with 40%
# conductance ("85alt40pc").
etqswe_cosumnes_canesm2_hist <- readr::read_csv("../BasinAvg/Cosumnes_CanESM2_Historical_Q_ET_SWE_1950-2005.csv")
etqswe_cosumnes_ccsm4_hist <- readr::read_csv("../BasinAvg/Cosumnes_CCSM4_Historical_Q_ET_SWE_1950-2005.csv")
etqswe_cosumnes_cnrm_hist <- readr::read_csv("../BasinAvg/Cosumnes_CNRM-CM5_Historical_Q_ET_SWE_1950-2005.csv")
etqswe_cosumnes_hadgemcc_hist <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-CC365_Historical_Q_ET_SWE_1950-2005.csv")
etqswe_cosumnes_hadgemec_hist <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-ES365_Historical_Q_ET_SWE_1950-2005.csv")
etqswe_cosumnes_miroc5_hist <- readr::read_csv("../BasinAvg/Cosumnes_MIROC5_Historical_Q_ET_SWE_1950-2005.csv")
etqswe_cosumnes_canesm2_45 <- readr::read_csv("../BasinAvg/Cosumnes_CanESM2_RCP45_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_ccsm4_45 <- readr::read_csv("../BasinAvg/Cosumnes_CCSM4_RCP45_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_cnrm_45 <- readr::read_csv("../BasinAvg/Cosumnes_CNRM-CM5_RCP45_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_hadgemcc_45 <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-CC365_RCP45_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_hadgemec_45 <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-ES365_RCP45_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_miroc5_45 <- readr::read_csv("../BasinAvg/Cosumnes_MIROC5_RCP45_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_canesm2_85 <- readr::read_csv("../BasinAvg/Cosumnes_CanESM2_RCP85_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_ccsm4_85 <- readr::read_csv("../BasinAvg/Cosumnes_CCSM4_RCP85_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_cnrm_85 <- readr::read_csv("../BasinAvg/Cosumnes_CNRM-CM5_RCP85_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_hadgemcc_85 <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-CC365_RCP85_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_hadgemec_85 <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-ES365_RCP85_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_miroc5_85 <- readr::read_csv("../BasinAvg/Cosumnes_MIROC5_RCP85_Q_ET_SWE_2006-2099.csv")
etqswe_cosumnes_canesm2_85alt40pc <- readr::read_csv("../BasinAvg/Cosumnes_CanESM2_RCP85_Q_ET_SWE_2006-2099_40PC_Conductence.csv")
etqswe_cosumnes_ccsm4_85alt40pc <- readr::read_csv("../BasinAvg/Cosumnes_CCSM4_RCP85_Q_ET_SWE_2006-2099_40PC_Conductence.csv")
etqswe_cosumnes_cnrm_85alt40pc <- readr::read_csv("../BasinAvg/Cosumnes_CNRM-CM5_RCP85_Q_ET_SWE_2006-2099_40PC_Conductence.csv")
etqswe_cosumnes_hadgemcc_85alt40pc <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-CC365_RCP85_Q_ET_SWE_2006-2099_40PC_Conductence.csv")
etqswe_cosumnes_hadgemec_85alt40pc <- readr::read_csv("../BasinAvg/Cosumnes_HadGEM2-ES365_RCP85_Q_ET_SWE_2006-2099_40PC_Conductence.csv")
etqswe_cosumnes_miroc5_85alt40pc <- readr::read_csv("../BasinAvg/Cosumnes_MIROC5_RCP85_Q_ET_SWE_2006-2099_40PC_Conductence.csv")

# Observed values
etqswe_cosumnes_obs <- readr::read_csv("../BasinAvg/Cosumnes_Obs_Q_ET_SWE_1950-2011.csv")

# Consolidate basin-averaged daily data to lists
# Element names follow cosumnes_<gcm>_<scenario>; the observed series is
# stored under the placeholder GCM label "magicalgcm".
etqswe_cosumnes <- list(
  cosumnes_canesm2_hist=etqswe_cosumnes_canesm2_hist,cosumnes_ccsm4_hist=etqswe_cosumnes_ccsm4_hist,
  cosumnes_cnrm_hist=etqswe_cosumnes_cnrm_hist,cosumnes_hadgemcc_hist=etqswe_cosumnes_hadgemcc_hist,
  cosumnes_hadgemec_hist=etqswe_cosumnes_hadgemec_hist,cosumnes_miroc5_hist=etqswe_cosumnes_miroc5_hist,
  cosumnes_canesm2_45=etqswe_cosumnes_canesm2_45,cosumnes_ccsm4_45=etqswe_cosumnes_ccsm4_45,
  cosumnes_cnrm_45=etqswe_cosumnes_cnrm_45,cosumnes_hadgemcc_45=etqswe_cosumnes_hadgemcc_45,
  cosumnes_hadgemec_45=etqswe_cosumnes_hadgemec_45,cosumnes_miroc5_45=etqswe_cosumnes_miroc5_45,
  cosumnes_canesm2_85=etqswe_cosumnes_canesm2_85,cosumnes_ccsm4_85=etqswe_cosumnes_ccsm4_85,
  cosumnes_cnrm_85=etqswe_cosumnes_cnrm_85,cosumnes_hadgemcc_85=etqswe_cosumnes_hadgemcc_85,
  cosumnes_hadgemec_85=etqswe_cosumnes_hadgemec_85,cosumnes_miroc5_85=etqswe_cosumnes_miroc5_85,
  cosumnes_canesm2_85alt40pc=etqswe_cosumnes_canesm2_85alt40pc,cosumnes_ccsm4_85alt40pc=etqswe_cosumnes_ccsm4_85alt40pc,
  cosumnes_cnrm_85alt40pc=etqswe_cosumnes_cnrm_85alt40pc,cosumnes_hadgemcc_85alt40pc=etqswe_cosumnes_hadgemcc_85alt40pc,
  cosumnes_hadgemec_85alt40pc=etqswe_cosumnes_hadgemec_85alt40pc,cosumnes_miroc5_85alt40pc=etqswe_cosumnes_miroc5_85alt40pc,
  cosumnes_magicalgcm_obs=etqswe_cosumnes_obs
)
|
/R/1.1_data_processing_cosumnes.R
|
no_license
|
ryanrbart/vic_sierra_nevada_analysis
|
R
| false
| false
| 4,524
|
r
|
# Data import and processing - Cosumnes
# All data is reprojected (if necessary) to lat/long

source("R/0_utilities.R")

# ---------------------------------------------------------------------
# Import VIC data

# Basin-averaged daily VIC output: one CSV per GCM x scenario combination.
# GCM lookup: short tag used in object names -> string used in file names.
gcm_tags <- c(
  canesm2  = "CanESM2",
  ccsm4    = "CCSM4",
  cnrm     = "CNRM-CM5",
  hadgemcc = "HadGEM2-CC365",
  hadgemec = "HadGEM2-ES365",
  miroc5   = "MIROC5"
)
# Scenario lookup: short tag -> scenario/period portion of the file name.
scenario_tags <- c(
  hist        = "Historical_Q_ET_SWE_1950-2005",
  "45"        = "RCP45_Q_ET_SWE_2006-2099",
  "85"        = "RCP85_Q_ET_SWE_2006-2099",
  "85alt40pc" = "RCP85_Q_ET_SWE_2006-2099_40PC_Conductence"
)

# Read every GCM/scenario file in the same order as before (all historical
# runs first, then RCP4.5, RCP8.5, and the 40%-conductance RCP8.5 variant),
# creating the same etqswe_cosumnes_<gcm>_<scenario> globals as the original
# one-call-per-file version.
run_names <- character(0)
for (scen in names(scenario_tags)) {
  for (gcm in names(gcm_tags)) {
    run <- paste0("cosumnes_", gcm, "_", scen)
    run_names <- c(run_names, run)
    assign(
      paste0("etqswe_", run),
      readr::read_csv(paste0("../BasinAvg/Cosumnes_", gcm_tags[[gcm]], "_",
                             scenario_tags[[scen]], ".csv"))
    )
  }
}

# Observed values
etqswe_cosumnes_obs <- readr::read_csv("../BasinAvg/Cosumnes_Obs_Q_ET_SWE_1950-2011.csv")

# Consolidate basin-averaged daily data to lists (same element names and
# order as the original list() call; the observed series keeps its
# placeholder "magicalgcm" label)
etqswe_cosumnes <- setNames(mget(paste0("etqswe_", run_names)), run_names)
etqswe_cosumnes$cosumnes_magicalgcm_obs <- etqswe_cosumnes_obs
|
#----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
# Stand-alone Kaggle submission script: clears the workspace, so it must be
# run in its own session rather than sourced into an existing one.
rm(list=ls(all=TRUE))
gc()
options(scipen=999)  # avoid scientific notation in printed/written output

library(xgboost)

setwd("/home/rstudio/Dropbox/Public/Springleaf")
subversion <- 1          # used only to derive the hold-out sampling seed below
version <- "24_4"        # suffix for all output file names (log, submission)
startTime <- Sys.time()  # for reporting total runtime at the end

#----------------------------------------------------------------
# Data
#----------------------------------------------------------------
# Loads 'train' and 'test' data.frames into the workspace.
load("Kaggle_RawData.RData")
y <- train$target
train <- train[, -c(1, 1934)]  # drop ID (column 1) and target (column 1934)
test <- test[, -1]             # drop ID

# Character variables
train_char <- train[, sapply(train, is.character)]
# Columns whose contents contain month abbreviations (JAN1/FEB1/MAR1) are
# treated as date columns. NOTE(review): grep() on a data.frame coerces each
# column to one deparsed string before matching - hacky, but it selects the
# intended date columns here. TODO confirm no non-date column matches.
train_date <- train_char[, grep("JAN1|FEB1|MAR1", train_char), ]
train_char <- train_char[, !colnames(train_char) %in% colnames(train_date)]
# Parse "ddMONyy:HH:MM:SS" timestamps and keep only the calendar year
# as a numeric feature.
train_date <- sapply(train_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
train_date <- do.call(cbind.data.frame, train_date)
train_date <- sapply(train_date, function(x) as.numeric(format(x, "%Y")))
train_date <- data.frame(train_date)
train[, names(train_char)] <- train_char
train[, names(train_date)] <- train_date

# Same character/date processing for the test set.
test_char <- test[, sapply(test, is.character)]
test_date <- test_char[, grep("JAN1|FEB1|MAR1", test_char), ]
test_char <- test_char[, !colnames(test_char) %in% colnames(test_date)]
test_date <- sapply(test_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
test_date <- do.call(cbind.data.frame, test_date)
test_date <- sapply(test_date, function(x) as.numeric(format(x, "%Y")))
test_date <- data.frame(test_date)
test[, names(test_char)] <- test_char
test[, names(test_date)] <- test_date

# Integer-encode remaining character columns; train and test are encoded
# jointly so identical strings map to identical codes in both sets.
for(i in 1:ncol(train)) {
  if(class(train[, i]) == "character") {
    tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
    train[, i] <- head(tmp, nrow(train))
    test[, i] <- tail(tmp, nrow(test))
  }
}

# -99999 is the data set's missing-value sentinel.
train[train==-99999] <- NA
test[test==-99999] <- NA

# drop variables with all missing observations
dropVars <- names(which(sapply(train, class) == "logical"))
train <- train[, setdiff(names(train), dropVars)]
test <- test[, setdiff(names(test), dropVars)]

# Remove variables for which standard deviation is zero
SD <- sapply(train, sd, na.rm=TRUE)
train <- train[, setdiff(names(train), names(SD[SD==0]))]
test <- test[, setdiff(names(test), names(SD[SD==0]))]

# Columns whose maximum is 99 with fewer than 99 distinct values are
# presumably categorical codes; re-encode them as factor levels.
# TODO confirm 99 is a categorical-code sentinel in this data set.
maxValues <- sapply(train, function(x) max(x, na.rm=TRUE))
tmpData <- train[, names(maxValues[maxValues==99])]
tempUnique <- sapply(tmpData, function(x) length(unique(na.omit(x))))
for(i in names(tempUnique[tempUnique < 99])) {
  tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
  train[, i] <- head(tmp, nrow(train))
  test[, i] <- tail(tmp, nrow(test))
}

set.seed(1948 ^ subversion)
hold <- sample(1:nrow(train), 15000) #10% training data for stopping
xgtrain <- xgb.DMatrix(as.matrix(train[-hold, ]), label = y[-hold], missing = NA)
xgval <-  xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
gc()

#----------------------------------------------------------------
# Model
#----------------------------------------------------------------
# Redirect console output to a log file; per-round validation AUC printed
# by xgb.train is parsed back out of this file in the Scoring section.
sink(file=paste0("test_submission_seed_", version, ".txt"))
param0 <- list(
  # some raw versions of this from google, some allowed out of xgboost docs
  "objective"  = "binary:logistic"
  , "eval_metric" = "auc"
  , "eta" = 0.001
  , "subsample" = 0.8
  , "colsample_bytree" = 0.5
  , "min_child_weight" = 6
  , "max_depth" = 10
  , "alpha" = 4
)
set.seed(2012)
watchlist <- list('val' = xgval)
model = xgb.train(
  nrounds = 60000 # increase for more results at home
  , params = param0
  , data = xgtrain
  #  , early.stop.round = 5
  , watchlist = watchlist
  , print.every.n = 5
)
endTime <- Sys.time()
difftime(endTime, startTime)
sink()

#----------------------------------------------------------------
# Scoring
#----------------------------------------------------------------
# Extract best tree
# Parse the sunk log: each printed line is "[round]\tval-auc:value"; recover
# the AUC and round number, then predict with the best-AUC round.
tempOut <- readLines(paste0("test_submission_seed_", version, ".txt"))
tempOut <- tempOut[-length(tempOut)]
AUC <- sapply(tempOut, function(x) as.numeric(unlist(strsplit(x, split=":"))[2]))
names(AUC) <- NULL
modPerf <- data.frame(AUC)
tree <- sapply(tempOut, function(x) unlist(strsplit(x, split=":"))[1])
names(tree) <- NULL
tree <- gsub("\\t", "", tree)
tree <- gsub("val-auc", "", tree)
tree <- gsub(" ", "", tree)
tree <- gsub("]", "", tree)
tree <- gsub("\\[", "", tree)
tree <- as.numeric(tree)
modPerf$tree <- tree
modPerf <- modPerf[order(modPerf$AUC, decreasing=TRUE), ]

xgtest <- xgb.DMatrix(as.matrix(test), missing = NA)
preds_out <- predict(model, xgtest, ntreelimit = modPerf$tree[1])
sub <- read.csv("sample_submission.csv")
sub$target <- preds_out
write.csv(sub, paste0("test_submission_seed_", version, ".csv"), row.names=FALSE)
|
/Benchmark Scripts/Seed/test_submission_seed_24_4.R
|
no_license
|
vikasnitk85/SpringleafMarketingesponse
|
R
| false
| false
| 4,621
|
r
|
#----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
# Stand-alone Kaggle submission script: clears the workspace, so it must be
# run in its own session rather than sourced into an existing one.
rm(list=ls(all=TRUE))
gc()
options(scipen=999)  # avoid scientific notation in printed/written output

library(xgboost)

setwd("/home/rstudio/Dropbox/Public/Springleaf")
subversion <- 1          # used only to derive the hold-out sampling seed below
version <- "24_4"        # suffix for all output file names (log, submission)
startTime <- Sys.time()  # for reporting total runtime at the end

#----------------------------------------------------------------
# Data
#----------------------------------------------------------------
# Loads 'train' and 'test' data.frames into the workspace.
load("Kaggle_RawData.RData")
y <- train$target
train <- train[, -c(1, 1934)]  # drop ID (column 1) and target (column 1934)
test <- test[, -1]             # drop ID

# Character variables
train_char <- train[, sapply(train, is.character)]
# Columns whose contents contain month abbreviations (JAN1/FEB1/MAR1) are
# treated as date columns. NOTE(review): grep() on a data.frame coerces each
# column to one deparsed string before matching - hacky, but it selects the
# intended date columns here. TODO confirm no non-date column matches.
train_date <- train_char[, grep("JAN1|FEB1|MAR1", train_char), ]
train_char <- train_char[, !colnames(train_char) %in% colnames(train_date)]
# Parse "ddMONyy:HH:MM:SS" timestamps and keep only the calendar year
# as a numeric feature.
train_date <- sapply(train_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
train_date <- do.call(cbind.data.frame, train_date)
train_date <- sapply(train_date, function(x) as.numeric(format(x, "%Y")))
train_date <- data.frame(train_date)
train[, names(train_char)] <- train_char
train[, names(train_date)] <- train_date

# Same character/date processing for the test set.
test_char <- test[, sapply(test, is.character)]
test_date <- test_char[, grep("JAN1|FEB1|MAR1", test_char), ]
test_char <- test_char[, !colnames(test_char) %in% colnames(test_date)]
test_date <- sapply(test_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
test_date <- do.call(cbind.data.frame, test_date)
test_date <- sapply(test_date, function(x) as.numeric(format(x, "%Y")))
test_date <- data.frame(test_date)
test[, names(test_char)] <- test_char
test[, names(test_date)] <- test_date

# Integer-encode remaining character columns; train and test are encoded
# jointly so identical strings map to identical codes in both sets.
for(i in 1:ncol(train)) {
  if(class(train[, i]) == "character") {
    tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
    train[, i] <- head(tmp, nrow(train))
    test[, i] <- tail(tmp, nrow(test))
  }
}

# -99999 is the data set's missing-value sentinel.
train[train==-99999] <- NA
test[test==-99999] <- NA

# drop variables with all missing observations
dropVars <- names(which(sapply(train, class) == "logical"))
train <- train[, setdiff(names(train), dropVars)]
test <- test[, setdiff(names(test), dropVars)]

# Remove variables for which standard deviation is zero
SD <- sapply(train, sd, na.rm=TRUE)
train <- train[, setdiff(names(train), names(SD[SD==0]))]
test <- test[, setdiff(names(test), names(SD[SD==0]))]

# Columns whose maximum is 99 with fewer than 99 distinct values are
# presumably categorical codes; re-encode them as factor levels.
# TODO confirm 99 is a categorical-code sentinel in this data set.
maxValues <- sapply(train, function(x) max(x, na.rm=TRUE))
tmpData <- train[, names(maxValues[maxValues==99])]
tempUnique <- sapply(tmpData, function(x) length(unique(na.omit(x))))
for(i in names(tempUnique[tempUnique < 99])) {
  tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
  train[, i] <- head(tmp, nrow(train))
  test[, i] <- tail(tmp, nrow(test))
}

set.seed(1948 ^ subversion)
hold <- sample(1:nrow(train), 15000) #10% training data for stopping
xgtrain <- xgb.DMatrix(as.matrix(train[-hold, ]), label = y[-hold], missing = NA)
xgval <-  xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
gc()

#----------------------------------------------------------------
# Model
#----------------------------------------------------------------
# Redirect console output to a log file; per-round validation AUC printed
# by xgb.train is parsed back out of this file in the Scoring section.
sink(file=paste0("test_submission_seed_", version, ".txt"))
param0 <- list(
  # some raw versions of this from google, some allowed out of xgboost docs
  "objective"  = "binary:logistic"
  , "eval_metric" = "auc"
  , "eta" = 0.001
  , "subsample" = 0.8
  , "colsample_bytree" = 0.5
  , "min_child_weight" = 6
  , "max_depth" = 10
  , "alpha" = 4
)
set.seed(2012)
watchlist <- list('val' = xgval)
model = xgb.train(
  nrounds = 60000 # increase for more results at home
  , params = param0
  , data = xgtrain
  #  , early.stop.round = 5
  , watchlist = watchlist
  , print.every.n = 5
)
endTime <- Sys.time()
difftime(endTime, startTime)
sink()

#----------------------------------------------------------------
# Scoring
#----------------------------------------------------------------
# Extract best tree
# Parse the sunk log: each printed line is "[round]\tval-auc:value"; recover
# the AUC and round number, then predict with the best-AUC round.
tempOut <- readLines(paste0("test_submission_seed_", version, ".txt"))
tempOut <- tempOut[-length(tempOut)]
AUC <- sapply(tempOut, function(x) as.numeric(unlist(strsplit(x, split=":"))[2]))
names(AUC) <- NULL
modPerf <- data.frame(AUC)
tree <- sapply(tempOut, function(x) unlist(strsplit(x, split=":"))[1])
names(tree) <- NULL
tree <- gsub("\\t", "", tree)
tree <- gsub("val-auc", "", tree)
tree <- gsub(" ", "", tree)
tree <- gsub("]", "", tree)
tree <- gsub("\\[", "", tree)
tree <- as.numeric(tree)
modPerf$tree <- tree
modPerf <- modPerf[order(modPerf$AUC, decreasing=TRUE), ]

xgtest <- xgb.DMatrix(as.matrix(test), missing = NA)
preds_out <- predict(model, xgtest, ntreelimit = modPerf$tree[1])
sub <- read.csv("sample_submission.csv")
sub$target <- preds_out
write.csv(sub, paste0("test_submission_seed_", version, ".csv"), row.names=FALSE)
|
# Usage examples for checkarg::isStrictlyNegativeIntegerOrNanVectorOrNull()
# (appears to be extracted from the package's Rd example section).
library(checkarg)

### Name: isStrictlyNegativeIntegerOrNanVectorOrNull
### Title: Wrapper for the checkarg function, using specific parameter
###   settings.
### Aliases: isStrictlyNegativeIntegerOrNanVectorOrNull

### ** Examples

isStrictlyNegativeIntegerOrNanVectorOrNull(-2)
# returns TRUE (argument is valid)
isStrictlyNegativeIntegerOrNanVectorOrNull("X")
# returns FALSE (argument is invalid)
#isStrictlyNegativeIntegerOrNanVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isStrictlyNegativeIntegerOrNanVectorOrNull(-2, default = -1)
# returns -2 (the argument, rather than the default, since it is not NULL)
#isStrictlyNegativeIntegerOrNanVectorOrNull("X", default = -1)
# throws exception with message defined by message and argumentName parameters
isStrictlyNegativeIntegerOrNanVectorOrNull(NULL, default = -1)
# returns -1 (the default, rather than the argument, since it is NULL)
|
/data/genthat_extracted_code/checkarg/examples/isStrictlyNegativeIntegerOrNanVectorOrNull.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 982
|
r
|
# Usage examples for checkarg::isStrictlyNegativeIntegerOrNanVectorOrNull()
# (appears to be extracted from the package's Rd example section).
library(checkarg)

### Name: isStrictlyNegativeIntegerOrNanVectorOrNull
### Title: Wrapper for the checkarg function, using specific parameter
###   settings.
### Aliases: isStrictlyNegativeIntegerOrNanVectorOrNull

### ** Examples

isStrictlyNegativeIntegerOrNanVectorOrNull(-2)
# returns TRUE (argument is valid)
isStrictlyNegativeIntegerOrNanVectorOrNull("X")
# returns FALSE (argument is invalid)
#isStrictlyNegativeIntegerOrNanVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isStrictlyNegativeIntegerOrNanVectorOrNull(-2, default = -1)
# returns -2 (the argument, rather than the default, since it is not NULL)
#isStrictlyNegativeIntegerOrNanVectorOrNull("X", default = -1)
# throws exception with message defined by message and argumentName parameters
isStrictlyNegativeIntegerOrNanVectorOrNull(NULL, default = -1)
# returns -1 (the default, rather than the argument, since it is NULL)
|
options(stringsAsFactors=FALSE)

## RData file holding the precomputed drug-association results (recomputed AUC).
file.sensitivity <- "auc_recomputed_drug_association.RData"

## Hard dependencies.  `library()` is preferred over the `require(pkg) ||
## stop(...)` idiom: require() is meant for conditional loading, not hard
## dependencies.  The original error-message format is preserved.
pkgs <- c("stringr", "corrplot", "VennDiagram", "RColorBrewer", "SDMTools",
          "calibrate", "Hmisc", "grid", "gridBase", "lattice", "WriteXLS",
          "PharmacoGx", "Biobase",
          "magicaxis",   # adds minor tick marks to the plots
          "np", "ggplot2", "gridExtra", "easyGgplot2")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    stop(sprintf("Library %s is not available!", pkg), call. = FALSE)
  }
  library(pkg, character.only = TRUE)
}

## Input/output locations.
path.data <- "data"
path.code <- file.path("code")
path.result <- file.path("result")

## Thresholds and modelling options used throughout the analyses.
effect.size.cut.off <- 0.55   # minimum concordance index for a biomarker
fdr.cut.off <- 0.01           # FDR significance threshold
tissue <- "breast"
model.method <- "glm"
glm.family <- "gaussian"
effect.size <- "cindex"       # one of c("r.squared", "cindex")
adjustment.method <- "fdr"    # one of c("bonferroni", "fdr")

## Flags derived from the sensitivity file name; grepl() replaces the
## equivalent but clunkier ifelse(regexpr(...) < 1, FALSE, TRUE) form.
breast.specific <- grepl("breast", file.sensitivity)
data.type <- ifelse(grepl("mut", file.sensitivity), "all", "expression")

source(file.path(path.code, "foo.R"))

## Output directory for all figures and tables produced below.
path.diagrams <- "result/auc_recomputed_ccle_gdsc"
if (!file.exists(path.diagrams)) {
  dir.create(file.path(path.diagrams))
}
## Load the training data.  The RData file defines (at least)
## ccle.drug.sensitivity, ccle.isoforms.fpkm, tissueTypes, drugs and -- when
## present -- gdsc.drug.sensitivity.
path.training.data <- "data/training_ccle_gdsc.RData"
load(path.training.data, verbose=TRUE)
## The training-set label depends on whether GDSC sensitivities were loaded.
if("gdsc.drug.sensitivity" %in% ls()) {
training.type <-"CCLE_GDSC"
} else {
training.type <-"CCLE"
}
## Model codes: M3B = isoforms, M2 = genes; MC (copy number) and MM
## (mutations) are fitted only when all data types are available.
if(data.type == "all") {
Models <- c("M3B", "M2", "MC", "MM")
Models.names <- c("Isoforms", "Genes", "Copy Number Variations", "Mutations")
}else {
Models <- c("M2", "M3B")
Models.names <- c("Genes", "Isoforms")
}
names(Models.names) <- Models
## Tissue annotations and per-cell-line summaries.
ccle.tissuetype <- tissueTypes
ccle.tissuetype[,1] <- as.character(ccle.tissuetype[,1])
ccle.primary.tissuetype <- ccle.tissuetype
ccle.primary.drug.sensitivity <- ccle.drug.sensitivity
mean.ccle.isoforms.fpkm <- colMeans(ccle.isoforms.fpkm, na.rm=TRUE)
## Shorten the haematopoietic label so it fits in plot axes/legends.
if("haematopoietic_and_lymphoid_tissue" %in% ccle.tissuetype[,1])
{
ccle.tissuetype[which(ccle.tissuetype[,1] == "haematopoietic_and_lymphoid_tissue"), 1] <- "haematopoietic_and_lymphoid"
}
######## Colour map for GDSC sensitivity values
## Collect every distinct sensitivity value across the drug columns.
A <- NULL
for (i in seq_along(drugs)) {
  A <- union(A, unique(gdsc.drug.sensitivity[, i]))
}
## order() keeps a possible NA at the end (sort() would silently drop it),
## so the original A[order(A)] form is retained.
A <- A[order(A)]
color.sensitivity <- matrix(NA, nrow=length(A), ncol=1)
rownames(color.sensitivity) <- A
colnames(color.sensitivity) <- "col"
## Gradient blue -> purple -> red over all but the last value; the final row
## (missing sensitivity) is drawn black.
## NOTE: the original index `1:nrow(color.sensitivity)-1` parses as
## (1:n) - 1 = 0:(n-1); index 0 is silently dropped when subsetting, so it
## only selected rows 1..(n-1) by accident.  seq_len(n - 1) states the
## intent explicitly and unambiguously.
color.sensitivity[seq_len(nrow(color.sensitivity) - 1), "col"] <-
  colorRampPalette(c("blue", "purple", "red"))(nrow(color.sensitivity) - 1)
color.sensitivity[nrow(color.sensitivity), "col"] <- "#000000"
###### drug sensitivity
## Order drugs by sensitivity and plot the distributions for CCLE and GDSC.
objOrderDrugs <- fnOrderDrugs(data=ccle.drug.sensitivity, filename=file.path(path.diagrams, "CCLE_DrugSensitivity.pdf"), ylab="auc recomputed", main="CCLE Drug sensitivity")
invisible(fnOrderDrugs(gdsc.drug.sensitivity, file.path(path.diagrams, "GDSC_DrugSensitivity.pdf"), ylab="auc recomputed", main="GDSC Drug sensitivity"))
##gray
#objOrderDrugs <- fnOrderDrugs(ccle.drug.sensitivity, file.path(path.diagrams, "DrugSensitivity_allgenes.pdf"))
DOI <- objOrderDrugs$order
ccle.drug.sensitivity.ordered <- objOrderDrugs$ordered
## Load the precomputed association results; defines drug.association,
## drug.association.statistics, drug.association.best.isoforms, GeneList, ...
load(file.path(path.data, file.sensitivity), verbose=T)
## If results are stored per effect-size measure, keep only the configured
## one (cindex or r.squared).
if(length(drug.association)==2) {
drug.association <- drug.association[[effect.size]]
drug.association.statistics <- drug.association.statistics[[effect.size]]
}
Prototype <- drug.association[[1]][[1]]
## Cache file for the flattened association matrices built below; rebuild
## only when the cache is absent.
myf <- file.path(path.diagrams, "allGenes_association_matrices.RData")
if(file.exists(myf)){
load(myf)
}else{
## Flatten the nested drug.association lists into matrices indexed by gene
## (rows) and "drug_model" (columns) for fast downstream lookup.
models.drugs.names <- expand.grid(drugs, Models)
FDR_List <- matrix(NA, ncol=(length(drugs) * length(Models)), nrow=length(drug.association), dimnames=list(names(drug.association), paste(models.drugs.names[, 1], models.drugs.names[, 2], sep ="_")))
estimate_List <- Pvalues.Numinals <- FDR_List
## statistics.matrix additionally carries one column per drug for the null
## model M0.
models.plus.drugs.names <- expand.grid(drugs, c("M0", Models))
statistics.matrix <- matrix(NA, ncol=(length(drugs) * (length(Models) + 1)), nrow=length(drug.association), dimnames=list(names(drug.association), paste(models.plus.drugs.names[, 1], models.plus.drugs.names[, 2], sep ="_")))
best.isoforms.matrix <- matrix(NA, ncol=length(drugs) , nrow=length(drug.association))
colnames(best.isoforms.matrix) <- drugs
rownames(best.isoforms.matrix) <- names(drug.association)
Min.Pvalues <- NULL
for(i in drugs) {
## Median statistic of the null model M0 for drug i (0 when missing).
statistics.matrix[,paste(i, "M0",sep ="_")] <- sapply(drug.association.statistics, function(x){ifelse(!is.null(x[[i]]["median", "M0"]), x[[i]]["median", "M0"], 0)})
for(j in 1:length(Models)) {
## p-values of model j vs M0, FDR-adjusted across genes; missing entries
## default to p = 1, estimates/statistics default to 0.
FDR_List[, paste(i, Models[j], sep ="_")] <- p.adjust(sapply(drug.association, function(x){ifelse(!is.null(x[[i]]["M0",Models[j]]), x[[i]]["M0", Models[j]], 1)}) ,method=adjustment.method)
Min.Pvalues[paste(i,Models[j],sep ="_")] <- min(sapply(drug.association, function(x){ifelse(!is.null(x[[i]]["M0", Models[j]]), x[[i]]["M0", Models[j]], 1)}))
estimate_List[,paste(i,Models[j],sep ="_")] <- sapply(drug.association, function(x){ifelse(!is.null(x[[i]][Models[j], "M0"]), x[[i]][Models[j], "M0"], 0)})
Pvalues.Numinals[,paste(i,Models[j],sep ="_")] <- sapply(drug.association, function(x){ifelse(!is.null(x[[i]]["M0",Models[j]]), x[[i]]["M0",Models[j]], 1)})
statistics.matrix[,paste(i,Models[j],sep ="_")] <- sapply(drug.association.statistics, function(x){ifelse(!is.null(x[[i]]["median", Models[j]]), x[[i]]["median", Models[j]], 0)})
}
## Identifier of the best-scoring isoform per gene for drug i ("" if none).
best.isoforms.matrix[,i] <- sapply(drug.association.best.isoforms, function(x){ifelse(!is.null(x[[i]]), x[[i]], "")})
}
## Gene -> comma-separated transcript-id map (Ensembl GRCh38.87); defines
## ensembl.map.genes.isoforms used below.
load("data/ensembl.map.genes.isoforms.GRCh38.87.RData")
## Count how many of a gene's annotated transcripts are actually quantified
## in the isoform-expression data.
##
## @param Gene_Map map whose columns are Ensembl gene ids and whose entries
##   are comma-separated transcript ids (default: global
##   `ensembl.map.genes.isoforms`).
## @param GeneId Ensembl gene id (coerced to character).
## @param known.isoforms character vector of quantified transcript ids.
##   Defaults lazily to the columns of the global `ccle.isoforms.fpkm`,
##   preserving the original behaviour while making the function testable.
## @return integer: number of the gene's transcripts found in known.isoforms.
fnNumber_Isoforms_of_Gene <- function(Gene_Map=ensembl.map.genes.isoforms, GeneId,
                                      known.isoforms=colnames(ccle.isoforms.fpkm))
{
  transcripts <- unlist(strsplit(Gene_Map[, as.character(GeneId)], ","))
  ## sum() over a logical mask is the idiomatic form of length(which(...)).
  sum(transcripts %in% known.isoforms)
}
## Number of quantified isoforms for every annotated gene.  The original
## filled a matrix row-by-row with `for(i in 1:nrow(...))`; a vapply over the
## gene ids is type-safe, avoids the 1:n footgun, and needs no preallocation.
gene.ids <- rownames(annot.ensembl.all.genes)
isoforms_No_List <- data.frame(
  isoforms.NO = unname(vapply(gene.ids,
                              function(g) fnNumber_Isoforms_of_Gene(GeneId = g),
                              integer(1))),
  row.names = gene.ids,
  stringsAsFactors = FALSE)
# save(isoforms_No_List, file=myf)
#}
## Restrict to the genes under study and cache all association matrices.
isoforms_No_List <- isoforms_No_List[GeneList, , drop=FALSE]
save(FDR_List, estimate_List, Pvalues.Numinals, statistics.matrix, best.isoforms.matrix, Min.Pvalues, isoforms_No_List, file=myf)
}
##########Analyses
## Count significant associated genes per drug/model at the configured FDR
## and effect-size thresholds, and test gene vs isoform models pairwise.
result.effect.size <- fnComputeAssociateGenes.effect.size(FDR_CutOff=fdr.cut.off, effect.size_CutOff=effect.size.cut.off)
write.csv(fnWilcox(result.effect.size, TRUE)$comparison, file=file.path(path.diagrams, "comparison_test_wilcox.csv"))
###Figure 2A
### The number of significant predictive biomarkers identified in training
### biomarkerd are plotted in seperate bars for isoforms and gene models
barplot.models(model=result.effect.size, isoforms_No="all", sign="all", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=1.2)
cindexDistributions()
###
## Same bar plots restricted to single-isoform and multi-isoform genes.
barplot.models(model=result.effect.size, isoforms_No="1.isoform", sign="all", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
barplot.models(model=result.effect.size, isoforms_No="n.isoforms", sign="all", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
#barplot.models(model=result.effect.size, isoforms_No="all", sign="positive", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
#barplot.models(model=result.effect.size, isoforms_No="all", sign="negative", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
## Per-drug association tables ranked with the isoform model (M3B) first.
associations <- associations.all.drugs(model.rank="M3B", annot.ensembl.all.genes=annot.ensembl.all.genes)
save(associations, file=file.path(path.diagrams, "associations.RData"))
##all.biomarkers would include all the significant biomarkers
all.biomarkers <- fnTop.significant.biomarkers(associations, cut_off=fdr.cut.off, BioNo="all", rank.type="pvalue.adj")
## Keep only biomarkers whose concordance index exceeds the threshold
## configured at the top of the script.
## BUGFIX: the original compared against `effect.size_CutOff`, a name this
## script never defines (the configured threshold is `effect.size.cut.off`);
## unless a sourced file happened to define it, this filter would error.
for (i in names(all.biomarkers)) {
  all.biomarkers[[i]] <- all.biomarkers[[i]][which(all.biomarkers[[i]]$cindex > effect.size.cut.off), ]
}
save(all.biomarkers, file=file.path(path.diagrams, "all.biomarkers.original.RData"))
## Tissue-specificity annotation of the biomarkers.  On a pan-cancer run the
## annotation is computed; on a breast-specific run every (non-NA) biomarker
## table is trivially flagged as breast-specific.
if(!breast.specific)
{
  source("code/foo_training.R")
  all.biomarkers <- fnidentify.tissue.specific.biomarkers(biomarkers=all.biomarkers)
}else{
  for (i in seq_along(all.biomarkers))   # seq_along: safe for empty lists
  {
    if(all(is.na(all.biomarkers[[i]])))
    {
      all.biomarkers[[i]][, tissue] <- NA
      all.biomarkers[[i]][, paste0(tissue, "_boot")] <- NA
    }else{
      all.biomarkers[[i]][, tissue] <- 1
      all.biomarkers[[i]][, paste0(tissue, "_boot")] <- 1
    }
  }
}
## Order each table so breast-specific hits come first.  TRUE is spelled out
## instead of the reassignable shorthand T.
all.biomarkers <- lapply(all.biomarkers, function(x){if("breast" %in% colnames(x)){x[order(x[, "breast"], na.last=TRUE, decreasing=TRUE),]}else{x}})
save(all.biomarkers, file=file.path(path.diagrams, "all.biomarkers.RData"))
#WriteXLS::WriteXLS("all.biomarkers", ExcelFileName=file.path(path.diagrams, "all.biomarkers.xlsx"), row.names=TRUE)
## Count biomarkers per drug by specificity class.
BiomarkersNo <- matrix(NA, ncol=3, nrow=length(all.biomarkers))
rownames(BiomarkersNo) <- names(all.biomarkers)
colnames(BiomarkersNo) <- c("gene.specific", "isoform.specific", "common")
for( i in names(all.biomarkers))
{
top.significant.table <- table(all.biomarkers[[i]][, "specificity"])
BiomarkersNo[i, "isoform.specific"] <- top.significant.table["isoform.specific"]
BiomarkersNo[i, "gene.specific"] <- top.significant.table["gene.specific"]
## Divided by 2 -- presumably each "common" biomarker is listed twice (once
## as a gene, once as an isoform row); TODO confirm against
## fnTop.significant.biomarkers.
BiomarkersNo[i, "common"] <- top.significant.table["common"]/2
}
write.csv(BiomarkersNo, file=file.path(path.diagrams, "Biomarkers.csv"))
## For every drug, extract the single biomarker with the highest effect size
## (concordance index) into a summary table.
TOP <- matrix(NA, ncol=9, nrow=length(all.biomarkers))
rownames(TOP) <- names(all.biomarkers)
colnames(TOP) <- c("symbol", "isoforms.no", "biomarker.id", "type", "specificity", "estimate", effect.size, "fdr", "delta.rank")
for( i in names(all.biomarkers)) {
## which.max returns integer(0) on an empty/all-NA column, hence the
## length guard below.
xx <- which.max(all.biomarkers[[i]][,effect.size])
#xx <- 1
if(length(xx) > 0) {
TOP[i, "symbol"] <- as.character(all.biomarkers[[i]][xx, "symbol"])
TOP[i, "isoforms.no"] <- all.biomarkers[[i]][xx, "isoforms.no"]
TOP[i, "biomarker.id"] <- as.character(all.biomarkers[[i]][xx, "biomarker.id"])
TOP[i, "type"] <- all.biomarkers[[i]][xx, "type"]
TOP[i, "specificity"] <- all.biomarkers[[i]][xx, "specificity"]
TOP[i, "estimate"] <- all.biomarkers[[i]][xx, "estimate"]
TOP[i, effect.size] <- all.biomarkers[[i]][xx, effect.size]
TOP[i, "fdr"] <- all.biomarkers[[i]][xx, adjustment.method]
TOP[i, "delta.rank"] <- all.biomarkers[[i]][xx, "delta.rank"]
}
}
write.csv(TOP, file=file.path(path.diagrams, "TopOne.csv"))
## Sanity check against known drug-gene associations from the literature.
Check.KnownAssociations(associations)
percentage.biomarkers.type <- fnPercentageBiomarkersType(all.biomarkers)
###Figure 2B
### The ratio of biomarkers according to their specificity in training
mystacked.barplot.simple(Filename=file.path(path.diagrams, "ProportionBiomarkersType.pdf"), data=percentage.biomarkers.type, main.label="Proportion of specificity of biomarkers", cex=1.2)
###
### Supplementary Figure 10
## Annotation of the different sets of biomarkers based on their biotypes:
## per biotype, a barplot of the specificity-class proportions for each drug.
mycol <- RColorBrewer::brewer.pal(n=4, name="Set1")[c(1, 4, 2)]
pdf("result/auc_recomputed_ccle_gdsc/biomarkers_specificity_biotypes2.pdf", width=21, height=10)
par(mfrow=c(2,3))
par(mar=c(7.1, 4.1, 4.1, 10.1), xpd=TRUE)
biotypes <- c("protein_coding", "antisense", "processed_transcript", "lincRNA", "pseudogene", "other")
## rr[[i]]: drugs x specificity-class proportion matrix for biotype i.
rr <- list()
for(i in 1:length(biotypes)) {
rr[[i]] <- matrix(0, nrow=length(all.biomarkers), ncol=3, dimnames=list(names(all.biomarkers), c("isoform.specific", "common", "gene.specific")))
}
for(drug in names(all.biomarkers)){
if(all(!is.na(all.biomarkers[[drug]]$specificity))){
xx <- table(all.biomarkers[[drug]]$specificity, all.biomarkers[[drug]]$biotype)
for(i in 1:length(biotypes)) {
## grep so that e.g. "processed_pseudogene" also matches "pseudogene";
## matched columns are consumed (removed from xx) as biotypes are processed.
yy <- grep(biotypes[i], colnames(xx))
if(length(yy) > 0) {
mm <- apply(xx[ , yy, drop=FALSE], 1, sum)
rr[[i]][drug, intersect(colnames(rr[[i]]), names(mm))] <- mm[intersect(colnames(rr[[i]]), names(mm))]/sum(mm)
xx <- xx[ ,-yy, drop=FALSE]
}else if (biotypes[i] == "other"){
## "other" collects whatever columns remain after the named biotypes.
mm <- apply(xx, 1, sum)
rr[[i]][drug, intersect(colnames(rr[[i]]), names(mm))] <- mm[intersect(colnames(rr[[i]]), names(mm))]/sum(mm)
## NOTE(review): yy is empty in this branch (grep matched nothing), so
## -yy is a no-op; this line looks like a leftover from the branch above.
xx <- xx[ , -yy, drop=FALSE]
}
}
}
}
for(i in 1:length(biotypes)) {
barplot(t(rr[[i]]), las=2,col=mycol, main=biotypes[i], ylab="percentage")
}
legend("topright", inset=c(-0.2,0), legend=colnames(rr[[1]]), fill=mycol, bty="n")
dev.off()
###
### Supplementary Figure 11
## Annotation of the different sets of biomarkers based on the number of
## alternatively spliced products of their corresponding gene (one isoform
## vs several).
mycol <- RColorBrewer::brewer.pal(n=3, name="Set1")[c(2,1)]
isoform.specific<- gene.specific<- common <- matrix(NA, nrow=length(all.biomarkers), ncol=2, dimnames=list(names(all.biomarkers), c("1 isoform", "n isoforms")))
pdf("result/auc_recomputed_ccle_gdsc/biomarkers_specificity_isoform_no.pdf", width=21, height=5)
par(mfrow=c(1,3))
par(mar=c(7.1, 4.1, 4.1, 10.1), xpd=TRUE)
rr <- list()
for(drug in names(all.biomarkers)){
xx <- table(all.biomarkers[[drug]]$specificity, all.biomarkers[[drug]]$isoforms.no)
## NOTE(review): s[1] takes the count for the first NON-ZERO isoform-number
## category, which is treated as the "1 isoform" bucket; this is only
## correct if that first category is isoforms.no == 1 -- TODO confirm.
mm <- apply(xx, MARGIN=1, function(x){s <- x[which(x!=0)];c(s[1], (sum(s)-s[1]))})
common[drug, ] <- if("common" %in% colnames(mm)){mm[,"common"]}else{c(0,0)}
gene.specific[drug, ] <- if("gene.specific" %in% colnames(mm)){mm[,"gene.specific"]}else{c(0,0)}
isoform.specific[drug, ] <- if("isoform.specific" %in% colnames(mm)){mm[,"isoform.specific"]}else{c(0,0)}
rr[[drug]] <- mm
}
## Normalise each row to proportions; rows summing to 0 are divided by 1 to
## avoid 0/0 NaNs.
common <- common/apply(common,MARGIN = 1, function(x){ss<- sum(x);ifelse(ss!=0, ss, 1)})
barplot(t(common), las=2,col=mycol[1:2], main="common", ylab="percentage")
gene.specific <- gene.specific/apply(gene.specific,MARGIN = 1, function(x){ss<- sum(x);ifelse(ss!=0, ss, 1)})
barplot(t(gene.specific), las=2,col=mycol[1:2], main="gene specific", ylab="percentage")
isoform.specific <- isoform.specific/apply(isoform.specific,MARGIN = 1, function(x){ss<- sum(x);ifelse(ss!=0, ss, 1)})
barplot(t(isoform.specific), las=2,col=mycol[1:2], main="isoform specific", ylab="percentage")
legend("topright", inset=c(-0.2,0), legend = colnames(isoform.specific), fill = mycol[1:2], bty="n")
dev.off()
###
## Annotate every biomarker with its Ensembl biotype: isoform-level rows get
## the transcript biotype, gene-level rows get the gene biotype.
for (drug in names(all.biomarkers)) {
  tbl <- all.biomarkers[[drug]]
  iso.rows <- which(tbl[, "type"] == "isoform")
  gene.rows <- which(tbl[, "type"] == "gene")
  if (length(iso.rows) > 0) {
    tbl[iso.rows, "biotype"] <-
      annot.ensembl.all.isoforms[tbl[iso.rows, "transcript.id"], "TranscriptBioType"]
  }
  if (length(gene.rows) > 0) {
    tbl[gene.rows, "biotype"] <-
      annot.ensembl.all.genes[tbl[gene.rows, "gene.id"], "GeneBioType"]
  }
  all.biomarkers[[drug]] <- tbl
}
## Proportions of biomarker biotypes within each specificity class, plotted
## per drug (common / gene specific / isoform specific panels).
mycol3 <- RColorBrewer::brewer.pal(n=5, name="Set3")
pdf("result/auc_recomputed_ccle_gdsc/biomarkers_specificity_biotypes.pdf", width=21, height=5)
par(mfrow=c(1,3))
par(mar=c(7.1, 4.1, 4.1, 10.1), xpd=TRUE)
biotypes <- c("protein_coding", "antisense", "processed_transcript", "lincRNA")
isoform.specific<- gene.specific<- common <- matrix(0, nrow=length(all.biomarkers), ncol=length(biotypes)+1, dimnames=list(names(all.biomarkers), c(biotypes, "other")))
for(drug in names(all.biomarkers)){
if(all(!is.na(all.biomarkers[[drug]]$specificity))){
xx <- table(all.biomarkers[[drug]]$specificity, all.biomarkers[[drug]]$biotype)
## Named biotypes keep their own column; everything else is pooled in "other".
mm <- cbind(xx[, intersect(biotypes, colnames(xx))], "other"=apply(xx[,which(!colnames(xx) %in% biotypes), drop=FALSE], 1, sum))
## Row-normalise each specificity class present for this drug.
if("common" %in% rownames(mm)){
common[drug, colnames(mm)] <- mm["common",]/apply(mm , 1, sum)["common"]
}
if("gene.specific" %in% rownames(mm)){
gene.specific[drug, colnames(mm)] <- mm["gene.specific",]/apply(mm , 1, sum)["gene.specific"]
}
if("isoform.specific" %in% rownames(mm)){
isoform.specific[drug, colnames(mm)] <- mm["isoform.specific",]/apply(mm , 1, sum)["isoform.specific"]
}
}
}
barplot(t(common), las=2,col=mycol3, main="common", ylab="percentage")
barplot(t(gene.specific), las=2,col=mycol3, main="gene specific", ylab="percentage")
barplot(t(isoform.specific), las=2,col=mycol3, main="isoform specific", ylab="percentage")
legend("topright", inset=c(-0.2,0), legend=c(biotypes, "other"), fill = mycol3, bty="n")
dev.off()
|
/training_results.R
|
no_license
|
xulijunji/RNASeqDrug
|
R
| false
| false
| 17,618
|
r
|
options(stringsAsFactors=FALSE)
file.sensitivity <- "auc_recomputed_drug_association.RData"
require(stringr) || stop("Library stringr is not available!")
require(corrplot) || stop("Library corrplot is not available!")
require(VennDiagram) || stop("Library VennDiagram is not available!")
require(RColorBrewer) || stop("Library RColorBrewer is not available!")
require(SDMTools) || stop("Library SDMTools is not available!")
require(calibrate) || stop("Library calibrate is not available!")
require(Hmisc) || stop("Library Hmisc is not available!")
require(grid) || stop("Library grid is not available!")
require(gridBase) || stop("Library gridBase is not available!")
require(lattice) || stop("Library lattice is not available!")
require(WriteXLS) || stop("Library WriteXLS is not available!")
require(PharmacoGx) || stop("Library PharmacoGx is not available!")
require(Biobase) || stop("Library Biobase is not available!")
require(magicaxis) || stop("Library magicaxis is not available!") #add minor tick marks to the plot
require("np") || stop("Library np is not available!")
require(ggplot2) || stop("Library ggplot2 is not available!")
require(gridExtra) || stop("Library gridExtra is not available!")
require(easyGgplot2) || stop("Library easyGgplot2 is not available!")
path.data <- "data"
path.code <- file.path("code")
path.result <- file.path("result")
effect.size.cut.off <- 0.55
fdr.cut.off <- 0.01
tissue <- "breast"
model.method <- "glm"
glm.family <- "gaussian"
effect.size <- "cindex" #c("r.squared", "cindex")
adjustment.method <- "fdr"#c("bonferroni", "fdr")
breast.specific <- ifelse(regexpr("breast", file.sensitivity) < 1, FALSE, TRUE)
data.type <- ifelse(regexpr("mut", file.sensitivity) >= 1, "all", "expression")
source(file.path(path.code, "foo.R"))
path.diagrams<- "result/auc_recomputed_ccle_gdsc"
if (!file.exists(path.diagrams)){dir.create(file.path(path.diagrams))}
path.training.data <- "data/training_ccle_gdsc.RData"
load(path.training.data, verbose=TRUE)
if("gdsc.drug.sensitivity" %in% ls()) {
training.type <-"CCLE_GDSC"
} else {
training.type <-"CCLE"
}
if(data.type == "all") {
Models <- c("M3B", "M2", "MC", "MM")
Models.names <- c("Isoforms", "Genes", "Copy Number Variations", "Mutations")
}else {
Models <- c("M2", "M3B")
Models.names <- c("Genes", "Isoforms")
}
names(Models.names) <- Models
ccle.tissuetype <- tissueTypes
ccle.tissuetype[,1] <- as.character(ccle.tissuetype[,1])
ccle.primary.tissuetype <- ccle.tissuetype
ccle.primary.drug.sensitivity <- ccle.drug.sensitivity
mean.ccle.isoforms.fpkm <- colMeans(ccle.isoforms.fpkm, na.rm=TRUE)
if("haematopoietic_and_lymphoid_tissue" %in% ccle.tissuetype[,1])
{
ccle.tissuetype[which(ccle.tissuetype[,1] == "haematopoietic_and_lymphoid_tissue"), 1] <- "haematopoietic_and_lymphoid"
}
########Color GDSC
A <- NULL; for(i in 1:length(drugs)){A <- union(A, unique(gdsc.drug.sensitivity[,i]))}; A<- A[order(A)]
color.sensitivity <- matrix(NA, nrow=length(A), ncol=1)
rownames(color.sensitivity) <- A
colnames(color.sensitivity) <- "col"
color.sensitivity[1:nrow(color.sensitivity)-1, "col"] <- colorRampPalette(c("blue" , "purple", "red"))(nrow(color.sensitivity)-1)
color.sensitivity[nrow(color.sensitivity), "col"] <- "#000000"
###### drug sensitivity
objOrderDrugs <- fnOrderDrugs(data=ccle.drug.sensitivity, filename=file.path(path.diagrams, "CCLE_DrugSensitivity.pdf"), ylab="auc recomputed", main="CCLE Drug sensitivity")
invisible(fnOrderDrugs(gdsc.drug.sensitivity, file.path(path.diagrams, "GDSC_DrugSensitivity.pdf"), ylab="auc recomputed", main="GDSC Drug sensitivity"))
##gray
#objOrderDrugs <- fnOrderDrugs(ccle.drug.sensitivity, file.path(path.diagrams, "DrugSensitivity_allgenes.pdf"))
DOI <- objOrderDrugs$order
ccle.drug.sensitivity.ordered <- objOrderDrugs$ordered
load(file.path(path.data, file.sensitivity), verbose=T)
if(length(drug.association)==2) {
drug.association <- drug.association[[effect.size]]
drug.association.statistics <- drug.association.statistics[[effect.size]]
}
Prototype <- drug.association[[1]][[1]]
myf <- file.path(path.diagrams, "allGenes_association_matrices.RData")
if(file.exists(myf)){
load(myf)
}else{
models.drugs.names <- expand.grid(drugs, Models)
FDR_List <- matrix(NA, ncol=(length(drugs) * length(Models)), nrow=length(drug.association), dimnames=list(names(drug.association), paste(models.drugs.names[, 1], models.drugs.names[, 2], sep ="_")))
estimate_List <- Pvalues.Numinals <- FDR_List
models.plus.drugs.names <- expand.grid(drugs, c("M0", Models))
statistics.matrix <- matrix(NA, ncol=(length(drugs) * (length(Models) + 1)), nrow=length(drug.association), dimnames=list(names(drug.association), paste(models.plus.drugs.names[, 1], models.plus.drugs.names[, 2], sep ="_")))
best.isoforms.matrix <- matrix(NA, ncol=length(drugs) , nrow=length(drug.association))
colnames(best.isoforms.matrix) <- drugs
rownames(best.isoforms.matrix) <- names(drug.association)
Min.Pvalues <- NULL
for(i in drugs) {
statistics.matrix[,paste(i, "M0",sep ="_")] <- sapply(drug.association.statistics, function(x){ifelse(!is.null(x[[i]]["median", "M0"]), x[[i]]["median", "M0"], 0)})
for(j in 1:length(Models)) {
FDR_List[, paste(i, Models[j], sep ="_")] <- p.adjust(sapply(drug.association, function(x){ifelse(!is.null(x[[i]]["M0",Models[j]]), x[[i]]["M0", Models[j]], 1)}) ,method=adjustment.method)
Min.Pvalues[paste(i,Models[j],sep ="_")] <- min(sapply(drug.association, function(x){ifelse(!is.null(x[[i]]["M0", Models[j]]), x[[i]]["M0", Models[j]], 1)}))
estimate_List[,paste(i,Models[j],sep ="_")] <- sapply(drug.association, function(x){ifelse(!is.null(x[[i]][Models[j], "M0"]), x[[i]][Models[j], "M0"], 0)})
Pvalues.Numinals[,paste(i,Models[j],sep ="_")] <- sapply(drug.association, function(x){ifelse(!is.null(x[[i]]["M0",Models[j]]), x[[i]]["M0",Models[j]], 1)})
statistics.matrix[,paste(i,Models[j],sep ="_")] <- sapply(drug.association.statistics, function(x){ifelse(!is.null(x[[i]]["median", Models[j]]), x[[i]]["median", Models[j]], 0)})
}
best.isoforms.matrix[,i] <- sapply(drug.association.best.isoforms, function(x){ifelse(!is.null(x[[i]]), x[[i]], "")})
}
load("data/ensembl.map.genes.isoforms.GRCh38.87.RData")
fnNumber_Isoforms_of_Gene <- function(Gene_Map=ensembl.map.genes.isoforms, GeneId)
{
return(length(which(unlist(strsplit(Gene_Map[, as.character(GeneId)], ",")) %in% colnames(ccle.isoforms.fpkm))))
}
isoforms_No_List <- matrix(NA, ncol=1, nrow=nrow(annot.ensembl.all.genes))
colnames(isoforms_No_List) <- "isoforms.NO"
rownames(isoforms_No_List) <- rownames(annot.ensembl.all.genes)
for( i in 1:nrow(isoforms_No_List))
{
isoforms_No_List[i,] <- fnNumber_Isoforms_of_Gene(GeneId=as.character(rownames(isoforms_No_List)[i]))
}
isoforms_No_List <- data.frame(isoforms_No_List,stringsAsFactors=FALSE)
# save(isoforms_No_List, file=myf)
#}
isoforms_No_List <- isoforms_No_List[GeneList, , drop=FALSE]
save(FDR_List, estimate_List, Pvalues.Numinals, statistics.matrix, best.isoforms.matrix, Min.Pvalues, isoforms_No_List, file=myf)
}
##########Analyses
result.effect.size <- fnComputeAssociateGenes.effect.size(FDR_CutOff=fdr.cut.off, effect.size_CutOff=effect.size.cut.off)
write.csv(fnWilcox(result.effect.size, TRUE)$comparison, file=file.path(path.diagrams, "comparison_test_wilcox.csv"))
###Figure 2A
### The number of significant predictive biomarkers identified in training
### biomarkerd are plotted in seperate bars for isoforms and gene models
barplot.models(model=result.effect.size, isoforms_No="all", sign="all", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=1.2)
cindexDistributions()
###
barplot.models(model=result.effect.size, isoforms_No="1.isoform", sign="all", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
barplot.models(model=result.effect.size, isoforms_No="n.isoforms", sign="all", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
#barplot.models(model=result.effect.size, isoforms_No="all", sign="positive", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
#barplot.models(model=result.effect.size, isoforms_No="all", sign="negative", prototype=Models, main.title=sprintf("%s < 1%% \n %s > 0.55", adjustment.method, effect.size), yaxis="Log", breakpoint="Regular", cex=0.7)
associations <- associations.all.drugs(model.rank="M3B", annot.ensembl.all.genes=annot.ensembl.all.genes)
save(associations, file=file.path(path.diagrams, "associations.RData"))
##all.biomarkers would include all the significant biomarkers
all.biomarkers <- fnTop.significant.biomarkers(associations, cut_off=fdr.cut.off, BioNo="all", rank.type="pvalue.adj")
##constraint the analyses to those predicted biomarkers with effect size greater than 0.55
for(i in names(all.biomarkers)) {
all.biomarkers[[i]] <- all.biomarkers[[i]][which(all.biomarkers[[i]]$cindex > effect.size_CutOff),]
}
save(all.biomarkers, file=file.path(path.diagrams, "all.biomarkers.original.RData"))
if(!breast.specific)
{
source("code/foo_training.R")
all.biomarkers <- fnidentify.tissue.specific.biomarkers(biomarkers=all.biomarkers)
}else{
for( i in 1:length(all.biomarkers))
{
if(all(is.na(all.biomarkers[[i]])))
{
all.biomarkers[[i]][, tissue] <- NA
all.biomarkers[[i]][, paste0(tissue, "_boot")] <- NA
}else{
all.biomarkers[[i]][, tissue] <- 1
all.biomarkers[[i]][, paste0(tissue, "_boot")] <- 1
}
}
}
all.biomarkers <- lapply(all.biomarkers, function(x){if("breast" %in% colnames(x)){x[order(x[, "breast"], na.last=T, decreasing=T),]}else{x}})
save(all.biomarkers, file=file.path(path.diagrams, "all.biomarkers.RData"))
#WriteXLS::WriteXLS("all.biomarkers", ExcelFileName=file.path(path.diagrams, "all.biomarkers.xlsx"), row.names=TRUE)
BiomarkersNo <- matrix(NA, ncol=3, nrow=length(all.biomarkers))
rownames(BiomarkersNo) <- names(all.biomarkers)
colnames(BiomarkersNo) <- c("gene.specific", "isoform.specific", "common")
for( i in names(all.biomarkers))
{
top.significant.table <- table(all.biomarkers[[i]][, "specificity"])
BiomarkersNo[i, "isoform.specific"] <- top.significant.table["isoform.specific"]
BiomarkersNo[i, "gene.specific"] <- top.significant.table["gene.specific"]
BiomarkersNo[i, "common"] <- top.significant.table["common"]/2
}
write.csv(BiomarkersNo, file=file.path(path.diagrams, "Biomarkers.csv"))
# For each drug, record the single biomarker with the largest effect size.
# NOTE(review): TOP is an atomic matrix — the first character assignment
# coerces the whole matrix to character, so numeric columns are stored as
# strings in TopOne.csv; confirm this is acceptable for downstream use.
TOP <- matrix(NA, ncol=9, nrow=length(all.biomarkers))
rownames(TOP) <- names(all.biomarkers)
colnames(TOP) <- c("symbol", "isoforms.no", "biomarker.id", "type", "specificity", "estimate", effect.size, "fdr", "delta.rank")
for( i in names(all.biomarkers)) {
  # index of the row with the maximum effect size (length 0 when table is empty)
  xx <- which.max(all.biomarkers[[i]][,effect.size])
  #xx <- 1
  if(length(xx) > 0) {
    TOP[i, "symbol"] <- as.character(all.biomarkers[[i]][xx, "symbol"])
    TOP[i, "isoforms.no"] <- all.biomarkers[[i]][xx, "isoforms.no"]
    TOP[i, "biomarker.id"] <- as.character(all.biomarkers[[i]][xx, "biomarker.id"])
    TOP[i, "type"] <- all.biomarkers[[i]][xx, "type"]
    TOP[i, "specificity"] <- all.biomarkers[[i]][xx, "specificity"]
    TOP[i, "estimate"] <- all.biomarkers[[i]][xx, "estimate"]
    TOP[i, effect.size] <- all.biomarkers[[i]][xx, effect.size]
    TOP[i, "fdr"] <- all.biomarkers[[i]][xx, adjustment.method]
    TOP[i, "delta.rank"] <- all.biomarkers[[i]][xx, "delta.rank"]
  }
}
write.csv(TOP, file=file.path(path.diagrams, "TopOne.csv"))
# Sanity-check the associations against known drug/biomarker pairs.
Check.KnownAssociations(associations)
percentage.biomarkers.type <- fnPercentageBiomarkersType(all.biomarkers)
###Figure 2B
### The ratio of biomarkers according to their specificity in training
mystacked.barplot.simple(Filename=file.path(path.diagrams, "ProportionBiomarkersType.pdf"), data=percentage.biomarkers.type, main.label="Proportion of specificity of biomarkers", cex=1.2)
###
### Supplementary Figure 10
## annotation of different sets of biomarkers based on their biotypes
mycol <- RColorBrewer::brewer.pal(n=4, name="Set1")[c(1, 4, 2)]
pdf("result/auc_recomputed_ccle_gdsc/biomarkers_specificity_biotypes2.pdf", width=21, height=10)
par(mfrow=c(2,3))
par(mar=c(7.1, 4.1, 4.1, 10.1), xpd=TRUE)
biotypes <- c("protein_coding", "antisense", "processed_transcript", "lincRNA", "pseudogene", "other")
# rr[[i]]: per-drug fraction of biomarkers of biotype i in each specificity class
rr <- list()
for(i in 1:length(biotypes)) {
  rr[[i]] <- matrix(0, nrow=length(all.biomarkers), ncol=3, dimnames=list(names(all.biomarkers), c("isoform.specific", "common", "gene.specific")))
}
for(drug in names(all.biomarkers)){
  if(all(!is.na(all.biomarkers[[drug]]$specificity))){
    # specificity x biotype contingency table; columns are removed from xx as
    # they are consumed so that "other" picks up whatever is left at the end
    xx <- table(all.biomarkers[[drug]]$specificity, all.biomarkers[[drug]]$biotype)
    for(i in 1:length(biotypes)) {
      yy <- grep(biotypes[i], colnames(xx))
      if(length(yy) > 0) {
        mm <- apply(xx[ , yy, drop=FALSE], 1, sum)
        rr[[i]][drug, intersect(colnames(rr[[i]]), names(mm))] <- mm[intersect(colnames(rr[[i]]), names(mm))]/sum(mm)
        xx <- xx[ ,-yy, drop=FALSE]
      }else if (biotypes[i] == "other"){
        mm <- apply(xx, 1, sum)
        rr[[i]][drug, intersect(colnames(rr[[i]]), names(mm))] <- mm[intersect(colnames(rr[[i]]), names(mm))]/sum(mm)
        # NOTE(review): here yy is integer(0), so xx[ , -yy] selects ZERO
        # columns (empties xx). Harmless only because "other" is last — verify.
        xx <- xx[ , -yy, drop=FALSE]
      }
    }
  }
}
for(i in 1:length(biotypes)) {
  barplot(t(rr[[i]]), las=2,col=mycol, main=biotypes[i], ylab="percentage")
}
# legend is attached to the last panel drawn
legend("topright", inset=c(-0.2,0), legend=colnames(rr[[1]]), fill=mycol, bty="n")
dev.off()
###
### Supplementary Figure 11
## annotation of different sets of biomarkers based on the number of
## alternatively spliced products in their corresponding gene:
## per drug and specificity class, the split between genes with a single
## isoform vs genes with multiple isoforms.
mycol <- RColorBrewer::brewer.pal(n=3, name="Set1")[c(2,1)]
isoform.specific<- gene.specific<- common <- matrix(NA, nrow=length(all.biomarkers), ncol=2, dimnames=list(names(all.biomarkers), c("1 isoform", "n isoforms")))
pdf("result/auc_recomputed_ccle_gdsc/biomarkers_specificity_isoform_no.pdf", width=21, height=5)
par(mfrow=c(1,3))
par(mar=c(7.1, 4.1, 4.1, 10.1), xpd=TRUE)
rr <- list()
for(drug in names(all.biomarkers)){
  xx <- table(all.biomarkers[[drug]]$specificity, all.biomarkers[[drug]]$isoforms.no)
  # first nonzero count = single-isoform genes; remainder = multi-isoform genes
  # NOTE(review): this assumes the first nonzero column corresponds to
  # isoforms.no == 1 — confirm isoforms.no always starts at 1.
  mm <- apply(xx, MARGIN=1, function(x){s <- x[which(x!=0)];c(s[1], (sum(s)-s[1]))})
  common[drug, ] <- if("common" %in% colnames(mm)){mm[,"common"]}else{c(0,0)}
  gene.specific[drug, ] <- if("gene.specific" %in% colnames(mm)){mm[,"gene.specific"]}else{c(0,0)}
  isoform.specific[drug, ] <- if("isoform.specific" %in% colnames(mm)){mm[,"isoform.specific"]}else{c(0,0)}
  rr[[drug]] <- mm
}
# normalise each row to proportions, guarding against all-zero rows
common <- common/apply(common,MARGIN = 1, function(x){ss<- sum(x);ifelse(ss!=0, ss, 1)})
barplot(t(common), las=2,col=mycol[1:2], main="common", ylab="percentage")
gene.specific <- gene.specific/apply(gene.specific,MARGIN = 1, function(x){ss<- sum(x);ifelse(ss!=0, ss, 1)})
barplot(t(gene.specific), las=2,col=mycol[1:2], main="gene specific", ylab="percentage")
isoform.specific <- isoform.specific/apply(isoform.specific,MARGIN = 1, function(x){ss<- sum(x);ifelse(ss!=0, ss, 1)})
barplot(t(isoform.specific), las=2,col=mycol[1:2], main="isoform specific", ylab="percentage")
legend("topright", inset=c(-0.2,0), legend = colnames(isoform.specific), fill = mycol[1:2], bty="n")
dev.off()
###
## annotation of different sets of biomarkers based on their biotypes
## update all.biomarkers with gene/transcript biotypes pulled from the
## Ensembl annotation tables (transcript biotype for isoform rows, gene
## biotype for gene rows)
for(drug in names(all.biomarkers)) {
  ii <- which(all.biomarkers[[drug]][,"type"] == "isoform")
  gg <- which(all.biomarkers[[drug]][,"type"] == "gene")
  if(length(ii) > 0){
    all.biomarkers[[drug]][ii, "biotype"] <- annot.ensembl.all.isoforms[all.biomarkers[[drug]][ii, "transcript.id"], "TranscriptBioType"]
  }
  if(length(gg) > 0){
    all.biomarkers[[drug]][gg, "biotype"] <- annot.ensembl.all.genes[all.biomarkers[[drug]][gg, "gene.id"], "GeneBioType"]
  }
}
# One stacked barplot per specificity class: biotype composition per drug.
mycol3 <- RColorBrewer::brewer.pal(n=5, name="Set3")
pdf("result/auc_recomputed_ccle_gdsc/biomarkers_specificity_biotypes.pdf", width=21, height=5)
par(mfrow=c(1,3))
par(mar=c(7.1, 4.1, 4.1, 10.1), xpd=TRUE)
biotypes <- c("protein_coding", "antisense", "processed_transcript", "lincRNA")
isoform.specific<- gene.specific<- common <- matrix(0, nrow=length(all.biomarkers), ncol=length(biotypes)+1, dimnames=list(names(all.biomarkers), c(biotypes, "other")))
for(drug in names(all.biomarkers)){
  if(all(!is.na(all.biomarkers[[drug]]$specificity))){
    xx <- table(all.biomarkers[[drug]]$specificity, all.biomarkers[[drug]]$biotype)
    # collapse biotypes outside the four named ones into a single "other" column
    mm <- cbind(xx[, intersect(biotypes, colnames(xx))], "other"=apply(xx[,which(!colnames(xx) %in% biotypes), drop=FALSE], 1, sum))
    if("common" %in% rownames(mm)){
      common[drug, colnames(mm)] <- mm["common",]/apply(mm , 1, sum)["common"]
    }
    if("gene.specific" %in% rownames(mm)){
      gene.specific[drug, colnames(mm)] <- mm["gene.specific",]/apply(mm , 1, sum)["gene.specific"]
    }
    if("isoform.specific" %in% rownames(mm)){
      isoform.specific[drug, colnames(mm)] <- mm["isoform.specific",]/apply(mm , 1, sum)["isoform.specific"]
    }
  }
}
barplot(t(common), las=2,col=mycol3, main="common", ylab="percentage")
barplot(t(gene.specific), las=2,col=mycol3, main="gene specific", ylab="percentage")
barplot(t(isoform.specific), las=2,col=mycol3, main="isoform specific", ylab="percentage")
# legend is attached to the last panel drawn
legend("topright", inset=c(-0.2,0), legend=c(biotypes, "other"), fill = mycol3, bty="n")
dev.off()
|
# Life expectancy over time, one smoothed scatter panel per continent.
# Assumes `gapminder` and `continent_colors` come from the gapminder package
# and ggplot2 is attached -- TODO confirm against the surrounding script.
# BUG FIX: the original line ended after facet_wrap() without a `+`, which
# detached the scale/theme layers and made the following expression error.
ggplot(gapminder,
       aes(x = year, y = lifeExp, group = continent, color = continent)) +
  geom_point(lwd = 1, show.legend = FALSE) +
  geom_smooth(method = "auto") +
  facet_wrap(~ continent) +
  scale_color_manual(values = continent_colors) +
  theme_bw() +
  theme(strip.text = element_text(size = rel(1.1)))
|
/Fanli_Si.R
|
no_license
|
FanliSi/Alpha_GW1
|
R
| false
| false
| 308
|
r
|
ggplot(gapminder,
aes(x = year, y = lifeExp, group = continent, color = continent)) +
geom_point(lwd = 1, show.legend = FALSE) + geom_smooth(method="auto") + facet_wrap(~ continent)
scale_color_manual(values = continent_colors) +
theme_bw() + theme(strip.text = element_text(size = rel(1.1)))
|
library(intrinsicDimension)
### Name: oblongNormal
### Title: Oblong Normal Distribution
### Aliases: oblongNormal
### Keywords: datagen
### ** Examples
# Draw 100 points from a 10-dimensional oblong normal and show two
# pairwise projections side by side.
samples <- oblongNormal(100, 10)
par(mfrow = c(1, 2))
plot(samples[, 1], samples[, 2])
plot(samples[, 1], samples[, 6])
|
/data/genthat_extracted_code/intrinsicDimension/examples/oblong.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 270
|
r
|
library(intrinsicDimension)
### Name: oblongNormal
### Title: Oblong Normal Distribution
### Aliases: oblongNormal
### Keywords: datagen
### ** Examples
datap <- oblongNormal(100, 10)
par(mfrow = c(1, 2))
plot(datap[, 1], datap[, 2])
plot(datap[, 1], datap[, 6])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/errcheck_times.R
\name{errcheck_times}
\alias{errcheck_times}
\title{Error check \code{times}}
\usage{
errcheck_times(times, callfunc)
}
\arguments{
\item{times}{Tests whether this is a numeric vector with unit-spaced increasing values}
\item{callfunc}{Function calling this one, for better error messaging}
}
\value{
\code{errcheck_times} returns nothing but throws an error if the conditions are not met
}
\description{
Error check whether a vector can represent times at which data suitable
for wavelet transforms were measured
}
\author{
Daniel Reuman, \email{reuman@ku.edu}
}
|
/man/errcheck_times.Rd
|
no_license
|
cran/wsyn
|
R
| false
| true
| 662
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/errcheck_times.R
\name{errcheck_times}
\alias{errcheck_times}
\title{Error check \code{times}}
\usage{
errcheck_times(times, callfunc)
}
\arguments{
\item{times}{Tests whether this is a numeric vector with unit-spaced increasing values}
\item{callfunc}{Function calling this one, for better error messaging}
}
\value{
\code{errcheck_times} returns nothing but throws and error if the conditions are not met
}
\description{
Error check whether a vector can represent times at which data suitable
for wavelet transforms were measured
}
\author{
Daniel Reuman, \email{reuman@ku.edu}
}
|
# Driver for the internal DLMtool LBSPR optimiser: build a minimal
# (mostly zero/empty) argument list, invoke the function via do.call,
# and print the structure of the result.
lbspr_args <- list(Beta = 0, CAL = numeric(0), CVLinf = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), nage = 0L, nlen = 0L, pars = c(3.81959242373749e-313, 0), rLens = numeric(0))
lbspr_result <- do.call(DLMtool:::LBSPRopt, lbspr_args)
str(lbspr_result)
|
/DLMtool/inst/testfiles/LBSPRopt/AFL_LBSPRopt/LBSPRopt_valgrind_files/1615838117-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 344
|
r
|
testlist <- list(Beta = 0, CAL = numeric(0), CVLinf = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), nage = 0L, nlen = 0L, pars = c(3.81959242373749e-313, 0), rLens = numeric(0))
result <- do.call(DLMtool:::LBSPRopt,testlist)
str(result)
|
# Tidy Tuesday 2020 Week 15
# Tour de France
# data provided by Alastair Rushworth
library(remotes)
# use remotes (already loaded) rather than devtools for the GitHub install
remotes::install_github("alastairrushworth/tdf")
library(tdf)
library(tidyverse)
glimpse(editions)
library(ggthemes)
library(ggplot2)
# BUG FIX: the package is "ggrepel", not "ggrepl" — geom_label_repel()
# below would otherwise not be found.
library(ggrepel)
caption <- paste (strwrap ("TidyTuesday wk 15 April 2020 @RegisOconnor"), collapse = "\n")
# BMI (kg/m^2) of each winner against their average speed (km/h),
# with a loess trend and 10 randomly labelled winners.
editions %>%
  ggplot(aes(y =weight / height ^2, x = distance/time_overall, color = edition)) +
  geom_point(na.rm = TRUE, size = 3) +
  geom_smooth(aes(y = weight / height ^2, x = distance/time_overall))+
  xlab('Average Speed - km / h') +
  ylab('BMI') +
  ggtitle('Tour de France Winners BMI vs. speed') +
  theme(legend.position = "none") +
  geom_label_repel(data = editions %>% sample_n(10),
                   aes(label = winner_name), size = 2.5,
                   nudge_y = 4, na.rm = TRUE,
                   segment.alpha = 0.2) +
  labs(caption = caption) +
  theme_tufte()
|
/tt2020w15TourdeFrance.R
|
permissive
|
regiso/tidytuesday
|
R
| false
| false
| 981
|
r
|
# Tidy Tuesday 2020 Week 15
# Tour de France
# data provided by Alastair Rushworth
library(remotes)
devtools::install_github("alastairrushworth/tdf")
library(tdf)
library(tidyverse)
glimpse(editions)
library(ggthemes)
library(ggplot2)
library(ggrepl)
caption <- paste (strwrap ("TidyTuesday wk 15 April 2020 @RegisOconnor"), collapse = "\n")
editions %>%
ggplot(aes(y =weight / height ^2, x = distance/time_overall, color = edition)) +
geom_point(na.rm = TRUE, size = 3) +
geom_smooth(aes(y = weight / height ^2, x = distance/time_overall))+
xlab('Average Speed - km / h') +
ylab('BMI') +
ggtitle('Tour de France Winners BMI vs. speed') +
theme(legend.position = "none") +
geom_label_repel(data = editions %>% sample_n(10),
aes(label = winner_name), size = 2.5,
nudge_y = 4, na.rm = TRUE,
segment.alpha = 0.2) +
labs(caption = caption) +
theme_tufte()
|
# Scatter panels relating male life expectancy at birth (Swaziland data)
# to three other indicators, each with a regression line, confidence band
# and a Pearson correlation annotation. The four near-identical ggscatter
# blocks of the original are factored into one helper (behavior unchanged).
library(tidyverse)
library(ggpubr)
library(gridExtra)
data <- read_tsv("Swaziland.txt")
# infant deaths per birth: ratio of the two raw columns
data <- data %>% mutate(deathBirth = babyDeath / babyBirth)
# Helper: one ggpubr panel of `yvar` vs male life expectancy.
#   title / ylab       - panel title and y-axis label
#   label.x / label.y  - placement of the correlation annotation
make_panel <- function(df, yvar, title, ylab, label.x, label.y) {
  ggscatter(
    df,
    x = "lifeMan",
    y = yvar,
    add = "reg.line",
    add.params = list(color = "blue", fill = "lightgray"),
    conf.int = TRUE,
    title = title,
    xlab = "男性出生时的预期寿命",
    ylab = ylab
  ) + stat_cor(
    method = "pearson",
    label.x = label.x,
    label.y = label.y,
    size = 8,
    color = "red"
  )
}
p1 <- make_panel(data, "lifeWoman", "男性与女性出生时预期寿命的相关性", "女性出生时的预期寿命", 50, 60)
p2 <- make_panel(data, "babyBirth", "(男性)出生时预期寿命与出生率的相关性", "出生率", 50, 51)
p3 <- make_panel(data, "babyDeath", "(男性)出生时预期寿命与婴儿死亡率的相关性", "婴儿死亡率", 52, 130)
p4 <- make_panel(data, "deathBirth", "(男性)出生时预期寿命与婴儿死亡率/出生率的相关性", "婴儿死亡率/出生率", 50, 3)
# Write all four panels side by side to a single PNG.
png(
  "swaziland_correlation.png",
  width = 2048,
  height = 1024,
  res = 95
)
grid.arrange(p1, p2, p3, p4, ncol = 4)
dev.off()
|
/show/Swaziland/Swaziland_cor.R
|
no_license
|
Yixf-Education/course_Statistics_Story
|
R
| false
| false
| 2,027
|
r
|
library(tidyverse)
library(ggpubr)
library(gridExtra)
data <- read_tsv("Swaziland.txt")
data <- data %>% mutate(deathBirth = babyDeath / babyBirth)
p1 <-
ggscatter(
data,
x = "lifeMan",
y = "lifeWoman",
add = "reg.line",
add.params = list(color = "blue", fill = "lightgray"),
conf.int = TRUE,
title = "男性与女性出生时预期寿命的相关性",
xlab = "男性出生时的预期寿命",
ylab = "女性出生时的预期寿命"
) + stat_cor(
method = "pearson",
label.x = 50,
label.y = 60,
size = 8,
color = "red"
)
p2 <-
ggscatter(
data,
x = "lifeMan",
y = "babyBirth",
add = "reg.line",
add.params = list(color = "blue", fill = "lightgray"),
conf.int = TRUE,
title = "(男性)出生时预期寿命与出生率的相关性",
xlab = "男性出生时的预期寿命",
ylab = "出生率"
) + stat_cor(
method = "pearson",
label.x = 50,
label.y = 51,
size = 8,
color = "red"
)
p3 <-
ggscatter(
data,
x = "lifeMan",
y = "babyDeath",
add = "reg.line",
add.params = list(color = "blue", fill = "lightgray"),
conf.int = TRUE,
title = "(男性)出生时预期寿命与婴儿死亡率的相关性",
xlab = "男性出生时的预期寿命",
ylab = "婴儿死亡率"
) + stat_cor(
method = "pearson",
label.x = 52,
label.y = 130,
size = 8,
color = "red"
)
p4 <-
ggscatter(
data,
x = "lifeMan",
y = "deathBirth",
add = "reg.line",
add.params = list(color = "blue", fill = "lightgray"),
conf.int = TRUE,
title = "(男性)出生时预期寿命与婴儿死亡率/出生率的相关性",
xlab = "男性出生时的预期寿命",
ylab = "婴儿死亡率/出生率"
) + stat_cor(
method = "pearson",
label.x = 50,
label.y = 3,
size = 8,
color = "red"
)
png(
"swaziland_correlation.png",
width = 2048,
height = 1024,
res = 95
)
grid.arrange(p1, p2, p3, p4, ncol = 4)
dev.off()
|
#Figure 4.5
#http://www.amazon.com/Lattice-Multivariate-Data-Visualization-Use/dp/0387759689/ref=cm_cr_pr_product_top
require(rCharts)
require(reshape2)
data( postdoc, package = "latticeExtra")
# row-normalise the postdoc table and reshape to long format; the result is
# stored under a name that does not shadow base::data
postdoc_long <- melt(data.frame(prop.table(postdoc, margin = 1)))
colnames(postdoc_long)[4] <- "Proportion"
# horizontal stacked bar chart: proportion by field, coloured by reason
chart4_5 <- rPlot(
  Proportion ~ Field,
  data = postdoc_long,
  type = 'bar',
  color = "Reason"
)
chart4_5$coord(type = "cartesian", flip = TRUE)
chart4_5$guides(
  y = list(numticks = length(unique(postdoc_long$Field))),
  color = list(
    numticks = length(unique(postdoc_long$Reason)),
    levels = unique(postdoc_long$Reason)
  )
)
chart4_5
|
/polycharts_versions/figure4_05.R
|
no_license
|
arturochian/rCharts_lattice_book
|
R
| false
| false
| 677
|
r
|
#Figure 4.5
#http://www.amazon.com/Lattice-Multivariate-Data-Visualization-Use/dp/0387759689/ref=cm_cr_pr_product_top
require(rCharts)
require(reshape2)
data( postdoc, package = "latticeExtra")
#get data in form that the example uses and then melt it
data = melt(data.frame(prop.table(postdoc,margin=1)))
colnames( data )[4] <- "Proportion"
chart4_5 <- rPlot(
Proportion ~ Field,
data = data,
type = 'bar',
color = "Reason"
)
chart4_5$coord( type = "cartesian", flip = TRUE )
chart4_5$guides(
y = list( numticks = length ( unique (data$Field ) ) ),
color = list(
numticks = length( unique (data$Reason) ),
levels = unique( data$Reason )
)
)
chart4_5
|
#------------------------------------------------------------
### Acute
#------------------------------------------------------------
# load("all__matrices__norm__and__raw.Rdata")
# remove outlier sample
# acute.meta = acute.meta[-14,]
# acute.matrix = acute.matrix %>% select(-s_11043640)
acute.InputMatrix = cbind(ctrl.matrix, acute.matrix)
library(dplyr)
library(purrr)
library(tidyr)
# One permutation round: shuffle the sample labels of the combined
# control+acute matrix, re-split into case/control submatrices using the
# (unchanged) metadata, and run a row-wise two-sided Wilcoxon test.
# Returns a named list (one wilcox.test result per feature/row).
# Relies on globals: acute.meta, control.meta.
acute.randomizeLabels <- function(inputMatrix){
  # get a random order of sample labels
  acute.permuted.labels <- sample(colnames(inputMatrix), size = ncol(inputMatrix), replace = F)
  # assign random labels to colnames of inputMatrix
  colnames(inputMatrix) <- acute.permuted.labels
  # subset to get the new matrix of cases
  caseM <- as.data.frame(inputMatrix) %>% select(unname(unlist(filter(acute.meta, Type != "AMCffpe") %>% select(simpleName))))
  # subset to get the new matrix of ctrls
  ctrlM <- as.data.frame(inputMatrix) %>% select(control.meta$simpleName)
  # RUN WILCOXON TEST, one row (feature) at a time
  # NOTE(review): conf.level is passed as the string "0.95"; wilcox.test
  # expects a numeric — harmless only while conf.int stays FALSE. Verify.
  acute.permuted.res <- list()
  for (i in 1:nrow(inputMatrix)) {
    acute.permuted.res[[i]] <- wilcox.test(x = unlist(unname(caseM[i,])), y = unlist(unname(ctrlM[i,])),
                                           alternative = "two.sided", paired = F, conf.level = "0.95", exact = F, correct = F)
  }
  names(acute.permuted.res) <- rownames(inputMatrix)
  return(acute.permuted.res)
}
# Run 1000 label-permutation rounds of the Wilcoxon screen.
# Preallocate the result list instead of growing it inside the loop.
n.perm <- 1000
MEGA.acute.permuted.res <- vector("list", n.perm)
for (i in seq_len(n.perm)) {
  MEGA.acute.permuted.res[[i]] <- acute.randomizeLabels(inputMatrix = acute.InputMatrix)
}
saveRDS(MEGA.acute.permuted.res, "results/bacteria_MEGA.acute.permuted.res.Rds")
# Extract the permutation p-values for feature index `x` across all
# permutation rounds.
#   x        - row index of the feature in the input matrix
#   perm.res - list of per-permutation wilcox.test result lists; defaults to
#              the global MEGA.acute.permuted.res for backward compatibility
# Returns a numeric vector with one p-value per permutation.
getPs.acute <- function(x, perm.res = MEGA.acute.permuted.res){
  p.list <- lapply(seq_along(perm.res), function(i) pluck(perm.res, i, x, "p.value"))
  unlist(p.list)
}
# Collect the per-feature permutation p-value vectors into a matrix
# (features x 1000 permutations).
allPs.list.Acute = list()
for (i in 1:nrow(ctrl.matrix)) {
  allPs.list.Acute[[i]] <- getPs.acute(x = i)
}
names(allPs.list.Acute) <- rownames(ctrl.matrix)
acute.1000.table = do.call("rbind", allPs.list.Acute)
# Empirical adjusted p-value per feature: fraction of the 1000 permutation
# p-values that are <= the observed p-value (both rounded to 3 decimals).
# NOTE(review): p.vals.acute (the observed p-values) is not defined in this
# chunk — presumably computed earlier in the script; confirm.
props.acute.list = list()
for (i in 1:nrow(acute.1000.table)) {
  props.acute.list[[i]] = round(as.data.frame(acute.1000.table) %>% slice(i),3) %>% select_if(~any(. <= round(p.vals.acute, 3)[i])) %>% length() /1000
  names(props.acute.list)[[i]] = rownames(acute.1000.table)[i]
}
# Assemble the final table: empirical FDR, raw p-value, and the input matrix.
acute.FDR = as.data.frame(do.call("rbind", props.acute.list))
acute.FDR$p.value = p.vals.acute
colnames(acute.FDR)[1] = "p.adj"
acute.RESULTS = cbind(acute.FDR, acute.InputMatrix)
write.csv(acute.RESULTS, "results/bacteria__acute.v.ctrl.Results.csv")
|
/04__permute__bacteria__acute.v.ctrl.R
|
no_license
|
bixBeta/FHC-heart
|
R
| false
| false
| 2,631
|
r
|
#------------------------------------------------------------
### Acute
#------------------------------------------------------------
# load("all__matrices__norm__and__raw.Rdata")
# remove outlier sample
# acute.meta = acute.meta[-14,]
# acute.matrix = acute.matrix %>% select(-s_11043640)
acute.InputMatrix = cbind(ctrl.matrix, acute.matrix)
library(dplyr)
library(purrr)
library(tidyr)
acute.randomizeLabels <- function(inputMatrix){
# get random order of sample labels
acute.permuted.labels <- sample(colnames(inputMatrix), size = ncol(inputMatrix), replace = F)
# assign random labels to colnames of inputMatrix
colnames(inputMatrix) <- acute.permuted.labels
# subset the get the new matrix of cases
caseM <- as.data.frame(inputMatrix) %>% select(unname(unlist(filter(acute.meta, Type != "AMCffpe") %>% select(simpleName))))
# subset to get the new matrix of ctrls
ctrlM <- as.data.frame(inputMatrix) %>% select(control.meta$simpleName)
# RUN WILCOXON TEST
# create an empty list
acute.permuted.res <- list()
for (i in 1:nrow(inputMatrix)) {
acute.permuted.res[[i]] <- wilcox.test(x = unlist(unname(caseM[i,])), y = unlist(unname(ctrlM[i,])),
alternative = "two.sided", paired = F, conf.level = "0.95", exact = F, correct = F)
}
names(acute.permuted.res) <- rownames(inputMatrix)
return(acute.permuted.res)
}
MEGA.acute.permuted.res = list()
for (i in 1:1000) {
MEGA.acute.permuted.res[[i]] <- acute.randomizeLabels(inputMatrix = acute.InputMatrix)
}
saveRDS(MEGA.acute.permuted.res, "results/bacteria_MEGA.acute.permuted.res.Rds")
getPs.acute <- function(x){
LCf = list()
for (i in 1:length(MEGA.acute.permuted.res)) {
LCf[[i]] <- pluck(MEGA.acute.permuted.res, i, x, 'p.value')
}
return(unlist(LCf))
}
allPs.list.Acute = list()
for (i in 1:nrow(ctrl.matrix)) {
allPs.list.Acute[[i]] <- getPs.acute(x = i)
}
names(allPs.list.Acute) <- rownames(ctrl.matrix)
acute.1000.table = do.call("rbind", allPs.list.Acute)
props.acute.list = list()
for (i in 1:nrow(acute.1000.table)) {
props.acute.list[[i]] = round(as.data.frame(acute.1000.table) %>% slice(i),3) %>% select_if(~any(. <= round(p.vals.acute, 3)[i])) %>% length() /1000
names(props.acute.list)[[i]] = rownames(acute.1000.table)[i]
}
acute.FDR = as.data.frame(do.call("rbind", props.acute.list))
acute.FDR$p.value = p.vals.acute
colnames(acute.FDR)[1] = "p.adj"
acute.RESULTS = cbind(acute.FDR, acute.InputMatrix)
write.csv(acute.RESULTS, "results/bacteria__acute.v.ctrl.Results.csv")
|
# Walmart Sales
# Data Visualization
# Loads the Kaggle Walmart sales data, merges train/test with store and
# feature tables, and produces exploratory plots of weekly sales by store,
# department and store type.
library(readr)
# Store Dept Date Weekly_Sales IsHoliday: 2010-02-05 ~ 2012-10-26
# NOTE(review): each dataset is read twice (two machine-specific paths);
# the second read.csv overwrites the first — apparently intentional for
# switching machines, but only the second path is ever used as written.
data1<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_train.csv")
data1<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_train.csv")
# Store Type Size
data2<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_stores.csv")
data2<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_stores.csv")
# Store Date Temperature Fuel_Price Markdown1-5 CPI Unemployment IsHoliday
data3<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_features.csv")
data3<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_features.csv")
# Store Dept Date IsHoliday: 2012-11-02 ~ 2013-07-26
data4<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_test.csv")
data4<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_test.csv")
# training dataset: 2010-02-05 ~ 2012-10-26
train<-merge(data1,merge(data2,data3))
# testing dataset: 2012-11-02 ~ 2013-07-26
test<-merge(data4,merge(data2,data3))
rm(data1,data2,data3,data4)
summary(train)
dim(train)
# Analysis for train data only
# some variables include missing values
train$Date<-as.Date(train$Date)
train$Store<-as.factor(train$Store)
train$Dept<-as.factor(train$Dept)
train$Type<-as.factor(train$Type)
with(train,table(Dept,Store)) # 45 stores with 99 (index) departments
with(train,table(Store,Type)) # 3 types (Store and Type are exclusive)
with(train,table(Dept,Type))
library(ggplot2)
# 99 Depts within 45 Stores which are in 3 types
ggplot(data=train,aes(x=Store,fill=Dept))+geom_bar()
ggplot(data=train,aes(x=Store,fill=Dept))+geom_bar()+facet_grid(Type ~ .)
ggplot(data=train,aes(x=Dept,fill=Type))+geom_bar()+coord_flip()
# Sales<-train(,c("Store","Dept","Weekly_Sales","Type","Size"))
# (Store, Type, Size) is used to identify each store individually
# total sales across 45 stores
Sales_Total<-aggregate(train$Weekly_Sales,
                       by=list(Store=train$Store,Type=train$Type,Size=train$Size),sum)
s0<-labs(title="Walmart Sales by Stores (429 Weeks Total)",y="Sales $")
ggplot(data=Sales_Total,aes(x=Store,y=x,fill=Type))+geom_bar(stat="identity")+s0
# per-square-foot view: total sales divided by store size
ggplot(data=Sales_Total,aes(x=Store,y=I(x/Size),fill=Type))+geom_bar(stat="identity")+s0
# Weekly_Sales trends
# 429 weeks, 3 types of stores (aggregate over 45 stores and 99 departments in each store)
Sales_All<-aggregate(train$Weekly_Sales,
                     by=list(Date=train$Date,Type=train$Type),sum)
# 429 weeks, 3 types of 45 stores (aggregate over 99 departments in each store)
Sales_Store<-aggregate(train$Weekly_Sales,
                       by=list(Store=train$Store,Date=train$Date,Type=train$Type),sum)
# 429 weeks, 3 types of 99 departments (aggregate over 45 stores)
Sales_Dept<-aggregate(train$Weekly_Sales,
                      by=list(Dept=train$Dept,Date=train$Date,Type=train$Type),sum)
g0<-labs(title="Walmart Sales Trend",x="Date",y="Weekly Sales")
g1<-ggplot(data=Sales_All,aes(x=Date,y=x)) + geom_line(aes(col=Type))
g1+g0
g2<-ggplot(data=Sales_Store,aes(x=Date,y=x)) + geom_line(aes(col=Store))
g2+g0
g2+facet_grid(Type ~ .)+g0
g3<-ggplot(data=Sales_Dept,aes(x=Date,y=x)) + geom_line(aes(col=Dept))
g3+g0
g3+facet_grid(Type ~ .)+g0
|
/R/big data/case3/case3_1.R
|
no_license
|
hunterlinsq/R-in-SOE
|
R
| false
| false
| 3,421
|
r
|
# Walmart Sales
# Data Visualization
#
library(readr)
# Store Dept Date Weekly_Sales IsHoliday: 2010-02-05 ~ 2012-10-26
data1<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_train.csv")
data1<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_train.csv")
# Store Type Size
data2<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_stores.csv")
data2<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_stores.csv")
# Store Date Temperature Fuel_Price Markdown1-5 CPI Unemplyment IsHoliday
data3<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_features.csv")
data3<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_features.csv")
# Store Dept Date IsHoliday: 2012-11-02 ~ 2013-07-26
data4<-read_csv("C:\\Users\\44180\\Documents\\Surface-workandstudy\\soe\\bigdata\\Walmart_test.csv")
data4<-read_csv("D:\\PC-workandstudy\\soe\\bigdata\\data of case3\\Walmart_test.csv")
# training dataset: 2010-02-05 ~ 2012-10-26
train<-merge(data1,merge(data2,data3))
# testing dataset: 2012-11-02 ~ 2013-07-26
test<-merge(data4,merge(data2,data3))
rm(data1,data2,data3,data4)
summary(train)
dim(train)
# Analysis for train data only
# some variables include missing values
train$Date<-as.Date(train$Date)
train$Store<-as.factor(train$Store)
train$Dept<-as.factor(train$Dept)
train$Type<-as.factor(train$Type)
with(train,table(Dept,Store)) # 45 stores with 99 (index) departments
with(train,table(Store,Type)) # 3 types (Store and Type are exclusive)
with(train,table(Dept,Type))
library(ggplot2)
# 99 Depts within 45 Stores which are in 3 types
ggplot(data=train,aes(x=Store,fill=Dept))+geom_bar()
ggplot(data=train,aes(x=Store,fill=Dept))+geom_bar()+facet_grid(Type ~ .)
ggplot(data=train,aes(x=Dept,fill=Type))+geom_bar()+coord_flip()
# Sales<-train(,c("Store","Dept","Weekly_Sales","Type","Size"))
# (Store, Type, Size) is used to identify each store individually
# total sales across 45 stores
Sales_Total<-aggregate(train$Weekly_Sales,
by=list(Store=train$Store,Type=train$Type,Size=train$Size),sum)
s0<-labs(title="Walmart Sales by Stores (429 Weeks Total)",y="Sales $")
ggplot(data=Sales_Total,aes(x=Store,y=x,fill=Type))+geom_bar(stat="identity")+s0
ggplot(data=Sales_Total,aes(x=Store,y=I(x/Size),fill=Type))+geom_bar(stat="identity")+s0
# Weekly_Sales trends
# 429 weeks, 3 types of stores (aggregate over 45 stores and 99 departments in each store)
Sales_All<-aggregate(train$Weekly_Sales,
by=list(Date=train$Date,Type=train$Type),sum)
# 429 weeks, 3 types of 45 stores (aggregate over 99 departments in each store)
Sales_Store<-aggregate(train$Weekly_Sales,
by=list(Store=train$Store,Date=train$Date,Type=train$Type),sum)
# 429 weeks, 3 types of 99 departments (aggregate over 45 stores)
Sales_Dept<-aggregate(train$Weekly_Sales,
by=list(Dept=train$Dept,Date=train$Date,Type=train$Type),sum)
g0<-labs(title="Walmart Sales Trend",x="Date",y="Weekly Sales")
g1<-ggplot(data=Sales_All,aes(x=Date,y=x)) + geom_line(aes(col=Type))
g1+g0
g2<-ggplot(data=Sales_Store,aes(x=Date,y=x)) + geom_line(aes(col=Store))
g2+g0
g2+facet_grid(Type ~ .)+g0
g3<-ggplot(data=Sales_Dept,aes(x=Date,y=x)) + geom_line(aes(col=Dept))
g3+g0
g3+facet_grid(Type ~ .)+g0
|
# Collect HiCCUPS loop-call tables (10 kb resolution) from every replicate,
# merge them on loop coordinates, and flag loops significant in ANY of the
# four HiCCUPS local-background tests at FDR < 0.1.
# BUG FIX: fread() was used without loading data.table.
library(data.table)
setwd("../../analysis/hiccup_loops")
files= list.files(pattern="requested_list_10000",path="merged_loops",full.names=T,recursive=T)
# replicate labels extracted from the directory names, e.g. "D01_HiC_Rep1"
rep_names = sub("merged_loops/(D.._HiC_Rep.)/requested_list_10000","\\1",files)
# generalized: iterate over however many replicate files were found
# (the original hard-coded 1:12)
dat = vector("list", length(files))
for (i in seq_along(files)){
  dat[[i]] = data.frame(fread(files[[i]]))
}
# outer-merge all replicates on loop coordinates (chr1, x1, y1)
datm = Reduce(function(...)merge(...,by=c("chr1","x1","y1"),all.x=T,all.y=T),dat)
# NOTE(review): columns 17-20 of each replicate block are assumed to be the
# four HiCCUPS FDR columns, with a stride of 17 per replicate — verify
# against the requested_list_10000 format.
B = datm[,seq(17,ncol(datm),17)]
D = datm[,seq(18,ncol(datm),17)]
H = datm[,seq(19,ncol(datm),17)]
V = datm[,seq(20,ncol(datm),17)]
#Sig = ( B<0.05 | D <0.05 | H < 0.05 | V <0.05 )
Sig = ( B<0.1 | D <0.1 | H < 0.1 | V <0.1 )
#output = cbind(datm[,c(1,2,3)],datm[,seq(8,ncol(datm),17)],datm[,seq(17,ncol(datm),17)],datm[,seq(18,ncol(datm),17)])
out = cbind(datm[,c(1,2,3)],datm[,seq(8,ncol(datm),17)],Sig)
colnames(out) = c("chr","x1","y1",paste0("ob.",rep_names),paste0("sig.",rep_names))
write.table(out,"combined_loops.uniq.counts.hiccup_tests.txt",row.names=F,quote=F,sep='\t')
|
/archive/hiccup_loop/concatenate_loop_tests.r
|
no_license
|
bioinfx/cvdc_scripts
|
R
| false
| false
| 947
|
r
|
setwd("../../analysis/hiccup_loops")
files= list.files(pattern="requested_list_10000",path="merged_loops",full.names=T,recursive=T)
names = sub("merged_loops/(D.._HiC_Rep.)/requested_list_10000","\\1",files)
dat = list()
for (i in 1:12){
dat[[i]] = data.frame(fread(files[[i]]))
}
datm = Reduce(function(...)merge(...,by=c("chr1","x1","y1"),all.x=T,all.y=T),dat)
B = datm[,seq(17,ncol(datm),17)]
D = datm[,seq(18,ncol(datm),17)]
H = datm[,seq(19,ncol(datm),17)]
V = datm[,seq(20,ncol(datm),17)]
#Sig = ( B<0.05 | D <0.05 | H < 0.05 | V <0.05 )
Sig = ( B<0.1 | D <0.1 | H < 0.1 | V <0.1 )
#output = cbind(datm[,c(1,2,3)],datm[,seq(8,ncol(datm),17)],datm[,seq(17,ncol(datm),17)],datm[,seq(18,ncol(datm),17)])
out = cbind(datm[,c(1,2,3)],datm[,seq(8,ncol(datm),17)],Sig)
colnames(out) = c("chr","x1","y1",paste0("ob.",names),paste0("sig.",names))
write.table(out,"combined_loops.uniq.counts.hiccup_tests.txt",row.names=F,quote=F,sep='\t')
|
# Set dir
# Point the working directory at the folder containing the script currently
# open in RStudio, then echo the resulting directory. Requires rstudioapi
# (only works when run inside RStudio).
set_wdir <- function() {
  library(rstudioapi)
  script_file <- getActiveDocumentContext()$path
  setwd(dirname(script_file))
  print(getwd())
}
# Set directory
set_wdir()
# Libraries
library(ggplot2)
library(reshape)
library(dplyr)
library(plyr)
library(tidyr)
library(kernlab)
library(ggsci)
library(BSDA) # z.test
set.seed(2511)
# sources
source("functions/data_cleaning.R")
source("functions/data_manipulation.R")
source("functions/utils.R")
source("functions/plot.R")
source("functions/clustering.R")
# run all days together - merge data
# input_dir <- "data/merge/test"
# results_dir <- paste(input_dir,"merged-results",sep="/")
# weekday_files <- list.files(paste(input_dir, "weekday",sep="/"))
# weekend_files <- list.files(paste(input_dir, "weekend",sep="/"))
# mergeData(input_dir, results_dir, weekday_files, weekend_files)
# Load the CDR (call detail record) CSV and keep only the columns used by
# the analysis, with time/date coerced to factors.
#   data_dir    - path to the CSV file
#   data_clean  - if TRUE, run initialCleaning() (from data_cleaning.R) first
#   weekend_sep - NOTE(review): accepted but never used in this function;
#                 either remove it or wire it up — confirm intent.
getDataFrame <- function(data_dir, data_clean=FALSE, weekend_sep=FALSE){
  if(data_clean){
    initialCleaning()
  }
  df_full <- read.csv(data_dir,sep=",",header=T)
  df_full <- subset(df_full, select=c("square_id", "internet_traffic", "activity_date","activity_time","weekday"))
  df_full$activity_time <- as.factor(df_full$activity_time)
  df_full$activity_date <- as.factor(df_full$activity_date)
  return(df_full)
}
# Write one CSV per (day, cluster, weekday/weekend) combination under
# results_dir_ml/weekday or results_dir_ml/weekend.
# BUG FIX: in the original, the inner filter() re-filtered the FULL frame
# (ignoring the day `d`), so the initial per-day filter was dead code and
# every day's file contained identical, non-day-filtered rows. The inner
# subsets now start from the day-filtered frame, matching the file naming.
writeClustersCsv <- function(df_full_clustered, nclusters, results_dir_ml){
  days <- unique(df_full_clustered$activity_date)
  for(d in days){
    # rows for this day only
    df_day <- filter(df_full_clustered, activity_date == d)
    for(i in seq_len(nclusters)){
      for(j in 0:1){ # 1 = weekday, 0 = weekend
        df_out <- filter(df_day, cluster == i & weekday == j)
        # drop the first three (identifier) columns, as before
        df_out <- select(df_out, -c(1,2,3))
        subdir <- if(j == 1) "weekday" else "weekend"
        write.csv(df_out, file = paste(results_dir_ml, subdir, paste(d,"df_full_cluster",i,".csv",sep=""), sep="/"))
      }
    }
  }
}
runAnalysis <- function(df_full, results_dir, simulation_name, simulation_type, nclusters){
cur_time <- Sys.time()
simulation_time <- strftime(cur_time, format="%Y-%m-%d_%H-%M")
dir.create(file.path(results_dir), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"csv"), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"pdf"), showWarnings = FALSE)
# dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"ml-inputdata"), showWarnings = FALSE)
# dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"ml-inputdata","weekend"), showWarnings = FALSE)
# dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"ml-inputdata","weekday"), showWarnings = FALSE)
results_dir_full = paste(results_dir, simulation_name, simulation_time, simulation_type, sep="/")
results_dir_full_pdf = paste(results_dir_full, "pdf", sep="/")
results_dir_full_csv = paste(results_dir_full, "csv", sep="/")
# results_dir_ml = paste(results_dir_full, "ml-inputdata", sep="/")
# Aggregate XY
df_internet_ag_sum <- aggragateTrafficXY(df_full)
if(simulation_type == "milano"){
x_max <- 80
x_min <- 40
y_max <- 75
y_min <- 35
# df_internet_ag_sum_fullmap <- df_internet_ag_sum
df_internet_ag_sum <- subMap(df_internet_ag_sum, x_max, x_min, y_max, y_min)
}else if (simulation_type == "anomaly"){
x_max <- 61+1
x_min <- 59-1
y_max <- 51+1
y_min <- 50-1
# df_internet_ag_sum_fullmap <- df_internet_ag_sum
df_internet_ag_sum <- subMap(df_internet_ag_sum, x_max, x_min, y_max, y_min)
}
# START SIMULATION
pdf(paste(results_dir_full_pdf,"plots.pdf", sep="/"))
# elbow test
elbowTest(select(df_internet_ag_sum,-c(activity_date)))
# plot heat map
norm_df_internet_ag_sum <- df_internet_ag_sum
norm_df_internet_ag_sum$internet_traffic <- normalize(df_internet_ag_sum$internet_traffic)
norm_weekday_df_internet_ag_sum <- filter(df_internet_ag_sum, weekday == 1)
norm_weekend_df_internet_ag_sum <- filter(df_internet_ag_sum, weekday == 0)
plotHeatMap(norm_df_internet_ag_sum)
plotHeatMap2(norm_df_internet_ag_sum)
# write.csv(df_internet_ag_sum, file = paste(results_dir_full_csv, "df_internet_ag_sum.csv", sep="/"))
# Clustering
df_internet_ag_sum_clustered <- applyKmeans(df_internet_ag_sum, nclusters=5) # Forgy
pdf(paste(results_dir_full_pdf, "clusters.pdf",sep="/"))
p <- ggplot(df_internet_ag_sum_clustered, aes(x,y))
print(p + geom_point(shape = 15, aes(colour=cluster), size=3)+ coord_fixed(ratio = 1) + labs(colour = "Cluster")+ xlab("Square.x") + ylab("Square.y")+scale_color_npg()+ theme_bw())#scale_color_manual(values=c("#F8766D", "#A3A500", "#00BF7D", "#00B0F6", "#E76BF3"))) ##scale_color_manual(values = c("#FC4E07", "#E7B800", "#00AFBB", "#4E84C4", "#52854C")))# + scale_color_brewer(palette="Set2"))
dev.off()
# write.csv(df_internet_ag_sum_clustered, file = paste(results_dir_full_csv,"df_internet_ag_sum_clustered.csv", sep="/"))
# Boxplot (normalized)
df_internet_full_clustered <- mergeClusterActivityTime(df_full, df_internet_ag_sum_clustered, TRUE)
# write.csv(df_internet_full_clustered, file = paste(results_dir_full_csv,"df_internet_full_clustered.csv", sep="/"))
df_internet_full_clustered_norm <- df_internet_full_clustered
#df_internet_full_clustered_norm$internet_traffic <- normalize(df_internet_full_clustered$internet_traffic)
df_internet_full_clustered_norm$internet_traffic <- scales::rescale(df_internet_full_clustered_norm$internet_traffic, to=c(0,1))
# Write CSV for full data clustered (norm)
# write.csv(df_internet_full_clustered_norm, file = paste(results_dir_full_csv,"df_internet_full_clustered-norm.csv", sep="/"))
# Write filtered full data clustered (separated by cluster and day type) (norm)
# writeClustersCsv(df_internet_full_clustered_norm, nclusters, results_dir_ml)
# for(i in 0:1){
# boxplotActivityCluster(filter(df_internet_full_clustered_norm, weekday==i), nclusters)
# if(i==0){
# write.csv(filter(df_internet_full_clustered_norm, weekday==i), file = paste(results_dir_full_csv,"weekend-df_internet_clustered_norm.csv", sep="/"))
# }else{
# write.csv(filter(df_internet_full_clustered_norm, weekday==i), file = paste(results_dir_full_csv,"weekday-df_internet_clustered_norm.csv", sep="/"))
# }
# }
df_internet_full_sum_clustered <- aggregate(internet_traffic ~ weekday + cluster + activity_time, select(df_internet_full_clustered,-c(activity_date)), FUN=mean)
df_internet_full_sum_clustered$internet_traffic <- normalize(df_internet_full_sum_clustered$internet_traffic)
# # Separated by date
# df_internet_full_sum_clustered_wdate <- aggregate(internet_traffic ~ weekday + cluster + activity_time + activity_date, df_internet_full_clustered, FUN=mean)
# df_internet_full_sum_clustered_wdate$internet_traffic <- normalize(df_internet_full_sum_clustered_wdate$internet_traffic)
# for(i in 0:1){
#
# barplotActivityCluster(filter(df_internet_full_sum_clustered, weekday==i), nclusters, divide=FALSE)
# if(i == 0){
# write.csv(filter(df_internet_full_sum_clustered, weekday==i), file = paste(results_dir_full_csv,"weekend-df_internet_sum_clustered.csv", sep="/"))
# }else{
# write.csv(filter(df_internet_full_sum_clustered, weekday==i), file = paste(results_dir_full_csv,"weekday-df_internet_sum_clustered.csv", sep="/"))
# }
#
# }
df_internet_full_sum_clustered_sd <- data.frame(weekday=factor(),
cluster=factor(),
activity_time=factor(),
internet_traffic=numeric(),
internet_traffic_sd=numeric(),
stringsAsFactors=FALSE)
for(i in 1:nclusters){
for(j in 0:1){
df_act <- subset(filter(filter(df_internet_full_sum_clustered, weekday==j), cluster == i), select=c("activity_time","internet_traffic"))
df_act <- merge(getICPerTime(filter(select(df_internet_full_clustered_norm,-c(activity_date)), cluster == i), 1), df_act, by=c("activity_time"))
df_act <- df_act[order(df_act$activity_time),]
df_act <- df_act[,c(1,3,2)]
colnames(df_act) <- c("activity_time","internet_traffic","internet_traffic_sd")
rows = data.frame(rep(j, nrow(df_act)), rep(i, nrow(df_act)), df_act$activity_time, df_act$internet_traffic, df_act$internet_traffic_sd)
colnames(rows) <- c("weekday","cluster","activity_time","internet_traffic","internet_traffic_sd")
df_internet_full_sum_clustered_sd <- rbind(df_internet_full_sum_clustered_sd, rows)
if(j == 0){
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekend-cluster",i,".csv", sep=""), sep="/"))
}else{
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekday-cluster",i,".csv", sep=""), sep="/"))
}
}
}
dev.off()
# #df_internet_full_sum_clustered_sd <- aggregate(internet_traffic ~ weekday + cluster + activity_time, select(df_internet_full_sum_clustered_wdate_sd,-c(activity_date)), FUN=mean)
# #df_internet_full_sum_clustered_sd$internet_traffic_sd <- (aggregate(internet_traffic_sd ~ weekday + cluster + activity_time, select(df_internet_full_sum_clustered_wdate_sd,-c(activity_date)), FUN=mean))$internet_traffic_sd
# df_internet_full_sum_clustered_wdate_sd %>%
# group_by(weekday, cluster, activity_time) %>%
# summarise_at(vars(-activity_date), funs(mean(., na.rm=TRUE)))
weekday.labs <- c("Weekend","Weekday")
names(weekday.labs) <- c(0,1)
df_internet_full_sum_clustered_sd$weekday = factor(df_internet_full_sum_clustered_sd$weekday, levels=c(1,0))
#df_internet_full_sum_clustered_sd$activity_time <- as.numeric(levels(df_internet_full_sum_clustered_sd$activity_time))[df_internet_full_sum_clustered_sd$activity_time]
pdf(file = paste(results_dir_full_pdf,"resume.pdf", sep="/"), width = 21, height = 6 ) # numbers are cm
print(ggplot(data=df_internet_full_sum_clustered_sd, aes(x=activity_time, y=internet_traffic)) +
geom_bar(stat="identity") +
xlab("Hour of day") + ylab("Internet traffic")+ scale_x_discrete(breaks=seq(0,24,1))+ #scale_x_continuous(limits=c(0, 24),breaks=seq(0,24,1))+
facet_grid(weekday~cluster, labeller = labeller(weekday = weekday.labs))+theme_bw()+
geom_errorbar(aes(ymin=internet_traffic-internet_traffic_sd, ymax=internet_traffic+internet_traffic_sd), width=.2,
position=position_dodge(.9)))
dev.off()
write.csv(df_internet_full_sum_clustered_sd, file = paste(results_dir_full_csv, paste("fresult-df_internet_full_sum_clustered_sd.csv", sep=""), sep="/"))
# Separated by date
df_internet_full_sum_clustered_wdate <- aggregate(internet_traffic ~ weekday + cluster + activity_time + activity_date, df_internet_full_clustered, FUN=mean)
df_internet_full_sum_clustered_wdate$internet_traffic <- normalize(df_internet_full_sum_clustered_wdate$internet_traffic)
sep_date <- TRUE
if(sep_date){
df_internet_full_sum_clustered_wdate_sd <- data.frame(weekday=factor(),
cluster=factor(),
activity_date=factor(),
activity_time=factor(),
internet_traffic=numeric(),
internet_traffic_sd=numeric(),
stringsAsFactors=FALSE)
for(i in 1:nclusters){
for(j in 0:1){
df_act <- subset(filter(filter(df_internet_full_sum_clustered_wdate, weekday==j), cluster == i), select=c("activity_date","activity_time","internet_traffic"))
df_act <- merge(getICPerTime(filter(df_internet_full_clustered_norm, weekday==j, cluster == i), 1, TRUE), df_act, by=c("activity_date","activity_time"))
df_act <- df_act[order(df_act$activity_time),]
df_act <- df_act[,c(1,2,4,3)]
colnames(df_act) <- c("activity_date","activity_time","internet_traffic","internet_traffic_sd")
rows = data.frame(rep(j, nrow(df_act)), rep(i, nrow(df_act)), df_act$activity_date, df_act$activity_time, df_act$internet_traffic, df_act$internet_traffic_sd)
colnames(rows) <- c("weekday","cluster","activity_date","activity_time","internet_traffic","internet_traffic_sd")
df_internet_full_sum_clustered_wdate_sd <- rbind(df_internet_full_sum_clustered_wdate_sd, rows)
if(j == 0){
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekend-sepdays-cluster",i,".csv", sep=""), sep="/"))
}else{
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekday-sepdays-cluster",i,".csv", sep=""), sep="/"))
}
}
}
# weekday.labs <- c("Weekend","Weekday")
# names(weekday.labs) <- c(0,1)
# df_internet_full_sum_clustered_wdate_sd$weekday = factor(df_internet_full_sum_clustered_wdate_sd$weekday, levels=c(1,0))
# for(d in unique(df_internet_full_sum_clustered_wdate[["activity_date"]])){
#
# pdf(file = paste(results_dir_full_pdf,"/resume",d,".pdf", sep=""), width = 21, height = 6 ) # numbers are cm
# print(ggplot(data=filter(df_internet_full_sum_clustered_wdate_sd, activity_date == d), aes(x=activity_time, y=internet_traffic)) +
# geom_bar(stat="identity") +
# xlab("Hour of day") + ylab("Internet traffic") +
# facet_grid(weekday~cluster, labeller = labeller(weekday = weekday.labs))+theme_bw()+
# geom_errorbar(aes(ymin=internet_traffic-internet_traffic_sd, ymax=internet_traffic+internet_traffic_sd), width=.2,
# position=position_dodge(.9)))
# dev.off()
# }
weekday.labs <-unique(df_internet_full_sum_clustered_wdate$activity_date)
weekday.labs <- weekday.labs[-1]
weekday.labs <- sort(weekday.labs)
names(weekday.labs) <- c(1:length(weekday.labs[-1]))
df_internet_full_sum_clustered_wdate_sd$activity_date = factor(df_internet_full_sum_clustered_wdate_sd$activity_date)
df_internet_full_sum_clustered_wdate_sd <- filter(df_internet_full_sum_clustered_wdate_sd, activity_date != sort(unique(df_internet_full_sum_clustered_wdate_sd$activity_date))[1])
# weekday.labs <-unique(df_internet_full_sum_clustered_wdate$activity_date)
# weekday.labs <- weekday.labs
# names(weekday.labs) <- c(1:length(weekday.labs))
# df_internet_full_sum_clustered_wdate_sd$activity_date = factor(df_internet_full_sum_clustered_wdate_sd$activity_date)
pdf(file = paste(results_dir_full_pdf,"resume-sep-days.pdf", sep="/"), width = 21, height = 30 ) # numbers are cm
print(ggplot(data=filter(df_internet_full_sum_clustered_wdate_sd), aes(x=activity_time, y=internet_traffic)) +
geom_bar(stat="identity") +
xlab("Hour of day") + ylab("Internet traffic")+ scale_x_discrete(breaks=seq(0,24,1))+#scale_x_continuous(limits=c(0, 24),breaks=seq(0,24,1))+
facet_grid(activity_date~cluster, labeller = labeller(activity_day = weekday.labs))+theme_bw()+
geom_errorbar(aes(ymin=internet_traffic-internet_traffic_sd, ymax=internet_traffic+internet_traffic_sd), width=.2,
position=position_dodge(.9)))
dev.off()
write.csv(df_internet_full_sum_clustered_wdate_sd, file = paste(results_dir_full_csv, paste("fresult-df_internet_full_sum_clustered_wdate_sd.csv", sep=""), sep="/"))
}
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==1] <- "c4"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==4] <- "c1"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster=="c1"] <- 1
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster=="c4"] <- 4
#
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==1] <- "Cluster 1"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==2] <- "Cluster 2"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==3] <- "Cluster 3"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==4] <- "Cluster 4"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==5] <- "Cluster 5"
}
# Set simulation parameters
results_dir <- "results"
simulation_name <- "1day" #week, 5days...
simulation_type <- "anomaly" # milano / fullmap / trento
data_dir <- paste("data/","sms-call-internet-mi-2013-11-18.txt-minutes_cln.csv",sep="")
nclusters <- 5
# Get data
# Fix: the load below was commented out, so `df_full` was undefined and the
# subset() call failed whenever the script was run in a fresh session.
df_full <- getDataFrame(data_dir)
df_full <- subset(df_full, select=c("square_id", "internet_traffic", "activity_date","activity_time"))
df_full$activity_time <- as.factor(df_full$activity_time)
df_full$activity_date <- as.factor(df_full$activity_date)
# activity_time levels are numeric strings; round to 2 decimals and re-factor
# so long fractional times collapse into common bins.
df_full$activity_time <- round(as.numeric(levels(df_full$activity_time))[as.integer(df_full$activity_time)],2)
df_full$activity_time <- as.factor(df_full$activity_time)
# Single-day run: mark every row as a weekday.
df_full$weekday <- 1
# Run simulation
runAnalysis(df_full, results_dir, simulation_name, simulation_type, nclusters)
# # run all days separated
# input_dir <- "data/29jan2020/test"
#
# weekday_files <- list.files(paste(input_dir,"weekday/", sep="/"))
# weekend_files <- list.files(paste(input_dir,"weekend/", sep= "/"))
#
# i <- 1
# for(file in weekday_files[-1]){
# results_dir <- "results"
# simulation_name <- "oneday" #week, 5days...
# data_clean <- FALSE
# simulation_type <- "milano" # milano / fullmap
#
# weekday_data_dir <- paste(input_dir,"weekday", weekday_files[i], sep="/")
# weekend_data_dir <- paste(input_dir,"weekend", weekend_files[i], sep="/")
# # Test
# # weekday_data_dir <- "data/cleaned/ml-input/oneday/test/oneday-weekday-test_cln.csv"
# # weekend_data_dir <- "data/cleaned/ml-input/oneday/test/oneday-weekend-test_cln.csv"
#
# nclusters <- 5
# # Get data
# df_full <- getDataFrame(weekday_data_dir, weekend_data_dir, data_clean)
#
# # Run simulation
# runAnalysis(df_full, results_dir, simulation_name, simulation_type, nclusters)
#
# i <- i + 1
# }
|
/r-datapreprocess/cluster.R
|
permissive
|
jonathanalmd/anomaly-detection-in-mobile-networks
|
R
| false
| false
| 19,072
|
r
|
# Set the working directory to the folder that contains the document
# currently open in RStudio (requires the rstudioapi package and an
# active RStudio session). Prints the resulting working directory.
set_wdir <- function(){
  library(rstudioapi)
  doc_path <- getActiveDocumentContext()$path
  setwd(dirname(doc_path))
  print(getwd())
}
# Set directory
set_wdir()
# Libraries
# NOTE: plyr is attached BEFORE dplyr on purpose. Attaching plyr after dplyr
# masks dplyr verbs (mutate, summarise, arrange, rename, count, ...), which
# silently breaks dplyr pipelines; dplyr's startup message recommends loading
# plyr first when both packages are needed.
library(ggplot2)
library(reshape)
library(plyr)
library(dplyr)
library(tidyr)
library(kernlab)
library(ggsci)
library(BSDA) # z.test
set.seed(2511)
# sources
source("functions/data_cleaning.R")
source("functions/data_manipulation.R")
source("functions/utils.R")
source("functions/plot.R")
source("functions/clustering.R")
# run all days together - merge data
# input_dir <- "data/merge/test"
# results_dir <- paste(input_dir,"merged-results",sep="/")
# weekday_files <- list.files(paste(input_dir, "weekday",sep="/"))
# weekend_files <- list.files(paste(input_dir, "weekend",sep="/"))
# mergeData(input_dir, results_dir, weekday_files, weekend_files)
# Load the activity CSV and keep only the columns used by the pipeline.
#
# Args:
#   data_dir:    path to the input CSV (comma separated, with header).
#   data_clean:  if TRUE, run initialCleaning() (from functions/data_cleaning.R)
#                before reading.
#   weekend_sep: accepted for backward compatibility; currently unused.
#
# Returns a data.frame with square_id, internet_traffic, activity_date,
# activity_time and weekday; the two activity columns are converted to factors.
getDataFrame <- function(data_dir, data_clean=FALSE, weekend_sep=FALSE){
  if (data_clean) {
    initialCleaning()
  }
  raw <- read.csv(data_dir, sep = ",", header = TRUE)
  kept_cols <- c("square_id", "internet_traffic", "activity_date", "activity_time", "weekday")
  raw <- raw[, kept_cols]
  raw$activity_time <- as.factor(raw$activity_time)
  raw$activity_date <- as.factor(raw$activity_date)
  raw
}
# Write one CSV per (activity day, cluster, day-type) combination under
# results_dir_ml/weekday or results_dir_ml/weekend (both must already exist).
#
# Bug fix: the inner filter previously re-filtered the FULL data frame,
# discarding the per-day subset computed at the top of the day loop, so every
# day's output file contained rows from ALL days (only the filename differed).
# The inner filter now operates on the current day's subset.
#
# Args:
#   df_full_clustered: data frame with activity_date, cluster and weekday
#                      columns (plus the payload columns that get written out).
#   nclusters:         number of clusters to iterate over (1..nclusters).
#   results_dir_ml:    output root containing "weekday" and "weekend" subdirs.
writeClustersCsv <- function(df_full_clustered, nclusters, results_dir_ml){
  days <- unique(df_full_clustered$activity_date)
  for(d in days){
    df_day <- filter(df_full_clustered, activity_date == d)
    for(i in 1:nclusters){
      for(j in 0:1){ # j == 1: weekday, j == 0: weekend
        df_filtered <- filter(df_day, cluster == i & weekday == j)
        # Drop the first three (id/date) columns before writing, as before.
        df_filtered <- select(df_filtered, -c(1,2,3))
        if(j == 1){ # weekday
          write.csv(df_filtered, file = paste(results_dir_ml,"weekday", paste(d,"df_full_cluster",i,".csv",sep=""), sep="/"))
        }else{ # weekend
          write.csv(df_filtered, file = paste(results_dir_ml,"weekend", paste(d,"df_full_cluster",i,".csv",sep=""), sep="/"))
        }
      }
    }
  }
}
# Run the full traffic clustering/analysis pipeline and write all results
# (PDF plots and CSV tables) under a timestamped directory tree:
#   results_dir/simulation_name/<timestamp>/simulation_type/{csv,pdf}
#
# Args:
#   df_full:         data frame with square_id, internet_traffic,
#                    activity_date, activity_time, weekday columns.
#   results_dir:     root output directory (created if missing).
#   simulation_name: run label used as a sub-directory name (e.g. "1day").
#   simulation_type: "milano" or "anomaly" restrict analysis to a sub-map;
#                    any other value keeps the full map.
#   nclusters:       number of k-means clusters used in the per-cluster loops.
#                    NOTE(review): the applyKmeans call below hard-codes
#                    nclusters=5 regardless of this argument -- confirm intended.
#
# Side effects only (files on disk); no meaningful return value.
# Relies on sourced helpers: aggragateTrafficXY, subMap, elbowTest, normalize,
# plotHeatMap/plotHeatMap2, applyKmeans, mergeClusterActivityTime, getICPerTime.
runAnalysis <- function(df_full, results_dir, simulation_name, simulation_type, nclusters){
# Build the timestamped output directory tree (idempotent: warnings suppressed).
cur_time <- Sys.time()
simulation_time <- strftime(cur_time, format="%Y-%m-%d_%H-%M")
dir.create(file.path(results_dir), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"csv"), showWarnings = FALSE)
dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"pdf"), showWarnings = FALSE)
# dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"ml-inputdata"), showWarnings = FALSE)
# dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"ml-inputdata","weekend"), showWarnings = FALSE)
# dir.create(file.path(results_dir,simulation_name,simulation_time, simulation_type,"ml-inputdata","weekday"), showWarnings = FALSE)
results_dir_full = paste(results_dir, simulation_name, simulation_time, simulation_type, sep="/")
results_dir_full_pdf = paste(results_dir_full, "pdf", sep="/")
results_dir_full_csv = paste(results_dir_full, "csv", sep="/")
# results_dir_ml = paste(results_dir_full, "ml-inputdata", sep="/")
# Aggregate XY
# (sourced helper; presumably aggregates traffic per grid square x/y -- name
# contains a typo "aggragate" that is kept for compatibility)
df_internet_ag_sum <- aggragateTrafficXY(df_full)
# Optionally restrict the grid to a sub-map depending on the simulation type.
if(simulation_type == "milano"){
x_max <- 80
x_min <- 40
y_max <- 75
y_min <- 35
# df_internet_ag_sum_fullmap <- df_internet_ag_sum
df_internet_ag_sum <- subMap(df_internet_ag_sum, x_max, x_min, y_max, y_min)
}else if (simulation_type == "anomaly"){
# Small 3x2-ish window (with a 1-cell margin) around squares (59..61, 50..51).
x_max <- 61+1
x_min <- 59-1
y_max <- 51+1
y_min <- 50-1
# df_internet_ag_sum_fullmap <- df_internet_ag_sum
df_internet_ag_sum <- subMap(df_internet_ag_sum, x_max, x_min, y_max, y_min)
}
# START SIMULATION
pdf(paste(results_dir_full_pdf,"plots.pdf", sep="/"))
# elbow test
elbowTest(select(df_internet_ag_sum,-c(activity_date)))
# plot heat map
norm_df_internet_ag_sum <- df_internet_ag_sum
norm_df_internet_ag_sum$internet_traffic <- normalize(df_internet_ag_sum$internet_traffic)
# NOTE(review): the two filtered frames below are computed but never used.
norm_weekday_df_internet_ag_sum <- filter(df_internet_ag_sum, weekday == 1)
norm_weekend_df_internet_ag_sum <- filter(df_internet_ag_sum, weekday == 0)
plotHeatMap(norm_df_internet_ag_sum)
plotHeatMap2(norm_df_internet_ag_sum)
# write.csv(df_internet_ag_sum, file = paste(results_dir_full_csv, "df_internet_ag_sum.csv", sep="/"))
# Clustering
# NOTE(review): hard-coded nclusters=5 here ignores the function argument.
df_internet_ag_sum_clustered <- applyKmeans(df_internet_ag_sum, nclusters=5) # Forgy
# Scatter plot of the grid squares colored by cluster assignment.
pdf(paste(results_dir_full_pdf, "clusters.pdf",sep="/"))
p <- ggplot(df_internet_ag_sum_clustered, aes(x,y))
print(p + geom_point(shape = 15, aes(colour=cluster), size=3)+ coord_fixed(ratio = 1) + labs(colour = "Cluster")+ xlab("Square.x") + ylab("Square.y")+scale_color_npg()+ theme_bw())#scale_color_manual(values=c("#F8766D", "#A3A500", "#00BF7D", "#00B0F6", "#E76BF3"))) ##scale_color_manual(values = c("#FC4E07", "#E7B800", "#00AFBB", "#4E84C4", "#52854C")))# + scale_color_brewer(palette="Set2"))
dev.off()
# write.csv(df_internet_ag_sum_clustered, file = paste(results_dir_full_csv,"df_internet_ag_sum_clustered.csv", sep="/"))
# Boxplot (normalized)
# Attach the cluster labels back onto the full (per-time) data.
df_internet_full_clustered <- mergeClusterActivityTime(df_full, df_internet_ag_sum_clustered, TRUE)
# write.csv(df_internet_full_clustered, file = paste(results_dir_full_csv,"df_internet_full_clustered.csv", sep="/"))
df_internet_full_clustered_norm <- df_internet_full_clustered
#df_internet_full_clustered_norm$internet_traffic <- normalize(df_internet_full_clustered$internet_traffic)
df_internet_full_clustered_norm$internet_traffic <- scales::rescale(df_internet_full_clustered_norm$internet_traffic, to=c(0,1))
# Write CSV for full data clustered (norm)
# write.csv(df_internet_full_clustered_norm, file = paste(results_dir_full_csv,"df_internet_full_clustered-norm.csv", sep="/"))
# Write filtered full data clustered (separated by cluster and day type) (norm)
# writeClustersCsv(df_internet_full_clustered_norm, nclusters, results_dir_ml)
# for(i in 0:1){
#   boxplotActivityCluster(filter(df_internet_full_clustered_norm, weekday==i), nclusters)
#   if(i==0){
#     write.csv(filter(df_internet_full_clustered_norm, weekday==i), file = paste(results_dir_full_csv,"weekend-df_internet_clustered_norm.csv", sep="/"))
#   }else{
#     write.csv(filter(df_internet_full_clustered_norm, weekday==i), file = paste(results_dir_full_csv,"weekday-df_internet_clustered_norm.csv", sep="/"))
#   }
# }
# Mean traffic per (weekday, cluster, hour), normalized over the whole table.
df_internet_full_sum_clustered <- aggregate(internet_traffic ~ weekday + cluster + activity_time, select(df_internet_full_clustered,-c(activity_date)), FUN=mean)
df_internet_full_sum_clustered$internet_traffic <- normalize(df_internet_full_sum_clustered$internet_traffic)
# # Separated by date
# df_internet_full_sum_clustered_wdate <- aggregate(internet_traffic ~ weekday + cluster + activity_time + activity_date, df_internet_full_clustered, FUN=mean)
# df_internet_full_sum_clustered_wdate$internet_traffic <- normalize(df_internet_full_sum_clustered_wdate$internet_traffic)
# for(i in 0:1){
#
#   barplotActivityCluster(filter(df_internet_full_sum_clustered, weekday==i), nclusters, divide=FALSE)
#   if(i == 0){
#     write.csv(filter(df_internet_full_sum_clustered, weekday==i), file = paste(results_dir_full_csv,"weekend-df_internet_sum_clustered.csv", sep="/"))
#   }else{
#     write.csv(filter(df_internet_full_sum_clustered, weekday==i), file = paste(results_dir_full_csv,"weekday-df_internet_sum_clustered.csv", sep="/"))
#   }
#
# }
# Accumulator for the per-cluster mean traffic plus a dispersion column
# (internet_traffic_sd comes from getICPerTime -- presumably a confidence
# interval / sd per hour; confirm against functions/utils.R).
df_internet_full_sum_clustered_sd <- data.frame(weekday=factor(),
                                                cluster=factor(),
                                                activity_time=factor(),
                                                internet_traffic=numeric(),
                                                internet_traffic_sd=numeric(),
                                                stringsAsFactors=FALSE)
# For each (cluster, day type): join mean traffic with its dispersion,
# append to the accumulator and write a per-cluster CSV.
for(i in 1:nclusters){
for(j in 0:1){
df_act <- subset(filter(filter(df_internet_full_sum_clustered, weekday==j), cluster == i), select=c("activity_time","internet_traffic"))
df_act <- merge(getICPerTime(filter(select(df_internet_full_clustered_norm,-c(activity_date)), cluster == i), 1), df_act, by=c("activity_time"))
df_act <- df_act[order(df_act$activity_time),]
# Reorder columns so the mean comes before the dispersion column.
df_act <- df_act[,c(1,3,2)]
colnames(df_act) <- c("activity_time","internet_traffic","internet_traffic_sd")
rows = data.frame(rep(j, nrow(df_act)), rep(i, nrow(df_act)), df_act$activity_time, df_act$internet_traffic, df_act$internet_traffic_sd)
colnames(rows) <- c("weekday","cluster","activity_time","internet_traffic","internet_traffic_sd")
df_internet_full_sum_clustered_sd <- rbind(df_internet_full_sum_clustered_sd, rows)
if(j == 0){
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekend-cluster",i,".csv", sep=""), sep="/"))
}else{
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekday-cluster",i,".csv", sep=""), sep="/"))
}
}
}
# Closes the "plots.pdf" device opened at the start of the simulation.
dev.off()
# #df_internet_full_sum_clustered_sd <- aggregate(internet_traffic ~ weekday + cluster + activity_time, select(df_internet_full_sum_clustered_wdate_sd,-c(activity_date)), FUN=mean)
# #df_internet_full_sum_clustered_sd$internet_traffic_sd <- (aggregate(internet_traffic_sd ~ weekday + cluster + activity_time, select(df_internet_full_sum_clustered_wdate_sd,-c(activity_date)), FUN=mean))$internet_traffic_sd
# df_internet_full_sum_clustered_wdate_sd %>%
#   group_by(weekday, cluster, activity_time) %>%
#   summarise_at(vars(-activity_date), funs(mean(., na.rm=TRUE)))
# Faceted bar chart (cluster x day type) with error bars, saved as resume.pdf.
weekday.labs <- c("Weekend","Weekday")
names(weekday.labs) <- c(0,1)
df_internet_full_sum_clustered_sd$weekday = factor(df_internet_full_sum_clustered_sd$weekday, levels=c(1,0))
#df_internet_full_sum_clustered_sd$activity_time <- as.numeric(levels(df_internet_full_sum_clustered_sd$activity_time))[df_internet_full_sum_clustered_sd$activity_time]
pdf(file = paste(results_dir_full_pdf,"resume.pdf", sep="/"), width = 21, height = 6 ) # numbers are cm
print(ggplot(data=df_internet_full_sum_clustered_sd, aes(x=activity_time, y=internet_traffic)) +
          geom_bar(stat="identity") +
          xlab("Hour of day") + ylab("Internet traffic")+ scale_x_discrete(breaks=seq(0,24,1))+ #scale_x_continuous(limits=c(0, 24),breaks=seq(0,24,1))+
          facet_grid(weekday~cluster, labeller = labeller(weekday = weekday.labs))+theme_bw()+
          geom_errorbar(aes(ymin=internet_traffic-internet_traffic_sd, ymax=internet_traffic+internet_traffic_sd), width=.2,
                        position=position_dodge(.9)))
dev.off()
write.csv(df_internet_full_sum_clustered_sd, file = paste(results_dir_full_csv, paste("fresult-df_internet_full_sum_clustered_sd.csv", sep=""), sep="/"))
# Separated by date
# Same aggregation as above but additionally keyed by activity_date.
df_internet_full_sum_clustered_wdate <- aggregate(internet_traffic ~ weekday + cluster + activity_time + activity_date, df_internet_full_clustered, FUN=mean)
df_internet_full_sum_clustered_wdate$internet_traffic <- normalize(df_internet_full_sum_clustered_wdate$internet_traffic)
sep_date <- TRUE
if(sep_date){
df_internet_full_sum_clustered_wdate_sd <- data.frame(weekday=factor(),
                                                      cluster=factor(),
                                                      activity_date=factor(),
                                                      activity_time=factor(),
                                                      internet_traffic=numeric(),
                                                      internet_traffic_sd=numeric(),
                                                      stringsAsFactors=FALSE)
# Per-date variant of the accumulation loop above.
for(i in 1:nclusters){
for(j in 0:1){
df_act <- subset(filter(filter(df_internet_full_sum_clustered_wdate, weekday==j), cluster == i), select=c("activity_date","activity_time","internet_traffic"))
df_act <- merge(getICPerTime(filter(df_internet_full_clustered_norm, weekday==j, cluster == i), 1, TRUE), df_act, by=c("activity_date","activity_time"))
df_act <- df_act[order(df_act$activity_time),]
df_act <- df_act[,c(1,2,4,3)]
colnames(df_act) <- c("activity_date","activity_time","internet_traffic","internet_traffic_sd")
rows = data.frame(rep(j, nrow(df_act)), rep(i, nrow(df_act)), df_act$activity_date, df_act$activity_time, df_act$internet_traffic, df_act$internet_traffic_sd)
colnames(rows) <- c("weekday","cluster","activity_date","activity_time","internet_traffic","internet_traffic_sd")
df_internet_full_sum_clustered_wdate_sd <- rbind(df_internet_full_sum_clustered_wdate_sd, rows)
if(j == 0){
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekend-sepdays-cluster",i,".csv", sep=""), sep="/"))
}else{
write.csv(df_act, file = paste(results_dir_full_csv, paste("weekday-sepdays-cluster",i,".csv", sep=""), sep="/"))
}
}
}
# weekday.labs <- c("Weekend","Weekday")
# names(weekday.labs) <- c(0,1)
# df_internet_full_sum_clustered_wdate_sd$weekday = factor(df_internet_full_sum_clustered_wdate_sd$weekday, levels=c(1,0))
# for(d in unique(df_internet_full_sum_clustered_wdate[["activity_date"]])){
#
#   pdf(file = paste(results_dir_full_pdf,"/resume",d,".pdf", sep=""), width = 21, height = 6 ) # numbers are cm
#   print(ggplot(data=filter(df_internet_full_sum_clustered_wdate_sd, activity_date == d), aes(x=activity_time, y=internet_traffic)) +
#           geom_bar(stat="identity") +
#           xlab("Hour of day") + ylab("Internet traffic") +
#           facet_grid(weekday~cluster, labeller = labeller(weekday = weekday.labs))+theme_bw()+
#           geom_errorbar(aes(ymin=internet_traffic-internet_traffic_sd, ymax=internet_traffic+internet_traffic_sd), width=.2,
#                         position=position_dodge(.9)))
#   dev.off()
# }
# Build facet labels from the observed dates; the first (sorted) date is
# dropped from both the labels and the data before plotting.
weekday.labs <-unique(df_internet_full_sum_clustered_wdate$activity_date)
weekday.labs <- weekday.labs[-1]
weekday.labs <- sort(weekday.labs)
names(weekday.labs) <- c(1:length(weekday.labs[-1]))
df_internet_full_sum_clustered_wdate_sd$activity_date = factor(df_internet_full_sum_clustered_wdate_sd$activity_date)
df_internet_full_sum_clustered_wdate_sd <- filter(df_internet_full_sum_clustered_wdate_sd, activity_date != sort(unique(df_internet_full_sum_clustered_wdate_sd$activity_date))[1])
# weekday.labs <-unique(df_internet_full_sum_clustered_wdate$activity_date)
# weekday.labs <- weekday.labs
# names(weekday.labs) <- c(1:length(weekday.labs))
# df_internet_full_sum_clustered_wdate_sd$activity_date = factor(df_internet_full_sum_clustered_wdate_sd$activity_date)
# Per-date faceted bar chart with error bars, saved as resume-sep-days.pdf.
pdf(file = paste(results_dir_full_pdf,"resume-sep-days.pdf", sep="/"), width = 21, height = 30 ) # numbers are cm
print(ggplot(data=filter(df_internet_full_sum_clustered_wdate_sd), aes(x=activity_time, y=internet_traffic)) +
          geom_bar(stat="identity") +
          xlab("Hour of day") + ylab("Internet traffic")+ scale_x_discrete(breaks=seq(0,24,1))+#scale_x_continuous(limits=c(0, 24),breaks=seq(0,24,1))+
          facet_grid(activity_date~cluster, labeller = labeller(activity_day = weekday.labs))+theme_bw()+
          geom_errorbar(aes(ymin=internet_traffic-internet_traffic_sd, ymax=internet_traffic+internet_traffic_sd), width=.2,
                        position=position_dodge(.9)))
dev.off()
write.csv(df_internet_full_sum_clustered_wdate_sd, file = paste(results_dir_full_csv, paste("fresult-df_internet_full_sum_clustered_wdate_sd.csv", sep=""), sep="/"))
}
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==1] <- "c4"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==4] <- "c1"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster=="c1"] <- 1
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster=="c4"] <- 4
#
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==1] <- "Cluster 1"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==2] <- "Cluster 2"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==3] <- "Cluster 3"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==4] <- "Cluster 4"
# df_internet_full_sum_clustered_sd$cluster[df_internet_full_sum_clustered_sd$cluster==5] <- "Cluster 5"
}
# Set simulation parameters
results_dir <- "results"
simulation_name <- "1day" #week, 5days...
simulation_type <- "anomaly" # milano / fullmap / trento
data_dir <- paste("data/","sms-call-internet-mi-2013-11-18.txt-minutes_cln.csv",sep="")
nclusters <- 5
# Get data
# Fix: the load below was commented out, so `df_full` was undefined and the
# subset() call failed whenever the script was run in a fresh session.
df_full <- getDataFrame(data_dir)
df_full <- subset(df_full, select=c("square_id", "internet_traffic", "activity_date","activity_time"))
df_full$activity_time <- as.factor(df_full$activity_time)
df_full$activity_date <- as.factor(df_full$activity_date)
# activity_time levels are numeric strings; round to 2 decimals and re-factor
# so long fractional times collapse into common bins.
df_full$activity_time <- round(as.numeric(levels(df_full$activity_time))[as.integer(df_full$activity_time)],2)
df_full$activity_time <- as.factor(df_full$activity_time)
# Single-day run: mark every row as a weekday.
df_full$weekday <- 1
# Run simulation
runAnalysis(df_full, results_dir, simulation_name, simulation_type, nclusters)
# # run all days separated
# input_dir <- "data/29jan2020/test"
#
# weekday_files <- list.files(paste(input_dir,"weekday/", sep="/"))
# weekend_files <- list.files(paste(input_dir,"weekend/", sep= "/"))
#
# i <- 1
# for(file in weekday_files[-1]){
# results_dir <- "results"
# simulation_name <- "oneday" #week, 5days...
# data_clean <- FALSE
# simulation_type <- "milano" # milano / fullmap
#
# weekday_data_dir <- paste(input_dir,"weekday", weekday_files[i], sep="/")
# weekend_data_dir <- paste(input_dir,"weekend", weekend_files[i], sep="/")
# # Test
# # weekday_data_dir <- "data/cleaned/ml-input/oneday/test/oneday-weekday-test_cln.csv"
# # weekend_data_dir <- "data/cleaned/ml-input/oneday/test/oneday-weekend-test_cln.csv"
#
# nclusters <- 5
# # Get data
# df_full <- getDataFrame(weekday_data_dir, weekend_data_dir, data_clean)
#
# # Run simulation
# runAnalysis(df_full, results_dir, simulation_name, simulation_type, nclusters)
#
# i <- i + 1
# }
|
library(rMEA)
### Name: MEAccf
### Title: Moving-windows lagged cross-correlation routine for 'MEA'
###   objects
### Aliases: MEAccf
### ** Examples

## Locate and read a single bundled sample recording.
sample_path <- system.file("extdata/normal/200_01.txt", package = "rMEA")
mea_sample <- readMEA(sample_path, sampRate = 25, s1Col = 1, s2Col = 2,
                      s1Name = "Patient", s2Name = "Therapist", skip=1,
                      idOrder = c("id","session"), idSep="_")

## Run the moving-window lagged cross-correlation analysis.
ccf_result = MEAccf(mea_sample, lagSec = 5, winSec = 60, incSec = 30, r2Z = TRUE, ABS = TRUE)
summary(ccf_result)

## Heatmap of the analysis results for the first file.
MEAheatmap(ccf_result[[1]])
|
/data/genthat_extracted_code/rMEA/examples/MEAccf.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 668
|
r
|
library(rMEA)
### Name: MEAccf
### Title: Moving-windows lagged cross-correlation routine for 'MEA'
###   objects
### Aliases: MEAccf
### ** Examples

## Locate and read a single bundled sample recording.
sample_path <- system.file("extdata/normal/200_01.txt", package = "rMEA")
mea_sample <- readMEA(sample_path, sampRate = 25, s1Col = 1, s2Col = 2,
                      s1Name = "Patient", s2Name = "Therapist", skip=1,
                      idOrder = c("id","session"), idSep="_")

## Run the moving-window lagged cross-correlation analysis.
ccf_result = MEAccf(mea_sample, lagSec = 5, winSec = 60, incSec = 30, r2Z = TRUE, ABS = TRUE)
summary(ccf_result)

## Heatmap of the analysis results for the first file.
MEAheatmap(ccf_result[[1]])
|
library(FutureManager)
library(testthat)
# fmStatus should delegate to its wrapped data.frame value: dplyr verbs,
# name-based subsetting and printing all operate on the stored data.
test_that(
  desc = "fmStatus methods work correctly",
  code = {
    st <- fmStatus(
      id = "dummy",
      status = "success",
      message = "Job completed",
      value = iris
    )

    # dplyr verbs operate on the wrapped data.frame
    expect_equal(st %>% filter(Species == "setosa") %>% nrow(), 50)
    expect_equal(st %>% arrange(Species) %>% nrow(), 150)
    expect_equal(
      st %>% mutate(Species = toupper(Species)) %>% pull(Species) %>% unique(),
      c("SETOSA", "VERSICOLOR", "VIRGINICA")
    )
    expect_equal(
      st %>% select(Petal.Width, Petal.Length) %>% names(),
      c("Petal.Width", "Petal.Length")
    )
    expect_equal(
      st %>% rename(pw = Petal.Width, pl = Petal.Length) %>% names(),
      c("Sepal.Length", "Sepal.Width", "pl", "pw", "Species")
    )
    expect_equal(
      st %>% tbl_vars() %>% as.character(),
      c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species")
    )
    expect_equal(st %>% group_vars(), character(0))

    # `[` subsetting by column names
    expect_equal(
      st[c("Sepal.Length", "Petal.Width")] %>% names(),
      c("Sepal.Length", "Petal.Width")
    )

    # print() reports the id/status line, the message and the value class
    expect_output(print(st), "dummy \\[success\\]")
    expect_output(print(st), "msg: Job completed")
    expect_output(print(st), "value: data.frame-class object")
  }
)
# The $ accessor on fmStatus: extracts list elements on success, raises the
# stored message on error, and warns while returning an fmError on failure.
test_that(
  desc = "fmStatus value method works correctly",
  code = {
    # Success: $ extracts elements of a list value; unknown names give NULL.
    ok_status <- fmStatus(
      id = "dummy",
      status = "success",
      message = "Job completed",
      value = list(
        df = iris,
        x = "something else"
      )
    )
    expect_equal(ok_status$df, iris)
    expect_equal(ok_status$x, "something else")
    expect_null(ok_status$y)

    # Error: accessing any element raises an error matching the stored message.
    err_status <- fmStatus(
      id = "dummy2",
      status = "error",
      message = "Something went wrong",
      value = NULL
    )
    expect_error(suppressWarnings(err_status$df), "Something went wrong")

    # Failure: value is an fmError; access returns it with a warning.
    failed_status <- fmStatus(
      id = "dummy3",
      status = "failed",
      message = "task failed",
      value = fmError("missing data")
    )
    expect_s3_class(suppressWarnings(failed_status$df), "fmError")
    expect_warning(failed_status$df, "missing data")
  }
)
|
/tests/testthat/test-methods.R
|
permissive
|
Boehringer-Ingelheim/FutureManager
|
R
| false
| false
| 2,677
|
r
|
library(FutureManager)
library(testthat)
# Verifies that dplyr / tidyselect generics dispatch through an fmStatus
# object wrapping a data.frame, behaving as if applied to the data itself,
# and that print() reports the id, status, message and value class.
# Expected values are pinned to the iris dataset (150 rows, 50 per Species,
# 5 columns).
test_that(
  desc = "fmStatus methods work correctly",
  code = {
    # Successful status wrapping a plain data.frame.
    status <- fmStatus(
      id = "dummy",
      status = "success",
      message = "Job completed",
      value = iris
    )
    # Row-wise verbs: filter and arrange operate on the wrapped data.
    expect_equal(
      object = status %>% filter(Species == "setosa") %>% nrow(),
      expected = 50
    )
    expect_equal(
      object = status %>% arrange(Species) %>% nrow(),
      expected = 150
    )
    # Column-wise verbs: mutate, select and rename.
    expect_equal(
      object = status %>% mutate(Species = toupper(Species)) %>% pull(Species) %>% unique(),
      expected = c("SETOSA", "VERSICOLOR", "VIRGINICA")
    )
    expect_equal(
      object = status %>% select(Petal.Width, Petal.Length) %>% names(),
      expected = c("Petal.Width", "Petal.Length")
    )
    expect_equal(
      object = status %>% rename(pw = Petal.Width, pl = Petal.Length) %>% names(),
      expected = c("Sepal.Length", "Sepal.Width", "pl", "pw", "Species")
    )
    # tidyselect metadata helpers: column names and (empty) grouping.
    expect_equal(
      object = status %>% tbl_vars() %>% as.character(),
      expected = c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species")
    )
    expect_equal(
      object = status %>% group_vars(),
      expected = character(0)
    )
    # `[` subsetting by column name is forwarded to the wrapped data.
    expect_equal(
      object = status[c("Sepal.Length", "Petal.Width")] %>% names(),
      expected = c("Sepal.Length", "Petal.Width")
    )
    # print() shows "id [status]", the message line, and the value class.
    expect_output(
      object = print(status),
      regexp = "dummy \\[success\\]"
    )
    expect_output(
      object = print(status),
      regexp = "msg: Job completed"
    )
    expect_output(
      object = print(status),
      regexp = "value: data.frame-class object"
    )
  }
)
# The `$` method on fmStatus objects must adapt to the job outcome:
# pluck list elements on success, raise the stored message when the job
# errored with no value, and hand fmError values back with a warning.
test_that(
  desc = "fmStatus value method works correctly",
  code = {
    # Success: wrapped value is a named list, so `$` plucks its elements.
    ok <- fmStatus(
      id = "dummy",
      status = "success",
      message = "Job completed",
      value = list(
        df = iris,
        x = "something else"
      )
    )
    expect_equal(expected = iris, object = ok$df)
    expect_equal(expected = "something else", object = ok$x)
    # A missing name gives NULL, not an error.
    expect_null(ok$y)
    # Error with no value: extraction must raise the stored message.
    broken <- fmStatus(
      id = "dummy2",
      status = "error",
      message = "Something went wrong",
      value = NULL
    )
    expect_error(
      regexp = "Something went wrong",
      object = suppressWarnings(broken$df)
    )
    # Failed job holding an fmError: extraction yields the fmError object
    # and surfaces its message as a warning.
    failed <- fmStatus(
      id = "dummy3",
      status = "failed",
      message = "task failed",
      value = fmError("missing data")
    )
    expect_s3_class(
      class = "fmError",
      object = suppressWarnings(failed$df)
    )
    expect_warning(
      regexp = "missing data",
      object = failed$df
    )
  }
)
|
#------------------------------------------------------------------------------#
#' Coerce a list of rows to a data frame
#'
#' Function to coerce a list (of rows) to a data frame. Rows shorter than
#' the longest one are padded with \code{NA}.
#'
#' @param x A list. Each element corresponds to a row of the upcoming data frame.
#' @param col_names Optional. If not provided and if the first list element is
#'     a named vector, this vector is used to name the columns of the data
#'     frame; columns left unnamed that way are filled in as \code{V<index>}.
#' @param ... Further arguments passed on to \code{\link{as.data.frame.matrix}}.
#' @param stringsAsFactors Logical. Should character vectors be converted to
#'     factors? Defaults to \code{FALSE}. (The former default,
#'     \code{default.stringsAsFactors()}, was deprecated in R 4.1 and removed
#'     in R 4.3, which made this function error on modern R.)
#'
#' @examples
#' my_list <- list(c(23, 21, 41), c(3, 55), 8, c(39, 31, 14))
#' list2df(my_list)
#'
#' # Name the columns:
#' # - Option #1:
#' list2df(my_list, col_names = letters[1:3])
#' # - Option #2:
#' my_list <- list(c(a = 23, b = 21, c = 41), c(d = 3, e = 55), c(f = 8),
#'                 c(g = 39, h = 31, i = 14))
#' list2df(my_list)
#'
#' # A more tricky case:
#' my_list <- list(r1 = c(a = 23, 21), r2 = c(c = 41, d = 3, e = 55),
#'                 r3 = c(f = 8), r4 = c(g = 39, h = 31, i = 14))
#' list2df(my_list)
#'
#' @export
#------------------------------------------------------------------------------#
list2df <- function(x, col_names, ...,
                    stringsAsFactors = FALSE) {
  # Pad every row to the length of the longest one: subscripting with an
  # over-long index via "[" fills the missing positions with NA.
  seq_max <- seq_len(max(lengths(x)))
  res <- as.data.frame.matrix(t(sapply(x, "[", i = seq_max)), ...,
                              stringsAsFactors = stringsAsFactors)
  if (!missing(col_names)) {
    # Explicit names always win.
    setNames(res, col_names)
  } else if (!is.null(col_names <- names(x[[1]]))) {
    # Fall back to the names of the first row; pad to the column count and
    # repair NA/empty entries with positional V<i> names.
    col_names <- c(col_names, rep(NA, ncol(res) - length(col_names)))
    id_empty <- sort(c(which(is.na(col_names)),        # if name is NA
                       which(nchar(col_names) == 0)))  # if name is ""
    col_names[id_empty] <- paste0("V", id_empty)
    setNames(res, col_names)
  } else {
    res
  }
}
|
/R/list2df.R
|
no_license
|
chgigot/cgmisc
|
R
| false
| false
| 1,906
|
r
|
#------------------------------------------------------------------------------#
#' Coerce a list of rows to a data frame
#'
#' Function to coerce a list (of rows) to a data frame. Rows shorter than the
#' longest one are padded with \code{NA}.
#'
#' @param x A list. Each element corresponds to a row of the upcoming data frame.
#' @param col_names Optional. If not provided and if the first list element is
#'     a named vector, this vector is used to name the columns of the data
#'     frame; columns left unnamed that way are filled in as \code{V<index>}.
#' @param ... Further arguments passed on to \code{\link{as.data.frame.matrix}}.
#' @param stringsAsFactors Logical. Should the character vector be converted to
#'     a factor?
#'
#' @examples
#' my_list <- list(c(23, 21, 41), c(3, 55), 8, c(39, 31, 14))
#' list2df(my_list)
#'
#' # Name the columns:
#' # - Option #1:
#' list2df(my_list, col_names = letters[1:3])
#' # - Option #2:
#' my_list <- list(c(a = 23, b = 21, c = 41), c(d = 3, e = 55), c(f = 8),
#'                 c(g = 39, h = 31, i = 14))
#' list2df(my_list)
#'
#' # A more tricky case:
#' my_list <- list(r1 = c(a = 23, 21), r2 = c(c = 41, d = 3, e = 55),
#'                 r3 = c(f = 8), r4 = c(g = 39, h = 31, i = 14))
#' list2df(my_list)
#'
#' @export
#------------------------------------------------------------------------------#
# NOTE(review): default.stringsAsFactors() was deprecated in R 4.1 and removed
# in R 4.3, so evaluating this default errors on modern R — consider FALSE.
list2df <- function(x, col_names, ...,
                    stringsAsFactors = default.stringsAsFactors()) {
  # Pad every row to the length of the longest one; "[" with an over-long
  # index returns NA for the missing positions.
  seq_max <- seq_len(max(lengths(x)))
  res <- as.data.frame.matrix(t(sapply(x, "[", i = seq_max)), ...,
                              stringsAsFactors = stringsAsFactors)
  if (!missing(col_names)) {
    # Explicit names always win.
    setNames(res, col_names)
  } else if (!is.null(col_names <- names(x[[1]]))) {
    # Fall back to the names of the first row; pad and repair NA/empty
    # entries with positional V<i> names.
    col_names <- c(col_names, rep(NA, ncol(res) - length(col_names)))
    id_empty <- sort(c(which(is.na(col_names)), # if name is NA
                       which(nchar(col_names) == 0))) # if name is ""
    col_names[id_empty] <- paste0("V", id_empty)
    setNames(res, col_names)
  } else {
    res
  }
}
|
# Jash Mehta
# 04/13/2016
# HW SVM: predict whether a food-service inspection found violations,
# using an RBF-kernel SVM (kernlab::ksvm) on violation-count predictors.
install.packages("kernlab")  # one-time setup; comment out for repeat runs
library("kernlab")
# STEP 1: load the raw inspection data.
Food <- read.csv("C:\\Users\\jashm\\Google Drive\\IM\\687\\Home Work\\SVM\\Food_Service_Establishment__Last_Inspection.csv")
Food$V <- NULL
# STEP 2: drop incomplete rows and derive the binary target variable.
Food <- na.omit(Food)
Food$NewVio <- "Null"
head(Food$NewVio, 20)
fix(Food)  # NOTE(review): opens an interactive data editor; blocks scripted runs
# "N" when the inspection report (column 4) found no violations, else "Y".
# (Vectorized replacement for the original row-by-row loop.)
Food$NewVio <- ifelse(Food[[4]] == "No violations found.", "N", "Y")
# ksvm needs a factor target to treat this as classification.
Food$NewVio <- factor(Food$NewVio)
# Class balance: violations vs. non-violations.
table(Food$NewVio)
# Build a shuffled 2/3 train / 1/3 test split.
RndmIndex <- sample(1:nrow(Food))
summary(RndmIndex)  # sanity check on the shuffled index
head(RndmIndex)
TrainTestCutPoint <- floor(2 * nrow(Food) / 3)
TrainTestCutPoint
trainData <- Food[RndmIndex[1:TrainTestCutPoint], ]
testData <- Food[RndmIndex[(TrainTestCutPoint + 1):nrow(Food)], ]
# Check lengths of the training and test data.
nrow(trainData)
nrow(testData)
View(trainData)
View(testData)
# STEP 3: model 1 -- RBF-kernel SVM on the two violation-count predictors.
NewVioSVM <- ksvm(NewVio ~ TOTAL...CRITICAL.VIOLATIONS + TOTAL...NONCRITICAL.VIOLATIONS,
                  data = trainData, kernel = "rbfdot", kpar = "automatic",
                  C = 50, cross = 10, prob.model = TRUE)
NewVioSVM
# Distribution of support-vector coefficients.
# BUG FIX: the original called hist(alpha(ViolSVM)[[1]]) but `ViolSVM` was
# never defined anywhere -- the fitted model object is `NewVioSVM`.
hist(alpha(NewVioSVM)[[1]])
# Tried C from 5 to 50 and k-fold cross-validation from 2 to 10; training
# and cross-validation error stayed essentially the same in all cases, so
# evaluate model 1 on the held-out test data.
ViolSvmPred <- predict(NewVioSVM, testData, type = "votes")
# Ground truth (column 27 = NewVio) versus prediction.
comparison <- data.frame(testData[, 27], factor(ViolSvmPred[2, ]))
colnames(comparison) <- c("Truth", "Predicted")
ConfusionMatrix <- table(comparison)
ConfusionMatrix
##        Predicted
## Truth     0    1
##   N    2967    4
##   Y       2 5901
# Error rate = incorrectly classified instances / total instances.
paste("Prediction Error rate = ",
      round(((ConfusionMatrix[1, 2] + ConfusionMatrix[2, 1]) / nrow(comparison)) * 100, 2), "%")
# STEP 4: model 2 -- add inspection type and last-inspection date as predictors.
NewVioSVM2 <- ksvm(NewVio ~ TOTAL...CRITICAL.VIOLATIONS + TOTAL...NONCRITICAL.VIOLATIONS +
                     INSPECTION.TYPE + LAST.INSPECTED,
                   data = trainData, kernel = "rbfdot", kpar = "automatic",
                   C = 50, cross = 10, prob.model = TRUE)
NewVioSVM2
# Distribution of support-vector coefficients for model 2.
hist(alpha(NewVioSVM2)[[1]])
# Predict on the test set with model 2.
ViolSvmPred2 <- predict(NewVioSVM2, testData, type = "votes")
comparison2 <- data.frame(testData[, 27], factor(ViolSvmPred2[2, ]))
colnames(comparison2) <- c("Truth", "Predicted")
ConfusionMatrix1 <- table(comparison2)
ConfusionMatrix1
##        Predicted
## Truth     0    1
##   N    2934    1
##   Y      14 5925
# Model comparison: model 2 adds INSPECTION.TYPE and LAST.INSPECTED, yet
# misclassifies slightly more test instances (15 vs. 9). Model 1 already
# performs very well, so the extra predictors do not drive the error down.
paste("Prediction Error rate = ",
      round(((ConfusionMatrix1[1, 2] + ConfusionMatrix1[2, 1]) / nrow(comparison2)) * 100, 2), "%")
# End of program
|
/week 10 SVM/Jash Mehta, HW SVM, JM.R
|
no_license
|
jashmehta89/IST-687-Applied-Data-Science-LabWork
|
R
| false
| false
| 4,763
|
r
|
#Jash Mehta
#04/13/2016
#hw svm
install.packages("kernlab")
library("kernlab")
#STEP1
Food <- read.csv("C:\\Users\\jashm\\Google Drive\\IM\\687\\Home Work\\SVM\\Food_Service_Establishment__Last_Inspection.csv")
Food$V <-NULL
#Step2
Food <- na.omit(Food)
Food$NewVio <- "Null"
head(Food$NewVio,20)
fix(Food)
for(i in 1:length(Food$VIOLATIONS))
{
if(Food[i,4] == "No violations found.")
{
Food$NewVio[i] <- "N"
}
else
{
Food$NewVio[i] <- "Y"
}
}
#Converting ViolInd into a Factor type of variable
#Currently it is numeric type
Food$NewVio <- factor(Food$NewVio)
#Checking count of violations and non-violation
table(Food$NewVio)
#Creating training and test dataset
#First we will create the random index
RndmIndex <- sample(1:nrow(Food))
#To check whether a random index is created or not
summary(RndmIndex)
head(RndmIndex)
TrainTestCutPoint <- floor(2*nrow(Food)/3)
TrainTestCutPoint
#So here 17868 is the cut point
#Now we take from 1 to 17868 random indexes for training data
#and from 17869 till the end random indexed for test data
trainData <- Food[RndmIndex[1:TrainTestCutPoint],]
testData <- Food[RndmIndex[(TrainTestCutPoint+1):nrow(Food)],]
#checking length of Training & Test Data
nrow(trainData)
nrow(testData)
#Having a look at the training and test data set
View(trainData)
View(testData)
#Step 3: Building a Model using KSVM
NewVioSVM <- ksvm(NewVio~TOTAL...CRITICAL.VIOLATIONS+TOTAL...NONCRITICAL.VIOLATIONS,data=trainData,kernel="rbfdot",kpar="automatic",C=50,cross=10,prob.model=TRUE)
#Checking output of NewVioSVM
NewVioSVM
# Distribution of support-vector coefficients for model 1.
# BUG FIX: `ViolSVM` was never defined; the fitted model object is `NewVioSVM`.
hist(alpha(NewVioSVM)[[1]])
#Tried creating SVM model with different value of regularization paramter i.e from 5 to 50
#and k-fold Cross validation parameter from 2 to 10
#But Training error and cross-validation error remained almost same in all the cases
#Not able to lower the error value any more
#So testing the test data for prediction using the model
#Predicting the variable using model
ViolSvmPred <- predict(NewVioSVM,testData,type="votes")
#Creating a new data from to check the truth versus prediction
comparison <- data.frame(testData[,27],factor(ViolSvmPred[2,]))
#Renaming columns as GroundTruth and Prediction
colnames(comparison) <- c("Truth","Predicted")
#Printing out the confusion matrix
ConfusionMatrix <- table(comparison)
ConfusionMatrix
##Predicted
##Truth 0 1
##N 2967 4
##Y 2 5901
#Checking the accuracy of prediction by calculating error rate
#Formula is to sum incorreclty classified instances and divide by total instances
paste("Prediction Error rate = ",round(((ConfusionMatrix[1,2]+ConfusionMatrix[2,1])/nrow(comparison))*100,2),"%")
#Step 4: Creating second model with additional predictors
NewVioSVM2 <- ksvm(NewVio~TOTAL...CRITICAL.VIOLATIONS+TOTAL...NONCRITICAL.VIOLATIONS+INSPECTION.TYPE+LAST.INSPECTED,data=trainData,kernel="rbfdot",kpar="automatic",C=50,cross=10,prob.model=TRUE)
#Checking output of NewVioSVM2
NewVioSVM2
#Having a look over the range of support vectors
hist(alpha(NewVioSVM2)[[1]])
#Tried creating SVM model with different value of regularization paramter i.e from 5 to 50
#and k-fold Cross validation parameter from 2 to 10
#Predicting the variable using model
ViolSvmPred2 <- predict(NewVioSVM2,testData,type="votes")
#Creating a new data from to check the ground truth versus prediction
comparison2 <- data.frame(testData[,27],factor(ViolSvmPred2[2,]))
#Renaming columns as Truth and Prediction
colnames(comparison2) <- c("Truth","Predicted")
#Printing out the confusion matrix
ConfusionMatrix1 <- table(comparison2)
ConfusionMatrix1
##Predicted
##Truth 0 1
##N 2934 1
##Y 14 5925
#Difference between 1st and 2nd model
#In the second model added more predictors compared to 1st model like INSPECTION.TYPE, LAST.INSPECTED etc.
#Also, the confustion matrix has slight difference.
#Model 1 had 9 incorrectly identified instances whereas model2 had little more.
#Model 1, itself, is performing very well. So, cannot drive down the error anymore
#If I had chosen some different predictors in model1 then I could have reduced the error in model 2
#I would have then chosen good predictors for model2 and would have drived down the error rate
#Checking the accuracy of prediction by calculating error rate
#Formula is to sum incorreclty classified instances and divide by total instances
paste("Prediction Error rate = ",round(((ConfusionMatrix1[1,2]+ConfusionMatrix1[2,1])/nrow(comparison2))*100,2),"%")
#End of program
|
# Fit a nonconvex-penalized regression path (MCP / SCAD / lasso) on the RAW
# (unstandardized) design matrix.  Modified copy of ncvreg::ncvreg(); relies
# on package internals setupLambda(), convexMin(), lamNames() and compiled
# cdfit_* routines, so it only runs inside the package.
#
# Arguments mirror ncvreg():
#   X, y                    design matrix and response
#   family                  "gaussian", "binomial" or "poisson"
#   penalty, gamma, alpha   penalty choice and tuning parameters
#   lambda.min, nlambda, lambda   grid of penalty strengths
#   eps, max.iter           convergence controls
#   convex                  also locate the region of local convexity?
#   dfmax                   cap on the number of nonzero coefficients
#   penalty.factor          per-coefficient penalty multipliers
# Returns an object of class c("ncvint", "ncvreg") holding the coefficient
# path `beta`, the lambda grid, loss and per-lambda iteration counts.
#
# NOTE(review): the family == "gaussian" fitting branches are commented out
# below, leaving `b`, `loss`, `iter` undefined on that path; as written only
# family = "binomial" is functional.  Confirm before enabling other families.
ncvint <- function(X, y, family=c("gaussian","binomial","poisson"), penalty=c("MCP", "SCAD", "lasso"),
                   gamma=switch(penalty, SCAD=3.7, 3), alpha=1, lambda.min=ifelse(n>p,.001,.05), nlambda=100,
                   lambda=NULL, eps=.001, max.iter=1000, convex=TRUE, dfmax=p+1, penalty.factor=rep(1, ncol(X)),
                   warn=TRUE, returnX=FALSE, ...) {
  # Coercion
  family <- match.arg(family)
  penalty <- match.arg(penalty)
  # BUG FIX: `class(X) != "matrix"` fails on R >= 4.0, where matrices have
  # class c("matrix", "array"); the length-2 condition is an error in `if`
  # on R >= 4.2.  is.matrix() / inherits() are the robust tests.
  if (!is.matrix(X)) {
    tmp <- try(X <- model.matrix(~0+., data=X), silent=TRUE)
    if (inherits(tmp, "try-error")) stop("X must be a matrix or able to be coerced to a matrix")
  }
  if (storage.mode(X)=="integer") storage.mode(X) <- "double"
  # BUG FIX: same length-1 class comparison problem as above; is.double()
  # matches the original intent (a plain double vector has class "numeric")
  # while still coercing integer, factor, etc. responses.
  if (!is.double(y)) {
    tmp <- try(y <- as.numeric(y), silent=TRUE)
    if (inherits(tmp, "try-error")) stop("y must be numeric or able to be coerced to numeric")
  }
  if (storage.mode(penalty.factor) != "double") storage.mode(penalty.factor) <- "double"
  # Error checking
  standardize <- FALSE  # this variant always works on the raw X
  if (gamma <= 1 && penalty=="MCP") stop("gamma must be greater than 1 for the MC penalty")
  if (gamma <= 2 && penalty=="SCAD") stop("gamma must be greater than 2 for the SCAD penalty")
  if (nlambda < 2) stop("nlambda must be at least 2")
  if (alpha <= 0) stop("alpha must be greater than 0; choose a small positive number instead")
  if (any(is.na(y)) || any(is.na(X))) stop("Missing data (NA's) detected. Take actions (e.g., removing cases, removing features, imputation) to eliminate missing data before passing X and y to ncvreg")
  if (length(penalty.factor)!=ncol(X)) stop("penalty.factor does not match up with X")
  if (family=="binomial" && length(table(y)) > 2) stop("Attempting to use family='binomial' with non-binary data")
  # Recode a non-0/1 binary response so the larger value becomes 1.
  if (family=="binomial" && !identical(sort(unique(y)), 0:1)) y <- as.numeric(y==max(y))
  if (length(y) != nrow(X)) stop("X and y do not have the same number of observations")
  ## Deprecation support: accept the old `n.lambda` spelling.
  dots <- list(...)
  if ("n.lambda" %in% names(dots)) nlambda <- dots$n.lambda
  ## Set up XX, yy, lambda
  if (standardize) {
    # std <- .Call("standardize1", X)
    # XX <- std[[1]]
    # center <- std[[2]]
    # scale <- std[[3]]
    # nz <- which(scale > 1e-6)
    # if (length(nz) != ncol(XX)) XX <- XX[ ,nz, drop=FALSE]
    # penalty.factor <- penalty.factor[nz]
  } else {
    XX <- X
  }
  p <- ncol(XX)
  if (family=="gaussian") {
    yy <- y - mean(y)  # center the response so the intercept drops out
  } else {
    yy <- y
  }
  n <- length(yy)
  if (is.null(lambda)) {
    lambda <- setupLambda(if (standardize) XX else X, yy, family, alpha, lambda.min, nlambda, penalty.factor)
    user.lambda <- FALSE
  } else {
    nlambda <- length(lambda)
    user.lambda <- TRUE
  }
  ## Fit (only the binomial branch is active -- see NOTE above)
  if (family=="gaussian" && standardize==TRUE) {
    # res <- .Call("cdfit_gaussian", XX, yy, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)))
    # a <- rep(mean(y),nlambda)
    # b <- matrix(res[[1]], p, nlambda)
    # loss <- res[[2]]
    # iter <- res[[3]]
  } else if (family=="gaussian" && standardize==FALSE) {
    # res <- .Call("cdfit_raw", X, y, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)))
    # b <- matrix(res[[1]], p, nlambda)
    # loss <- res[[2]]
    # iter <- res[[3]]
  } else if (family=="binomial") {
    res <- .Call("cdfit_binomial", XX, yy, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)), as.integer(warn))
    a <- res[[1]]
    b <- matrix(res[[2]], p, nlambda)
    loss <- res[[3]]
    iter <- res[[4]]
  } else if (family=="poisson") {
    # res <- .Call("cdfit_poisson", XX, yy, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)), as.integer(warn))
    # a <- res[[1]]
    # b <- matrix(res[[2]], p, nlambda)
    # loss <- res[[3]]
    # iter <- res[[4]]
  }
  ## Eliminate saturated lambda values, if any (iter is NA past saturation)
  ind <- !is.na(iter)
  if (family!="gaussian" || standardize==TRUE) a <- a[ind]
  b <- b[, ind, drop=FALSE]
  iter <- iter[ind]
  lambda <- lambda[ind]
  loss <- loss[ind]
  if (warn && any(iter==max.iter)) warning("Algorithm failed to converge for some values of lambda")
  ## Smallest lambda at which the objective is locally convex?
  convex.min <- if (convex) convexMin(b, XX, penalty, gamma, lambda*(1-alpha), family, penalty.factor, a=a) else NULL
  ## Unstandardize (no-op here since standardize is hard-wired to FALSE)
  if (standardize) {
    # beta <- matrix(0, nrow=(ncol(X)+1), ncol=length(lambda))
    # bb <- b/scale[nz]
    # beta[nz+1,] <- bb
    # beta[1,] <- a - crossprod(center[nz], bb)
  } else {
    # beta <- if (family=="gaussian") b else rbind(a, b)
    beta <- if (family=="gaussian") rbind(0, b) else rbind(a, b)
  }
  ## Names: coefficient rows (intercept first) x lambda grid
  varnames <- if (is.null(colnames(X))) paste("V",1:ncol(X),sep="") else colnames(X)
  varnames <- c("(Intercept)", varnames)
  dimnames(beta) <- list(varnames, lamNames(lambda))
  ## Output
  val <- structure(list(beta = beta,
                        iter = iter,
                        lambda = lambda,
                        penalty = penalty,
                        family = family,
                        gamma = gamma,
                        alpha = alpha,
                        convex.min = convex.min,
                        loss = loss,
                        penalty.factor = penalty.factor,
                        n = n),
                   class = c("ncvint","ncvreg"))
  if (family=="poisson") val$y <- y
  if (returnX) {
    # val$X <- XX
    # val$center <- center
    # val$scale <- scale
    # val$y <- yy
  }
  val
}
|
/R/ncvint.R
|
no_license
|
Jiahua1982/TVsMiss
|
R
| false
| false
| 5,907
|
r
|
# Fit a nonconvex-penalized regression path (MCP / SCAD / lasso) on the RAW
# (unstandardized) design matrix.  Modified copy of ncvreg::ncvreg(); relies
# on package internals setupLambda(), convexMin(), lamNames() and compiled
# cdfit_* routines.  Returns an object of class c("ncvint", "ncvreg") with
# the coefficient path `beta`, the lambda grid, loss and iteration counts.
#
# NOTE(review): the family == "gaussian" fitting branches are commented out
# below, leaving `b`, `loss`, `iter` undefined on that path; as written only
# family = "binomial" appears functional -- confirm before use.
ncvint <- function(X, y, family=c("gaussian","binomial","poisson"), penalty=c("MCP", "SCAD", "lasso"),
                   gamma=switch(penalty, SCAD=3.7, 3), alpha=1, lambda.min=ifelse(n>p,.001,.05), nlambda=100,
                   lambda=NULL, eps=.001, max.iter=1000, convex=TRUE, dfmax=p+1, penalty.factor=rep(1, ncol(X)),
                   warn=TRUE, returnX=FALSE, ...) {
  # Coersion
  family <- match.arg(family)
  penalty <- match.arg(penalty)
  # NOTE(review): on R >= 4.0 a matrix has class c("matrix", "array"), so
  # this length-2 comparison errors in `if` on R >= 4.2; should be
  # !is.matrix(X).
  if (class(X) != "matrix") {
    tmp <- try(X <- model.matrix(~0+., data=X), silent=TRUE)
    if (class(tmp)[1] == "try-error") stop("X must be a matrix or able to be coerced to a matrix")
  }
  if (storage.mode(X)=="integer") storage.mode(X) <- "double"
  # NOTE(review): same class-comparison fragility; !is.double(y) would
  # preserve the intent while coercing integer/factor responses.
  if (class(y) != "numeric") {
    tmp <- try(y <- as.numeric(y), silent=TRUE)
    if (class(tmp)[1] == "try-error") stop("y must numeric or able to be coerced to numeric")
  }
  if (storage.mode(penalty.factor) != "double") storage.mode(penalty.factor) <- "double"
  # Error checking
  standardize <- FALSE  # this variant always works on the raw X
  if (gamma <= 1 & penalty=="MCP") stop("gamma must be greater than 1 for the MC penalty")
  if (gamma <= 2 & penalty=="SCAD") stop("gamma must be greater than 2 for the SCAD penalty")
  if (nlambda < 2) stop("nlambda must be at least 2")
  if (alpha <= 0) stop("alpha must be greater than 0; choose a small positive number instead")
  if (any(is.na(y)) | any(is.na(X))) stop("Missing data (NA's) detected. Take actions (e.g., removing cases, removing features, imputation) to eliminate missing data before passing X and y to ncvreg")
  if (length(penalty.factor)!=ncol(X)) stop("penalty.factor does not match up with X")
  if (family=="binomial" & length(table(y)) > 2) stop("Attemping to use family='binomial' with non-binary data")
  # Recode a non-0/1 binary response so the larger value becomes 1.
  if (family=="binomial" & !identical(sort(unique(y)), 0:1)) y <- as.numeric(y==max(y))
  if (length(y) != nrow(X)) stop("X and y do not have the same number of observations")
  ## Deprication support
  dots <- list(...)
  if ("n.lambda" %in% names(dots)) nlambda <- dots$n.lambda
  ## Set up XX, yy, lambda
  if (standardize) {
    # std <- .Call("standardize1", X)
    # XX <- std[[1]]
    # center <- std[[2]]
    # scale <- std[[3]]
    # nz <- which(scale > 1e-6)
    # if (length(nz) != ncol(XX)) XX <- XX[ ,nz, drop=FALSE]
    # penalty.factor <- penalty.factor[nz]
  } else {
    XX <- X
  }
  p <- ncol(XX)
  if (family=="gaussian") {
    # Center the response so the intercept drops out of the fit.
    yy <- y - mean(y)
  } else {
    yy <- y
  }
  n <- length(yy)
  if (is.null(lambda)) {
    lambda <- setupLambda(if (standardize) XX else X, yy, family, alpha, lambda.min, nlambda, penalty.factor)
    user.lambda <- FALSE
  } else {
    nlambda <- length(lambda)
    user.lambda <- TRUE
  }
  ## Fit (only the binomial branch is active -- see NOTE above)
  if (family=="gaussian" & standardize==TRUE) {
    # res <- .Call("cdfit_gaussian", XX, yy, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)))
    # a <- rep(mean(y),nlambda)
    # b <- matrix(res[[1]], p, nlambda)
    # loss <- res[[2]]
    # iter <- res[[3]]
  } else if (family=="gaussian" & standardize==FALSE) {
    # res <- .Call("cdfit_raw", X, y, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)))
    # b <- matrix(res[[1]], p, nlambda)
    # #print(b)
    # loss <- res[[2]]
    # iter <- res[[3]]
  } else if (family=="binomial") {
    res <- .Call("cdfit_binomial", XX, yy, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)), as.integer(warn))
    a <- res[[1]]
    b <- matrix(res[[2]], p, nlambda)
    loss <- res[[3]]
    iter <- res[[4]]
  } else if (family=="poisson") {
    # res <- .Call("cdfit_poisson", XX, yy, penalty, lambda, eps, as.integer(max.iter), as.double(gamma), penalty.factor, alpha, as.integer(dfmax), as.integer(user.lambda | any(penalty.factor==0)), as.integer(warn))
    # a <- res[[1]]
    # b <- matrix(res[[2]], p, nlambda)
    # loss <- res[[3]]
    # iter <- res[[4]]
  }
  ## Eliminate saturated lambda values, if any (iter is NA past saturation)
  ind <- !is.na(iter)
  if (family!="gaussian" | standardize==TRUE) a <- a[ind]
  b <- b[, ind, drop=FALSE]
  iter <- iter[ind]
  lambda <- lambda[ind]
  loss <- loss[ind]
  if (warn & any(iter==max.iter)) warning("Algorithm failed to converge for some values of lambda")
  ## Local convexity?
  convex.min <- if (convex) convexMin(b, XX, penalty, gamma, lambda*(1-alpha), family, penalty.factor, a=a) else NULL
  ## Unstandardize (no-op here since standardize is hard-wired to FALSE)
  if (standardize) {
    # beta <- matrix(0, nrow=(ncol(X)+1), ncol=length(lambda))
    # bb <- b/scale[nz]
    # beta[nz+1,] <- bb
    # beta[1,] <- a - crossprod(center[nz], bb)
  } else {
    # beta <- if (family=="gaussian") b else rbind(a, b)
    beta <- if (family=="gaussian") rbind(0, b) else rbind(a, b)
  }
  #print(beta)
  ## Names: coefficient rows (intercept first) x lambda grid
  varnames <- if (is.null(colnames(X))) paste("V",1:ncol(X),sep="") else colnames(X)
  varnames <- c("(Intercept)", varnames)
  #if (family!="gaussian" | standardize==TRUE) varnames <- c("(Intercept)", varnames)
  #print(varnames)
  dimnames(beta) <- list(varnames, lamNames(lambda))
  ## Output
  val <- structure(list(beta = beta,
                        iter = iter,
                        lambda = lambda,
                        penalty = penalty,
                        family = family,
                        gamma = gamma,
                        alpha = alpha,
                        convex.min = convex.min,
                        loss = loss,
                        penalty.factor = penalty.factor,
                        n = n),
                   class = c("ncvint","ncvreg"))
  if (family=="poisson") val$y <- y
  if (returnX) {
    # val$X <- XX
    # val$center <- center
    # val$scale <- scale
    # val$y <- yy
  }
  val
}
|
#
# Exploratory Data Analysis
# Project 1
# 2015.04.10
#
# Plot 1: histogram of household Global Active Power for 2007-02-01/02,
# written to plot1.png (480x480).
setwd("C://Users//jnewell.BI//Documents//Coursera//Exploratory Data Analysis//Project1")
# Raw file layout (semicolon-separated, "?" marks missing values), e.g.:
# Date;Time;Global_active_power;Global_reactive_power;Voltage;Global_intensity;Sub_metering_1;Sub_metering_2;Sub_metering_3
# 1/2/2007;00:00:00;0.326;0.128;243.150;1.400;0.000;0.000;0.000
energyData <- read.table("data/household_power_consumption.txt",
                         header = TRUE,
                         sep = ";",
                         colClasses = c("character", "character","numeric","numeric","numeric","numeric","numeric","numeric"),
                         col.names = c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),
                         na.strings="?"
)
#convert the Dates (stored as day/month/year strings)
energyData[,1]<-as.Date(energyData[,1],format="%d/%m/%Y")
#subset out the dates of interest (the two February 2007 days)
plot1_ss<-subset(energyData, Date>="2007-02-01" & Date<="2007-02-02")
# Open the PNG device before plotting so the histogram renders to file.
png(file = "plot1.png",width=480,height=480)
#construct plot 1 adding the title, color and x and y axis labels
hist(plot1_ss$Global_active_power,
     main="Global Active Power",
     col="red",
     xlab="Global active power (kilowatts)",
     ylab="Frequency")
dev.off() ## Don't forget to close the PNG device!
|
/plot1.R
|
no_license
|
gtjnewell/ExData_Plotting1
|
R
| false
| false
| 1,314
|
r
|
# Exploratory Data Analysis -- Course Project 1 (2015-04-10).
# Plot 1: histogram of Global Active Power over 2007-02-01..2007-02-02,
# rendered to plot1.png at 480x480.
setwd("C://Users//jnewell.BI//Documents//Coursera//Exploratory Data Analysis//Project1")
# Input is semicolon-separated with "?" for missing values:
#   Date;Time;Global_active_power;...;Sub_metering_3
#   1/2/2007;00:00:00;0.326;0.128;243.150;1.400;0.000;0.000;0.000
hh_power <- read.table(
  "data/household_power_consumption.txt",
  sep = ";",
  header = TRUE,
  na.strings = "?",
  colClasses = c("character", "character", rep("numeric", 6)),
  col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power",
                "Voltage", "Global_intensity", "Sub_metering_1",
                "Sub_metering_2", "Sub_metering_3")
)
# Parse the day/month/year date strings, then keep the two target days.
hh_power[, 1] <- as.Date(hh_power[, 1], format = "%d/%m/%Y")
feb_days <- subset(hh_power, Date >= "2007-02-01" & Date <= "2007-02-02")
# Draw the histogram straight into the PNG device, then close it.
png(file = "plot1.png", width = 480, height = 480)
hist(feb_days$Global_active_power,
     main = "Global Active Power",
     xlab = "Global active power (kilowatts)",
     ylab = "Frequency",
     col = "red")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcular.optimo.derivada.R
\name{calcular.optimo.derivada}
\alias{calcular.optimo.derivada}
\title{calculates optimum: second derivative equals 0 (change signs from - to +, or + to -)}
\usage{
calcular.optimo.derivada(i.curva.map)
}
\description{
calculates optimum: second derivative equals 0 (change signs from - to +, or + to -)
}
\keyword{internal}
|
/man/calcular.optimo.derivada.Rd
|
no_license
|
cran/mem
|
R
| false
| true
| 443
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcular.optimo.derivada.R
\name{calcular.optimo.derivada}
\alias{calcular.optimo.derivada}
\title{calculates optimum: second derivative equals 0 (change signs from - to +, or + to -)}
\usage{
calcular.optimo.derivada(i.curva.map)
}
\description{
calculates optimum: second derivative equals 0 (change signs from - to +, or + to -)
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weightedDetection.R
\name{weightedDetection}
\alias{weightedDetection}
\title{weightedDetection}
\usage{
weightedDetection(occWeights = data.frame(sites = 1:10, weights = c(0.05,
0.05, 0.05, 0.05, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2)),
visWeights = data.frame(sites = 1:10, weights = c(0.05, 0.05, 0.05,
0.05, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2)), noOccur = 5, noLocations = 5,
noVisits = 5, detectProb = 0.1, nIter = 999)
}
\arguments{
\item{occWeights}{occurrenceWeights}
}
\description{
Calculate the probability of detecting a species with weighted occurrences and visits
}
\examples{
\dontrun{
tt <- weightedDetection(noOccur = 1,
noLocations = 1:10,
detectProb = 0.5)
plot(tt)
occ <- createOccProb(map10km)
visWeights <- occ \%>\%
select(sites = ssbid,
weights = prob)
system.time(tt <- weightedDetection(occWeights = visWeights,
visWeights = visWeights,
noOccur = 100,
noLocations = seq(10, 100, by = 10),
noVisits = 5
))
}
}
|
/man/weightedDetection.Rd
|
no_license
|
NINAnor/SurveyPower
|
R
| false
| true
| 1,169
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weightedDetection.R
\name{weightedDetection}
\alias{weightedDetection}
\title{weightedDetection}
\usage{
weightedDetection(occWeights = data.frame(sites = 1:10, weights = c(0.05,
0.05, 0.05, 0.05, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2)),
visWeights = data.frame(sites = 1:10, weights = c(0.05, 0.05, 0.05,
0.05, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2)), noOccur = 5, noLocations = 5,
noVisits = 5, detectProb = 0.1, nIter = 999)
}
\arguments{
\item{occWeights}{occurrenceWeights}
}
\description{
Calculate the probability of detecting a species with weighted occurrences and visits
}
\examples{
\dontrun{
tt <- weightedDetection(noOccur = 1,
noLocations = 1:10,
detectProb = 0.5)
plot(tt)
occ <- createOccProb(map10km)
visWeights <- occ \%>\%
select(sites = ssbid,
weights = prob)
system.time(tt <- weightedDetection(occWeights = visWeights,
visWeights = visWeights,
noOccur = 100,
noLocations = seq(10, 100, by = 10),
noVisits = 5
))
}
}
|
# R Programming -- Week 1 quiz scratch script.
x <- 4
class(x)  # "numeric": a bare 4 is a double
x <- c(4, TRUE)
class(x)  # "numeric": TRUE is coerced up to 1
x <- c(1,3, 5)
y <- c(3, 2, 10)
rbind(x, y)  # 2x3 matrix with rows named x and y
x <- list(2, "a", "b", TRUE)
x[[2]]  # "a": [[ ]] extracts the element itself, not a sublist
x <- 1:4
y <- 2:3
x+y  # recycling: c(1+2, 2+3, 3+2, 4+3)
x <- c(3, 5, 1, 10, 12, 6)
x
x[x < 6] <- 0  # logical subsetting: zero out values below 6
x
x <- c(3, 5, 1, 10, 12, 6)
x[x %in% 1:5] <- 0  # equivalent via %in%: zero out values in 1..5
x
# Ozone questions: hw1_data.csv must sit in the working directory.
data <- read.csv("hw1_data.csv")
head(data)
data[c(152,153),]  # inspect rows 152-153
dim(data)
data$Ozone[47]
data[47,"Ozone"]  # same element via name-based indexing
na_oz <- data$Ozone[is.na(data$Ozone)]  # the missing Ozone values
complete_oz <- data$Ozone[complete.cases(data$Ozone)]
mean(complete_oz)  # mean Ozone over non-missing rows
high_oz_temp <- subset(data,Ozone >31 & Temp > 90)
mean(high_oz_temp$Solar.R)  # mean solar radiation on high-ozone hot days
data_6 <- subset(data,Month==6)
mean(data_6$Temp[complete.cases(data_6$Temp)])  # mean June temperature
data_5 <- subset(data,Month==5)
data_5
max(data_5$Ozone[complete.cases(data_5$Ozone)] )  # max Ozone in May
x <- 1:4
y <- 2
class(x+y)  # integer vector + length-1 double -> "numeric"
x+y
|
/02_R_programming/quizz_week_1.R
|
no_license
|
danpon/coursera_jhu_datascience
|
R
| false
| false
| 729
|
r
|
# Week 1 quiz worked answers (coercion, subsetting, recycling, Ozone data).
x <- 4
class(x)  # double literal -> "numeric"
x <- c(4, TRUE)
class(x)  # mixed numeric/logical vector coerces to "numeric"
x <- c(1,3, 5)
y <- c(3, 2, 10)
rbind(x, y)  # stack the two vectors as matrix rows
x <- list(2, "a", "b", TRUE)
x[[2]]  # second list element, unwrapped
x <- 1:4
y <- 2:3
x+y  # y is recycled across x
x <- c(3, 5, 1, 10, 12, 6)
x
x[x < 6] <- 0  # replace all entries less than 6 with 0
x
x <- c(3, 5, 1, 10, 12, 6)
x[x %in% 1:5] <- 0  # same replacement expressed with %in%
x
# Remaining questions use the course's hw1_data.csv (airquality-style data).
data <- read.csv("hw1_data.csv")
head(data)
data[c(152,153),]
dim(data)
data$Ozone[47]
data[47,"Ozone"]  # identical lookup by column name
na_oz <- data$Ozone[is.na(data$Ozone)]
complete_oz <- data$Ozone[complete.cases(data$Ozone)]
mean(complete_oz)  # mean of the observed Ozone values
high_oz_temp <- subset(data,Ozone >31 & Temp > 90)
mean(high_oz_temp$Solar.R)
data_6 <- subset(data,Month==6)
mean(data_6$Temp[complete.cases(data_6$Temp)])
data_5 <- subset(data,Month==5)
data_5
max(data_5$Ozone[complete.cases(data_5$Ozone)] )
x <- 1:4
y <- 2
class(x+y)  # result is promoted to "numeric"
x+y
|
# Return a darkened version of `color`.
#
# Decomposes the color into RGB channels, divides every channel by
# `factor` (factor > 1 darkens, factor < 1 would lighten), and
# re-encodes the result as a "#RRGGBB" hex string.
darken <- function(color, factor = 1.4) {
  scaled <- col2rgb(color) / factor
  rgb(t(scaled), maxColorValue = 255)
}
#' Run the hospital queueing model and plot the results
#'
#' Wrapper around \code{hospital_queues()} (defined elsewhere in this
#' package): runs the ICU/floor queueing simulation with the given
#' parameters, then builds four ggplot summaries of the run.
#' All parameters are forwarded unchanged to \code{hospital_queues()};
#' see that function for their exact meaning.
#'
#' @return A list of six elements: (1) daily ED visits bar chart,
#'   (2) cumulative ED triages and deaths, (3) cumulative deaths by
#'   location (stacked areas), (4) ICU/floor utilization with capacity
#'   lines, (5) first day the ICU queue forms, (6) first day the floor
#'   queue forms. NOTE(review): if a queue never forms, elements 5/6
#'   are returned as the raw all-FALSE logical vector rather than a day
#'   number -- confirm callers handle both cases.
#' @export
plot_hospital <- function(initial_report = 1000,
                          final_report = 10000,
                          distribution = "exponential",
                          young = .24,
                          medium = .6,
                          M = 352,
                          L = 1781,
                          t = 60,
                          chi_C = 0.1,
                          chi_L = .142857,
                          growth_rate = 1,
                          mu_C1 = .1,
                          mu_C2 = .1,
                          mu_C3 = .1,
                          rampslope = 1.2,
                          Cinit = .25,
                          Finit = .5,
                          Lfinal = 1781,
                          Lramp = c(0,0),
                          Mfinal = 352,
                          Mramp = c(0,0),
                          doprotocols = 0){
  # Run the simulation; every argument is passed through unchanged.
  hospital <- hospital_queues(initial_report = initial_report,
                              final_report = final_report,
                              distribution = distribution,
                              young = young,
                              medium = medium,
                              M = M,
                              L = L,
                              t = t,
                              chi_C = chi_C,
                              chi_L = chi_L,
                              growth_rate = growth_rate,
                              mu_C1 = mu_C1,
                              mu_C2 = mu_C2,
                              mu_C3 = mu_C3,
                              rampslope = rampslope,
                              Cinit = Cinit,
                              Finit = Finit,
                              Lfinal = Lfinal,
                              Lramp = Lramp,
                              Mfinal = Mfinal,
                              Mramp = Mramp,
                              doprotocols = doprotocols)
  # Derived totals: deaths summed over all locations, and total queue
  # lengths for ICU (WC1-WC3) and floor (WF1-WF3) across the three
  # patient classes.
  hospital$totaldead <- hospital$Dead_at_ICU + hospital$Dead_in_ED + hospital$Dead_on_Floor + hospital$Dead_waiting_for_Floor + hospital$Dead_waiting_for_ICU + hospital$Dead_with_mild_symptoms
  hospital$totalWC <- hospital$WC1 + hospital$WC2 + hospital$WC3
  hospital$totalWF <- hospital$WF1 + hospital$WF2 + hospital$WF3
  # Long format keyed on time for ggplot. NOTE(review): gather() is
  # superseded by pivot_longer(); kept as-is to preserve behavior.
  hospital_melt <- hospital %>% gather(variable, value, -time);
  # Plot 1: daily ED visits.
  p1 <- ggplot(hospital,
               aes(x=time, y=reports))+
    geom_bar(size=1.5, stat="identity")+
    theme_bw(base_size=14) +
    labs(x="Time (Day)", y="Patients")+
    ggtitle("ED visits per day")+
    theme(panel.border = element_blank(), axis.line = element_line(colour = "black"))
  # Plot 2: cumulative ED throughput vs. cumulative deaths.
  p2 <- ggplot(hospital_melt,
               aes(x=time,y=value, color=variable))+
    geom_line(data= hospital_melt[hospital_melt$variable %in% c("Number_seen_at_ED", "totaldead"),],size=1.5)+
    theme_bw(base_size=14) +
    scale_color_manual( name=element_blank(), values=c("black", "red"), labels=c("Number_seen_at_ED"="ED throughput", "totaldead"="Deaths"))+
    labs(x="Time (Day)", y="Patients")+
    ggtitle("Cumulative ED triages and deaths")+
    theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), legend.position = c(0.25, 0.75), legend.text=element_text(size=11), legend.title=element_text(size=8),legend.background = element_rect(fill="transparent"))
  # Plot 3: cumulative deaths broken down by location (stacked areas).
  p3 <- ggplot(hospital_melt,
               aes(x=time,y=value, fill=variable))+
    geom_area(data= hospital_melt[hospital_melt$variable %in% c("Dead_at_ICU", "Dead_waiting_for_ICU", "Dead_on_Floor", "Dead_waiting_for_Floor", "Dead_with_mild_symptoms", "Dead_in_ED"),],size=1.5)+
    theme_bw(base_size=14)+
    scale_fill_manual( name=element_blank(), values=(c("black", "yellow", "red", "pink", "grey", "orange")), labels=c("Dead_at_ICU"="In ICU", "Dead_waiting_for_ICU"="Waiting for ICU beds", "Dead_on_Floor"= "On floor", "Dead_waiting_for_Floor"="Waiting for floor beds", "Dead_with_mild_symptoms"="Post discharge from ED", "Dead_in_ED"="In ED"))+
    labs(x="Time (Day)", y="Patients")+
    ggtitle("Cumulative deaths by location")+
    theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), legend.position = c(0.25, 0.65), legend.text=element_text(size=11), legend.title=element_text(size=8),legend.background = element_rect(fill="transparent"))
  # Plot 4: ICU/floor occupancy and queues against (possibly ramping)
  # capacity, drawn as dashed lines from the simulation's capacity_L /
  # capacity_M columns.
  p4 <- ggplot(hospital_melt,
               aes(x=time,y=value, color=variable))+
    geom_line(data= hospital_melt[hospital_melt$variable %in% c("CTotal", "FTotal", "totalWC", "totalWF"),],size=1.5)+
    theme_bw(base_size=14) +
    scale_color_manual( name=element_blank(), values=c("black", "red", "grey", "pink"), labels=c("CTotal"="In ICU", "FTotal"= "On floor", "totalWC" ="Waiting for ICU beds", "totalWF"="Waiting for floor beds"))+
    labs(x="Time (Day)", y="Patients")+
    ggtitle("ICU and floor utilization")+
    theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), legend.position = c(0.25, 0.75), legend.text=element_text(size=11), legend.title=element_text(size=8),legend.background = element_rect(fill="transparent"))+
    #geom_hline(yintercept=M, linetype="dashed", color = "black", size=1.5)+
    #geom_hline(yintercept=L, linetype="dashed", color = "red", size=1.5)
    geom_line(data= hospital_melt[hospital_melt$variable == "capacity_L",],size=1.5, linetype="dashed", color = "red")+
    geom_line(data= hospital_melt[hospital_melt$variable == "capacity_M",],size=1.5, linetype="dashed", color = "black")
  ### determine when the hospital exceeds capacity
  #ICU queue: TRUE on days with at least one patient waiting for an ICU bed
  ICUover = (hospital$WC1+hospital$WC2+hospital$WC3>=1)
  #floor queue: TRUE on days with at least one patient waiting for a floor bed
  floorover = (hospital$WF1+hospital$WF2+hospital$WF3>=1)
  # If a queue ever forms, replace the logical vector with the first day
  # it appears and label that day on the utilization plot.
  if(sum(floorover)>0){
    floorover<- min(which(floorover))
    p4 <- p4 +annotate(geom="label", x=floorover, y=hospital$capacity_L[floorover], label=paste("Day", as.character(floorover)), size=4, color="red")
  }
  if(sum(ICUover)>0){
    ICUover<- min(which(ICUover))
    p4 <- p4 +annotate(geom="label", x=ICUover, y=hospital$capacity_M[ICUover], label=paste("Day", as.character(ICUover)), size=4)
  }
  list(p1, p2, p3, p4, ICUover, floorover)
}
|
/R/plot_hospital.R
|
permissive
|
ctesta01/covid19_icu
|
R
| false
| false
| 6,643
|
r
|
darken <- function(color, factor=1.4){
col <- col2rgb(color)
col <- col/factor
col <- rgb(t(col), maxColorValue=255)
col
}
#' @export
plot_hospital<- function(initial_report= 1000,
final_report = 10000,
distribution= "exponential",
young=.24,
medium=.6,
M=352,
L=1781,
t = 60,
chi_C=0.1,
chi_L=.142857,
growth_rate=1,
mu_C1 = .1,
mu_C2 = .1,
mu_C3 = .1,
rampslope=1.2,
Cinit = .25,
Finit = .5,
Lfinal=1781,
Lramp=c(0,0),
Mfinal=352,
Mramp=c(0,0),
doprotocols=0){
hospital <- hospital_queues(initial_report=initial_report,
final_report = final_report,
distribution= distribution,
young=young,
medium=medium,
M=M,
L=L,
t=t,
chi_C=chi_C,
chi_L=chi_L,
growth_rate=growth_rate,
mu_C1 = mu_C1,
mu_C2 = mu_C2,
mu_C3 = mu_C3,
rampslope=rampslope,
Cinit = Cinit,
Finit = Finit,
Lfinal=Lfinal,
Lramp=Lramp,
Mfinal=Mfinal,
Mramp=Mramp,
doprotocols=doprotocols)
hospital$totaldead<- hospital$Dead_at_ICU + hospital$Dead_in_ED + hospital$Dead_on_Floor+ hospital$Dead_waiting_for_Floor+ hospital$Dead_waiting_for_ICU+ hospital$Dead_with_mild_symptoms
hospital$totalWC<- hospital$WC1 + hospital$WC2 + hospital$WC3
hospital$totalWF<- hospital$WF1 + hospital$WF2 + hospital$WF3
hospital_melt<- hospital %>% gather(variable, value, -time);
p1 <-ggplot(hospital,
aes(x=time, y=reports))+
geom_bar(size=1.5, stat="identity")+
theme_bw(base_size=14) +
labs(x="Time (Day)", y="Patients")+
ggtitle("ED visits per day")+
theme(panel.border = element_blank(), axis.line = element_line(colour = "black"))
p2 <-ggplot(hospital_melt,
aes(x=time,y=value, color=variable))+
geom_line(data= hospital_melt[hospital_melt$variable %in% c("Number_seen_at_ED", "totaldead"),],size=1.5)+
theme_bw(base_size=14) +
scale_color_manual( name=element_blank(), values=c("black", "red"), labels=c("Number_seen_at_ED"="ED throughput", "totaldead"="Deaths"))+
labs(x="Time (Day)", y="Patients")+
ggtitle("Cumulative ED triages and deaths")+
theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), legend.position = c(0.25, 0.75), legend.text=element_text(size=11), legend.title=element_text(size=8),legend.background = element_rect(fill="transparent"))
p3 <-ggplot(hospital_melt,
aes(x=time,y=value, fill=variable))+
geom_area(data= hospital_melt[hospital_melt$variable %in% c("Dead_at_ICU", "Dead_waiting_for_ICU", "Dead_on_Floor", "Dead_waiting_for_Floor", "Dead_with_mild_symptoms", "Dead_in_ED"),],size=1.5)+
theme_bw(base_size=14)+
scale_fill_manual( name=element_blank(), values=(c("black", "yellow", "red", "pink", "grey", "orange")), labels=c("Dead_at_ICU"="In ICU", "Dead_waiting_for_ICU"="Waiting for ICU beds", "Dead_on_Floor"= "On floor", "Dead_waiting_for_Floor"="Waiting for floor beds", "Dead_with_mild_symptoms"="Post discharge from ED", "Dead_in_ED"="In ED"))+
labs(x="Time (Day)", y="Patients")+
ggtitle("Cumulative deaths by location")+
theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), legend.position = c(0.25, 0.65), legend.text=element_text(size=11), legend.title=element_text(size=8),legend.background = element_rect(fill="transparent"))
p4 <-ggplot(hospital_melt,
aes(x=time,y=value, color=variable))+
geom_line(data= hospital_melt[hospital_melt$variable %in% c("CTotal", "FTotal", "totalWC", "totalWF"),],size=1.5)+
theme_bw(base_size=14) +
scale_color_manual( name=element_blank(), values=c("black", "red", "grey", "pink"), labels=c("CTotal"="In ICU", "FTotal"= "On floor", "totalWC" ="Waiting for ICU beds", "totalWF"="Waiting for floor beds"))+
labs(x="Time (Day)", y="Patients")+
ggtitle("ICU and floor utilization")+
theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), legend.position = c(0.25, 0.75), legend.text=element_text(size=11), legend.title=element_text(size=8),legend.background = element_rect(fill="transparent"))+
#geom_hline(yintercept=M, linetype="dashed", color = "black", size=1.5)+
#geom_hline(yintercept=L, linetype="dashed", color = "red", size=1.5)
geom_line(data= hospital_melt[hospital_melt$variable == "capacity_L",],size=1.5, linetype="dashed", color = "red")+
geom_line(data= hospital_melt[hospital_melt$variable == "capacity_M",],size=1.5, linetype="dashed", color = "black")
### determine when the hospital exceeds capacity
#ICU queue
ICUover = (hospital$WC1+hospital$WC2+hospital$WC3>=1)
#floor queue
floorover = (hospital$WF1+hospital$WF2+hospital$WF3>=1)
if(sum(floorover)>0){
floorover<- min(which(floorover))
p4 <- p4 +annotate(geom="label", x=floorover, y=hospital$capacity_L[floorover], label=paste("Day", as.character(floorover)), size=4, color="red")
}
if(sum(ICUover)>0){
ICUover<- min(which(ICUover))
p4 <- p4 +annotate(geom="label", x=ICUover, y=hospital$capacity_M[ICUover], label=paste("Day", as.character(ICUover)), size=4)
}
list(p1, p2, p3, p4, ICUover, floorover)
}
|
## Build/check helper for the nmarank package. Run from the directory
## that contains the package checkout as a sibling ("../nmarank").
##
## (1) Make R packages available
##
library(devtools)
library(roxygen2)
##
## (2) Create documentation file(s)
##
document("../nmarank")
##
## (3) Build R package and PDF file with help pages
##
build("../nmarank")
build_manual("../nmarank")
##
## (4) Install R package
##
install("../nmarank")
##
## (5) Check R package
##
## Fix: the package path was missing here, so this step checked the
## current working directory instead of "../nmarank" like every other
## step. Env vars are character strings, so "TRUE" is quoted for
## consistency with "FALSE" (c() would otherwise coerce the logical).
check("../nmarank",
      env_vars = c(NOT_CRAN = "FALSE", "_R_CHECK_CRAN_INCOMING_" = "TRUE"))
##
## (6) Check R package (with dontrun examples)
##
check("../nmarank", run_dont_test = TRUE)
|
/roxygen2.R
|
no_license
|
esm-ispm-unibe-ch/nmarank
|
R
| false
| false
| 503
|
r
|
##
## (1) Make R packages available
##
library(devtools)
library(roxygen2)
##
## (2) Create documentation file(s)
##
document("../nmarank")
##
## (3) Build R package and PDF file with help pages
##
build("../nmarank")
build_manual("../nmarank")
##
## (4) Install R package
##
install("../nmarank")
##
## (5) Check R package
##
check(env_vars = c(NOT_CRAN = "FALSE","_R_CHECK_CRAN_INCOMING_"=TRUE))
##
## (6) Check R package (with dontrun examples)
##
check("../nmarank", run_dont_test = TRUE)
|
# Register memisc summary templates for lme4 fits so that model summary
# tables display log-likelihood, deviance, AIC, BIC and N.
# "($stat:f#)" formats a numeric statistic, "($N:d)" an integer;
# the named statistics are taken from getSummary.merMod()'s sumstat.
# Template for linear mixed models (lmer fits).
setSummaryTemplate(lmerMod = c("Log-likelihood" = "($logLik:f#)",
                               "Deviance" = "($deviance:f#)",
                               "AIC" = "($AIC:f#)",
                               "BIC" = "($BIC:f#)",
                               N = "($N:d)"))
# Identical template for generalized linear mixed models (glmer fits).
setSummaryTemplate(glmerMod = c("Log-likelihood" = "($logLik:f#)",
                                "Deviance" = "($deviance:f#)",
                                "AIC" = "($AIC:f#)",
                                "BIC" = "($BIC:f#)",
                                N = "($N:d)"))
# getSummary method (memisc) for lme4 'merMod' objects (lmer/glmer fits).
#
# Returns a list with a 3-d coefficient array ("est","se","stat","p",
# "lwr","upr" by parameter by response), a matching array of variance
# components, group counts, fit statistics, contrasts, factor levels,
# and the model call -- the structure memisc's summary machinery expects.
# Confidence limits use a normal approximation at level 1 - alpha.
getSummary.merMod <- function (obj, alpha = 0.05, ...) {
  smry <- summary(obj)
  coef <- smry$coefficients
  # Wald-type (1 - alpha) confidence limits from estimate and std. error.
  lower <- qnorm(p = alpha/2, mean = coef[, 1], sd = coef[,2])
  upper <- qnorm(p = 1 - alpha/2, mean = coef[, 1], sd = coef[,2])
  if (ncol(coef) == 3) {
    # lmer fits report no p-values; add a two-sided normal p from the
    # t/z statistic in column 3.
    p <- (1 - pnorm(abs(coef[, 3]))) * 2
    coef <- cbind(coef, p, lower, upper)
  }
  else {
    # glmer fits already carry a p-value column; just append the limits.
    coef <- cbind(coef, lower, upper)
  }
  # Third dimension is the response name (first column of the model frame).
  dn.cf <- list(
    rownames(coef),
    c("est","se","stat","p","lwr","upr"),
    names(obj@frame)[1]
  )
  dim(coef) <- c(dim(coef)[1],dim(coef)[2],1)
  dimnames(coef) <- dn.cf
  # Collect random-effect variances/covariances per grouping level,
  # labelled "Var(~term|group)" / "Cov(~t1:t2|group)". Only the "est"
  # column is filled; the rest stay NA.
  varcor <- smry$varcor
  VarPar <- NULL
  for(i in seq_along(varcor)){
    vc.i <- varcor[[i]]
    lv.i <- names(varcor)[i]
    vr.i <- diag(vc.i)
    cv.i <- vc.i[lower.tri(vc.i)]
    nms.i <- rownames(vc.i)
    # Label random intercepts as "1", formula-style.
    nms.i <- gsub("(Intercept)","1",nms.i,fixed=TRUE)
    vrnames.i <- paste0("Var(~",nms.i,"|",lv.i,")")
    cvnames.i <- t(outer(nms.i,nms.i,FUN=paste,sep=":"))
    cvnames.i <- cvnames.i[lower.tri(cvnames.i)]
    if(length(cvnames.i))
      cvnames.i <- paste0("Cov(~",cvnames.i,"|",lv.i,")")
    vp.i <- matrix(NA,nrow=length(vr.i)+length(cv.i),ncol=6)
    vp.i[,1] <- c(vr.i,cv.i)
    dim(vp.i) <- c(dim(vp.i),1)
    dimnames(vp.i) <- list(c(vrnames.i,cvnames.i),
                           c("est","se","stat","p","lwr","upr"),
                           names(obj@frame)[1])
    # rabind2() is a package-internal row-bind for these 3-d arrays.
    VarPar <- rabind2(VarPar,vp.i)
  }
  # Residual variance; sigma == 1 flags families without a free scale.
  if(smry$sigma!=1){
    vp.i <- matrix(NA,nrow=1,ncol=6)
    vp.i[1] <- smry$sigma^2
    dim(vp.i) <- c(dim(vp.i),1)
    dimnames(vp.i) <- list("Var(residual)",
                           c("est","se","stat","p","lwr","upr"),
                           names(obj@frame)[1])
    VarPar <- rabind2(VarPar,vp.i)
  }
  ## Factor levels.
  # NOTE(review): the loop variable 'c' shadows base::c inside the loop
  # body; harmless here (c() is not called inside) but worth renaming.
  xlevels <- list()
  Contr <- names(attr(model.matrix(obj), "contrasts"))
  for (c in Contr) xlevels[[c]] <- levels(obj@frame[,c])
  ## Model fit statistics.
  ll <- logLik(obj)[1]
  # For REML fits, deviance() would be misleading; report the REML
  # criterion instead.
  isREML <- inherits(obj@resp,"lmerResp") && obj@resp$REML > 0
  if(!isREML)
    deviance <- deviance(obj)
  else
    deviance <- lme4::REMLcrit(obj)
  AIC <- AIC(obj)
  BIC <- BIC(obj)
  # Number of groups per random-effect grouping factor.
  G <-as.integer(smry$ngrps)
  names(G) <- names(smry$ngrps)
  sumstat <- c(logLik = ll,
               deviance = deviance,
               AIC = AIC,
               BIC = BIC,
               N=nobs(obj))
  ## Return model summary.
  ans <- list(coef= coef)
  ans <- c(ans,list(Variances=VarPar))
  ans <- c(ans,
           list(Groups = G,
                sumstat = sumstat,
                contrasts = Contr, ## Reuse 'Contr'
                xlevels = xlevels,
                call = obj@call))
  return(ans)
}
|
/pkg/R/yz-getSummary-merMod.R
|
no_license
|
jeffreyhanson/memisc
|
R
| false
| false
| 3,203
|
r
|
setSummaryTemplate(lmerMod = c("Log-likelihood" = "($logLik:f#)",
"Deviance" = "($deviance:f#)",
"AIC" = "($AIC:f#)",
"BIC" = "($BIC:f#)",
N = "($N:d)"))
setSummaryTemplate(glmerMod = c("Log-likelihood" = "($logLik:f#)",
"Deviance" = "($deviance:f#)",
"AIC" = "($AIC:f#)",
"BIC" = "($BIC:f#)",
N = "($N:d)"))
getSummary.merMod <- function (obj, alpha = 0.05, ...) {
smry <- summary(obj)
coef <- smry$coefficients
lower <- qnorm(p = alpha/2, mean = coef[, 1], sd = coef[,2])
upper <- qnorm(p = 1 - alpha/2, mean = coef[, 1], sd = coef[,2])
if (ncol(coef) == 3) {
p <- (1 - pnorm(abs(coef[, 3]))) * 2
coef <- cbind(coef, p, lower, upper)
}
else {
coef <- cbind(coef, lower, upper)
}
dn.cf <- list(
rownames(coef),
c("est","se","stat","p","lwr","upr"),
names(obj@frame)[1]
)
dim(coef) <- c(dim(coef)[1],dim(coef)[2],1)
dimnames(coef) <- dn.cf
varcor <- smry$varcor
VarPar <- NULL
for(i in seq_along(varcor)){
vc.i <- varcor[[i]]
lv.i <- names(varcor)[i]
vr.i <- diag(vc.i)
cv.i <- vc.i[lower.tri(vc.i)]
nms.i <- rownames(vc.i)
nms.i <- gsub("(Intercept)","1",nms.i,fixed=TRUE)
vrnames.i <- paste0("Var(~",nms.i,"|",lv.i,")")
cvnames.i <- t(outer(nms.i,nms.i,FUN=paste,sep=":"))
cvnames.i <- cvnames.i[lower.tri(cvnames.i)]
if(length(cvnames.i))
cvnames.i <- paste0("Cov(~",cvnames.i,"|",lv.i,")")
vp.i <- matrix(NA,nrow=length(vr.i)+length(cv.i),ncol=6)
vp.i[,1] <- c(vr.i,cv.i)
dim(vp.i) <- c(dim(vp.i),1)
dimnames(vp.i) <- list(c(vrnames.i,cvnames.i),
c("est","se","stat","p","lwr","upr"),
names(obj@frame)[1])
VarPar <- rabind2(VarPar,vp.i)
}
if(smry$sigma!=1){
vp.i <- matrix(NA,nrow=1,ncol=6)
vp.i[1] <- smry$sigma^2
dim(vp.i) <- c(dim(vp.i),1)
dimnames(vp.i) <- list("Var(residual)",
c("est","se","stat","p","lwr","upr"),
names(obj@frame)[1])
VarPar <- rabind2(VarPar,vp.i)
}
## Factor levels.
xlevels <- list()
Contr <- names(attr(model.matrix(obj), "contrasts"))
for (c in Contr) xlevels[[c]] <- levels(obj@frame[,c])
## Model fit statistics.
ll <- logLik(obj)[1]
isREML <- inherits(obj@resp,"lmerResp") && obj@resp$REML > 0
if(!isREML)
deviance <- deviance(obj)
else
deviance <- lme4::REMLcrit(obj)
AIC <- AIC(obj)
BIC <- BIC(obj)
G <-as.integer(smry$ngrps)
names(G) <- names(smry$ngrps)
sumstat <- c(logLik = ll,
deviance = deviance,
AIC = AIC,
BIC = BIC,
N=nobs(obj))
## Return model summary.
ans <- list(coef= coef)
ans <- c(ans,list(Variances=VarPar))
ans <- c(ans,
list(Groups = G,
sumstat = sumstat,
contrasts = Contr, ## Reuse 'Contr'
xlevels = xlevels,
call = obj@call))
return(ans)
}
|
#' Box-Cox transformation of a vector/number
#'
#' Transforms via the one-parameter Box-Cox transform:
#' \eqn{(x^\lambda - 1)/\lambda} for \eqn{\lambda \ne 0}, and
#' \eqn{\log(x)} for \eqn{\lambda = 0}.
#'
#' @param x The numeric vector to be transformed. Default is 1.
#' @param lambda The parameter of the Box-Cox transformation. Default is 1.
#'
#' @return A vector/number that is the Box-Cox transformation of \code{x},
#'   the same length as \code{x}.
#'
#' @details
#' Only the one-parameter (single-input) Box-Cox transform is implemented.
#' When \code{lambda == 0} all non-missing values of \code{x} must be
#' strictly positive, since the transform reduces to the natural log;
#' \code{NA} values pass through unchanged.
#'
#' @examples
#' boxcox(1:10, 2)
#' boxcox(2, 0)
#' @export
boxcox <- function(x = 1, lambda = 1){
  if(lambda == 0){
    # log() is only defined for positive inputs; reject zeros/negatives
    # up front (equivalent to the old length-comparison check, but
    # states the intent directly).
    if(any(x <= 0, na.rm = TRUE)){
      stop('Please input positive number!\n')
    }
    res <- log(x)
  }else{
    res <- (x^lambda - 1)/lambda
  }
  return(res)
}
|
/R/boxcox.R
|
no_license
|
zxkathy/powers
|
R
| false
| false
| 683
|
r
|
#' Boxcox transformation of a vector/number
#'
#' Transforms via the Box-Cox transform.
#'
#' @param x The vector to be boxcoxed boxcoxed. Default is 1.
#' @param lambda The parameter of Box–Cox transformation. Default is 1.
#'
#' @return A vector/number that is the boxcox boxcox transformation of \code{x}.
#'
#' @details
#' We only do the boxcox transformations that only requires one input.
#'
#' @examples
#' boxcox(1:10, 2)
#' boxcox(2, 0)
#' @export
boxcox <- function(x = 1, lambda = 1){
if(lambda == 0){
if(length(x[x > 0]) != length(x)){
stop('Please input positive number!\n')
}else{
res = log(x)
}
}else{
res = (x^lambda - 1)/lambda
}
return(res)
}
|
# ------------------------ #
#   Assess Variants in     #
#    Sequencing Runs       #
#    Literature Data       #
#        K. Sumner         #
#   September 12, 2019     #
# ------------------------ #
# Merges csp variant positions called in three literature sources
# (Neafsey, PlasmoDB, Pf3K) into one table and reports their overlap.

#### ------- read in the libraries ------- ####
library(tidyverse)

#### ------ read in the variant tables ------- ####
# NOTE(review): paths mix relative ("Desktop/...") and absolute
# ("/Users/kelseysumner/...") locations, so this script only runs on
# one specific machine/working directory.
# read in the variants from the Neafsey data
neafsey_variants = read_tsv("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/literature_csp_variants/neafsey_haplotype_output/final_censored_output/forward_csp_final_results/neafsey_forward_snp_report")
# read in the variants from the plasmodb data
plasmodb_variants = read_csv("/Users/kelseysumner/Desktop/literature_csp_variants/plasmodb_variant_output/plasmo_db_variants_10SEPT2019.csv")
# read in the variants from the pf3k data
pf3k_variants = read_csv("/Users/kelseysumner/Desktop/literature_csp_variants/pf3k_variant_output/Pf3K csp variant table 30JUL2019.csv")

#### ------- create a merged literature file for the variants -------- ####
# Standardize each table to two columns: the reference position
# ("Ref Pos") and a presence indicator (1) for that source.
plasmodb_variants = plasmodb_variants %>%
  dplyr::rename("Ref Pos"="final ref position") %>%
  mutate("present_in_plasmodb" = rep(1,nrow(plasmodb_variants))) %>%
  select("Ref Pos","present_in_plasmodb")
pf3k_variants = pf3k_variants %>%
  dplyr::rename("Ref Pos"="finalRefPosition") %>%
  mutate("present_in_pf3k" = rep(1,nrow(pf3k_variants))) %>%
  select("Ref Pos","present_in_pf3k")
neafsey_variants = neafsey_variants %>%
  mutate("present_in_neafsey" = rep(1,nrow(neafsey_variants))) %>%
  select("Ref Pos","present_in_neafsey")
# Full (outer) joins keep positions found in any source; a source that
# lacks a position shows NA in its indicator column.
merge1 = full_join(neafsey_variants,plasmodb_variants,by="Ref Pos")
final_merge_variants = full_join(merge1,pf3k_variants,by="Ref Pos")
# reorder the file to be in numeric order of reference position
final_merge_variants = final_merge_variants[order(final_merge_variants$`Ref Pos`),]
# calculate how much overlap was found across literature sources
length(which(final_merge_variants$present_in_neafsey == 1 & final_merge_variants$present_in_pf3k == 1 & final_merge_variants$present_in_plasmodb == 1))
# 21/57 (36.8%) variants found in all literature sources
# write out as a final merged file
write_csv(final_merge_variants,"Desktop/literature_csp_variants_merged.csv")
|
/SpatialR21_project/code/miscellaneous/assess_variants_in_literature.R
|
no_license
|
kelseysumner/taylorlab
|
R
| false
| false
| 2,333
|
r
|
# ------------------------ #
# Assess Variants in #
# Sequencing Runs #
# Literature Data #
# K. Sumner #
# September 12, 2019 #
# ------------------------ #
#### ------- read in the libraries ------- ####
library(tidyverse)
#### ------ read in the variant tables ------- ####
# read in the variants from the Neafsey data
neafsey_variants = read_tsv("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/literature_csp_variants/neafsey_haplotype_output/final_censored_output/forward_csp_final_results/neafsey_forward_snp_report")
# read in the variants from the plasmodb data
plasmodb_variants = read_csv("/Users/kelseysumner/Desktop/literature_csp_variants/plasmodb_variant_output/plasmo_db_variants_10SEPT2019.csv")
# read in the variants from the pf3k data
pf3k_variants = read_csv("/Users/kelseysumner/Desktop/literature_csp_variants/pf3k_variant_output/Pf3K csp variant table 30JUL2019.csv")
#### ------- create a merged literature file for the variants -------- ####
# set up the plasmodb and pf3k data sets for merging
plasmodb_variants = plasmodb_variants %>%
dplyr::rename("Ref Pos"="final ref position") %>%
mutate("present_in_plasmodb" = rep(1,nrow(plasmodb_variants))) %>%
select("Ref Pos","present_in_plasmodb")
pf3k_variants = pf3k_variants %>%
dplyr::rename("Ref Pos"="finalRefPosition") %>%
mutate("present_in_pf3k" = rep(1,nrow(pf3k_variants))) %>%
select("Ref Pos","present_in_pf3k")
neafsey_variants = neafsey_variants %>%
mutate("present_in_neafsey" = rep(1,nrow(neafsey_variants))) %>%
select("Ref Pos","present_in_neafsey")
# now merge the three files together
merge1 = full_join(neafsey_variants,plasmodb_variants,by="Ref Pos")
final_merge_variants = full_join(merge1,pf3k_variants,by="Ref Pos")
# reorder the file to be in numeric order
final_merge_variants = final_merge_variants[order(final_merge_variants$`Ref Pos`),]
# calculate how much overlap was found across literature values
length(which(final_merge_variants$present_in_neafsey == 1 & final_merge_variants$present_in_pf3k == 1 & final_merge_variants$present_in_plasmodb == 1))
# 21/57 (36.8%) variants found in all literature sources
# write out as a final merged file
write_csv(final_merge_variants,"Desktop/literature_csp_variants_merged.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clarifai.R
\name{clarifai_check_auth}
\alias{clarifai_check_auth}
\title{Check if authentication information is in the environment}
\usage{
clarifai_check_auth()
}
\description{
Check if authentication information is in the environment
}
|
/man/clarifai_check_auth.Rd
|
permissive
|
cran/clarifai
|
R
| false
| true
| 327
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clarifai.R
\name{clarifai_check_auth}
\alias{clarifai_check_auth}
\title{Check if authentication information is in the environment}
\usage{
clarifai_check_auth()
}
\description{
Check if authentication information is in the environment
}
|
################################################################################
#
# Social interaction model: Sweep beta and group size parameter space
#
################################################################################
# NOTE(review): rm(list = ls()) wipes the session's global environment;
# it makes the script fragile when sourced from an interactive session.
rm(list = ls())
####################
# Source necessary scripts/libraries
####################
# __Util__MASTER.R defines the model helpers used below (seed_stimuls,
# update_stim, temporalNetwork, mutualEntropy, ...).
source("scripts/util/__Util__MASTER.R")
library(parallel)
library(snowfall)
####################
# Set global variables
####################
# Initial parameters: free to change
# Base parameters
Ns <- seq(5, 100, 5) #vector of group sizes (number of individuals) to simulate
m <- 2 #number of tasks
Tsteps <- 50000 #number of time steps to run each simulation
reps <- 100 #number of replications per parameter combination (for ensemble)
# Threshold parameters
ThreshM <- rep(50, m) #population threshold means
ThreshSD <- ThreshM * 0 #population threshold standard deviations (0 = identical thresholds)
InitialStim <- rep(0, m) #initial vector of stimuli
deltas <- rep(0.8, m) #vector of stimuli increase rates
alpha <- m #efficiency of task performance
quitP <- 0.2 #probability of quitting a task once active
# Social network parameters
p <- 1 #baseline probability of initiating an interaction per time step
epsilon <- -0.1 #relative weighting of social interactions for adjusting thresholds
betas <- seq(1.05, 1.09, 0.01) #probability of interacting with an individual in the same state relative to others
####################
# Prep for Parallelization
####################
# Create all (group size, beta) combinations; each combination becomes
# one parallel job below.
run_in_parallel <- expand.grid(n = Ns, beta = betas)
run_in_parallel <- run_in_parallel %>%
  arrange(n)
# Create directory for depositing data
storage_path <- "/scratch/gpfs/ctokita/"
file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
full_path <- paste0(storage_path, file_name, '/')
dir.create(full_path, showWarnings = FALSE)
# Resume support: parse n and beta back out of already-written
# "n###-beta#.##.Rdata" filenames and drop those combinations.
# NOTE(review): perl = T uses the abbreviation T for TRUE; spell out
# TRUE if this is ever edited.
files <- list.files(full_path)
completed_runs <- data.frame(n = as.numeric(gsub(x = files, "n([0-9]+)-.*", "\\1", perl = T)))
completed_runs$beta <- as.numeric(gsub(x = files, ".*-beta([\\.0-9]+).Rdata$", "\\1", perl = T))
run_in_parallel <- anti_join(run_in_parallel, completed_runs, by = c("n", "beta"))
# Start a snowfall cluster on every available core and export the full
# global environment plus the packages the worker function needs.
no_cores <- detectCores()
sfInit(parallel = TRUE, cpus = no_cores)
sfExportAll()
sfLibrary(dplyr)
sfLibrary(reshape2)
sfLibrary(igraph)
sfLibrary(ggplot2)
sfLibrary(msm)
sfLibrary(gtools)
sfLibrary(snowfall)
sfLibrary(tidyr)
sfLibrary(stringr)
# sfClusterSetupRNGstream(seed = 100)
####################
# Run ensemble simulation
####################
# Loop through group size and beta combinations (in parallel chunks)
# Each list element handles one (group size n, homophily bias beta) row
# of run_in_parallel; model helpers come from __Util__MASTER.R.
parallel_simulations <- sfLapply(1:nrow(run_in_parallel), function(k) {
  # Set group size and interaction bias for this job
  n <- run_in_parallel[k, 1]
  beta <- run_in_parallel[k, 2]
  # Prep lists for collection of simulation outputs from this group size
  ens_entropy <- list()
  # Run Simulations ('reps' independent replicates)
  for (sim in 1:reps) {
    ####################
    # Seed structures and initial matrices
    ####################
    # Set initial probability matrix (P_g): n individuals x m tasks
    P_g <- matrix(data = rep(0, n * m), ncol = m)
    # Seed task (external) stimuli
    # NOTE(review): seed_stimuls()/'intitial_stim' are (misspelled)
    # names defined in the sourced utilities -- do not "fix" here alone.
    stimMat <- seed_stimuls(intitial_stim = InitialStim,
                            Tsteps = Tsteps)
    # Seed internal thresholds
    threshMat <- seed_thresholds(n = n,
                                 m = m,
                                 threshold_means = ThreshM,
                                 threshold_sds = ThreshSD)
    # Start task performance (all individuals initially inactive)
    X_g <- matrix(data = rep(0, length(P_g)), ncol = ncol(P_g))
    # Create cumulative task performance matrix
    X_tot <- X_g
    # Create cumulative adjacency matrix (interactions summed over time)
    g_tot <- matrix(data = rep(0, n * n), ncol = n)
    colnames(g_tot) <- paste0("v-", 1:n)
    rownames(g_tot) <- paste0("v-", 1:n)
    ####################
    # Simulate individual run
    ####################
    # Run simulation
    for (t in 1:Tsteps) {
      # Current timestep is actually t+1 in this formulation, because first row is timestep 0
      # Update stimuli
      stimMat <- update_stim(stim_matrix = stimMat,
                             deltas = deltas,
                             alpha = alpha,
                             state_matrix = X_g,
                             time_step = t)
      # Calculate task demand based on global stimuli
      P_g <- calc_determ_thresh(time_step = t + 1, # first row is generation 0
                                threshold_matrix = threshMat,
                                stimulus_matrix = stimMat)
      # Update task performance
      X_g <- update_task_performance(task_probs = P_g,
                                     state_matrix = X_g,
                                     quit_prob = quitP)
      # Update social network (previously this was before probability/task update)
      g_adj <- temporalNetwork(X_sub_g = X_g,
                               prob_interact = p,
                               bias = beta)
      g_tot <- g_tot + g_adj
      # Adjust thresholds in response to this step's interactions
      threshMat <- adjust_thresholds_social_capped(social_network = g_adj,
                                                   threshold_matrix = threshMat,
                                                   state_matrix = X_g,
                                                   epsilon = epsilon,
                                                   threshold_max = 100)
      # Update total task performance profile
      X_tot <- X_tot + X_g
    }
    ####################
    # Post run calculations
    ####################
    # Calculate entropy (division-of-labor metrics) from total task performance
    entropy <- as.data.frame(mutualEntropy(TotalStateMat = X_tot))
    entropy$n <- n
    entropy$beta <- beta
    # Add entropy values to list
    ens_entropy[[sim]] <- entropy
    # Clean up per-replicate objects before the next run
    rm(X_tot, stimMat, threshMat, g_tot, g_adj, P_g, X_g)
  }
  # Bind together and summarise across the ensemble
  # NOTE(review): the column name 'Dysm_SD' (vs. Dsym_SD) is a typo that
  # downstream code may depend on; left unchanged.
  entropy_sum <- do.call("rbind", ens_entropy)
  entropy_sum <- entropy_sum %>%
    group_by(n, beta) %>%
    summarise(Dsym_mean = mean(Dsym),
              Dysm_SD = sd(Dsym),
              Dtask_mean = mean(Dtask),
              Dtask_SD = sd(Dtask),
              Dind_mean = mean(Dind),
              Dind_SD = sd(Dind))
  entropy_sum <- as.data.frame(entropy_sum)
  # Save one .Rdata file per (n, beta) combination; zero-padded n keeps
  # filenames sortable and matches the resume regex earlier in the file.
  save(entropy_sum, file = paste0(full_path,
                                  "n",
                                  str_pad(string = n, width = 3, pad = "0"),
                                  "-beta",
                                  beta,
                                  ".Rdata"))
  Sys.sleep(1)
})
sfStop()
# Bind and save
# parallel_data <- do.call('rbind', parallel_simulations)
# Create directory for depositing data
# storage_path <- "/scratch/gpfs/ctokita/"
# file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
# full_path <- paste0(storage_path, file_name, '.Rdata')
# save(parallel_data, file = full_path)
|
/scripts/3_para_sweep/3a_BetaParaSweep_NegEpsilon_2.R
|
no_license
|
christokita/socially-modulated-threshold-model
|
R
| false
| false
| 7,036
|
r
|
################################################################################
#
# Social interaction model: Sweep beta and group size parameter space
#
################################################################################
rm(list = ls())
####################
# Source necessary scripts/libraries
####################
source("scripts/util/__Util__MASTER.R")
library(parallel)
library(snowfall)
####################
# Set global variables
####################
# Initial paramters: Free to change
# Base parameters
Ns <- seq(5, 100, 5) #vector of number of individuals to simulate
m <- 2 #number of tasks
Tsteps <- 50000 #number of time steps to run simulation
reps <- 100 #number of replications per simulation (for ensemble)
# Threshold Parameters
ThreshM <- rep(50, m) #population threshold means
ThreshSD <- ThreshM * 0 #population threshold standard deviations
InitialStim <- rep(0, m) #intital vector of stimuli
deltas <- rep(0.8, m) #vector of stimuli increase rates
alpha <- m #efficiency of task performance
quitP <- 0.2 #probability of quitting task once active
# Social Network Parameters
p <- 1 #baseline probablity of initiating an interaction per time step
epsilon <- -0.1 #relative weighting of social interactions for adjusting thresholds
betas <- seq(1.05, 1.09, 0.01) #probability of interacting with individual in same state relative to others
####################
# Prep for Parallelization
####################
# Create parameter combinations for parallelization
run_in_parallel <- expand.grid(n = Ns, beta = betas)
run_in_parallel <- run_in_parallel %>%
arrange(n)
# Create directory for depositing data
storage_path <- "/scratch/gpfs/ctokita/"
file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
full_path <- paste0(storage_path, file_name, '/')
dir.create(full_path, showWarnings = FALSE)
# Check if there is already some runs done
files <- list.files(full_path)
completed_runs <- data.frame(n = as.numeric(gsub(x = files, "n([0-9]+)-.*", "\\1", perl = T)))
completed_runs$beta <- as.numeric(gsub(x = files, ".*-beta([\\.0-9]+).Rdata$", "\\1", perl = T))
run_in_parallel <- anti_join(run_in_parallel, completed_runs, by = c("n", "beta"))
# Prepare for parallel
no_cores <- detectCores()
sfInit(parallel = TRUE, cpus = no_cores)
sfExportAll()
sfLibrary(dplyr)
sfLibrary(reshape2)
sfLibrary(igraph)
sfLibrary(ggplot2)
sfLibrary(msm)
sfLibrary(gtools)
sfLibrary(snowfall)
sfLibrary(tidyr)
sfLibrary(stringr)
# sfClusterSetupRNGstream(seed = 100)
####################
# Run ensemble simulation
####################
# Loop through group sizes (and chunks). Each worker task handles one
# (n, beta) combination: it runs `reps` replicate simulations, summarises
# the entropy measures across replicates, and saves the summary to its own
# .Rdata file. Results are written from inside the workers; the lapply
# return value is not used.
parallel_simulations <- sfLapply(1:nrow(run_in_parallel), function(k) {
  # Set group size and interaction bias for this task
  n <- run_in_parallel[k, 1]
  beta <- run_in_parallel[k, 2]
  # Prep lists for collection of simulation outputs from this group size
  ens_entropy <- list()
  # Run Simulations
  for (sim in 1:reps) {
    ####################
    # Seed structures and initial matrices
    ####################
    # Set initial probability matrix (P_g): n individuals x m tasks
    P_g <- matrix(data = rep(0, n * m), ncol = m)
    # Seed task (external) stimuli (row per time step; helper defined earlier)
    stimMat <- seed_stimuls(intitial_stim = InitialStim,
                            Tsteps = Tsteps)
    # Seed internal thresholds (one row per individual, one column per task)
    threshMat <- seed_thresholds(n = n,
                                 m = m,
                                 threshold_means = ThreshM,
                                 threshold_sds = ThreshSD)
    # Start task performance (all individuals inactive)
    X_g <- matrix(data = rep(0, length(P_g)), ncol = ncol(P_g))
    # Create cumulative task performance matrix
    X_tot <- X_g
    # Create cumulative adjacency matrix (summed interactions over the run)
    g_tot <- matrix(data = rep(0, n * n), ncol = n)
    colnames(g_tot) <- paste0("v-", 1:n)
    rownames(g_tot) <- paste0("v-", 1:n)
    ####################
    # Simulate individual run
    ####################
    # Run simulation
    for (t in 1:Tsteps) {
      # Current timestep is actually t+1 in this formulation, because first row is timestep 0
      # Update stimuli
      stimMat <- update_stim(stim_matrix = stimMat,
                             deltas = deltas,
                             alpha = alpha,
                             state_matrix = X_g,
                             time_step = t)
      # Calculate task demand based on global stimuli
      P_g <- calc_determ_thresh(time_step = t + 1, # first row is generation 0
                                threshold_matrix = threshMat,
                                stimulus_matrix = stimMat)
      # Update task performance
      X_g <- update_task_performance(task_probs = P_g,
                                     state_matrix = X_g,
                                     quit_prob = quitP)
      # Update social network (previously this was before probability/task update)
      g_adj <- temporalNetwork(X_sub_g = X_g,
                               prob_interact = p,
                               bias = beta)
      g_tot <- g_tot + g_adj
      # Adjust thresholds based on this step's interactions, capped at 100
      threshMat <- adjust_thresholds_social_capped(social_network = g_adj,
                                                   threshold_matrix = threshMat,
                                                   state_matrix = X_g,
                                                   epsilon = epsilon,
                                                   threshold_max = 100)
      # Update total task performance profile
      X_tot <- X_tot + X_g
    }
    ####################
    # Post run calculations
    ####################
    # Calculate Entropy from the cumulative task-performance matrix
    entropy <- as.data.frame(mutualEntropy(TotalStateMat = X_tot))
    entropy$n <- n
    entropy$beta <- beta
    # Add entropy values to list
    ens_entropy[[sim]] <- entropy
    # Clean up per-replicate objects before the next replicate
    rm(X_tot, stimMat, threshMat, g_tot, g_adj, P_g, X_g)
  }
  # Bind together and summarise means/SDs of the entropy measures across
  # replicates. NOTE(review): the output column "Dysm_SD" looks like a typo
  # for "Dsym_SD" -- downstream code reads this name, so it is left as-is.
  entropy_sum <- do.call("rbind", ens_entropy)
  entropy_sum <- entropy_sum %>%
    group_by(n, beta) %>%
    summarise(Dsym_mean = mean(Dsym),
              Dysm_SD = sd(Dsym),
              Dtask_mean = mean(Dtask),
              Dtask_SD = sd(Dtask),
              Dind_mean = mean(Dind),
              Dind_SD = sd(Dind))
  entropy_sum <- as.data.frame(entropy_sum)
  # Save this (n, beta) summary under a parseable file name, e.g.
  # "n040-beta1.05.Rdata" (the prep step above relies on this pattern
  # to skip completed runs).
  save(entropy_sum, file = paste0(full_path,
                                  "n",
                                  str_pad(string = n, width = 3, pad = "0"),
                                  "-beta",
                                  beta,
                                  ".Rdata"))
  Sys.sleep(1)
})
sfStop()
# Bind and save
# The aggregation below is unnecessary because each worker saves its own
# summary file inside the sfLapply call above; kept for reference.
# parallel_data <- do.call('rbind', parallel_simulations)
# Create directory for depositing data
# storage_path <- "/scratch/gpfs/ctokita/"
# file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
# full_path <- paste0(storage_path, file_name, '.Rdata')
# save(parallel_data, file = full_path)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_slurm_out.R
\name{get_slurm_out}
\alias{get_slurm_out}
\title{Reads the output of a function calculated on the SLURM cluster}
\usage{
get_slurm_out(slr_job, outtype = "raw")
}
\arguments{
\item{slr_job}{A \code{slurm_job} object.}
\item{outtype}{Can be "table" or "raw", see "Value" below for details.}
}
\value{
If \code{outtype = "table"}: A data frame with one column per
return value of the function passed to \code{slurm_apply}, where
each row is the output of the corresponding row in the params data frame
passed to \code{slurm_apply}.
If \code{outtype = "raw"}: A list where each element is the output
of the function passed to \code{slurm_apply} for the corresponding
row in the params data frame passed to \code{slurm_apply}.
}
\description{
This function reads all function output files (one by cluster node used) from
the specified SLURM job and returns the result in a single data frame
(if "table" format selected) or list (if "raw" format selected). It doesn't
record any messages (including warnings or errors) output to the R console
during the computation; these can be consulted by invoking
\code{\link{print_job_status}}.
}
\details{
The \code{outtype} option is only relevant for jobs submitted with
\code{slurm_apply}. Jobs sent with \code{slurm_call} only return a single
object, and setting \code{outtype = "table"} creates an error in that case.
}
\seealso{
\code{\link{slurm_apply}}, \code{\link{slurm_call}}
}
|
/man/get_slurm_out.Rd
|
no_license
|
calufrax/rslurm
|
R
| false
| true
| 1,543
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_slurm_out.R
\name{get_slurm_out}
\alias{get_slurm_out}
\title{Reads the output of a function calculated on the SLURM cluster}
\usage{
get_slurm_out(slr_job, outtype = "raw")
}
\arguments{
\item{slr_job}{A \code{slurm_job} object.}
\item{outtype}{Can be "table" or "raw", see "Value" below for details.}
}
\value{
If \code{outtype = "table"}: A data frame with one column per
return value of the function passed to \code{slurm_apply}, where
each row is the output of the corresponding row in the params data frame
passed to \code{slurm_apply}.
If \code{outtype = "raw"}: A list where each element is the output
of the function passed to \code{slurm_apply} for the corresponding
row in the params data frame passed to \code{slurm_apply}.
}
\description{
This function reads all function output files (one by cluster node used) from
the specified SLURM job and returns the result in a single data frame
(if "table" format selected) or list (if "raw" format selected). It doesn't
record any messages (including warnings or errors) output to the R console
during the computation; these can be consulted by invoking
\code{\link{print_job_status}}.
}
\details{
The \code{outtype} option is only relevant for jobs submitted with
\code{slurm_apply}. Jobs sent with \code{slurm_call} only return a single
object, and setting \code{outtype = "table"} creates an error in that case.
}
\seealso{
\code{\link{slurm_apply}}, \code{\link{slurm_call}}
}
|
#' @export
#' @import rstan
#' @import parallel
#' @import R1magic
#' @import HDInterval
#' @import dplyr
#' @import reshape2
#' @useDynLib bnets, .registration = TRUE
proj_pred_blasso <- function(x, prior_scale){
  # Projection predictive variable selection for each node of a Bayesian
  # LASSO network fit `x` (as produced elsewhere in this package).
  # For every node, the full node-wise regression posterior is projected
  # onto nested submodels chosen by greedy forward search (lm_fprojsel),
  # and MSE / mean log predictive density are recorded along the path.
  #
  # Args:
  #   x           fitted bnets object; must carry x$stan_dat$K (number of
  #               nodes), x$stan_dat$N (observations) and x$stan_dat$X (data).
  #   prior_scale prior scale passed through to extract_BETA().
  # Returns a list with `search_results` (per-node selection path, KL, MSE,
  # MLPD) and `beta_list` (per-node posterior draws).
  nodes <- 1:x$stan_dat$K
  beta_list <- list()
  search_path <- list()
  list_res <- list()
  d <- max(nodes) - 1
  for(i in 1:length(nodes)){
    # Extract the posterior ONCE per node. (The original code called
    # extract_BETA three times with identical arguments, redoing the same
    # extraction for the coefficients, intercept, and sigma.)
    post <- extract_BETA(x, nodes = nodes[i],
                         prior_scale = prior_scale, prob = .5)
    b_temp <- post$posterior_sample_BETA
    int_temp <- post$posterior_samples_not_BETA[,1]
    sigma_temp <- post$posterior_samples_not_BETA[,2]
    beta_list[[i]] <- cbind(int_temp, b_temp, sigma_temp)
  }
  names(beta_list) <- colnames(x$stan_dat$X)
  # Greedy forward search over predictors for each node's regression.
  # Coefficient draws are stacked as (intercept; betas) x draws, matching
  # the design matrix cbind(1, X[,-i]).
  for(i in 1:length(beta_list)){
    temp_dat <- rbind(beta_list[[i]]$int_temp, t(beta_list[[i]] %>%
                                                   select(contains("x"))))
    search_path[[i]] <- lm_fprojsel(temp_dat, beta_list[[i]]$sigma_temp^2,
                                    cbind(rep(1, x$stan_dat$N) , x$stan_dat$X[,-i]))
  }
  # Evaluate each submodel along each node's search path: rows index the
  # submodel size (1 = intercept only), columns index the node.
  mse_mat <- matrix(NA, max(nodes), max(nodes))
  mlpd_mat <- matrix(NA, max(nodes), max(nodes))
  for(i in 1:length(search_path)){
    search_temp <- search_path[[i]]
    temp_dat <- rbind(beta_list[[i]]$int_temp, t(beta_list[[i]] %>% select(contains("x"))))
    for (k in 1:(d+1)) {
      # projected parameters for the first k chosen columns
      submodel <- lm_proj(temp_dat, beta_list[[i]]$sigma_temp^2, cbind(rep(1, x$stan_dat$N),
                                                                       x$stan_dat$X[,-i]), search_temp$chosen[1:k])
      wp <- submodel$w
      sigma2p <- submodel$sigma2
      # mean squared error of the posterior-mean prediction
      ypred <- rowMeans(cbind(rep(1, x$stan_dat$N),
                              x$stan_dat$X[,-i]) %*% wp)
      mse_mat[k,i] <- mean((x$stan_dat$X[,i]-ypred)^2)
      # mean log predictive density, averaged over posterior draws
      pd <- dnorm(x$stan_dat$X[,i], cbind(rep(1, x$stan_dat$N),
                                          x$stan_dat$X[,-i]) %*% wp, sqrt(sigma2p))
      mlpd_mat[k,i] <- mean(log(rowMeans(pd)))
    }
  }
  # Assemble per-node results; index 1 in the path is always the intercept.
  for(k in 1:ncol(mse_mat)) {
    search_temp <- search_path[[k]]
    c_name <- c("int", colnames(x$stan_dat$X[,-k]))
    list_res[[k]] <- list(spath = search_temp$chosen,
                          col_names = c_name[search_temp$chosen],
                          kl_dist = search_temp$kl, mse = mse_mat[,k], mlpd= mlpd_mat[,k])
  }
  names(list_res) <- colnames(x$stan_dat$X)
  list(search_results = list_res, beta_list = beta_list)
}
lm_fprojsel <- function(w, sigma2, x) {
  # Forward variable selection in projection space: starting from the
  # intercept-only model (column 1), greedily add the column whose inclusion
  # minimizes the KL divergence from the full model (via lm_proj).
  # Returns the selection order (`chosen`) and the KL value at each size.
  n_cols <- dim(x)[2]
  chosen <- 1  # the intercept is always in the model
  kl <- rep(0, n_cols)
  kl[1] <- lm_proj(w, sigma2, x, 1)$kl
  # Add one variable per pass until every column is included.
  for (size in 2:n_cols) {
    candidates <- setdiff(seq_len(n_cols), chosen)
    # KL divergence of each candidate submodel.
    cand_kl <- vapply(candidates, function(j) {
      lm_proj(w, sigma2, x, sort(c(chosen, j)))$kl
    }, numeric(1))
    # Take the candidate that minimizes the KL (first minimum on ties).
    best <- which.min(cand_kl)
    chosen <- c(chosen, candidates[best])
    kl[size] <- cand_kl[best]
  }
  list(chosen = chosen, kl = kl)
}
lm_proj <- function(w, sigma2, x, indproj) {
  # Project posterior draws of the full linear model onto the subspace
  # spanned by the columns of x listed in `indproj`. The intercept term is
  # assumed to be stacked into w, with a matching column of ones in x.
  # Returns the projected coefficient draws (zero-padded back to the full
  # dimension), the projected residual variances, and the estimated
  # KL divergence between the full and projected models.
  xp <- x[, indproj]
  yhat <- x %*% w                                # full-model fit, one column per draw
  wp_sub <- solve(t(xp) %*% xp, t(xp) %*% yhat)  # least-squares projection of the fit
  sigma2p <- sigma2 + colMeans((yhat - xp %*% wp_sub)^2)
  # Estimated KL divergence, averaged over the posterior draws.
  kl <- mean(0.5 * log(sigma2p / sigma2))
  # Zero-pad so the coefficients align with the full design matrix: rows not
  # in `indproj` stay zero.
  wp <- matrix(0, dim(w)[1], dim(w)[2])
  wp[indproj, ] <- wp_sub
  list(w = wp, sigma2 = sigma2p, kl = kl)
}
|
/R/proj_pred_blasso.R
|
no_license
|
donaldRwilliams/bnets
|
R
| false
| false
| 4,481
|
r
|
#' @export
#' @import rstan
#' @import parallel
#' @import R1magic
#' @import HDInterval
#' @import dplyr
#' @import reshape2
#' @useDynLib bnets, .registration = TRUE
proj_pred_blasso <- function(x, prior_scale){
  # Projection predictive variable selection for each node of a Bayesian
  # LASSO network fit `x` (as produced elsewhere in this package).
  # For every node, the full node-wise regression posterior is projected
  # onto nested submodels chosen by greedy forward search (lm_fprojsel),
  # and MSE / mean log predictive density are recorded along the path.
  #
  # Args:
  #   x           fitted bnets object; must carry x$stan_dat$K (number of
  #               nodes), x$stan_dat$N (observations) and x$stan_dat$X (data).
  #   prior_scale prior scale passed through to extract_BETA().
  # Returns a list with `search_results` (per-node selection path, KL, MSE,
  # MLPD) and `beta_list` (per-node posterior draws).
  nodes <- 1:x$stan_dat$K
  beta_list <- list()
  search_path <- list()
  list_res <- list()
  d <- max(nodes) - 1
  for(i in 1:length(nodes)){
    # Extract the posterior ONCE per node. (The original code called
    # extract_BETA three times with identical arguments, redoing the same
    # extraction for the coefficients, intercept, and sigma.)
    post <- extract_BETA(x, nodes = nodes[i],
                         prior_scale = prior_scale, prob = .5)
    b_temp <- post$posterior_sample_BETA
    int_temp <- post$posterior_samples_not_BETA[,1]
    sigma_temp <- post$posterior_samples_not_BETA[,2]
    beta_list[[i]] <- cbind(int_temp, b_temp, sigma_temp)
  }
  names(beta_list) <- colnames(x$stan_dat$X)
  # Greedy forward search over predictors for each node's regression.
  # Coefficient draws are stacked as (intercept; betas) x draws, matching
  # the design matrix cbind(1, X[,-i]).
  for(i in 1:length(beta_list)){
    temp_dat <- rbind(beta_list[[i]]$int_temp, t(beta_list[[i]] %>%
                                                   select(contains("x"))))
    search_path[[i]] <- lm_fprojsel(temp_dat, beta_list[[i]]$sigma_temp^2,
                                    cbind(rep(1, x$stan_dat$N) , x$stan_dat$X[,-i]))
  }
  # Evaluate each submodel along each node's search path: rows index the
  # submodel size (1 = intercept only), columns index the node.
  mse_mat <- matrix(NA, max(nodes), max(nodes))
  mlpd_mat <- matrix(NA, max(nodes), max(nodes))
  for(i in 1:length(search_path)){
    search_temp <- search_path[[i]]
    temp_dat <- rbind(beta_list[[i]]$int_temp, t(beta_list[[i]] %>% select(contains("x"))))
    for (k in 1:(d+1)) {
      # projected parameters for the first k chosen columns
      submodel <- lm_proj(temp_dat, beta_list[[i]]$sigma_temp^2, cbind(rep(1, x$stan_dat$N),
                                                                       x$stan_dat$X[,-i]), search_temp$chosen[1:k])
      wp <- submodel$w
      sigma2p <- submodel$sigma2
      # mean squared error of the posterior-mean prediction
      ypred <- rowMeans(cbind(rep(1, x$stan_dat$N),
                              x$stan_dat$X[,-i]) %*% wp)
      mse_mat[k,i] <- mean((x$stan_dat$X[,i]-ypred)^2)
      # mean log predictive density, averaged over posterior draws
      pd <- dnorm(x$stan_dat$X[,i], cbind(rep(1, x$stan_dat$N),
                                          x$stan_dat$X[,-i]) %*% wp, sqrt(sigma2p))
      mlpd_mat[k,i] <- mean(log(rowMeans(pd)))
    }
  }
  # Assemble per-node results; index 1 in the path is always the intercept.
  for(k in 1:ncol(mse_mat)) {
    search_temp <- search_path[[k]]
    c_name <- c("int", colnames(x$stan_dat$X[,-k]))
    list_res[[k]] <- list(spath = search_temp$chosen,
                          col_names = c_name[search_temp$chosen],
                          kl_dist = search_temp$kl, mse = mse_mat[,k], mlpd= mlpd_mat[,k])
  }
  names(list_res) <- colnames(x$stan_dat$X)
  list(search_results = list_res, beta_list = beta_list)
}
lm_fprojsel <- function(w, sigma2, x) {
  # Forward variable selection in projection space: starting from the
  # intercept-only model (column 1), greedily add the column whose inclusion
  # minimizes the KL divergence from the full model (via lm_proj).
  # Returns the selection order (`chosen`) and the KL value at each size.
  n_cols <- dim(x)[2]
  chosen <- 1  # the intercept is always in the model
  kl <- rep(0, n_cols)
  kl[1] <- lm_proj(w, sigma2, x, 1)$kl
  # Add one variable per pass until every column is included.
  for (size in 2:n_cols) {
    candidates <- setdiff(seq_len(n_cols), chosen)
    # KL divergence of each candidate submodel.
    cand_kl <- vapply(candidates, function(j) {
      lm_proj(w, sigma2, x, sort(c(chosen, j)))$kl
    }, numeric(1))
    # Take the candidate that minimizes the KL (first minimum on ties).
    best <- which.min(cand_kl)
    chosen <- c(chosen, candidates[best])
    kl[size] <- cand_kl[best]
  }
  list(chosen = chosen, kl = kl)
}
lm_proj <- function(w, sigma2, x, indproj) {
  # Project posterior draws of the full linear model onto the subspace
  # spanned by the columns of x listed in `indproj`. The intercept term is
  # assumed to be stacked into w, with a matching column of ones in x.
  # Returns the projected coefficient draws (zero-padded back to the full
  # dimension), the projected residual variances, and the estimated
  # KL divergence between the full and projected models.
  xp <- x[, indproj]
  yhat <- x %*% w                                # full-model fit, one column per draw
  wp_sub <- solve(t(xp) %*% xp, t(xp) %*% yhat)  # least-squares projection of the fit
  sigma2p <- sigma2 + colMeans((yhat - xp %*% wp_sub)^2)
  # Estimated KL divergence, averaged over the posterior draws.
  kl <- mean(0.5 * log(sigma2p / sigma2))
  # Zero-pad so the coefficients align with the full design matrix: rows not
  # in `indproj` stay zero.
  wp <- matrix(0, dim(w)[1], dim(w)[2])
  wp[indproj, ] <- wp_sub
  list(w = wp, sigma2 = sigma2p, kl = kl)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transdistfuncs.r
\name{est.transdist.temporal.bootstrap.ci}
\alias{est.transdist.temporal.bootstrap.ci}
\title{Bootstrapped confidence intervals for the change in mean transmission distance over time}
\usage{
est.transdist.temporal.bootstrap.ci(epi.data, gen.t.mean, gen.t.sd, t1,
max.sep, max.dist, n.transtree.reps = 100, mean.equals.sd = FALSE,
theta.weights = NULL, boot.iter, ci.low = 0.025, ci.high = 0.975,
parallel = FALSE, n.cores = NULL)
}
\arguments{
\item{epi.data}{a three-column matrix giving the coordinates (\code{x} and \code{y}) and time of infection (\code{t} for all cases in an epidemic (columns must be in \code{x}, \code{y}, \code{t} order)}
\item{gen.t.mean}{mean generation time of the infecting pathogen}
\item{gen.t.sd}{standard deviation of generation time of the infecting pathogen}
\item{t1}{time step to begin estimation of transmission distance}
\item{max.sep}{maximum number of time steps allowed between two cases (passed to the \code{get.transdist.theta} function)}
\item{max.dist}{maximum spatial distance between two cases considered in calculation}
\item{n.transtree.reps}{number of times to simulate transmission trees when estimating the weights of theta (passed to the \code{est.transdist.theta.weights} function, default = 10). Warning: higher values of this parameter cause significant increases in computation time.}
\item{mean.equals.sd}{logical term indicating if the mean and standard deviation of the transmission kernel are expected to be equal (default = FALSE)}
\item{theta.weights}{use external matrix of theta weights. If NULL (default) the matrix of theta weights is automatically estimated by calling the \code{est.transdist.theta.weights} function}
\item{boot.iter}{the number of bootstrapped iterations to perform}
\item{ci.low}{low end of the confidence interval (default = 0.025)}
\item{ci.high}{high end of the confidence interval (default = 0.975)}
\item{parallel}{run bootstraps in parallel (default = FALSE)}
\item{n.cores}{number of cores to use when \code{parallel} = TRUE (default = NULL, which uses half the available cores)}
}
\value{
a four-column numeric matrix containing the point estimate for mean transmission distance, low and high bootstrapped confidence intervals, and the sample size up to each time step
}
\description{
Estimates bootstrapped confidence intervals for the mean transmission distance over the duration of the epidemic by running \code{est.transdist} on all cases
occurring up to each time point.
}
\references{
Salje H, Cummings DAT and Lessler J (2016). “Estimating infectious disease transmission distances using the overall distribution of cases.” Epidemics, 17, pp. 10–18. ISSN 1755-4365, doi: \href{https://www.sciencedirect.com/science/article/pii/S1755436516300317}{10.1016/j.epidem.2016.10.001}.
}
\seealso{
Other transdist: \code{\link{est.transdist.bootstrap.ci}},
\code{\link{est.transdist.temporal}},
\code{\link{est.transdist.theta.weights}},
\code{\link{est.transdist}},
\code{\link{get.transdist.theta}}
}
\author{
Justin Lessler, Henrik Salje, and John Giles
}
\concept{transdist}
|
/man/est.transdist.temporal.bootstrap.ci.Rd
|
no_license
|
shauntruelove/IDSpatialStats
|
R
| false
| true
| 3,202
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transdistfuncs.r
\name{est.transdist.temporal.bootstrap.ci}
\alias{est.transdist.temporal.bootstrap.ci}
\title{Bootstrapped confidence intervals for the change in mean transmission distance over time}
\usage{
est.transdist.temporal.bootstrap.ci(epi.data, gen.t.mean, gen.t.sd, t1,
max.sep, max.dist, n.transtree.reps = 100, mean.equals.sd = FALSE,
theta.weights = NULL, boot.iter, ci.low = 0.025, ci.high = 0.975,
parallel = FALSE, n.cores = NULL)
}
\arguments{
\item{epi.data}{a three-column matrix giving the coordinates (\code{x} and \code{y}) and time of infection (\code{t} for all cases in an epidemic (columns must be in \code{x}, \code{y}, \code{t} order)}
\item{gen.t.mean}{mean generation time of the infecting pathogen}
\item{gen.t.sd}{standard deviation of generation time of the infecting pathogen}
\item{t1}{time step to begin estimation of transmission distance}
\item{max.sep}{maximum number of time steps allowed between two cases (passed to the \code{get.transdist.theta} function)}
\item{max.dist}{maximum spatial distance between two cases considered in calculation}
\item{n.transtree.reps}{number of times to simulate transmission trees when estimating the weights of theta (passed to the \code{est.transdist.theta.weights} function, default = 10). Warning: higher values of this parameter cause significant increases in computation time.}
\item{mean.equals.sd}{logical term indicating if the mean and standard deviation of the transmission kernel are expected to be equal (default = FALSE)}
\item{theta.weights}{use external matrix of theta weights. If NULL (default) the matrix of theta weights is automatically estimated by calling the \code{est.transdist.theta.weights} function}
\item{boot.iter}{the number of bootstrapped iterations to perform}
\item{ci.low}{low end of the confidence interval (default = 0.025)}
\item{ci.high}{high end of the confidence interval (default = 0.975)}
\item{parallel}{run bootstraps in parallel (default = FALSE)}
\item{n.cores}{number of cores to use when \code{parallel} = TRUE (default = NULL, which uses half the available cores)}
}
\value{
a four-column numeric matrix containing the point estimate for mean transmission distance, low and high bootstrapped confidence intervals, and the sample size up to each time step
}
\description{
Estimates bootstrapped confidence intervals for the mean transmission distance over the duration of the epidemic by running \code{est.transdist} on all cases
occurring up to each time point.
}
\references{
Salje H, Cummings DAT and Lessler J (2016). “Estimating infectious disease transmission distances using the overall distribution of cases.” Epidemics, 17, pp. 10–18. ISSN 1755-4365, doi: \href{https://www.sciencedirect.com/science/article/pii/S1755436516300317}{10.1016/j.epidem.2016.10.001}.
}
\seealso{
Other transdist: \code{\link{est.transdist.bootstrap.ci}},
\code{\link{est.transdist.temporal}},
\code{\link{est.transdist.theta.weights}},
\code{\link{est.transdist}},
\code{\link{get.transdist.theta}}
}
\author{
Justin Lessler, Henrik Salje, and John Giles
}
\concept{transdist}
|
library(rmarkdown)
library(tidyverse)
#----------------------------------------------------------------------------
#############################################################################
## APPLY MD RENDER ACROSS ALL SPECIES
#############################################################################
# pull spp codes
# Build species codes of the form "s<code>" (e.g. "s832") from the lookup
# CSV; the "spp code" column is renamed first so it is a syntactic R name.
# NOTE(review): sp_dat is computed but never used below -- rendering runs
# over the hard-coded test_sp vector instead.
sp_dat <- read_csv("data/spp_codes.csv") %>%
  rename(spp_code = "spp code") %>%
  mutate(spp_code = paste("s", spp_code, sep = "")) %>%
  pull(spp_code)
# Subset of species codes used for test rendering.
test_sp <- c("s832", "s93", "s132", "s73", "s901", "s833", "s711", "s263", "s129")
# apply species template to test species
# Render one PDF per species from the shared RMarkdown template; the species
# code is passed to the template as the `SPP` parameter.
lapply(test_sp, function(x) {
  render("figures_md/species_template.Rmd",
         output_dir = "figures_md/sp_plots",
         output_file = paste(x, ".pdf", sep = ""),
         params = list(SPP = x))
})
|
/code/md_render.R
|
no_license
|
jeremyash/tree_CL
|
R
| false
| false
| 823
|
r
|
library(rmarkdown)
library(tidyverse)
#----------------------------------------------------------------------------
#############################################################################
## APPLY MD RENDER ACROSS ALL SPECIES
#############################################################################
# pull spp codes
# Build species codes of the form "s<code>" (e.g. "s832") from the lookup
# CSV; the "spp code" column is renamed first so it is a syntactic R name.
# NOTE(review): sp_dat is computed but never used below -- rendering runs
# over the hard-coded test_sp vector instead.
sp_dat <- read_csv("data/spp_codes.csv") %>%
  rename(spp_code = "spp code") %>%
  mutate(spp_code = paste("s", spp_code, sep = "")) %>%
  pull(spp_code)
# Subset of species codes used for test rendering.
test_sp <- c("s832", "s93", "s132", "s73", "s901", "s833", "s711", "s263", "s129")
# apply species template to test species
# Render one PDF per species from the shared RMarkdown template; the species
# code is passed to the template as the `SPP` parameter.
lapply(test_sp, function(x) {
  render("figures_md/species_template.Rmd",
         output_dir = "figures_md/sp_plots",
         output_file = paste(x, ".pdf", sep = ""),
         params = list(SPP = x))
})
|
000010
000011* Read the next CONTRF record. Status "00" = success (exit normally);
000012* status "23" = record not found (flag error 34, then exit).
000013* Any other status in class 2/3/4 flags error 34 and performs
000014* READ-ERROR; a locked record waits via LOCKED-RECORD and retries.
000015* NOTE(review): sequence numbers 000040/000050/000070 are reused --
000016* presumably leftover renumbering; behavior is unaffected.
000020 READ-CONTRF.
000030 READ CONTRF.
000040 IF WS-STATUS = "00"
000050 GO TO READ-CONTRF-EXIT.
000040 IF WS-STATUS = "23"
000070 MOVE 34 TO WS-F-ERROR
000050 GO TO READ-CONTRF-EXIT.
000060 IF WS-STAT1 = "2" OR "3" OR "4"
000070 MOVE 34 TO WS-F-ERROR
000080 PERFORM READ-ERROR.
000090 IF RECORD-LOCKED
000100 PERFORM LOCKED-RECORD
000110 GO TO READ-CONTRF.
000120 MOVE 34 TO WS-F-ERROR.
000130 PERFORM READ-ERROR.
000140
000150 READ-CONTRF-EXIT.
000160 EXIT.
|
/CONTRF.RD
|
no_license
|
pingleware/apac-accounting-code
|
R
| false
| false
| 640
|
rd
|
000010
000011* Read the next CONTRF record. Status "00" = success (exit normally);
000012* status "23" = record not found (flag error 34, then exit).
000013* Any other status in class 2/3/4 flags error 34 and performs
000014* READ-ERROR; a locked record waits via LOCKED-RECORD and retries.
000015* NOTE(review): sequence numbers 000040/000050/000070 are reused --
000016* presumably leftover renumbering; behavior is unaffected.
000020 READ-CONTRF.
000030 READ CONTRF.
000040 IF WS-STATUS = "00"
000050 GO TO READ-CONTRF-EXIT.
000040 IF WS-STATUS = "23"
000070 MOVE 34 TO WS-F-ERROR
000050 GO TO READ-CONTRF-EXIT.
000060 IF WS-STAT1 = "2" OR "3" OR "4"
000070 MOVE 34 TO WS-F-ERROR
000080 PERFORM READ-ERROR.
000090 IF RECORD-LOCKED
000100 PERFORM LOCKED-RECORD
000110 GO TO READ-CONTRF.
000120 MOVE 34 TO WS-F-ERROR.
000130 PERFORM READ-ERROR.
000140
000150 READ-CONTRF-EXIT.
000160 EXIT.
|
# mcSuperLearner
#
# Created by Eric Polley on 2011-01-01.
#
# Multicore variant of SuperLearner: fits the ensemble using
# parallel::mclapply for both the V-fold cross-validation step and the
# full-data library fits. Interface and return value mirror SuperLearner().
#
# Arguments (semantics follow the SuperLearner package):
#   Y          numeric outcome vector of length N (checked below).
#   X          predictors; a non-data.frame triggers a compatibility message.
#   newX       prediction data; defaults to X, must have identical columns.
#   family     gaussian()/binomial(), as character, function, or family object.
#   SL.library prediction/screening algorithm specification (see .createLibrary).
#   method     combination method: name of a list/function, a list, or a function.
#   id         cluster identifiers (defaults to 1:N).
#   verbose    print progress messages.
#   control, cvControl  lists normalized via SuperLearner.control() /
#              SuperLearner.CV.control().
#   obsWeights observation weights (default 1 for every observation).
# Returns an object of class "SuperLearner".
mcSuperLearner <- function(Y, X, newX = NULL, family = gaussian(), SL.library, method = 'method.NNLS', id = NULL, verbose = FALSE, control = list(), cvControl = list(), obsWeights = NULL) {
  .SL.require('parallel')
  # Resolve `method` to a list: it may be the name of a list, the name of a
  # factory function, or a function returning the method list.
  if(is.character(method)) {
    if(exists(method, mode = 'list')) {
      method <- get(method, mode = 'list')
    } else if(exists(method, mode = 'function')) {
      method <- get(method, mode = 'function')()
    }
  } else if(is.function(method)) {
    method <- method()
  }
  if(!is.list(method)) {
    stop("method is not in the appropriate format. Check out help('method.template')")
  }
  # Load any packages the method declares it needs.
  if(!is.null(method$require)) {
    sapply(method$require, function(x) require(force(x), character.only = TRUE))
  }
  # get defaults for controls and make sure in correct format
  control <- do.call('SuperLearner.control', control)
  cvControl <- do.call('SuperLearner.CV.control', cvControl)
  # put together the library
  # should this be in a new environment?
  library <- .createLibrary(SL.library)
  .check.SL.library(library = c(unique(library$library$predAlgorithm), library$screenAlgorithm))
  call <- match.call(expand.dots = TRUE)
  # should we be checking X and newX for data.frame?
  # data.frame not required, but most of the built-in wrappers assume a data.frame
  if(!inherits(X, 'data.frame')) message('X is not a data frame. Check the algorithms in SL.library to make sure they are compatible with non data.frame inputs')
  varNames <- colnames(X)
  N <- dim(X)[1L]
  p <- dim(X)[2L]
  k <- nrow(library$library)          # number of prediction-algorithm x screen combinations
  kScreen <- length(library$screenAlgorithm)
  # Z holds the cross-validated predictions: one row per observation, one
  # column per library algorithm.
  Z <- matrix(NA, N, k)
  libraryNames <- paste(library$library$predAlgorithm, library$screenAlgorithm[library$library$rowScreen], sep="_")
  # put fitLibrary in its own environment to locate later (workers cannot
  # write into it directly; it is filled after the parallel step).
  fitLibEnv <- new.env()
  assign('fitLibrary', vector('list', length = k), envir = fitLibEnv)
  assign('libraryNames', libraryNames, envir = fitLibEnv)
  evalq(names(fitLibrary) <- libraryNames, envir = fitLibEnv)
  # errors* records if an algorithm stops either in the CV step and/or in full data
  errorsInCVLibrary <- rep(0, k)
  errorsInLibrary <- rep(0, k)
  # if newX is missing, use X
  if(is.null(newX)) {
    newX <- X
  }
  # Are these checks still required?
  if(!identical(colnames(X), colnames(newX))) {
    stop("The variable names and order in newX must be identical to the variable names and order in X")
  }
  if (sum(is.na(X)) > 0 | sum(is.na(newX)) > 0 | sum(is.na(Y)) > 0) {
    stop("missing data is currently not supported. Check Y, X, and newX for missing values")
  }
  if (!is.numeric(Y)) {
    stop("the outcome Y must be a numeric vector")
  }
  # family can be either character or function, so these lines put everything together (code from glm())
  if(is.character(family))
    family <- get(family, mode="function", envir=parent.frame())
  if(is.function(family))
    family <- family()
  if (is.null(family$family)) {
    print(family)
    stop("'family' not recognized")
  }
  if (family$family != "binomial" & isTRUE("cvAUC" %in% method$require)){
    stop("'method.AUC' is designed for the 'binomial' family only")
  }
  # create CV folds (list of validation-row indices, one element per fold)
  validRows <- CVFolds(N = N, id = id, Y = Y, cvControl = cvControl)
  # test id
  if(is.null(id)) {
    id <- seq(N)
  }
  if(!identical(length(id), N)) {
    stop("id vector must have the same dimension as Y")
  }
  # test observation weights
  if(is.null(obsWeights)) {
    obsWeights <- rep(1, N)
  }
  if(!identical(length(obsWeights), N)) {
    stop("obsWeights vector must have the same dimension as Y")
  }
  # create function for the cross-validation step: given one fold's
  # validation rows, fit every screen + algorithm on the training rows and
  # return the validation predictions (rows of this fold x k algorithms).
  .crossValFUN <- function(valid, Y, dataX, id, obsWeights, library, kScreen, k, p, libraryNames) {
    tempLearn <- dataX[-valid, , drop = FALSE]
    tempOutcome <- Y[-valid]
    tempValid <- dataX[valid, , drop = FALSE]
    tempWhichScreen <- matrix(NA, nrow = kScreen, ncol = p)
    tempId <- id[-valid]
    tempObsWeights <- obsWeights[-valid]
    # should this be converted to a lapply also?
    # Run each screening algorithm; a failed screen falls back to keeping
    # all covariates (equivalent to All()).
    for(s in seq(kScreen)) {
      testScreen <- try(do.call(library$screenAlgorithm[s], list(Y = tempOutcome, X = tempLearn, family = family, id = tempId, obsWeights = tempObsWeights)))
      if(inherits(testScreen, "try-error")) {
        warning(paste("replacing failed screening algorithm,", library$screenAlgorithm[s], ", with All()", "\n "))
        tempWhichScreen[s, ] <- TRUE
      } else {
        tempWhichScreen[s, ] <- testScreen
      }
      if(verbose) {
        message(paste("Number of covariates in ", library$screenAlgorithm[s], " is: ", sum(tempWhichScreen[s, ]), sep = ""))
      }
    } #end screen
    # should this be converted to a lapply also?
    # Fit each prediction algorithm on the screened training columns and
    # predict the validation rows; failures leave NA columns, which are
    # detected (and zeroed) after the parallel step.
    out <- matrix(NA, nrow = nrow(tempValid), ncol = k)
    for(s in seq(k)) {
      testAlg <- try(do.call(library$library$predAlgorithm[s], list(Y = tempOutcome, X = subset(tempLearn, select = tempWhichScreen[library$library$rowScreen[s], ], drop=FALSE), newX = subset(tempValid, select = tempWhichScreen[library$library$rowScreen[s], ], drop=FALSE), family = family, id = tempId, obsWeights = tempObsWeights)))
      if(inherits(testAlg, "try-error")) {
        warning(paste("Error in algorithm", library$library$predAlgorithm[s], "\n  The Algorithm will be removed from the Super Learner (i.e. given weight 0) \n" ))
        # errorsInCVLibrary[s] <<- 1
        # '<<-' doesn't work with mclapply.
      } else {
        out[, s] <- testAlg$pred
      }
      # verbose will not work in the GUI, but works in the terminal
      if(verbose) message(paste("CV", libraryNames[s]))
    } #end library
    invisible(out)
  }
  # the lapply performs the cross-validation steps to create Z
  # additional steps to put things in the correct order
  # rbind unlists the output from lapply
  # need to unlist folds to put the rows back in the correct order
  Z[unlist(validRows, use.names = FALSE), ] <- do.call('rbind', parallel::mclapply(validRows, FUN = .crossValFUN, Y = Y, dataX = X, id = id, obsWeights = obsWeights, library = library, kScreen = kScreen, k = k, p = p, libraryNames = libraryNames))
  # check for errors. If any algorithms had errors, replace entire column with 0 even if error is only in one fold.
  errorsInCVLibrary <- apply(Z, 2, function(x) any(is.na(x)))
  if(sum(errorsInCVLibrary) > 0) {
    Z[, as.logical(errorsInCVLibrary)] <- 0
  }
  if(all(Z == 0)) {
    stop("All algorithms dropped from library")
  }
  # compute weights for each algorithm in library:
  getCoef <- method$computeCoef(Z = Z, Y = Y, libraryNames = libraryNames, obsWeights = obsWeights, control = control, verbose = verbose)
  coef <- getCoef$coef
  names(coef) <- libraryNames
  # Set a default in case the method does not return the optimizer result.
  if(!("optimizer" %in% names(getCoef))) {
    getCoef["optimizer"] <- NA
  }
  # now fit all algorithms in library on entire learning data set and predict on newX
  m <- dim(newX)[1L]
  predY <- matrix(NA, nrow = m, ncol = k)
  # whichScreen <- matrix(NA, nrow = kScreen, ncol = p)
  # Full-data screening: failed screens again fall back to all covariates.
  .screenFun <- function(fun, list) {
    testScreen <- try(do.call(fun, list))
    if(inherits(testScreen, "try-error")) {
      warning(paste("replacing failed screening algorithm,", fun, ", with All() in full data", "\n "))
      out <- rep(TRUE, ncol(list$X))
    } else {
      out <- testScreen
    }
    return(out)
  }
  whichScreen <- t(sapply(library$screenAlgorithm, FUN = .screenFun, list = list(Y = Y, X = X, family = family, id = id, obsWeights = obsWeights)))
  # change to sapply?
  # for(s in 1:k) {
  #   testAlg <- try(do.call(library$library$predAlgorithm[s], list(Y = Y, X = subset(X, select = whichScreen[library$library$rowScreen[s], ], drop=FALSE), newX = subset(newX, select = whichScreen[library$library$rowScreen[s], ], drop=FALSE), family = family, id = id, obsWeights = obsWeights)))
  #   if(inherits(testAlg, "try-error")) {
  #     warning(paste("Error in algorithm", library$library$predAlgorithm[s], " on full data", "\n  The Algorithm will be removed from the Super Learner (i.e. given weight 0) \n" ))
  #     errorsInLibrary[s] <- 1
  #   } else {
  #     predY[, s] <- testAlg$pred
  #   }
  #   if(control$saveFitLibrary) {
  #     fitLibrary[[s]] <- testAlg$fit
  #   }
  #   if(verbose) {
  #     message(paste("full", libraryNames[s]))
  #   }
  # }
  # assign in environments doesn't work with mc and snow, change .predFun to return a list with both pred and fitLibrary elements and then parse the two.
  # Full-data fit for one library entry: returns the newX predictions and
  # (optionally) the fitted object; failures yield an NA prediction column.
  .predFun <- function(index, lib, Y, dataX, newX, whichScreen, family, id, obsWeights, verbose, control, libraryNames) {
    out <- list(pred = NA, fitLibrary = NULL)
    testAlg <- try(do.call(lib$predAlgorithm[index], list(Y = Y, X = subset(dataX, select = whichScreen[lib$rowScreen[index], ], drop=FALSE), newX = subset(newX, select = whichScreen[lib$rowScreen[index], ], drop=FALSE), family = family, id = id, obsWeights = obsWeights)))
    if(inherits(testAlg, "try-error")) {
      warning(paste("Error in algorithm", lib$predAlgorithm[index], " on full data", "\n  The Algorithm will be removed from the Super Learner (i.e. given weight 0) \n" ))
      out$pred <- rep.int(NA, times = nrow(newX))
    } else {
      out$pred <- testAlg$pred
      if(control$saveFitLibrary) {
        # eval(bquote(fitLibrary[[.(index)]] <- .(testAlg$fit)), envir = fitLibEnv)
        out$fitLibrary <- testAlg$fit
      }
    }
    if(verbose) {
      message(paste("full", libraryNames[index]))
    }
    invisible(out)
  }
  foo <- parallel::mclapply(seq(k), FUN = .predFun, lib = library$library, Y = Y, dataX = X, newX = newX, whichScreen = whichScreen, family = family, id = id, obsWeights = obsWeights, verbose = verbose, control = control, libraryNames = libraryNames)
  # Split the worker results into the prediction matrix and the saved fits.
  predY <- do.call('cbind', lapply(foo, '[[', 'pred'))
  assign('fitLibrary', lapply(foo, '[[', 'fitLibrary'), envir = fitLibEnv)
  rm(foo)
  # predY <- do.call('cbind', mclapply(seq(k), FUN = .predFun, lib = library$library, Y = Y, dataX = X, newX = newX, whichScreen = whichScreen, family = family, id = id, obsWeights = obsWeights, verbose = verbose, control = control, libraryNames = libraryNames))
  # check for errors: if any full-data fit failed AND had non-zero weight,
  # zero its column in Z and re-estimate the ensemble coefficients.
  errorsInLibrary <- apply(predY, 2, function(xx) any(is.na(xx)))
  if(sum(errorsInLibrary) > 0) {
    if(sum(coef[as.logical(errorsInLibrary)]) > 0) {
      warning(paste("re-running estimation of coefficients removing failed algorithm(s) \n Orignial coefficients are: \n", coef, "\n"))
      Z[, as.logical(errorsInLibrary)] <- 0
      if(all(Z == 0)) {
        stop("All algorithms dropped from library")
      }
      getCoef <- method$computeCoef(Z = Z, Y = Y, libraryNames = libraryNames, obsWeights = obsWeights, control = control, verbose = verbose)
      coef <- getCoef$coef
      names(coef) <- libraryNames
    } else {
      warning("coefficients already 0 for all failed algorithm(s)")
    }
  }
  # compute super learner predictions on newX
  getPred <- method$computePred(predY = predY, coef = coef, control = control)
  # add names of algorithms to the predictions
  colnames(predY) <- libraryNames
  # clean up when errors in library: CV risks for failed CV algorithms are NA
  if(sum(errorsInCVLibrary) > 0) {
    getCoef$cvRisk[as.logical(errorsInCVLibrary)] <- NA
  }
  # put everything together in a list
  out <- list(call = call, libraryNames = libraryNames, SL.library = library, SL.predict = getPred, coef = coef, library.predict = predY, Z = Z, cvRisk = getCoef$cvRisk, family = family, fitLibrary = get('fitLibrary', envir = fitLibEnv), id = id, varNames = varNames, validRows = validRows, method = method, whichScreen = whichScreen, control = control, errorsInCVLibrary = errorsInCVLibrary, errorsInLibrary = errorsInLibrary, obsWeights = obsWeights, metaOptimizer = getCoef$optimizer)
  class(out) <- c("SuperLearner")
  return(out)
}
|
/R/mcSuperLearner.R
|
no_license
|
mathyCathy/SuperLearner
|
R
| false
| false
| 11,681
|
r
|
# mcSuperLearner
#
# Created by Eric Polley on 2011-01-01.
#
# Multicore variant of SuperLearner(): fits the ensemble using
# parallel::mclapply() for both the V-fold cross-validation step and the
# full-data library fits. The interface mirrors SuperLearner().
#
# Args:
#   Y          numeric outcome vector (checked below; non-numeric stops).
#   X          data.frame of predictors used for training.
#   newX       optional data.frame to predict on; defaults to X. Must have
#              identical column names/order as X.
#   family     model family as character, function, or family object
#              (resolved below, same idiom as glm()).
#   SL.library character vector or list of prediction/screening wrappers,
#              expanded by .createLibrary().
#   method     method for combining algorithms: a name, a function, or a
#              ready-made list with computeCoef()/computePred().
#   id         optional cluster identifier vector, length nrow(X).
#   verbose    print progress messages?
#   control    list passed to SuperLearner.control().
#   cvControl  list passed to SuperLearner.CV.control().
#   obsWeights optional observation weights, length nrow(X).
#
# Returns an object of class "SuperLearner".
mcSuperLearner <- function(Y, X, newX = NULL, family = gaussian(), SL.library, method = 'method.NNLS', id = NULL, verbose = FALSE, control = list(), cvControl = list(), obsWeights = NULL) {
  .SL.require('parallel')
  # resolve method: it may be given as a name, a function, or a list
  if(is.character(method)) {
    if(exists(method, mode = 'list')) {
      method <- get(method, mode = 'list')
    } else if(exists(method, mode = 'function')) {
      method <- get(method, mode = 'function')()
    }
  } else if(is.function(method)) {
    method <- method()
  }
  if(!is.list(method)) {
    stop("method is not in the appropriate format. Check out help('method.template')")
  }
  # attach any packages the method declares it needs
  if(!is.null(method$require)) {
    sapply(method$require, function(x) require(force(x), character.only = TRUE))
  }
  # get defaults for controls and make sure in correct format
  control <- do.call('SuperLearner.control', control)
  cvControl <- do.call('SuperLearner.CV.control', cvControl)
  # put together the library
  # should this be in a new environment?
  library <- .createLibrary(SL.library)
  .check.SL.library(library = c(unique(library$library$predAlgorithm), library$screenAlgorithm))
  call <- match.call(expand.dots = TRUE)
  # should we be checking X and newX for data.frame?
  # data.frame not required, but most of the built-in wrappers assume a data.frame
  if(!inherits(X, 'data.frame')) message('X is not a data frame. Check the algorithms in SL.library to make sure they are compatible with non data.frame inputs')
  varNames <- colnames(X)
  N <- dim(X)[1L]
  p <- dim(X)[2L]
  k <- nrow(library$library)          # number of prediction algorithm x screen combinations
  kScreen <- length(library$screenAlgorithm)
  Z <- matrix(NA, N, k)               # cross-validated predictions, one column per algorithm
  libraryNames <- paste(library$library$predAlgorithm, library$screenAlgorithm[library$library$rowScreen], sep="_")
  # put fitLibrary in its own environment to locate later
  fitLibEnv <- new.env()
  assign('fitLibrary', vector('list', length = k), envir = fitLibEnv)
  assign('libraryNames', libraryNames, envir = fitLibEnv)
  evalq(names(fitLibrary) <- libraryNames, envir = fitLibEnv)
  # errors* records if an algorithm stops either in the CV step and/or in full data
  errorsInCVLibrary <- rep(0, k)
  errorsInLibrary <- rep(0, k)
  # if newX is missing, use X
  if(is.null(newX)) {
    newX <- X
  }
  # Are these checks still required?
  if(!identical(colnames(X), colnames(newX))) {
    stop("The variable names and order in newX must be identical to the variable names and order in X")
  }
  if (sum(is.na(X)) > 0 | sum(is.na(newX)) > 0 | sum(is.na(Y)) > 0) {
    stop("missing data is currently not supported. Check Y, X, and newX for missing values")
  }
  if (!is.numeric(Y)) {
    stop("the outcome Y must be a numeric vector")
  }
  # family can be either character or function, so these lines put everything together (code from glm())
  if(is.character(family))
    family <- get(family, mode="function", envir=parent.frame())
  if(is.function(family))
    family <- family()
  if (is.null(family$family)) {
    print(family)
    stop("'family' not recognized")
  }
  if (family$family != "binomial" & isTRUE("cvAUC" %in% method$require)){
    stop("'method.AUC' is designed for the 'binomial' family only")
  }
  # create CV folds
  validRows <- CVFolds(N = N, id = id, Y = Y, cvControl = cvControl)
  # test id
  if(is.null(id)) {
    id <- seq(N)
  }
  if(!identical(length(id), N)) {
    stop("id vector must have the same dimension as Y")
  }
  # test observation weights
  if(is.null(obsWeights)) {
    obsWeights <- rep(1, N)
  }
  if(!identical(length(obsWeights), N)) {
    stop("obsWeights vector must have the same dimension as Y")
  }
  # create function for the cross-validation step:
  # for one validation fold, run all screening algorithms on the training
  # rows, fit each library algorithm on the screened training data, and
  # return a matrix of predictions on the validation rows (NA on failure).
  .crossValFUN <- function(valid, Y, dataX, id, obsWeights, library, kScreen, k, p, libraryNames) {
    tempLearn <- dataX[-valid, , drop = FALSE]
    tempOutcome <- Y[-valid]
    tempValid <- dataX[valid, , drop = FALSE]
    tempWhichScreen <- matrix(NA, nrow = kScreen, ncol = p)
    tempId <- id[-valid]
    tempObsWeights <- obsWeights[-valid]
    # should this be converted to a lapply also?
    for(s in seq(kScreen)) {
      testScreen <- try(do.call(library$screenAlgorithm[s], list(Y = tempOutcome, X = tempLearn, family = family, id = tempId, obsWeights = tempObsWeights)))
      if(inherits(testScreen, "try-error")) {
        # a failed screen falls back to keeping all covariates
        warning(paste("replacing failed screening algorithm,", library$screenAlgorithm[s], ", with All()", "\n "))
        tempWhichScreen[s, ] <- TRUE
      } else {
        tempWhichScreen[s, ] <- testScreen
      }
      if(verbose) {
        message(paste("Number of covariates in ", library$screenAlgorithm[s], " is: ", sum(tempWhichScreen[s, ]), sep = ""))
      }
    } #end screen
    # should this be converted to a lapply also?
    out <- matrix(NA, nrow = nrow(tempValid), ncol = k)
    for(s in seq(k)) {
      testAlg <- try(do.call(library$library$predAlgorithm[s], list(Y = tempOutcome, X = subset(tempLearn, select = tempWhichScreen[library$library$rowScreen[s], ], drop=FALSE), newX = subset(tempValid, select = tempWhichScreen[library$library$rowScreen[s], ], drop=FALSE), family = family, id = tempId, obsWeights = tempObsWeights)))
      if(inherits(testAlg, "try-error")) {
        warning(paste("Error in algorithm", library$library$predAlgorithm[s], "\n The Algorithm will be removed from the Super Learner (i.e. given weight 0) \n" ))
        # errorsInCVLibrary[s] <<- 1
        # '<<-' doesn't work with mclapply.
      } else {
        out[, s] <- testAlg$pred
      }
      # verbose will not work in the GUI, but works in the terminal
      if(verbose) message(paste("CV", libraryNames[s]))
    } #end library
    invisible(out)
  }
  # the lapply performs the cross-validation steps to create Z
  # additional steps to put things in the correct order
  # rbind unlists the output from lapply
  # need to unlist folds to put the rows back in the correct order
  Z[unlist(validRows, use.names = FALSE), ] <- do.call('rbind', parallel::mclapply(validRows, FUN = .crossValFUN, Y = Y, dataX = X, id = id, obsWeights = obsWeights, library = library, kScreen = kScreen, k = k, p = p, libraryNames = libraryNames))
  # check for errors. If any algorithms had errors, replace entire column with 0 even if error is only in one fold.
  errorsInCVLibrary <- apply(Z, 2, function(x) any(is.na(x)))
  if(sum(errorsInCVLibrary) > 0) {
    Z[, as.logical(errorsInCVLibrary)] <- 0
  }
  if(all(Z == 0)) {
    stop("All algorithms dropped from library")
  }
  # compute weights for each algorithm in library:
  getCoef <- method$computeCoef(Z = Z, Y = Y, libraryNames = libraryNames, obsWeights = obsWeights, control = control, verbose = verbose)
  coef <- getCoef$coef
  names(coef) <- libraryNames
  # Set a default in case the method does not return the optimizer result.
  if(!("optimizer" %in% names(getCoef))) {
    getCoef["optimizer"] <- NA
  }
  # now fit all algorithms in library on entire learning data set and predict on newX
  m <- dim(newX)[1L]
  predY <- matrix(NA, nrow = m, ncol = k)
  # whichScreen <- matrix(NA, nrow = kScreen, ncol = p)
  # run one screening algorithm on the full data; fall back to all
  # covariates if it fails.
  .screenFun <- function(fun, list) {
    testScreen <- try(do.call(fun, list))
    if(inherits(testScreen, "try-error")) {
      warning(paste("replacing failed screening algorithm,", fun, ", with All() in full data", "\n "))
      out <- rep(TRUE, ncol(list$X))
    } else {
      out <- testScreen
    }
    return(out)
  }
  whichScreen <- t(sapply(library$screenAlgorithm, FUN = .screenFun, list = list(Y = Y, X = X, family = family, id = id, obsWeights = obsWeights)))
  # change to sapply?
  # for(s in 1:k) {
  #   testAlg <- try(do.call(library$library$predAlgorithm[s], list(Y = Y, X = subset(X, select = whichScreen[library$library$rowScreen[s], ], drop=FALSE), newX = subset(newX, select = whichScreen[library$library$rowScreen[s], ], drop=FALSE), family = family, id = id, obsWeights = obsWeights)))
  #   if(inherits(testAlg, "try-error")) {
  #     warning(paste("Error in algorithm", library$library$predAlgorithm[s], " on full data", "\n  The Algorithm will be removed from the Super Learner (i.e. given weight 0) \n" ))
  #     errorsInLibrary[s] <- 1
  #   } else {
  #     predY[, s] <- testAlg$pred
  #   }
  #   if(control$saveFitLibrary) {
  #     fitLibrary[[s]] <- testAlg$fit
  #   }
  #   if(verbose) {
  #     message(paste("full", libraryNames[s]))
  #   }
  # }
  # assign in environments doesn't work with mc and snow, change .predFun to return a list with both pred and fitLibrary elements and then parse the two.
  # fit one library algorithm on the full data and predict on newX;
  # returns list(pred = ..., fitLibrary = ...), with pred all-NA on failure.
  .predFun <- function(index, lib, Y, dataX, newX, whichScreen, family, id, obsWeights, verbose, control, libraryNames) {
    out <- list(pred = NA, fitLibrary = NULL)
    testAlg <- try(do.call(lib$predAlgorithm[index], list(Y = Y, X = subset(dataX, select = whichScreen[lib$rowScreen[index], ], drop=FALSE), newX = subset(newX, select = whichScreen[lib$rowScreen[index], ], drop=FALSE), family = family, id = id, obsWeights = obsWeights)))
    if(inherits(testAlg, "try-error")) {
      warning(paste("Error in algorithm", lib$predAlgorithm[index], " on full data", "\n The Algorithm will be removed from the Super Learner (i.e. given weight 0) \n" ))
      out$pred <- rep.int(NA, times = nrow(newX))
    } else {
      out$pred <- testAlg$pred
      if(control$saveFitLibrary) {
        # eval(bquote(fitLibrary[[.(index)]] <- .(testAlg$fit)), envir = fitLibEnv)
        out$fitLibrary <- testAlg$fit
      }
    }
    if(verbose) {
      message(paste("full", libraryNames[index]))
    }
    invisible(out)
  }
  foo <- parallel::mclapply(seq(k), FUN = .predFun, lib = library$library, Y = Y, dataX = X, newX = newX, whichScreen = whichScreen, family = family, id = id, obsWeights = obsWeights, verbose = verbose, control = control, libraryNames = libraryNames)
  # split the per-algorithm lists into the prediction matrix and the fits
  predY <- do.call('cbind', lapply(foo, '[[', 'pred'))
  assign('fitLibrary', lapply(foo, '[[', 'fitLibrary'), envir = fitLibEnv)
  rm(foo)
  # predY <- do.call('cbind', mclapply(seq(k), FUN = .predFun, lib = library$library, Y = Y, dataX = X, newX = newX, whichScreen = whichScreen, family = family, id = id, obsWeights = obsWeights, verbose = verbose, control = control, libraryNames = libraryNames))
  # check for errors
  errorsInLibrary <- apply(predY, 2, function(xx) any(is.na(xx)))
  if(sum(errorsInLibrary) > 0) {
    # if a failed algorithm carried non-zero weight, zero its Z column and
    # re-estimate the coefficients without it
    if(sum(coef[as.logical(errorsInLibrary)]) > 0) {
      warning(paste("re-running estimation of coefficients removing failed algorithm(s) \n Orignial coefficients are: \n", coef, "\n"))
      Z[, as.logical(errorsInLibrary)] <- 0
      if(all(Z == 0)) {
        stop("All algorithms dropped from library")
      }
      getCoef <- method$computeCoef(Z = Z, Y = Y, libraryNames = libraryNames, obsWeights = obsWeights, control = control, verbose = verbose)
      coef <- getCoef$coef
      names(coef) <- libraryNames
    } else {
      warning("coefficients already 0 for all failed algorithm(s)")
    }
  }
  # compute super learner predictions on newX
  getPred <- method$computePred(predY = predY, coef = coef, control = control)
  # add names of algorithms to the predictions
  colnames(predY) <- libraryNames
  # clean up when errors in library
  if(sum(errorsInCVLibrary) > 0) {
    getCoef$cvRisk[as.logical(errorsInCVLibrary)] <- NA
  }
  # put everything together in a list
  out <- list(call = call, libraryNames = libraryNames, SL.library = library, SL.predict = getPred, coef = coef, library.predict = predY, Z = Z, cvRisk = getCoef$cvRisk, family = family, fitLibrary = get('fitLibrary', envir = fitLibEnv), id = id, varNames = varNames, validRows = validRows, method = method, whichScreen = whichScreen, control = control, errorsInCVLibrary = errorsInCVLibrary, errorsInLibrary = errorsInLibrary, obsWeights = obsWeights, metaOptimizer = getCoef$optimizer)
  class(out) <- c("SuperLearner")
  return(out)
}
|
## makeCacheMatrix and cacheSolve together create and cache
## the inverse of a given matrix.
## makeCacheMatrix builds a special "matrix" object: a list of accessor
## closures that store the matrix and a lazily cached inverse. Replacing
## the matrix via set() clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve returns the inverse of the special "matrix" built by
## makeCacheMatrix. If an inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned. Extra arguments are forwarded to
## solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message('returning cached inverse')
    return(cached)
  }
  # cache miss: compute, store, and return the inverse
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
raekenn/ProgrammingAssignment2
|
R
| false
| false
| 1,012
|
r
|
## makeCacheMatrix and cacheSolve together create and cache
## the inverse of a given matrix.
## This function stores the matrix along with the cached
## inverse, if computed.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL until set via setInverse()
  ## declare set function: replace the stored matrix and drop the stale cache
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  ## declare get function: return the stored matrix
  get <- function() x
  setInverse <- function(inverse) i <<- inverse  # store a computed inverse
  getInverse <- function() i                     # return cached inverse (or NULL)
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## This function returns the inverse (either cached
## and already computed, or newly computed).
## `x` is the accessor list built by makeCacheMatrix; extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  i <- x$getInverse()
  ## return cached inverse when one has already been computed
  if(!is.null(i)) {
    message('returning cached inverse')
    return(i)
  } ##else return newly calculated inverse and cache inverse
  else {
    data <- x$get()
    i <- solve(data, ...)
    x$setInverse(i)
    return(i)
  }
}
|
## Reads the UCI HAR wearable-technology test/train data and returns a
## tidy data frame with the average value of each mean()/std()
## measurement per subject, activity, measured action, and summary
## function, split into time (seconds) and frequency (Hz) columns.
## Must be executed from the directory containing activity_labels.txt,
## features.txt, and the test/ and train/ subdirectories.
## (Ideally the data directory would be a parameter; kept as-is for
## backward compatibility.)
run_analysis <- function(){
  ## load required packages (plyr first so dplyr verbs win the masking)
  library(plyr)
  library(dplyr)
  library(reshape2)
  library(tidyr)
  ## read data into data frames in preparation for combining
  actv <- read.table("activity_labels.txt")
  features <- read.table("features.txt")
  test_subject <- read.table("test/subject_test.txt")
  test_label <- read.table("test/y_test.txt")
  test_data <- read.table("test/X_test.txt")
  train_subject <- read.table("train/subject_train.txt")
  train_label <- read.table("train/y_train.txt")
  train_data <- read.table("train/X_train.txt")
  closeAllConnections()
  ## build the variable names
  names(actv) <- c("activity_id", "activity_name")
  names(test_label) <- "activity_id"
  names(train_label) <- "activity_id"
  names(test_subject) <- "subject_id"
  names(train_subject) <- "subject_id"
  names(test_data) <- as.character(features[, 2])
  names(train_data) <- as.character(features[, 2])
  ## compile the data set: bind columns within each split, then stack rows
  all_test <- cbind(test_subject, test_label, test_data)
  all_train <- cbind(train_subject, train_label, train_data)
  all_data <- rbind(all_test, all_train)
  ## merge in the activity names
  all_data <- join(all_data, actv, by = "activity_id")
  ## keep only variables containing mean() or std()
  ## (start with the id columns and the activity name, which is last)
  mean_std <- cbind(all_data[, 1:2], all_data[, length(all_data)])
  all_data_vars <- colnames(all_data)
  mean_std_vars <- c("subject_id", "activity_id", "activity")
  ## continue to build mean_std one matching column at a time
  for (i in seq_along(all_data)) {
    ## fixed = TRUE so "mean()" / "std()" match literally, not as regex
    if (grepl("mean()", all_data_vars[i], fixed = TRUE) ||
        grepl("std()", all_data_vars[i], fixed = TRUE)) {
      mean_std <- cbind(mean_std, all_data[, i])
      mean_std_vars <- c(mean_std_vars, all_data_vars[i])
    }
  }
  names(mean_std) <- mean_std_vars
  ## mean_std now holds the mean/std subset (step 4)
  ## summarize based on subject and activity
  mean_std <- select(mean_std, -activity_id)
  mean_std_melt <- melt(mean_std, id = c("subject_id", "activity"))
  mean_std_mean <- dcast(mean_std_melt, subject_id + activity ~ variable, mean)
  ## tidy the data a bit, make it not so wide
  tidy_msm <- gather(mean_std_mean, "action_function", "measurement",
                     -subject_id, -activity)
  ## mark the action/function boundary with "*" so separate() can split it
  tidy_msm <- mutate(tidy_msm, action_function = sub("()-", "*", action_function))
  tidy_msm <- separate(tidy_msm, "action_function",
                       c("action", "function"), sep = "\\*")
  ## split the leading t/f character out as the measurement type
  tidy_msm <- mutate(tidy_msm, measure_type = substring(action, 1, 1))
  ## drop the leading t/f from the action name
  ## (bug fix: the end position was length(action) — the number of rows —
  ## rather than nchar(action), the per-string length)
  tidy_msm <- mutate(tidy_msm, action = substring(action, 2, nchar(action)))
  tidy_msm <- mutate(tidy_msm,
                     measure_type = sub("t", "time_in_sec", measure_type))
  tidy_msm <- mutate(tidy_msm,
                     measure_type = sub("f", "freq_in_hz", measure_type))
  tidy_msm <- spread(tidy_msm, measure_type, measurement)
  tidy_msm
}
|
/run_analysis.R
|
no_license
|
juliastudent/getandcleandata
|
R
| false
| false
| 3,565
|
r
|
## Reads the UCI HAR wearable-technology test/train data and returns a
## tidy data frame with the average value of each mean()/std()
## measurement per subject, activity, measured action, and summary
## function, split into time (seconds) and frequency (Hz) columns.
## Must be executed from the directory containing activity_labels.txt,
## features.txt, and the test/ and train/ subdirectories.
## (Ideally the data directory would be a parameter; kept as-is for
## backward compatibility.)
run_analysis <- function(){
  ## load required packages (plyr first so dplyr verbs win the masking)
  library(plyr)
  library(dplyr)
  library(reshape2)
  library(tidyr)
  ## read data into data frames in preparation for combining
  actv <- read.table("activity_labels.txt")
  features <- read.table("features.txt")
  test_subject <- read.table("test/subject_test.txt")
  test_label <- read.table("test/y_test.txt")
  test_data <- read.table("test/X_test.txt")
  train_subject <- read.table("train/subject_train.txt")
  train_label <- read.table("train/y_train.txt")
  train_data <- read.table("train/X_train.txt")
  closeAllConnections()
  ## build the variable names
  names(actv) <- c("activity_id", "activity_name")
  names(test_label) <- "activity_id"
  names(train_label) <- "activity_id"
  names(test_subject) <- "subject_id"
  names(train_subject) <- "subject_id"
  names(test_data) <- as.character(features[, 2])
  names(train_data) <- as.character(features[, 2])
  ## compile the data set: bind columns within each split, then stack rows
  all_test <- cbind(test_subject, test_label, test_data)
  all_train <- cbind(train_subject, train_label, train_data)
  all_data <- rbind(all_test, all_train)
  ## merge in the activity names
  all_data <- join(all_data, actv, by = "activity_id")
  ## keep only variables containing mean() or std()
  ## (start with the id columns and the activity name, which is last)
  mean_std <- cbind(all_data[, 1:2], all_data[, length(all_data)])
  all_data_vars <- colnames(all_data)
  mean_std_vars <- c("subject_id", "activity_id", "activity")
  ## continue to build mean_std one matching column at a time
  for (i in seq_along(all_data)) {
    ## fixed = TRUE so "mean()" / "std()" match literally, not as regex
    if (grepl("mean()", all_data_vars[i], fixed = TRUE) ||
        grepl("std()", all_data_vars[i], fixed = TRUE)) {
      mean_std <- cbind(mean_std, all_data[, i])
      mean_std_vars <- c(mean_std_vars, all_data_vars[i])
    }
  }
  names(mean_std) <- mean_std_vars
  ## mean_std now holds the mean/std subset (step 4)
  ## summarize based on subject and activity
  mean_std <- select(mean_std, -activity_id)
  mean_std_melt <- melt(mean_std, id = c("subject_id", "activity"))
  mean_std_mean <- dcast(mean_std_melt, subject_id + activity ~ variable, mean)
  ## tidy the data a bit, make it not so wide
  tidy_msm <- gather(mean_std_mean, "action_function", "measurement",
                     -subject_id, -activity)
  ## mark the action/function boundary with "*" so separate() can split it
  tidy_msm <- mutate(tidy_msm, action_function = sub("()-", "*", action_function))
  tidy_msm <- separate(tidy_msm, "action_function",
                       c("action", "function"), sep = "\\*")
  ## split the leading t/f character out as the measurement type
  tidy_msm <- mutate(tidy_msm, measure_type = substring(action, 1, 1))
  ## drop the leading t/f from the action name
  ## (bug fix: the end position was length(action) — the number of rows —
  ## rather than nchar(action), the per-string length)
  tidy_msm <- mutate(tidy_msm, action = substring(action, 2, nchar(action)))
  tidy_msm <- mutate(tidy_msm,
                     measure_type = sub("t", "time_in_sec", measure_type))
  tidy_msm <- mutate(tidy_msm,
                     measure_type = sub("f", "freq_in_hz", measure_type))
  tidy_msm <- spread(tidy_msm, measure_type, measurement)
  tidy_msm
}
|
/BIG5/04/ch04.R
|
no_license
|
Evonne0623/CH_R_Meta
|
R
| false
| false
| 4,429
|
r
| ||
### Customer segmentation on the Wholesale Customers data set:
### hierarchical (Ward) clustering and k-means, with the elbow plot of
### within-group sum of squares used to choose the cluster count.
###loading necessary libraries
library(cluster)
library(corrplot)
###reading the file (columns 3:8 are the six annual spending categories)
cust<-read.csv("Wholesalecustomersdata.csv",header=TRUE,sep = ",")
cust <- cust[,3:8]
head(cust)
###summary of the dataset
summary(cust)
###exploring the dataset more and finding out strong correlations among variables
c <- cor(cust)
corrplot(c, method="number")
#we can see that there is strong correlation among the Detergents_Paper and Grocery
####Hierarchical Clustering
d <- dist(cust,method = "euclidean") # distance matrix
d
fit <- hclust(d, method="ward.D")
plot(fit) # display dendrogram
names(fit)
#creating the clusters using the cutree function
clust = cutree(fit, k=3) # cluster number to 3
clust
table(clust)
cust_c = cbind(cust, clust)
head(cust_c)
#drawing dendrogram with red borders around the 3 clusters (blue: 5 clusters)
rect.hclust(fit, k=3, border="red")
rect.hclust(fit, k=5, border="blue")
#2D representation of the Segmentation:
clusplot(cust, clust, color=TRUE, shade=TRUE,
         labels=2, lines=0, main= 'customer segments')
#### K Means cluster analysis
## number of clusters: elbow plot of within-group sum of squares
wss <- (nrow(cust)-1)*sum(apply(cust,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(cust,
                                     centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Clusters",
     ylab="Within groups sum of squares")
#From the above plot we can see that 3 or 5 is the optimal number of clusters, as we can see that after these numbers the curve remains less changing.
#implementing k means when k=3
fit <- kmeans(cust, 3)
fit
#K-means clustering with 3 clusters of sizes 60, 330, 50
#now implementing k means when k = 5
fit <- kmeans(cust, 5)
fit
#K-means clustering with 5 clusters of sizes 223, 23, 104, 80, 10
###Looking at the cluster means of both scenarios:
#Scenario 1 : k = 3
#Cluster 1 - highest fresh-products.
#Cluster 2 - low spenders.
#Cluster 3 - highest milk, grocery, detergents_papers spenders
#Scenario 2: k = 5
#Cluster 1 - low spenders
#Cluster 2 - highest Fresh spenders
#Cluster 3 - mediocre spenders
#Cluster 4 - low spenders
#Cluster 5 - mediocre Fresh, highest milk, Grocery, detergents_papers
#From the above analysis we can say that 3 clusters prove to be the base optimal number for quickly understanding the customer segmentation
|
/clustering project/customerSegmentation.R
|
no_license
|
TaralikaPenmetsa/Machine-Learning-Projects
|
R
| false
| false
| 2,270
|
r
|
### Customer segmentation on the Wholesale Customers data set:
### hierarchical (Ward) clustering and k-means, with the elbow plot of
### within-group sum of squares used to choose the cluster count.
###loading necessary libraries
library(cluster)
library(corrplot)
###reading the file (columns 3:8 are the six annual spending categories)
cust<-read.csv("Wholesalecustomersdata.csv",header=TRUE,sep = ",")
cust <- cust[,3:8]
head(cust)
###summary of the dataset
summary(cust)
###exploring the dataset more and finding out strong correlations among variables
c <- cor(cust)
corrplot(c, method="number")
#we can see that there is strong correlation among the Detergents_Paper and Grocery
####Hierarchical Clustering
d <- dist(cust,method = "euclidean") # distance matrix
d
fit <- hclust(d, method="ward.D")
plot(fit) # display dendrogram
names(fit)
#creating the clusters using the cutree function
clust = cutree(fit, k=3) # cluster number to 3
clust
table(clust)
cust_c = cbind(cust, clust)
head(cust_c)
#drawing dendrogram with red borders around the 3 clusters (blue: 5 clusters)
rect.hclust(fit, k=3, border="red")
rect.hclust(fit, k=5, border="blue")
#2D representation of the Segmentation:
clusplot(cust, clust, color=TRUE, shade=TRUE,
         labels=2, lines=0, main= 'customer segments')
#### K Means cluster analysis
## number of clusters: elbow plot of within-group sum of squares
wss <- (nrow(cust)-1)*sum(apply(cust,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(cust,
                                     centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Clusters",
     ylab="Within groups sum of squares")
#From the above plot we can see that 3 or 5 is the optimal number of clusters, as we can see that after these numbers the curve remains less changing.
#implementing k means when k=3
fit <- kmeans(cust, 3)
fit
#K-means clustering with 3 clusters of sizes 60, 330, 50
#now implementing k means when k = 5
fit <- kmeans(cust, 5)
fit
#K-means clustering with 5 clusters of sizes 223, 23, 104, 80, 10
###Looking at the cluster means of both scenarios:
#Scenario 1 : k = 3
#Cluster 1 - highest fresh-products.
#Cluster 2 - low spenders.
#Cluster 3 - highest milk, grocery, detergents_papers spenders
#Scenario 2: k = 5
#Cluster 1 - low spenders
#Cluster 2 - highest Fresh spenders
#Cluster 3 - mediocre spenders
#Cluster 4 - low spenders
#Cluster 5 - mediocre Fresh, highest milk, Grocery, detergents_papers
#From the above analysis we can say that 3 clusters prove to be the base optimal number for quickly understanding the customer segmentation
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{mlflow_rfunc_predict}
\alias{mlflow_rfunc_predict}
\title{Predict using RFunc MLflow Model}
\usage{
mlflow_rfunc_predict(model_dir, data, output_file = NULL,
restore = FALSE)
}
\arguments{
\item{model_dir}{The path to the MLflow model, as a string.}
\item{data}{Data frame, 'JSON' or 'CSV' file to be used for prediction.}
\item{output_file}{'JSON' or 'CSV' file where the prediction will be written to.}
\item{restore}{Should \code{mlflow_restore_snapshot()} be called before serving?}
}
\description{
Predict using an RFunc MLflow Model from a file or data frame.
}
\examples{
\dontrun{
library(mlflow)
# save simple model which roundtrips data as prediction
mlflow_save_model(function(df) df, "mlflow_roundtrip")
# save data as json
jsonlite::write_json(iris, "iris.json")
# predict existing model from json data
mlflow_rfunc_predict("mlflow_roundtrip", "iris.json")
}
}
|
/R/mlflow/man/mlflow_rfunc_predict.Rd
|
permissive
|
rstudio/mlflow-original
|
R
| false
| true
| 975
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{mlflow_rfunc_predict}
\alias{mlflow_rfunc_predict}
\title{Predict using RFunc MLflow Model}
\usage{
mlflow_rfunc_predict(model_dir, data, output_file = NULL,
restore = FALSE)
}
\arguments{
\item{model_dir}{The path to the MLflow model, as a string.}
\item{data}{Data frame, 'JSON' or 'CSV' file to be used for prediction.}
\item{output_file}{'JSON' or 'CSV' file where the prediction will be written to.}
\item{restore}{Should \code{mlflow_restore_snapshot()} be called before serving?}
}
\description{
Predict using an RFunc MLflow Model from a file or data frame.
}
\examples{
\dontrun{
library(mlflow)
# save simple model which roundtrips data as prediction
mlflow_save_model(function(df) df, "mlflow_roundtrip")
# save data as json
jsonlite::write_json(iris, "iris.json")
# predict existing model from json data
mlflow_rfunc_predict("mlflow_roundtrip", "iris.json")
}
}
|
## Cluster the feature matrix `db` with HDBSCAN (minPts = 5) and attach
## the resulting cluster labels to the bug records.
##
## Args:
##   db           numeric matrix/data frame of features passed to
##                dbscan::hdbscan().
##   datasetBugId data frame of bug records; gains a `Cluster` column
##                (the per-row HDBSCAN cluster id) and a `Qtd` column
##                initialized to 1 (a per-row count for later grouped
##                aggregation).
##
## Returns `datasetBugId` converted to a data.table with the two added
## columns.
hdbscanClustering <- function(db, datasetBugId) {
  # namespaced calls avoid library() side effects on the search path
  res.hdbs <- dbscan::hdbscan(x = db, minPts = 5)
  # attach the cluster labels
  datasetBugId$Cluster <- res.hdbs$cluster
  # per-row count, intended to be summed during aggregation
  datasetBugId$Qtd <- 1
  # convert for grouped aggregation downstream
  data.table::data.table(datasetBugId)
}
|
/evaluation/codeR/model/HDbScanClustering.R
|
permissive
|
MackMendes/Application-Clustering-Algorithms-for-Discovering-Bug-Patterns-JavaScript-Software
|
R
| false
| false
| 318
|
r
|
## Cluster the feature matrix `db` with HDBSCAN (minPts = 5) and attach
## the resulting cluster labels to the bug records.
##
## Args:
##   db           numeric matrix/data frame of features passed to
##                dbscan::hdbscan().
##   datasetBugId data frame of bug records; gains a `Cluster` column
##                (the per-row HDBSCAN cluster id) and a `Qtd` column
##                initialized to 1 (a per-row count for later grouped
##                aggregation).
##
## Returns `datasetBugId` converted to a data.table with the two added
## columns.
hdbscanClustering <- function(db, datasetBugId) {
  # namespaced calls avoid library() side effects on the search path
  res.hdbs <- dbscan::hdbscan(x = db, minPts = 5)
  # attach the cluster labels
  datasetBugId$Cluster <- res.hdbs$cluster
  # per-row count, intended to be summed during aggregation
  datasetBugId$Qtd <- 1
  # convert for grouped aggregation downstream
  data.table::data.table(datasetBugId)
}
|
library(ggplot2)
library(shiny)
shinyServer(function(input, output) {
source("../plottheme/styling.R", local = TRUE)
#Limits of scales
xmin <- 0
xmax <- 5
ymin <- 0
ymax <- 0.9
#Size of point
psize <- 5
#Sampling distribution standard error
se <- runif(1, 0.4, 0.6)
#Draw sample mean and store with other unique values
samplemean <- runif(1, 2, 4)
sample_ll <- samplemean - 1.96 * se
sample_ul <- samplemean + 1.96 * se
smean <- data.frame(x = samplemean, #last selected population mean
z = 0, #z value of sample mean for current pop. mean
xlow = xmin, xhigh = xmax,
ypop = ymin, #95%ci triangle
llim = samplemean, ulim = samplemean, #current ci limits
colour = "grey") #colour of triangle
#Initialize data frames/objects for clicks
df <- data.frame(x = xmin, y = ymin, colour = "grey") #First point (invisible)
#Initialize interaction results
result <- data.frame(ll_reached = "", ul_reached = "", both_reached = "", ntry = 0)
##MAIN PLOT##
output$mainplot <- renderPlot({
#Capture coordinates of click
if (!is.null(input$plot_click$x)) {
x <- input$plot_click$x
#Adjust x values outside plotable area
if (x < xmin) x <- xmin
if (x > xmax) x <- xmax
#Set x to critical value if near critical value
x <- ifelse((samplemean - x)/se > 1.9 & (samplemean - x)/se <= 1.965,
samplemean - 1.96 * se,
ifelse((samplemean - x)/se < -1.9 & (samplemean - x)/se >= -1.965,
samplemean + 1.96 * se, x))
out <- abs(samplemean - x)/se > 1.965 #Outside 95%CI?
df <<- rbind(df, data.frame(x = x,
y = ymax - psize/200,
colour = ifelse(out, brewercolors["Red"], brewercolors["Green"])))
#Update 95% confidence interval and z value of sample mean
ll_cur <- smean$llim
ul_cur <- smean$ulim
smean <<- data.frame(x = x,
z = round((samplemean - x)/se, digits = 2), # ifelse((samplemean - x)/se > 1.9 & (samplemean - x)/se <= 1.96,
# 1.96,
# ifelse((samplemean - x)/se < -1.9 & (samplemean - x)/se >= -1.96,
# -1.96, (samplemean - x)/se)), #around 1.96 -> 1.96
xlow = x - 1.96 * se,
xhigh = x + 1.96 * se,
ypop = ymax,
llim = ifelse(result$ll_reached == "", x, ll_cur),
ulim = ifelse(result$ul_reached == "", x, ul_cur),
colour = ifelse(out, brewercolors["Red"], brewercolors["Green"]))
#Update results
result$ntry <- result$ntry + 1
if (smean$z == 1.96) result$ll_reached <- "Lower limit reached"
if (smean$z == -1.96) result$ul_reached <- "Upper limit reached"
if (result$ll_reached != "" & result$ul_reached != "")
result$both_reached <- paste0(result$ntry, " Clicks")
result <<- result
}
#PLOT#
ggplot() +
geom_blank() +
#95% most likely sample means
#normal function line
stat_function(fun = dnorm,
args = list(mean = smean$x, sd = se),
data = smean,
xlim = c(xmin, xmax),
alpha = ifelse(smean$ypop == ymin, 0, 1),
colour = smean$colour,
size = 1.5 ) +
#left-tail boundary
geom_segment(aes(x = xlow, xend = xlow, y = 0, yend = dnorm(xlow, mean = x, sd = se)),
data = smean,
alpha = ifelse(smean$ypop == ymin, 0, 1),
colour = smean$colour,
size = 1.5
) +
#left-tail boundary
geom_segment(aes(x = xhigh, xend = xhigh, y = 0, yend = dnorm(xhigh, mean = x, sd = se)),
data = smean,
alpha = ifelse(smean$ypop == ymin, 0, 1),
colour = smean$colour,
size = 1.5
) +
#selected population average line
geom_vline(aes(xintercept = smean$x),
alpha = ifelse(smean$ypop == ymin, 0, 1),
size = 0.5,
colour = smean$colour) +
#interval estimate
geom_segment(aes(x = xlow, xend = xhigh,
y = ymin, yend = ymin), data = smean,
alpha = ifelse(smean$ypop == ymin, 0, 1),
size = 4,
colour = smean$colour) +
#text
geom_text(aes(x = df$x[nrow(df)], y = (ymin + ymax)/4),
label = "95% most likely samples",
alpha = ifelse(smean$ypop == ymin, 0, 1),
colour = smean$colour,
size = 4.5) +
#Critical value times standard error: arrows and text
#left arrow with label
geom_segment(aes(x = samplemean, xend = sample_ll,
y = (ymax + ymin)/2, yend = (ymax + ymin)/2),
alpha = ifelse(result$ll_reached == "", 0, 1),
size = 1,
colour = "darkgray",
arrow = arrow(type = "closed", length = unit(0.1, "inches"),
ends = ifelse(result$ll_reached == "" || result$ul_reached == "", "both", "last"))) +
geom_text(aes(x = (samplemean + sample_ll)/2, y = (ymax + ymin)*0.47,
label = ifelse(result$ll_reached == "", "", "1.96 * SE"),
vjust = 1),
colour = "darkgray") +
#right arrow with label
geom_segment(aes(x = samplemean, xend = sample_ul,
y = (ymax + ymin)/2, yend = (ymax + ymin)/2),
alpha = ifelse(result$ul_reached == "", 0, 1),
size = 1,
colour = "darkgray",
arrow = arrow(type = "closed", length = unit(0.1, "inches"),
ends = ifelse(result$ll_reached == "" || result$ul_reached == "", "both", "last"))) +
geom_text(aes(x = (samplemean + sample_ul)/2, y = (ymax + ymin)*0.47,
label = ifelse(result$ul_reached == "", "", "1.96 * SE"),
vjust = 1),
colour = "darkgray") +
#confidence interval text: display if both limits have been reached
geom_text(aes(x = samplemean, y = ymax*0.54,
label = ifelse(result$ll_reached == "" || result$ul_reached == "", "", "95% confidence interval"),
vjust = 0),
colour = "darkgray") +
#vertical line to actual sample mean (show if at least one limit has been reached)
geom_segment(aes(x = samplemean, xend = samplemean,
y = ymin, yend = (ymax + ymin)/2),
alpha = ifelse(result$ll_reached == "" && result$ul_reached == "", 0, 1),
size = 1,
colour = "darkgray") +
#vertical line to lower limit population value
geom_segment(aes(x = sample_ll, xend = sample_ll,
y = ymax, yend = (ymax + ymin)/2),
alpha = ifelse(result$ll_reached == "", 0, 1),
size = 1,
colour = "darkgray") +
#vertical line to upper limit population value
geom_segment(aes(x = sample_ul, xend = sample_ul,
y = ymax, yend = (ymax + ymin)/2),
alpha = ifelse(result$ul_reached == "", 0, 1),
size = 1,
colour = "darkgray") +
#Selected population means (on click)
geom_point(aes(x = df$x, y = df$y),
alpha = ifelse(df$y == ymin, 0, 1),
size = psize,
colour = df$colour) +
geom_text(aes(x = smean$x, y = ymax - psize/80,
label = format(round(smean$x, digits = 2), nsmall=2),
vjust = 1),
alpha = ifelse(smean$ypop == ymin || abs(smean$z) == 1.96, 0, 1)) +
#Lower and upper bounds
geom_text(aes(x = smean$llim, y = ymax - psize/80,
label = ifelse(result$ll_reached == "","",
paste0(format(round(smean$llim, digits = 2), nsmall=2), "\nLower limit")),
vjust = 1)) +
geom_text(aes(x = smean$ulim, y = ymax - psize/80,
label = ifelse(result$ul_reached == "","",
paste0(format(round(smean$ulim, digits = 2), nsmall=2), "\nUpper limit")),
vjust = 1)) +
#Sample mean
geom_point(aes(x = samplemean, y = ymin + psize/200),
size = psize) +
geom_text(aes(x = samplemean + (xmax + xmin)*0.02, y = (ymax + ymin)*0.05,
label = "Our Sample"),
hjust = 0) +
#z Value of sample mean
geom_text(aes(x = samplemean + (xmax + xmin)*0.02, y = (ymax + ymin)*0.11,
label = paste0("z = ", format(round(smean$z, digits = 2), nsmall=2))),
hjust = 0,
alpha = ifelse(smean$ypop == ymin, 0, 1)) +
#Results
geom_text(aes(x = xmin, y = (ymax + ymin)*0.7,
label = result$ll_reached[1],
hjust = 0),
colour = brewercolors["Blue"]) +
geom_text(aes(x = xmax, y = (ymax + ymin)*0.7,
label = result$ul_reached[1],
hjust = 1),
colour = brewercolors["Blue"]) +
geom_text(aes(x = (xmax + xmin)/2, y = (ymax + ymin)*0.7,
label = result$both_reached[1]),
hjust = 0.5,
colour = brewercolors["Blue"]) +
#Scaling and double axis definitions
scale_x_continuous(breaks = seq(xmin, xmax, by = 1),
#limits = c(xmin, xmax),
sec.axis = sec_axis(~ .,
seq(xmin, xmax, by = 1),
name = "Average candy weight in the population"),
expand = c(.02, .02)) +
scale_y_continuous(breaks = NULL,
#limits = c(ymin, ymax),
expand = c(0, 0)) +
coord_cartesian(xlim = c(xmin, xmax), ylim = c(ymin, ymax)) +
#Axis labels and theme
xlab("Average candy weight in the sample") +
ylab("") +
theme_general() +
theme(panel.border = element_rect(colour = NA))
})
})
|
/apps/pop-means-ci/server.R
|
no_license
|
WdeNooy/Statistical-Inference
|
R
| false
| false
| 10,763
|
r
|
# Shiny server for an interactive confidence-interval demonstration.
# A sample mean is drawn randomly once per session; the user clicks candidate
# population means on the plot and each candidate is coloured by whether the
# observed sample is among its 95% most likely samples (|z| <= 1.96).
# The goal is to discover both limits of the 95% confidence interval.
library(ggplot2)
library(shiny)
shinyServer(function(input, output) {
  # Shared styling; assumed to define brewercolors and theme_general()
  # (NOTE(review): confirm against plottheme/styling.R).
  source("../plottheme/styling.R", local = TRUE)
  # Limits of scales
  xmin <- 0
  xmax <- 5
  ymin <- 0
  ymax <- 0.9
  # Size of point
  psize <- 5
  # Sampling distribution standard error (randomised per session)
  se <- runif(1, 0.4, 0.6)
  # Draw sample mean and store with other unique values
  samplemean <- runif(1, 2, 4)
  sample_ll <- samplemean - 1.96 * se  # lower 95% CI limit around the sample mean
  sample_ul <- samplemean + 1.96 * se  # upper 95% CI limit around the sample mean
  smean <- data.frame(x = samplemean, # last selected population mean
                      z = 0, # z value of sample mean for current pop. mean
                      xlow = xmin, xhigh = xmax,
                      ypop = ymin, # 95% CI triangle
                      llim = samplemean, ulim = samplemean, # current CI limits
                      colour = "grey") # colour of triangle
  # Initialize data frame of clicked points (first point is invisible)
  df <- data.frame(x = xmin, y = ymin, colour = "grey")
  # Initialize interaction results
  result <- data.frame(ll_reached = "", ul_reached = "", both_reached = "", ntry = 0)
  ## MAIN PLOT ##
  output$mainplot <- renderPlot({
    # Capture coordinates of click
    if (!is.null(input$plot_click$x)) {
      x <- input$plot_click$x
      # Clamp x values outside the plottable area
      if (x < xmin) x <- xmin
      if (x > xmax) x <- xmax
      # Snap x to the critical value when the click lands near it
      x <- ifelse((samplemean - x)/se > 1.9 & (samplemean - x)/se <= 1.965,
                  samplemean - 1.96 * se,
                  ifelse((samplemean - x)/se < -1.9 & (samplemean - x)/se >= -1.965,
                         samplemean + 1.96 * se, x))
      out <- abs(samplemean - x)/se > 1.965 # outside the 95% CI?
      # Record the clicked candidate population mean (red = rejected, green = plausible)
      df <<- rbind(df, data.frame(x = x,
                                  y = ymax - psize/200,
                                  colour = ifelse(out, brewercolors["Red"], brewercolors["Green"])))
      # Update 95% confidence interval and z value of sample mean
      ll_cur <- smean$llim
      ul_cur <- smean$ulim
      smean <<- data.frame(x = x,
                           z = round((samplemean - x)/se, digits = 2),
                           xlow = x - 1.96 * se,
                           xhigh = x + 1.96 * se,
                           ypop = ymax,
                           # keep a limit frozen once it has been reached
                           llim = ifelse(result$ll_reached == "", x, ll_cur),
                           ulim = ifelse(result$ul_reached == "", x, ul_cur),
                           colour = ifelse(out, brewercolors["Red"], brewercolors["Green"]))
      # Update results (z is rounded above, so the == 1.96 comparisons are exact)
      result$ntry <- result$ntry + 1
      if (smean$z == 1.96) result$ll_reached <- "Lower limit reached"
      if (smean$z == -1.96) result$ul_reached <- "Upper limit reached"
      if (result$ll_reached != "" & result$ul_reached != "")
        result$both_reached <- paste0(result$ntry, " Clicks")
      result <<- result
    }
    # PLOT #
    ggplot() +
      geom_blank() +
      # 95% most likely sample means:
      # normal density curve around the selected population mean
      stat_function(fun = dnorm,
                    args = list(mean = smean$x, sd = se),
                    data = smean,
                    xlim = c(xmin, xmax),
                    alpha = ifelse(smean$ypop == ymin, 0, 1),
                    colour = smean$colour,
                    size = 1.5 ) +
      # left-tail boundary
      geom_segment(aes(x = xlow, xend = xlow, y = 0, yend = dnorm(xlow, mean = x, sd = se)),
                   data = smean,
                   alpha = ifelse(smean$ypop == ymin, 0, 1),
                   colour = smean$colour,
                   size = 1.5
      ) +
      # right-tail boundary
      geom_segment(aes(x = xhigh, xend = xhigh, y = 0, yend = dnorm(xhigh, mean = x, sd = se)),
                   data = smean,
                   alpha = ifelse(smean$ypop == ymin, 0, 1),
                   colour = smean$colour,
                   size = 1.5
      ) +
      # selected population average line
      geom_vline(aes(xintercept = smean$x),
                 alpha = ifelse(smean$ypop == ymin, 0, 1),
                 size = 0.5,
                 colour = smean$colour) +
      # interval estimate
      geom_segment(aes(x = xlow, xend = xhigh,
                       y = ymin, yend = ymin), data = smean,
                   alpha = ifelse(smean$ypop == ymin, 0, 1),
                   size = 4,
                   colour = smean$colour) +
      # caption next to the most recent click
      geom_text(aes(x = df$x[nrow(df)], y = (ymin + ymax)/4),
                label = "95% most likely samples",
                alpha = ifelse(smean$ypop == ymin, 0, 1),
                colour = smean$colour,
                size = 4.5) +
      # Critical value times standard error: arrows and text
      # left arrow with label
      geom_segment(aes(x = samplemean, xend = sample_ll,
                       y = (ymax + ymin)/2, yend = (ymax + ymin)/2),
                   alpha = ifelse(result$ll_reached == "", 0, 1),
                   size = 1,
                   colour = "darkgray",
                   arrow = arrow(type = "closed", length = unit(0.1, "inches"),
                                 ends = ifelse(result$ll_reached == "" || result$ul_reached == "", "both", "last"))) +
      geom_text(aes(x = (samplemean + sample_ll)/2, y = (ymax + ymin)*0.47,
                    label = ifelse(result$ll_reached == "", "", "1.96 * SE"),
                    vjust = 1),
                colour = "darkgray") +
      # right arrow with label
      geom_segment(aes(x = samplemean, xend = sample_ul,
                       y = (ymax + ymin)/2, yend = (ymax + ymin)/2),
                   alpha = ifelse(result$ul_reached == "", 0, 1),
                   size = 1,
                   colour = "darkgray",
                   arrow = arrow(type = "closed", length = unit(0.1, "inches"),
                                 ends = ifelse(result$ll_reached == "" || result$ul_reached == "", "both", "last"))) +
      geom_text(aes(x = (samplemean + sample_ul)/2, y = (ymax + ymin)*0.47,
                    label = ifelse(result$ul_reached == "", "", "1.96 * SE"),
                    vjust = 1),
                colour = "darkgray") +
      # confidence interval text: display if both limits have been reached
      geom_text(aes(x = samplemean, y = ymax*0.54,
                    label = ifelse(result$ll_reached == "" || result$ul_reached == "", "", "95% confidence interval"),
                    vjust = 0),
                colour = "darkgray") +
      # vertical line to actual sample mean (show if at least one limit has been reached)
      geom_segment(aes(x = samplemean, xend = samplemean,
                       y = ymin, yend = (ymax + ymin)/2),
                   alpha = ifelse(result$ll_reached == "" && result$ul_reached == "", 0, 1),
                   size = 1,
                   colour = "darkgray") +
      # vertical line to lower limit population value
      geom_segment(aes(x = sample_ll, xend = sample_ll,
                       y = ymax, yend = (ymax + ymin)/2),
                   alpha = ifelse(result$ll_reached == "", 0, 1),
                   size = 1,
                   colour = "darkgray") +
      # vertical line to upper limit population value
      geom_segment(aes(x = sample_ul, xend = sample_ul,
                       y = ymax, yend = (ymax + ymin)/2),
                   alpha = ifelse(result$ul_reached == "", 0, 1),
                   size = 1,
                   colour = "darkgray") +
      # Selected population means (on click)
      geom_point(aes(x = df$x, y = df$y),
                 alpha = ifelse(df$y == ymin, 0, 1),
                 size = psize,
                 colour = df$colour) +
      # label of the last clicked candidate (hidden when it sits on a CI limit)
      geom_text(aes(x = smean$x, y = ymax - psize/80,
                    label = format(round(smean$x, digits = 2), nsmall=2),
                    vjust = 1),
                alpha = ifelse(smean$ypop == ymin || abs(smean$z) == 1.96, 0, 1)) +
      # Lower and upper bounds
      geom_text(aes(x = smean$llim, y = ymax - psize/80,
                    label = ifelse(result$ll_reached == "","",
                                   paste0(format(round(smean$llim, digits = 2), nsmall=2), "\nLower limit")),
                    vjust = 1)) +
      geom_text(aes(x = smean$ulim, y = ymax - psize/80,
                    label = ifelse(result$ul_reached == "","",
                                   paste0(format(round(smean$ulim, digits = 2), nsmall=2), "\nUpper limit")),
                    vjust = 1)) +
      # Sample mean
      geom_point(aes(x = samplemean, y = ymin + psize/200),
                 size = psize) +
      geom_text(aes(x = samplemean + (xmax + xmin)*0.02, y = (ymax + ymin)*0.05,
                    label = "Our Sample"),
                hjust = 0) +
      # z value of sample mean
      geom_text(aes(x = samplemean + (xmax + xmin)*0.02, y = (ymax + ymin)*0.11,
                    label = paste0("z = ", format(round(smean$z, digits = 2), nsmall=2))),
                hjust = 0,
                alpha = ifelse(smean$ypop == ymin, 0, 1)) +
      # Results
      geom_text(aes(x = xmin, y = (ymax + ymin)*0.7,
                    label = result$ll_reached[1],
                    hjust = 0),
                colour = brewercolors["Blue"]) +
      geom_text(aes(x = xmax, y = (ymax + ymin)*0.7,
                    label = result$ul_reached[1],
                    hjust = 1),
                colour = brewercolors["Blue"]) +
      geom_text(aes(x = (xmax + xmin)/2, y = (ymax + ymin)*0.7,
                    label = result$both_reached[1]),
                hjust = 0.5,
                colour = brewercolors["Blue"]) +
      # Scaling and double axis definitions
      scale_x_continuous(breaks = seq(xmin, xmax, by = 1),
                         sec.axis = sec_axis(~ .,
                                             seq(xmin, xmax, by = 1),
                                             name = "Average candy weight in the population"),
                         expand = c(.02, .02)) +
      scale_y_continuous(breaks = NULL,
                         expand = c(0, 0)) +
      coord_cartesian(xlim = c(xmin, xmax), ylim = c(ymin, ymax)) +
      # Axis labels and theme
      xlab("Average candy weight in the sample") +
      ylab("") +
      theme_general() +
      theme(panel.border = element_rect(colour = NA))
  })
})
|
/eye/R/combinedresult.R
|
no_license
|
XiangGuo1992/My-Master-Graduation-Thesis
|
R
| false
| false
| 443
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response_heatmap_custom.r
\name{response_heatmap_custom}
\alias{response_heatmap_custom}
\title{Plot the response probabilities for all observations as a heatmap}
\usage{
response_heatmap_custom(result, xorderTable, yorderTable,
responseThreshold = NULL, xtext = "Antigen/Fc Variable",
xlines = "white", ytext = "SubjectId", ylines = NULL)
}
\arguments{
\item{result}{The BAMBAResult object.}
\item{xorderTable}{A \code{data.frame} with all ag/re/tp combinations
to include as well as ordering, labeling, and color information.
Should have the following columns: ag, re, tp, order, label, color.}
\item{yorderTable}{A \code{data.frame} with all subjectIds to include
as well as ordering, labeling, and color information.
Should have the following columns: subjectId, order, label, color}
\item{responseThreshold}{If not NULL, the threshold probability
defining a response, resulting in a two-color heatmap rather than
a continuous heatmap. Defaults to \code{NULL}}
\item{xtext}{The label for the x-axis. Defaults to 'Antigen/Fc Variable'.}
\item{xlines}{A string defining the color for lines separating groups
(by label) on the x-axis or \code{NULL} for no lines.
Defaults to 'white'.}
\item{ytext}{The label for the y-axis. Defaults to 'SubjectId'.}
\item{ylines}{A string defining the color for lines separating groups
(by label) on the y-axis or \code{NULL} for no lines.
Defaults to \code{NULL}}
}
\value{
A ggplot heatmap.
}
\description{
This function plots the response probabilities from a model fit as a
heatmap with additional options for sorting and filtering both axes.
}
|
/man/response_heatmap_custom.Rd
|
no_license
|
RGLab/BAMBA
|
R
| false
| true
| 1,673
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response_heatmap_custom.r
\name{response_heatmap_custom}
\alias{response_heatmap_custom}
\title{Plot the response probabilities for all observations as a heatmap}
\usage{
response_heatmap_custom(result, xorderTable, yorderTable,
responseThreshold = NULL, xtext = "Antigen/Fc Variable",
xlines = "white", ytext = "SubjectId", ylines = NULL)
}
\arguments{
\item{result}{The BAMBAResult object.}
\item{xorderTable}{A \code{data.frame} with all ag/re/tp combinations
to include as well as ordering, labeling, and color information.
Should have the following columns: ag, re, tp, order, label, color.}
\item{yorderTable}{A \code{data.frame} with all subjectIds to include
as well as ordering, labeling, and color information.
Should have the following columns: subjectId, order, label, color}
\item{responseThreshold}{If not NULL, the threshold probability
defining a response, resulting in a two-color heatmap rather than
a continuous heatmap. Defaults to \code{NULL}}
\item{xtext}{The label for the x-axis. Defaults to 'Antigen/Fc Variable'.}
\item{xlines}{A string defining the color for lines separating groups
(by label) on the x-axis or \code{NULL} for no lines.
Defaults to 'white'.}
\item{ytext}{The label for the y-axis. Defaults to 'SubjectId'.}
\item{ylines}{A string defining the color for lines separating groups
(by label) on the y-axis or \code{NULL} for no lines.
Defaults to \code{NULL}}
}
\value{
A ggplot heatmap.
}
\description{
This function plots the response probabilities from a model fit as a
heatmap with additional options for sorting and filtering both axes.
}
|
#' Creates a description file for a compendium
#'
#' The idea behind a compendium is to have a minimal description file that makes
#' it easy for anyone to 'install' your analysis dependencies. This makes it
#' possible for someone to run your code easily.
#'
#' To automatically populate author information, you may set usethis options in your `.rprofile` like so.
#' \code{options(
#'  usethis.full_name = "Karthik Ram",
#'  usethis.description = list(
#'   `Authors@R` = 'person("Karthik", "Ram", email = "karthik.ram@gmail.com", role = c("aut", "cre"), 
#'   comment = c(ORCID = "0000-0002-0233-1757"))',
#'  License = "MIT + file LICENSE",
#'  Version = "0.0.0.9000"
#'  )
#' )}
#'
#' @param type Value for the DESCRIPTION Type field. Defaults to "Compendium".
#' @param package Name of your compendium
#' @param description Description of your compendium
#' @param version Version of your compendium
#' @param path path to project (in case it is not in the current working directory)
#' @importFrom desc description
#'
#' @export
write_compendium_description <-
  function(type = "Compendium",
           package = "Compendium title",
           description = "Compendium description",
           version = "0.0.1",
           path = ".") {
    Depends <- get_dependencies(path)
    if (is.null(Depends))
      stop("No packages found in any script or notebook", call. = FALSE)
    # Remote (non-CRAN) packages go into the Remotes field but are
    # intentionally kept in Depends as well: Remotes only tells installers
    # *where* to find them.
    remotes <- get_remotes(Depends)
    # Build the common fields once; a Remotes field is added below only when
    # at least one remote package was detected.
    fields <-
      list(
        Type = type,  # was hard-coded to "Compendium", silently ignoring the argument
        Package = package,
        Version = version,
        Description = description,
        Depends = paste0(Depends, collapse = ", ")
      )
    # Bug fix: the original tested `length(remote_pkgs > 0)` — the length of a
    # comparison vector — instead of comparing the length to zero.
    if (length(remotes) > 0) {
      fields$Remotes <- paste0(remotes, collapse = ", ")
    }
    desc <- build_description_internal(fields)
    desc <- desc::description$new(text = desc)
    tidy_desc_internal(desc)
    lines <-
      desc$str(by_field = TRUE,
               normalize = FALSE,
               mode = "file")
    path <- sanitize_path(path) # strip trailing slashes
    usethis::write_over(glue("{path}/DESCRIPTION"), lines)
    cliapp::cli_alert_info("Please update the description fields, particularly the title, description and author")
  }
|
/R/write_compendium_description.R
|
permissive
|
annakrystalli/holepunch
|
R
| false
| false
| 3,118
|
r
|
#' Creates a description file for a compendium
#'
#' The idea behind a compendium is to have a minimal description file that makes
#' it easy for anyone to 'install' your analysis dependencies. This makes it
#' possible for someone to run your code easily.
#'
#' To automatically populate author information, you may set usethis options in your `.rprofile` like so.
#' \code{options(
#'  usethis.full_name = "Karthik Ram",
#'  usethis.description = list(
#'   `Authors@R` = 'person("Karthik", "Ram", email = "karthik.ram@gmail.com", role = c("aut", "cre"), 
#'   comment = c(ORCID = "0000-0002-0233-1757"))',
#'  License = "MIT + file LICENSE",
#'  Version = "0.0.0.9000"
#'  )
#' )}
#'
#' @param type Value for the DESCRIPTION Type field. Defaults to "Compendium".
#' @param package Name of your compendium
#' @param description Description of your compendium
#' @param version Version of your compendium
#' @param path path to project (in case it is not in the current working directory)
#' @importFrom desc description
#'
#' @export
write_compendium_description <-
  function(type = "Compendium",
           package = "Compendium title",
           description = "Compendium description",
           version = "0.0.1",
           path = ".") {
    Depends <- get_dependencies(path)
    if (is.null(Depends))
      stop("No packages found in any script or notebook", call. = FALSE)
    # Remote (non-CRAN) packages go into the Remotes field but are
    # intentionally kept in Depends as well: Remotes only tells installers
    # *where* to find them.
    remotes <- get_remotes(Depends)
    # Build the common fields once; a Remotes field is added below only when
    # at least one remote package was detected.
    fields <-
      list(
        Type = type,  # was hard-coded to "Compendium", silently ignoring the argument
        Package = package,
        Version = version,
        Description = description,
        Depends = paste0(Depends, collapse = ", ")
      )
    # Bug fix: the original tested `length(remote_pkgs > 0)` — the length of a
    # comparison vector — instead of comparing the length to zero.
    if (length(remotes) > 0) {
      fields$Remotes <- paste0(remotes, collapse = ", ")
    }
    desc <- build_description_internal(fields)
    desc <- desc::description$new(text = desc)
    tidy_desc_internal(desc)
    lines <-
      desc$str(by_field = TRUE,
               normalize = FALSE,
               mode = "file")
    path <- sanitize_path(path) # strip trailing slashes
    usethis::write_over(glue("{path}/DESCRIPTION"), lines)
    cliapp::cli_alert_info("Please update the description fields, particularly the title, description and author")
  }
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{exsic-package}
\alias{exsic-package}
\title{Provides botanists with convenience functions to create exsiccatae indices}
\description{
The tool allows creating simple specimen indices as found in
taxonomic treatments based on a table of specimen records.
An example file of tabulated speciment data is provided. In addition,
four different exsiccatae styles are provided.
The naming of the columns in the specimen table follows largely the conventions used in the BRAHMS
software package.
Each specimen record must at least have content in the following nine fields:
id, genus, species, collcite, number, colldate, country, majorarea, minorarea.
If not present, the fields are added and filled with dummy values like 's.d.' for no date or 'Unknown
country/area'.
Highly recommended fields include: collector, addcoll.
Optional fields include: locnotes, phenology, elevation, latitude, longitude, and dups
The produced indices will sort countries and species alphabetically. Within a country
records will be sorted alphabetically by 'majorarea' (if present) and by collector and
collecting nunber.
A web page in standard html format is created based on a template.
The template may be changed and specified in most word processing software.
The package provides one main function 'exsic'.
See the example in this section on how to access it.
}
\examples{
# Example
load(system.file("data/config.rda", package="exsic"))
###########################################################
# This runs the example file
# Read input file
df = system.file("samples/exsic.csv", package="exsic")
# read only first 10 records
data = read.exsic(df)[1:10,]
# Prepare output file
td = tempdir()
of = file.path(td,"out.html")
# Example 1: mostly default parameters
# Prepare exsiccatae indices
exsic(data, html = of)
# Example 2: using another format
of = file.path(td,"out_PK.html")
exsic(data, html = of, format = format.PK)
}
\author{
Reinhard Simon, David M. Spooner
}
|
/man/exsic-package.Rd
|
no_license
|
cran/exsic
|
R
| false
| false
| 2,096
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{exsic-package}
\alias{exsic-package}
\title{Provides botanists with convenience functions to create exsiccatae indices}
\description{
The tool allows creating simple specimen indices as found in
taxonomic treatments based on a table of specimen records.
An example file of tabulated specimen data is provided. In addition,
four different exsiccatae styles are provided.
The naming of the columns in the specimen table follows largely the conventions used in the BRAHMS
software package.
Each specimen record must at least have content in the following nine fields:
id, genus, species, collcite, number, colldate, country, majorarea, minorarea.
If not present, the fields are added and filled with dummy values like 's.d.' for no date or 'Unknown
country/area'.
Highly recommended fields include: collector, addcoll.
Optional fields include: locnotes, phenology, elevation, latitude, longitude, and dups
The produced indices will sort countries and species alphabetically. Within a country
records will be sorted alphabetically by 'majorarea' (if present) and by collector and
collecting nunber.
A web page in standard html format is created based on a template.
The template may be changed and specified in most word processing software.
The package provides one main function 'exsic'.
See the example in this section on how to access it.
}
\examples{
# Example
load(system.file("data/config.rda", package="exsic"))
###########################################################
# This runs the example file
# Read input file
df = system.file("samples/exsic.csv", package="exsic")
# read only first 10 records
data = read.exsic(df)[1:10,]
# Prepare output file
td = tempdir()
of = file.path(td,"out.html")
# Example 1: mostly default parameters
# Prepare exsiccatae indices
exsic(data, html = of)
# Example 2: using another format
of = file.path(td,"out_PK.html")
exsic(data, html = of, format = format.PK)
}
\author{
Reinhard Simon, David M. Spooner
}
|
# Usually I would extract the getData() function into its own file to be
# sourced by all plot scripts, but I deliberately decided against it since the
# instructions explicitly state that "There should be four PNG files and four
# R code files" in my GitHub repository and I want to be formally on the safe
# side here. Thus I had to copy the source code of the getData() function
# into every plot script.
# This function assumes the source data is already located in the current
# working directory which should be formally safe as well, since the
# instructions only state that my code file "should include code for reading
# the data" and not for downloading the data.
# Read a fixed 2880-row window of household_power_consumption.txt (rows
# 66638-69517 of the raw file) and attach a combined DateTime column.
# Assumes the file is already present in the current working directory.
getData <- function() {
    src <- "household_power_consumption.txt"
    # Read only the header row first to recover the column names, because the
    # data window below is read with a skip that jumps past the header.
    col_names <- names(read.table(file = src, header = TRUE, sep = ";", nrows = 1))
    # "?" marks missing values in the raw data.
    power <- read.table(file = src,
                        sep = ";",
                        col.names = col_names,
                        na.strings = "?",
                        skip = 66637,
                        nrows = 2880)
    # Combine the character Date and Time columns into a single timestamp.
    power$DateTime <- strptime(paste(power$Date, power$Time, sep = " "),
                               format = "%d/%m/%Y %H:%M:%S")
    power
}
# Use the C locale for time so date/weekday labels render in English
# regardless of the system locale.
Sys.setlocale (
    category = "LC_TIME",
    locale = "C"
)
data <- getData()
# Render plot 3 directly to a 480x480 PNG device.
png (
    filename = "plot3.png",
    width = 480,
    height = 480,
    units = "px"
)
with (
    data = data,
    expr = {
        # Draw an empty frame first (type = "n"), then overlay one line per
        # sub-metering series.
        plot (
            x = DateTime,
            y = Sub_metering_1,
            type = "n",
            xlab = "",
            ylab = "Energy sub metering"
        )
        lines (
            x = DateTime,
            y = Sub_metering_1,
            col = "black"
        )
        lines (
            x = DateTime,
            y = Sub_metering_2,
            col = "red"
        )
        lines (
            x = DateTime,
            y = Sub_metering_3,
            col = "blue"
        )
        # Legend colours must match the three series drawn above.
        legend (
            x = "topright",
            legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
            col = c("black", "red", "blue"),
            lty = "solid"
        )
    }
)
dev.off()
|
/plot3.R
|
no_license
|
s-bolz/ExData_Plotting1
|
R
| false
| false
| 2,264
|
r
|
# Usually I would extract the getData() function into its own file to be
# sourced by all plot scripts, but I deliberately decided against it since the
# instructions explicitly state that "There should be four PNG files and four
# R code files" in my GitHub repository and I want to be formally on the safe
# side here. Thus I had to copy the source code of the getData() function
# into every plot script.
# This function assumes the source data is already located in the current
# working directory which should be formally safe as well, since the
# instructions only state that my code file "should include code for reading
# the data" and not for downloading the data.
# Read a fixed 2880-row window of household_power_consumption.txt (rows
# 66638-69517 of the raw file) and attach a combined DateTime column.
# Assumes the file is already present in the current working directory.
getData <- function() {
    src <- "household_power_consumption.txt"
    # Read only the header row first to recover the column names, because the
    # data window below is read with a skip that jumps past the header.
    col_names <- names(read.table(file = src, header = TRUE, sep = ";", nrows = 1))
    # "?" marks missing values in the raw data.
    power <- read.table(file = src,
                        sep = ";",
                        col.names = col_names,
                        na.strings = "?",
                        skip = 66637,
                        nrows = 2880)
    # Combine the character Date and Time columns into a single timestamp.
    power$DateTime <- strptime(paste(power$Date, power$Time, sep = " "),
                               format = "%d/%m/%Y %H:%M:%S")
    power
}
# Use the C locale for time so date/weekday labels render in English
# regardless of the system locale.
Sys.setlocale (
    category = "LC_TIME",
    locale = "C"
)
data <- getData()
# Render plot 3 directly to a 480x480 PNG device.
png (
    filename = "plot3.png",
    width = 480,
    height = 480,
    units = "px"
)
with (
    data = data,
    expr = {
        # Draw an empty frame first (type = "n"), then overlay one line per
        # sub-metering series.
        plot (
            x = DateTime,
            y = Sub_metering_1,
            type = "n",
            xlab = "",
            ylab = "Energy sub metering"
        )
        lines (
            x = DateTime,
            y = Sub_metering_1,
            col = "black"
        )
        lines (
            x = DateTime,
            y = Sub_metering_2,
            col = "red"
        )
        lines (
            x = DateTime,
            y = Sub_metering_3,
            col = "blue"
        )
        # Legend colours must match the three series drawn above.
        legend (
            x = "topright",
            legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
            col = c("black", "red", "blue"),
            lty = "solid"
        )
    }
)
dev.off()
|
# Shared ggplot2 theme for project plots: sans-serif typography, left-aligned
# titles/captions, blank panel background, and no axis ticks or axis lines.
# NOTE(review): relies on ggplot2 (theme(), element_text(), ...) being
# attached by the calling script; this file does not load it.
plotTheme <- function() {
  theme(
    plot.title = element_text(size = 14, family = "sans", face = "plain", hjust = 0),
    plot.subtitle=element_text(size = 11, family = "sans", hjust = 0),
    plot.caption=element_text(size = 12, family = "sans", face = "italic", hjust = 0),
    axis.title.x = element_text(size = rel(1.1), family = "sans", face = "plain", hjust = 1, vjust = -0.5),
    axis.title.y = element_text(size = rel(1.1), family = "sans", face = "plain", hjust = 1, vjust = 1),
    axis.text = element_text(size = rel(1.1), family = "sans", face = "plain"),
    panel.background = element_blank(),
    panel.grid.minor = element_blank(),
    axis.ticks = element_blank(),
    legend.title = element_text(size = 10, family = "sans"),
    legend.text = element_text(size = 10, family = "sans"),
    axis.line = element_blank()
  )
}
## Example
# Generate a vertical grid of values in (0, 1].
#
# @param l Number of steps beyond zero for the exponential grid
#   (ignored for the uniform grid).
# @param N For "u": number of uniform draws; for "e": decay scale.
# @param type "u" for a sorted uniform grid with the endpoint 1 appended,
#   "e" for the deterministic grid exp(-(0:l)/N).
# @return Numeric vector of grid values.
vertical.grid <- function(l, N, type = NULL) {
  # The original fell through for NULL/unknown types with cryptic errors
  # ("argument is of length zero" / "object 'res' not found"); fail clearly.
  if (is.null(type) || !(type %in% c("u", "e"))) {
    stop("'type' must be \"u\" (uniform) or \"e\" (exponential)", call. = FALSE)
  }
  if (type == "u") {
    # N sorted uniform draws with the endpoint 1 appended.
    res <- c(sort(runif(N)), 1)
  } else {
    # Deterministic exponential decay grid: 1, exp(-1/N), ..., exp(-l/N).
    res <- exp(-(0:l)/N)
  }
  res
}
# Empirical q-quantile of a vector Y.
#
# @param q Quantile level in [0, 1].
# @param Y Numeric vector; assumed sorted in ascending order
#   (NOTE(review): callers appear to pass sorted grids -- confirm).
# @return The element of Y at position ceiling(length(Y) * q), with the
#   index clamped into [1, length(Y)] so q = 0 returns Y[1] instead of
#   silently yielding an empty vector (ceiling(0) == 0 indexed out of range
#   in the original).
sQ <- function(q, Y) {
  N <- length(Y)
  idx <- min(max(1L, ceiling(N * q)), N)
  Y[idx]
}
# Nested sampling (normal prior, normal likelihood)
#
# Maintains N live points drawn from the prior and repeatedly replaces the
# worst (lowest-likelihood) point with a fresh draw from the prior truncated
# to the region where the likelihood exceeds the current minimum. Stops when
# the minimum likelihood is within `tol` (relative) of the maximum attainable
# likelihood Lmax.
#
# Arguments:
#   mu1, sigma1 - mean and sd of the normal likelihood
#   mu2, sigma2 - mean and sd of the normal prior
#   N           - number of live points
#   tol         - relative stopping tolerance
# Returns the vector of discarded minimum likelihood values, in removal order.
# NOTE(review): rtruncnorm() is not defined in this file (it comes from the
# 'truncnorm' package) -- confirm the caller attaches it.
nested_sampling = function(mu1,sigma1,mu2,sigma2,N,tol=0.001){
  # "mu1", "sigma1" - likelihood
  # "mu2", "sigma2" - prior
  Lmax = 1/(sqrt(2*pi)*sigma1)   # peak value of the normal likelihood density
  theta = rnorm(N,mu2,sigma2)    # initial live points drawn from the prior
  L = dnorm(theta,mu1,sigma1)
  phi = NULL
  error = 1
  while(error >= tol)
  {
    index = which.min(L)         # worst live point
    Lmin = min(L)
    phi = c(phi,Lmin)            # record the discarded likelihood value
    error = abs(Lmin-Lmax)/Lmax
    # Invert the likelihood: [a, b] is the x-interval on which
    # dnorm(x, mu1, sigma1) >= Lmin.
    term = -log(Lmin*sqrt(2*pi)*sigma1)
    a = mu1 - sqrt(term*2*sigma1^2)
    b = mu1 + sqrt(term*2*sigma1^2)
    # Replacement: draw from the prior truncated to the high-likelihood interval.
    newTheta = rtruncnorm(1,a,b,mean = mu2,sd = sigma2)
    newL = dnorm(newTheta,mu1,sigma1)
    theta[index] = newTheta
    L[index] = newL
  }
  return (phi)
}
|
/qis/vertical_funs.r
|
no_license
|
DattaHub/DattaHub.github.io
|
R
| false
| false
| 2,108
|
r
|
# Shared ggplot2 theme for project plots: sans-serif typography, left-aligned
# titles/captions, blank panel background, and no axis ticks or axis lines.
# NOTE(review): relies on ggplot2 (theme(), element_text(), ...) being
# attached by the calling script; this file does not load it.
plotTheme <- function() {
  theme(
    plot.title = element_text(size = 14, family = "sans", face = "plain", hjust = 0),
    plot.subtitle=element_text(size = 11, family = "sans", hjust = 0),
    plot.caption=element_text(size = 12, family = "sans", face = "italic", hjust = 0),
    axis.title.x = element_text(size = rel(1.1), family = "sans", face = "plain", hjust = 1, vjust = -0.5),
    axis.title.y = element_text(size = rel(1.1), family = "sans", face = "plain", hjust = 1, vjust = 1),
    axis.text = element_text(size = rel(1.1), family = "sans", face = "plain"),
    panel.background = element_blank(),
    panel.grid.minor = element_blank(),
    axis.ticks = element_blank(),
    legend.title = element_text(size = 10, family = "sans"),
    legend.text = element_text(size = 10, family = "sans"),
    axis.line = element_blank()
  )
}
## Example
# Generate a vertical grid of values in (0, 1].
#
# @param l Number of steps beyond zero for the exponential grid
#   (ignored for the uniform grid).
# @param N For "u": number of uniform draws; for "e": decay scale.
# @param type "u" for a sorted uniform grid with the endpoint 1 appended,
#   "e" for the deterministic grid exp(-(0:l)/N).
# @return Numeric vector of grid values.
vertical.grid <- function(l, N, type = NULL) {
  # The original fell through for NULL/unknown types with cryptic errors
  # ("argument is of length zero" / "object 'res' not found"); fail clearly.
  if (is.null(type) || !(type %in% c("u", "e"))) {
    stop("'type' must be \"u\" (uniform) or \"e\" (exponential)", call. = FALSE)
  }
  if (type == "u") {
    # N sorted uniform draws with the endpoint 1 appended.
    res <- c(sort(runif(N)), 1)
  } else {
    # Deterministic exponential decay grid: 1, exp(-1/N), ..., exp(-l/N).
    res <- exp(-(0:l)/N)
  }
  res
}
# Empirical q-quantile of a vector Y.
#
# @param q Quantile level in [0, 1].
# @param Y Numeric vector; assumed sorted in ascending order
#   (NOTE(review): callers appear to pass sorted grids -- confirm).
# @return The element of Y at position ceiling(length(Y) * q), with the
#   index clamped into [1, length(Y)] so q = 0 returns Y[1] instead of
#   silently yielding an empty vector (ceiling(0) == 0 indexed out of range
#   in the original).
sQ <- function(q, Y) {
  N <- length(Y)
  idx <- min(max(1L, ceiling(N * q)), N)
  Y[idx]
}
# Nested sampling (normal prior, normal likelihood)
#
# Maintains N live points drawn from the prior and repeatedly replaces the
# worst (lowest-likelihood) point with a fresh draw from the prior truncated
# to the region where the likelihood exceeds the current minimum. Stops when
# the minimum likelihood is within `tol` (relative) of the maximum attainable
# likelihood Lmax.
#
# Arguments:
#   mu1, sigma1 - mean and sd of the normal likelihood
#   mu2, sigma2 - mean and sd of the normal prior
#   N           - number of live points
#   tol         - relative stopping tolerance
# Returns the vector of discarded minimum likelihood values, in removal order.
# NOTE(review): rtruncnorm() is not defined in this file (it comes from the
# 'truncnorm' package) -- confirm the caller attaches it.
nested_sampling = function(mu1,sigma1,mu2,sigma2,N,tol=0.001){
  # "mu1", "sigma1" - likelihood
  # "mu2", "sigma2" - prior
  Lmax = 1/(sqrt(2*pi)*sigma1)   # peak value of the normal likelihood density
  theta = rnorm(N,mu2,sigma2)    # initial live points drawn from the prior
  L = dnorm(theta,mu1,sigma1)
  phi = NULL
  error = 1
  while(error >= tol)
  {
    index = which.min(L)         # worst live point
    Lmin = min(L)
    phi = c(phi,Lmin)            # record the discarded likelihood value
    error = abs(Lmin-Lmax)/Lmax
    # Invert the likelihood: [a, b] is the x-interval on which
    # dnorm(x, mu1, sigma1) >= Lmin.
    term = -log(Lmin*sqrt(2*pi)*sigma1)
    a = mu1 - sqrt(term*2*sigma1^2)
    b = mu1 + sqrt(term*2*sigma1^2)
    # Replacement: draw from the prior truncated to the high-likelihood interval.
    newTheta = rtruncnorm(1,a,b,mean = mu2,sd = sigma2)
    newL = dnorm(newTheta,mu1,sigma1)
    theta[index] = newTheta
    L[index] = newL
  }
  return (phi)
}
|
# Per-column statistical summary of `dataset`
# (assumed to be injected by the hosting notebook environment -- confirm).
summary(dataset)
|
/Mode/demo/spaces/Sales/✔️✔️Loyalty Segmentation.1441b8fdcdc9/notebook/cell-number-4.493391ba49bf.r
|
no_license
|
demo-mode/demo-github-sync
|
R
| false
| false
| 16
|
r
|
# Per-column statistical summary of `dataset`
# (assumed to be injected by the hosting notebook environment -- confirm).
summary(dataset)
|
# Build a GPCC mask for the west-coast study domain: mask out ocean and
# interior (non-cluster) cells, save the mask indices, and plot the cluster
# area together with two sample ONDJFMA precipitation fields.
#
# Side effects: reads a GPCC netCDF file and an RDS file, writes
# gpcc_mask.rds, and draws maps on the active graphics device.
library(pracma)
library(ncdf4)
library(maps)
library(raster)  # raster() is called below but was never loaded in the original
setwd("h:/GPCC_1982_2019")
# Read coordinates and precipitation from the GPCC daily file
nc <- nc_open("full.data_daily_v2018_1982.nc")
lon <- ncvar_get(nc, varid = "lon")
lat <- ncvar_get(nc, varid = "lat")
prcp <- ncvar_get(nc, varid = "precip")
nc_close(nc); rm(nc)
# Subset to the west-coast window (139.5W-115.5W, 30.5N-59.5N), first day only
llon <- which(lon <= -115.5 & lon >= -139.5)
llat <- which(lat >= 30.5 & lat <= 59.5)
prcp <- prcp[llon, llat, 1]
# Rotate 90 degrees; note 'k' belongs to rot90(), not as.matrix() -- the
# original wrote as.matrix(prcp, k = 1), so the k argument was ignored
mask <- rot90(as.matrix(prcp), k = 1)
View(mask)
# Manually blank the interior (non-cluster) corner of the grid
mask[1, 12:25] <- NA
mask[2, 13:25] <- NA
mask[3, 15:25] <- NA
mask[4, 16:25] <- NA
mask[5, 18:25] <- NA
mask[6, 19:25] <- NA
mask[7, 20:25] <- NA
mask[8, 22:25] <- NA
mask[9, 24:25] <- NA
mask[10, 25] <- NA
# Save the indices of masked (NA) cells, then binarize the kept cells to 1
gpcc_mask <- which(is.na(mask))
saveRDS(gpcc_mask, 'h:/ms_project1/output/index/gpcc_mask.rds')
mask[gpcc_mask] <- NA
mask[!is.na(mask)] <- 1
# Plot the cluster area with country/state outlines and the study box
par(mar = c(3, 4, 3, 0))
dat_ras <- raster(mask, xmn = -140, xmx = -115, ymn = 30, ymx = 60)
plot(dat_ras, main = "GPCC Cluster Area", legend = FALSE)
map('world', add = TRUE)
map('state',
    region = c('washington', 'oregon', 'california', 'nevada', 'idaho',
               'montana', 'arizona', 'utah', 'colorado', 'new mexico'),
    add = TRUE)
polygon(c(-123, -123, -120, -120, -123), y = c(38, 42, 42, 38, 38), lwd = 3)
# Two sample ONDJFMA precipitation fields, binarized to presence (1) / NA
gpcc_sub_ondjfma <- readRDS('data/prcp/gpcc_tp_wc_1984_2019_ondjfma')
gpcc1 <- gpcc_sub_ondjfma[, , 1]
gpcc2 <- gpcc_sub_ondjfma[, , 2]
gpcc1[!is.na(gpcc1)] <- 1
gpcc2[!is.na(gpcc2)] <- 1
# gpcc2 is 2-D after slicing; the original's three-index assignment
# gpcc2[19:22,18:20,1] would raise "incorrect number of dimensions"
gpcc2[19:22, 18:20] <- NA
par(mfrow = c(1, 2))
dat_ras <- raster(gpcc1, xmn = -140, xmx = -115, ymn = 30, ymx = 60)
plot(dat_ras, main = "GPCC Cluster Area", legend = FALSE)
map('world', add = TRUE)
map('state',
    region = c('washington', 'oregon', 'california', 'nevada', 'idaho',
               'montana', 'arizona', 'utah', 'colorado', 'new mexico'),
    add = TRUE)
polygon(c(-123, -123, -120, -120, -123), y = c(38, 42, 42, 38, 38), lwd = 3)
dat_ras <- raster(gpcc2, xmn = -140, xmx = -115, ymn = 30, ymx = 60)
plot(dat_ras, main = "GPCC Cluster Area", legend = FALSE)
map('world', add = TRUE)
map('state',
    region = c('washington', 'oregon', 'california', 'nevada', 'idaho',
               'montana', 'arizona', 'utah', 'colorado', 'new mexico'),
    add = TRUE)
polygon(c(-123, -123, -120, -120, -123), y = c(38, 42, 42, 38, 38), lwd = 3)
# clear the workspace (kept from the original; note this wipes everything)
rm(list = ls())
#################################################END#################################################
|
/data/prcp/gpcc_mask_plot.R
|
no_license
|
zpb4/ms_project1
|
R
| false
| false
| 2,330
|
r
|
# Duplicate copy of the GPCC mask-building script: reads GPCC precipitation,
# masks ocean/interior cells, saves the mask indices, and plots the cluster
# area plus two sample ONDJFMA precipitation fields.
library(pracma)
library(ncdf4)
library(maps)
# NOTE(review): raster() is called below but library(raster) is never loaded
# in this script -- confirm it is attached elsewhere before running.
setwd("h:/GPCC_1982_2019")
nc<-nc_open("full.data_daily_v2018_1982.nc")
lon<-ncvar_get(nc,varid = "lon")
lat<-ncvar_get(nc,varid = "lat")
prcp<-ncvar_get(nc,varid = "precip")
nc_close(nc); rm(nc)
# Subset to the west-coast window (139.5W-115.5W, 30.5N-59.5N), first day only
llon<-which(lon<=-115.5 & lon>=-139.5)
llat<-which(lat>=30.5 & lat<=59.5)
prcp<-prcp[llon,llat,1]
# NOTE(review): k=1 is passed to as.matrix(), not rot90() -- likely intended
# rot90(as.matrix(prcp), k=1); confirm the intended grid orientation.
mask<-rot90(as.matrix(prcp,k=1))
View(mask)
# Manually blank the interior (non-cluster) corner of the grid
mask[1,12:25]<-NA
mask[2,13:25]<-NA
mask[3,15:25]<-NA
mask[4,16:25]<-NA
mask[5,18:25]<-NA
mask[6,19:25]<-NA
mask[7,20:25]<-NA
mask[8,22:25]<-NA
mask[9,24:25]<-NA
mask[10,25]<-NA
# Save indices of masked (NA) cells, then binarize the kept cells to 1
gpcc_mask<-which(is.na(mask)==T)
saveRDS(gpcc_mask,'h:/ms_project1/output/index/gpcc_mask.rds')
mask[gpcc_mask]<-NA
mask[which(is.na(mask)==F)]<-1
par(mar=c(3,4,3,0))
dat_ras<-raster(mask,xmn=-140,xmx=-115,ymn=30,ymx=60)
plot(dat_ras, main="GPCC Cluster Area",legend=F)
map('world',add=T)#,xlim = c(-130,-115),ylim=c(30,60), add=T)
map('state',region=c('washington','oregon','california','nevada','idaho','montana','arizona','utah','colorado','new mexico'),add=T)#,
#xlim = c(-130,-115),add=T)
polygon(c(-123,-123,-120,-120,-123),y = c(38,42,42,38,38),lwd=3)
# Load two sample ONDJFMA precipitation fields and binarize to 1 / NA
gpcc_sub_ondjfma<-readRDS('data/prcp/gpcc_tp_wc_1984_2019_ondjfma')
gpcc1<-gpcc_sub_ondjfma[,,1]
gpcc2<-gpcc_sub_ondjfma[,,2]
gpcc1[which(is.na(gpcc1)==F)]<-1
gpcc2[which(is.na(gpcc2)==F)]<-1
# NOTE(review): gpcc2 is 2-D after the slicing above, so this three-index
# assignment will error -- confirm whether gpcc2[19:22,18:20] was intended.
gpcc2[19:22,18:20,1]<-NA
par(mfrow=c(1,2))
dat_ras<-raster(gpcc1,xmn=-140,xmx=-115,ymn=30,ymx=60)
plot(dat_ras, main="GPCC Cluster Area",legend=F)
map('world',add=T)#,xlim = c(-130,-115),ylim=c(30,60), add=T)
map('state',region=c('washington','oregon','california','nevada','idaho','montana','arizona','utah','colorado','new mexico'),add=T)#,
#xlim = c(-130,-115),add=T)
polygon(c(-123,-123,-120,-120,-123),y = c(38,42,42,38,38),lwd=3)
dat_ras<-raster(gpcc2,xmn=-140,xmx=-115,ymn=30,ymx=60)
plot(dat_ras, main="GPCC Cluster Area",legend=F)
map('world',add=T)#,xlim = c(-130,-115),ylim=c(30,60), add=T)
map('state',region=c('washington','oregon','california','nevada','idaho','montana','arizona','utah','colorado','new mexico'),add=T)#,
#xlim = c(-130,-115),add=T)
polygon(c(-123,-123,-120,-120,-123),y = c(38,42,42,38,38),lwd=3)
# clear the workspace (wipes everything in the global environment)
rm(list=ls())
#################################################END#################################################
|
# Rebuild the reachR package documentation from this script's directory.
#
# Sources every file under ./R/ (so roxygen sees the current definitions),
# then regenerates the Rd documentation with roxygen2.
# Side effects: changes the working directory, attaches roxygen2/devtools.
build <- function() {
  # Detach a previously attached reachR, if any. The original called
  # detach() unconditionally, which errors when the package is not loaded.
  if ("package:reachR" %in% search()) {
    detach("package:reachR")
  }
  this_script_path <- dirname(rstudioapi::getActiveDocumentContext()$path)
  setwd(this_script_path)
  reachr_files <- paste0("./R/", list.files("./R/"))
  # source() for side effects only; invisible() suppresses printed results
  invisible(lapply(reachr_files, source))
  # library() fails loudly if a dependency is missing, unlike require(),
  # which only warns and returns FALSE
  library(roxygen2)
  library(devtools)
  roxygenize(clean = TRUE)
}
build()
|
/build_package.R
|
no_license
|
mabafaba/reachR2
|
R
| false
| false
| 317
|
r
|
# Rebuild the reachR package documentation: detach the package, move to this
# script's directory, source every file under ./R/, and run roxygenize().
build<-function(){
  # NOTE(review): ls() inside a function lists the (empty) local environment,
  # so this rm() is a no-op rather than clearing the global workspace.
  rm(list=ls())
  # NOTE(review): detach() errors if reachR is not currently attached.
  detach("package:reachR")
  this_script_path<-(dirname(rstudioapi::getActiveDocumentContext()$path))
  setwd(this_script_path)
  getwd()
  reachr_files<-paste0("./R/", list.files("./R/"))
  # source each package file for its side effects
  sapply(reachr_files,source)
  require("roxygen2")
  require("devtools")
  roxygenize(clean=T)
}
build()
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# fixed seed so the cross-validation folds are reproducible
set.seed(1)

#:# data
# fetch the "blogger" dataset from OpenML and show its first rows
dataset <- getOMLDataSet(data.name = "blogger")
head(dataset$data)

#:# preprocessing
# data is used as-is; shown again for the record
head(dataset$data)

#:# model
# random-forest classifier with probability predictions
task <- makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn <- makeLearner("classif.randomForest",
                   par.vals = list(ntree = 500, mtry = 1L),
                   predict.type = "prob")

#:# hash
#:# 86695398211f58c62ccef44fb4a119e3
# hash of (task, learner) identifies this model configuration
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation over several classification measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_blogger/classification_Class/86695398211f58c62ccef44fb4a119e3/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 709
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# fixed seed so the cross-validation folds are reproducible
set.seed(1)
#:# data
# fetch the "blogger" dataset from OpenML and show its first rows
dataset <- getOMLDataSet(data.name = "blogger")
head(dataset$data)
#:# preprocessing
# no preprocessing is applied; the data is used as-is
head(dataset$data)
#:# model
# random-forest classifier with probability predictions
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.randomForest", par.vals = list(ntree = 500, mtry = 1L), predict.type = "prob")
#:# hash
#:# 86695398211f58c62ccef44fb4a119e3
# hash of (task, learner) identifies this model configuration
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation over several classification measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# write sessionInfo() to a file for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
\name{closestColor}
\alias{closestColor}
\title{find color corresponding to an integer}
\usage{
closestColor(x, colscale)
}
\arguments{
\item{x}{number}
\item{colscale}{vector representing range of numbers the
color scale is representing}
}
\value{
color (from \code{heat.colors}) that most closely matches
\code{x} in the given scale
}
\description{
find color corresponding to an integer
}
\details{
internal function for \code{plotTranscripts} - not intended
for direct use
}
\author{
Alyssa Frazee
}
\seealso{
\code{\link{plotTranscripts}}
}
|
/man/closestColor.Rd
|
no_license
|
jtleek/ballgown
|
R
| false
| false
| 554
|
rd
|
\name{closestColor}
\alias{closestColor}
\title{find color corresponding to an integer}
\usage{
closestColor(x, colscale)
}
\arguments{
\item{x}{number}
\item{colscale}{vector representing range of numbers the
color scale is representing}
}
\value{
color (from \code{heat.colors}) that most closely matches
\code{x} in the given scale
}
\description{
find color corresponding to an integer
}
\details{
internal function for \code{plotTranscripts} - not intended
for direct use
}
\author{
Alyssa Frazee
}
\seealso{
\code{\link{plotTranscripts}}
}
|
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#******************************************************************
#*                                                                *
#*                      Features2Mart                             *
#*                                                                *
#******************************************************************
#------------------------------------------------------------
# Features2Mart builds a BED file of transcript annotations pulled
# from Ensembl BioMart (mm9 archive) and writes it to disk.
# return: stores annotations in a tab-separated .bed file
#------------------------------------------------------------
# Dependencies
library(biomaRt)
#------------------------------------------------------------
# Paths and names for the output annotation file
path2Mart   <- "~/PepsRscripts/RScripts/MartObjects/"
nameOfGenes <- "Transcripts"
alignment   <- "mm9"
#------------------------------------------------------------
# Connect to the mm9 (May 2012) Ensembl archive; the default host serves
# the current assembly (mm10), which is not what we want here.
martDB <- useMart('ENSEMBL_MART_ENSEMBL', dataset = 'mmusculus_gene_ensembl',
                  host = "may2012.archive.ensembl.org")
# Inspect available attributes (kept for interactive exploration)
attributes <- listAttributes(martDB)
attributes[grep(pattern = "ensembl", x = attributes$description, ignore.case = TRUE), ]
# Fetch transcript coordinates plus gene id and MGI symbol
martAns <- getBM(attributes = c("chromosome_name", "transcript_start",
                                "transcript_end", "strand",
                                "ensembl_gene_id", "mgi_symbol"),
                 mart = martDB)
# Keep one row per MGI symbol
martAns <- martAns[!duplicated(martAns$mgi_symbol), ]
dim(martAns)
# (removed: length(KG[,1]) -- KG was never defined because its read.table
#  loader was commented out, so that line aborted the script)
head(martAns)
#------------------------------------------------------------
# Save features in BED format; chromosome names need a "chr" prefix
martAns$chromosome_name <- paste0("chr", martAns$chromosome_name)
head(martAns)
write.table(martAns,
            paste0(path2Mart, alignment, "_", nameOfGenes, ".bed"),
            sep = "\t", row.names = FALSE, col.names = TRUE,
            quote = FALSE, na = "")
|
/CustomFunctions/Features2Mart.R
|
permissive
|
gretchunkim/NEAT
|
R
| false
| false
| 3,210
|
r
|
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#******************************************************************
#*                                                                *
#*                      Features2Mart                             *
#*                                                                *
#******************************************************************
#------------------------------------------------------------
# Features2Mart builds BED files of transcript annotations pulled from
# Ensembl BioMart (mm9 archive) and writes the result to disk.
# return: Stores annotations in bed file
#------------------------------------------------------------
# Dependencies
library(biomaRt)
#------------------------------------------------------------
# Define paths to write annotation file to
path2Mart <- "~/PepsRscripts/RScripts/MartObjects/"
nameOfGenes <- "Transcripts"
alignment <- "mm9"
#------------------------------------------------------------
# Get feature from Biomart
# 2014-09-29: De facto db is mm10
# martDB <- useMart("ensembl", dataset = "mmusculus_gene_ensembl")
# Access mm9 via the May 2012 Ensembl archive host
martDB <- useMart('ENSEMBL_MART_ENSEMBL',dataset='mmusculus_gene_ensembl', host="may2012.archive.ensembl.org")
# # See Datasets
# listMarts()
# listDatasets(martDB)
# # Find which attributes you want
attributes <- listAttributes(martDB)
attributes[grep(pattern="ensembl", x=attributes$description, ignore.case=T),]
# head(filters)
# Read in all known genes
# KG <- as.matrix(as.character(unique(martAns$mgi_symbol)))
#KG <- read.table(paste(path2Mart, alignment, "_KnownGenesUnique.bed", sep=""), colClasses = "character", header=TRUE)
# Get transcript coordinates plus gene id and MGI symbol
martAns <- getBM(attributes=c("chromosome_name", "transcript_start", "transcript_end", "strand", "ensembl_gene_id", "mgi_symbol"), mart=martDB)
# Get rid of duplicates (one row per MGI symbol)
martAns <- martAns[!duplicated(martAns$mgi_symbol),]
dim(martAns)
# NOTE(review): KG is never defined here (its read.table loader above is
# commented out), so this line will error -- confirm before running.
length(KG[,1])
head(martAns)
# Fish out specific genes
#martAns <- martAns[grep(pattern=nameOfGenes, x=martAns$mgi_symbol, ignore.case=T),]
#martAns <- martAns[grep(pattern="^hox", x=martAns$mgi_symbol, ignore.case=T),]
#martAns <- martAns[which(martAns$mgi_symbol%in%Suz12$Gene==TRUE),]
# #------------------------------------------------------------
# # For TSS, use start +/- a given window
# winSize = 10000
# currentind <- martAns$strand==-1
# martAns[currentind,]$transcript_start <- as.numeric(martAns[currentind,]$transcript_end)-(winSize/2)
# martAns[currentind,]$transcript_end <- as.numeric(martAns[currentind,]$transcript_end)+(winSize/2)
# currentind <- martAns$strand==1
# martAns[currentind,]$transcript_end <- as.numeric(martAns[currentind,]$transcript_start)+(winSize/2)
# martAns[currentind,]$transcript_start <- as.numeric(martAns[currentind,]$transcript_start)-(winSize/2)
# head(martAns[currentind,])
# martAns$transcript_end-martAns$transcript_start
#------------------------------------------------------------
# Save feature in bed file; chromosome names need a "chr" prefix
martAns$chromosome_name <- paste("chr", martAns$chromosome_name, sep = "")
head(martAns)
write.table(martAns, paste(path2Mart, paste(alignment, "_", nameOfGenes, ".bed", sep=""), sep=""), sep = "\t", row.names = FALSE, col.names=TRUE, quote=FALSE, na="")
|
# Plot 1: histogram of Global Active Power.
# The header is read separately; the data rows start after line 66637 and
# exactly 2880 observations are kept.
setwd('/Users/vega/Desktop/MSophtho/coursera R/exploratory')
data_file <- 'household_power_consumption.txt'
power_days <- read.table(data_file, sep = ';', skip = 66637, nrows = 2880)
col_names <- read.table(data_file, nrows = 1, sep = ';')
colnames(power_days) <- unlist(col_names)
hist(power_days$Global_active_power,
     col = 'red',
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
|
/plot1.R
|
no_license
|
saltiago/ExData_Plotting1
|
R
| false
| false
| 378
|
r
|
# Plot 1: histogram of Global Active Power.
# skip=66637 / nrows=2880 selects a fixed 2880-row window of the file
# (presumably the two analysis days at one-minute resolution -- confirm);
# the header row is read separately and applied afterwards.
setwd('/Users/vega/Desktop/MSophtho/coursera R/exploratory')
p1df <- read.table('household_power_consumption.txt', sep=';', skip=66637, nrows = 2880)
header <- read.table('household_power_consumption.txt', nrows = 1, sep =';')
colnames(p1df) <- unlist(header)
hist(p1df$Global_active_power, col='red', main="Global Active Power",
     xlab="Global Active Power (kilowatts)")
|
# Extracted package example for NISTunits::NISTcoulombMeterTOdebye.
library(NISTunits)
### Name: NISTcoulombMeterTOdebye
### Title: Convert coulomb meter to debye
### Aliases: NISTcoulombMeterTOdebye
### Keywords: programming
### ** Examples
# convert 10 coulomb-meters to debye
NISTcoulombMeterTOdebye(10)
|
/data/genthat_extracted_code/NISTunits/examples/NISTcoulombMeterTOdebye.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 209
|
r
|
# Extracted package example for NISTunits::NISTcoulombMeterTOdebye
# (duplicate copy).
library(NISTunits)
### Name: NISTcoulombMeterTOdebye
### Title: Convert coulomb meter to debye
### Aliases: NISTcoulombMeterTOdebye
### Keywords: programming
### ** Examples
# convert 10 coulomb-meters to debye
NISTcoulombMeterTOdebye(10)
|
# Unroot the Newick tree in 267_0.txt and write the result alongside it.
library(ape)
rooted_tree <- read.tree("267_0.txt")
write.tree(unroot(rooted_tree), file = "267_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/267_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 133
|
r
|
# Read the Newick tree from 267_0.txt, remove its root, and save the
# unrooted tree to 267_0_unrooted.txt.
library(ape)
testtree <- read.tree("267_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="267_0_unrooted.txt")
|
library(fields)
library(rstan)
library(sp)
library(rgdal)
library(ggplot2)
library(mvtnorm)
library(maptools)
library(maps)
library(plyr)
source('r/utils/pred_helper_funs.r')
######################################################################################################################################
# user defs
######################################################################################################################################
# us albers shape file
# us.shp <- readShapeLines('attic/r/data/map_data/us_alb.shp',
# proj4string=CRS('+init=epsg:3175'))
us.shp <- readOGR('data/map_data/us_alb.shp', 'us_alb')
# # date of pollen_ts pull
# mydate = '2014-07-22'
# use the veg knots? set to false for smaller test data sets
veg_knots = TRUE
cells = NA
# cells = seq(1,100)
# grid
res = res
side = side
side = '' # 'E', 'W', or ''
# grid = 'MISP'
grid = 'umw'
grid_version = 2
grid_specs = paste0(grid, side, '_', as.character(res), 'by')
gridname = paste0(grid_specs, '_v', grid_version)
#gridname = 'umwE_3by'
# reconstruction limits and bin-width
int = 100
tmin = 150
if (cal) {
tmax = 150
} else if (one_time) {
tmin = tmin + (slice-1)*int
tmax = tmin + int
} else {
tmax = tmin + 20*int
}
# rescale
rescale = 1e6
# knots
# nclust = 75
# clust.ratio = 6# approx clust.ratio knots per cell
clust.ratio = 7# approx clust.ratio knots per cell
clust.ratio = 15# approx clust.ratio knots per cell
# suff=''
suff = paste(grid_specs, '_', version, sep='')
# suff = '3by_v0.3_test'
states_pol = c('minnesota', 'wisconsin', 'michigan:north')
states_pls = c('minnesota', 'wisconsin', 'michigan:north')
# specify the taxa to use
# must be from the list: taxa_sub = c('oak', 'pine', 'maple', 'birch', 'tamarack', 'beeh', 'elm', 'spruce', 'ash', 'hemlock')
# always have: 'other.hardwood' and 'other.conifer'
taxa_all = toupper(c('oak', 'pine', 'maple', 'birch', 'tamarack', 'beech', 'elm', 'spruce', 'ash', 'hemlock'))
taxa_sub = toupper(c('oak', 'pine', 'maple', 'birch', 'tamarack', 'beech', 'elm', 'spruce', 'ash', 'hemlock'))
K = as.integer(length(taxa_sub) + 1)
W = K-1
##########################################################################################################################
## paths and filenames to write to meta file
##########################################################################################################################
suff_veg = paste0('12taxa_6341cells_', nknots, 'knots')
path_grid = paste('data/grid/', gridname, '.rdata', sep='')
path_pls = '../stepps-data/data/composition/pls/pls_umw_v0.6.csv'
# path_pollen = '../stepps-data/data/bacon_ages/pollen_ts_bacon_meta_v8.csv'
# path_bacon = '../stepps-data/data/bacon_ages'
path_pollen = '../stepps-baconizing/data/sediment_ages_v7_varves.csv'
# path_pollen = '../stepps-baconizing/data/sediment_ages_v7.csv'
if (bchron){
path_age_samples = '../stepps-baconizing/data/bchron_ages'
} else {
path_age_samples = '../stepps-baconizing/data/bacon_ages'
}
path_cal = paste0('../stepps-calibration/output/', run$suff_fit,'.csv')
path_veg_data = paste0('../stepps-veg/r/dump/veg_data_', suff_veg, '_v0.4.rdata')
path_veg_pars = paste0('../stepps-veg/figures/', suff_veg, '_nb_v0.5/veg_pars_', nknots, 'knots.rdata')
path_ages = paste0('../stepps-baconizing/data')
##########################################################################################################################
## read in tables and data
##########################################################################################################################
# conversion tables
tree_type = read.table('data/assign_HW_CON.csv', sep=',', row.names=1, header=TRUE)
convert = read.table('data/dict-comp2stepps.csv', sep=',', row.names=1, header=TRUE)
pls.raw = data.frame(read.table(file=path_pls, sep=",", row.names=1, header=TRUE))
# read in grid
load(file=path_grid)
# pollen_ts = read.table(paste('data/pollen_ts_', mydate, '.csv', sep=''), header=TRUE, stringsAsFactors=FALSE)
# pollen_ts = read.table(paste('../stepps-data/data/pollen_ts_bacon_v1.csv', sep=','), header=TRUE, sep=',', stringsAsFactors=FALSE)
pollen_ts = read.table(path_pollen, header=TRUE, sep=',', stringsAsFactors=FALSE)
pol_ids = data.frame(id=unique(pollen_ts$id), stat_id=seq(1, length(unique(pollen_ts$id))))
# if draw=TRUE then replace mean age_bacon with draw age_bacon
if (draw) {
all_files = list.files(path_age_samples)
all_drawRDS = all_files[grep('draw', all_files)]
drawRDS = all_drawRDS[sample(seq(1, length(all_drawRDS)), 1)] # random sample from available posterior age draws
age_sample = readRDS(file.path(path_age_samples, drawRDS))
# replace age_bacon with the draw
if (bchron){
pollen_ts$age_bchron = age_sample
} else {
pollen_ts$age_bacon = age_sample
}
}
if (bchron){
pollen_ts = pollen_ts[!is.na(pollen_ts$age_bchron),]
} else {
pollen_ts = pollen_ts[!is.na(pollen_ts$age_bacon),]
}
age_ps = read.table(file=paste0(path_ages, '/pol_age_ps_v7.csv'), sep=',', header=TRUE)
# foo = remove_post_settlement(pollen_ts, age_ps)
# only removes samples that were clearly identified as post-settlement
# if no ambrosia rise, includes all samples
pollen_ts = remove_post_settlement(pollen_ts, age_ps)
# if (any(pollen_ts$age_bacon < 0)) {
# tmin = 0
# tmax = tmin + 2000
# }
# max_ages
if (constrain){
if (bchron){
max_ages = read.table(file=paste0(path_ages, '/pol_ages_bchron_6.csv'), sep=',', header=TRUE)
} else {
max_ages = read.table(file=paste0(path_ages, '/pol_ages_v6.csv'), sep=',', header=TRUE)
}
drop_samples = constrain_pollen(pollen_ts, max_ages, nbeyond=nbeyond)
if (add_varves){
vids = c(2309, 14839, 3131)
drop_samples[which(pollen_ts$id %in% vids)] = FALSE
}
pollen_ts = pollen_ts[!drop_samples,]
}
# read in calibration output
# load('calibration/r/dump/cal_data_12taxa_mid_comp_all.rdata')
# cal_fit = read_stan_csv('data/calibration_output/12taxa_mid_comp_long.csv')
# cal_fit = rstan::read_stan_csv(paste0('data/calibration_output/', run$suff_fit,'.csv'))
cal_fit = rstan::read_stan_csv(path_cal)
# read in veg data and output
# veg data specifies which knots to use
load(path_veg_data)
veg_post = readRDS(file=path_veg_pars)
# load(file=path_veg_pars)
# veg_post = post
##########################################################################################################################
## read in and organize pls data
##########################################################################################################################
colnames(pls.raw) = tolower(colnames(pls.raw))
# pull the subset of proportions
taxa.start.col = min(match(tolower(rownames(convert)), colnames(pls.raw)), na.rm=TRUE)
# # might need to fix this later, doesn't work with updated data but don't need it now
# if (any(!(tolower(sort(taxa)) == sort(colnames(pls_dat))))) {
# pls_dat = pls.raw[,taxa.start.col:ncol(pls.raw)]
# colnames(pls_dat) = as.vector(convert[match(colnames(pls_dat), tolower(rownames(convert))),1])
# pls_dat_collapse = sapply(unique(colnames(pls_dat)),
# function(x) rowSums( pls_dat[ , grep(x, names(pls_dat)), drop=FALSE]) )
# counts = data.frame(pls_dat_collapse[,sort(colnames(pls_dat_collapse))])
# }
counts = pls.raw[,taxa.start.col:ncol(pls.raw)]
meta = pls.raw[,1:(taxa.start.col-1)]
# kilometers
# pls$X = pls$X/1000
# pls$Y = pls$Y/1000
meta = split_mi(meta)
counts = counts[which(meta$state2 %in% states_pls),]
meta = meta[which(meta$state2 %in% states_pls),]
# if (length(cells) > 1){
# counts = counts[cells,]
# meta = meta[cells,]
# }
centers_pls = data.frame(x=meta$x, y=meta$y)/rescale # megameters!
plot(centers_pls[,1]*rescale, centers_pls[,2]*rescale, asp=1, axes=F, col='antiquewhite4', xlab='',ylab='', pch=19, cex=0.2)
plot(us.shp, add=T)
y_veg = convert_counts(counts, tree_type, taxa_sub)
taxa = colnames(y_veg)
y_veg = as.matrix(round(unname(y_veg)))
rownames(y_veg) = NULL
y_veg = unname(y_veg)
# y = y_build(counts, taxa_sub) # fix this if we want to use a subset of taxa
K = as.integer(ncol(y_veg))
W = K-1
N_pls = nrow(y_veg)
# make sure columns are in order!
# y_veg = y_veg[,taxa]
##########################################################################################################################
## chunk: read in coarse grid and pollen data
##########################################################################################################################
# FIXME: ADD STATE TO GRID
# coarse_domain = coarse_domain[coarse_domain$state %in% states_pls,]
coarse_centers = domain[,1:2]
if (length(cells) > 1){
coarse_centers = coarse_centers[cells,]
}
plot(coarse_centers[,1]*rescale, coarse_centers[,2]*rescale, col='blue')
plot(us.shp, add=TRUE)
# assign grid to centers_veg
centers_veg = coarse_centers
N = nrow(centers_veg)
# subdomain boundaries
xlo = min(centers_veg$x)
xhi = max(centers_veg$x)
ylo = min(centers_veg$y)
yhi = max(centers_veg$y)
##########################################################################################################################
## chunk: reorganize pollen data
##########################################################################################################################
# set tamarack to 0 at tamarack creek
pollen_ts[pollen_ts$id == 2624, 'TAMARACK'] = rep(0, sum(pollen_ts$id == 2624))
saveRDS(pollen_ts, file='data/pollen_ts.RDS')
pollen_ts1 = pollen_ts[which(pollen_ts$state %in% states_pol),]
# ## pollen data!
# if (bacon){
# pollen_ts1 = pollen_ts[which((pollen_ts$age_bacon <= 2500) & (pollen_ts$state %in% states_pol)),]
# } else {
# pollen_ts1 = pollen_ts[which((pollen_ts$age_default <= 2500) & (pollen_ts$state %in% states_pol)),]
# }
# reproject pollen coords from lat long to Albers
pollen_ts2 <- pollen_to_albers(pollen_ts1)
# pollen_ts = pollen_ts[which((pollen_ts[,'x'] <= xhi) & (pollen_ts[,'x'] >= xlo) &
# (pollen_ts[,'y'] <= yhi) & (pollen_ts[,'y'] >= ylo)),]
pollen_locs = cbind(pollen_ts2$x, pollen_ts2$y)
# pollen_int = knots_in_domain4(unique(pollen_locs), centers_veg, cell_width = res*8000/rescale)
#
# idx_pollen_int = apply(pollen_locs, 1, function(x) if (any(rdist(x, pollen_int) < 1e-8)) {return(TRUE)} else {return(FALSE)})
# pollen_ts = pollen_ts[idx_pollen_int, ]
pollen_int = cores_near_domain(pollen_locs, centers_veg, cell_width = res*8000/rescale)
idx_pollen_int = apply(pollen_locs, 1,
function(x) if (any(rdist(x, pollen_int) < 1e-8)) {return(TRUE)} else {return(FALSE)})
pollen_ts3 = pollen_ts2[idx_pollen_int, ]
# check how does splitting affects weights...
pollen_check = pollen_ts2[,1:7]
pollen_check$int = rep(FALSE, nrow(pollen_check))
pollen_check$int[which(idx_pollen_int == TRUE)] = TRUE
pollen_check=pollen_check[!duplicated(pollen_check),]
# plot domain and core locations
par(mfrow=c(1,1))
plot(centers_veg$x*rescale, centers_veg$y*rescale)
points(pollen_ts3$x*rescale, pollen_ts3$y*rescale, col='blue', pch=19)
plot(us.shp, add=T, lwd=2)
# points(pollen_ts3$x[which(pollen_ts3$id %in% vids)]*rescale, pollen_ts3$y[which(pollen_ts3$id %in% vids)]*rescale, col='red')
##########################################################################################################################
## chunk: prepare pollen data; aggregate over time intervals
##########################################################################################################################
# sum counts over int length intervals
pollen_agg = build_pollen_counts(tmin=tmin, tmax=tmax, int=int, pollen_ts=pollen_ts3, taxa_all, taxa_sub, age_model=age_model)
#pollen_agg = build_pollen_counts_fast_core(tmin=tmin, tmax=tmax, int=int, pollen_ts=pollen_ts)
# saveRDS(pollen_ts3, file=paste0(subDir, '/pollen_meta.RDS'))
meta_pol_all = pollen_agg[[3]]
meta_pol = pollen_agg[[2]]
counts = pollen_agg[[1]]
meta_pol$stat_id = pol_ids$stat_id[match(meta_pol$id, pol_ids$id)]
meta_pol_all$stat_id = pol_ids$stat_id[match(meta_pol_all$id, pol_ids$id)]
pollen_ts$stat_id = pol_ids$stat[match(pollen_ts$id, pol_ids$id)]
# Time axis: one entry per occupied time bin; T is the number of time steps.
# NOTE: assigning to T masks base R's TRUE shorthand, but the name is required
# by the downstream save()/dump() interface, so it is kept.
ages = unique(sort(meta_pol$age))
T = length(ages)
# cal and one_time are scalar flags, so use the short-circuiting scalar
# operator || rather than the elementwise |.
if (cal || one_time) {
# single-time runs: no temporal covariance, so no lag matrix is needed
lag = 0
} else {
# T x T matrix of absolute age differences between time bins
lag = unname(as.matrix(dist(matrix(ages), upper=TRUE)))
}
N_cores = length(unique(meta_pol$id))
y = convert_counts(counts, tree_type, taxa_sub)
# make sure columns match!
if (sum(colnames(y) %in% taxa) != K){
print('The number of taxa wanted does not match the number of taxa in the data frame! Name mismatch likely.')
}
# y = y[,taxa]
y = unname(y)
# One (x, y) coordinate per core, taken from the first occurrence of each core
# id in meta_pol. match() returns the index of the first match, which is
# exactly what the original per-core min(which(...)) loop computed, without
# rescanning meta_pol$id on every iteration (and without the leftover
# print() debug output).
core_ids = unique(meta_pol$id)
first_row = match(core_ids, meta_pol$id)
centers_pol = data.frame(x = meta_pol$x[first_row], y = meta_pol$y[first_row])
# some are duplicates, but we still need them as separate rows!
# centers_pol <- meta_pol[!duplicated(cbind(meta_pol$x, meta_pol$y)), c('x', 'y')]
# indices for which cells the cores fall in
idx_cores <- build_idx_cores(centers_pol, centers_veg, N_cores)
plot(centers_veg$x*rescale, centers_veg$y*rescale, col='lightgrey')
points(centers_veg[idx_cores,'x']*rescale, centers_veg[idx_cores,'y']*rescale, col='red', pch=19)
points(centers_pol$x*rescale, centers_pol$y*rescale, col='blue', pch=4, cex=1.4)
plot(us.shp, add=TRUE)
# check domain splitting
idx_cores_all <- build_idx_cores(cbind(pollen_check$x, pollen_check$y), centers_veg, N_cores=nrow(pollen_check))
##########################################################################################################################
## chunk 3: build distance matrices
##########################################################################################################################
if (!veg_knots){
nclust = ceiling(N/clust.ratio)
d_out = build_domain_objects(centers_veg, dx=20, cell_width=8, nclust=nclust)
d = d_out$d
# d_knots = d_out$d_knots
# d_inter = d_out$d_inter
#
knot_coords = d_out$knot_coords
} else {
if (side == '') {
# don't touch knot_coords
} else {
if (side == 'W'){
if (res == 1)
cutlines = list(list(c(0.42, 1.0), c(0.0, 1.0)), list(c(0.397,1.15), c(0.168,0.119)))
if (res == 5)
cutlines = list(list(c(0.386, 1.0), c(0.0, 1.0)), list(c(0.397,1.15), c(0.168,0.119)))
if (res == 3)
cutlines = list(list(c(0.405, 1.0), c(0.0, 1.0)), list(c(0.397,1.15), c(0.168,0.119)))
} else if (side == 'E'){
if (res %in% c(1, 5)) cutlines = list(list(c(0.253, 1.0), c(0.0, -1.0)))
if (res == 3) cutlines = list(list(c(0.27, 1.0), c(0.0, -1.0)))
}
idx = choppy(knot_coords[,1], knot_coords[,2], cutlines)
knot_coords = knot_coords[idx,]
# knot_coords2 = knot_coords[idx,]
}
}
#
# # knot_coords3 = knots_in_domain4(knot_coords, centers_veg, cell_width = res*8000/rescale)
# plot(domain[,1], domain[,2], asp=1)
plot(centers_veg[,1], centers_veg[,2], asp=1)
points(knot_coords[,1], knot_coords[,2], col='blue', pch=19)
# points(knot_coords2[,1], knot_coords2[,2], col='green', pch=19)
d = rdist(centers_veg, centers_veg)
diag(d) <- 0
d_knots = rdist(knot_coords, knot_coords)
diag(d_knots) <- 0
d_inter = rdist(centers_veg, knot_coords)
d_inter[which(d_inter<1e-8)]=0
d_pol = rdist(centers_pol, centers_veg)
d_pol[which(d_pol<1e-8)]=0
N_knots = nrow(knot_coords)
##########################################################################################################################
## chunk: qr decompose X
##########################################################################################################################
KW = FALSE
KGAMMA = FALSE
kernel = run$kernel
cal_post = rstan::extract(cal_fit, permuted=FALSE, inc_warmup=FALSE)
col_names = colnames(cal_post[,1,])
par_names = unlist(lapply(col_names, function(x) strsplit(x, "\\[")[[1]][1]))
if (draw) {
draw_cal = sample(seq(1, dim(cal_post)[1]), 1)
cal_post = cal_post[draw_cal,1,]
} else {
cal_post = colMeans(cal_post[,1,])
}
phi = unname(cal_post[which(par_names == 'phi')][1:K])
one_gamma = run$one_gamma
if (one_gamma){
# gamma = rep(mean(cal_post[,1,which(par_names == 'gamma')]), K)
gamma = unname(cal_post[which(par_names == 'gamma')])
} else {
KGAMMA = TRUE
gamma = unname(cal_post[which(par_names == 'gamma')][1:K])
}
if (kernel=='gaussian'){
one_psi = run$one_psi
if (one_psi){
# psi = rep(mean(cal_post[,1,which(par_names == 'psi')]), K)
psi = unname(cal_post[which(par_names == 'psi')])
} else {
KW = TRUE
psi = unname(cal_post[which(par_names == 'psi')][1:K])
}
} else if (kernel=='pl'){
one_a = run$one_a
if (one_a){
# a = rep(mean(cal_post[,1,which(par_names == 'a')]), K)
a = unname(cal_post[which(par_names == 'a')])
} else {
KW = TRUE
a = unname(cal_post[which(par_names == 'a')][1:K])
}
one_b = run$one_b
if (one_b){
# b = rep(mean(cal_post[,1,which(par_names == 'b')]), K)
b = unname(cal_post[which(par_names == 'b')])
} else {
KW = TRUE
b = unname(cal_post[which(par_names == 'b')][1:K])
}
}
w <- build_weight_matrix(cal_post, d_pol, idx_cores, N, N_cores, run)
# head(apply(w, 1, rowSums))
#####################################################################################
# calculate potential d
# used to determine C normalizing constant in the non-local contribution term
#####################################################################################
# x_pot = seq(-528000, 528000, by=8000)
# y_pot = seq(-416000, 416000, by=8000)
# coord_pot = expand.grid(x_pot, y_pot)
#
# d_pot = t(rdist(matrix(c(0,0), ncol=2), as.matrix(coord_pot, ncol=2))/dist.scale)
# d_pot = unname(as.matrix(count(d_pot)))
#
# N_pot = nrow(d_pot)
# Potential-distance grid: distances from the origin to a large regular grid,
# used to compute the normalizing constant for the non-local dispersal term.
coord_pot = seq(-700000, 700000, by=8000)
coord_pot = expand.grid(coord_pot, coord_pot)
# distances (in rescaled units) from (0,0) to every potential cell
d_pot = t(rdist(matrix(c(0,0), ncol=2), as.matrix(coord_pot, ncol=2))/rescale)
# collapse to unique distances with multiplicities (plyr::count)
d_pot = unname(as.matrix(count(data.frame(d_pot))))
N_pot = nrow(d_pot)
sum_w_pot = build_sumw_pot(cal_post, K, N_pot, d_pot, run)
#####################################################################################
# recompute gamma
#####################################################################################
# NOTE(review): d_hood is not defined anywhere in this file -- presumably
# supplied by the script that sources this one; verify before running standalone.
w_coarse = build_sumw_pot(cal_post, K, length(d_hood), cbind(t(d_hood), rep(1, length(d_hood))), run)
# NOTE(review): gamma_new is computed but never used or saved below -- confirm
# whether gamma should be replaced by gamma_new before the save() call.
gamma_new = recompute_gamma(w_coarse, sum_w_pot, gamma)
# #####################################################################################
# # domain splitting check
# #####################################################################################
# w_all <- build_weight_matrix(cal_post, d, idx_cores_all, N, length(idx_cores_all), run)
#
# foo=apply(w_all, 1, rowSums)
#
# pollen_check$sum_w = foo
#####################################################################################
# veg run pars
#####################################################################################
# Posterior parameters from the vegetation model run: either a single random
# posterior draw (draw == TRUE) or the posterior mean, truncated to K taxa.
par_names = vapply(strsplit(colnames(veg_post), '\\.'), `[[`, character(1), 1)
eta_draws = veg_post[, which(par_names == 'eta')]
rho_draws = veg_post[, which(par_names == 'rho')]
if (draw) {
  # same sampling call as before so RNG consumption is unchanged
  row_pick = sample(seq(1, nrow(veg_post)), 1)
  eta = eta_draws[row_pick, ]
  rho = rho_draws[row_pick, ]
} else {
  eta = colMeans(eta_draws)
  rho = colMeans(rho_draws)
}
eta = unname(eta)[1:K]
rho = unname(rho)[1:K]
# ##########################################################################################################################
# ## chunk: qr decompose X
# ##########################################################################################################################
#
# x = matrix(1, nrow=(N*T), ncol=1)
# N_p = N*T
#
# temp = qr(x)
# Q = qr.Q(temp)
# R = qr.R(temp)
#
# P = Q %*% t(Q)
# # M = diag(N_p) - P
#
# if (all(P-P[1,1]<1.0e-12)){
# P = P[1,1]
# N_p = 1
# }
##########################################################################################################################
## save the data; rdata more efficient, use for processing
##########################################################################################################################
# Build the output directory name from the run configuration and create it.
# Kernel prefix first, then flags for calibration-window and posterior-mean runs.
if (kernel == 'gaussian') {
  suff = paste0('G_', suff)
} else if (kernel == 'pl') {
  suff = paste0('PL_', suff)
}
if (cal) suff = paste0(suff, '_cal')
if (!draw) suff = paste0(suff, '_mean')
dirName = paste0('runs/', N_knots, 'knots_', tmin, 'to', tmax, 'ybp_', suff)
if (one_time) {
  # single-time-slice runs share one parent directory across slices
  dirName = paste0('runs/space_slices_', suff)
}
if (AR) {
  dirName = paste0(dirName, '_ar')
}
# one subdirectory per time slice (one_time) or per posterior draw (dr)
subDir = if (one_time) paste0('slice', tmin, 'to', tmax) else paste0('run', dr)
# recursive = TRUE creates dirName as needed; showWarnings = FALSE makes this
# idempotent and replaces the racy (and directory-unaware) pattern of calling
# file.exists() before each dir.create().
dir.create(file.path(dirName, subDir), showWarnings = FALSE, recursive = TRUE)
# paste0('runs/', K, 'taxa_', N, 'cells_', N_knots, 'knots_', tmin, 'to', tmax, 'ypb_', suff, '.rdata')
fname = file.path(dirName, subDir, 'input')
# note that w is column-major
save(K, N, T, N_cores, N_knots, res,
gamma, phi, rho, eta,
y,
idx_cores,
d_knots, d_inter, w, #d_pol, #d,
lag,
# P, N_p, sum_w_pot,
meta_pol, meta_pol_all,
sum_w_pot, pollen_check,
knot_coords,
centers_pls, centers_veg, centers_pol, taxa, ages, y_veg, N_pls,
file=paste0(fname, '.rdata'))
# file=paste('r/dump/', K, 'taxa_', N, 'cells_', N_knots, 'knots_', tmin, 'to', tmax, 'ypb_', suff, '.rdata',sep=""))
# convert to row-major
if (KW){
w_new = vector(length=0)
for (k in 1:K)
w_new = c(w_new, as.vector(w[k,,]))
w = array(w_new, c(K, N_cores, N))
}
dump(c('K', 'N', 'T', 'N_cores', 'N_knots', 'res',
'gamma', 'phi', 'rho', 'eta',
'y',
'idx_cores',
'd_knots', 'd_inter', 'w', #'d_pol', #'d',
'lag',
# 'P', 'N_p', 'sum_w_pot'),
'sum_w_pot'),#, 'pollen_check'),
# 'knot_coords',
# 'centers_pls', 'centers_veg', 'centers_polU', 'taxa', 'ages', 'y_veg', 'N_pls'),
file=paste0(fname, '.dump'))
# file=paste('r/dump/', K, 'taxa_', N, 'cells_', N_knots, 'knots_', tmin, 'to', tmax, 'ypb_', suff, '.dump',sep=""))
##########################################################################################################################
## write meta file with paths
##########################################################################################################################
# Record the input-data paths once per ensemble (only for the first draw).
if (dr == 1) {
  paths = list(path_grid     = path_grid,
               path_pls      = path_pls,
               path_pollen   = path_pollen,
               path_ages     = path_ages,
               path_cal      = path_cal,
               path_veg_data = path_veg_data,
               path_veg_pars = path_veg_pars)
  # vectorized formatting replaces the per-element write() loop; output lines
  # are identical ("## name=path", one per line, after a "## Path names" header)
  meta_lines = c("## Path names",
                 paste0('## ', names(paths), '=', unlist(paths)))
  conn = file(file.path(dirName, 'meta.txt'), 'wt')
  writeLines(meta_lines, conn)
  close(conn)
}
|
/r/pred_build_data.r
|
no_license
|
andydawson/stepps-prediction
|
R
| false
| false
| 23,242
|
r
|
library(fields)
library(rstan)
library(sp)
library(rgdal)
library(ggplot2)
library(mvtnorm)
library(maptools)
library(maps)
library(plyr)
source('r/utils/pred_helper_funs.r')
######################################################################################################################################
# user defs
######################################################################################################################################
# us albers shape file
# us.shp <- readShapeLines('attic/r/data/map_data/us_alb.shp',
# proj4string=CRS('+init=epsg:3175'))
us.shp <- readOGR('data/map_data/us_alb.shp', 'us_alb')
# # date of pollen_ts pull
# mydate = '2014-07-22'
# use the veg knots? set to false for smaller test data sets
# --- Run configuration -------------------------------------------------------
# Grid, time-window, knot, and taxa settings for the prediction data build.
# Several flags used below (cal, one_time, slice, version, res, side) are
# expected to be defined by the script that sources this file.
veg_knots = TRUE
cells = NA
# cells = seq(1,100)
# grid
res = res       # NOTE(review): self-assignment -- `res` must already exist in the calling environment; verify
side = side     # NOTE(review): self-assignment, immediately overwritten on the next line -- likely leftover
side = '' # 'E', 'W', or ''
# grid = 'MISP'
grid = 'umw'
grid_version = 2
grid_specs = paste0(grid, side, '_', as.character(res), 'by')
gridname = paste0(grid_specs, '_v', grid_version)
#gridname = 'umwE_3by'
# reconstruction limits and bin-width
int = 100
tmin = 150
if (cal) {
tmax = 150
} else if (one_time) {
# single-slice runs shift the window by (slice-1) bins
tmin = tmin + (slice-1)*int
tmax = tmin + int
} else {
# full reconstruction: 20 bins of width `int` years
tmax = tmin + 20*int
}
# rescale
rescale = 1e6
# knots
# nclust = 75
# clust.ratio = 6# approx clust.ratio knots per cell
clust.ratio = 7# approx clust.ratio knots per cell
clust.ratio = 15# approx clust.ratio knots per cell -- NOTE(review): overrides the 7 assigned on the previous line; only 15 takes effect
# suff=''
suff = paste(grid_specs, '_', version, sep='')
# suff = '3by_v0.3_test'
states_pol = c('minnesota', 'wisconsin', 'michigan:north')
states_pls = c('minnesota', 'wisconsin', 'michigan:north')
# specify the taxa to use
# must be from the list: taxa_sub = c('oak', 'pine', 'maple', 'birch', 'tamarack', 'beeh', 'elm', 'spruce', 'ash', 'hemlock')
# always have: 'other.hardwood' and 'other.conifer'
taxa_all = toupper(c('oak', 'pine', 'maple', 'birch', 'tamarack', 'beech', 'elm', 'spruce', 'ash', 'hemlock'))
taxa_sub = toupper(c('oak', 'pine', 'maple', 'birch', 'tamarack', 'beech', 'elm', 'spruce', 'ash', 'hemlock'))
# K = number of taxa plus one aggregate ("other") category; W = K - 1
K = as.integer(length(taxa_sub) + 1)
W = K-1
##########################################################################################################################
## paths and filenames to write to meta file
##########################################################################################################################
suff_veg = paste0('12taxa_6341cells_', nknots, 'knots')
path_grid = paste('data/grid/', gridname, '.rdata', sep='')
path_pls = '../stepps-data/data/composition/pls/pls_umw_v0.6.csv'
# path_pollen = '../stepps-data/data/bacon_ages/pollen_ts_bacon_meta_v8.csv'
# path_bacon = '../stepps-data/data/bacon_ages'
path_pollen = '../stepps-baconizing/data/sediment_ages_v7_varves.csv'
# path_pollen = '../stepps-baconizing/data/sediment_ages_v7.csv'
if (bchron){
path_age_samples = '../stepps-baconizing/data/bchron_ages'
} else {
path_age_samples = '../stepps-baconizing/data/bacon_ages'
}
path_cal = paste0('../stepps-calibration/output/', run$suff_fit,'.csv')
path_veg_data = paste0('../stepps-veg/r/dump/veg_data_', suff_veg, '_v0.4.rdata')
path_veg_pars = paste0('../stepps-veg/figures/', suff_veg, '_nb_v0.5/veg_pars_', nknots, 'knots.rdata')
path_ages = paste0('../stepps-baconizing/data')
##########################################################################################################################
## read in tables and data
##########################################################################################################################
# conversion tables
tree_type = read.table('data/assign_HW_CON.csv', sep=',', row.names=1, header=TRUE)
convert = read.table('data/dict-comp2stepps.csv', sep=',', row.names=1, header=TRUE)
pls.raw = data.frame(read.table(file=path_pls, sep=",", row.names=1, header=TRUE))
# read in grid
load(file=path_grid)
# pollen_ts = read.table(paste('data/pollen_ts_', mydate, '.csv', sep=''), header=TRUE, stringsAsFactors=FALSE)
# pollen_ts = read.table(paste('../stepps-data/data/pollen_ts_bacon_v1.csv', sep=','), header=TRUE, sep=',', stringsAsFactors=FALSE)
pollen_ts = read.table(path_pollen, header=TRUE, sep=',', stringsAsFactors=FALSE)
pol_ids = data.frame(id=unique(pollen_ts$id), stat_id=seq(1, length(unique(pollen_ts$id))))
# if draw=TRUE then replace mean age_bacon with draw age_bacon
if (draw) {
all_files = list.files(path_age_samples)
all_drawRDS = all_files[grep('draw', all_files)]
drawRDS = all_drawRDS[sample(seq(1, length(all_drawRDS)), 1)] # random sample from available posterior age draws
age_sample = readRDS(file.path(path_age_samples, drawRDS))
# replace age_bacon with the draw
if (bchron){
pollen_ts$age_bchron = age_sample
} else {
pollen_ts$age_bacon = age_sample
}
}
if (bchron){
pollen_ts = pollen_ts[!is.na(pollen_ts$age_bchron),]
} else {
pollen_ts = pollen_ts[!is.na(pollen_ts$age_bacon),]
}
age_ps = read.table(file=paste0(path_ages, '/pol_age_ps_v7.csv'), sep=',', header=TRUE)
# foo = remove_post_settlement(pollen_ts, age_ps)
# only removes samples that were clearly identified as post-settlement
# if no ambrosia rise, includes all samples
pollen_ts = remove_post_settlement(pollen_ts, age_ps)
# if (any(pollen_ts$age_bacon < 0)) {
# tmin = 0
# tmax = tmin + 2000
# }
# max_ages
if (constrain){
if (bchron){
max_ages = read.table(file=paste0(path_ages, '/pol_ages_bchron_6.csv'), sep=',', header=TRUE)
} else {
max_ages = read.table(file=paste0(path_ages, '/pol_ages_v6.csv'), sep=',', header=TRUE)
}
drop_samples = constrain_pollen(pollen_ts, max_ages, nbeyond=nbeyond)
if (add_varves){
vids = c(2309, 14839, 3131)
drop_samples[which(pollen_ts$id %in% vids)] = FALSE
}
pollen_ts = pollen_ts[!drop_samples,]
}
# read in calibration output
# load('calibration/r/dump/cal_data_12taxa_mid_comp_all.rdata')
# cal_fit = read_stan_csv('data/calibration_output/12taxa_mid_comp_long.csv')
# cal_fit = rstan::read_stan_csv(paste0('data/calibration_output/', run$suff_fit,'.csv'))
cal_fit = rstan::read_stan_csv(path_cal)
# read in veg data and output
# veg data specifies which knots to use
load(path_veg_data)
veg_post = readRDS(file=path_veg_pars)
# load(file=path_veg_pars)
# veg_post = post
##########################################################################################################################
## read in and organize pls data
##########################################################################################################################
colnames(pls.raw) = tolower(colnames(pls.raw))
# pull the subset of proportions
taxa.start.col = min(match(tolower(rownames(convert)), colnames(pls.raw)), na.rm=TRUE)
# # might need to fix this later, doesn't work with updated data but don't need it now
# if (any(!(tolower(sort(taxa)) == sort(colnames(pls_dat))))) {
# pls_dat = pls.raw[,taxa.start.col:ncol(pls.raw)]
# colnames(pls_dat) = as.vector(convert[match(colnames(pls_dat), tolower(rownames(convert))),1])
# pls_dat_collapse = sapply(unique(colnames(pls_dat)),
# function(x) rowSums( pls_dat[ , grep(x, names(pls_dat)), drop=FALSE]) )
# counts = data.frame(pls_dat_collapse[,sort(colnames(pls_dat_collapse))])
# }
counts = pls.raw[,taxa.start.col:ncol(pls.raw)]
meta = pls.raw[,1:(taxa.start.col-1)]
# kilometers
# pls$X = pls$X/1000
# pls$Y = pls$Y/1000
meta = split_mi(meta)
counts = counts[which(meta$state2 %in% states_pls),]
meta = meta[which(meta$state2 %in% states_pls),]
# if (length(cells) > 1){
# counts = counts[cells,]
# meta = meta[cells,]
# }
centers_pls = data.frame(x=meta$x, y=meta$y)/rescale # megameters!
plot(centers_pls[,1]*rescale, centers_pls[,2]*rescale, asp=1, axes=F, col='antiquewhite4', xlab='',ylab='', pch=19, cex=0.2)
plot(us.shp, add=T)
y_veg = convert_counts(counts, tree_type, taxa_sub)
taxa = colnames(y_veg)
y_veg = as.matrix(round(unname(y_veg)))
rownames(y_veg) = NULL
y_veg = unname(y_veg)
# y = y_build(counts, taxa_sub) # fix this if we want to use a subset of taxa
K = as.integer(ncol(y_veg))
W = K-1
N_pls = nrow(y_veg)
# make sure columns are in order!
# y_veg = y_veg[,taxa]
##########################################################################################################################
## chunk: read in coarse grid and pollen data
##########################################################################################################################
# FIXME: ADD STATE TO GRID
# coarse_domain = coarse_domain[coarse_domain$state %in% states_pls,]
coarse_centers = domain[,1:2]
if (length(cells) > 1){
coarse_centers = coarse_centers[cells,]
}
plot(coarse_centers[,1]*rescale, coarse_centers[,2]*rescale, col='blue')
plot(us.shp, add=TRUE)
# assign grid to centers_veg
centers_veg = coarse_centers
N = nrow(centers_veg)
# subdomain boundaries
xlo = min(centers_veg$x)
xhi = max(centers_veg$x)
ylo = min(centers_veg$y)
yhi = max(centers_veg$y)
##########################################################################################################################
## chunk: reorganize pollen data
##########################################################################################################################
# set tamarack to 0 at tamarack creek
pollen_ts[pollen_ts$id == 2624, 'TAMARACK'] = rep(0, sum(pollen_ts$id == 2624))
saveRDS(pollen_ts, file='data/pollen_ts.RDS')
pollen_ts1 = pollen_ts[which(pollen_ts$state %in% states_pol),]
# ## pollen data!
# if (bacon){
# pollen_ts1 = pollen_ts[which((pollen_ts$age_bacon <= 2500) & (pollen_ts$state %in% states_pol)),]
# } else {
# pollen_ts1 = pollen_ts[which((pollen_ts$age_default <= 2500) & (pollen_ts$state %in% states_pol)),]
# }
# reproject pollen coords from lat long to Albers
pollen_ts2 <- pollen_to_albers(pollen_ts1)
# pollen_ts = pollen_ts[which((pollen_ts[,'x'] <= xhi) & (pollen_ts[,'x'] >= xlo) &
# (pollen_ts[,'y'] <= yhi) & (pollen_ts[,'y'] >= ylo)),]
pollen_locs = cbind(pollen_ts2$x, pollen_ts2$y)
# pollen_int = knots_in_domain4(unique(pollen_locs), centers_veg, cell_width = res*8000/rescale)
#
# idx_pollen_int = apply(pollen_locs, 1, function(x) if (any(rdist(x, pollen_int) < 1e-8)) {return(TRUE)} else {return(FALSE)})
# pollen_ts = pollen_ts[idx_pollen_int, ]
pollen_int = cores_near_domain(pollen_locs, centers_veg, cell_width = res*8000/rescale)
idx_pollen_int = apply(pollen_locs, 1,
function(x) if (any(rdist(x, pollen_int) < 1e-8)) {return(TRUE)} else {return(FALSE)})
pollen_ts3 = pollen_ts2[idx_pollen_int, ]
# Track, per unique pollen record, whether the core fell inside (or near) the
# model domain -- used later to check how domain splitting affects weights.
pollen_check = pollen_ts2[, 1:7]
# idx_pollen_int is already a logical vector aligned row-for-row with
# pollen_ts2, so assign it directly instead of seeding FALSE and flipping
# entries via the redundant which(x == TRUE) pattern.
pollen_check$int = as.vector(idx_pollen_int)
pollen_check = pollen_check[!duplicated(pollen_check), ]
# plot domain and core locations
par(mfrow=c(1,1))
plot(centers_veg$x*rescale, centers_veg$y*rescale)
points(pollen_ts3$x*rescale, pollen_ts3$y*rescale, col='blue', pch=19)
plot(us.shp, add=T, lwd=2)
# points(pollen_ts3$x[which(pollen_ts3$id %in% vids)]*rescale, pollen_ts3$y[which(pollen_ts3$id %in% vids)]*rescale, col='red')
##########################################################################################################################
## chunk: prepare pollen data; aggregate over time intervals
##########################################################################################################################
# sum counts over int length intervals
pollen_agg = build_pollen_counts(tmin=tmin, tmax=tmax, int=int, pollen_ts=pollen_ts3, taxa_all, taxa_sub, age_model=age_model)
#pollen_agg = build_pollen_counts_fast_core(tmin=tmin, tmax=tmax, int=int, pollen_ts=pollen_ts)
# saveRDS(pollen_ts3, file=paste0(subDir, '/pollen_meta.RDS'))
meta_pol_all = pollen_agg[[3]]
meta_pol = pollen_agg[[2]]
counts = pollen_agg[[1]]
meta_pol$stat_id = pol_ids$stat_id[match(meta_pol$id, pol_ids$id)]
meta_pol_all$stat_id = pol_ids$stat_id[match(meta_pol_all$id, pol_ids$id)]
pollen_ts$stat_id = pol_ids$stat[match(pollen_ts$id, pol_ids$id)]
# Time axis: one entry per occupied time bin; T is the number of time steps.
# NOTE: assigning to T masks base R's TRUE shorthand, but the name is required
# by the downstream save()/dump() interface, so it is kept.
ages = unique(sort(meta_pol$age))
T = length(ages)
# cal and one_time are scalar flags, so use the short-circuiting scalar
# operator || rather than the elementwise |.
if (cal || one_time) {
# single-time runs: no temporal covariance, so no lag matrix is needed
lag = 0
} else {
# T x T matrix of absolute age differences between time bins
lag = unname(as.matrix(dist(matrix(ages), upper=TRUE)))
}
N_cores = length(unique(meta_pol$id))
y = convert_counts(counts, tree_type, taxa_sub)
# make sure columns match!
if (sum(colnames(y) %in% taxa) != K){
print('The number of taxa wanted does not match the number of taxa in the data frame! Name mismatch likely.')
}
# y = y[,taxa]
y = unname(y)
# One (x, y) coordinate per core, taken from the first occurrence of each core
# id in meta_pol. match() returns the index of the first match, which is
# exactly what the original per-core min(which(...)) loop computed, without
# rescanning meta_pol$id on every iteration (and without the leftover
# print() debug output).
core_ids = unique(meta_pol$id)
first_row = match(core_ids, meta_pol$id)
centers_pol = data.frame(x = meta_pol$x[first_row], y = meta_pol$y[first_row])
# some are duplicates, but we still need them as separate rows!
# centers_pol <- meta_pol[!duplicated(cbind(meta_pol$x, meta_pol$y)), c('x', 'y')]
# indices for which cells the cores fall in
idx_cores <- build_idx_cores(centers_pol, centers_veg, N_cores)
plot(centers_veg$x*rescale, centers_veg$y*rescale, col='lightgrey')
points(centers_veg[idx_cores,'x']*rescale, centers_veg[idx_cores,'y']*rescale, col='red', pch=19)
points(centers_pol$x*rescale, centers_pol$y*rescale, col='blue', pch=4, cex=1.4)
plot(us.shp, add=TRUE)
# check domain splitting
idx_cores_all <- build_idx_cores(cbind(pollen_check$x, pollen_check$y), centers_veg, N_cores=nrow(pollen_check))
##########################################################################################################################
## chunk 3: build distance matrices
##########################################################################################################################
if (!veg_knots){
nclust = ceiling(N/clust.ratio)
d_out = build_domain_objects(centers_veg, dx=20, cell_width=8, nclust=nclust)
d = d_out$d
# d_knots = d_out$d_knots
# d_inter = d_out$d_inter
#
knot_coords = d_out$knot_coords
} else {
if (side == '') {
# don't touch knot_coords
} else {
if (side == 'W'){
if (res == 1)
cutlines = list(list(c(0.42, 1.0), c(0.0, 1.0)), list(c(0.397,1.15), c(0.168,0.119)))
if (res == 5)
cutlines = list(list(c(0.386, 1.0), c(0.0, 1.0)), list(c(0.397,1.15), c(0.168,0.119)))
if (res == 3)
cutlines = list(list(c(0.405, 1.0), c(0.0, 1.0)), list(c(0.397,1.15), c(0.168,0.119)))
} else if (side == 'E'){
if (res %in% c(1, 5)) cutlines = list(list(c(0.253, 1.0), c(0.0, -1.0)))
if (res == 3) cutlines = list(list(c(0.27, 1.0), c(0.0, -1.0)))
}
idx = choppy(knot_coords[,1], knot_coords[,2], cutlines)
knot_coords = knot_coords[idx,]
# knot_coords2 = knot_coords[idx,]
}
}
#
# # knot_coords3 = knots_in_domain4(knot_coords, centers_veg, cell_width = res*8000/rescale)
# plot(domain[,1], domain[,2], asp=1)
plot(centers_veg[,1], centers_veg[,2], asp=1)
points(knot_coords[,1], knot_coords[,2], col='blue', pch=19)
# points(knot_coords2[,1], knot_coords2[,2], col='green', pch=19)
d = rdist(centers_veg, centers_veg)
diag(d) <- 0
d_knots = rdist(knot_coords, knot_coords)
diag(d_knots) <- 0
d_inter = rdist(centers_veg, knot_coords)
d_inter[which(d_inter<1e-8)]=0
d_pol = rdist(centers_pol, centers_veg)
d_pol[which(d_pol<1e-8)]=0
N_knots = nrow(knot_coords)
##########################################################################################################################
## chunk: qr decompose X
##########################################################################################################################
KW = FALSE
KGAMMA = FALSE
kernel = run$kernel
cal_post = rstan::extract(cal_fit, permuted=FALSE, inc_warmup=FALSE)
col_names = colnames(cal_post[,1,])
par_names = unlist(lapply(col_names, function(x) strsplit(x, "\\[")[[1]][1]))
if (draw) {
draw_cal = sample(seq(1, dim(cal_post)[1]), 1)
cal_post = cal_post[draw_cal,1,]
} else {
cal_post = colMeans(cal_post[,1,])
}
phi = unname(cal_post[which(par_names == 'phi')][1:K])
one_gamma = run$one_gamma
if (one_gamma){
# gamma = rep(mean(cal_post[,1,which(par_names == 'gamma')]), K)
gamma = unname(cal_post[which(par_names == 'gamma')])
} else {
KGAMMA = TRUE
gamma = unname(cal_post[which(par_names == 'gamma')][1:K])
}
if (kernel=='gaussian'){
one_psi = run$one_psi
if (one_psi){
# psi = rep(mean(cal_post[,1,which(par_names == 'psi')]), K)
psi = unname(cal_post[which(par_names == 'psi')])
} else {
KW = TRUE
psi = unname(cal_post[which(par_names == 'psi')][1:K])
}
} else if (kernel=='pl'){
one_a = run$one_a
if (one_a){
# a = rep(mean(cal_post[,1,which(par_names == 'a')]), K)
a = unname(cal_post[which(par_names == 'a')])
} else {
KW = TRUE
a = unname(cal_post[which(par_names == 'a')][1:K])
}
one_b = run$one_b
if (one_b){
# b = rep(mean(cal_post[,1,which(par_names == 'b')]), K)
b = unname(cal_post[which(par_names == 'b')])
} else {
KW = TRUE
b = unname(cal_post[which(par_names == 'b')][1:K])
}
}
w <- build_weight_matrix(cal_post, d_pol, idx_cores, N, N_cores, run)
# head(apply(w, 1, rowSums))
#####################################################################################
# calculate potential d
# used to determine C normalizing constant in the non-local contribution term
#####################################################################################
# x_pot = seq(-528000, 528000, by=8000)
# y_pot = seq(-416000, 416000, by=8000)
# coord_pot = expand.grid(x_pot, y_pot)
#
# d_pot = t(rdist(matrix(c(0,0), ncol=2), as.matrix(coord_pot, ncol=2))/dist.scale)
# d_pot = unname(as.matrix(count(d_pot)))
#
# N_pot = nrow(d_pot)
# Potential-distance grid: distances from the origin to a large regular grid,
# used to compute the normalizing constant for the non-local dispersal term.
coord_pot = seq(-700000, 700000, by=8000)
coord_pot = expand.grid(coord_pot, coord_pot)
# distances (in rescaled units) from (0,0) to every potential cell
d_pot = t(rdist(matrix(c(0,0), ncol=2), as.matrix(coord_pot, ncol=2))/rescale)
# collapse to unique distances with multiplicities (plyr::count)
d_pot = unname(as.matrix(count(data.frame(d_pot))))
N_pot = nrow(d_pot)
sum_w_pot = build_sumw_pot(cal_post, K, N_pot, d_pot, run)
#####################################################################################
# recompute gamma
#####################################################################################
# NOTE(review): d_hood is not defined anywhere in this file -- presumably
# supplied by the script that sources this one; verify before running standalone.
w_coarse = build_sumw_pot(cal_post, K, length(d_hood), cbind(t(d_hood), rep(1, length(d_hood))), run)
# NOTE(review): gamma_new is computed but never used or saved below -- confirm
# whether gamma should be replaced by gamma_new before the save() call.
gamma_new = recompute_gamma(w_coarse, sum_w_pot, gamma)
# #####################################################################################
# # domain splitting check
# #####################################################################################
# w_all <- build_weight_matrix(cal_post, d, idx_cores_all, N, length(idx_cores_all), run)
#
# foo=apply(w_all, 1, rowSums)
#
# pollen_check$sum_w = foo
#####################################################################################
# veg run pars
#####################################################################################
# Posterior parameters from the vegetation model run: either a single random
# posterior draw (draw == TRUE) or the posterior mean, truncated to K taxa.
par_names = vapply(strsplit(colnames(veg_post), '\\.'), `[[`, character(1), 1)
eta_draws = veg_post[, which(par_names == 'eta')]
rho_draws = veg_post[, which(par_names == 'rho')]
if (draw) {
  # same sampling call as before so RNG consumption is unchanged
  row_pick = sample(seq(1, nrow(veg_post)), 1)
  eta = eta_draws[row_pick, ]
  rho = rho_draws[row_pick, ]
} else {
  eta = colMeans(eta_draws)
  rho = colMeans(rho_draws)
}
eta = unname(eta)[1:K]
rho = unname(rho)[1:K]
# ##########################################################################################################################
# ## chunk: qr decompose X
# ##########################################################################################################################
#
# x = matrix(1, nrow=(N*T), ncol=1)
# N_p = N*T
#
# temp = qr(x)
# Q = qr.Q(temp)
# R = qr.R(temp)
#
# P = Q %*% t(Q)
# # M = diag(N_p) - P
#
# if (all(P-P[1,1]<1.0e-12)){
# P = P[1,1]
# N_p = 1
# }
##########################################################################################################################
## save the data; rdata more efficient, use for processing
##########################################################################################################################
# Build the output directory name from the run configuration and create it.
# Kernel prefix first, then flags for calibration-window and posterior-mean runs.
if (kernel == 'gaussian') {
  suff = paste0('G_', suff)
} else if (kernel == 'pl') {
  suff = paste0('PL_', suff)
}
if (cal) suff = paste0(suff, '_cal')
if (!draw) suff = paste0(suff, '_mean')
dirName = paste0('runs/', N_knots, 'knots_', tmin, 'to', tmax, 'ybp_', suff)
if (one_time) {
  # single-time-slice runs share one parent directory across slices
  dirName = paste0('runs/space_slices_', suff)
}
if (AR) {
  dirName = paste0(dirName, '_ar')
}
# one subdirectory per time slice (one_time) or per posterior draw (dr)
subDir = if (one_time) paste0('slice', tmin, 'to', tmax) else paste0('run', dr)
# recursive = TRUE creates dirName as needed; showWarnings = FALSE makes this
# idempotent and replaces the racy (and directory-unaware) pattern of calling
# file.exists() before each dir.create().
dir.create(file.path(dirName, subDir), showWarnings = FALSE, recursive = TRUE)
# paste0('runs/', K, 'taxa_', N, 'cells_', N_knots, 'knots_', tmin, 'to', tmax, 'ypb_', suff, '.rdata')
fname = file.path(dirName, subDir, 'input')
# note that w is column-major
save(K, N, T, N_cores, N_knots, res,
gamma, phi, rho, eta,
y,
idx_cores,
d_knots, d_inter, w, #d_pol, #d,
lag,
# P, N_p, sum_w_pot,
meta_pol, meta_pol_all,
sum_w_pot, pollen_check,
knot_coords,
centers_pls, centers_veg, centers_pol, taxa, ages, y_veg, N_pls,
file=paste0(fname, '.rdata'))
# file=paste('r/dump/', K, 'taxa_', N, 'cells_', N_knots, 'knots_', tmin, 'to', tmax, 'ypb_', suff, '.rdata',sep=""))
# convert to row-major
if (KW){
w_new = vector(length=0)
for (k in 1:K)
w_new = c(w_new, as.vector(w[k,,]))
w = array(w_new, c(K, N_cores, N))
}
dump(c('K', 'N', 'T', 'N_cores', 'N_knots', 'res',
'gamma', 'phi', 'rho', 'eta',
'y',
'idx_cores',
'd_knots', 'd_inter', 'w', #'d_pol', #'d',
'lag',
# 'P', 'N_p', 'sum_w_pot'),
'sum_w_pot'),#, 'pollen_check'),
# 'knot_coords',
# 'centers_pls', 'centers_veg', 'centers_polU', 'taxa', 'ages', 'y_veg', 'N_pls'),
file=paste0(fname, '.dump'))
# file=paste('r/dump/', K, 'taxa_', N, 'cells_', N_knots, 'knots_', tmin, 'to', tmax, 'ypb_', suff, '.dump',sep=""))
##########################################################################################################################
## write meta file with paths
##########################################################################################################################
if (dr==1){
paths = list(path_grid = path_grid,
path_pls = path_pls,
path_pollen = path_pollen,
path_ages = path_ages,
path_cal = path_cal,
path_veg_data = path_veg_data,
path_veg_pars = path_veg_pars)
conn=file(file.path(dirName, 'meta.txt'), 'wt')
write("## Path names", conn)
for (j in 1:length(paths)) {
write(paste0('## ', names(paths)[[j]], '=', paths[[j]]), conn)
}
close(con=conn)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PrepareSQL.R
\name{r2hpcc.PrepareSQL}
\alias{r2hpcc.PrepareSQL}
\title{Use this method to submit a free-hand SQL request for later use as a parameterized query.
This compiles the query and returns the Wuid.
This Wuid is later used to execute the query with provided input parameters using the ExecutePreparedSQL method.}
\usage{
r2hpcc.PrepareSQL(conn, sqlQuery, timeOut = -1)
}
\arguments{
\item{conn}{- HPCC connection information}
\item{sqlQuery}{- Free-hand SQL text}
\item{timeOut}{- Timeout value in milliseconds. Use -1 for no timeout}
}
\value{
Workunit details
}
\description{
Use this method to submit a free-hand SQL request for later use as a parameterized query.
This compiles the query and returns the Wuid.
This Wuid is later used to execute the query with provided input parameters using the ExecutePreparedSQL method.
}
|
/man/r2hpcc.PrepareSQL.Rd
|
no_license
|
hpcc-systems/r2hpcc
|
R
| false
| true
| 917
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PrepareSQL.R
\name{r2hpcc.PrepareSQL}
\alias{r2hpcc.PrepareSQL}
\title{Use this method to submit a free-hand SQL request for later use as a parameterized query.
This compiles the query and returns the Wuid.
This Wuid is later used to execute the query with provided input parameters using the ExecutePreparedSQL method.}
\usage{
r2hpcc.PrepareSQL(conn, sqlQuery, timeOut = -1)
}
\arguments{
\item{conn}{- HPCC connection information}
\item{sqlQuery}{- Free-hand SQL text}
\item{timeOut}{- Timeout value in milliseconds. Use -1 for no timeout}
}
\value{
Workunit details
}
\description{
Use this method to submit a free-hand SQL request for later use as a parameterized query.
This compiles the query and returns the Wuid.
This Wuid is later used to execute the query with provided input parameters using the ExecutePreparedSQL method.
}
|
#' beta_init_condits.
#'
#' A function that calculates the initial conditions and the average contact rate and success between unvaccinated susceptibles and infectious individuals
#' given the age distributed new hospitalizations by week. The method assumes that the time derivatives of infected variables are zero.
#'
#' @param NEW.HOSP.AGE # New hospitalizations per week by age, a vector with 3 elements: HJ, HA, HO 'NEW.HOSP.AGE'.
#' @param PREVALENCE # Fraction of the population that already had the disease by age, a vector with 3 elements: PREVJ, PREVA, PREVO 'PREVALENCE'.
#' @param POP.DISTR # Population distributed by age, a vector with 3 elements: POPJ, POPA, POPO 'POP.DISTR'.
#' @param CONTACT.M # A contact matrix, must give as matrix 'CONTACT.M'.
#' @param EXPOSURE.PERIOD.DAYS # Average time between being infected and developing symptoms 'EXPOSURE.PERIOD.DAYS'.
#' @param SICKNESS.PERIOD.DAYS # Average time between being infectious and recovering for asymptomatic and mild 'SICKNESS.PERIOD.DAYS'.
#' @param SEVERE.PERIOD.DAYS # Average time between being infectious and recovering/dying for severe cases 'SEVERE.PERIOD.DAYS'.
#' @param CONT.REDUC.FRAC # Reduction on the expose of symptomatic (due to symptoms/quarantining) 'CONT.REDUC.FRAC'.
#' @param SEVERE.CONT.REDUC.FRAC # Reduction on the expose of severe cases (due to hospitalization) 'SEVERE.CONT.REDUC.FRAC'.
#' 0 means the same level of exposure of mild cases and 1 means no expose whatsoever.
#' @param REL.INFEC.PRESYMP # relative infectiousness of pre-symptomatic individuals 'REL.INFEC.PRESYMP'.
#' @param ASYMPTOMATIC.FRAC # Fraction of asymptomatic cases in total cases'ASYMPTOMATIC.FRAC'.
#' @param SEVERITY.FRAC # Fraction of severe cases/hospitalizations in symptomatic cases (IHR) 'SEVERITY.FRAC'.
#' @param DEATH.FRAC # Fraction of deaths in severe cases/hospitalizations of unvaccinated population (IHFR) 'DEATH.FRAC'.
#' @param V2.FRAC # Fraction of the infected people due to the second strain 'V2.FRAC'.
#'
#' @return A list where the first entry is BETA.RATE the average contact rate and success between unvaccinated susceptibles and infectious individuals
#' and the second entry is the vector with the initial conditions.
#' @export
#' @import
#'
#' @examples
init_condits <- function(NEW.HOSP.AGE, PREVALENCE, POP.DISTR, CONTACT.M, EXPOSURE.PERIOD.DAYS,
                         SICKNESS.PERIOD.DAYS, SEVERE.PERIOD.DAYS, CONT.REDUC.FRAC,
                         SEVERE.CONT.REDUC.FRAC, REL.INFEC.PRESYMP, ASYMPTOMATIC.FRAC,
                         SEVERITY.FRAC, DEATH.FRAC, V2.FRAC = 0.0001){
  # Total population and weekly versions of the day-based epidemiological periods.
  pop_total <- sum(POP.DISTR)
  exposure_wk <- EXPOSURE.PERIOD.DAYS / 7.0
  sickness_wk <- SICKNESS.PERIOD.DAYS / 7.0
  severe_wk <- SEVERE.PERIOD.DAYS / 7.0
  zero3 <- c(0.0, 0.0, 0.0)
  # Steady-state compartment sizes for one strain, given its share of the weekly
  # new hospitalizations (derived from setting the infected-class time
  # derivatives to zero).
  strain_pools <- function(share) {
    hosp <- share * NEW.HOSP.AGE
    list(
      E = hosp * exposure_wk / SEVERITY.FRAC,
      A = hosp * ASYMPTOMATIC.FRAC * (1.0 - SEVERITY.FRAC) * sickness_wk / SEVERITY.FRAC,
      I = hosp * (1.0 - SEVERITY.FRAC) * (1.0 - ASYMPTOMATIC.FRAC) * sickness_wk / SEVERITY.FRAC,
      H = hosp * severe_wk
    )
  }
  s1 <- strain_pools(1 - V2.FRAC)  # wild strain
  s2 <- strain_pools(V2.FRAC)      # variant strain
  # Recovered: observed seroprevalence is attributed entirely to the wild strain.
  rec1 <- POP.DISTR * PREVALENCE
  rec2 <- zero3
  # Whatever is left of each age class is still susceptible.
  pop_s <- POP.DISTR - (s1$E + s1$A + s1$I + s1$H + rec1 +
                          s2$E + s2$A + s2$I + s2$H + rec2)
  # State vector layout: S, then (E, A, I, H, C, R, D) per strain, 3 ages each.
  pop0 <- c(pop_s,
            s1$E, s1$A, s1$I, s1$H, zero3, rec1, zero3,
            s2$E, s2$A, s2$I, s2$H, zero3, rec2, zero3)
  # Force-of-infection weights for the wild-strain compartments.
  # NOTE(review): CONT.REDUC.FRAC multiplies I directly here, although the
  # docs describe 1 as "no exposure" — confirm whether callers pass the
  # complement of the reduction.
  infectivity <- REL.INFEC.PRESYMP * s1$E + s1$A +
    CONT.REDUC.FRAC * s1$I + SEVERE.CONT.REDUC.FRAC * s1$H
  # Solve the zero-derivative exposed-class balance for beta; element 3
  # (older age class) is reported as the scalar contact/transmission rate.
  beta_vec <- s1$E * pop_total /
    (7.0 * exposure_wk * pop_s * CONTACT.M %*% infectivity)
  list(BETA.RATE = beta_vec[3], POP0 = pop0)
}
|
/functions/beta_init_condits.R
|
permissive
|
covid19br/VaxModel-paper
|
R
| false
| false
| 4,675
|
r
|
#' beta_init_condits.
#'
#' A function that calculates the initial conditions and the average contact rate and success between unvaccinated susceptibles and infectious individuals
#' given the age distributed new hospitalizations by week. The method assumes that the time derivatives of infected variables are zero.
#'
#' @param NEW.HOSP.AGE # New hospitalizations per week by age, a vector with 3 elements: HJ, HA, HO 'NEW.HOSP.AGE'.
#' @param PREVALENCE # Fraction of the population that already had the disease by age, a vector with 3 elements: PREVJ, PREVA, PREVO 'PREVALENCE'.
#' @param POP.DISTR # Population distributed by age, a vector with 3 elements: POPJ, POPA, POPO 'POP.DISTR'.
#' @param CONTACT.M # A contact matrix, must give as matrix 'CONTACT.M'.
#' @param EXPOSURE.PERIOD.DAYS # Average time between being infected and developing symptoms 'EXPOSURE.PERIOD.DAYS'.
#' @param SICKNESS.PERIOD.DAYS # Average time between being infectious and recovering for asymptomatic and mild 'SICKNESS.PERIOD.DAYS'.
#' @param SEVERE.PERIOD.DAYS # Average time between being infectious and recovering/dying for severe cases 'SEVERE.PERIOD.DAYS'.
#' @param CONT.REDUC.FRAC # Reduction on the expose of symptomatic (due to symptoms/quarantining) 'CONT.REDUC.FRAC'.
#' @param SEVERE.CONT.REDUC.FRAC # Reduction on the expose of severe cases (due to hospitalization) 'SEVERE.CONT.REDUC.FRAC'.
#' 0 means the same level of exposure of mild cases and 1 means no expose whatsoever.
#' @param REL.INFEC.PRESYMP # relative infectiousness of pre-symptomatic individuals 'REL.INFEC.PRESYMP'.
#' @param ASYMPTOMATIC.FRAC # Fraction of asymptomatic cases in total cases'ASYMPTOMATIC.FRAC'.
#' @param SEVERITY.FRAC # Fraction of severe cases/hospitalizations in symptomatic cases (IHR) 'SEVERITY.FRAC'.
#' @param DEATH.FRAC # Fraction of deaths in severe cases/hospitalizations of unvaccinated population (IHFR) 'DEATH.FRAC'.
#' @param V2.FRAC # Fraction of the infected people due to the second strain 'V2.FRAC'.
#'
#' @return A list where the first entry is BETA.RATE the average contact rate and success between unvaccinated susceptibles and infectious individuals
#' and the second entry is the vector with the initial conditions.
#' @export
#' @import
#'
#' @examples
init_condits <- function(NEW.HOSP.AGE, PREVALENCE, POP.DISTR, CONTACT.M, EXPOSURE.PERIOD.DAYS,
                         SICKNESS.PERIOD.DAYS, SEVERE.PERIOD.DAYS, CONT.REDUC.FRAC,
                         SEVERE.CONT.REDUC.FRAC, REL.INFEC.PRESYMP, ASYMPTOMATIC.FRAC,
                         SEVERITY.FRAC, DEATH.FRAC, V2.FRAC = 0.0001){
  # Totals and weekly versions of the day-based epidemiological periods.
  # NOTE(review): DEATH.FRAC is accepted but never used in this function —
  # confirm whether it should feed into the initial conditions.
  POP.TOTAL <- sum(POP.DISTR)
  EXPOSURE.PERIOD.WEEKS <- EXPOSURE.PERIOD.DAYS/7.0
  SICKNESS.PERIOD.WEEKS <- SICKNESS.PERIOD.DAYS/7.0
  SEVERE.PERIOD.WEEKS <- SEVERE.PERIOD.DAYS/7.0
  # Wild strain populations (steady-state sizes from weekly hospitalizations)
  POP.E1 <- (1 - V2.FRAC) * NEW.HOSP.AGE * EXPOSURE.PERIOD.WEEKS / SEVERITY.FRAC # Exposed,
  POP.A1 <- (1 - V2.FRAC) * NEW.HOSP.AGE * ASYMPTOMATIC.FRAC * (1.0 - SEVERITY.FRAC) * SICKNESS.PERIOD.WEEKS / SEVERITY.FRAC # Asymptomatics
  POP.I1 <- (1 - V2.FRAC) * NEW.HOSP.AGE * (1.0 - SEVERITY.FRAC) * (1.0 - ASYMPTOMATIC.FRAC) * SICKNESS.PERIOD.WEEKS / SEVERITY.FRAC#Infectious with mild symptoms
  POP.H1 <- (1 - V2.FRAC) * NEW.HOSP.AGE * SEVERE.PERIOD.WEEKS # Hospitalized
  POP.C1 <- c(0.0, 0.0, 0.0) # Cases reported
  POP.R1 <- POP.DISTR * PREVALENCE # Recovered (all prevalence attributed to strain 1)
  POP.D1 <- c(0.0, 0.0, 0.0) # Deaths
  # Variant strain population (same formulas, V2.FRAC share of hospitalizations)
  POP.E2 <- V2.FRAC * NEW.HOSP.AGE * EXPOSURE.PERIOD.WEEKS / SEVERITY.FRAC # Exposed,
  POP.A2 <- V2.FRAC * NEW.HOSP.AGE * ASYMPTOMATIC.FRAC * (1.0 - SEVERITY.FRAC) * SICKNESS.PERIOD.WEEKS / SEVERITY.FRAC # Asymptomatics
  POP.I2 <- V2.FRAC * NEW.HOSP.AGE * (1.0 - SEVERITY.FRAC) * (1.0 - ASYMPTOMATIC.FRAC) * SICKNESS.PERIOD.WEEKS / SEVERITY.FRAC#Infectious with mild symptoms
  POP.H2 <- V2.FRAC * NEW.HOSP.AGE * SEVERE.PERIOD.WEEKS # Hospitalized
  POP.C2 <- c(0.0, 0.0, 0.0) # Cases reported
  POP.R2 <- c(0.0, 0.0, 0.0) # Recovered
  POP.D2 <- c(0.0, 0.0, 0.0) # Deaths
  # Susceptibles remaining in the population
  POP.S <- POP.DISTR - (POP.E1 + POP.A1 + POP.I1 + POP.H1 + POP.R1 + POP.D1 + POP.E2 + POP.A2 + POP.I2 + POP.H2 + POP.R2 + POP.D2)
  # State vector layout: S, then (E, A, I, H, C, R, D) per strain, 3 ages each.
  POP0 <- c(POP.S, POP.E1, POP.A1, POP.I1, POP.H1, POP.C1, POP.R1, POP.D1,
            POP.E2, POP.A2, POP.I2, POP.H2, POP.C2, POP.R2, POP.D2)
  # Force-of-infection weights per wild-strain compartment.
  # NOTE(review): the roxygen docs say CONT.REDUC.FRAC = 1 means "no
  # exposure", yet it multiplies POP.I1 directly (so 1 = full exposure);
  # confirm whether callers pass the complement.
  INFECTIVITY.VECTOR <- REL.INFEC.PRESYMP * POP.E1 + POP.A1 + CONT.REDUC.FRAC * POP.I1 + SEVERE.CONT.REDUC.FRAC * POP.H1
  # Solve the zero-derivative exposed-class balance for beta; element 3
  # (older age class) is reported as the scalar transmission rate.
  BETA.RATE.VECTOR <- POP.E1 * POP.TOTAL / (7.0 * EXPOSURE.PERIOD.WEEKS * POP.S * CONTACT.M %*% INFECTIVITY.VECTOR)
  return(list(BETA.RATE = BETA.RATE.VECTOR[3], POP0 = POP0))
}
|
## Below are two functions that are used to create a special object
## that stores a square matrix and cache's its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## This is really a list containing a function to
## 1. Set the value of the Matrix
## 2. Get the value of the Matrix
## 3. Set the value of the Inverse
## 4. Get the value of the Inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until computed.
  # Bug fix: the original read `inverse <- null` — lowercase `null` is not
  # defined in R, so every call errored; the sentinel must be the NULL object.
  inverse <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store / retrieve the cached inverse.
  setinverse <- function(inv) inverse <<- inv
  getinverse <- function() inverse
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then it retrieves the inverse from
## the cache.
cacheSolve <- function(x, ...) {
  # Return the cached inverse if one is already stored.
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("Getting cached data")
    # Bug fix: `return inv` is a syntax error in R; return() must be called.
    return(inv)
  }
  # Bug fix: `x$get` is the accessor function itself; it has to be called to
  # obtain the matrix before inverting it.
  data <- x$get()
  inv <- solve(data, ...)
  # Cache the freshly computed inverse for subsequent calls.
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
sreeramkumar/ProgrammingAssignment2
|
R
| false
| false
| 1,314
|
r
|
## Below are two functions that are used to create a special object
## that stores a square matrix and cache's its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## This is really a list containing a function to
## 1. Set the value of the Matrix
## 2. Get the value of the Matrix
## 3. Set the value of the Inverse
## 4. Get the value of the Inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until computed.
  # Bug fix: the original read `inverse <- null` — lowercase `null` is not
  # defined in R, so every call errored; the sentinel must be the NULL object.
  inverse <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store / retrieve the cached inverse.
  setinverse <- function(inv) inverse <<- inv
  getinverse <- function() inverse
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then it retrieves the inverse from
## the cache.
cacheSolve <- function(x, ...) {
  # Return the cached inverse if one is already stored.
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("Getting cached data")
    # Bug fix: `return inv` is a syntax error in R; return() must be called.
    return(inv)
  }
  # Bug fix: `x$get` is the accessor function itself; it has to be called to
  # obtain the matrix before inverting it.
  data <- x$get()
  inv <- solve(data, ...)
  # Cache the freshly computed inverse for subsequent calls.
  x$setinverse(inv)
  inv
}
|
#' ggtimeline
#'
#'
#' @param x is a data.frame with three columns
#' @param begin is the date the timeline begins in the form "dd/mm/yyyy"
#' @param end is the date the timeline ends in the form "dd/mm/yyyy"
#' @param group Boolean denoting whether activities should be grouped in terms of colour
#'
#' @return This function draws a timeline given a data.frame with three or four columns.
#' These columns should correspond to activity, start and end. The start and end columns
#' should be dates in the form of "dd/mm/yyyy". You also need to specific when
#' you want the time table to begin and end. Only have a "group" column if you have group = TRUE.
#' @examples timeline(timeline_dates, "1/1/2015", "1/3/2018")
#' @export
ggtimeline <- function(x, begin, end, group = F){
  # Normalise the input columns: with group = FALSE the data frame must have
  # exactly three columns (activity, start, end); with group = TRUE a fourth
  # column named 'group' is required.
  if(group == F){
    if(ncol(x) != 3){stop("\n\nToo many columns!! You can only have three.\n\n")}
    colnames(x) <- c("activity", "start", "halt")
  }
  else{
    if(!"group" %in% colnames(x))stop("\n\nIf you want to group tasks you need a column named 'group'.\n\n")
    colnames(x) <- c("activity", "start", "halt", "group")
  }
  require(scales)
  require(ggplot2)
  # Parse the start/end columns from "dd/mm/yyyy" strings into Date objects.
  x$start = as.Date(x$start, "%d/%m/%Y")
  x$halt = as.Date(x$halt, "%d/%m/%Y")
  # Colour either by individual activity or by the user-supplied group column.
  if(group == F){
    chart <- ggplot(x, aes(start, activity, colour = activity))
  }
  else{
    chart <- ggplot(x, aes(start, activity, colour = group))
  }
  # Horizontal bars spanning each activity's start..halt interval, drawn on a
  # two-monthly date axis bounded by 'begin' and 'end'.
  chart +
    geom_errorbarh(aes(xmin = start, xmax = halt, height = 0.4), size = 3) +
    scale_x_date(labels = date_format("%b %Y"),
                 limits = c(as.Date(begin, "%d/%m/%Y"), as.Date(end, "%d/%m/%Y")),
                 breaks = seq(as.Date(begin, "%d/%m/%Y"), as.Date(end, "%d/%m/%Y"), by = '2 month')) +
    theme(legend.position = "none",
          axis.text = element_text(size = 16),
          axis.title.y = element_blank(),
          axis.line.y = element_blank()) +
    # NOTE(review): g_colours is not defined in this function or this file
    # chunk — presumably a package-level palette; confirm it exists wherever
    # this function is used.
    scale_color_manual(values = g_colours)
}
#' ggplotRegression
#'
#' @param fit is a linear model from the lm function
#'
#' @return This plots a linear model with ggplot2. It is a function written by Susan
#' Johnston (https://susanejohnston.wordpress.com/2012/08/09/a-quick-and-easy-function-to-plot-lm-results-in-r/).
#' @export
# Plot a fitted simple linear model: the data points, the regression line,
# and a title reporting adjusted R^2, intercept, slope and the slope p-value
# (adapted from Susan Johnston's blog post).
ggplotRegression <- function (fit) {
  require(ggplot2)
  fit_summary <- summary(fit)
  model_frame <- fit$model
  # Column 1 of the model frame is the response, column 2 the predictor.
  response_name <- names(model_frame)[1]
  predictor_name <- names(model_frame)[2]
  # Same title string as before, assembled from named pieces.
  plot_title <- paste("Adj R2 = ", signif(fit_summary$adj.r.squared, 5),
                      "Intercept =", signif(fit$coef[[1]], 5),
                      " Slope =", signif(fit$coef[[2]], 5),
                      " P =", signif(fit_summary$coef[2, 4], 5))
  base_plot <- ggplot(model_frame, aes_string(x = predictor_name, y = response_name))
  base_plot +
    geom_point() +
    stat_smooth(method = "lm", col = "red") +
    labs(title = plot_title)
}
|
/R/charts.R
|
no_license
|
G-Thomson/gthor
|
R
| false
| false
| 2,705
|
r
|
#' ggtimeline
#'
#'
#' @param x is a data.frame with three columns
#' @param begin is the date the timeline begins in the form "dd/mm/yyyy"
#' @param end is the date the timeline ends in the form "dd/mm/yyyy"
#' @param group Boolean denoting whether activities should be grouped in terms of colour
#'
#' @return This function draws a timeline given a data.frame with three or four columns.
#' These columns should correspond to activity, start and end. The start and end columns
#' should be dates in the form of "dd/mm/yyyy". You also need to specific when
#' you want the time table to begin and end. Only have a "group" column if you have group = TRUE.
#' @examples timeline(timeline_dates, "1/1/2015", "1/3/2018")
#' @export
ggtimeline <- function(x, begin, end, group = F){
  # Normalise the input columns: with group = FALSE the data frame must have
  # exactly three columns (activity, start, end); with group = TRUE a fourth
  # column named 'group' is required.
  if(group == F){
    if(ncol(x) != 3){stop("\n\nToo many columns!! You can only have three.\n\n")}
    colnames(x) <- c("activity", "start", "halt")
  }
  else{
    if(!"group" %in% colnames(x))stop("\n\nIf you want to group tasks you need a column named 'group'.\n\n")
    colnames(x) <- c("activity", "start", "halt", "group")
  }
  require(scales)
  require(ggplot2)
  # Parse the start/end columns from "dd/mm/yyyy" strings into Date objects.
  x$start = as.Date(x$start, "%d/%m/%Y")
  x$halt = as.Date(x$halt, "%d/%m/%Y")
  # Colour either by individual activity or by the user-supplied group column.
  if(group == F){
    chart <- ggplot(x, aes(start, activity, colour = activity))
  }
  else{
    chart <- ggplot(x, aes(start, activity, colour = group))
  }
  # Horizontal bars spanning each activity's start..halt interval, drawn on a
  # two-monthly date axis bounded by 'begin' and 'end'.
  chart +
    geom_errorbarh(aes(xmin = start, xmax = halt, height = 0.4), size = 3) +
    scale_x_date(labels = date_format("%b %Y"),
                 limits = c(as.Date(begin, "%d/%m/%Y"), as.Date(end, "%d/%m/%Y")),
                 breaks = seq(as.Date(begin, "%d/%m/%Y"), as.Date(end, "%d/%m/%Y"), by = '2 month')) +
    theme(legend.position = "none",
          axis.text = element_text(size = 16),
          axis.title.y = element_blank(),
          axis.line.y = element_blank()) +
    # NOTE(review): g_colours is not defined in this function or this file
    # chunk — presumably a package-level palette; confirm it exists wherever
    # this function is used.
    scale_color_manual(values = g_colours)
}
#' ggplotRegression
#'
#' @param fit is a linear model from the lm function
#'
#' @return This plots a linear model with ggplot2. It is a function written by Susan
#' Johnston (https://susanejohnston.wordpress.com/2012/08/09/a-quick-and-easy-function-to-plot-lm-results-in-r/).
#' @export
ggplotRegression <- function (fit) {
  # Scatter plot of the model frame's first column (response) against its
  # second (predictor), with the fitted lm line, titled with adjusted R^2,
  # intercept, slope and the slope's p-value (summary coef row 2, column 4).
  require(ggplot2)
  ggplot(fit$model, aes_string(x = names(fit$model)[2], y = names(fit$model)[1])) +
    geom_point() +
    stat_smooth(method = "lm", col = "red") +
    labs(title = paste("Adj R2 = ",signif(summary(fit)$adj.r.squared, 5),
                       "Intercept =",signif(fit$coef[[1]],5 ),
                       " Slope =",signif(fit$coef[[2]], 5),
                       " P =",signif(summary(fit)$coef[2,4], 5)))
}
|
##############################
## TOPIC MODELING USING LDA ##
##############################
## ------------------------------------------------------------------------
# Removing stop words
library(tidytext)
library(dplyr)
library(stringr)
load("postdata.Rdata")
# Characters to strip from titles before tokenising.
# Bug fix / NOTE(review): the original line `reg = ""|\\$|<"` does not parse —
# it appears garbled (likely HTML entities stripped in transit). Reconstructed
# as a pattern matching a double quote, a dollar sign, or '<'; confirm against
# the original repository if possible.
reg <- "\"|\\$|<"
Title <- postdata %>%
  select(Id, Title)
Title <- data.frame(Title)
# One row per (post Id, word), with stop words and purely non-alphabetic
# tokens removed.
Title_clean <- Title %>%
  mutate(text = str_replace_all(Title, pattern = reg, replacement = "")) %>%
  unnest_tokens(word, text) %>%
  filter(!word %in% stop_words$word,
         str_detect(word, "[a-z]"))
save(Title_clean, file = "Title_clean")
## ------------------------------------------------------------------------
# Word counts per post.
wc_title <- Title_clean %>%
  count(Id, word, sort = TRUE) %>%
  ungroup()
## ------------------------------------------------------------------------
# Extracting topics using LDA
library(topicmodels)
k <- 20  # number of topics
# NOTE(review): `seed = 11-11-2016` is arithmetic and evaluates to -2016;
# it was probably meant as a date-like literal. Kept as-is to preserve the
# published run's reproducibility.
lda_title <- cast_dtm(wc_title, Id, word, n) %>%
  LDA(k = k, method = "VEM", control = list(seed = 11-11-2016))
save(lda_title, file = "lda_title")
## ------------------------------------------------------------------------
# Top n terms for k topics sorted by beta
n <- 10
top_title <- lda_title %>%
  tidy("beta") %>%
  group_by(topic) %>%
  top_n(n = n, beta) %>%
  ungroup() %>%
  arrange(topic, -beta)
|
/explore/TopicModel/Topic_LDA.R
|
no_license
|
vynguyent/Stack_Exchange_Data_Explore
|
R
| false
| false
| 1,324
|
r
|
##############################
## TOPIC MODELING USING LDA ##
##############################
## ------------------------------------------------------------------------
# Removing stop words
library(tidytext)
library(dplyr)
library(stringr)
load("postdata.Rdata")
# Characters to strip from titles before tokenising.
# Bug fix / NOTE(review): the original line `reg = ""|\\$|<"` does not parse —
# it appears garbled (likely HTML entities stripped in transit). Reconstructed
# as a pattern matching a double quote, a dollar sign, or '<'; confirm against
# the original repository if possible.
reg <- "\"|\\$|<"
Title <- postdata %>%
  select(Id, Title)
Title <- data.frame(Title)
# One row per (post Id, word), with stop words and purely non-alphabetic
# tokens removed.
Title_clean <- Title %>%
  mutate(text = str_replace_all(Title, pattern = reg, replacement = "")) %>%
  unnest_tokens(word, text) %>%
  filter(!word %in% stop_words$word,
         str_detect(word, "[a-z]"))
save(Title_clean, file = "Title_clean")
## ------------------------------------------------------------------------
# Word counts per post.
wc_title <- Title_clean %>%
  count(Id, word, sort = TRUE) %>%
  ungroup()
## ------------------------------------------------------------------------
# Extracting topics using LDA
library(topicmodels)
k <- 20  # number of topics
# NOTE(review): `seed = 11-11-2016` is arithmetic and evaluates to -2016;
# it was probably meant as a date-like literal. Kept as-is to preserve the
# published run's reproducibility.
lda_title <- cast_dtm(wc_title, Id, word, n) %>%
  LDA(k = k, method = "VEM", control = list(seed = 11-11-2016))
save(lda_title, file = "lda_title")
## ------------------------------------------------------------------------
# Top n terms for k topics sorted by beta
n <- 10
top_title <- lda_title %>%
  tidy("beta") %>%
  group_by(topic) %>%
  top_n(n = n, beta) %>%
  ungroup() %>%
  arrange(topic, -beta)
|
# Load helper definitions; checkplot_inf() (and presumably SADs_list) come
# from these sourced scripts.
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
# Inner replicates per checkplot_inf() call and number of outer repetitions.
reps<-50
outerreps<-1000
# Community sizes 10^2..10^5 on a quarter-decade grid, largest first; this
# job processes only the 7th entry of that reversed sequence.
size<-rev(round(10^seq(2, 5, 0.25)))[
7
]
# Worker count for the parallel plan.
nc<-12
plan(strategy=multisession, workers=nc)
# NOTE(review): map() is used here purely for its side effect (writing one
# CSV per outer repetition); its return value is discarded. plan()/map()/
# flatten() are assumed to come from packages attached by the sourced
# scripts — confirm (future/purrr).
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[5]], l=0, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD5","l",0,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
# Report wall-clock time for this repetition.
print(Sys.time()-start)
})
|
/scripts/checkplots_for_parallel_amarel/asy_517.R
|
no_license
|
dushoff/diversity_metrics
|
R
| false
| false
| 533
|
r
|
# Load helper definitions; checkplot_inf() (and presumably SADs_list) come
# from these sourced scripts.
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
# Inner replicates per checkplot_inf() call and number of outer repetitions.
reps<-50
outerreps<-1000
# Community sizes 10^2..10^5 on a quarter-decade grid, largest first; this
# job processes only the 7th entry of that reversed sequence.
size<-rev(round(10^seq(2, 5, 0.25)))[
7
]
# Worker count for the parallel plan.
nc<-12
plan(strategy=multisession, workers=nc)
# NOTE(review): map() is used here purely for its side effect (writing one
# CSV per outer repetition); its return value is discarded. plan()/map()/
# flatten() are assumed to come from packages attached by the sourced
# scripts — confirm (future/purrr).
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[5]], l=0, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD5","l",0,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
# Report wall-clock time for this repetition.
print(Sys.time()-start)
})
|
#' @title Predictions for a new dataset using an existing probit_bartBMA object
#'
#' @description This function produces predictions for a new dataset using a previously obtained bartBMA object.
#' @param object A probit_bartBMA object obtained using the probit_bartBMA function.
#' @param newdata Covariate matrix for new dataset.
#' @export
#' @return A vector of predictions for the new dataset.
predict_probit_bartBMA<-function(object,newdata){
# Dispatch on how the probit_bartBMA object was fitted:
#  - newdata NULL and object of length 16: test data was supplied at fit
#    time, so predict on object$test_data (out-of-sample);
#  - newdata NULL and object of length 14: predict in-sample on the
#    training data;
#  - otherwise: predict on the newdata supplied to this call.
# NOTE(review): num_iter and burnin are referenced below but are neither
# parameters nor locals — they must exist in the calling environment;
# confirm this is intentional.
#preds<-get_BART_BMA_test_predictions(newdata,object$bic,object$sumoftrees,object$y_minmax)
if(is.null(newdata) && length(object)==16){
#if test data specified separately
preds<-preds_bbma_lin_alg_outsamp(object$sumoftrees,object$obs_to_termNodesMatrix,object$response,object$bic,num_iter, burnin,object$nrowTrain,
nrow(object$test_data),object$a,object$sigma,0,object$nu,
object$lambda,#diff_inital_resids,
object$test_data
)
}else{if(is.null(newdata) && length(object)==14){
#else return Pred Ints for training data
preds<-preds_bbma_lin_alg_insamp(object$sumoftrees,object$obs_to_termNodesMatrix,object$response,object$bic,num_iter, burnin,object$nrowTrain,
object$a,object$sigma,0,object$nu,
object$lambda#diff_inital_resids
)
}else{
#if test data included in call to object
preds<-preds_bbma_lin_alg_outsamp(object$sumoftrees,object$obs_to_termNodesMatrix,object$response,object$bic,num_iter, burnin,object$nrowTrain,
nrow(newdata), object$a,object$sigma,0,object$nu,
object$lambda,#diff_inital_resids,
newdata
)
}}
ret <- list()
# Map latent-scale predictions through the probit link to class
# probabilities, then threshold at 0.5 for the binary label.
#probs <-pnorm(preds[[1]])
probs <-pnorm(preds)
pred_binary <- ifelse(probs<=0.5,0,1)
ret$probs <- probs
ret$pred_binary <- pred_binary
ret
}
|
/R/predict_probit_bartBMA.R
|
no_license
|
EoghanONeill/bartBMAnew
|
R
| false
| false
| 2,031
|
r
|
#' @title Predictions for a new dataset using an existing probit_bartBMA object
#'
#' @description This function produces predictions for a new dataset using a previously obtained bartBMA object.
#' @param object A probit_bartBMA object obtained using the probit_bartBMA function.
#' @param newdata Covariate matrix for new dataset.
#' @export
#' @return A vector of predictions for the new dataset.
predict_probit_bartBMA<-function(object,newdata){
# Dispatch on how the probit_bartBMA object was fitted:
#  - newdata NULL and object of length 16: test data was supplied at fit
#    time, so predict on object$test_data (out-of-sample);
#  - newdata NULL and object of length 14: predict in-sample on the
#    training data;
#  - otherwise: predict on the newdata supplied to this call.
# NOTE(review): num_iter and burnin are referenced below but are neither
# parameters nor locals — they must exist in the calling environment;
# confirm this is intentional.
#preds<-get_BART_BMA_test_predictions(newdata,object$bic,object$sumoftrees,object$y_minmax)
if(is.null(newdata) && length(object)==16){
#if test data specified separately
preds<-preds_bbma_lin_alg_outsamp(object$sumoftrees,object$obs_to_termNodesMatrix,object$response,object$bic,num_iter, burnin,object$nrowTrain,
nrow(object$test_data),object$a,object$sigma,0,object$nu,
object$lambda,#diff_inital_resids,
object$test_data
)
}else{if(is.null(newdata) && length(object)==14){
#else return Pred Ints for training data
preds<-preds_bbma_lin_alg_insamp(object$sumoftrees,object$obs_to_termNodesMatrix,object$response,object$bic,num_iter, burnin,object$nrowTrain,
object$a,object$sigma,0,object$nu,
object$lambda#diff_inital_resids
)
}else{
#if test data included in call to object
preds<-preds_bbma_lin_alg_outsamp(object$sumoftrees,object$obs_to_termNodesMatrix,object$response,object$bic,num_iter, burnin,object$nrowTrain,
nrow(newdata), object$a,object$sigma,0,object$nu,
object$lambda,#diff_inital_resids,
newdata
)
}}
ret <- list()
# Map latent-scale predictions through the probit link to class
# probabilities, then threshold at 0.5 for the binary label.
#probs <-pnorm(preds[[1]])
probs <-pnorm(preds)
pred_binary <- ifelse(probs<=0.5,0,1)
ret$probs <- probs
ret$pred_binary <- pred_binary
ret
}
|
\name{rate02}
\alias{rate02}
\docType{data}
\title{
Rate data for the year 2002
}
\description{
Visual meteor rate data for the year 2002.
}
\usage{rate02}
\format{
A data frame with 13380 observations on the following 34 variables.
\describe{
\item{\code{IMOcode}}{factor IMO observer code}
\item{\code{sitecode}}{numeric IMO site code}
\item{\code{long}}{numeric Longitude of the observing site in degrees}
\item{\code{EW}}{factor East (E) or west (W) position from the prime meridian}
\item{\code{lat}}{numeric Latitude of the observing site in degrees}
\item{\code{NS}}{factor North (N) or south (S) position from the equator}
\item{\code{day}}{numeric Day of the month}
\item{\code{month}}{numeric Month of the year}
\item{\code{year}}{numeric Year 2002}
\item{\code{start}}{numeric Beginning of the observing time period}
\item{\code{stop}}{numeric End of the observing time period}
\item{\code{sollong}}{numeric Solar longitude of the middle of observing time period}
\item{\code{fovRA}}{numeric Right ascension of the center of the field of view}
\item{\code{fovDEC}}{numeric Declination of the center of the field of view}
\item{\code{Teff}}{numeric Effective observing time}
\item{\code{F}}{numeric Correction factor for clouds}
\item{\code{lmg}}{numeric Limiting magnitude}
\item{\code{SPO}}{numeric Number of observed sporadics}
\item{\code{Shw1}}{factor Abbreviation of the first shower}
\item{\code{N1}}{numeric Number of meteors belonging to the first shower}
\item{\code{Shw2}}{factor Abbreviation of the second shower}
\item{\code{N2}}{numeric Number of meteors belonging to the second shower}
\item{\code{Shw3}}{factor Abbreviation of the third shower}
\item{\code{N3}}{numeric Number of meteors belonging to the third shower}
  \item{\code{Shw4}}{factor Abbreviation of the fourth shower}
  \item{\code{N4}}{numeric Number of meteors belonging to the fourth shower}
\item{\code{Shw5}}{factor Abbreviation of the fifth shower}
\item{\code{N5}}{numeric Number of meteors belonging to the fifth shower}
\item{\code{Shw6}}{factor Abbreviation of the 6th shower}
\item{\code{N6}}{numeric Number of meteors belonging to the 6th shower}
\item{\code{Shw7}}{factor Abbreviation of the 7th shower}
\item{\code{N7}}{numeric Number of meteors belonging to the 7th shower}
\item{\code{Shw8}}{factor Abbreviation of the 8th shower}
\item{\code{N8}}{numeric Number of meteors belonging to the 8th shower}
}
}
\source{
Visual Meteor Database, \url{http://www.imo.net/data/visual}
}
|
/man/rate02.Rd
|
no_license
|
arturochian/MetFns
|
R
| false
| false
| 2,708
|
rd
|
\name{rate02}
\alias{rate02}
\docType{data}
\title{
Rate data for the year 2002
}
\description{
Visual meteor rate data for the year 2002.
}
\usage{rate02}
\format{
A data frame with 13380 observations on the following 34 variables.
\describe{
\item{\code{IMOcode}}{factor IMO observer code}
\item{\code{sitecode}}{numeric IMO site code}
\item{\code{long}}{numeric Longitude of the observing site in degrees}
\item{\code{EW}}{factor East (E) or west (W) position from the prime meridian}
\item{\code{lat}}{numeric Latitude of the observing site in degrees}
\item{\code{NS}}{factor North (N) or south (S) position from the equator}
\item{\code{day}}{numeric Day of the month}
\item{\code{month}}{numeric Month of the year}
\item{\code{year}}{numeric Year 2002}
\item{\code{start}}{numeric Beginning of the observing time period}
\item{\code{stop}}{numeric End of the observing time period}
\item{\code{sollong}}{numeric Solar longitude of the middle of observing time period}
\item{\code{fovRA}}{numeric Right ascension of the center of the field of view}
\item{\code{fovDEC}}{numeric Declination of the center of the field of view}
\item{\code{Teff}}{numeric Effective observing time}
\item{\code{F}}{numeric Correction factor for clouds}
\item{\code{lmg}}{numeric Limiting magnitude}
\item{\code{SPO}}{numeric Number of observed sporadics}
\item{\code{Shw1}}{factor Abbreviation of the first shower}
\item{\code{N1}}{numeric Number of meteors belonging to the first shower}
\item{\code{Shw2}}{factor Abbreviation of the second shower}
\item{\code{N2}}{numeric Number of meteors belonging to the second shower}
\item{\code{Shw3}}{factor Abbreviation of the third shower}
\item{\code{N3}}{numeric Number of meteors belonging to the third shower}
  \item{\code{Shw4}}{factor Abbreviation of the fourth shower}
  \item{\code{N4}}{numeric Number of meteors belonging to the fourth shower}
\item{\code{Shw5}}{factor Abbreviation of the fifth shower}
\item{\code{N5}}{numeric Number of meteors belonging to the fifth shower}
\item{\code{Shw6}}{factor Abbreviation of the 6th shower}
\item{\code{N6}}{numeric Number of meteors belonging to the 6th shower}
\item{\code{Shw7}}{factor Abbreviation of the 7th shower}
\item{\code{N7}}{numeric Number of meteors belonging to the 7th shower}
\item{\code{Shw8}}{factor Abbreviation of the 8th shower}
\item{\code{N8}}{numeric Number of meteors belonging to the 8th shower}
}
}
\source{
Visual Meteor Database, \url{http://www.imo.net/data/visual}
}
|
# Build the `extractbams_output` example dataset shipped with DAMEfinder.
library(DAMEfinder)

# All raw inputs live under inst/extdata (paths relative to the package root).
DATA_PATH_DIR <- "inst/extdata"

# Resolve a bare file name to its full path under DATA_PATH_DIR.
get_data_path <- function(file_name) file.path(DATA_PATH_DIR, file_name)

# vapply() (instead of sapply()) guarantees a character vector regardless of
# input length, which sapply() does not.
bam_files <- vapply(c("CRC1_chr19_trim.bam",
                      "moredata/CRC2_chr19_trim.bam",
                      "moredata/CRC3_chr19_trim.bam",
                      "moredata/CRC4_chr19_trim.bam",
                      "NORM1_chr19_trim.bam",
                      "moredata/NORM2_chr19_trim.bam",
                      "moredata/NORM3_chr19_trim.bam",
                      "moredata/NORM4_chr19_trim.bam"),
                    get_data_path, character(1),
                    USE.NAMES = FALSE)

vcf_files <- vapply(c("CRC1.chr19.trim.vcf",
                      "moredata/CRC2.chr19.trim.vcf",
                      "moredata/CRC3.chr19.trim.vcf",
                      "moredata/CRC4.chr19.trim.vcf",
                      "NORM1.chr19.trim.vcf",
                      "moredata/NORM2.chr19.trim.vcf",
                      "moredata/NORM3.chr19.trim.vcf",
                      "moredata/NORM4.chr19.trim.vcf"),
                    get_data_path, character(1),
                    USE.NAMES = FALSE)

# Sample labels, ordered to match bam_files / vcf_files element-for-element.
sample_names <- c("CRC1","CRC2","CRC3","CRC4","NORM1","NORM2","NORM3","NORM4")

reference_file <- get_data_path("19.fa")

# Extract methylation/SNP information from the BAMs and store the result as
# a compressed package dataset.
extractbams_output <- extract_bams(bam_files, vcf_files, sample_names,
                                   reference_file)
usethis::use_data(extractbams_output, overwrite = TRUE, compress = 'xz')
|
/data-raw/extractbams_output.R
|
permissive
|
katwre/DAMEfinder
|
R
| false
| false
| 1,391
|
r
|
library(DAMEfinder)
DATA_PATH_DIR <- "inst/extdata"
get_data_path <- function(file_name) file.path(DATA_PATH_DIR, file_name)
bam_files <- sapply(c("CRC1_chr19_trim.bam",
"moredata/CRC2_chr19_trim.bam",
"moredata/CRC3_chr19_trim.bam",
"moredata/CRC4_chr19_trim.bam",
"NORM1_chr19_trim.bam",
"moredata/NORM2_chr19_trim.bam",
"moredata/NORM3_chr19_trim.bam",
"moredata/NORM4_chr19_trim.bam"),get_data_path,
USE.NAMES = FALSE)
vcf_files <- sapply(c("CRC1.chr19.trim.vcf",
"moredata/CRC2.chr19.trim.vcf",
"moredata/CRC3.chr19.trim.vcf",
"moredata/CRC4.chr19.trim.vcf",
"NORM1.chr19.trim.vcf",
"moredata/NORM2.chr19.trim.vcf",
"moredata/NORM3.chr19.trim.vcf",
"moredata/NORM4.chr19.trim.vcf"),get_data_path,
USE.NAMES = FALSE)
sample_names <- c("CRC1","CRC2","CRC3","CRC4","NORM1","NORM2","NORM3","NORM4")
reference_file <- get_data_path("19.fa")
extractbams_output <- extract_bams(bam_files, vcf_files, sample_names,
reference_file)
usethis::use_data(extractbams_output, overwrite = TRUE, compress = 'xz')
|
corr <- function(directory, threshold = 0) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files

  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all
  ## variables) required to compute the correlation between
  ## nitrate and sulfate; the default is 0

  ## Return a numeric vector of correlations (one per monitor that
  ## passes the threshold; numeric(0) when none do)

  # Bug fix: use the caller-supplied directory. The original hard-coded
  # complete("specdata"), silently ignoring the 'directory' argument.
  completeness <- complete(directory)
  ids <- completeness$id[completeness$nobs > threshold]

  # vapply preallocates the result instead of growing a vector with c()
  # inside a loop, and guarantees a numeric return even for zero monitors.
  vapply(ids, function(monitor_id) {
    monitor_data <- getmonitor(monitor_id, directory)
    cor(monitor_data$sulfate, monitor_data$nitrate,
        use = "pairwise.complete.obs")
  }, numeric(1))
}
|
/corr.R
|
no_license
|
addseq/r_datascience
|
R
| false
| false
| 817
|
r
|
corr <- function(directory, threshold = 0) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'threshold' is a numeric vector of length 1 indicating the
## number of completely observed observations (on all
## variables) required to compute the correlation between
## nitrate and sulfate; the default is 0
## Return a numeric vector of correlations
v <- numeric(0)
dataFrame <- complete("specdata")
dataFrame <- dataFrame[dataFrame$nobs > threshold, ]
for (d in dataFrame$id) {
monitorDataFrame <- getmonitor(d, directory)
v <- c(v, cor(monitorDataFrame$sulfate, monitorDataFrame$nitrate, use = "pairwise.complete.obs"))
}
return(v)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parametric.R
\name{dini.surface}
\alias{dini.surface}
\title{Dini Surface}
\usage{
dini.surface(n = 10000, a = 1, b = 1)
}
\arguments{
\item{n}{number of points}
\item{a}{outer radius of object}
\item{b}{space between loops}
}
\value{
\item{points }{location of points}
\item{edges }{edges of the object (null)}
}
\description{
A function to generate a Dini surface.
}
\examples{
## Generates a Dini Surface
dini.surface(n = 1000, a = 1, b = 1)
}
\author{
Barret Schloerke
}
\references{
\url{http://schloerke.github.io/geozoo/mobius/other/}
}
\keyword{dynamic}
|
/man/dini.surface.Rd
|
no_license
|
schloerke/geozoo
|
R
| false
| true
| 645
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parametric.R
\name{dini.surface}
\alias{dini.surface}
\title{Dini Surface}
\usage{
dini.surface(n = 10000, a = 1, b = 1)
}
\arguments{
\item{n}{number of points}
\item{a}{outer radius of object}
\item{b}{space between loops}
}
\value{
\item{points }{location of points}
\item{edges }{edges of the object (null)}
}
\description{
A function to generate a dini surface.
}
\examples{
## Generates a Dini Surface
dini.surface(n = 1000, a = 1, b = 1)
}
\author{
Barret Schloerke
}
\references{
\url{http://schloerke.github.io/geozoo/mobius/other/}
}
\keyword{dynamic}
|
# Ex. 6.7 on p. 298)
# One-way ANOVA on three small groups.
g1 = c(9, 6, 9)
g2 = c(0, 2)
g3 = c(3, 1, 2)
Data <- data.frame(Y = c(g1, g2, g3),
                   Group = factor(rep(c("g1", "g2", "g3"), times=c(length(g1), length(g2), length(g3)))))
Data
m1 = aov(Y~ Group, data=Data)
anova(m1)

# Read QueenslandFuel.txt data (interactive file picker)
x <- read.table(file.choose(), header = TRUE)

# ANOVA on fuel price by city
summary(aov(Price ~ City, data = x))
# Do not reject Ho

# Manual computation: 6 cities with 9 observations each, stored in
# consecutive blocks of rows; column 3 is Price.
m1 <- mean(x[1:9, 3])
m2 <- mean(x[10:18, 3])
m3 <- mean(x[19:27, 3])
m4 <- mean(x[28:36, 3])
m5 <- mean(x[37:45, 3])
# Bug fix: the original used rows 45:54 (10 rows, double-counting row 45,
# which belongs to city 5). City 6 occupies rows 46:54 (n6 = 9).
m6 <- mean(x[46:54, 3])
(x.bar = mean(x$Price))

n1 = n2 = n3 = n4 = n5 = n6 = 9
g = 6  # number of groups (cities)

# Sum-of-squares decomposition for the one-way ANOVA F test.
SS.mean = (n1 + n2 + n3 + n4 + n5 + n6)*((x.bar)^2)
(SS.Total = sum(x$Price^2) - SS.mean)
SS.Treat = n1*((m1 - x.bar)^2) + n2*(m2 - x.bar)^2 + n3*(m3 - x.bar)^2 + n4*(m4 - x.bar)^2 + n5*(m5 - x.bar)^2 + n6*(m6 - x.bar)^2
SS.Treat
(MS.Treat = SS.Treat/(g - 1))
(SS.Res = SS.Total - SS.Treat)
(MS.Res = SS.Res/(n1 + n2 + n3 + n4 + n5 + n6 - g))
(F.Test = MS.Treat/MS.Res)
|
/Multivariate Analysis I/class/week5/05-MANOVA.R
|
no_license
|
yc3356/courses
|
R
| false
| false
| 1,040
|
r
|
# Ex. 6.7 on p. 298)
g1 = c(9, 6, 9)
g2 = c(0, 2)
g3 = c(3, 1, 2)
Data <- data.frame(Y = c(g1, g2, g3),
Group = factor(rep(c("g1", "g2", "g3"), times=c(length(g1), length(g2), length(g3)))))
Data
m1 = aov(Y~ Group, data=Data)
anova(m1)
# Read QueenslandFuel.txt data
x <- read.table(file.choose(), header = TRUE)
# ANOVA
summary(aov(Price ~ City, data = x))
# Do not reject Ho
# Manual computation
m1 <- mean(x[1:9, 3])
m2 <- mean(x[10:18, 3])
m3 <- mean(x[19:27, 3])
m4 <- mean(x[28:36, 3])
m5 <- mean(x[37:45, 3])
m6 <- mean(x[45:54, 3])
(x.bar = mean(x$Price))
n1 = n2 = n3 = n4 = n5 = n6 = 9
g = 6
SS.mean = (n1 + n2 + n3 + n4 + n5 + n6)*((x.bar)^2)
(SS.Total = sum(x$Price^2) - SS.mean)
SS.Treat = n1*((m1 - x.bar)^2) + n2*(m2 - x.bar)^2 + n3*(m3 - x.bar)^2 + n4*(m4 - x.bar)^2 + n5*(m5 - x.bar)^2 + n6*(m6 - x.bar)^2
SS.Treat
(MS.Treat = SS.Treat/(g - 1))
(SS.Res = SS.Total - SS.Treat)
(MS.Res = SS.Res/(n1 + n2 + n3 + n4 + n5 + n6 - g))
(F.Test = MS.Treat/MS.Res)
|
setwd("C:/Users/faisal/OneDrive - eSevens/HD Backup/My University/Study/Kanazawa University/Riset/Code/R/AminoAcidComposition")
#setwd("C:/Users/M Reza Faisal/Documents/OneDriveBussiness/OneDrive - eSevens/HD Backup/My University/Study/Kanazawa University/Riset/Code/R/AminoAcidComposition")
rm(list = ls())
library(randomForest)
library(kernlab)
library(caret)
divider_j = 5
k_value = 5
cross_num=5
feature_count.init = 50
feature_count = 50
feature_count.max = 1019
feature_increment = 50
file_index = c(1:cross_num)
file_name_pattern = c("chlo", "cytop", "cytos", "er", "extr", "golgi", "lyso", "mito", "nuc", "pero", "plas", "vacu")
dividers = c(2,3,4,5)
dividers = c(divider_j)
main_data.prediction.filename = "ksvm.main_data.prediction.ver2.50inc.csv"
main_data.class.filename = "ksvm.main_data.class.ksvm.ver2.50inc.csv"
features.existing.filename = "kknn-features-rangking-original-overlapped.50inc.csv"
#baca features
features.existing = read.csv(paste0("result/overlapped/", features.existing.filename), stringsAsFactors = FALSE)
print(paste(Sys.time(), "KSVM ALL-original-overlapped Step: 50"))
#print(paste("Divider: ", divider_j))
if(exists("main_data.perf")){
rm("main_data.perf")
}
if(exists("main_data.ranking")){
rm("main_data.ranking")
}
file_prefix = paste0("ALL-original-overlapped-")
#classification - start
#klasifikasi akan dilakukan berdasarkan feature-feature yang diinginkan
#jumlah feature akan dimulai dari 2 sampai akhir kolom feature
#==========================================================================================
if(exists("main_data.prediction")){
rm("main_data.prediction")
}
if(exists("main_data.performance")){
rm("main_data.performance")
}
if(exists("main_data.class")){
rm("main_data.class")
}
for(cross_i in 1:cross_num){
feature_count = feature_count.init
if(exists("main_data.train")){
rm("main_data.train")
}
if(exists("main_data.test")){
rm("main_data.test")
}
if(exists("main_data.class.temp")){
rm("main_data.class.temp")
}
if(exists("main_data.prediction.temp")){
rm("main_data.prediction.temp")
}
#mengumpulkan data training
for(file_j in file_index[-cross_i]){
for(file_name_i in file_name_pattern){
main_data.train.file = paste0("overlapped/all/", file_prefix, file_name_i, file_j, ".csv")
if(!exists("main_data.train")){
assign("main_data.train", read.csv(main_data.train.file, stringsAsFactors = FALSE))
} else {
main_data.train = rbind.data.frame(main_data.train, read.csv(main_data.train.file, stringsAsFactors = FALSE))
}
}
}
#main_data.train = cbind.data.frame(main_data.train[,result.rf.features[1:feature_count]], main_data.train$class)
colnames(main_data.train)[ncol(main_data.train)] = "class"
main_data.train$class = as.factor(main_data.train$class)
#mengumpulkan data testing
for(file_name_i in file_name_pattern){
main_data.test.file = paste0("overlapped/all/", file_prefix, file_name_i, cross_i, ".csv")
if(!exists("main_data.test")){
assign("main_data.test", read.csv(main_data.test.file, stringsAsFactors = FALSE))
} else {
main_data.test = rbind.data.frame(main_data.test, read.csv(main_data.test.file, stringsAsFactors = FALSE))
}
}
#main_data.test = cbind.data.frame(main_data.test[,result.rf.features[1:feature_count]], main_data.test$class)
colnames(main_data.test)[ncol(main_data.test)] = "class"
main_data.test$class = as.factor(main_data.test$class)
#menggunakan features yang telah ada - start
#==========================================================================================
main_data.train = na.omit(main_data.train)
main_data.test = na.omit(main_data.test)
result.rf.features = unlist(features.existing[cross_i,])
#==========================================================================================
#menggunakan features yang telah ada - end
feature_count.max = length(result.rf.features)
while(feature_count < feature_count.max){
print(feature_count)
#membuat data training dan testing dengan feature sesuai urutan feature penting hasil dari feature selection - start
#==========================================================================================
main_data.train.temp = cbind.data.frame(main_data.train[,result.rf.features[1:feature_count]], main_data.train$class)
colnames(main_data.train.temp)[ncol(main_data.train.temp)] = "class"
main_data.train.temp$class = as.factor(main_data.train.temp$class)
main_data.test.temp = cbind.data.frame(main_data.test[,result.rf.features[1:feature_count]], main_data.test$class)
colnames(main_data.test.temp)[ncol(main_data.test.temp)] = "class"
main_data.test.temp$class = as.factor(main_data.test.temp$class)
#==========================================================================================
#classification
#classification_model = kknn(class~., main_data.train.temp, main_data.test.temp[,-ncol(main_data.test.temp)], k = k_value, kernel = "triangular")
#predict_result <- fitted(classification_model)
classification_model = ksvm(class~., main_data.train.temp, type = "spoc-svc")
predict_result <- predict(classification_model, main_data.test.temp[,-(ncol(main_data.test.temp))])
if(!exists("main_data.prediction.temp")){
assign("main_data.prediction.temp", as.character(predict_result))
} else {
main_data.prediction.temp = rbind(main_data.prediction.temp, as.character(predict_result))
}
if(!exists("main_data.class.temp")){
assign("main_data.class.temp", as.character(main_data.test.temp$class))
} else {
main_data.class.temp = rbind(main_data.class.temp, as.character(main_data.test.temp$class))
}
feature_count = feature_count + feature_increment
if(feature_count > length(result.rf.features)){
feature_count = length(result.rf.features)
}
}
#mengumpulkan seluruh data testing
if(!exists("main_data.class")){
assign("main_data.class", main_data.class.temp)
} else {
main_data.class = cbind.data.frame(main_data.class, main_data.class.temp)
}
if(!exists("main_data.prediction")){
assign("main_data.prediction", main_data.prediction.temp)
} else {
main_data.prediction = cbind(main_data.prediction, main_data.prediction.temp)
}
print("class")
print(dim(main_data.class))
print("prediction")
print(dim(main_data.prediction))
print(paste("cross:",cross_i,"======================================================="))
}
#write result
#jika diperlukan kembali dikemudian hari
write.csv(main_data.prediction, paste0("result/perf/", main_data.prediction.filename), row.names = FALSE)
write.csv(main_data.class, paste0("result/perf/", main_data.class.filename), row.names = FALSE)
#read data jika diperlukan
main_data.prediction = read.csv(paste0("result/perf/", main_data.prediction.filename), stringsAsFactors = FALSE)
main_data.class = read.csv(paste0("result/perf/", main_data.class.filename), stringsAsFactors = FALSE)
#menghitung akurasi
# For every stored prediction row (one row per feature-count setting),
# compute total accuracy and class-averaged ("local") accuracy.
for (predict_i in seq_len(nrow(main_data.prediction))) {
  predicted <- as.character(unlist(main_data.prediction[predict_i, ]))
  actual    <- as.character(unlist(main_data.class[predict_i, ]))

  # Total accuracy: fraction of positions where prediction equals the label.
  # Vectorised mean() replaces the original element-by-element counting loop.
  total_acc <- mean(predicted == actual)
  print(paste("Total Acc:", total_acc))

  # Local (balanced) accuracy: per-class accuracy averaged over the classes
  # present in the labels, so small classes weigh as much as large ones.
  # tapply groups the hit/miss vector by class, matching the original's
  # per-class loop over table(class) levels.
  per_class_acc <- tapply(predicted == actual, actual, mean)
  local.acc <- mean(per_class_acc)
  print(paste("Local Acc:", local.acc))
  print(paste(predict_i, "=================="))

  # Accumulate one (total, local) row per prediction setting.
  perf.temp = c(total_acc, local.acc)
  if (!exists("main_data.perf")) {
    assign("main_data.perf", perf.temp)
  } else {
    main_data.perf = rbind.data.frame(main_data.perf, perf.temp)
  }
}
# for(predict_i in 1:nrow(main_data.prediction)){
# matrix.sens.spec = confusionMatrix(unlist(main_data.prediction[predict_i,]), unlist(main_data.class[predict_i,]))
# perf.temp = colSums(matrix.sens.spec$byClass[,c(5:7)], na.rm = TRUE)/(nrow(matrix.sens.spec$byClass[,c(5:7)]))
# print(colSums(matrix.sens.spec$byClass[,c(5:7)], na.rm = TRUE)/(nrow(matrix.sens.spec$byClass[,c(5:7)])))
#
# if(!exists("main_data.perf")){
# assign("main_data.perf", perf.temp)
# } else {
# main_data.perf = rbind.data.frame(main_data.perf, perf.temp)
# }
# print(paste(predict_i,"========================================="))
# }
#==========================================================================================
#classification - end
colnames(main_data.perf) = c("TotalAcc", "LocalAcc")
write.csv(main_data.perf, paste0("result/overlapped/ksvm-cv-original-overlapped.50inc.accuracy.csv"), row.names = FALSE, quote = FALSE)
#colnames(main_data.ranking) = paste0("V", c(1:ncol(main_data.ranking)))
#write.csv(main_data.ranking, paste0("result/overlapped/kknn-features-rangking-original-overlapped.1inc.1013-1019.ver4.csv"), row.names = FALSE, quote = FALSE)
|
/Step6.OverlappedOriginal.AllFeature.ExistingFeatures.Classification.KSVM.Counter50.Acc.Ver7.R
|
no_license
|
rezafaisal/ProteinClassification
|
R
| false
| false
| 10,353
|
r
|
setwd("C:/Users/faisal/OneDrive - eSevens/HD Backup/My University/Study/Kanazawa University/Riset/Code/R/AminoAcidComposition")
#setwd("C:/Users/M Reza Faisal/Documents/OneDriveBussiness/OneDrive - eSevens/HD Backup/My University/Study/Kanazawa University/Riset/Code/R/AminoAcidComposition")
rm(list = ls())
library(randomForest)
library(kernlab)
library(caret)
divider_j = 5
k_value = 5
cross_num=5
feature_count.init = 50
feature_count = 50
feature_count.max = 1019
feature_increment = 50
file_index = c(1:cross_num)
file_name_pattern = c("chlo", "cytop", "cytos", "er", "extr", "golgi", "lyso", "mito", "nuc", "pero", "plas", "vacu")
dividers = c(2,3,4,5)
dividers = c(divider_j)
main_data.prediction.filename = "ksvm.main_data.prediction.ver2.50inc.csv"
main_data.class.filename = "ksvm.main_data.class.ksvm.ver2.50inc.csv"
features.existing.filename = "kknn-features-rangking-original-overlapped.50inc.csv"
#baca features
features.existing = read.csv(paste0("result/overlapped/", features.existing.filename), stringsAsFactors = FALSE)
print(paste(Sys.time(), "KSVM ALL-original-overlapped Step: 50"))
#print(paste("Divider: ", divider_j))
if(exists("main_data.perf")){
rm("main_data.perf")
}
if(exists("main_data.ranking")){
rm("main_data.ranking")
}
file_prefix = paste0("ALL-original-overlapped-")
#classification - start
#klasifikasi akan dilakukan berdasarkan feature-feature yang diinginkan
#jumlah feature akan dimulai dari 2 sampai akhir kolom feature
#==========================================================================================
if(exists("main_data.prediction")){
rm("main_data.prediction")
}
if(exists("main_data.performance")){
rm("main_data.performance")
}
if(exists("main_data.class")){
rm("main_data.class")
}
for(cross_i in 1:cross_num){
feature_count = feature_count.init
if(exists("main_data.train")){
rm("main_data.train")
}
if(exists("main_data.test")){
rm("main_data.test")
}
if(exists("main_data.class.temp")){
rm("main_data.class.temp")
}
if(exists("main_data.prediction.temp")){
rm("main_data.prediction.temp")
}
#mengumpulkan data training
for(file_j in file_index[-cross_i]){
for(file_name_i in file_name_pattern){
main_data.train.file = paste0("overlapped/all/", file_prefix, file_name_i, file_j, ".csv")
if(!exists("main_data.train")){
assign("main_data.train", read.csv(main_data.train.file, stringsAsFactors = FALSE))
} else {
main_data.train = rbind.data.frame(main_data.train, read.csv(main_data.train.file, stringsAsFactors = FALSE))
}
}
}
#main_data.train = cbind.data.frame(main_data.train[,result.rf.features[1:feature_count]], main_data.train$class)
colnames(main_data.train)[ncol(main_data.train)] = "class"
main_data.train$class = as.factor(main_data.train$class)
#mengumpulkan data testing
for(file_name_i in file_name_pattern){
main_data.test.file = paste0("overlapped/all/", file_prefix, file_name_i, cross_i, ".csv")
if(!exists("main_data.test")){
assign("main_data.test", read.csv(main_data.test.file, stringsAsFactors = FALSE))
} else {
main_data.test = rbind.data.frame(main_data.test, read.csv(main_data.test.file, stringsAsFactors = FALSE))
}
}
#main_data.test = cbind.data.frame(main_data.test[,result.rf.features[1:feature_count]], main_data.test$class)
colnames(main_data.test)[ncol(main_data.test)] = "class"
main_data.test$class = as.factor(main_data.test$class)
#menggunakan features yang telah ada - start
#==========================================================================================
main_data.train = na.omit(main_data.train)
main_data.test = na.omit(main_data.test)
result.rf.features = unlist(features.existing[cross_i,])
#==========================================================================================
#menggunakan features yang telah ada - end
feature_count.max = length(result.rf.features)
while(feature_count < feature_count.max){
print(feature_count)
#membuat data training dan testing dengan feature sesuai urutan feature penting hasil dari feature selection - start
#==========================================================================================
main_data.train.temp = cbind.data.frame(main_data.train[,result.rf.features[1:feature_count]], main_data.train$class)
colnames(main_data.train.temp)[ncol(main_data.train.temp)] = "class"
main_data.train.temp$class = as.factor(main_data.train.temp$class)
main_data.test.temp = cbind.data.frame(main_data.test[,result.rf.features[1:feature_count]], main_data.test$class)
colnames(main_data.test.temp)[ncol(main_data.test.temp)] = "class"
main_data.test.temp$class = as.factor(main_data.test.temp$class)
#==========================================================================================
#classification
#classification_model = kknn(class~., main_data.train.temp, main_data.test.temp[,-ncol(main_data.test.temp)], k = k_value, kernel = "triangular")
#predict_result <- fitted(classification_model)
classification_model = ksvm(class~., main_data.train.temp, type = "spoc-svc")
predict_result <- predict(classification_model, main_data.test.temp[,-(ncol(main_data.test.temp))])
if(!exists("main_data.prediction.temp")){
assign("main_data.prediction.temp", as.character(predict_result))
} else {
main_data.prediction.temp = rbind(main_data.prediction.temp, as.character(predict_result))
}
if(!exists("main_data.class.temp")){
assign("main_data.class.temp", as.character(main_data.test.temp$class))
} else {
main_data.class.temp = rbind(main_data.class.temp, as.character(main_data.test.temp$class))
}
feature_count = feature_count + feature_increment
if(feature_count > length(result.rf.features)){
feature_count = length(result.rf.features)
}
}
#mengumpulkan seluruh data testing
if(!exists("main_data.class")){
assign("main_data.class", main_data.class.temp)
} else {
main_data.class = cbind.data.frame(main_data.class, main_data.class.temp)
}
if(!exists("main_data.prediction")){
assign("main_data.prediction", main_data.prediction.temp)
} else {
main_data.prediction = cbind(main_data.prediction, main_data.prediction.temp)
}
print("class")
print(dim(main_data.class))
print("prediction")
print(dim(main_data.prediction))
print(paste("cross:",cross_i,"======================================================="))
}
#write result
#jika diperlukan kembali dikemudian hari
write.csv(main_data.prediction, paste0("result/perf/", main_data.prediction.filename), row.names = FALSE)
write.csv(main_data.class, paste0("result/perf/", main_data.class.filename), row.names = FALSE)
#read data jika diperlukan
main_data.prediction = read.csv(paste0("result/perf/", main_data.prediction.filename), stringsAsFactors = FALSE)
main_data.class = read.csv(paste0("result/perf/", main_data.class.filename), stringsAsFactors = FALSE)
#menghitung akurasi
for(predict_i in 1:nrow(main_data.prediction)){
prediction.data = cbind(unlist(main_data.prediction[predict_i,]), as.character(unlist(main_data.class[predict_i,])))
prediction.data = as.data.frame(prediction.data)
colnames(prediction.data) = c("predict","class")
#total akurasi (total akurasi = acc dari confusionMatrix)
is_prediction_true.count = 0
for(prediction.data_i in 1:nrow(prediction.data)){
if(as.character(prediction.data[prediction.data_i, 1]) == as.character(prediction.data[prediction.data_i, 2])){
is_prediction_true.count = is_prediction_true.count + 1
}
}
total_acc = is_prediction_true.count/nrow(prediction.data)
print(paste("Total Acc:", total_acc))
# matrix.sens.spec = confusionMatrix(as.character(prediction.data$predict), as.character(prediction.data$class))
#print(paste("Total Acc:",matrix.sens.spec$overall[1]))
#lokal akurasi
#menghitung akurasi setiap class kemudian dijumlahkan
#kemudian dibagi dengan jumlah class
local.acc.temp = 0
prediction.data.classes = as.character(as.data.frame(table(prediction.data$class))$Var1)
for(prediction.data.class in prediction.data.classes){
prediction.data.temp = prediction.data[which(prediction.data$class == prediction.data.class),]
is_prediction_true.count = 0
for(prediction.data_i in 1:nrow(prediction.data.temp)){
if(as.character(prediction.data.temp[prediction.data_i, 1]) == as.character(prediction.data.temp[prediction.data_i, 2])){
is_prediction_true.count = is_prediction_true.count + 1
}
}
local_acc = is_prediction_true.count/nrow(prediction.data.temp)
local.acc.temp = local.acc.temp + local_acc
#print(local_acc)
}
local.acc = local.acc.temp/length(prediction.data.classes)
print(paste("Local Acc:", local.acc))
print(paste(predict_i,"=================="))
perf.temp = c(total_acc, local.acc)
if(!exists("main_data.perf")){
assign("main_data.perf", perf.temp)
} else {
main_data.perf = rbind.data.frame(main_data.perf, perf.temp)
}
}
# for(predict_i in 1:nrow(main_data.prediction)){
# matrix.sens.spec = confusionMatrix(unlist(main_data.prediction[predict_i,]), unlist(main_data.class[predict_i,]))
# perf.temp = colSums(matrix.sens.spec$byClass[,c(5:7)], na.rm = TRUE)/(nrow(matrix.sens.spec$byClass[,c(5:7)]))
# print(colSums(matrix.sens.spec$byClass[,c(5:7)], na.rm = TRUE)/(nrow(matrix.sens.spec$byClass[,c(5:7)])))
#
# if(!exists("main_data.perf")){
# assign("main_data.perf", perf.temp)
# } else {
# main_data.perf = rbind.data.frame(main_data.perf, perf.temp)
# }
# print(paste(predict_i,"========================================="))
# }
#==========================================================================================
#classification - end
colnames(main_data.perf) = c("TotalAcc", "LocalAcc")
write.csv(main_data.perf, paste0("result/overlapped/ksvm-cv-original-overlapped.50inc.accuracy.csv"), row.names = FALSE, quote = FALSE)
#colnames(main_data.ranking) = paste0("V", c(1:ncol(main_data.ranking)))
#write.csv(main_data.ranking, paste0("result/overlapped/kknn-features-rangking-original-overlapped.1inc.1013-1019.ver4.csv"), row.names = FALSE, quote = FALSE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.