blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54a3a38f72f83a6ffa7ed50797d6581c967dc00c
|
000898e609c09e5f7243a56e2a24129f90749690
|
/man/gradientPickerD3_example.Rd
|
2d3781b6c492da2efb4eb37c498aba885fd16a68
|
[] |
no_license
|
cran/gradientPickerD3
|
233485a7e45dec09ea28dcd4dc9261ba6472eb3b
|
a79d9cbb1c3ac4bc45657060903c60e937a6a332
|
refs/heads/master
| 2021-06-27T01:44:07.758976
| 2017-09-15T10:56:09
| 2017-09-15T10:56:09
| 103,651,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 453
|
rd
|
gradientPickerD3_example.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gradientPickerD3_example.R
\name{gradientPickerD3_example}
\alias{gradientPickerD3_example}
\title{gradientPickerD3_example}
\usage{
gradientPickerD3_example()
}
\description{
Creates an example shiny app which includes the gradientPickerD3 widget and a rendered table for the gradientPickerD3 return value. By clicking the reload button, new random ticks will be generated.
}
|
2a8c44bc801119f47f734dbb632af87c103ad272
|
c52543d76f7082fc5bfb63d975ac6b2fed9fbe74
|
/R-Prog/Exercise 4-20150608.R
|
a318a64d67d0bcec12cc284c9afd56b7850ce27f
|
[] |
no_license
|
YoganandaMN/Coursework
|
d8d17298d2e3a53b7d55da432d7835b2544de9af
|
30335d879d1234c8ae1aebba8542f18cb9da80fc
|
refs/heads/master
| 2016-08-12T08:57:58.660726
| 2015-07-16T15:03:11
| 2015-07-16T15:03:11
| 36,800,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
Exercise 4-20150608.R
|
#Exercise 4: Redo the previous exercise a more efficient way, i.e., without using a for loop.
#(Hint: This may be a bit more challenging than it looks; you have to think outside the box.)
# Draw and display ten batches of uniform random numbers.
#
# Each of the 10 repetitions: samples 10 values from Uniform(-50, 50),
# prints the raw sample, prints the sorted sample, and prints the midpoint
# between the smallest and largest value. Called for its console output;
# returns NULL invisibly.
RandomGeneration <- function() {
  iteration <- 1
  repeat {
    if (iteration > 10) break
    draws <- runif(10, min = -50, max = 50)
    print(draws)
    cat("\n")
    ordered <- sort.int(draws)
    print(ordered)
    cat("\n")
    # Midpoint of the sample range: mean of the two extremes.
    centre <- (ordered[1] + ordered[10]) / 2
    label <- "midpoint="
    cat(label, centre, "\n")
    cat("\n")
    iteration <- iteration + 1
  }
}
|
97e80462af9b2380febf53ba906527010a1851d7
|
319c8effd49600b5796cd1759063b0b8f10aeac1
|
/workspace/CRISPR/mKO_FEF/species_specific/cis_trans/scatterplot_par_hyb.r.2018080917
|
15cf80cf8a4cf33382247796542d0b28024bcb9e
|
[] |
no_license
|
ijayden-lung/hpc
|
94ff6b8e30049b1246b1381638a39f4f46df655c
|
6e8efdebc6a070f761547b0af888780bdd7a761d
|
refs/heads/master
| 2021-06-16T14:58:51.056045
| 2021-01-27T02:51:12
| 2021-01-27T02:51:12
| 132,264,399
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,135
|
2018080917
|
scatterplot_par_hyb.r.2018080917
|
#!/usr/bin/env Rscript
# Scatterplot of parental vs hybrid mouse expression (log2 FPKM), coloured
# by quadrant assignment, with strongly divergent genes labelled.
#
# Usage: Rscript scatterplot_par_hyb.r <table.tsv> <out.pdf>
#   args[1]: tab-separated table; needs columns mouse_par_fpkm,
#            mouse_hyb_fpkm, quadrant, gene_name (a 'color' column was used
#            only by the commented-out counts below).
#   args[2]: output PDF path.
library(ggplot2)
library(ggrepel)
args<-commandArgs(T)
# Open the PDF device first; the ggplot object auto-prints onto it below.
# NOTE(review): there is no dev.off() at the end — this relies on the device
# being closed when the Rscript session exits. Confirm the PDF is flushed.
pdf(args[2])
data = read.table(args[1],header=TRUE,sep="\t")
# Old per-quadrant counts (previously annotated on the plot):
#quad1= nrow(subset(data,data$color=="yellow"))
#quad2= nrow(subset(data,data$color=="blue"))
#quad3= nrow(subset(data,data$color=="red"))
#quad4= nrow(subset(data,data$color=="green"))
# One point per gene on a log2 FPKM scale, coloured by quadrant label.
ggplot(data, aes(x=log2(mouse_par_fpkm), y=log2(mouse_hyb_fpkm)))+
geom_point(aes(color=quadrant ),size=1)+
scale_color_manual(values=c(quad1="orange",quad2="cyan",quad3="red",quad4="darkgreen",quad0="grey90"))+
#geom_hline(yintercept=0)+
#geom_vline(xintercept=0)+
theme_bw()+
theme(legend.position=c(0.1,0.9),legend.title=element_blank())+
# Label genes outside quad0 whose parental/hybrid FPKM ratio is >2 or <0.5.
geom_text_repel(aes(label=gene_name),size=3, data=subset(data,quadrant!="quad0" & (mouse_par_fpkm/mouse_hyb_fpkm >2 | mouse_par_fpkm/mouse_hyb_fpkm <0.5)))
#labs(x=paste("fef ",xtitle, " proliferation rate"),y="mef proliferaion rate")+
#annotate("text", x=0.3, y=0.3, label=quad1, alpha=.5)+
#annotate("text", x=-0.3, y=0.3, label=quad2, alpha=.5)+
#annotate("text", x=-0.3, y=-0.3, label=quad3, alpha=.5)+
#annotate("text", x=0.3, y=-0.3, label=quad4, alpha=.5)
|
f52d39dfed73c2046fca8df526b2b11852132459
|
d7dda74ede6f6987d3b60b5a95140aa961c7f78c
|
/Variaveis.R
|
b2a86830661e46d8964c712c617a2211cea8967a
|
[] |
no_license
|
MaxuelAntenor/R-for-Data-Science
|
19e1ba83bc78aecbee35e65661fd2cc48e54f438
|
10a3ab4c3847214ed8a7899b3d2c86bd446f0ad8
|
refs/heads/master
| 2022-11-18T14:50:46.396356
| 2020-07-15T15:45:30
| 2020-07-15T15:45:30
| 274,220,290
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 568
|
r
|
Variaveis.R
|
# Creating variables (tutorial script).
# To run a line, click on it (or select a set of lines) and press
# CTRL + ENTER — this script is meant to be stepped through interactively,
# because several lines below fail on purpose to demonstrate errors.
# Declare the value 10 into variable 'a', then print it.
a <- 10
a
# Use 'b' before declaring it (errors: object 'b' not found), then declare
# it with value 10 and print it. R is case sensitive, so printing 'B'
# (capital) errors as well.
b
b = 10
b
B
# Convention note (translated from the original Portuguese): use '<-' when
# creating variables and '=' for arguments inside function calls.
# '->' assigns the LEFT value INTO the name on the RIGHT, so the first line
# below errors (10 is not a valid assignment target); the next two work.
c -> 10
10 -> c
c -> d
# Assign the sum into variable 'c'.
c <- a + b
a <- "Maxuel"
b <- "João"
# Same sum again, now with character values — errors: '+' has no method
# for character operands in R.
c <- a + b
|
a68ef439223943687284228352c93a18fd0c92df
|
d14ef67e34e3078dd351e9765bf9c67873d59641
|
/R/021.R
|
a1026178427bb04208b18eb90ce318406decff55
|
[] |
no_license
|
mix060514/Euler-project
|
2e3914f8f1bd8b229a7a26425efc6c0b955e8cfe
|
c2aa9ecb71df43bfe84123d93c7a34b64fbb3b4f
|
refs/heads/master
| 2020-05-20T12:16:14.644973
| 2019-05-14T05:20:10
| 2019-05-14T05:20:10
| 185,567,481
| 0
| 0
| null | 2019-05-08T09:51:28
| 2019-05-08T08:45:46
| null |
UTF-8
|
R
| false
| false
| 418
|
r
|
021.R
|
# All positive divisors of n, in increasing order (n itself included).
divisors <- function(n){
  candidates <- seq_len(n)
  candidates[n %% candidates == 0]
}
# sum of proper divisors
#
# v: increasing vector of ALL divisors of some n (as produced by
#    divisors()); the last element is n itself.
# Returns the aliquot sum of n: sum(v) minus the last element.
#
# Fix: the original kept length-1 input intact, so sopd(divisors(1))
# returned 1, but 1 has no proper divisors and its aliquot sum is 0.
# Dropping the last element unconditionally handles that case too: removing
# the only element leaves an empty vector, and sum(empty) is 0.
sopd <- function(v){
  sum(v[-length(v)])
}
# Project Euler 21: sum of all amicable numbers under 10000.
# Sanity checks on the classic amicable pair (220, 284).
sopd(divisors(220))
sopd(divisors(284))
AY <- 2:10000
# a = d(n) for each candidate n; b = d(d(n)) for the same candidates.
a <- vapply(AY, function(n) sopd(divisors(n)), numeric(1))
b <- vapply(a, function(n) sopd(divisors(n)), numeric(1))
# Amicable numbers satisfy d(d(n)) == n with d(n) != n (the inequality
# excludes perfect numbers, where a == b).
AY[AY == b & a != b]
sum(AY[AY == b & a != b])
|
9d07f1a01edaa2cf52bcc895aa528f211e50240d
|
5d0ad197f94a53680dc4172ed3b8f1e8384a7d27
|
/code/functions.R
|
ecdde38799fe7e807891152fe2109c352c38eed7
|
[
"MIT"
] |
permissive
|
markrobinsonuzh/os_monitor
|
3356cbc8fb2a826572a8f4d64d1a454a180ffe2b
|
a6acd4740c657b9ebae0a09945862666bf1345f0
|
refs/heads/master
| 2022-02-28T20:44:27.516655
| 2022-02-17T12:43:52
| 2022-02-17T12:43:52
| 243,106,445
| 2
| 1
|
MIT
| 2020-10-07T05:55:18
| 2020-02-25T21:29:29
|
R
|
UTF-8
|
R
| false
| false
| 4,261
|
r
|
functions.R
|
library(dplyr)
library(rentrez)
library(RefManageR)
library(scholar)
# Collapse "absent" values to NA: NULL and zero-length inputs both become a
# single NA; anything else passes through unchanged.
fix_null <- function(x) {
  if (length(x) > 0) x else NA
}
# grab everything from Entrez
#
# Fetch publication records from Entrez/PubMed and return them as a
# data.frame with one row per article.
#
# pmid_search: PubMed query string (used when just_ids is NULL).
# pmid_remove: PMIDs to drop from the search result.
# pmid_add:    PMIDs to append regardless of the search result.
# just_ids:    optional vector of PMIDs; when given, the search is skipped.
#
# Returns a data.frame with columns pubyear, title, authors, journal, doi,
# pmid; titles have HTML markup stripped and DOIs are lower-cased.
retrieve_from_entrez <- function(pmid_search, pmid_remove=NULL, pmid_add=NULL, just_ids=NULL) {
if(is.null(just_ids)) {
x <- entrez_search(db = "pubmed", term = pmid_search, retmax = 1000)
x$ids <- unique(c(base::setdiff(x$ids, pmid_remove), pmid_add))
} else {
x <- list(ids=just_ids)
}
summ <- entrez_summary(db = "pubmed", id = x$ids)
summ <- lapply(summ, function(w) {
data.frame(pubyear = fix_null(strsplit(w$pubdate, " ")[[1]][1]),
title = fix_null(w$title),
authors = fix_null(paste(w$authors$name, collapse = ", ")),
journal = fix_null(w$source),
doi = fix_null(w$articleids$value[w$articleids$idtype == "doi"]),
pmid = fix_null(w$articleids$value[w$articleids$idtype == "pubmed"]),
stringsAsFactors = FALSE)
})
summ <- do.call(rbind, summ)
# Tidy titles: drop a trailing period and inline <u>/<i> markup.
summ$title <- sub("\\.$","",summ$title)
summ$title <- gsub("</u>","",gsub("<u>","",summ$title, fixed=TRUE))
summ$title <- gsub("</i>","",gsub("<i>","",summ$title, fixed=TRUE))
summ$doi <- tolower(summ$doi)
# Fix: the original lines here were gsub("<","<",...) and gsub(">",">",...),
# which are no-ops — almost certainly a mangled version of decoding the
# HTML entities Entrez can leave in DOIs. Decoding is a no-op on clean DOIs.
summ$doi <- gsub("&lt;","<", summ$doi)
summ$doi <- gsub("&gt;",">", summ$doi)
summ
}
# grab everything from ORCID
# > with(works, table(type, `display-index`))
# display-index
# type -1 0 1
# data-set 0 9 0
# journal-article 2 117 44
# other 2 22 0
# report 0 4 0
# NOTE: should we be filtering to only 'journal-article' ?
#
# Fetch a person's works from ORCID and normalise them to a data.frame with
# columns title, journal, type, doi, year. Data-set entries are dropped and
# duplicate DOIs collapsed, preferring a row whose journal field is filled.
retrieve_from_orcid <- function(orcid) {
works <- rorcid::orcid_works(orcid)
# rorcid keys the result by ORCID id; take the first (only) person's table.
works <- works[[1]]$works
if(nrow(works)==0) {
# No works: return an empty frame with the expected columns and types.
df <- data.frame(title = character(0), journal=character(0),
type = character(0), doi = character(0),
year = integer(0))
return(df)
}
# Pull the DOI out of each work's external-id sub-table.
# NOTE(review): ifelse() is scalar here and keeps only its first value, so
# a work carrying several DOI entries silently loses all but one — confirm
# that is acceptable.
works$doi <- sapply(works$`external-ids.external-id`,
function(u) ifelse(nrow(u)>0, u$`external-id-value`[u$`external-id-type`=="doi"], NA))
works$doi <- tolower(works$doi)
# Strip the resolver prefix. (Pattern is a regex, so '.' matches any
# character — harmless for this prefix but not an exact-string match.)
works$doi <- gsub("http://dx.doi.org/", "", works$doi)
works <- works %>% filter(type != "data-set") %>%
mutate(title = title.title.value, journal = `journal-title.value`,
year = `publication-date.year.value`) %>%
select(title, journal, type, doi, year)
# Drop trailing periods from titles, then de-duplicate identical rows.
works$title <- sub("\\.$","",works$title)
works <- unique(works)
# Collapse remaining duplicates per DOI: keep a row with a non-NA journal
# when one exists, otherwise just the first row of the group.
works_split <- split(works, works$doi)
n <- sapply(works_split, nrow)
z <- lapply(works_split[n>=2], function(u) {
nas <- is.na(u$journal)
if( sum(nas)>0 & sum(!nas)>0 )
return(u[!nas,,drop=FALSE][1,,drop=FALSE])
else
return(u[1,,drop=FALSE])
})
df <- do.call(rbind,c(z,works_split[n==1]))
rownames(df) <- NULL
df
}
# Download a Google Scholar profile's publication list in pages of 100
# (offsets 0, 100, ..., 1000), de-duplicate the combined table, and return
# it with titles coerced from factor to character.
retrieve_from_scholar <- function(scholar_id) {
  page_offsets <- seq(0, 1000, by = 100)
  pages <- lapply(page_offsets, function(offset) {
    get_publications(scholar_id, cstart = offset, pagesize = 100, flush = FALSE)
  })
  pubs <- unique(do.call(rbind, pages))
  pubs$title <- as.character(pubs$title)
  pubs
}
# Tokenise each title into upper-cased words and record each word's rank
# (position) within its title.
#
# u: character vector of titles.
# Returns a list named by the original titles; each element is an integer
# vector 1..n named by the upper-cased tokens of that title. Tokens are
# split on space, the ASCII punctuation range space-'/' and backslash;
# empty tokens are discarded.
#
# Fix: the original used 1:n, so a title producing zero tokens made
# setNames(1:0, character(0)) error (names shorter than the vector);
# seq_len(n) yields integer(0) for n == 0 and handles that case cleanly.
split_to_rank <- function(u) {
  ss <- strsplit(u, "[ -/\\]")
  ss <- lapply(ss, function(v) {
    v <- v[nchar(v)>0]
    n <- length(v)
    setNames(seq_len(n), toupper(v))
  })
  setNames(ss, u)
}
# Similarity between two tokenised titles (named rank vectors as produced
# by split_to_rank): Spearman correlation of the ranks of the shared words,
# scaled by an overlap ratio. Pairs sharing four or fewer words score 0.
sentence_Dist <- function(a, b) {
  shared <- intersect(names(a), names(b))
  if (length(shared) <= 4) {
    return(0)
  }
  denom <- min(length(a), length(b), length(union(names(a), names(b))))
  overlap <- length(shared) / denom
  cor(a[shared], b[shared], method = "spearman") * overlap
}
# calculate score of similarity between two vectors of titles
#
# Entry [i, j] of the returned `dist` matrix is sentence_Dist() between the
# i-th title of x and the j-th title of y (both tokenised with
# split_to_rank). The original title vectors are returned as rows/cols.
calcScore <- function(x,y) {
  ranks_x <- split_to_rank(x)
  ranks_y <- split_to_rank(y)
  scores <- matrix(0, nrow = length(ranks_x), ncol = length(ranks_y))
  for (col in seq_along(ranks_y)) {
    for (row in seq_along(ranks_x)) {
      scores[row, col] <- sentence_Dist(ranks_x[[row]], ranks_y[[col]])
    }
  }
  list(dist = scores, rows = x, cols = y)
}
|
044191941d2221c35bf4a1d272d4d6c607251a39
|
cfb642c4568a403e7cd39b66e16dcaed0d08bd49
|
/man/plotIntLay.Rd
|
d302e8b04e4b9ed65dcdd50d857cc1e7a173dc16
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
JVAdams/EchoNet2Fish
|
5223bcdb98a43bb61cd629cb33f590cba9fd1fed
|
6e397345e55a13a0b3fca70df3701f79290d30b6
|
refs/heads/master
| 2023-06-22T17:56:41.457893
| 2021-02-08T16:08:09
| 2021-02-08T16:08:09
| 32,336,396
| 4
| 1
| null | 2023-06-09T17:36:08
| 2015-03-16T15:59:18
|
R
|
UTF-8
|
R
| false
| true
| 1,248
|
rd
|
plotIntLay.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotIntLay.R
\name{plotIntLay}
\alias{plotIntLay}
\title{Plot Acoustic Survey Data using Different Colored Symbols}
\usage{
plotIntLay(
interval,
layer,
group,
grouporder = sort(unique(group)),
colorz,
main = ""
)
}
\arguments{
\item{interval}{A numeric vector of intervals along the length of an acoustic transect.}
\item{layer}{A numeric vector of layers from surface to bottom along the vertical
water column of an acoustic transect, all values should be <= 0,
the same length as \code{interval}.}
\item{group}{A vector of group identifiers, the same length as \code{interval}.}
\item{grouporder}{A vector of unique group identifiers, providing the order that each group
will be plotted, the same length as \code{unique(group)}, default
\code{sort(unique(group))}.}
\item{colorz}{A vector of character or numeric colors to use,
the same length as \code{interval}.}
\item{main}{A character scalar of the main title of the plot, default "".}
}
\description{
Plot acoustic survey data, interval vs. layer, using different colored
symbols for data exploration purposes. Place multiple group-specific plots
on one page, using the same x- and y-scales.
}
|
75bb02b6a74265535ca51749843f381af78779ae
|
a0936eaffc5ff58736bfd8b1c2634f6a95294519
|
/R/list_trained_models.R
|
f61d54ce7d8ee56e1026b07bfce5dc617e176e90
|
[
"Apache-2.0"
] |
permissive
|
ShixiangWang/sigminer.prediction
|
2172ce5f3e95979de522f3b3d4b87a84f40785bd
|
0572bfc65fd9eb43bf0bc73b108881953b07787f
|
refs/heads/master
| 2022-07-10T22:22:52.245381
| 2022-06-25T09:09:40
| 2022-06-25T09:09:40
| 254,610,763
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,111
|
r
|
list_trained_models.R
|
#' List Current Available Trained Keras Models
#'
#' @return A `tibble` containing summary models.
#' @export
#'
#' @examples
#' list_trained_models()
list_trained_models <- function() {
  # Prefer the installed-package layout; fall back to a source checkout
  # where the bundled files still live under inst/extdata.
  ext_path <- system.file("extdata", package = "sigminer.prediction")
  if (!dir.exists(ext_path)) {
    ext_path <- system.file("inst", "extdata", package = "sigminer.prediction")
  }
  # Bundled model files, in the same order as the rows below.
  model_files <- c(
    "keras_model_for_all_cohorts_20200409.h5",
    "keras_model_for_wang_cohort_20200409.h5",
    "keras_model_for_mskcc_cohort_20200409.h5"
  )
  # One row per trained model: provenance plus accuracy summary.
  model_list <- dplyr::tibble(
    TargetCancerType = rep("PRAD", 3L),
    Application = c("Universal", "WES", "Target Sequencing"),
    Cohort = c("Combined", "Wang et al", "MSKCC 2020"),
    AccuracyTrainLast = c(0.904, 0.980, 0.974),
    AccuracyValLast = c(0.905, 0.960, 0.976),
    AccuracyTest = c(0.919, 0.984, 0.969),
    Date = as.Date(rep("2020-04-09", 3)),
    ModelFile = file.path(ext_path, model_files)
  )
  model_list <- dplyr::mutate(model_list, Index = dplyr::row_number())
  dplyr::select(model_list, "Index", dplyr::everything())
}
|
9724cf7272836e3772758d3d6d2cb68a5357c8e6
|
760c7b6fb3fdf1095e45a13a026c15d81641c695
|
/inst/shiny/myapp/ui.R
|
4a070fadc2876de42afd773f9e33a993e5637926
|
[] |
no_license
|
reese3928/methylGSA
|
b3c39291c1996e430cf01b9c66ac73e32b67bce4
|
b4f62eeaababc32c43be1954041ad06232891ff5
|
refs/heads/master
| 2021-06-07T11:26:27.911171
| 2021-05-08T18:48:43
| 2021-05-08T18:48:43
| 134,437,726
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,133
|
r
|
ui.R
|
library(shinycssloaders)
# Shiny UI for the methylGSA app: a navbar with three tabs.
# "Main" holds the analysis form (sidebar) and the results (main panel);
# "Help" and "About" render bundled markdown files.
ui <- navbarPage("methylGSA",
tabPanel("Main",
# Sidebar with a slider input for number of bins
sidebarLayout(
# Left column: upload + analysis options, submitted via the "GO!" button.
sidebarPanel(
HTML("Please upload CpG IDs and their p-values. See help page for instructions."
),
fileInput(inputId = "cpg.pval",
label = ""),
fluidRow(
column(6,
selectInput(inputId = "array.type",
label = "Array type:",
choices = c("450K", "EPIC"),
selected = "450K")
),
column(6,
selectInput(inputId = "group",
label = "Group:",
choices = c("all", "body", "promoter1", "promoter2"),
selected = "all")
)
),
selectInput(inputId = "GS.list",
label = "Gene sets tested:",
choices = c("Gene Ontology" = "GO", "KEGG", "Reactome"),
selected = "GO"),
sliderInput(inputId = "minsize",
label = "Minimum gene set size:",
min = 0, max = 1000, value = 100, step = 10),
sliderInput(inputId = "maxsize",
label = "Maximum gene set size:",
min = 0, max = 1000, value = 500, step = 10),
selectInput(inputId = "test.method",
label = "Test method:",
choices = c("methylglm", "gometh", "RRA(ORA)", "RRA(GSEA)"),
selected = "methylglm"),
actionButton("go","GO!")
#HTML("Please note, it may take some time for the results to show up.")
),
# Right column: status text plus a Table tab (downloadable results) and a
# Plot tab (display options + downloadable figure).
mainPanel(
htmlOutput("restext"),
tabsetPanel(type = "tabs",
tabPanel("Table",
withSpinner(DT::dataTableOutput("resTable"), type = 8),
downloadButton('download1',"Download as csv"),
downloadButton('download2',"Download as txt"),
tags$head(tags$style("#restext{font-size: 25px;}"))),
tabPanel("Plot",
fluidRow(
column(4,
numericInput(inputId = "ngs",
label = "Number of gene sets to display:",
value = 5,
min = 1,
max = NA)
),
column(4,
selectInput(inputId = "xaxis",
label = "x-axis:",
choices = c("Number of significant genes" = "Count", "Total genes" = "Size"),
selected = "Size")
),
column(4,
selectInput(inputId = "colorby",
label = "Color by:",
choices = c("Adjusted p-value" = "padj", "Raw p-value" = "pvalue"),
selected = "padj")
)
),
plotOutput("resPlot"),
downloadButton('downloadPlot1',"Download as pdf"),
downloadButton('downloadPlot2',"Download as png")
)
)
)
)
),
# Static documentation tabs rendered from markdown shipped with the app.
tabPanel("Help",
fluidRow(
column(8,
includeMarkdown("instructions.md")
)
)
),
tabPanel("About",
fluidRow(
column(6,
includeMarkdown("About.md")
)
)
)
)
|
76c3d3ff701a3841ffb3aa077a1b4402a833efaa
|
0b1a0e0daa124adf774e14a89dc09c1be978c5af
|
/man/occupancy.Rd
|
ad5308d8e9b4684a4f0462a8b2bcbaf2fdb4627d
|
[
"Apache-2.0"
] |
permissive
|
jdyen/occupancy
|
d1c3e75ce1f80589e8a13330f9723aebec841808
|
72234fa32339066f14528c780cf6b32dc8e33fbd
|
refs/heads/master
| 2020-06-14T23:03:11.455556
| 2019-07-05T06:03:26
| 2019-07-05T06:03:26
| 195,150,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,255
|
rd
|
occupancy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occupancy.R, R/package.R
\docType{package}
\name{occupancy}
\alias{occupancy}
\alias{occupancy-package}
\title{fit occupancy-detection models}
\usage{
occupancy(formula_occ, formula_detect, site_id, survey_id, data,
jags_settings = list())
}
\arguments{
\item{formula_occ}{model formula for occupancy component of the model. A two-sided
formula with the response variable on the left of the \code{~} operator and the
predictor variables on the right. Multiple predictor variables are separated by
\code{+} operators. Random effects are included with vertical bars, using the
notation \code{(1 | group)} to specify a random intercept for the variable
\code{group}. More complex random structures (e.g., random slopes) are not
supported.}
\item{formula_detect}{model formula for detection component of the model. A one-sided
formula with predictor variables on the right, formatted as for \code{formula_occ}.}
\item{site_id}{the name of the column in \code{data} in which site
identifiers are recorded.}
\item{survey_id}{the name of the column in \code{data} in which survey
identifiers are recorded.}
\item{data}{a \code{data.frame} in long format containing data on all variables. Required
variables include the response (detection-nondetection data), site and survey identifiers
(see \code{site_id} and \code{survey_id}, above), and predictor variables. Column names
must match the names used in \code{formula_occ} and \code{formula_detect}.}
\item{jags_settings}{optional list of MCMC settings. Any or all items can be altered
if needed. Options are:
\describe{
\item{\code{n_iter}}{the total number of MCMC iterations (including burn-in)}
\item{\code{n_burnin}}{the number of MCMC iterations to discard as a burn-in}
\item{\code{n_chains}}{the number of MCMC chains}
\item{\code{n_thin}}{thinning rate of MCMC samples}
\item{\code{parallel}}{logical, should chains be run in parallel?}
\item{\code{modules}}{JAGS modules to load}
\item{\code{params}}{character vector of parameters to store}
\item{\code{seed}}{seed used to initialise MCMC chains}
}}
}
\value{
\code{occupancy_model} - a \code{list} object that can be analysed using
functions described in \link[occupancy:methods]{methods}.
}
\description{
occupancy is a function to fit occupancy-detection models in JAGS from
within R. Models are specified with a formula interface and are supported by
methods to summarise, visualise, validate, and predict from fitted models.
occupancy lets you fit occupancy-detection models in JAGS without
having to prepare JAGS code. Models are specified with a formula interface
and can incorporate random effects. Models are supported by several methods
to summarise, visualise, validate, and predict from fitted models.
}
\details{
This function fits an occupancy-detection model in JAGS from two formulas:
\code{formula_occ} and \code{formula_detect}. Occupancy-detection models separate
the two processes of being present at a site and being detected given presence at
a site. This requires data from repeated visits (surveys) to sites.
The occupancy component of the model (presence at a site) is defined at
the site level. The detection component of the model (detections given presence)
is defined at the survey level. The model assumes that associations between
occupancy/detection and predictor variables are linear on a logit scale.
}
\examples{
\dontrun{
# fit a model to simulated data
mod <- occupancy(response ~ occ_predictor1 + occ_predictor2 +
(1 | occ_random1) + (1 | occ_random2),
~ detect_predictor1 + detect_predictor2 +
(1 | detect_random1),
site_id = "site",
survey_id = "survey",
data = occupancy_data,
jags_settings = list(n_iter = 1000, n_burnin = 500, n_thin = 2))
# plot the model coefficients
par(mfrow = c(2, 1))
plot(mod)
# extract the model coefficients
coef(mod)
# check model fit
calculate_metrics(mod)
}
\dontrun{
# a simple occupancy-detection model for artificial data
# build and sample
# plot coefficients
# validate
}
}
|
c97613492146a25478371068110ca736d7086c6c
|
5a831da99b4134816b29b9b72945deb84bebc52c
|
/Scripts/summaryZOIPM.R
|
40c546700b5d9a8a922fe620a18ab4ffb1794283
|
[] |
no_license
|
jucdiaz/Articulo3
|
d64a5681af78534a2d5aca92e06bf3c4f6756636
|
6a1d2ad3317f3cf8b3eb48e861a51650f5973993
|
refs/heads/master
| 2021-01-15T12:02:49.835223
| 2017-10-03T05:01:36
| 2017-10-03T05:01:36
| 99,646,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,202
|
r
|
summaryZOIPM.R
|
# S3 summary method for ZOIPM model fits.
#
# Prints Wald coefficient tables (estimate, SE, z, p) for the four
# fixed-effect predictors (mu, sigma, p0, p1) and for the random effects,
# using the inverse of the Hessian `mod$HM` for standard errors. Called for
# its printed output.
#
# NOTE(review): `link` is a free variable — not an argument and not defined
# in this file. Presumably a character vector of link-function names set up
# elsewhere in the package; confirm it is in scope when this method runs,
# otherwise every cat(paste(link[...])) call below fails.
summary.ZOIPM<-function(mod){
# Stack all point estimates in the same order as the rows of mod$HM.
estimate <- c(mod$Fixed_Parameters.mu,mod$Fixed_Parameters.sigma
,mod$Fixed_Parameters.p0,mod$Fixed_Parameters.p1,mod$Parameters.randoms[,1])
# Wald standard errors from the inverse Hessian.
se <- sqrt(diag(solve(mod$HM)))
zvalue <- estimate / se
# Two-sided normal-approximation p-values.
pvalue <- 2 * pnorm(abs(zvalue), lower.tail=F)
res <- cbind(estimate=estimate, se=se, zvalue=zvalue, pvalue=pvalue)
colnames(res) <- c('Estimate', 'Std. Error', 'z value', 'Pr(>|z|)')
res <- as.data.frame(res)
# Row-index ranges of `res` for each coefficient block, laid out as
# mu | sigma | p0 | p1 | random effects. (The names a-e shadow base
# functions like c() inside this function; left unchanged, and no base c()
# call occurs after the shadowing.)
a <- 1:length(mod$Fixed_Parameters.mu)
b <-
(length(mod$Fixed_Parameters.mu) + 1):(length(mod$Fixed_Parameters.mu) +
length(mod$Fixed_Parameters.sigma))
c <-
(length(mod$Fixed_Parameters.mu) + length(mod$Fixed_Parameters.sigma) +
1):(
length(mod$Fixed_Parameters.mu) + length(mod$Fixed_Parameters.sigma) + length(mod$Fixed_Parameters.p0)
)
d <-
(
length(mod$Fixed_Parameters.mu) + length(mod$Fixed_Parameters.sigma) + length(mod$Fixed_Parameters.p0) +
1
):(
length(mod$Fixed_Parameters.mu) + length(mod$Fixed_Parameters.sigma) + length(mod$Fixed_Parameters.p0) +
length(mod$Fixed_Parameters.p1)
)
e <-
(
length(mod$Fixed_Parameters.mu) + length(mod$Fixed_Parameters.sigma) + length(mod$Fixed_Parameters.p0) +
length(mod$Fixed_Parameters.p1) + 1
):(
length(mod$Fixed_Parameters.mu) + length(mod$Fixed_Parameters.sigma) + length(mod$Fixed_Parameters.p0) +
length(mod$Fixed_Parameters.p1) + length(mod$Parameters.randoms[, 1])
)
# mu block.
cat("---------------------------------------------------------------\n")
cat(paste("Fixed effects for ",
link[1], "(mu) \n", sep=''))
cat("---------------------------------------------------------------\n")
printCoefmat(res[a,], P.value=TRUE, has.Pvalue=TRUE)
# sigma block.
cat("---------------------------------------------------------------\n")
cat(paste("Fixed effects for ",
link[2], "(sigma) \n", sep=''))
cat("---------------------------------------------------------------\n")
printCoefmat(res[b,], P.value=TRUE, has.Pvalue=TRUE)
# p0 block (zero inflation).
cat("---------------------------------------------------------------\n")
cat(paste("Fixed effects for ",
link[3], "(p0) \n", sep=''))
cat("---------------------------------------------------------------\n")
printCoefmat(res[c,], P.value=TRUE, has.Pvalue=TRUE)
# p1 block (one inflation).
cat("---------------------------------------------------------------\n")
cat(paste("Fixed effects for ",
link[4], "(p1) \n", sep=''))
cat("---------------------------------------------------------------\n")
printCoefmat(res[d,], P.value=TRUE, has.Pvalue=TRUE)
# Random-effects block.
cat("---------------------------------------------------------------\n")
cat("---------------------------------------------------------------\n")
cat(paste("Random effects for mu and sigma \n",sep=''))
cat("---------------------------------------------------------------\n")
printCoefmat(res[e,], P.value=TRUE, has.Pvalue=TRUE)
cat("---------------------------------------------------------------\n")
cat("---------------------------------------------------------------\n")
}
|
5372c8fbdfaf39a521423a1ba9b35809f1d1cf8d
|
c53846243a231e977157d05c320537c3ec00a38c
|
/ui.R
|
cf9227f9054b53a1a06eecfc4d5419c823d8a779
|
[] |
no_license
|
atamaianalytics/SpatialValidation
|
ca7a5e310459dea4d02be932b4654e8d418cd4da
|
b080dc8454285f9220001cdd33b8e99f9202742c
|
refs/heads/master
| 2021-03-19T00:37:31.019826
| 2020-03-13T16:32:08
| 2020-03-13T16:32:08
| 247,114,998
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,031
|
r
|
ui.R
|
library(shiny)
library(shinydashboard)
library(leaflet)
library(leaflet.extras)
library(sp)
library(shinycssloaders)
library(dplyr)
library(ggplot2)
library(RODBC)
library(DT)
library(lmodel2)
library(zoo)
library(DBI)
library(odbc)
library(gtools)
library(rgdal)
library(plotly)
# Choices for the unit selector: parts-per-million vs percent.
units <- c("ppm","pct")
# Which attribute set to draw on the drillhole traces.
trace <- c("Assay","Geology")
# Dashboard header: app title plus two right-hand links (apps home page and
# the company site, the latter showing a logo image).
dbHeader <- dashboardHeader(title = "Drillhole Spatial Validation",
titleWidth = 300,
tags$li(a(href = 'https://www.shinyapps.io/',
icon("power-off"),
title = "Back to Apps Home"),
class = "dropdown"),
tags$li(a(href = 'http://www.atamaianalytics.com',
img(src = 'AtamaiAnalytics.jpeg',
title = "Company Home", height = "30px"),
style = "padding-top:10px; padding-bottom:10px;"),
class = "dropdown"))
# Shinydashboard app
# Shinydashboard UI: sidebar with navigation + step-by-step instructions,
# body with the selection/map tab and a 3D view tab.
# Fixes: corrected user-facing typos in the instruction strings
# ("fetech" -> "fetch", "prefered" -> "preferred"); layout unchanged.
ui <- dashboardPage(skin="green",
dbHeader,
dashboardSidebar(width=300,
sidebarMenu(
h4(HTML(paste0("<b>","Dashboard Tabs","</b>"))),
menuItem("Drillhole & Trace Selection", tabName = "dashboard", icon = icon("globe")),
br(),
menuItem("3D", tabName = "3d", icon = icon("cube")),
hr(),
h4(HTML(paste0("<b>","Instructions","</b>"))),
HTML(" (1) <br/> Select a Project then GO to fetch, and display, <br/>
drillholes associated with the Project. Zoom <br/> into the area of interest"),
br(),
br(),
HTML(" (2) <br/> Use drawing tools, on the left side of the map,
to <br/> highlight a selection of drillholes."),
br(),
br(),
HTML(" (3) <br/> Select an element, and preferred units, to <br/> calculate for drillhole traces."),
br(),
br(),
HTML(" (4) <br/> Select a geological attribute to calculate for <br/> drillhole traces."),
br(),
br(),
HTML(" (5) <br/> Select either Assay or Geology to display on the <br/> drillhole traces."),
br(),
br(),
HTML(" (6) <br/> Select GO to generate drillhole traces."),
br(),
br(),
HTML(" (7) <br/> Toggle Display Trace to view another attribute."),
br(),
br(),
HTML(" (8) <br/> Select the 3D tab to view the selection in 3D."),
hr(),
h4(HTML(paste0("<b>","Disclaimer","</b>"))),
"Do not use the results of this tool in public",
br(),
"reporting without an independent",
br(),
"verification and validation."
)
),
dashboardBody(
tabItems(
# Main tab: selection controls (left), leaflet map and section plots.
tabItem(tabName = "dashboard",
fluidRow(
column(width = 2,
box(background = "green",solidHeader = TRUE,width = NULL,
h4("Select Drillholes",style="font-weight:bold"),
hr(),
uiOutput("project"),
actionButton("getdata", "GO",style = "color: white;background-color: #35e51d")
),
box(background = "green",solidHeader = TRUE,width = NULL,
h4("Drill Trace & Attribute Selections",style="font-weight:bold"),
hr(),
fluidRow(
column(6,uiOutput("element")),
column(6,selectInput('Unit', 'Units',units,selected=""))
),
hr(),
fluidRow(
column(8,uiOutput("geoattrib"))
),
hr(),
fluidRow(
column(8,selectInput('Trace', 'Display Trace',trace,selected=""))
),
br(),
actionButton("desurvey", "GO",style = "color: white;background-color: #35e51d")
)
),
column(width = 5,
box(width = NULL,
leafletOutput("mymap")
),
box(width = NULL,
uiOutput("looknorthPlot")
)
),
column(width = 5,
box(width = NULL,
uiOutput("planviewPlot")
),
box(width = NULL,
uiOutput("lookeastPlot")
)
)
)
),
# 3D tab: plotly scene with a loading spinner while it renders.
tabItem(tabName = "3d",
plotlyOutput("threeD",
height = "800px",
width = "1000px") %>% withSpinner(type=4,color="#35e51d")
)
)
)
)
|
d8e92bfc139246cbce628d3990a922666d16ff43
|
f7942539a0685a3d359410a108e2ecbfe973f3db
|
/Assignment1_AirPolution/complete.R
|
cd7d28db84c1015bc0a6738c26012a0e652be7b5
|
[] |
no_license
|
MrBanhBao/Coursera-R-Programming-Assignments
|
8dab6a4b7fd3b9d406e36d8463f0dba6eb2d1c26
|
bb9be70b5df63a61dae21ae932a349ce4dba30de
|
refs/heads/master
| 2021-01-16T18:57:48.290187
| 2016-06-20T19:46:56
| 2016-06-20T19:46:56
| 58,775,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 830
|
r
|
complete.R
|
# Count complete (no-NA) observations in a set of monitor CSV files.
#
# directory: folder containing files named "001.csv", "002.csv", ... — one
#            per monitor id, zero-padded to three digits.
# id:        integer vector of monitor ids to read.
#            NOTE(review): the default here is 1:322; the course dataset
#            this assignment targets usually has monitors 1:332 — confirm
#            the intended default (kept as-is for compatibility).
# Returns a data.frame with columns id and nobs (number of rows with no NA
# in any column, per complete.cases), one row per requested id, in order.
#
# Cleanup vs the original: no local variable shadowing base::length, no
# manual element-by-element copy of `id`, and the file path is built with
# file.path()/sprintf instead of paste(..., sep="").
complete <- function(directory="specdata", id = 1:322) {
  nobs <- vapply(id, function(one_id) {
    path <- file.path(directory, sprintf("%03d.csv", one_id))
    monitor_data <- read.csv(path)
    sum(complete.cases(monitor_data))
  }, integer(1))
  data.frame(id = id, nobs = nobs)
}
# Smoke-test calls against the course dataset; the "## ..." comments record
# the expected console output. These require a local "specdata" directory
# with the monitor CSV files and will error without it.
complete("specdata", 1)
## id nobs
## 1 1 117
complete("specdata", c(2, 4, 8, 10, 12))
## id nobs
## 1 2 1041
## 2 4 474
## 3 8 192
## 4 10 148
## 5 12 96
# Ids may be given in any order; rows come back in the order requested.
complete("specdata", 30:25)
## id nobs
## 1 30 932
## 2 29 711
## 3 28 475
## 4 27 338
## 5 26 586
## 6 25 463
complete("specdata", 3)
## id nobs
## 1 3 243
|
468ec0eaa4b3f90b2c0156b874b9993d4bca5632
|
37483f89fb907baaa6062e5ce1681cfaf1794950
|
/functions_qiu.R
|
8e9a3654c27112709af1fc7a18e74d87b03aee54
|
[] |
no_license
|
morndorff/GoF-Test
|
8f80b7f44eddce550ca60791cebeb44ca7ec2524
|
fdcb4f3a14ad0b99428926e1f8ec7a1dfb79d457
|
refs/heads/master
| 2020-12-24T15:50:00.743321
| 2016-03-21T17:40:00
| 2016-03-21T17:40:00
| 21,781,869
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,126
|
r
|
functions_qiu.R
|
# Qiu 2009 Functions
# Empirical bin probabilities (f0) of x over the partition defined by
# `boundaries` (k cut points -> k + 1 bins). The `sum` argument is unused
# in the body but kept for call compatibility.
get_y <- function(x, boundaries, sum){
  bin_index <- findInterval(x, boundaries) + 1   # shift 0-based bins to 1-based
  bin_counts <- tabulate(bin_index, nbins = length(boundaries) + 1)
  bin_counts / length(x)
}
# Uniform bin probabilities: each of the num_bps bins gets mass 1/num_bps.
get_exact <- function(num_bps){
  rep_len(1 / num_bps, num_bps)
}
# Bin counts g(n) for the most recent batch(es) of observations.
#
# data:      numeric vector, or a matrix whose columns are batches
#            (newest batch in the last column).
# quan:      bin boundaries (quantiles of the in-control distribution).
# past_cols: how many earlier columns to pool with the newest one; must be
#            between 0 and ncol(data) - 1. Must be 0 for vector input.
#
# Returns the per-bin counts (length(quan) + 1 bins), invisibly — the
# original's last expression was an assignment, which also returns its
# value invisibly.
get_g <- function(data, quan, past_cols=0){
  n_cols <- dim(data)[2]
  if (is.null(n_cols) || n_cols == 1) {
    if (past_cols != 0) stop("Too many columns on vector!")
    recent <- data
  } else {
    recent <- data[, (n_cols - past_cols):n_cols]
  }
  # findInterval gives 0-based bin ids; tabulate needs positive integers.
  counts <- tabulate(findInterval(recent, quan) + 1, nbins = length(quan) + 1)
  invisible(counts)
}
# Simulate the run length of Qiu's (2009) CUSUM-type nonparametric chart
# on in-control N(0,1) batches, returning the chart statistic path and the
# first time it exceeds the control limit.
#
# ic_data: a sample of in control data (used for empirical bin boundaries).
# kP: allowance/restart parameter (Qiu says ~.05 is good).
# num_bps: number of break points between bins; NULL -> floor(sqrt(n)).
# control_limit: upper control limit (manual input, no default); the loop
#   runs until the statistic u exceeds it.
# m: batch size per time step.
# exact: use exact N(0,1) quantiles instead of empirical ones — currently
#   disabled via stop() below.
# s: sd of the small Gaussian noise added to the bin counts (tie breaking).
# Checked 3/2: Sobs and Sexp are calculated correctly
qiu_ARL <- function(ic_data=rnorm(500), kP=.1, num_bps=4, control_limit, m=5, exact=FALSE, s=.01) {
ic_data_length <- length(ic_data)
if(is.null(num_bps)){
num_bps <- floor(sqrt(ic_data_length))
}
if(exact){
# Exact-quantile path disabled: get_exact_boundaries is not defined here.
stop("Not Working Now")
ic_probs <- get_exact(num_bps + 1)
boundaries <- get_exact_boundaries(num_bps + 1)
}else{
# Equally-spaced empirical quantiles of the in-control sample.
boundaries <- quantile(ic_data, probs=seq(1/(num_bps + 1), (num_bps)/(num_bps+1), length.out=num_bps))
ic_probs <- get_y(ic_data, boundaries) #f0
}
data <- NULL
# Initializing S_exp and S_obs (one column per time step; column 1 = zeros).
S_obs <- matrix(0, nrow=(num_bps + 1), ncol=1)
S_exp <- matrix(0, nrow=(num_bps + 1), ncol=1)
u <- 0
mf0 <- m * ic_probs
i <- 0
num_bins <- num_bps + 1
while(tail(u, 1) < control_limit) {
i <- i + 1
# New batch of m in-control observations, appended as column i.
data <- cbind(data, rnorm(m))
g_n <- get_g(data = data[, i], quan=boundaries, past_cols=0) #g(n)
# Jitter counts slightly so C_n is almost surely nonzero.
g_n <- g_n + rnorm(num_bins, 0, s)
# print(g_n)
# C1 <- t((S_obs[, i] - S_exp[, i]) + (g_n - mf0)) # check tranposition
# C2 <- diag(1 / (S_exp[, i] + mf0))
# C3 <- t(C1)
# C_n: chi-square-like discrepancy between cumulated observed and
# expected counts (scalar form of the commented matrix product above).
C_n <- sum(((S_obs[, i] - S_exp[, i]) + (g_n - mf0))^2 / (S_exp[, i] + mf0))
# C_n <- C1 %*% C2 %*% C3
# C_n <- as.vector(C_n)
# print(paste("Cn", C_n))
# print(paste("Ct", Ct))
if(C_n <= kP){
# Restart: discrepancy within allowance, reset the CUSUM accumulators.
S_o_new = numeric(num_bps + 1)
S_e_new = numeric(num_bps + 1)
print(paste("Cn < kP at time", i))
}else{
# Shrink-and-accumulate update with allowance kP.
S_o_new <- (S_obs[, i] + g_n) * ((C_n - kP) / C_n)
S_e_new <- (S_exp[, i] + mf0) * ((C_n - kP) / C_n)
}
S_obs <- cbind(S_obs, S_o_new)
S_exp <- cbind(S_exp, S_e_new)
if(all(S_o_new==0)){
u <- append(u, 0)
}else{
U1 <- t(S_obs[, (i + 1)] - S_exp[, (i + 1)])
U2 <- diag( 1 / (S_exp[, (i + 1)]))
U3 <- t(U1)
}
# NOTE(review): this append runs on BOTH branches above. After a restart,
# u gets 0 appended and then a second value computed from stale U1/U2/U3
# from an earlier step — and if the restart happens on the very first
# iteration, U1 is undefined and this line errors. Likely the append was
# meant to live inside the else branch only; confirm against the paper.
u <- append(u, U1 %*% U2 %*% U3)
#print(u)
if(i ==103){
# Leftover debugging hook for a specific time step.
#print(data)
# print(g_n)
print(paste("Cn =" , C_n))
#print(S_obs)
#print(S_exp)
}
}
return(list("uP"=u, "Time OOC"=i))
}
qiu_Phase_II <- function(ic_data=rnorm(500), kP=.1, num_bps=10, control_limit=20, m=5, exact=TRUE,
                         tau=0,
                         ICdist="qnorm", IC_dist_ops=NULL,
                         OOCdist="qnorm", OOC_dist_ops=NULL,
                         s=.01){
  # Phase II run-length simulation for Qiu's Pearson-type CUSUM chart.
  # Batches are in control through time tau and out of control afterwards.
  #
  # Args:
  #   ic_data: sample of in-control data used to estimate bin boundaries
  #   kP: allowance/restart parameter (Qiu suggests ~.05)
  #   num_bps: number of break points defining the categorization bins;
  #            defaults to floor(sqrt(length(ic_data))) when NULL
  #   control_limit: upper control limit (manual input)
  #   m: batch size at each time point
  #   exact: if TRUE, use exact quantiles of ICdist for the boundaries
  #   tau: last in-control time point (0 = out of control from the start)
  #   ICdist/OOCdist: quantile-function names, e.g. "qnorm" (character)
  #   IC_dist_ops/OOC_dist_ops: extra args for the distributions,
  #                             e.g. list(mean=100, sd=2)
  #   s: sd of the Gaussian jitter added to the bin counts
  #     (new parameter with default — previously referenced but undefined)
  #
  # Returns: list with the charting-statistic path ("uP") and the run
  #   length ("Time OOC") at which the statistic first exceeded control_limit.
  ic_data_length <- length(ic_data)
  if(is.null(num_bps)){
    num_bps <- floor(sqrt(ic_data_length))
  }
  num_bins <- num_bps + 1  # BUG FIX: num_bins was used below but never defined
  if(exact){
    ic_probs <- get_exact(num_bps + 1)
    boundaries <- do.call(ICdist, c(list(seq(1/num_bps, (num_bps-1)/num_bps, length.out=num_bps)),
                                    IC_dist_ops))
  }else{
    boundaries <- quantile(ic_data, probs=seq(1/(num_bps + 1), (num_bps)/(num_bps+1), length.out=num_bps))
    ic_probs <- get_y(ic_data, boundaries) # f0: IC cell probabilities
  }
  # Convert quantile-function names (e.g. "qnorm") into RNG names (e.g. "rnorm")
  IC_gen <- dist.conv.str(ICdist, "r")
  OOC_gen <- dist.conv.str(OOCdist, "r")  # BUG FIX: previously converted ICdist,
                                          # so the OOC distribution was ignored
  rIC <- get(IC_gen, mode = "function", envir = parent.frame())
  rOOC <- get(OOC_gen, mode = "function", envir = parent.frame())
  # NOTE: the original body re-assigned m <- 5 here, silently overriding the
  # m parameter; that line has been removed so the argument is honored.
  data <- NULL
  S_obs <- matrix(0, nrow=num_bins, ncol=1)
  S_exp <- matrix(0, nrow=num_bins, ncol=1)
  u <- 0
  mf0 <- m * ic_probs  # expected cell counts for one batch of size m
  i <- 0
  while(tail(u, 1) < control_limit) {
    i <- i + 1
    if(i <= tau){
      data <- cbind(data, do.call(rIC, c(list(m), IC_dist_ops)))
    }else{
      data <- cbind(data, do.call(rOOC, c(list(m), OOC_dist_ops)))
    }
    g_n <- get_g(data = data[, i], quan=boundaries, past_cols=0) # observed cell counts
    g_n <- g_n + rnorm(num_bins, 0, s)  # jitter to avoid degenerate cells
    # Pearson-type restart statistic C_n
    C1 <- t((S_obs[, i] - S_exp[, i]) + (g_n - mf0))
    C2 <- diag(1 / (S_exp[, i] + mf0))
    C3 <- t(C1)
    C_n <- as.vector(C1 %*% C2 %*% C3)
    if(C_n <= kP){
      # Restart: reset the cumulative sums to zero
      S_o_new <- numeric(num_bins)
      S_e_new <- numeric(num_bins)
    }else{
      S_o_new <- (S_obs[, i] + g_n) * ((C_n - kP) / C_n)
      S_e_new <- (S_exp[, i] + mf0) * ((C_n - kP) / C_n)
    }
    S_obs <- cbind(S_obs, S_o_new)
    S_exp <- cbind(S_exp, S_e_new)
    if(all(S_o_new == 0)){
      # BUG FIX: on a restart the statistic is zero; the previous unconditional
      # update divided by an all-zero S_exp column, producing NaN.
      u <- append(u, 0)
    }else{
      U1 <- t(S_obs[, (i + 1)] - S_exp[, (i + 1)])
      U2 <- diag(1 / (S_exp[, (i + 1)]))
      U3 <- t(U1)
      u <- append(u, U1 %*% U2 %*% U3)
    }
  }
  return(list("uP"=u, "Time OOC"=i))
}
qiu_L_ARL <- function(ic_data=rnorm(500), kL=1, num_bps=5, control_limit, m=5, exact=FALSE,
                      additive_constant=.001, s=.01) {
  # In-control ARL simulation for Qiu's likelihood-ratio-type CUSUM chart.
  #
  # Args:
  #   ic_data: sample of in-control data used to estimate bin boundaries
  #   kL: allowance/restart parameter
  #   num_bps: number of break points defining the bins; defaults to
  #            floor(sqrt(length(ic_data))) when NULL
  #   control_limit: upper control limit (manual input)
  #   m: batch size at each time point
  #   exact: use exact N(0,1) quantiles instead of empirical ones
  #   additive_constant: offset added to an all-zero S_obs column to avoid log(0)
  #   s: currently unused; kept for signature consistency with qiu_ARL
  #
  # Returns: list with the charting-statistic path ("uP") and the run
  #   length ("Time OOC") at which the statistic first exceeded control_limit.
  ic_data_length <- length(ic_data)
  if(is.null(num_bps)){
    num_bps <- floor(sqrt(ic_data_length))
  }
  if(exact){
    ic_probs <- get_exact(num_bps + 1)
    boundaries <- get_exact_boundaries(num_bps + 1)
  }else{
    # Bin boundaries at equally spaced empirical quantiles of the IC data
    boundaries <- quantile(ic_data, probs=seq(1/(num_bps + 1), (num_bps)/(num_bps+1), length.out=num_bps))
    ic_probs <- get_y(ic_data, boundaries) # f0: IC cell probabilities
  }
  data <- NULL
  # S_obs / S_exp hold the (restarted) cumulative observed/expected cell counts
  S_obs <- matrix(0, nrow=(num_bps + 1), ncol=1)
  S_exp <- matrix(0, nrow=(num_bps + 1), ncol=1)
  u <- 0
  mf0 <- m * ic_probs  # expected cell counts per batch of size m
  i <- 0
  while(tail(u, 1) < control_limit) {
    i <- i + 1
    data <- cbind(data, rnorm(m))
    g_n <- get_g(data = data[, i], quan=boundaries, past_cols=0) # observed cell counts
    if(all(S_obs[, i]==0)){
      S_obs[, i] <- S_obs[, i] + additive_constant # avoids problems with log(0)
    }
    # Likelihood-ratio restart statistic:
    # C_n = 2 * sum((S_obs + g) * log((S_obs + g) / (S_exp + m*f0)))
    C1 <- 2 * t(S_obs[, i] + g_n)
    C2 <- log( (S_obs[, i] + g_n) / (S_exp[, i] + mf0))
    C_n <- as.vector(C1 %*% C2)
    if(C_n <= kL){
      # Restart: reset the cumulative sums to zero
      S_o_new <- numeric(num_bps + 1)
      S_e_new <- numeric(num_bps + 1)
    }else{
      S_o_new <- (S_obs[, i] + g_n) * ((C_n - kL) / C_n)
      S_e_new <- (S_exp[, i] + mf0) * ((C_n - kL) / C_n)
    }
    S_obs <- cbind(S_obs, S_o_new)
    S_exp <- cbind(S_exp, S_e_new)
    if(all(S_o_new==0)){
      u <- append(u, 0)
    }else{
      # Charting statistic u_n = 2 * S_obs' log(S_obs / (m*f0)).
      # (Leftover debug print(U1)/print(U2) calls removed.)
      U1 <- t(S_obs[, (i + 1)])
      U2 <- log(S_obs[, (i+1)] / mf0)
      u_new <- 2 * U1 %*% U2
      u <- append(u, u_new)
    }
  }
  return(list("uP"=u, "Time OOC"=i))
}
qiu_KS_ARL <- function(ic_data=rnorm(500), kK=.02, control_limit, m=5,
                       ICdist="rnorm", IC_dist_ops=NULL,
                       bootstrap_samples=3000, keep_data=FALSE) {
  # In-control ARL simulation for a Kolmogorov-Smirnov CUSUM chart.
  #
  # Args:
  #   ic_data: sample of in-control data (the reference sample)
  #   kK: allowance parameter
  #   control_limit: upper control limit (manual input, hK)
  #   m: batch size at each time point
  #   ICdist: name of the RNG used for new batches, e.g. "rnorm"
  #   IC_dist_ops: extra args for ICdist, e.g. list(mean=0, sd=1)
  #   bootstrap_samples: replicates used to estimate the IC mean KS stat d0
  #   keep_data: if TRUE, retain every simulated batch (slower, more memory)
  #
  # Returns: list with the charting-statistic path ("uP") and the run
  #   length ("Time OOC") at which the statistic first exceeded control_limit.
  fhat_ic <- ecdf(ic_data)
  j <- 1:m
  # Bootstrap estimate of the in-control mean of the one-sample KS statistic
  D_n <- numeric(bootstrap_samples)
  for(i in 1:bootstrap_samples){
    f0 <- fhat_ic(sort.int(sample(ic_data, replace=TRUE, size=m)))
    D_n[i] <- max(f0 - (j - 1)/m, j/m - f0)
  }
  d0 <- mean(D_n)
  # Initializing variables
  data <- NULL
  u <- 0
  i <- 0
  rIC <- get(ICdist, mode = "function", envir = parent.frame())
  while(tail(u, 1) < control_limit) {
    i <- i + 1
    batch <- do.call(rIC, c(list(m), IC_dist_ops))
    if(keep_data){
      # Only difference from the default path: retain the batch history.
      # (The duplicated statistic computation in the two branches was merged.)
      data <- cbind(data, batch)
    }
    f0_n <- fhat_ic(sort.int(batch))
    D_n <- max(f0_n - (j - 1)/m, j/m - f0_n)
    # Upper one-sided CUSUM of the KS statistic
    u <- append(u, max(0, u[i] + (D_n - d0) - kK))
  }
  return(list("uP"=u, "Time OOC"=i))
}
qiu_KS_PhaseII <- function(ic_data=rnorm(500), kK=.02, control_limit=20, m=5, exact=TRUE,
                           tau=3,
                           ICdist="rnorm", IC_dist_ops=NULL,
                           OOCdist="rnorm", OOC_dist_ops=NULL,
                           bootstrap_samples=3000,
                           keep_data=FALSE){
  # Phase II run-length simulation for the Kolmogorov-Smirnov CUSUM chart.
  # Batches come from ICdist through time tau and from OOCdist afterwards.
  # Returns the charting-statistic path ("uP") and the first time it
  # crossed control_limit ("Time OOC").
  # (The `exact` argument is accepted but not used by this chart.)
  ecdf_ic <- ecdf(ic_data)
  ranks <- 1:m
  # Bootstrap the in-control mean of the KS statistic (d0)
  boot_stats <- numeric(bootstrap_samples)
  for (b in 1:bootstrap_samples) {
    f0 <- ecdf_ic(sort.int(sample(ic_data, replace = T, size = m)))
    boot_stats[b] <- max(f0 - (ranks - 1) / m, ranks / m - f0)
  }
  d0 <- mean(boot_stats)
  # State for the monitoring loop
  history <- NULL
  u <- 0
  t_idx <- 0
  rOOC <- get(OOCdist, mode = "function", envir = parent.frame())
  rIC <- get(ICdist, mode = "function", envir = parent.frame())
  while (tail(u, 1) < control_limit) {
    t_idx <- t_idx + 1
    # Draw the next batch from the appropriate regime
    batch <- if (t_idx <= tau) {
      do.call(rIC, c(list(m), IC_dist_ops))
    } else {
      do.call(rOOC, c(list(m), OOC_dist_ops))
    }
    if (keep_data) {
      history <- cbind(history, batch)
    }
    # One-sample KS statistic of the batch against the IC empirical CDF
    f0_n <- ecdf_ic(sort.int(batch))
    ks_stat <- max(f0_n - (ranks - 1) / m, ranks / m - f0_n)
    # Upper one-sided CUSUM update
    u <- append(u, max(0, u[t_idx] + (ks_stat - d0) - kK))
  }
  return(list("uP" = u, "Time OOC" = t_idx))
}
qiu_CVM_ARL <- function(ic_data, kK=.02, control_limit, m, bootstrap_samples=1000){
  # In-control ARL simulation for a Cramer-von Mises CUSUM chart.
  # d0 is the bootstrapped in-control mean of the two-sample CvM statistic
  # between the reference sample and a batch of size m.
  boot_stats <- numeric(bootstrap_samples)
  for (b in 1:bootstrap_samples) {
    boot_stats[b] <- CvMTwoSamp.res(ic_data, sample(ic_data, replace = T, size = m))
  }
  d0 <- mean(boot_stats)
  # Monitoring loop state
  history <- NULL
  u <- 0
  t_idx <- 0
  while (tail(u, 1) < control_limit) {
    t_idx <- t_idx + 1
    history <- cbind(history, rnorm(m))
    # Two-sample CvM statistic: current batch vs. the IC reference sample
    cvm_stat <- CvMTwoSamp.res(history[, t_idx], ic_data)
    # Upper one-sided CUSUM update
    u <- append(u, max(0, u[t_idx] + (cvm_stat - d0) - kK))
  }
  return(list("uP" = u, "Time OOC" = t_idx))
}
# Phase II run-length simulation for a Cramer-von Mises CUSUM chart.
# Batches are drawn from ICdist through time tau and from OOCdist afterwards;
# the chart signals when the CUSUM of the two-sample CvM statistic
# (current batch vs. ic_data) exceeds control_limit.
#
# Args:
#   ic_data: in-control reference sample
#   kK: allowance parameter
#   control_limit: upper control limit
#   m: batch size at each time point
#   exact: accepted but not used by this chart
#   tau: last in-control time point
#   ICdist/OOCdist: RNG names, e.g. "rnorm" (character)
#   IC_dist_ops/OOC_dist_ops: extra args for the RNGs, e.g. list(mean=2)
#   bootstrap_samples: replicates used to estimate d0
#
# Returns: list with the statistic path ("uP") and run length ("Time OOC").
# NOTE: relies on CvMTwoSamp.res(), defined elsewhere in this project.
qiu_CVM_PhaseII <- function(ic_data=rnorm(500), kK=.02, control_limit=20, m=5, exact=TRUE,
                            tau=3,
                            ICdist="rnorm", IC_dist_ops=NULL,
                            OOCdist="rnorm", OOC_dist_ops=NULL,
                            bootstrap_samples=1000){
  j <- 1:m
  # Bootstrap the in-control mean of the two-sample CvM statistic (d0)
  D_n <- numeric(bootstrap_samples)
  for(i in 1:bootstrap_samples){
    D <- CvMTwoSamp.res(ic_data, sample(ic_data, replace=T, size=m))
    D_n[i] <- D
  }
  d0=mean(D_n)
  # Diagnostic output: d0 and the half-width of its 95% CI
  print(d0)
  print(sd(D_n)*1.96/sqrt(length(D_n)))
  # Initializing Variables
  data <- NULL
  u <- 0
  i <- 0
  j <- 1:m
  # Random variable generation functions
  rOOC <- get(OOCdist, mode = "function", envir = parent.frame())
  rIC <- get(ICdist, mode = "function", envir = parent.frame())
  while(tail(u, 1) < control_limit) {
    i <- i + 1
    # In control through tau, out of control afterwards
    if(i <= tau){
      data <- cbind(data, do.call(rIC, c(list(m), IC_dist_ops) ) )
      #print(data)
    }else{
      data <- cbind(data, do.call(rOOC, c(list(m), OOC_dist_ops)) )
    }
    # Two-sample CvM statistic: current batch vs. the IC reference sample
    D_n <- CvMTwoSamp.res(data[, i], ic_data)
    # Upper one-sided CUSUM update
    u_nK <- max(0,
                u[i] + (D_n - d0) - kK)
    u <- append(u, u_nK)
  }
  return(list("uP"=u, "Time OOC"=i))
}
R_EWMA_PhaseII <- function(lambda=.05,
                           control_limit=.129375,
                           m=5,
                           tau=3,
                           ICdist="rnorm", IC_dist_ops=NULL,
                           OOCdist="rnorm", OOC_dist_ops=NULL){
  # Phase II run-length simulation for an EWMA chart on batch means.
  # Batches come from ICdist through time tau and from OOCdist afterwards.
  # Returns the EWMA path ("v_EWMA") and the first time it crossed
  # control_limit ("Time OOC").
  rOOC <- get(OOCdist, mode = "function", envir = parent.frame())
  rIC <- get(ICdist, mode = "function", envir = parent.frame())
  history <- NULL
  v <- 0  # EWMA statistic, initialized at zero
  step <- 0
  while (tail(v, 1) < control_limit) {
    step <- step + 1
    # Draw the next batch from the appropriate regime
    batch <- if (step <= tau) {
      do.call(rIC, c(list(m), IC_dist_ops))
    } else {
      do.call(rOOC, c(list(m), OOC_dist_ops))
    }
    history <- cbind(history, batch)
    # Standard EWMA recursion on the batch mean
    v <- append(v, lambda * mean(batch) + (1 - lambda) * tail(v, 1))
  }
  return(list("v_EWMA" = v, "Time OOC" = step))
}
R_EWMA_Find_CL <- function(lambda=.05,
                           control_limit=.12,
                           m=5,
                           ICdist="rnorm",
                           IC_dist_ops=NULL){
  # Simulate one in-control run length for an EWMA chart on batch means,
  # used by Find_ARL() when searching for a control limit.
  # Returns the EWMA path ("v_EWMA") and the crossing time ("Time OOC").
  rIC <- get(ICdist, mode = "function", envir = parent.frame())
  history <- NULL
  v <- 0  # EWMA statistic, initialized at zero
  step <- 0
  while (tail(v, 1) < control_limit) {
    step <- step + 1
    batch <- do.call(rIC, c(list(m), IC_dist_ops))
    history <- cbind(history, batch)
    # Standard EWMA recursion on the batch mean
    v <- append(v, lambda * mean(batch) + (1 - lambda) * tail(v, 1))
  }
  return(list("v_EWMA" = v, "Time OOC" = step))
}
Find_ARL <- function(arl=200, lcl=0, ucl=.15, N_max=15, tol=1, sd_tol=3,
                     ICdist="rnorm", IC_dist_ops=NULL,
                     f=R_EWMA_Find_CL, N2_min=300){
  # Bisection search for the control limit whose in-control ARL equals `arl`.
  # Works with any simulator f whose return value contains "Time OOC".
  # At each candidate limit the ARL is estimated by Monte Carlo: at least
  # N2_min replicates, continuing until the standard error of the mean run
  # length drops below sd_tol. Errors out after N_max bisection steps.
  iter <- 1
  lo <- lcl
  hi <- ucl
  arl_track <- arl + tol * 1.5  # sentinel: forces at least one bisection step
  while (iter < N_max) {
    # Did the previous iteration land close enough to the target ARL?
    if (abs(arl_track - arl) < tol) {
      return(list("Calculated Control Limit" = new_cl,
                  "Number of Iterations" = iter,
                  "Calculated ARL" = arl_track,
                  "ARL_SD" = sd_arl))
    }
    new_cl <- mean(c(lo, hi))  # bisection midpoint
    # Monte Carlo ARL estimate at new_cl
    sd_arl <- sd_tol + 1
    arl_track <- NULL
    N2 <- 0
    while (sd_arl > sd_tol) {
      run <- f(control_limit = new_cl, ICdist = ICdist, IC_dist_ops = IC_dist_ops)
      arl_track <- append(arl_track, run[["Time OOC"]])
      sd_arl <- sd(arl_track) / sqrt(length(arl_track))
      if (is.na(sd_arl)) sd_arl <- sd_tol + 1  # sd undefined for one replicate
      N2 <- N2 + 1
      if (N2 < N2_min) sd_arl <- sd_tol + 1    # enforce the minimum sample size
    }
    arl_track <- mean(arl_track)
    print(paste("New Control Limit", new_cl, "has ARL of:", arl_track))
    print(paste("This took", N2, "iterations"))
    # Narrow the bracket for the next bisection step
    if (arl_track < arl) {
      lo <- new_cl
    } else {
      hi <- new_cl
    }
    iter <- iter + 1
  }
  stop("Did Not Converge")
}
EWMA_KS_Find_CL <- function(ic_data=rnorm(500),
                            lambda=.05,
                            control_limit=.12,
                            m=5,
                            ICdist="rnorm",
                            IC_dist_ops=NULL,
                            bootstrap_samples=3000,
                            keep_data=FALSE){
  # Simulate one in-control run length for an EWMA chart built on the
  # one-sample Kolmogorov-Smirnov statistic of each batch against the
  # empirical CDF of ic_data. Used when calibrating a control limit.
  # Returns the EWMA path ("u_EWMA_KS") and the crossing time ("Time OOC").
  ecdf_ic <- ecdf(ic_data)
  ranks <- 1:m
  # Bootstrap the in-control mean of the KS statistic (d0)
  boot_stats <- numeric(bootstrap_samples)
  for (b in 1:bootstrap_samples) {
    f0 <- ecdf_ic(sort.int(sample(ic_data, replace = T, size = m)))
    boot_stats[b] <- max(f0 - (ranks - 1) / m, ranks / m - f0)
  }
  d0 <- mean(boot_stats)
  # Monitoring loop state
  history <- NULL
  u <- 0
  step <- 0
  rIC <- get(ICdist, mode = "function", envir = parent.frame())
  while (tail(u, 1) < control_limit) {
    step <- step + 1
    batch <- do.call(rIC, c(list(m), IC_dist_ops))
    if (keep_data) {
      history <- cbind(history, batch)
    }
    # One-sample KS statistic of the batch against the IC empirical CDF
    f0_n <- ecdf_ic(sort.int(batch))
    ks_stat <- max(f0_n - (ranks - 1) / m, ranks / m - f0_n)
    # EWMA recursion on the centered KS statistic
    u <- append(u, lambda * (ks_stat - d0) + (1 - lambda) * tail(u, 1))
  }
  return(list("u_EWMA_KS" = u, "Time OOC" = step))
}
EWMA_KS_PhaseII <- function(ic_data=rnorm(500),
                            lambda=.05,
                            control_limit=0.033125,
                            m=5,
                            tau=3,
                            ICdist="rnorm", IC_dist_ops=NULL,
                            OOCdist="rnorm", OOC_dist_ops=NULL,
                            bootstrap_samples=3000,
                            keep_data=FALSE){
  # Phase II run-length simulation for the EWMA-KS chart: each batch's
  # one-sample KS statistic (vs. the empirical CDF of ic_data) is smoothed
  # with an EWMA. Batches come from ICdist through time tau and from
  # OOCdist afterwards. Returns the EWMA path ("u_EWMA") and the first
  # time it crossed control_limit ("Time OOC").
  ecdf_ic <- ecdf(ic_data)
  ranks <- 1:m
  # Bootstrap the in-control mean of the KS statistic (d0)
  boot_stats <- numeric(bootstrap_samples)
  for (b in 1:bootstrap_samples) {
    f0 <- ecdf_ic(sort(sample(ic_data, replace = T, size = m)))
    boot_stats[b] <- max(f0 - (ranks - 1) / m, ranks / m - f0)
  }
  d0 <- mean(boot_stats)
  # Monitoring loop state
  history <- NULL
  u <- 0  # EWMA statistic, initialized at zero
  step <- 0
  rOOC <- get(OOCdist, mode = "function", envir = parent.frame())
  rIC <- get(ICdist, mode = "function", envir = parent.frame())
  while (tail(u, 1) < control_limit) {
    step <- step + 1
    # Draw the next batch from the appropriate regime
    batch <- if (step <= tau) {
      do.call(rIC, c(list(m), IC_dist_ops))
    } else {
      do.call(rOOC, c(list(m), OOC_dist_ops))
    }
    if (keep_data) {
      history <- cbind(history, batch)
    }
    # One-sample KS statistic of the batch against the IC empirical CDF
    f0_n <- ecdf_ic(sort.int(batch))
    ks_stat <- max(f0_n - (ranks - 1) / m, ranks / m - f0_n)
    # EWMA recursion on the centered KS statistic
    u <- append(u, lambda * (ks_stat - d0) + (1 - lambda) * tail(u, 1))
  }
  return(list("u_EWMA" = u, "Time OOC" = step))
}
rmixnorm <- function(N, u1, u2, sd1, sd2, prob = c(.5, .5)){
  # Draw N samples from a two-component normal mixture.
  #
  # Args:
  #   N: number of samples to draw
  #   u1, u2: means of the two components
  #   sd1, sd2: standard deviations of the two components
  #   prob: mixing weights for the two components; defaults to an equal
  #         50/50 split, preserving the original hard-coded behavior
  #
  # Returns: numeric vector of length N.
  components <- sample(1:2, prob = prob, size = N, replace = TRUE)
  mus <- c(u1, u2)
  sds <- c(sd1, sd2)
  samples <- rnorm(n = N, mean = mus[components], sd = sds[components])
  return(samples)
}
|
13ff312f2452e79b89987b69a5ddb6320835dabc
|
5378b28dde3f365587dfb646d15a0eb66ac40f1e
|
/server.R
|
646459de5a6d477510848c957ccb6f9920ec254b
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
wwheeler6/CalCAT-1
|
64a69e3603c616397c9d9cbf74c7caa000734820
|
ee73c7fcaade36cf3e785cc562688f2857471ffc
|
refs/heads/master
| 2022-11-27T22:32:50.286561
| 2020-08-11T04:37:50
| 2020-08-11T04:37:50
| 286,646,310
| 0
| 0
|
MIT
| 2020-08-11T04:35:20
| 2020-08-11T04:35:20
| null |
UTF-8
|
R
| false
| false
| 78,853
|
r
|
server.R
|
# Developed by California COVID Modeling Team
# Copyright 2020, State of California, Department of Public Health
#
# John Pugliese, PhD.
# California Department of Public Health
#
# Jason Vargo, PhD.
# California Department of Public Health
#
# Nice!!
#
# Alpha Version : Released 6/8/2020
#
##################################################
##### A gift from California with love. ##########
#### “Together, all things are possible.” ######
###################### -- Cesar Chavez ###########
##################################################
library(shiny)
# Define server logic required to draw a histogram
server <- function(input, output, session) {
#### Carousel Navigation ####
shinyjs::onclick("nowcast_img", updateTabsetPanel(session, inputId="navbar", selected= "Nowcasts"))
shinyjs::onclick("forecast_img", updateTabsetPanel(session, inputId="navbar", selected= "Forecasts"))
shinyjs::onclick("epimodel_img", updateTabsetPanel(session, inputId="navbar", selected= "Scenarios"))
### Nowcasts of R Effective ####
#Data Prep
rt.ts <- reactive({
icl_rt_f <- icl %>% select(date, constant_mobility_mean_time_varying_reproduction_number_R.t.) %>% rename(mean_rt = constant_mobility_mean_time_varying_reproduction_number_R.t.)
icl_rt <- icl_model %>% select(date, mean_time_varying_reproduction_number_R.t.) %>% rename(mean_rt = mean_time_varying_reproduction_number_R.t.)
icl_rt <- rbind(icl_rt, icl_rt_f)
fu <- filter(gu, !is.na(r_values_mean))
rt.rt.xts <- xts(rt_live[,4], rt_live$date)
can.rt.xts <- xts(can.state.observed[,8],can.state.observed$date)
epifc.rt.xts <- xts(epi_forecast[which(epi_forecast$type == "nowcast"),4],
epi_forecast[which(epi_forecast$type == "nowcast"),]$date)
gu.xts <- xts(fu[,19],fu$date)
ucla.rt.xts <- xts(ucla_state[,2],ucla_state$date)
ucla.rt.xts <- ucla.rt.xts[paste0("/",Sys.Date()-1)]
if ( exists("icl") & exists('icl_model') ) {
# icl_rt_f <- icl %>% select(date, constant_mobility_mean_time_varying_reproduction_number_R.t.) %>% rename(mean_rt = constant_mobility_mean_time_varying_reproduction_number_R.t.)
icl_rt <- icl_model %>% select(date, mean_time_varying_reproduction_number_R.t.) %>% rename(mean_rt = mean_time_varying_reproduction_number_R.t.)
# icl_rt <- rbind(icl_rt, icl_rt_f)
icl.rt.xts <- xts(icl_rt[,2], icl_rt$date)
# names(icl.rt.xts) <- c("icl")
# df <- merge(df, icl.rt.xts)
}
df <- merge(rt.rt.xts, can.rt.xts,epifc.rt.xts, gu.xts, ucla.rt.xts, icl.rt.xts)
df$mean.rt <- rowMeans(df[,c(1:4,6)], na.rm = TRUE)
df[is.nan(as.numeric(df))] <- NA_character_
df <- as.data.table(df) %>% as.data.frame()
df[,2:8] <- sapply(df[,2:8], function(x) as.numeric(as.character(x)) )
return(df)
})
#Value Boxes
output$mean.rt.box <- renderValueBox({
cdt <- Sys.Date()-1
current.rt <- round(rt.ts()[which(rt.ts()$index == cdt),8], digits = 2)
valueBox(current.rt, subtitle = paste0(ifelse(current.rt >= 1.4,
"Spread of COVID-19 is very likely increasing",
ifelse(current.rt < 1.4 & current.rt >= 1.1,
"Spread of COVID-19 may be increasing",
ifelse(current.rt < 1.1 & current.rt >= 0.9,
"Spread of COVID-19 is likely stable",
"Spread of COVID-19 is likely decreasing"
)
)
)
),
color = "blue"
) #End valuBox
})
observeEvent(input$Rt_explain, {
sendSweetAlert(
session = session,
title = "What does a Reff of this size mean?",
text = HTML("<p>If the R effective is greater than 1, COVID-19 will spread <b>exponentially</b>. If R effective is less than 1, COVID-19
will spread more slowly and cases will decline. The higher the value of R effective, the faster an epidemic will progress.
The following graph illustrates the change in growth as R effective increases.</p>
<img src='reff_cuml_infection.png' alt='Infections increase faster with larger values of R effective' width='400' height='400'/>
<p><a href='https://www.cebm.net/covid-19/when-will-it-be-over-an-introduction-to-viral-reproduction-numbers-r0-and-re/' target='_blank'>Source: CEBM</a></p>"
),
html = TRUE,
type = NULL
)
})
output$hilo_rt.box <- renderUI({
df <- rt.ts()
df <- df %>% filter(index < Sys.Date()) %>% slice(n())
rt.min <- as.numeric( apply(df[,c(2:5,7)], 1, function(i) min(i, na.rm = TRUE)) )
rt.max <- as.numeric( apply(df[,c(2:5,7)], 1, function(i) max(i, na.rm = TRUE)) )
name.min <- switch(as.character(colnames(df)[match(apply(df[,c(2:5,7)], 1, function(i) min(i, na.rm = TRUE)),df)]),
"rt.rt.xts" = "rt.live",
"can.rt.xts" = "COVIDActNow",
"epifc.rt.xts" = "EpiForecasts",
"gu.xts" = "covid19-projections.com",
"ucla.rt.xts" = "UCLA",
"icl.rt.xts" = "ICL")
name.max<- switch(as.character(colnames(df)[match(apply(df[,c(2:5,7)], 1, function(i) max(i, na.rm = TRUE)),df)]),
"rt.rt.xts" = "rt.live",
"can.rt.xts" = "COVIDActNow",
"epifc.rt.xts" = "EpiForecasts",
"gu.xts" = "covid19-projections.com",
"ucla.rt.xts" = "UCLA",
"icl.rt.xts" = "ICL")
tagList(valueBox( paste0( round(rt.min,digits = 2)," - ", round(rt.max,digits = 2)) , paste0(name.min," - ",name.max), color = "navy", width = 12) )
})
#Graph
output$rt.plot <- renderPlotly({
df <- rt.ts() %>% filter(index < Sys.Date() & index > Sys.Date() -80)
p <- plot_ly(df,
hoverinfo = 'text') %>%
add_trace(x = df[[1]],
y = df[[2]],
name = "rt.live",
type = 'scatter',
mode = "lines",
line = list(color="orange", dash = 'dot', opacity = 0.5),
text = paste0(df[[1]],
"<br>",
"rt.live estimated Reff: ", round(df[[2]], digits=2)
)
) %>%
add_trace(x = df[[1]],
y = df[[3]],
name = "COVIDActNow",
type = 'scatter',
mode = "lines",
line = list(color="blue", dash = 'dot', opacity = 0.5),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"COVIDActNow estimated Reff: ", round(df[[3]], digits=2) )
) %>%
add_trace(x = df[[1]],
y = df[[4]],
name = "EpiForecasts",
type = 'scatter',
mode = "lines",
line = list(color="purple", dash = 'dot', opacity = 0.5),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"EpiForecasts estimated Reff: ", round(df[[4]], digits=2) )
) %>%
add_trace(x = df[[1]],
y = df[[5]],
name = "covid19-projections.com",
type = 'scatter',
mode = "lines",
line = list(color="red", dash = 'dot', opacity = 0.5),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"covid19-projections.com estimated Reff: ", round(df[[5]], digits=2) )
) %>%
# add_trace(x = df[[1]],
# y = df[[6]],
# name = "UCLA",
# type = 'scatter',
# mode = "lines",
# line = list(color="grey", dash = 'dot', opacity = 0.5),
# hoverinfo = 'text',
# text = paste0("UCLA estimated Reff: ", round(df[[6]], digits=2) )
# #marker = list(color = "blue", symbol= "circle")
# ) %>%
add_trace(x = df[[1]],
y = df[[7]],
name = "ICL",
type = 'scatter',
mode = "lines",
line = list(color="grey", dash = 'dot', opacity = 0.5),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"Imperial College London estimated Reff: ", round(df[[7]], digits=2) )
) %>%
add_trace(x = df[[1]],
y = df[[8]],
name = "Mean Reff",
type = 'scatter',
mode = "lines",
hoverinfo = 'text',
line = list(color = '#2b8cbe', width = 5),
text = paste0(df[[1]],
"<br>",
"Mean estimated Reff: ", round(df[[8]], digits=2),
"<br>",
ifelse(round(df[[8]], digits=2) >= 1.4,
"Spread of COVID-19 is very likely increasing",
ifelse(round(df[[8]], digits=2) < 1.4 & round(df[[8]], digits=2) >= 1.1,
"Spread of COVID-19 may be increasing",
ifelse(round(df[[8]], digits=2) < 1.1 & round(df[[8]], digits=2) >= 0.9,
"Spread of COVID-19 is likely stable",
"Spread of COVID-19 is likely decreasing"
)
)
)
)
) %>%
layout(
title = NULL,
xaxis = list(title = NULL, showgrid = FALSE, zeroline = FALSE ),
yaxis = list(title = "R-Eff", showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
margin = list(l = 100),
showlegend = TRUE,
shapes = list(
type = "line",
x0 = 0,
x1 = 1,
xref = "paper",
y0 = 1,
y1 = 1,
yref = "y",
line = list(color = "gray50", dash= "dash", opacity = 0.3))
)
return(p)
})
#Downloadable file of Statewide Reff Values
output$dlRt <- downloadHandler(
filename = function() { paste("R_eff_Nowcasts_",Sys.Date(),'.csv', sep='') },
content = function(file) {
# Title
t <- c(paste("R-Effective Model and Ensemble Time Series", sep = ""),"","","","","","")
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),"","","","","","")
#Column labels
l <- c("Date","rt.live","COVIDActNow","EpiForecasts","covid19-projections.com","ICL","Mean Reff")
df <- rt.ts()[,c(1:5,7,8)] %>% filter(index < Sys.Date() & index > Sys.Date() -80)
df[,2:7] <- lapply(df[,2:7],function(x) round(x,2))
# df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.","","","","","","")
p <- c(paste0("Prepared by: ",state_name," Department of Public Health"),"","","","","","")
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### County Rt Nowcasts ####
#Data Prep
county.rt <- reactive({
progress <- Progress$new()
# Make sure it closes when we exit this reactive, even if there's an error
on.exit(progress$close())
progress$set(message = "Gathering R Effective Nowcasts", value = 0)
c <- names(canfipslist[match(input$select.county.rt,canfipslist)])
cnty <- input$select.county.rt
progress$inc(3/4)
# out <- lapply(cnty[1], function(x) get_can_cnty(x))
out <- filter(can.county.observed, fips == cnty)
# cnty.rt <- do.call("rbind",out)
cnty.rt <- out %>% select(date,RtIndicator) %>% as.data.frame() #,RtIndicatorCI90
cnty.rt$date <- as.Date(cnty.rt$date)
progress$inc(1/4)
df <- xts(cnty.rt[,-1],cnty.rt$date)
if( c %in% unique(gu.cnty$subregion)) {
cnty.gu <- gu.cnty %>% filter(subregion == c) %>% select(date, r_values_mean)
gu.xts <- xts(cnty.gu[,-1],cnty.gu$date)
df <- merge(df,gu.xts)
}
# if (c %in% unique(ucla_cnty_rt$county) ) { cnty.ucla <- ucla_cnty_rt %>% filter(county == c) %>% select(date, Rt)
# ucla.xts <- xts(cnty.ucla[,-1],cnty.ucla$date)
# df <- merge(df,ucla.xts)
# }
if (ncol(df) > 1) {df$mean.proj <- rowMeans(df[,1:ncol(df)], na.rm = TRUE)}
df <- as.data.table(df) %>% as.data.frame() %>% filter(index < Sys.Date())
return(df)
})
#Graph
output$county.rt.plot <- renderPlotly({
df <- county.rt()
c <- names(canfipslist[match(input$select.county.rt,canfipslist)])
#df$ymin <- df$RtIndicator - (df$RtIndicatorCI90)
#df$ymax <- df$RtIndicator + (df$RtIndicatorCI90)
p <- plot_ly(df,
x = df[[1]],
y = df[[2]],
name = "COVIDActNow",
type = 'scatter',
mode = "lines",
line = list(color="blue", dash = 'dot', opacity = 0.5),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"COVIDActNow estimated Reff: ", round(df[[2]], digits=2) )
)
if (c %in% unique(gu.cnty$subregion) ) {p <- p %>% add_trace(x = df[[1]],
y = df[["gu.xts"]],
name = "covid19-projections.com",
type = 'scatter',
mode = "lines",
line = list(color="red", dash = 'dot', opacity = 0.5),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"covid19-projections.com estimated Reff: ", round(df[["gu.xts"]], digits=2) )
)
}
# if (c %in% unique(ucla_cnty_rt$county) ) {p <- p %>% add_trace(x = df[[1]],
# y = df[["ucla.xts"]],
# name = "UCLA",
# type = 'scatter',
# mode = "lines",
# line = list(color="grey", dash = 'dot', opacity = 0.5),
# hoverinfo = 'text',
# text = paste0(df[[1]],
# "<br>",
# "UCLA estimated Reff: ", round(df[["ucla.xts"]], digits=2) )
# )
# }
if (ncol(df) > 2) {p <- p %>% add_trace(x = df[[1]],
y = df[["mean.proj"]],
name = "Mean Reff",
type = 'scatter',
mode = "lines",
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"Mean estimated Reff: ", round(df[["mean.proj"]], digits=2),
"<br>",
ifelse(round(df[["mean.proj"]], digits=2) >= 1.4,
"Spread of COVID-19 is very likely increasing",
ifelse(round(df[["mean.proj"]], digits=2) < 1.4 & round(df[["mean.proj"]], digits=2) >= 1.1,
"Spread of COVID-19 may be increasing",
ifelse(round(df[["mean.proj"]], digits=2) < 1.1 & round(df[["mean.proj"]], digits=2) >= 0.9,
"Spread of COVID-19 is likely stable",
"Spread of COVID-19 is likely decreasing"
)
)
)
),
inherit = FALSE,
line = list(color = '#2b8cbe', width = 5),
linetype = "solid"
)
}
# add_ribbons(x = df[[1]],
# ymax = df[[5]],
# ymin = df[[4]],
# opacity = 0.5,
# inherit = TRUE,
# line = list(color = '#2b8cbe' ),
# fillcolor = '#2b8cbe',
# name = '90% CI'
# ) %>%
p <- p %>% layout( legend = list(orientation = 'h'),
title = as.character(counties[match(input$select.county.rt, counties$fips),"county"]),
xaxis = list(title = NULL, showgrid = FALSE, zeroline = FALSE ),
yaxis = list(title = "R-Eff", showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
margin = list(l = 100),
showlegend = TRUE,
shapes = list(
type = "line",
x0 = 0,
x1 = 1,
xref = "paper",
y0 = 1,
y1 = 1,
yref = "y",
line = list(color = "gray50", dash= "dash", opacity = 0.3)
)
)
return(p)
})
#Download file of individual COUNTY Reff Values
output$dlRt.indv.cnty <- downloadHandler(
filename = function() { paste("Rt_Nowcasts_",names(canfipslist[match(input$select.county.rt,canfipslist)]),"_",Sys.Date(),'.csv', sep='') },
content = function(file) {
c <- names(canfipslist[match(input$select.county.rt,canfipslist)])
# Title
t <- c(paste("R-Effective County Model Time Series for ",c, sep = ""),"","","","")
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),"","","","")
df <- county.rt() %>% as.data.frame()
if ( ncol(df) > 2 ) { df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2)) } else { df[,2] <- round(df[,2],2) }
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Column labels
l <- c("Date","COVIDActNow")
if ( c %in% unique(gu.cnty$subregion) ) { l <- c(l, c("covid19-projections.com")) }
if ( c %in% unique(ucla_cnty_rt$county) ) { l <- c(l, c("UCLA")) }
if ( length(l) > 2 ) { l <- c(l, c("Mean Reff") ) }
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.","","","","")
p <- c(paste0("Prepared by: ",state_name," Department of Public Health"),"","","","")
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### Rt Dot Plot ####
#Data Prep
cnty.7.day.rt <- data.table(can.county.observed) %>%
.[, max_date := max(date, na.rm = T), by = .(county)] %>%
.[date > Sys.Date()-7, .(Rt.m = mean(RtIndicator, na.rm = T),
ll = mean(RtIndicator - RtIndicatorCI90, na.rm=T),
ul = mean(RtIndicator + RtIndicatorCI90, na.rm=T)), by = .(county)] %>% na.omit()
# cnty.7.day.rt <- reactive({
#
# cnty.can <- can.county.observed %>% filter(date <= Sys.Date()-1,
# date > Sys.Date()-8) %>%
# select(county, date, RtIndicator) %>%
# mutate(date = as.Date(date)) %>%
# rename(Rt = RtIndicator) %>%
# as.data.frame()
# cnty.gu <- gu.cnty %>% filter(date <= Sys.Date()-1,
# date > Sys.Date()-8) %>%
# select(subregion, date, r_values_mean) %>%
# rename(county = subregion,
# Rt = r_values_mean )
# cnty.ucla <- ucla_cnty_rt %>% filter(date <= Sys.Date()-1,
# date > Sys.Date()-8) %>%
# select(county, date, Rt)
#
# df <- rbind(cnty.can,cnty.gu,cnty.ucla) %>%
# arrange(county,date) %>%
# group_by(county) %>%
# summarise(Rt.m = mean(Rt, na.rm = T),
# Rt.sd = sd(Rt, na.rm = T) ) %>%
# na.omit() %>%
# mutate(ll = Rt.m - 1.95*Rt.sd,
# ul = Rt.m + 1.95*Rt.sd)
# return(df)
#
# })
#
#Graph
output$rt.dot.plot <- renderPlotly({
df <- cnty.7.day.rt
p <- plot_ly(df,
x = ~ reorder(df$county, desc(df$Rt.m)),
y = ~ df$Rt.m,
name = "R effective",
type = 'scatter',
mode = "markers",
marker = list(color = '#2b8cbe'),
hoverinfo = 'text',
text = ~paste0(df$county, " County<br>","7-day Average Reff: ", round(df$Rt.m, digits=2),
"<br>",
ifelse(df$Rt.m >= 1.4,
"Spread of COVID-19 is very likely increasing",
ifelse(df$Rt.m < 1.4 & df$Rt.m >= 1.1,
"Spread of COVID-19 may be increasing",
ifelse(df$Rt.m < 1.1 & df$Rt.m >= 0.9,
"Spread of COVID-19 is likely stable",
"Spread of COVID-19 is likely decreasing"
)
)
)
)
) %>%
add_segments(x =~ reorder(df$county, df$Rt.m),
xend = ~ reorder(df$county, df$Rt.m),
y = df$ll,
yend = df$ul,
type = "scatter",
mode = "lines",
opacity = .5,
line = list(color='#2b8cbe', width = 6),
showlegend = FALSE
) %>%
layout(
xaxis = list(title = "", tickangle = -30, showgrid = FALSE, zeroline = FALSE ),
yaxis = list(title = "R-Eff", showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
margin = list(l = 100),
showlegend = FALSE,
shapes = list(
type = "line",
x0 = 0,
x1 = 1,
xref = "paper",
y0 = 1,
y1 = 1,
yref = "y",
line = list(color = "gray50", dash= "dash", opacity = 0.3)
)
)
return(p)
})
#Download file of ALL COUNTY 7-day average Reff Values
  # Download handler: CSV of the 7-day average Reff values for all counties,
  # framed with title/subtitle rows above and source/attribution rows below.
  output$dlRt.cnty <- downloadHandler(
    filename = function() { paste("Rt_Nowcasts_7DayAvg_Counties",Sys.Date(),'.csv', sep='') },
    content = function(file) {
      # Title
      t <- c(paste("R-Effective 7 Day Averages for Counties", sep = ""),"","","","")
      #Subtitle
      tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),"","","","")
      df <- cnty.7.day.rt %>% as.data.frame()
      # Round every numeric column (all columns after the county name)
      if ( ncol(df) > 2 ) { df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2)) } else { df[,2] <- round(df[,2],2) }
      df[is.na(df)] <- 0
      df[] <- lapply(df, as.character)
      #Column labels
      # NOTE(review): 4 labels here vs. 5-element padding on the framing rows;
      # if cnty.7.day.rt has 5 columns (county/Rt.m/Rt.sd/ll/ul) the label row
      # is one short -- verify against the global's actual schema.
      l <- c("County","COVIDActNow - 7 Day Avg", "LL", "UL")
      #Source
      s <- c("Please see the Technical Notes tab of the application.","","","","")
      p <- c(paste0("Prepared by: ",state_name," Department of Public Health"),"","","","")
      u <- c("Source: COVIDActNow - https://blog.covidactnow.org/modeling-metrics-critical-to-reopen-safely/","","","","")
      dlm <- rbind(t, tt, l, df, s, p, u)
      write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
    })
#### Hospitalization Projections ####
#Data Prep
  # Reactive: statewide hospitalization actuals merged (as xts, by date) with
  # four 30-day model forecasts (CovidActNow, IHME, MOBS, MIT) plus a row-wise
  # mean of the four.  Downstream plots index this result POSITIONALLY
  # (col 1 date, 2 actuals, 3-6 models, 7 mean), so the merge order here is
  # load-bearing -- do not reorder.
  hosp.proj.ts <- reactive({
    min_hosp <- min(covid$Most.Recent.Date)
    hosp <- covid %>% select(Most.Recent.Date,COVID.19.Positive.Patients) %>% filter(covid$County.Name == state_name) %>% as.data.frame()
    # Each model series clipped to [first actuals date, today + 30 days]
    can.hosp.proj <- can.state.observed %>% select(date, hospitalBedsRequired) %>% filter(min_hosp <= date & date <= Sys.Date() + 30)
    IHME.hosp.proj <- IHME %>% select(date, allbed_mean) %>% filter(min_hosp <= date & date <= Sys.Date() + 30)
    mobs.hosp.proj <- mobs %>% select(2,8) %>% filter(min_hosp <= date & date <= Sys.Date() + 30)
    mit.hosp.proj <- mit %>% select(11,7) %>% filter(min_hosp <= date & date <= Sys.Date() + 30)
    covid.xts <- xts(hosp[,-1],hosp$Most.Recent.Date)
    can.proj.xts <- xts(can.hosp.proj[,-1],can.hosp.proj$date)
    ihme.proj.xts <- xts(IHME.hosp.proj[,-1],IHME.hosp.proj$date)
    mobs.proj.xts <- xts(mobs.hosp.proj[,-1],mobs.hosp.proj$date)
    mit.proj.xts <- xts(mit.hosp.proj[,-1],mit.hosp.proj$date)
    df <- merge(covid.xts,can.proj.xts,ihme.proj.xts,mobs.proj.xts,mit.proj.xts)
    # Mean of the four models, blanked wherever actuals exist (forecast-only)
    df$mean.proj <- rowMeans(df[,2:5], na.rm = TRUE)
    df$mean.proj <- ifelse(!is.na(df$covid.xts), NA, df$mean.proj)
    df <- as.data.table(df) %>% as.data.frame()
    # Styling metadata used by the plot: solid line/Est. for observed dates,
    # dotted/Proj. for forecast dates
    df$period <- ifelse(!is.na(df$covid.xts), "solid", "dot")
    df$type <- ifelse(!is.na(df$covid.xts), "Est.", "Proj.")
    return(df)
  })
#Value Boxes
output$actual.hosp.box <- renderValueBox({
cdt <- max(covid$Most.Recent.Date)
current.hosp <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == state_name),"COVID.19.Positive.Patients"])
valueBox( "Actuals Go Here",
#format(as.numeric(current.hosp), big.mark = ","),
paste0("Actuals: ",cdt), color = "black")
})
  # Value box for the mean 30-day hospitalization forecast (last row, column 7
  # of hosp.proj.ts()).  Rendered via renderUI, so the UI side presumably pairs
  # it with uiOutput() -- verify against the UI definition.
  output$mean.proj.hosp.box <- renderUI({
    # Caption date taken from IHME's forecast horizon -- assumes all models
    # share the same end date; TODO confirm.
    cdt.ihme <- max( IHME[which(IHME$date <= Sys.Date() + 30),]$date )
    mean.proj <- hosp.proj.ts() %>% slice(n()) %>% select(7)
    valueBox( format(round(mean.proj, digits = 0), big.mark = ","), paste0("Mean 30-Day Forecast through ", cdt.ihme), color = "blue", width = 12)
  })
#Graphs
  # Statewide hospitalization plot: actuals plus four model forecasts and their
  # mean, with a dashed vertical line at the last observed date.  Columns of
  # hosp.proj.ts() are referenced positionally: 1=date, 2=actuals, 3=CAN,
  # 4=IHME, 5=MOBS, 6=MIT, 7=mean.
  output$hosp.proj.plot <- renderPlotly({
    df <- hosp.proj.ts()
    cdt <- max(df[which(!is.na(df$covid.xts)),1])  # last date with actuals
    p <- plot_ly(df,
                 hoverinfo = 'text') %>%
      add_trace(x = df[[1]],
                y = df[[2]],
                name = "Actuals",
                type = 'scatter',
                mode = "lines+markers",
                hoverinfo = 'text',
                text = paste0(df[[1]],
                              "<br>",
                              "Actual Hospitalization (PLACEHOLDER DATA - PLEASE REPLACE!!): ", format(round(df[[2]],0), big.mark = ",") ),
                line = list(color = "black"),
                marker = list(color = "black", symbol= "circle")
      ) %>%
      # Each model trace switches solid/dotted via the precomputed `period`
      # column and labels itself "Est."/"Proj." via `type`.
      add_trace(x = df[[1]],
                y = df[[3]],
                name = ~I(paste0("COVIDActNow - ",df$type)),
                type = 'scatter',
                mode = "lines",
                inherit = TRUE,
                line = list(color="orange"),
                linetype = ~I(period),
                hoverinfo = 'text',
                text = paste0(df[[1]],
                              "<br>",
                              "COVIDActNow Estimate: ", format(round(df[[3]],0), big.mark = ",") )
      ) %>%
      add_trace(x = df[[1]],
                y = df[[4]],
                name = ~I(paste0("IHME - ",df$type)),
                type = 'scatter',
                mode = "lines",
                inherit = TRUE,
                line = list(color="navy"),
                linetype = ~I(period),
                hoverinfo = 'text',
                text = paste0(df[[1]],
                              "<br>",
                              "IHME Estimate: ", format(round(df[[4]],0), big.mark = ",") )
      ) %>%
      add_trace(x = df[[1]],
                y = df[[5]],
                name = ~I(paste0("MOBS - ",df$type)),
                type = 'scatter',
                mode = "lines",
                inherit = TRUE,
                line = list(color="red"),
                linetype = ~I(period),
                hoverinfo = 'text',
                text = paste0(df[[1]],
                              "<br>",
                              "MOBS Estimate: ", format(round(df[[5]],0), big.mark = ",") )
      ) %>%
      add_trace(x = df[[1]],
                y = df[[6]],
                name = ~I(paste0("MIT - ",df$type)),
                type = 'scatter',
                mode = "lines",
                inherit = TRUE,
                line = list(color="green"),
                linetype = ~I(period),
                hoverinfo = 'text',
                text = paste0(df[[1]],
                              "<br>",
                              "MIT Estimate: ", format(round(df[[6]],0), big.mark = ",") )
      ) %>%
      # Thick blue trace: mean of the four models (forecast period only)
      add_trace(x = df[[1]],
                y = df[[7]],
                name = "Mean Proj.",
                type = 'scatter',
                mode = "lines",
                hoverinfo = 'text',
                text = paste0(df[[1]],
                              "<br>",
                              "Mean Projection: ", format(round(df[[7]],0), big.mark = ",") ),
                line = list(color = '#2b8cbe', width = 5)
      ) %>%
      layout(
        title = NULL,
        xaxis = list(title = NULL, showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
        yaxis = list(title = "Hospitalizations", showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
        margin = list(l = 100),
        showlegend = TRUE,
        # Dashed vertical line marking the actuals/forecast boundary
        shapes = list(type = "line",
                      y0 = 0,
                      y1 = 1,
                      yref = "paper",
                      x0 = cdt,
                      x1 = cdt,
                      line = list(color = "black", dash = 'dash')
        )
      )
    return(p)
  })
#Download file of Statewide Hospitalization Forecasts
output$dlhosp <- downloadHandler(
filename = function() { paste("Hospital_Forecasts_",Sys.Date(),'.csv', sep='') },
content = function(file) {
# Title
t <- c(paste("Statewide Hospitalization Forecasts", sep = ""),"","","","","","")
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),"","","","","","")
#Column labels
l <- c("Date","Actuals", "COVIDActNow","IHME","MOBS","MIT","Mean")
df <- hosp.proj.ts()[,1:7] %>% as.data.frame()
df[,2:7] <- lapply(df[,2:7],function(x) round(x,2))
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.","","","","","","")
p <- c("Prepared by: California Department of Public Health - COVID Modeling Team","","","","","","")
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### County Hospitalization Projections ####
#Data Prep
  # Reactive: CovidActNow hospitalization forecast series (date,
  # hospitalBedsRequired) for the county selected in the hospitalization tab.
  # Shows a Shiny progress bar while filtering.
  county.hosp <- reactive({
    progress <- Progress$new()
    # Make sure it closes when we exit this reactive, even if there's an error
    on.exit(progress$close())
    progress$set(message = "Gathering Hospitalization Forecasts", value = 0)
    cnty <- input$select.county.hosp  # FIPS code from the picker
    progress$inc(3/4)
    # out <- lapply(cnty[1], function(x) get_can_cnty(x))
    # cnty.hosp <- do.call("rbind",out)
    out <- filter(can.county.observed, fips == cnty)
    cnty.hosp <- out %>% select(date,hospitalBedsRequired) %>% as.data.frame()
    progress$inc(1/4)
    return(cnty.hosp)
  })
##################################
##### COUNTS OF COUNTY BEDS #####
##### Add to global #####
##################################
# fc.cnty.beds <- reactive({
# c <- names(canfipslist[match(input$select.county.hosp,canfipslist)])
#
# if (c %in% cnty.beds[,1] == TRUE) {
# beds <- c(cnty.beds[which(cnty.beds$COUNTY == c),9])
# } else {
# beds <- c(NA)
# }
# })
  # Reactive: county hospitalization actuals merged by date with the
  # CovidActNow forecast, clipped to [first actuals date, today + 30 days].
  # Result columns (positional, used downstream): 1=date (index), 2=actuals
  # (covid.xts), 3=CAN forecast (can.proj.xts), plus a `period` styling column.
  hosp.proj.cnty.ts <- reactive({
    c <- names(canfipslist[match(input$select.county.hosp,canfipslist)])  # fips -> county name
    min_hosp <- min(covid$Most.Recent.Date)
    hosp <- covid %>% select(Most.Recent.Date,COVID.19.Positive.Patients) %>% filter(covid$County.Name == c) %>% as.data.frame()
    can.hosp.proj <- county.hosp() %>% select(date, hospitalBedsRequired) %>% filter(min_hosp <= date & date <= Sys.Date() + 30)
    covid.xts <- xts(hosp[,-1],hosp$Most.Recent.Date)
    can.proj.xts <- xts(can.hosp.proj[,-1],can.hosp.proj$date)
    df <- merge(covid.xts,can.proj.xts)
    df <- as.data.table(df) %>% as.data.frame()
    # solid line where actuals exist, dotted for forecast-only dates
    df$period <- ifelse(!is.na(df$covid.xts), "solid", "dot")
    return(df)
  })
#Value Boxes
  # Value box for the selected county's latest hospitalization actuals and bed
  # capacity.  The displayed value is still a placeholder string; the
  # commented-out code shows the intended actuals/beds formatting.
  output$actual.cnty.hosp.box <- renderValueBox({
    c <- names(canfipslist[match(input$select.county.hosp,canfipslist)])
    cdt <- max(covid$Most.Recent.Date)
    # Computed but currently unused (placeholder value shown instead)
    current.hosp <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == c),"COVID.19.Positive.Patients"])
    valueBox( "Counts/Beds Here",
              #paste0(format(as.numeric(current.hosp), big.mark = ","),"/",
              #       #format(as.numeric(fc.cnty.beds()), big.mark = ",")
              #       ),
              paste0("Actuals / Total Beds: ",cdt),
              color = "black")
  })
  # Value box for the selected county's 30-day hospitalization forecast
  # (last row, column 3 of hosp.proj.cnty.ts() = CovidActNow value).
  output$mean.cnty.proj.hosp.box <- renderValueBox({
    # NOTE(review): the caption date comes from IHME's horizon although the
    # value comes from the CovidActNow series -- confirm the dates agree.
    cdt.ihme <- max( IHME[which(IHME$date <= Sys.Date() + 30),]$date )
    mean.proj <- hosp.proj.cnty.ts() %>% slice(n()) %>% select(3)
    valueBox( format(round(mean.proj, digits = 0), big.mark = ","),
              paste0("30-Day Forecast through ", cdt.ihme), color = "blue")
  })
#Graph
output$county.hosp.plot <- renderPlotly({
df <- hosp.proj.cnty.ts()
cdt <- max(df[which(!is.na(df$covid.xts)),1])
today <- list(type = "line",
y0 = 0,
y1 = 1,
yref = "paper",
x0 = cdt,
x1 = cdt,
line = list(color = "black", dash = 'dash') )
p <- plot_ly(df,
hoverinfo = 'text') %>%
add_trace(x = df[[1]],
y = df[[2]],
name = "Actuals",
type = 'scatter',
mode = "lines+markers",
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"Actual Hospitalization (PLACEHOLDER DATA - PLEASE REPLACE!!): ", format(df[[2]], big.mark = ",") ),
line = list(color = "black"),
marker = list(color = "black", symbol= "circle")
) %>%
add_trace(x = df[[1]],
y = df[[3]],
name = "COVIDActNow - Proj.",
type = 'scatter',
mode = "lines",
inherit = TRUE,
line = list(color="orange"),
linetype = ~I(period),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"COVIDActNow Estimate: ", format(df[[3]], big.mark = ",") )
) %>%
layout(
title = as.character(counties[match(input$select.county.hosp, counties$fips),"county"]),
xaxis = list(title = NULL, showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
yaxis = list(title = "Hospitalziations", showline = TRUE, showgrid = FALSE, zeroline = FALSE),
margin = list(l = 100),
showlegend = TRUE,
shapes = list(today)
)
return(p)
})
#Download file of COUNTY Hospitalization Forecasts
output$dlhosp.cnty <- downloadHandler(
filename = function() { paste("Hospital_Forecasts_for_",names(canfipslist[match(input$select.county.hosp,canfipslist)]),Sys.Date(),'.csv', sep='') },
content = function(file) {
c <- names(canfipslist[match(input$select.county.hosp,canfipslist)])
# Title
t <- c(paste("Hospitalization Forecasts for ",c, sep = ""),"","","","","","")
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),"","","","","","")
#Column labels
l <- c("Date","Actuals", "COVIDActNow")
df <- hosp.proj.cnty.ts()[,1:3] %>% as.data.frame()
df[,2:3] <- lapply(df[,2:3],function(x) round(x,2))
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.","","","","","","")
p <- c(paste0("Prepared by: ",state_name," Department of Public Health"),"","","","","","")
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### Statewide Cumulative Deaths Projections ####
#Data Prep
cdeath.ca <- reactive({
reich_test <- reich_lab %>% unique() %>% as.data.frame()
cdeaths_test <- covid %>% select(Most.Recent.Date,Total.Count.Deaths) %>%
filter(covid$County.Name == state_name) %>%
mutate(model_team = 'Actuals') %>%
rename(model_team = model_team,
target_end_date = Most.Recent.Date,
pointNA = Total.Count.Deaths
) %>%
select(model_team, pointNA, target_end_date) %>%
as.data.frame()
reich_test <- rbind(reich_test,cdeaths_test)
reich_test <- reich_test %>% distinct(model_team, target_end_date, .keep_all = TRUE) %>% spread(model_team, pointNA)
})
#Value Boxes
output$actual.cdeath.box <- renderValueBox({
cdt <- max(covid$Most.Recent.Date)
current.deaths <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == state_name),4])
valueBox( format(as.numeric(current.deaths), big.mark = ","), paste0("Actuals (NYTIMES DATA): ",cdt), color = "black")
})
  # Value box for the COVIDhub ensemble cumulative-death forecast: last
  # non-missing ensemble value and its date from the wide cdeath.ca() table.
  output$mean.proj.cdeaths.box <- renderUI({
    ensemble <- cdeath.ca() %>% select(target_end_date,COVIDhub.ensemble) %>% filter(!is.na(COVIDhub.ensemble))
    cdt.ens <- max(ensemble$target_end_date)
    mean.proj <- ensemble %>% slice(n()) %>% select(2)
    valueBox( format(round(mean.proj, digits = 0), big.mark = ","), paste0("COVIDhub Ensemble Forecast through ", cdt.ens), color = "blue", width = 12)
  })
#Graphs
  # Statewide cumulative-death plot: all Reich Lab forecast models in light
  # gray, the COVIDhub ensemble highlighted in blue, actuals in black.
  # Scenario-style CU model columns are excluded; Actuals and the ensemble are
  # appended last so they are drawn on top.
  output$cdeath.proj.plot <- renderPlotly({
    df <- cdeath.ca()
    #Need to filter out Reich Lab models that represent scenarios rather than forecasts of current conditions
    models <- names(df)
    models <- setdiff(models, c("target_end_date", "CU.nointerv", "CU.60contact","CU.70contact",
                                "CU.80contact","CU.80contact1x10p","CU.80contact1x5p","CU.80contactw10p",
                                "CU.80contactw5p","COVIDhub.ensemble", "Actuals" ) )
    models <- models %>% c("Actuals","COVIDhub.ensemble")
    p <- plot_ly(data=df, type = "scatter", mode = "lines")
    # One trace per model column; y formula is built from the column name,
    # backtick-quoted because model names contain dots/dashes.
    for(trace in models){
      if (trace == "Actuals") {
        p <- p %>% plotly::add_trace(x = ~target_end_date,
                                     y = as.formula(paste0("~`", trace, "`")),
                                     name = trace,
                                     type = 'scatter',
                                     mode = "lines+markers",
                                     line = list(color ="black"),
                                     marker = list(color = "black", symbol= "circle"),
                                     hoverinfo = 'text',
                                     text = paste0(df[[1]],
                                                   "<br>",
                                                   "Actual Total Deaths (NYTIMES DATA): ", format(df$Actuals, big.mark = ","))
        )
      } else {
        if (trace == "COVIDhub.ensemble") {
          p <- p %>% add_trace(x = ~target_end_date,
                               y = as.formula(paste0("~`", trace, "`")),
                               inherit = FALSE,
                               name = trace,
                               line = list(shape = "spline", color = '#2b8cbe'),
                               marker = list(color = '#2b8cbe', symbol= "circle"),
                               hoverinfo = 'text',
                               text = paste0(df[[1]],
                                             "<br>",
                                             "COVIDhub Ensemble Forecast: ", format(df$COVIDhub.ensemble, big.mark = ","))
          )
        } else {
          # All remaining models: de-emphasized gray background traces
          p <- p %>% plotly::add_trace(x = ~target_end_date,
                                       y = as.formula(paste0("~`", trace, "`")),
                                       name = trace,
                                       type = 'scatter',
                                       mode = "lines",
                                       line = list(color ="lightgray"),
                                       hoverinfo = 'text+y',
                                       text = paste0(df[[1]],
                                                     "<br>",
                                                     trace," Forecast")
          )
        }
      }
    }
    p %>%
      layout(title = NULL,
             xaxis = list(title = " ", showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
             yaxis = list(title = "Total Deaths", showline = TRUE, showgrid = FALSE, zeroline = FALSE, hoverformat = ',.2r' ),
             margin = list(l = 100),
             legend = list(traceorder = "reversed"),
             showlegend = TRUE)
  })
#Download file of Statewide Cumulative Deaths Forecasts
  # Download handler: CSV of the wide cdeath.ca() table (one column per model),
  # with framing rows padded dynamically to the table's column count.
  output$dlDeath <- downloadHandler(
    filename = function() { paste("Cumulative_Deaths_Forecasts_",Sys.Date(),'.csv', sep='') },
    content = function(file) {
      # Title
      t <- c(paste("Statewide Cumulative Deaths Forecasts", sep = ""),rep("",ncol(cdeath.ca())-1) )
      #Subtitle
      tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(cdeath.ca())-1))
      #Column labels
      l <- names(cdeath.ca())
      df <- cdeath.ca() %>% as.data.frame()
      #df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2))
      df[is.na(df)] <- 0
      df[] <- lapply(df, as.character)
      #Source
      s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(cdeath.ca())-1))
      p <- c(paste0("Prepared by: ",state_name," Department of Public Health"),rep("",ncol(cdeath.ca())-1))
      dlm <- rbind(t, tt, l, df, s, p)
      write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
    })
#### County Cumulative Death Projections ####
### You can add additional county death forecasts here ###
###
#Data prep
  # Reactive: county cumulative-death actuals merged by date with the
  # CovidActNow and Berkeley (Yu group) forecasts plus their row-wise mean.
  # Result columns (positional, used downstream): 1=date (index), 2=actuals
  # (covid.xts), 3=CAN, 4=Yu, 5=mean.proj, plus period/type styling columns.
  county.deaths <- reactive({
    progress <- Progress$new()
    # Make sure it closes when we exit this reactive, even if there's an error
    on.exit(progress$close())
    progress$set(message = "Gathering Death Forecast Data", value = 0)
    fips <- input$select.county.death
    cnty <- names(canfipslist[match(fips,canfipslist)])
    #Used to filter model estimates that occur prior to actuals
    death <- covid %>% select(Most.Recent.Date,Total.Count.Deaths) %>% filter(covid$County.Name == cnty) %>% as.data.frame()
    min_death <- min(death$Most.Recent.Date)
    progress$inc(3/4)
    # out <- lapply(fips[1], function(x) get_can_cnty(x))
    # can.death <- do.call("rbind",out)
    out <- filter(can.county.observed, county == cnty)
    can.death <- out %>% select(date,cumulativeDeaths) %>%
      filter(min_death <= date & date <= Sys.Date() + 30) %>%
      rename(CovidActNow = cumulativeDeaths) %>% as.data.frame()
    yu.death <- filter( yu, CountyName==cnty) %>% select(date,predicted_deaths) %>%
      filter(min_death <= date & date <= Sys.Date() + 30) %>%
      rename(YuGroup = predicted_deaths) %>% as.data.frame()
    progress$inc(1/4)
    covid.xts <- xts(death[,-1],death$Most.Recent.Date)
    can.proj.xts <- xts(can.death[,-1],can.death$date)
    yu.proj.xts <- xts(yu.death[,-1],yu.death$date)
    #Add additional forecasts as xts object
    df <- merge(covid.xts,can.proj.xts,yu.proj.xts)
    #Estimate a mean forecast here
    # Columns 2:3 are the two model series; update this range when adding models
    df$mean.proj <- rowMeans(df[,2:3], na.rm = TRUE)
    df$mean.proj <- ifelse(!is.na(df$covid.xts), NA, df$mean.proj)
    df <- as.data.table(df) %>% as.data.frame()
    # solid/Est. over observed dates, dot/Proj. for forecast-only dates
    df$period <- ifelse(!is.na(df$covid.xts), "solid", "dot")
    df$type <- ifelse(!is.na(df$covid.xts), "Est.", "Proj.")
    return(df)
  })
#Value Boxes
output$actual.cnty.death.box <- renderValueBox({
c <- names(canfipslist[match(input$select.county.death,canfipslist)])
cdt <- max(covid$Most.Recent.Date)
current.deaths <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == c),"Total.Count.Deaths"])
valueBox( paste0(format(as.numeric(current.deaths), big.mark = ",") ),
paste0("Actual Deaths (NYTIMES DATA): ",cdt),
color = "black")
})
  # Value box for the selected county's mean 30-day cumulative-death forecast
  # (last row of county.deaths(), mean.proj column).
  output$mean.cnty.proj.death.box <- renderValueBox({
    df <- county.deaths()
    cdt <- max( df$index )  # `index` is the date column from as.data.table(xts)
    mean.proj <- df %>% slice(n()) %>% select(mean.proj)
    valueBox( format(round(mean.proj, digits = 0), big.mark = ","),
              paste0("30-Day Forecast through ", cdt), color = "blue")
  })
#Graph
output$county.death.plot <- renderPlotly({
df <- county.deaths()
cdt <- max(df[which(!is.na(df$covid.xts)),1])
today <- list(type = "line",
y0 = 0,
y1 = 1,
yref = "paper",
x0 = cdt,
x1 = cdt,
line = list(color = "black", dash = 'dash') )
p <- plot_ly(df,
hoverinfo = 'text') %>%
add_trace(x = df[[1]],
y = df[[2]],
name = "Actuals",
type = 'scatter',
mode = "lines+markers",
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"Actual Deaths (NYTIMES DATA): ", format(df[[2]], big.mark = ",") ),
line = list(color = "black"),
marker = list(color = "black", symbol= "circle")
) %>%
add_trace(x = df[[1]],
y = df[[3]],
name = ~I(paste0("COVIDActNow - ",df$type)),
type = 'scatter',
mode = "lines",
inherit = TRUE,
line = list(color="orange"),
linetype = ~I(period),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"COVIDActNow Estimate: ", format(df[[3]], big.mark = ",") )
) %>%
add_trace(x = df[[1]],
y = df[[4]],
name = ~I(paste0("Berkeley Yu - ",df$type)),
type = 'scatter',
mode = "lines",
inherit = TRUE,
line = list(color="blue"),
linetype = ~I(period),
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"Berkeley Estimate: ", format(df[[4]], big.mark = ",") )
) %>%
#Example trace for additional forecast
# add_trace(x = df[[1]],
# y = df[[4]],
# name = ~I(paste0("UCLA - ",df$type)),
# type = 'scatter',
# mode = "lines",
# inherit = TRUE,
# line = list(color="blue"),
# linetype = ~I(period),
# hoverinfo = 'text',
# text = paste0(df[[1]],
# "<br>",
# "UCLA Estimate: ", format(df[[4]], big.mark = ",") )
#
# ) %>%
add_trace(x = df[[1]],
y = df[[5]],
name = "Mean Proj.",
type = 'scatter',
mode = "lines",
hoverinfo = 'text',
text = paste0(df[[1]],
"<br>",
"Mean Projection: ", format(round(df[[4]],0), big.mark = ",") ),
line = list(color = '#2b8cbe', width = 5)
) %>%
layout(
title = as.character(counties[match(input$select.county.death, counties$fips),"county"]),
xaxis = list(title = NULL, showline = TRUE, showgrid = FALSE, zeroline = FALSE ),
yaxis = list(title = "Total Deaths", showline = TRUE, showgrid = FALSE, zeroline = FALSE),
margin = list(l = 100),
showlegend = TRUE,
shapes = list(today)
)
return(p)
})
#Download file of COUNTY Total Death Forecasts
output$dlDeath.cnty <- downloadHandler(
filename = function() { paste("Cumulative_Death_Forecasts_for_",names(canfipslist[match(input$select.county.death,canfipslist)]),Sys.Date(),'.csv', sep='') },
content = function(file) {
c <- names(canfipslist[match(input$select.county.death,canfipslist)])
# Title
t <- c(paste("Cumulative Death Forecasts for ",c, sep = ""),rep("",ncol(county.deaths())-1))
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(county.deaths())-1))
df <- county.deaths() %>% select(-c(period, type)) %>% rename(date = index) %>% as.data.frame()
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Column labels
l <- c("Date","Total Deaths")
if ( "can.proj.xts" %in% names(county.deaths()) ) { l <- c(l, c("COVIDActNow")) }
if ( "yu.proj.xts" %in% names(county.deaths()) ) { l <- c(l, c("Berkeley")) }
#Add lines for additional sources of forecasts
#if ( "ucla.proj.xts" %in% names(county.deaths()) ) { l <- c(l, c("UCLA")) }
if ( length(l) > 2 ) { l <- c(l, c("Mean") ) }
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(county.deaths())-1))
p <- c(paste0("Prepared by: ",state_name," Department of Public Health"),rep("",ncol(county.deaths())-1))
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### Scenario Models ####
output$model.descrip.ts <- renderUI({
UKKC <- as.character(input$include_JHU_UKKC)
model_descrip_list <- lapply(UKKC, function(i) { HTML(paste("<p>",as.character(scenarios[match(i, scenarios$colvar),2]),": ",
as.character(scenarios[match(i, scenarios$colvar),4]),"</p>")) })
do.call(tagList, model_descrip_list)
})
#### Daily Estimates #####
### For NPI scenario models, this is the drop that lists the NPI drop down.
### These lists are defined in the model_output_lists.R
### California had over a dozen models, and therefore needed a flexible way to add/remove them.
### The dates are to identify when collections of models were run.
  # Scenario picker for the NPI scenario tab.  The statewide view exposes an
  # extra (older) scenario group; county views get the smaller list.  Commented
  # entries are retired model-run dates kept as a template for re-adding runs.
  output$physical.select <- renderUI({
    # switch() on a string: statewide vs. county choice lists
    s <- as.character(state_name == input$county_ts)
    choice.list <- switch(s,
                          "TRUE" = list ( #"5/22/2020" = modellist[c(8:11)],
                            #"4/23/2020" = modellist[c(4:7)],
                            "4/11/2020" = otherlist[1:2],
                            "4/07/2020" = otherlist[3] ),
                          list (#"5/22/2020" = modellist[c(8:11)],
                            #"4/23/2020" = modellist[c(4:7)],
                            "4/11/2020" = otherlist[1:2] )
    )
    pickerInput(
      inputId = "include_JHU_UKKC", "Select Scenario",
      choices = choice.list,
      selected = c("strictDistancingNow",
                   "weakDistancingNow"),
      options = list(`actions-box` = TRUE, noneSelectedText = "Select Scenario"),
      #inline = TRUE,
      multiple = TRUE,
      # One style entry per choice row (13 = max number of scenarios shown)
      choicesOpt = list( style = rep(("color: black; background: white; font-weight: bold;"),13))
    )
  })
output$epi_covid_select <- renderUI({
selectInput("select_COVID",
"Select Actuals (THIS IS PLACEHOLDER DATA):",
COVIDvar,
selected = switch(input$selected_crosswalk,
"1" = "COVID.19.Positive.Patients",
"2" = "ICU.COVID.19.Positive.Patients",
"3" = "Total.Count.Deaths")
)
})
  # Reactive: merged xts of all scenario-model series for the selected
  # geography and outcome (crosswalk 1 = hospitalizations, 2 = ICU, 3 = deaths).
  # Statewide view includes IHME (mean + lower/upper); county views only get
  # CovidActNow + actuals since IHME does not publish county scenarios here.
  # The large commented blocks preserve the retired Johns Hopkins wiring
  # (mean/median/quartile columns suffixed .M/.L/.H) as a template.
  state.model.xts <- reactive({
    c <- input$county_ts
    # This is how we organized Johns Hopkins Runs, which consisted of a mean, median and intervals.
    # We were primarily interested in displaying hospitalizations, ICU beds and cumulative deaths despite other
    # outputs available from some modelers. The goal here is to harmonize and organize outputs from multiple modelers.
    # JHU_sts.m <- to_xts_awsJHU(JHU_aws, c,
    #                             switch(input$selected_crosswalk,
    #                                    "1" = "hosp_occup_mean",
    #                                    "2" = "icu_occup_mean",
    #                                    "3" = "cum_deaths_mean"
    #                             ))
    #
    # JHU_sts.md <- to_xts_awsJHU(JHU_aws, c,
    #                            switch(input$selected_crosswalk,
    #                                   "1" = "hosp_occup_q50",
    #                                   "2" = "icu_occup_q50",
    #                                   "3" = "cum_deaths_q50"
    #                            ))
    # colnames(JHU_sts.md) <- paste(colnames(JHU_sts.md),"M", sep = ".")
    #
    # JHU_sts.L <- to_xts_awsJHU(JHU_aws, c,
    #                            switch(input$selected_crosswalk,
    #                                   "1" = "hosp_occup_q25",
    #                                   "2" = "icu_occup_q25",
    #                                   "3" = "cum_deaths_q25"
    #                            ))
    # colnames(JHU_sts.L) <- paste(colnames(JHU_sts.L),"L", sep = ".")
    #
    # JHU_sts.H <- to_xts_awsJHU(JHU_aws, c,
    #                            switch(input$selected_crosswalk,
    #                                   "1" = "hosp_occup_q75",
    #                                   "2" = "icu_occup_q75",
    #                                   "3" = "cum_deaths_q75"
    #                            ))
    # colnames(JHU_sts.H) <- paste(colnames(JHU_sts.H),"H", sep = ".")
    # IHME mean and lower/upper bounds for the selected outcome
    IHME_sts <- to_xts_IHME(IHME,state_name,
                            switch(input$selected_crosswalk,
                                   "1" = "allbed_mean",
                                   "2" = "ICUbed_mean",
                                   "3" = "totdea_mean"
                            ))
    IHME_sts.L <- to_xts_IHME(IHME,state_name,
                              switch(input$selected_crosswalk,
                                     "1" = "allbed_lower",
                                     "2" = "ICUbed_lower",
                                     "3" = "totdea_lower"
                              ))
    IHME_sts.H <- to_xts_IHME(IHME,state_name,
                              switch(input$selected_crosswalk,
                                     "1" = "allbed_upper",
                                     "2" = "ICUbed_upper",
                                     "3" = "totdea_upper"
                              ))
    CAN_sts <- to_xts_CAN(CAN_aws, c,
                          switch(input$selected_crosswalk,
                                 "1" = "hospitalizations",
                                 "2" = "beds",
                                 "3" = "deaths"
                          ))
    COVID_sts <- to_xts_COVID(covid, c)
    #Not all modelers produce county level outputs for scenarios. i.e. IHME
    if (c != state_name) {
      all_ts <- suppressWarnings( merge.xts(#JHU_sts.m, #New JHU with optional estimates and intervals
        #JHU_sts.md,
        #JHU_sts.L,
        #JHU_sts.H,
        CAN_sts,
        COVID_sts, fill = NA) )
    } else {
      all_ts <- suppressWarnings( merge.xts(#JHU_sts.m, #New JHU with optional estimates and intervals
        #JHU_sts.md,
        #JHU_sts.L,
        #JHU_sts.H,
        IHME_sts, #IHME with intervals
        IHME_sts.L,
        IHME_sts.H,
        CAN_sts,
        COVID_sts,
        fill = NA #Covid outputs
      ) )
    }
    #all_ts <- all_ts[,c(-1)]
    all_ts <- all_ts["20200301/20201231"] #Some models extend beyond 2020
    return(all_ts)
  })
  # Reactive: licensed-bed count for the selected county, used as a reference
  # line on the scenario plot.  Currently a hard-coded placeholder (100); the
  # commented code shows the intended lookup against a cnty.beds table.
  total.cnty.beds <- reactive({
    c <- input$county_ts
    # This coded grabs county bed counts for a reference line in plots.
    # if (c %in% cnty.beds[,1] == TRUE) {
    #   beds <- c(cnty.beds[which(cnty.beds$COUNTY == c),9])
    # } else {
    #   beds <- c(NA)
    # }
    beds <- 100  # PLACEHOLDER -- replace with real bed counts
  })
#Regex patterns for JHU scenarios
  #Regex patterns for JHU scenarios
  # jhu.no matches bare JHU scenario column names; jhu.M the median-suffixed
  # (.M) variants; jhu.lh / jhu.lh.b the lower/upper-bound (.L/.H) variants for
  # the two column-naming conventions.  Used below to expand checkbox selections
  # into the matching xts column names.
  jhu.no <- "UK.\\w+.\\d+_\\d+|.\\w+_\\w{4,}"
  jhu.M <- "UK.\\w+.\\d+_\\d+.M|.\\w+_\\w{4,}.M"
  jhu.lh <- "UK.\\w+.\\d[w].\\w+.[LH]|.\\w+_\\w{4,}.[LH]"
  jhu.lh.b <- "UK.\\w+.\\d+_\\d+.[LH]|.\\w+_\\w{4,}.[LH]"
  # Interactive dygraph of the scenario models.  `chbx` accumulates the xts
  # column names to display based on the scenario picker, the mean-vs-median
  # toggle (physical.mmd), and the interval checkboxes; dySeries() calls then
  # wire each column (or .L/.H-banded triple) to a legend label.
  output$physical.graph <- renderDygraph({
    df <- state.model.xts()
    dtrange <- paste(as.character(input$dateRange_ts), collapse = "/")
    chbx <- c()
    #### Actuals
    if ( input$actuals == TRUE) {chbx <- c(chbx,c(input$select_COVID)) }
    UKKC <- as.character(input$include_JHU_UKKC)
    # JHU scenarios: plain column names for the mean, ".M"-suffixed for median
    if ( TRUE %in% grepl(jhu.no, UKKC) & input$physical.mmd == "M" ) {
      JHU_list <- UKKC[grep(jhu.no,UKKC)]
      chbx <- c(chbx, c(JHU_list) )
    } else {
      JHU_list <- UKKC[grep(jhu.no,UKKC)]
      chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) { paste0(as.character( JHU_list[[i]] ),".M" ) } ) ) ) )
    }
    # Optional interquartile bands (.L/.H columns) for JHU scenarios
    if (TRUE %in% grepl(jhu.no, UKKC) & input$physical.iqr == TRUE) {
      JHU_list <- UKKC[grep(jhu.no,UKKC)]
      chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".L" ) } )) ),
                c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".H" ) } )) ) )
    }
    # IHME only exists statewide
    if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$county_ts == state_name ) {
      chbx <- chbx %>% c("IHME_sts")
    }
    if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$IHME.iqr == TRUE & input$county_ts == state_name) {
      IHME <- "IHME_sts"
      chbx <- c(chbx, c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".L") } )) ),
                c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".H") } )) )
      )
    }
    # CovidActNow scenarios, only for counties with CAN coverage
    if ( TRUE %in% grepl("weakDistancingNow|strictDistancingNow",UKKC) &
         input$county_ts %in% can_counties == TRUE ) {
      can <- UKKC[grep("weakDistancingNow|strictDistancingNow",UKKC)]
      chbx <- chbx %>% c(can)
    }
    df <- df[,c(chbx)]
    # Thousands-separator formatter for the y axis/legend
    FUNC_JSFormatNumber <- "function(x) {return x.toString().replace(/(\\d)(?=(\\d{3})+(?!\\d))/g, '$1,')}"
    d <- dygraph(df, main = switch(input$selected_crosswalk,
                                   "1" = paste0(input$county_ts," COVID Hospitalizations"),
                                   "2" = paste0(input$county_ts," COVID ICU Patients"),
                                   "3" = paste0(input$county_ts," COVID Cumulative Deaths")
    ))
    # With bounds selected: register each scenario as an (L, center, H) band
    if ( TRUE %in% grepl(jhu.lh, chbx) | TRUE %in% grepl(jhu.lh.b, chbx) ) {
      if ( input$physical.mmd == "M") {
        chbx.M <- chbx[grep(jhu.no,chbx)]
        chbx.M <- unique(str_remove(chbx.M, "\\.[LH]"))
        for (scenario in chbx.M) {
          d <- d %>% dySeries(c( paste0(scenario,".L"),paste0(scenario),paste0(scenario,".H")), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
        }
      } else {
        chbx.M <- chbx[grep(jhu.M,chbx)]
        chbx.M <- str_remove(chbx.M, ".M")
        for (scenario in chbx.M) {
          d <- d %>% dySeries(c( paste0(scenario,".L"),paste0(scenario,".M"),paste0(scenario,".H")), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
        }
      }
      # No intervals
    } else {
      if ( input$physical.mmd == "M") {
        chbx.M <- chbx[grep(jhu.no,chbx)]
        for (scenario in chbx.M) {
          d <- d %>% dySeries(paste0(scenario), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
        }
      } else {
        chbx.M <- chbx[grep(jhu.M,chbx)]
        chbx.M <- str_remove(chbx.M, ".M")
        for (scenario in chbx.M) {
          d <- d %>% dySeries(paste0(scenario,".M"), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
        }
      }
    }
    # IHME as a banded or single series, then CAN scenarios and actuals
    if ( TRUE %in% grepl("IHME_sts.[LH]", chbx) ){
      if ( "IHME_sts.L" %in% c(chbx) ) {d <- d %>% dySeries(c("IHME_sts.L","IHME_sts","IHME_sts.H"), label = 'IHME Model', fillGraph = FALSE) }
    } else {
      if ( "IHME_sts" %in% c(chbx) ) {d <- d %>% dySeries("IHME_sts", label = 'IHME Model', fillGraph = FALSE) }
    }
    if ( "weakDistancingNow" %in% c(chbx) ) {d <- d %>% dySeries("weakDistancingNow", label = 'CAN: Delay/Distancing', fillGraph = FALSE) }
    if ( "strictDistancingNow" %in% c(chbx) ) {d <- d %>% dySeries("strictDistancingNow", label = 'CAN: Shelter in Place', fillGraph = FALSE) }
    if ( "Total.Count.Deaths" %in% c(chbx) ) {d <- d %>% dySeries("Total.Count.Deaths", label = "Total Deaths", fillGraph= FALSE, drawPoints = TRUE, pointSize = 5, pointShape = "square", color = "black") }
    if ( "COVID.19.Positive.Patients" %in% c(chbx) ) {d <- d %>% dySeries("COVID.19.Positive.Patients", label = "Patients Positive for COVID-19", fillGraph= FALSE, drawPoints = TRUE, pointSize = 5, pointShape = "diamond", color = "black") }
    if ( "ICU.COVID.19.Positive.Patients" %in% c(chbx) ) {d <- d %>% dySeries("ICU.COVID.19.Positive.Patients", label = "ICU Patients Positive for COVID-19", fillGraph= FALSE, drawPoints = TRUE, pointSize = 5, pointShape = "hexagon", color = "black") }
    #### Add county beds
    # Capacity reference line: statewide surge capacity or county licensed beds
    if ( input$selected_crosswalk == "1" & input$county_ts == state_name) {
      d <- d %>% dyLimit(50000, label = "Phase 1 Surge Capacity", labelLoc = c("left"), color = "black", strokePattern = "dashed")
    } else {
      if ( input$selected_crosswalk == "1" & !is.na(total.cnty.beds()) == TRUE ) { d <- d %>% dyLimit(total.cnty.beds(), label = "Total Licensed Beds", labelLoc = c("left"), color = "black", strokePattern = "dashed") }
    }
    d <- d %>% dyOptions(digitsAfterDecimal=0, strokeWidth = 3, connectSeparatedPoints = TRUE, drawGrid = FALSE) %>%
      dyAxis("y", axisLabelFormatter=htmlwidgets::JS(FUNC_JSFormatNumber), valueFormatter=htmlwidgets::JS(FUNC_JSFormatNumber)) %>%
      dyHighlight(highlightSeriesOpts = list(strokeWidth = 4)) %>%
      dyEvent(Sys.Date(), "Today", labelLoc = "top") %>%
      dyLegend(show = "always",
               labelsDiv = "legendDivID2",
               hideOnMouseOut = TRUE) %>%
      dyRangeSelector(height = 30, dateWindow = c((Sys.Date() - 30), as.Date("2020-12-31")) )
  })
#### Static Daily Estimates ####
# Static (ggplot) daily-estimates figure.
# Builds the same scenario comparison as the interactive dygraph, restricted to
# the date window currently selected on the dygraph range selector
# (input$physical.graph_date_window). `chbx` accumulates the column names of
# state.model.xts() to draw, based on the user's checkbox selections.
output$physical.graph.static <- renderPlot({
  # Subset the xts model matrix to the dygraph's visible date window ("from/to" syntax).
  df <- state.model.xts()[ paste0( as.Date(input$physical.graph_date_window[[1]]),"/",as.Date(input$physical.graph_date_window[[2]]) ) ]
  #dtrange <- paste(as.character(input$dateRange_ts), collapse = "/")
  chbx <- c()
  #### Uncontrolled + Actuals
  #if ( input$overlay_uncontrolled == TRUE ) { chbx <- chbx %>% c("No_Intervention") }
  # Observed (actuals) series chosen by the user.
  if ( input$actuals == TRUE) {chbx <- c(chbx,c(input$select_COVID)) }
  UKKC <- as.character(input$include_JHU_UKKC)
  # JHU scenarios: plain column names when showing the median only ("M"),
  # otherwise append the ".M" suffix explicitly.
  # NOTE(review): `jhu.no` is a regex defined elsewhere in this file — assumed to
  # match the JHU scenario column names; confirm against its definition.
  if ( TRUE %in% grepl(jhu.no, UKKC) & input$physical.mmd == "M" ) {
    JHU_list <- UKKC[grep(jhu.no,UKKC)]
    chbx <- c(chbx, c(JHU_list) )
  } else {
    JHU_list <- UKKC[grep(jhu.no,UKKC)]
    chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) { paste0(as.character( JHU_list[[i]] ),".M" ) } ) ) ) )
  }
  # Add the low/high (IQR) bands for the selected JHU scenarios.
  if (TRUE %in% grepl(jhu.no, UKKC) & input$physical.iqr == TRUE) {
    JHU_list <- UKKC[grep(jhu.no,UKKC)]
    chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".L" ) } )) ),
              c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".H" ) } )) ) )
  }
  # IHME is state-level only, so it is drawn only for the state-wide view.
  if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$county_ts == state_name ) {
    chbx <- chbx %>% c("IHME_sts")
  }
  if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$IHME.iqr == TRUE & input$county_ts == state_name) {
    IHME <- "IHME_sts"
    chbx <- c(chbx, c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".L") } )) ),
              c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".H") } )) )
    )
  }
  # CovidActNow (CAN) series only exist for counties in `can_counties`.
  if ( TRUE %in% grepl("weakDistancingNow|strictDistancingNow",UKKC) &
       input$county_ts %in% can_counties == TRUE ) {
    can <- UKKC[grep("weakDistancingNow|strictDistancingNow",UKKC)]
    chbx <- chbx %>% c(can)
  }
  df <- df[,c(chbx)]
  # nl <- as.numeric(match("No_Intervention",names(df)))
  # maxy <- suppressWarnings( max(df[,-as.numeric(nl)], na.rm=TRUE) +
  #                             ( max(df[,-as.numeric(nl)], na.rm=TRUE) * 0.05)
  # )
  # Fixed legend-name -> color mapping shared by all series below.
  colors <- c("No Intervention"= "black",
              "IHME Model" = "#023858",
              "CAN: Shelter in Place" = "#02818a",
              "CAN: Delay/Distancing" = "#238443",
              'JHU: NPIs 30-40% Effective' = "#d7301f",
              'JHU: NPIs 40-50% Effective' = "#238b45",
              'JHU: NPIs 50-60% Effective' = "#4d004b",
              'JHU: NPIs 60-70% Effective' = "#67001f",
              "JHU: Continuing Lockdown" = "#d7301f",
              'JHU: Slow-paced Reopening' = "#238b45",
              'JHU: Moderate-paced Reopening' = "#4d004b",
              'JHU: Fast-paced Reopening' = "#67001f",
              #"Total Confirmed Cases" = "red",
              "Total Deaths" = "black",
              "Patients Positive for COVID-19" = "black",
              "ICU Patients Positive for COVID-19" = "black"
              #"Positive + Suspected Patients" = "green",
              #"Positive + Suspected ICU" = "blue"
  )
  #test_colors <- c("Continued_Lockdown" = "#d7301f")
  p <- ggplot()
  # Capacity reference line: statewide Phase-1 surge capacity (50k) for the
  # state view, otherwise the county's total licensed beds (when known).
  if (input$selected_crosswalk == "1" & input$drop_hline == TRUE & input$county_ts == state_name) {
    p <- p + geom_line(df, mapping = aes(x= Index, y = 50000), color = "black", linetype = "dashed") +
      geom_text(aes(x = as.Date(input$physical.graph_date_window[[1]]), y= 50000,
                    label = "Phase 1 Surge Capacity"),
                hjust = -0.1,
                vjust = -0.3)
  } else {
    if ( input$selected_crosswalk == "1" & !is.na(total.cnty.beds()) == TRUE ) {
      p <- p + geom_line(df, mapping = aes(x= Index, y = total.cnty.beds()), color = "black", linetype = "dashed") +
        geom_text(aes(x = as.Date(input$physical.graph_date_window[[1]]), y= total.cnty.beds(),
                      label = "Total Licensed Beds"),
                  hjust = -0.1,
                  vjust = -0.3)
    }
  }
  #if ( "No_Intervention" %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes(x = Index, y = No_Intervention), color = "black", size = 1.5, linetype = "dashed") }
  ### JHU Scenarios
  if ( TRUE %in% grepl(jhu.no, chbx)) {
    chbx.M <- chbx[grep(jhu.no,chbx)]
    chbx.M <- unique(str_remove(chbx.M, "\\.[MLH]"))
    for (scenario in chbx.M) {
      # NOTE(review): `c` is reassigned here to a color string; the later calls
      # to c(chbx) still resolve to base::c because R skips non-function
      # bindings in call position, but a different local name would be clearer.
      c <- as.character(colors[match(names(modellist[match(scenario,modellist)]),names(colors))])
      if ( scenario %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes_string(x="Index", y=scenario, color = shQuote(names(modellist[match(scenario,modellist)])) ), size = 1.5, linetype = "solid") }
      if ( paste0(scenario,".M") %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes_string(x="Index", y=paste0(scenario,".M"), color = shQuote(names(modellist[match(scenario,modellist)])) ), size = 1.5, linetype = "solid") }
      if ( paste0(scenario,".L") %in% c(chbx) ) { p <- p + geom_ribbon(df, mapping = aes_string(x ="Index", ymin = paste0(scenario,".L"), ymax = paste0(scenario,".H") ), fill=c, color = c, alpha = 0.2) }
    }
  }
  ### Other Models/Scenarios
  if ( "IHME_sts" %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes(x=Index, y=IHME_sts, color = "IHME Model"), size = 1.5, linetype = "solid") }
  if ( "IHME_sts.L" %in% c(chbx) ) { p <- p + geom_ribbon(df, mapping = aes(x = Index, ymin = IHME_sts.L, ymax = IHME_sts.H), fill="#a6bddb", color = "#a6bddb", alpha = 0.2) }
  if ( "strictDistancingNow" %in% c(chbx) ) { p <- p + geom_point(df, mapping = aes(x=Index, y=strictDistancingNow, color = "CAN: Shelter in Place") ) }
  if ( "weakDistancingNow" %in% c(chbx) ) { p <- p + geom_point(df, mapping = aes(x=Index, y=weakDistancingNow, color = "CAN: Delay/Distancing") ) }
  ### Actuals
  if ( "Total.Count.Deaths" %in% c(chbx) ) {p <- p + geom_point(df, mapping = aes(x = Index, y = Total.Count.Deaths, color = "Total Deaths"), shape = 15, fill = "black", size = 3 ) }
  if ( "COVID.19.Positive.Patients" %in% c(chbx) ) {p <- p + geom_point(df, mapping = aes(x = Index, y = COVID.19.Positive.Patients, color = "Patients Positive for COVID-19"), shape = 23, fill = "black", size = 3 ) }
  if ( "ICU.COVID.19.Positive.Patients" %in% c(chbx) ) {p <- p + geom_point(df, mapping = aes(x = Index, y = ICU.COVID.19.Positive.Patients, color = "ICU Patients Positive for COVID-19"), shape = 19, fill = "black", size = 3 ) }
  # if ( input$overlay_uncontrolled == TRUE ) {
  #   p <- p + scale_y_continuous(labels = scales::comma, limits = c(0, as.numeric(maxy)) )
  # } else {
  p <- p + scale_y_continuous(labels = scales::comma)
  #}
  # Axis labels, title, and theme depend on which crosswalk (metric) is shown:
  # 1 = hospitalizations, 2 = ICU, 3 = cumulative deaths.
  p <- p + labs(x = "Date",
                y = switch(input$selected_crosswalk,
                           "1" = "Hospital Bed Occupancy",
                           "2" = "ICU Bed Occupancy",
                           "3" = "Cumulative Deaths"),
                color = "Legend") + scale_color_manual(values = colors) +
    ggtitle(switch(input$selected_crosswalk,
                   "1" = paste0(input$county_ts," COVID Hospitalizations"),
                   "2" = paste0(input$county_ts," COVID ICU Patients"),
                   "3" = paste0(input$county_ts," COVID Cumulative Deaths")
    )) +
    theme(plot.title = element_text(size = 18, face = "bold"),
          axis.title = element_text(face = "bold", size = 18, colour = "black"),
          axis.text.x = element_text(face = "bold", color = "black", size = 18),
          axis.text.y = element_text(face = "bold", color = "black", size = 18),
          axis.line = element_line(color = "black", size = 1, linetype = "solid"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank(),
          legend.text=element_text(size=14),
          legend.position = "bottom"
    )
  return(p)
})
## download Static figure data
# Data behind the static figure, exposed for CSV download.
# Mirrors the column-selection logic of output$physical.graph.static (note it
# uses literal regexes where the plot uses `jhu.no`; presumably they are the
# same pattern — TODO confirm) and returns a plain data.frame with a Date column.
static.plot.data <- reactive({
  # Same date window as the static plot.
  df <- state.model.xts()[ paste0( as.Date(input$physical.graph_date_window[[1]]),"/",as.Date(input$physical.graph_date_window[[2]]) ) ]
  #dtrange <- paste(as.character(input$dateRange_ts), collapse = "/")
  chbx <- c()
  #### Uncontrolled + Actuals
  if ( input$actuals == TRUE) {chbx <- c(chbx,c(input$select_COVID)) }
  UKKC <- as.character(input$include_JHU_UKKC)
  # JHU scenarios: median only ("M") keeps the bare names, otherwise ".M" suffix.
  if ( TRUE %in% grepl("UK.\\w+.\\d+_\\d+|.\\w+_\\w{4,}", UKKC) & input$physical.mmd == "M" ) {
    JHU_list <- UKKC[grep("UK.\\w+.\\d+_\\d+|.\\w+_\\w{4,}",UKKC)]
    chbx <- c(chbx, c(JHU_list) )
  } else {
    JHU_list <- UKKC[grep("UK.\\w+.\\d+_\\d+|.\\w+_\\w{4,}",UKKC)]
    chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) { paste0(as.character( JHU_list[[i]] ),".M" ) } ) ) ) )
  }
  # Low/high IQR columns for the JHU scenarios.
  if (TRUE %in% grepl("UK.\\w+.\\d+_\\d+|.\\w+_\\w{4,}", UKKC) & input$physical.iqr == TRUE) {
    JHU_list <- UKKC[grep("UK.\\w+.\\d+_\\d+|.\\w+_\\w{4,}",UKKC)]
    chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".L" ) } )) ),
              c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".H" ) } )) ) )
  }
  # IHME is state-level only.
  if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$county_ts == state_name ) {
    chbx <- chbx %>% c("IHME_sts")
  }
  if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$IHME.iqr == TRUE & input$county_ts == state_name) {
    IHME <- "IHME_sts"
    chbx <- c(chbx, c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".L") } )) ),
              c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".H") } )) )
    )
  }
  # CAN series excluded from the ICU crosswalk ("2").
  if ( TRUE %in% grepl("weakDistancingNow|strictDistancingNow",UKKC) & input$selected_crosswalk != "2") {
    can <- UKKC[grep("weakDistancingNow|strictDistancingNow",UKKC)]
    chbx <- chbx %>% c(can)
  }
  # Convert to data.frame and attach an explicit Date column spanning the window
  # (assumes the xts subset has exactly one row per day — TODO confirm).
  df <- df[,c(chbx)] %>% data.frame() %>% mutate(Date = seq(as.Date(input$physical.graph_date_window[[1]]),as.Date(input$physical.graph_date_window[[2]]), by = "day"))
  df
})
# CSV download of the static-figure data. Writes a human-readable file with a
# title row, subtitle row, column labels, the data, and source/credit footers.
# The data cells are coerced to character so rbind() stacks them with the
# header/footer rows without type coercion surprises.
output$dlScenario <- downloadHandler(
  filename = function () {
    paste0("COVID_Scenarios_",input$county_ts,".csv")
  },
  content = function(file) {
    # Title row, padded with empty strings to the data's column count.
    t <- c(paste("Long-term COVID Scenarios for ",input$county_ts, sep = ""),rep("",ncol(static.plot.data())-1))
    # Subtitle row with the download date.
    tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(static.plot.data())-1))
    # Column labels row.
    l <- names(static.plot.data())
    df <- static.plot.data()
    df[is.na(df)] <- 0
    # All-character data so the header/footer rows rbind cleanly.
    df[] <- lapply(df, as.character)
    # Source / attribution footer rows.
    s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(static.plot.data())-1))
    p <- c(paste0("Prepared by: ",state_name," Department of Public Health"), rep("",ncol(static.plot.data())-1))
    dlm <- rbind(t, tt, l, df, s, p)
    # NOTE(review): quote = F means any comma inside a cell would break the CSV
    # layout; fine for the current fixed strings, but fragile.
    write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
    #write.csv(df, file, row.names = F)
  }
)
} # End Server
|
7013ed16548a62f4d999c7a14d45431fd50cb6fe
|
2d6d15e8bd267bd142a45b89d159973f7aa56e81
|
/Activity 5/activity 5 script.R
|
69b0c86d13a29e47c81bdcf759410b9cd68d3d57
|
[] |
no_license
|
kbitsber/ENVST206
|
03e069b95c9f40c7879f0a7463c12b97b4b9d224
|
c27cb3b775c95c4e312b10d60b8b6559540e275f
|
refs/heads/master
| 2023-01-22T22:46:25.193423
| 2020-11-24T23:23:38
| 2020-11-24T23:23:38
| 291,063,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,889
|
r
|
activity 5 script.R
|
#read in weather station file from the data folder
# NOTE(review): absolute, machine-specific path — will only run on the author's
# machine; consider a relative path from the project root.
datW <- read.csv("/Users/kristenbitsberger/Documents/GitHub/ENVST206/Activity 2/Data/a02/noaa2011124.csv")
#specify that the name column should be a factor
datW$NAME<- as.factor(datW$NAME)
#set up a vector of all names for each level
nameS <- levels(datW$NAME)
nameS
#make a dataframe with just precipitation, year, and site name
#remove NA using na.omit
datP <- na.omit(data.frame(NAME=datW$NAME,
year=datW$year,
PRCP=datW$PRCP))
#total annual precipitation (mm)
# NOTE(review): this first aggregate over datW is dead code — it is immediately
# overwritten by the datP-based aggregate on the next statement.
precip <- aggregate(datW$PRCP, by=list(datW$NAME,datW$year), FUN="sum", na.rm=TRUE)
#use aggregate to get total annual precipitation
precip <- aggregate(datP$PRCP, by=list(datP$NAME,datP$year), FUN="sum", na.rm=TRUE)
#rename columns
colnames(precip) <- c("NAME","year","totalP")
#add the x column from aggregate looking at the length of observations in each year
precip$ncount <- aggregate(datP$PRCP, by=list(datP$NAME,datP$year), FUN="length")$x
#make a new dataframe
# keep only near-complete years (>= 364 daily observations)
pr <- precip[precip$ncount >=364, ]
#look at only livermore california and morrisville new york preciptiation
ca <- pr[pr$NAME == nameS[2], ]
ny <- pr[pr$NAME == nameS[5], ]
#make a plot of california precip
# Quick first look: default scatter of annual precipitation.
plot(ca$year, ca$totalP)
#make a plot of california precip
# Second pass: points connected by lines (type = "b") with axis labels.
plot(ca$year, ca$totalP,
type = "b",
pch = 19,
ylab = "Annual precipitation (mm)",
xlab = "Year")
#make a plot of california precip
# Third pass: suppress the default y axis (yaxt = "n") so it can be drawn
# manually with horizontal labels below.
plot(ca$year, ca$totalP,
type = "b",
pch = 19,
ylab = "Annual precipitation (mm)",
xlab = "Year",
yaxt = "n")
#add y axis
#arguments are axis number (1 bottom, 2 left, 3 top, 4 right)
#las = 2 changes the labels to be read in horizontal direction
axis(2, seq(200,800, by=200), las=2 )
plot(ca$year, ca$totalP,
type = "b",
pch = 19,
ylab = "Annual precipitation (mm)",
xlab = "Year",
yaxt = "n")
#add y axis
axis(2, seq(200,800, by=200), las=2 )
#add arizona
# NOTE(review): comment says "arizona" but the data plotted is New York (ny).
points(ny$year, ny$totalP,
type = "b",
pch = 19,
col="tomato3")
# Same plot with a y range wide enough to show both stations (0-1600 mm).
plot(ca$year, ca$totalP,
type = "b",
pch = 19,
ylab = "Annual precipitation (mm)",
xlab = "Year",
yaxt = "n",
ylim =c(0, 1600))
#add y axis
axis(2, seq(0,1600, by=400), las=2 )
#add arizona
points(ny$year, ny$totalP,
type = "b",
pch = 19,
col="tomato3")
# North Dakota vs New York annual precipitation.
# Bug fix: `nd` was never created anywhere in this script, so the original
# plot() call errored with "object 'nd' not found". The legend below labels the
# black series "North Dakota", and the temperature section uses nameS[3] for
# its North Dakota subset (nd_tm), so the same station index is used here.
nd <- pr[pr$NAME == nameS[3], ]
plot(nd$year, nd$totalP,
type = "b",
pch = 19,
ylab = "Annual precipitation (mm)",
xlab = "Year",
yaxt = "n",
ylim =c(0, 1600))
#add y axis (left side, horizontal labels)
axis(2, seq(0,1600, by=400), las=2 )
#add New York in a contrasting color
points(ny$year, ny$totalP,
type = "b",
pch = 19,
col="tomato3")
#add legend
legend("topleft", #position
c("North Dakota", "New York"), #labels
col= c("black", "tomato3"), #colors
pch=19, #point shape
lwd=1, #line thickness 1, anytime both point & line arguments are given both will be drawn
bty="n") #always use this argument otherwise an ugly box is drawn
#question 3
# Mean annual maximum temperature per station/year, keeping only near-complete
# years, then North Dakota vs New York comparison plot.
datT <- na.omit(data.frame(NAME=datW$NAME,
year=datW$year,
TMAX=datW$TMAX))
# NOTE(review): dead code — overwritten by the datT-based aggregate next line.
tmax <- aggregate(datW$TMAX, by=list(datW$NAME,datW$year), FUN="mean", na.rm=TRUE)
tmax <- aggregate(datT$TMAX, by=list(datT$NAME,datT$year), FUN="mean", na.rm=TRUE)
colnames(tmax) <- c("NAME","year","Tmax")
# observation count per station/year, used to filter out incomplete years
tmax$ncount <- aggregate(datT$TMAX, by=list(datT$NAME,datT$year), FUN="length")$x
tm <- tmax[tmax$ncount >=364, ]
ny_tm <- tm[tm$NAME == nameS[5], ]
nd_tm <- tm[tm$NAME == nameS[3], ]
# Base plot with manual axes so both ranges can be controlled explicitly.
plot(nd_tm$year, nd_tm$Tmax,
type = "b",
pch = 19,
ylab = "Annual max temp (C)",
xlab = "Year",
yaxt = "n",
xaxt = "n",
ylim =c(8, 16),
xlim = c(1930, 2020))
#add y axis
axis(2, seq(0,20, by=2), las=2 )
axis(1, seq(1930, 2020, by=5), las=1)
#add new york
points(ny_tm$year, ny_tm$Tmax,
type = "b",
pch = 19,
col="tomato3")
#add legend
legend("topleft", #position
c("North Dakota", "New York"),
col= c("black", "tomato3"),
pch=19,
lwd=1,
bty="n")
# ggplot2 versions of the precipitation plots.
# Fix: never call install.packages() unconditionally inside a script — it hits
# the network and re-installs on every run. Install only when missing.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)
# Annual precipitation per station, colored by station name.
ggplot(data = pr, aes(x = year, y=totalP, color=NAME ) )+ #data for plot
geom_point()+ #make points at data point
geom_path()+ #use lines to connect data points
labs(x="year", y="Annual Precipitation") #make axis labels
# Same plot with the classic theme (no grey background/gridlines).
ggplot(data = pr, aes(x = year, y=totalP, color=NAME ) )+ #data for plot
geom_point()+ #make points at data point
geom_path()+ #use lines to connect data points
labs(x="year", y="Annual Precipitation")+ #make axis labels
theme_classic() #change plot theme
# Semi-transparent points/lines with a manual color palette.
ggplot(data = pr, aes(x = year, y=totalP, color=NAME ) )+
geom_point(alpha=0.5)+
geom_path(alpha=0.5)+
labs(x="year", y="Annual Precipitation")+
theme_classic()+
scale_color_manual(values = c("#7FB3D5","#34495E", "#E7B800", "#FC4E07","#26A69A"))
#question 5
# Same plot with a different (named-color) palette.
ggplot(data = pr, aes(x = year, y=totalP, color=NAME ) )+
geom_point(alpha=0.5)+
geom_path(alpha=0.5)+
labs(x="year", y="Annual Precipitation")+
theme_classic()+
scale_color_manual(values = c("orange","sky blue", "black", "yellow","green"))
# Distribution of daily minimum temperature per station.
ggplot(data = datW, aes(x=NAME, y=TMIN))+ #look at daily tmin
geom_violin(fill=rgb(0.933,0.953,0.98))+ #add a violin plot with blue color
geom_boxplot(width=0.2,size=0.25, fill="grey90")+ #add grey boxplots and make them about 20% smaller than normal with 25% thinner lines than normal
theme_classic() #git rid of ugly gridlines
# Daily time series for one station (nameS[4]) in 1974.
sub <- datW[datW$NAME == nameS[4] & datW$ year == 1974,]
#specify date format
#%Y means a four number year
#- indicates that the date uses dashes to seperate
#%m means month
#%d means day
sub$DATE <- as.Date(sub$DATE,"%Y-%m-%d")
ggplot(data=sub, aes(x=DATE, y=TMAX))+
geom_point()+
geom_path()+
theme_classic()+
labs(x="year", y="Maximimum temperature (C)")
ggplot(data=sub, aes(x=DATE, y=PRCP))+
geom_col(fill="royalblue3")+
theme_classic()+
labs(x="year", y="Daily precipitation (mm)")
#question 8
#Aberdeen: daily TMAX and PRCP time series for 1974.
Ab <- datW[datW$NAME == nameS[1] & datW$ year == 1974,]
# Bug fix: the original parsed `sub$DATE` (the nameS[4] station's 1974 subset)
# into Ab$DATE — wrong station's dates, and the two subsets need not even have
# the same number of rows. Parse Ab's own DATE column instead.
Ab$DATE <- as.Date(Ab$DATE,"%Y-%m-%d")
ggplot(data=Ab, aes(x=DATE, y=TMAX))+
geom_point()+
geom_path()+
theme_classic()+
labs(x="year", y="Maximimum temperature (C)")
ggplot(data=Ab, aes(x=DATE, y=PRCP))+
geom_col(fill="royalblue3")+
theme_classic()+
labs(x="year", y="Daily precipitation (mm)")
#question 9
#Aberdeen
# All-station violin/boxplot of daily TMAX.
ggplot(data = datW, aes(x=NAME, y=TMAX))+
geom_violin(fill=rgb(0.933,0.953,0.98))+
geom_boxplot(width=0.2,size=0.25, fill="grey90")+
theme_classic()
# Fix: an unguarded dev.off() errors ("cannot shut down device 1") when only
# the null device is open, e.g. on a fresh R session.
if (dev.cur() > 1) dev.off()
# Aberdeen from year 2000 on; year as a factor gives one violin per year.
Ab <- datW[datW$NAME == nameS[1] & datW$ year > 1999,]
Ab$year <- as.factor(Ab$year)
#make violin plot
# Bug fix: the original plotted `data = sub` (the unrelated 1974 subset from the
# previous section) against a constant x value, which ignores the per-year
# Aberdeen frame `Ab` prepared just above. Plot Ab by year as intended.
ggplot(data = Ab, aes(x=year, y=TMAX))+
geom_violin(fill=rgb(0.933,0.953,0.98))+
geom_boxplot(width=0.2,size=0.25, fill="grey90")+
theme_classic()+
labs(x= " ", y="TMAX (C)")
|
fee9104001c792dc11407cc01c2d849fa4e7b87d
|
da51412f8bbc686f070cf9c468aa51bb302e36dc
|
/Read_dataset/TCGA_PanCancerAtlas_Publications/Genomic_and_Functional_Approaches_to_Understanding_Cancer_Aneuploidy/1.Generation_TCGA_Cancer_Aneuploidy_dataset.R
|
66eee52b42744dc985c887e0e654e47ccb05bb30
|
[] |
no_license
|
haojiang9999/HCA_script
|
f7c7451e951abb2da83ada71688dd7833276dc4c
|
46eaaecb016559982818f9e2c7c7f5c689b27622
|
refs/heads/master
| 2020-12-21T13:14:58.616545
| 2020-04-01T01:58:28
| 2020-04-01T01:58:28
| 236,439,415
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,596
|
r
|
1.Generation_TCGA_Cancer_Aneuploidy_dataset.R
|
#### 1.Generation_TCGA_Cancer_Aneuploidy_dataset.R
# Paper:Genomic and Functional Approaches to Understanding Cancer Aneuploidy
# Splits the pan-cancer aneuploidy table into one .rds file per cancer type,
# each containing the subsetted scores plus a shared provenance metadata list.
### 1.Read table
TCGA_Aneuploidy <- read.csv("Table_S2_Chromosome_Arm_Calls_and_Aneuploidy_Scores_Figure1.csv",
header = TRUE)
# Normalize sample IDs: TCGA barcodes use "-", R-friendly rownames use ".".
sampleID <- as.character(TCGA_Aneuploidy$Sample)
sampleID <- gsub("-",".",sampleID)
# Keep the normalized ID both as a column and as the rownames.
TCGA_Aneuploidy$rownames <- sampleID
rownames(TCGA_Aneuploidy) <- sampleID
### 2.Cancer Types
table(TCGA_Aneuploidy$Type)
TCGA.cancer.types <- names(table(TCGA_Aneuploidy$Type))
#### 3. Metadata generation ####
# Shared provenance info stored alongside every per-type dataset.
Aneuploidy.metadata <- list(Paper = "Genomic and Functional Approaches to Understanding Cancer Aneuploidy",
Table = "Table S2. Sample Chromosome Arm Calls and Aneuploidy Scores, Related to Figure 1",
doi = "https://doi.org/10.1016/j.ccell.2018.03.007")
#### 4.Generate_Aneuploidy_dataset ####
#i="ACC"
for(i in TCGA.cancer.types){
### Step1 separate data by cancer types ###
TCGA_Index <- TCGA_Aneuploidy$Type == i
TCGA_Aneuploidy_sub <- TCGA_Aneuploidy[TCGA_Index,]
## Step4 Build TCGA data sets: list of (scores, metadata), with per-type names.
TCGA_Aneuploidy_sub_list <- list(TCGA_Aneuploidy_sub = TCGA_Aneuploidy_sub,
Aneuploidy.metadata = Aneuploidy.metadata)
names(TCGA_Aneuploidy_sub_list)<-c(paste0(i,".Aneuploidy.score"),
paste0(i,".Aneuploidy.metadata"))
saveRDS(TCGA_Aneuploidy_sub_list, file = paste0(i,"_Aneuploidy_score_dataset.rds"))
}
|
2a7641fe5c48e94b70e54f59b944eea6963cf42c
|
a485f853eca5e3d1783405c56f33d62ea2ed5972
|
/pipelineMERGED.R
|
8a365122c99be63040841fc16ed0e72895166888
|
[] |
no_license
|
findcomrade/DSEA
|
488ba28fad958a57171f9b1025aba16eefd97e76
|
d2213a94997f840771f688bc335f23c653b0faa2
|
refs/heads/master
| 2021-01-02T22:31:59.786365
| 2014-07-20T13:20:31
| 2014-07-20T13:20:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,958
|
r
|
pipelineMERGED.R
|
#
# DSEA: the Second Step - ENRICHMENT
#
# Author: Dmitrii Bychkov, FIMM 2013
# (dmitrii.bychkov@helsinki.fi)
#######################################################
# NOTE(review): hard-coded setwd() makes the script machine-specific; kept for
# backward compatibility, but running from the project root would be safer.
setwd("/home/comrade/Ubuntu One/DSEA/r-code")
source('pipeline_sup.R')
library(grid)
library(gplots)
library(ggplot2)
library(RJSONIO)
library(reshape2)
# 1. Upload a New Screen
# Idiom fix: left assignment with <- instead of the right-assign arrow.
data.MERGED <- read.csv(file="../datasets/merged_dss_new.csv", head=TRUE, sep="\t") # use check.names
#read.csv(file="../datasets/leukemia_all_celllines_data_DSS.csv", head=TRUE, sep="\t") -> data.MERGED
# 2. Identify (a) top Sensitive and (b) Resistant Drugs
cell.line <- "SR"
matrix.MERGED <- data.MERGED
matrix.MERGED <- data.matrix(matrix.MERGED[,-c(1,2)]) # del 1st & 2nd cols
rownames(matrix.MERGED) <- data.MERGED[,2] # assign rownames with drug names
#drop <- which(apply(matrix.MERGED,1,sum) == 0)
#matrix.MERGED <- matrix.MERGED[-drop,]
#nas <- is.na(matrix.MERGED); matrix.MERGED[nas] <- 0
# Bug fix: the original `remove(nas, drop)` warned on every run because `nas`
# and `drop` are only created by the commented-out lines above. Remove them
# only if they actually exist.
rm(list = intersect(c("nas", "drop"), ls()))
# DSS threshold for calling a drug "sensitive"
cut <- 11
drugSensitivity(matrix.MERGED, cell.line, cut)
plot( density( matrix.MERGED[,cell.line], na.rm=TRUE), main = "Full Set", xlab = "DSS" )
hist(matrix.MERGED[,cell.line])
drugs.sensitive <- topSensitive(matrix.MERGED, cell.line, cut)
drugs.resistant <- topResistant(matrix.MERGED, cell.line)
drug.list <- drugs.sensitive$DrugName
x <- matrix.MERGED[drug.list,]
# 3. Upload corresponding data set with clusters
# (loads `tree.DRUGS` — a drug/cluster table — into the workspace)
load('RData/leukemiaClust.RData')
# 4. Push Both Sets for Enrichment
# That is to verify that most of sensitive drugs
# from a set tend to appear in the same cluster
enrichment.table <- buildEnrichmentD(tree.DRUGS, drugs.sensitive, drugs.resistant)
# Add information (new col) to 'tree.DRUGS' about
# which drugs to highlight: sensitive or resistant
is.top <- tree.DRUGS[,"DrugName"] %in% drugs.sensitive$DrugName # sensit
is.bot <- tree.DRUGS[,"DrugName"] %in% drugs.resistant$DrugName # resist
# isTop: 1 = sensitive, 0 = everything else (resistant flagging is disabled).
tree.DRUGS[,"isTop"] <- 0
tree.DRUGS[is.top,"isTop"] <- 1
#tree.DRUGS[is.bot,"isTop"] <- -1
# Export for the d3 circular dendrogram visualization.
dropJSON(tree.DRUGS, path='/home/comrade/Projects/d3.v3/circular.json')
# Build a per-cluster drug/target annotation table by joining tree.DRUGS with
# KEGG annotations and WHO target-class descriptions.
# Bug fix: the original initializer declared `Level1` twice (the second should
# be `Level2`), so its column names never matched the rows produced in the
# loop and rbind() failed on name mismatch.
tree.drugs.TARGET <- data.frame( Cluster=integer(), PubChem.CID=factor(), DrugName=character(),
Kegg=factor(), Who=character(), Level1=factor(), Level2=factor() )
for(cluster in unique(tree.DRUGS$Cluster)){
# drugs of this cluster that have a PubChem CID
drop <- tree.DRUGS[,"Cluster"] == cluster & !is.na(tree.DRUGS[,"PubChem.CID"])
cids <- tree.DRUGS[drop, c("DrugName","PubChem.CID")]
for(cid in cids$PubChem.CID){
if(cid %in% annotations.MERGED$Pubchem_CID){
# Fix: original had a duplicated assignment (`drop <- drop <- which(...)`).
drop <- which( annotations.MERGED[,"Pubchem_CID"] == cid)
keggs <- annotations.MERGED[drop,"KEGG_id"]
for(kegg in keggs){
# WHO name and the two levels of the target-class hierarchy
who <- target.class.AML[target.class.AML[,"KEGG_id"] == kegg,"Who_Name"]
l1 <- target.class.AML[target.class.AML[,"KEGG_id"] == kegg,"level_1_Description"]
l2 <- target.class.AML[target.class.AML[,"KEGG_id"] == kegg,"level_2_Description"]
df <- data.frame( Cluster=as.integer(cluster), PubChem.CID=cid,
DrugName=cids[cids[,"PubChem.CID"] == cid,"DrugName"],
Kegg=kegg, Who=who, Level1=factor(l1), Level2=factor(l2) )
colnames(df) <- c("Cluster","PubChem.CID","DrugName","Kegg","Who","Level1", "Level2")
# NOTE(review): growing via rbind in a loop is O(n^2); acceptable for the
# small tables used here.
tree.drugs.TARGET <- rbind(tree.drugs.TARGET, df)
}
}
}
}
remove(drop,who,l1,l2,df,cid,cids,kegg,cluster)
# Target-class distribution of cluster 8 (Who/Level1/Level2 columns).
p <- tree.drugs.TARGET[tree.drugs.TARGET[,"Cluster"] == 8, c(5,6,7)]
plotDrugClassesDistibution(p, category.name='Targets')
# Cluster x Level1 contingency table for the circle-packing export.
tab <- table(tree.drugs.TARGET$Cluster,tree.drugs.TARGET$Level1)
tab <- data.frame(tab)
colnames(tab) <- c("Cluster", "SampleName", "isTop")
tab$Cluster <- as.integer(tab$Cluster)
#tab.sep <- tab[tab[,"Cluster"] == 4 | tab[,"Cluster"] == 2,]
tab.sep <- tab
# Bug fix: `tab.sep[-which(...), ]` deletes EVERY row when no count is zero
# (negative indexing with an empty index keeps nothing). Use logical
# subsetting, which handles the empty case correctly.
tab.sep <- tab.sep[tab.sep$isTop != 0, ]
dropCirclePackingJSON(tab.sep, path='/home/comrade/Projects/d3.v3/circle_packing.json')
|
5240d4bcd50441d1eb15c27a3c8538c4e051a502
|
7747a3fdf0fdc57b767d8ed199b323afb4d491a2
|
/R/sim_detect.r
|
574efc81a32ab93995b1e59a82d3e2d0a78a20dc
|
[] |
no_license
|
ianjonsen/simsmolt
|
dcafaad041d6caa29cd573cd543dbeab7e14868a
|
09c9a8b8132bedaa499dd71c5c2fc6e2439256eb
|
refs/heads/master
| 2022-07-28T10:03:06.683400
| 2022-07-07T14:13:08
| 2022-07-07T14:13:08
| 155,731,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,973
|
r
|
sim_detect.r
|
#' @title simulate acoustic transmissions & detection, using \code{simulate} & \code{sim_setup} output
#'
#' @description simulates transmissions & detections along simulated track segments within a defined range of acoustic array(s)
#'
#' @author Ian Jonsen \email{ian.jonsen@mq.edu.au}
#'
#' @param s - a simsmolt class list containing output from sim_setup and sim_move
#' @param delay - min & max time intervals (s) between transmissions
#' @param burst - duration of each transmission (s)
#' @param noise - range 0 - 1; simulate effect of noisy environment. Reduces detection prob w dist
#' by specified proportion; default = 1, no reduction
#' @importFrom sp Polygon Polygons SpatialPolygons CRS
#' @importFrom sf st_as_sf st_contains
#' @importFrom raster buffer
#' @importFrom prevR point.in.SpatialPolygons
#' @importFrom dplyr %>% bind_rows mutate arrange desc
#' @importFrom stats plogis
#' @export
#'
sim_detect <-
  function(s, data, delay = c(50,130), burst = 5.0, noise = 1){
    ## simulate tag transmissions along track but only within +/-10 km of avg receiver location
    ## otherwise trap() output is far too big to generate along full track
    ## - convert locs from km to m grid; vel in m/s
    # Inputs: s = simulation object (track in s$sim, detection-range parameter
    # in s$params$pars$pdrf); data = setup list with recLocs (km grid),
    # optionally rec ("lines" vs other layouts), recPoly, and prj (CRS).
    if(!exists("recLocs", data)) stop("no receiver locations present in data")
    recLocs <- data$recLocs
    trans <- tmp.tr <- dt <- tmp.dt <- NULL
    # b: slope/intercept of the logistic detection-probability function
    b <- s$params$pars$pdrf
    if(exists("rec", data)) {
      if (data$rec == "lines") {
        # Receiver lines: find track points within 1.5 km (in y) of each line.
        yrec <- recLocs$y %>% unique()
        in.rng <- lapply(1:length(yrec), function(i) {
          which(abs(yrec[i] - s$sim[, "y"]) <= 1.5)
        })
        ## drop rec lines that smolt did not cross
        in.rng <- in.rng[which(sapply(in.rng, length) > 0)]
        ## simulate transmissions on each crossed line segment (km -> m)
        trans <- lapply(1:length(in.rng), function(i) {
          path <- s$sim[in.rng[[i]], c("id", "date", "x", "y")]
          path[, c("x", "y")] <- path[, c("x", "y")] * 1000
          sim_transmit(path, delayRng = delay, burstDur = burst) #%>%
          #  mutate(line = rep(paste0("l", i), nrow(.)))
        }) %>%
          do.call(rbind, .)
      } else if (data$rec != "lines") {
        # Non-line layout: keep track points inside the receiver polygon.
        sim_sf <- st_as_sf(s$sim, coords = c("x", "y"), crs = data$prj)
        in.rng <- st_contains(data$recPoly, sim_sf)[[1]]
        path <- s$sim[in.rng, c("id", "date", "x", "y")]
        path[, c("x", "y")] <- path[, c("x", "y")] * 1000
        # NOTE(review): `length(in.rng >= 1)` evaluates length of a logical
        # vector, which is always >= 1 here; presumably length(in.rng) >= 1
        # was intended — confirm before relying on the else branch.
        if (length(in.rng >= 1)) {
          trans <- sim_transmit(path, delayRng = delay, burstDur = burst)
        } else {
          trans <- NULL
        }
      }
    } else if(!exists("rec", data)) {
      # No layout flag: restrict to recPoly when present, otherwise whole track.
      if(!is.null(data$recPoly)) {
        sim_sf <- st_as_sf(s$sim, coords = c("x", "y"), crs = data$prj)
        in.rng <- st_contains(data$recPoly, sim_sf)[[1]]
      } else {
        in.rng <- rep(TRUE, nrow(s$sim))
      }
      path <- s$sim[in.rng, c("id","date","x","y")]
      path[, c("x","y")] <- path[, c("x","y")] * 1000
      if(length(in.rng) >= 1) {
        trans <- sim_transmit(path, delayRng = delay, burstDur = burst)
      } else {
        trans <- NULL
      }
    }
    ## define logistic detection range (m) function
    ## parameterised from analysis of SoBI sentinel tag detections
    ## in July 2009 & July 2010 (see ~/Dropbox/collab/otn/fred/r/fn/sentinel.r)
    ## simulate detections given receiver locations & simulated transmission along track
    # Receiver coordinates km -> m to match the transmission coordinates.
    recLocs <- recLocs %>%
      mutate(x = x * 1000, y = y * 1000)
    if(!is.null(trans)) {
      detect <- trans %>%
        pdet(trs = ., rec = recLocs[, c("id","array","x","y","z")], b = b, noise = noise)
    } else {
      detect <- NULL
    }
    #    s$trans <- trans %>%
    #      select(id, date, x, y) %>%
    #      arrange(date)
    # Attach detections (sorted) to the simulation object and return it.
    if(!is.null(detect)) {
      s$detect <- detect %>%
        arrange(date, recv_id, trns_id)
    } else {
      s$detect <- detect
    }
    return(s)
  }
|
7b4fa82fb8bf987cd6938ea70da4f5fecfeb4928
|
3675404b45f273e879b36ea7a6af282502bab2e6
|
/Project/Analytics/Other_materials/R/phase_plane_newborn_model_1.R
|
6e1c94089ea075658b6d8a04310bd9eac5d7047d
|
[] |
no_license
|
rgrzhang/RogerZhang
|
7914c335a8b22f11b45145c7914d740b1effc3ea
|
9a51308fe4f559d3c931ecaf2e8bf10979e2dddc
|
refs/heads/master
| 2021-05-10T13:58:20.420233
| 2020-07-13T03:19:03
| 2020-07-13T03:19:03
| 118,496,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,087
|
r
|
phase_plane_newborn_model_1.R
|
library(phaseR)
# Right-hand side of the two-state SIR-style newborn model, in the
# (t, y, parameters) form expected by deSolve/phaseR integrators.
# y[1] = S, y[2] = I; `parameters` is the scalar vaccination fraction p.
FHN <- function(t, y, parameters) {
  p <- parameters
  beta <- 4.5       # transmission coefficient
  mu <- 11 / 9136   # birth/death rate
  dy <- numeric(2)
  # dS/dt: unvaccinated births minus infection and natural loss
  dy[1] <- mu * (1 - p) - beta * y[1] * y[2] - mu * y[1]
  # dI/dt: new infections plus vaccinated-birth inflow minus recovery/removal
  dy[2] <- beta * y[1] * y[2] + mu * p - y[2]
  list(dy)
}
# Draw the phase plane (flow field + nullclines + one trajectory) of the FHN
# system for a given vaccination fraction p, using the phaseR package.
# Side effect only: draws on the current graphics device.
phasePlot <- function(FHN, p=0.2){
  # Direction-field arrows on a 15x15 grid over S in [0, 0.4], I in [0, 0.02].
  FHN.flowField <- flowField(FHN, x.lim = c(0, 0.4),
                             y.lim = c(0, 0.02),
                             xlab="S", ylab="I",
                             main=paste0("p=", p),
                             parameters = p,
                             points = 15, add = FALSE)
  # Nullclines (dS/dt = 0 and dI/dt = 0) evaluated at 500 points.
  FHN.nullclines <- nullclines(FHN, x.lim = c(0, 0.4),
                               y.lim = c(0, 0.02),
                               parameters = p,
                               points = 500)
  # Single trajectory started near the endemic equilibrium
  # (S = 1/4.5, the dI/dt = 0 nullcline value for p = 0 — TODO confirm the
  # I coordinate's derivation), integrated to t = 500.
  y0 <- matrix(c(1/4.5,0.000936466),
               ncol = 2, nrow = 1,
               byrow = TRUE)
  FHN.trajectory <- trajectory(FHN, y0 = y0, t.end = 500,
                               parameters = p)
}
# 2x2 panel of phase planes for increasing vaccination fractions;
# restore the previous par() settings afterwards.
op <- par(mfrow=c(2,2))
phasePlot(FHN, p= 0)
phasePlot(FHN, p=0.2)
phasePlot(FHN, p=0.5)
phasePlot(FHN, p=0.8)
par(op)
|
38b571c834ee09f2e79568da9d8c50bbe2377a9c
|
060c6a303098ef689c43ea0feff68c68272477e9
|
/RNASeq_analysis/UCFFigs.R
|
a7ccc5537cccef6878f6331af2e5b5cdeec1d1ea
|
[] |
no_license
|
Sage-Bionetworks/Synodos_NF2
|
e1c004191de438d6efa2d565f7d1c1e36a90efaa
|
1506b57c74469439e81fe8afbc6de9add681c57c
|
refs/heads/master
| 2022-12-20T16:20:09.620615
| 2022-12-14T22:53:52
| 2022-12-14T22:53:52
| 20,036,276
| 2
| 0
| null | 2014-05-28T18:09:16
| 2014-05-21T20:29:18
|
R
|
UTF-8
|
R
| false
| false
| 1,658
|
r
|
UCFFigs.R
|
library(synapseClient)
library(VennDiagram)
library(ggplot2)
library(ggrepel)
# NOTE(review): `%>%` and `filter()` below come from magrittr/dplyr, which are
# not loaded here — presumably attached elsewhere in the session; add
# library(dplyr) to make the script self-contained (also avoids the clash with
# stats::filter).
synapseLogin()
# Significant (BH < 0.05) results from the current and a prior (v11) release
# of the same Synapse table.
sch.new <- read.table(synGet("syn9884855")@filePath, sep = "\t", header = T) %>% filter(BH<0.05)
sch.old <- read.table(synGet("syn9884855", version = 11)@filePath, sep = "\t", header = T) %>% filter(BH<0.05)
# One Venn diagram per comparison: overlap of significant Ensembl IDs
# between the new and old releases. venn.diagram() writes the PNG directly.
for(x in unique(sch.new$comparison)){
print(x)
sch.new.foo <- filter(sch.new, comparison == x)
sch.old.foo <- filter(sch.old, comparison == x)
ens<-list(na.omit(unique(sch.new.foo$ensembl)), na.omit(unique(sch.old.foo$ensembl)))
names(ens) <- c("new", "old")
venn.diagram(ens, filename = paste0(x,"_ensembl_venn.png"),
imagetype = "png",
compression = "lzw",
height = 1200,
width = 1200,
resolution = 300,
units = "px")
}
# Volcano plot per comparison (all rows, no significance filter): logFC vs
# -log(BH), significant points colored, top-10 up/down genes labelled.
sch.new2 <- read.table(synGet("syn9884855")@filePath, sep = "\t", header = TRUE)
for(x in unique(sch.new2$comparison)){
sch.new.foo <- filter(sch.new2, comparison == x)
# Robustness fix: inside a for loop the ggplot object is never auto-printed,
# so the original relied on ggplot2's last_plot() bookkeeping for ggsave().
# Assign the plot and pass it to ggsave() explicitly instead.
volcano <- ggplot(data = sch.new.foo, aes(x = logFC, y = -log(BH))) +
ggthemes::theme_few() +
geom_point(aes(color = BH < 0.05)) +
scale_color_manual(values=c("FALSE"="lightgrey","TRUE"="#586BA4")) +
geom_label_repel(data = filter(sch.new.foo, BH < 0.05) %>%
top_n(10, logFC),
aes(x = logFC, y = -log(BH), label = Hugo_Gene), fill = "#FF7780") +
geom_label_repel(data = filter(sch.new.foo, BH < 0.05) %>%
top_n(10, -logFC),
aes(x = logFC, y = -log(BH), label = Hugo_Gene), fill = "#60BAFF") +
ggtitle(x)
ggsave(paste0(x,"_VolcanoPlotsforUCF.png"), plot = volcano)
}
|
bb1708984dcb38170c8b12ffe5c65294ffcf80ea
|
e25af04a06ef87eb9fc0c3c8a580b8ca4e663c9b
|
/R/unif-alts.R
|
bb4e4e9c4d3e37347856d21f2a9aeb64b555f01a
|
[] |
no_license
|
cran/sphunif
|
c049569cf09115bb9d4a47333b85c5b7522e7fd8
|
4dafb9d08e3ac8843e8e961defcf11abe2efa534
|
refs/heads/master
| 2023-07-16T01:12:47.852866
| 2021-09-02T06:40:02
| 2021-09-02T06:40:02
| 402,474,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,639
|
r
|
unif-alts.R
|
#' @title Local projected alternatives to uniformity
#'
#' @description Density and random generation for local projected alternatives
#' to uniformity with densities
#' \deqn{f_{\kappa, \boldsymbol{\mu}}({\bf x}): =
#' \frac{1 - \kappa}{\omega_p} + \kappa f({\bf x}'\boldsymbol{\mu})}{
#' f_{\kappa, \mu}(x) = (1 - \kappa) / \omega_p + \kappa f(x'\mu)}
#' where
#' \deqn{f(z) = \frac{1}{\omega_p}\left\{1 + \sum_{k = 1}^\infty u_{k, p}
#' C_k^{p / 2 - 1}(z)\right\}}{f(x) = (1 / \omega_p)
#' \{1 + \sum_{k = 1}^\infty u_{k, p} C_k^(p / 2 - 1)(z)\}}
#' is the \emph{angular function} controlling the local alternative in a
#' \link[=Gegenbauer]{Gegenbauer series}, \eqn{0\le \kappa \le 1},
#' \eqn{\boldsymbol{\mu}}{\mu} is a direction on \eqn{S^{p - 1}}, and
#' \eqn{\omega_p} is the surface area of \eqn{S^{p - 1}}. The sequence
#' \eqn{\{u_{k, p}\}} is typically such that
#' \eqn{u_{k, p} = \left(1 + \frac{2k}{p - 2}\right) b_{k, p}}{
#' u_{k, p} = (1 + 2k / (p - 2)) b_{k, p}} for the Gegenbauer coefficients
#' \eqn{\{b_{k, p}\}} of the kernel function of a Sobolev statistic (see the
#' \link[=Sobolev_coefs]{transformation} between the coefficients \eqn{u_{k, p}}
#' and \eqn{b_{k, p}}).
#'
#' Also, automatic truncation of the series \eqn{\sum_{k = 1}^\infty u_{k, p}
#' C_k^{p / 2 - 1}(z)}{\sum_{k = 1}^\infty u_{k, p} C_k^(p / 2 - 1)(z)}
#' according to the proportion of \link[=Gegenbauer]{"Gegenbauer norm"}
#' explained.
#'
#' @param z projected evaluation points for \eqn{f}, a vector with entries on
#' \eqn{[-1, 1]}.
#' @inheritParams Sobolev_coefs
#' @inheritParams rotasym::d_tang_norm
#' @param mu a unit norm vector of size \code{p} giving the axis of rotational
#' symmetry.
#' @param f angular function defined on \eqn{[-1, 1]}. Must be vectorized.
#' @param kappa the strength of the local alternative, between \code{0}
#' and \code{1}.
#' @inheritParams r_unif
#' @param F_inv quantile function associated to \eqn{f}. Computed by
#' \code{\link{F_inv_from_f}} if \code{NULL} (default).
#' @inheritParams Gegenbauer
#' @param ... further parameters passed to \code{\link{F_inv_from_f}}.
#' @param K_max integer giving the truncation of the series. Defaults to
#' \code{1e4}.
#' @param thre proportion of norm \emph{not} explained by the first terms of the
#' truncated series. Defaults to \code{1e-3}.
#' @inheritParams Sobolev
#' @param verbose output information about the truncation (\code{TRUE} or
#' \code{1}) and a diagnostic plot (\code{2})? Defaults to \code{FALSE}.
#' @return
#' \itemize{
#' \item \code{f_locdev}: angular function evaluated at \code{x}, a vector.
#' \item \code{con_f}: normalizing constant \eqn{c_f} of \eqn{f}, a scalar.
#' \item \code{d_locdev}: density function evaluated at \code{x}, a vector.
#' \item \code{r_locdev}: a matrix of size \code{c(n, p)} containing a random
#' sample from the density \eqn{f_{\kappa, \boldsymbol{\mu}}}{
#' f_{\kappa, \mu}}.
#' \item \code{cutoff_locdev}: vector of coefficients \eqn{\{u_{k, p}\}}
#' automatically truncated according to \code{K_max} and \code{thre}
#' (see details).
#' }
#' @details
#' See the definitions of local alternatives in Prentice (1978) and in
#' García-Portugués et al. (2020).
#'
#' The truncation of \eqn{\sum_{k = 1}^\infty u_{k, p} C_k^{p / 2 - 1}(z)}{
#' \sum_{k = 1}^\infty u_{k, p} C_k^(p / 2 - 1)(z)} is done to the first
#' \code{K_max} terms and then up to the index such that the first terms
#' leave unexplained the proportion \code{thre} of the norm of the whole series.
#' Setting \code{thre = 0} truncates to \code{K_max} terms exactly. If the
#' series only contains odd or even non-zero terms, then only \code{K_max / 2}
#' addends are \emph{effectively} taken into account in the first truncation.
#' @references
#' García-Portugués, E., Navarro-Esteban, P., Cuesta-Albertos, J. A. (2020)
#' On a projection-based class of uniformity tests on the hypersphere.
#' \emph{arXiv:2008.09897}. \url{https://arxiv.org/abs/2008.09897}
#'
#' Prentice, M. J. (1978). On invariant tests of uniformity for directions and
#' orientations. \emph{The Annals of Statistics}, 6(1):169--176.
#' \doi{10.1214/aos/1176344075}
#' @examples
#' ## Local alternatives diagnostics
#'
#' loc_alt_diagnostic <- function(p, type, thre = 1e-3, K_max = 1e3) {
#'
#' # Coefficients of the alternative
#' uk <- cutoff_locdev(K_max = K_max, p = p, type = type, thre = thre,
#' N = 640)
#'
#' old_par <- par(mfrow = c(2, 2))
#'
#' # Construction of f
#' z <- seq(-1, 1, l = 1e3)
#' f <- function(z) f_locdev(z = z, p = p, uk = uk)
#' plot(z, f(z), type = "l", xlab = expression(z), ylab = expression(f(z)),
#' main = paste0("Local alternative f, ", type, ", p = ", p), log = "y")
#'
#' # Projected density on [-1, 1]
#' f_proj <- function(z) rotasym::w_p(p = p - 1) * f(z) *
#' (1 - z^2)^((p - 3) / 2)
#' plot(z, f_proj(z), type = "l", xlab = expression(z),
#' ylab = expression(omega[p - 1] * f(z) * (1 - z^2)^{(p - 3) / 2}),
#' main = paste0("Projected density, ", type, ", p = ", p), log = "y",
#' sub = paste("Integral:", round(con_f(f = f, p = p), 4)))
#'
#' # Quantile function for projected density
#' mu <- c(rep(0, p - 1), 1)
#' F_inv <- F_inv_from_f(f = f, p = p, K = 5e2)
#' plot(F_inv, xlab = expression(x), ylab = expression(F^{-1}*(x)),
#' main = paste0("Quantile function, ", type, ", p = ", p))
#'
#' # Sample from the alternative and plot the projected sample
#' n <- 5e4
#' samp <- r_locdev(n = n, mu = mu, f = f, kappa = 1, F_inv = F_inv)
#' plot(z, f_proj(z), col = 2, type = "l",
#' main = paste0("Simulated projected data, ", type, ", p = ", p),
#' ylim = c(0, 1.75))
#' hist(samp %*% mu, freq = FALSE, breaks = seq(-1, 1, l = 50), add = TRUE)
#'
#' par(old_par)
#'
#' }
#' \donttest{
#' ## Local alternatives for the PCvM test
#'
#' loc_alt_diagnostic(p = 2, type = "PCvM")
#' loc_alt_diagnostic(p = 3, type = "PCvM")
#' loc_alt_diagnostic(p = 4, type = "PCvM")
#' loc_alt_diagnostic(p = 5, type = "PCvM")
#' loc_alt_diagnostic(p = 11, type = "PCvM")
#'
#' ## Local alternatives for the PAD test
#'
#' loc_alt_diagnostic(p = 2, type = "PAD")
#' loc_alt_diagnostic(p = 3, type = "PAD")
#' loc_alt_diagnostic(p = 4, type = "PAD")
#' loc_alt_diagnostic(p = 5, type = "PAD")
#' loc_alt_diagnostic(p = 11, type = "PAD")
#'
#' ## Local alternatives for the PRt test
#'
#' loc_alt_diagnostic(p = 2, type = "PRt")
#' loc_alt_diagnostic(p = 3, type = "PRt")
#' loc_alt_diagnostic(p = 4, type = "PRt")
#' loc_alt_diagnostic(p = 5, type = "PRt")
#' loc_alt_diagnostic(p = 11, type = "PRt")
#' }
#' @name locdev
#' @rdname locdev
#' @export
f_locdev <- function(z, p, uk) {
  # Directions live on S^{p - 1}, so p must be at least 2
  stopifnot(p >= 2)
  # Angular function: 1 plus the Gegenbauer expansion with coefficients uk,
  # evaluated at theta = acos(z)
  k_seq <- seq_along(uk)
  dens <- 1 + Gegen_series(theta = acos(z), coefs = uk, p = p, k = k_seq)
  # Scale by the surface area of S^{p - 1} so that
  # \omega_{p - 1} * f(z) * (1 - z^2)^((p - 3) / 2) integrates to one
  dens / rotasym::w_p(p = p)
}
#' @rdname locdev
#' @export
con_f <- function(f, p, N = 320) {
  # Gauss--Legendre nodes and weights for theta on [0, pi]
  nodes <- drop(Gauss_Legen_nodes(a = 0, b = pi, N = N))
  weights <- drop(Gauss_Legen_weights(a = 0, b = pi, N = N))
  # \int_{-1}^{1} \omega_{p - 1} f(z) (1 - z^2)^{(p - 3) / 2} dz with the
  # change of variables z = cos(theta); the Jacobian gives sin(theta)^(p - 2)
  integrand <- f(cos(nodes)) * sin(nodes)^(p - 2)
  integral <- rotasym::w_p(p = p - 1) * sum(weights * integrand, na.rm = TRUE)
  # The normalizing constant c_f is the reciprocal of the integral
  1 / integral
}
#' @rdname locdev
#' @export
d_locdev <- function(x, mu, f, kappa) {
  # Coerce a single observation (plain vector) into a one-row matrix
  if (is.null(dim(x))) {
    x <- rbind(x)
  }
  # Observations must live on the same sphere S^{p - 1} as mu
  stopifnot(ncol(x) == length(mu))
  # kappa is the mixing weight of the alternative; must lie in [0, 1]
  stopifnot(0 <= kappa & kappa <= 1)
  # Density of the non-uniform component: tangent-normal density with axis
  # mu, uniform tangent part, and angular function f supplied on the log
  # scale. NOTE(review): g_scaled is assumed to be the *normalized*
  # log-density of the projection -- confirm against rotasym::d_tang_norm.
  if (kappa > 0) {
    f1 <- rotasym::d_tang_norm(x = x, theta = mu,
                               d_U = rotasym::d_unif_sphere,
                               g_scaled = function(z, log = TRUE) log(f(z)))
  } else {
    # Under the null (kappa = 0) the alternative part is skipped entirely
    f1 <- 0
  }
  # Density of the uniform component on S^{p - 1}
  f0 <- rotasym::d_unif_sphere(x = x)
  # Mixture density: (1 - kappa) * uniform + kappa * alternative
  return((1 - kappa) * f0 + kappa * f1)
}
#' @rdname locdev
#' @export
r_locdev <- function(n, mu, f, kappa, F_inv = NULL, ...) {
  # Ambient dimension is inferred from the axis of rotational symmetry
  p <- length(mu)
  # kappa is a mixing weight and must lie in [0, 1]
  stopifnot(0 <= kappa & kappa <= 1)
  # Pure uniformity: nothing to mix, return a uniform sample directly
  if (kappa == 0) {
    return(r_unif_sph(n = n, p = p, M = 1)[, , 1])
  }
  # Build the quantile function of the projected density if not supplied
  if (is.null(F_inv)) {
    F_inv <- F_inv_from_f(f = f, p = p, ...)
  }
  # Bernoulli(kappa) flags decide which rows come from the alternative
  out <- matrix(0, nrow = n, ncol = p)
  alt_idx <- runif(n = n) <= kappa
  n_alt <- sum(alt_idx)
  # Alternative component: tangent-normal sample about mu, with uniform
  # tangent part and projections drawn through F_inv
  if (n_alt > 0) {
    draw_proj <- function(n) F_inv(runif(n = n))
    draw_tang <- function(n) r_unif_sph(n = n, p = p - 1, M = 1)[, , 1]
    out[alt_idx, ] <- rotasym::r_tang_norm(n = n_alt, theta = mu,
                                           r_V = draw_proj, r_U = draw_tang)
  }
  # Remaining rows are uniform on S^{p - 1}
  if (n_alt < n) {
    out[!alt_idx, ] <- r_unif_sph(n = n - n_alt, p = p, M = 1)[, , 1]
  }
  out
}
#' @rdname locdev
#' @export
cutoff_locdev <- function(p, K_max = 1e4, thre = 1e-3, type, Rothman_t = 1 / 3,
                          Pycke_q = 0.5, verbose = FALSE, Gauss = TRUE, N = 320,
                          tol = 1e-6) {
  # Squared weights v_{k, p}^2 of the asymptotic null distribution of the
  # requested Sobolev statistic (no truncation here: thre = 0)
  vk2 <- weights_dfs_Sobolev(p, K_max = K_max, thre = 0, type = type,
                             Rothman_t = Rothman_t, Pycke_q = Pycke_q,
                             log = FALSE, verbose = FALSE, Gauss = Gauss,
                             N = N, tol = tol)$weights
  K_max_new <- length(vk2)
  # Signs of the u_{k, p}: known in closed form for the Rothman-type tests
  # (via the a_{k, x} coefficients); otherwise positive signs are used as an
  # experimental default
  if (type %in% c("PRt", "Rothman", "Ajne")) {
    x_t <- drop(q_proj_unif(u = ifelse(type == "Ajne", 0.5, Rothman_t), p = p))
    signs <- akx(x = x_t, p = p, k = seq_len(K_max_new), sqr = TRUE)
  } else {
    if (verbose > 1) {
      message("Signs unknown for the ", type,
              " statistic, using positive signs experimentally.")
    }
    signs <- 1
  }
  # Coefficients u_{k, p} of the local alternative, from the v_{k, p}^2
  uk <- vk2_to_uk(vk2 = vk2, p = p, signs = signs)
  # Proportion of the squared Gegenbauer norm explained by the first k
  # coefficients; the cutoff is the smallest k explaining >= 1 - thre
  cum_norm <- Gegen_norm(coefs = uk, k = seq_len(K_max_new), p = p,
                         cumulative = TRUE)^2
  cum_norm <- cum_norm / cum_norm[K_max_new]
  cutoff <- which(cum_norm >= 1 - thre)[1]
  # Truncate, optionally reporting the truncation and showing diagnostics
  uk_cutoff <- uk[1:cutoff]
  if (verbose) {
    message("Series truncated from ", K_max_new, " to ", cutoff,
            " terms (", 100 * (1 - thre),
            "% of cumulated norm; last coefficient = ",
            sprintf("%.3e", uk_cutoff[cutoff]), ").")
    # Diagnostic plots: cumulated norm with the cutoff marked, and the
    # full vs. truncated angular function
    if (verbose > 1) {
      old_par <- par(mfrow = c(1, 2), mar = c(5, 5.5, 4, 2) + 0.1)
      # Cumulated squared norm (padded with 1 beyond K_max_new)
      plot(seq_len(K_max), 100 * c(cum_norm, rep(1, K_max - K_max_new)),
           xlab = "k", ylab = "Percentage of cumulated squared norm",
           type = "s", log = "x")
      segments(x0 = cutoff, y0 = par()$usr[3],
               x1 = cutoff, y1 = 100 * cum_norm[cutoff], col = 3)
      segments(x0 = 1, y0 = 100 * (1 - thre),
               x1 = cutoff, y1 = 100 * (1 - thre), col = 2)
      abline(v = K_max_new, col = "gray", lty = 2)
      # Angular function before (G1) and after (G2) truncation; endpoints
      # z = +-1 are excluded to avoid boundary artifacts
      z <- seq(-1, 1, l = 1e3)[-c(1, 1e3)]
      th <- acos(z)
      G1 <- Gegen_series(theta = th, coefs = c(1, uk),
                         k = c(0, seq_along(uk)), p = p)
      G2 <- Gegen_series(theta = th, coefs = c(1, uk_cutoff),
                         k = c(0, seq_along(uk_cutoff)), p = p)
      e <- expression(f(z) == 1 + sum(u[k] * C[k]^(p / 2 - 1) * (z), k == 1, K))
      plot(z, G1, ylim = c(1e-3, max(c(G1, G2))), xlab = expression(z),
           ylab = e, type = "l", log = "y")
      lines(z, G2, col = 2)
      legend("top", legend = paste("K =", c(K_max_new, cutoff)),
             col = 1:2, lwd = 2)
      par(old_par)
    }
  }
  return(uk_cutoff)
}
#' @title Distribution and quantile functions from angular function
#'
#' @description Numerical computation of the distribution function \eqn{F} and
#' the quantile function \eqn{F^{-1}} for an \link[=locdev]{angular function}
#' \eqn{f} in a \link[=tang-norm-decomp]{tangent-normal decomposition}.
#' \eqn{F^{-1}(x)} results from the inversion of
#' \deqn{F(x) = \int_{-1}^x \omega_{p - 1}c_f f(z) (1 - z^2)^{(p - 3) / 2}
#' \,\mathrm{d}z}{F(x) = \int_{-1}^x \omega_{p - 1}c_f f(z)
#' (1 - z^2)^{(p - 3) / 2} dz}
#' for \eqn{x\in [-1, 1]}, where \eqn{c_f} is a normalizing constant and
#' \eqn{\omega_{p - 1}} is the surface area of \eqn{S^{p - 2}}.
#'
#' @inheritParams locdev
#' @inheritParams r_unif
#' @param Gauss use a \link[=Gauss_Legen_nodes]{Gauss--Legendre quadrature}
#' rule to integrate \eqn{f} with \code{N} nodes? Otherwise, rely on
#' \code{\link{integrate}}. Defaults to \code{TRUE}.
#' @param N number of points used in the Gauss--Legendre quadrature. Defaults
#' to \code{320}.
#' @param K number of equispaced points on \eqn{[-1, 1]} used for evaluating
#' \eqn{F^{-1}} and then interpolating. Defaults to \code{1e3}.
#' @param tol tolerance passed to \code{\link{uniroot}} for the inversion of
#' \eqn{F}. Also, passed to \code{\link{integrate}}'s \code{rel.tol} and
#' \code{abs.tol} if \code{Gauss = FALSE}. Defaults to \code{1e-6}.
#' @param ... further parameters passed to \code{f}.
#' @details
#' The normalizing constant \eqn{c_f} is such that \eqn{F(1) = 1}. It does not
#' need to be part of \code{f} as it is computed internally.
#'
#' Interpolation is performed by a monotone cubic spline. \code{Gauss = TRUE}
#' yields more accurate results, at expenses of a heavier computation.
#'
#' If \code{f} yields negative values, these are silently truncated to zero.
#' @return A \code{\link{splinefun}} object ready to evaluate \eqn{F} or
#' \eqn{F^{-1}}, as specified.
#' @examples
#' f <- function(x) rep(1, length(x))
#' plot(F_from_f(f = f, p = 4, Gauss = TRUE), ylab = "F(x)", xlim = c(-1, 1))
#' plot(F_from_f(f = f, p = 4, Gauss = FALSE), col = 2, add = TRUE,
#' xlim = c(-1, 1))
#' curve(p_proj_unif(x = x, p = 4), col = 3, add = TRUE, n = 300)
#' plot(F_inv_from_f(f = f, p = 4, Gauss = TRUE), ylab = "F^{-1}(x)")
#' plot(F_inv_from_f(f = f, p = 4, Gauss = FALSE), col = 2, add = TRUE)
#' curve(q_proj_unif(u = x, p = 4), col = 3, add = TRUE, n = 300)
#' @name F_from_f
#' @rdname F_from_f
#' @export
F_from_f <- function(f, p, Gauss = TRUE, N = 320, K = 1e3, tol = 1e-6, ...) {
  # Equispaced evaluation grid for F on [-1, 1]
  z <- seq(-1, 1, length.out = K)
  # F(x) = \int_{-1}^x \omega_{p - 1} f(z) (1 - z^2)^{(p - 3) / 2} dz,
  # computed either by Gauss--Legendre quadrature or by integrate()
  if (Gauss) {
    # One quadrature per grid point on [-1, t]; negative f values are
    # silently truncated to zero via pmax (documented behavior)
    F_grid <- rotasym::w_p(p = p - 1) * sapply(z, function(t) {
      z_k <- drop(Gauss_Legen_nodes(a = -1, b = t, N = N))
      w_k <- drop(Gauss_Legen_weights(a = -1, b = t, N = N))
      sum(w_k * pmax(f(z_k, ...), 0) * (1 - z_k^2)^((p - 3) / 2), na.rm = TRUE)
    })
    # Normalize so that F(1) = 1 (f need not include its constant c_f)
    c_f <- F_grid[length(F_grid)]
    F_grid <- F_grid / c_f
  } else {
    # Adaptive quadrature; errors do not abort thanks to
    # stop.on.error = FALSE (a failed integral contributes its last value)
    g <- function(t) pmax(f(t, ...), 0) * (1 - t^2)^((p - 3) / 2)
    F_grid <- sapply(z[-1], function(u) rotasym::w_p(p = p - 1) *
                       integrate(f = g, lower = -1, upper = u,
                                 subdivisions = 1e3, rel.tol = tol,
                                 abs.tol = tol, stop.on.error = FALSE)$value)
    # Normalize so that F(1) = 1 (f need not include its constant c_f)
    c_f <- F_grid[length(F_grid)]
    F_grid <- F_grid / c_f
    # Prepend F(-1) = 0, skipped above since z[-1] drops the left endpoint
    F_grid <- c(0, F_grid)
  }
  # Interpolate: monotone cubic splines ("hyman") when the grid is sorted,
  # otherwise fall back to clamped linear interpolation (rule = 2). Note
  # that switch() evaluates only the selected branch, so splinefun() is not
  # called on an unsorted grid (where it would error).
  if (anyNA(F_grid)) stop("Numerical error (NAs) in F_grid")
  F_appf <- switch(is.unsorted(F_grid) + 1,
                   splinefun(x = z, y = F_grid, method = "hyman"),
                   approxfun(x = z, y = F_grid, method = "linear", rule = 2))
  return(F_appf)
}
#' @rdname F_from_f
#' @export
F_inv_from_f <- function(f, p, Gauss = TRUE, N = 320, K = 1e3, tol = 1e-6,
                         ...) {
  # Distribution function F associated with the angular function f
  F_appf <- F_from_f(f = f, p = p, Gauss = Gauss, N = N, K = K, tol = tol, ...)
  # Invert F on an equispaced probability grid via root finding; the two
  # endpoints are known exactly: F^{-1}(0) = -1 and F^{-1}(1) = 1
  u <- seq(0, 1, length.out = K)
  inner <- sapply(u[-c(1, K)], function(v) {
    uniroot(f = function(x) F_appf(x) - v, lower = -1, upper = 1,
            tol = tol)$root
  })
  F_inv_grid <- c(-1, inner, 1)
  # Abort on numerical failures before interpolating
  stopifnot(!anyNA(F_inv_grid))
  # Monotone cubic interpolation ("hyman") when the grid is sorted;
  # otherwise a clamped linear interpolation (rule = 2)
  if (is.unsorted(F_inv_grid)) {
    approxfun(x = u, y = F_inv_grid, method = "linear", rule = 2)
  } else {
    splinefun(x = u, y = F_inv_grid, method = "hyman")
  }
}
#' @title Transformation between different coefficients in Sobolev statistics
#'
#' @description Given a Sobolev statistic
#' \deqn{S_{n, p} = \sum_{i, j = 1}^n \psi(\cos^{-1}({\bf X}_i'{\bf X}_j)),}{
#' S_{n, p} = \sum_{i, j = 1}^n \psi(\cos^{-1}(X_i'X_j)),}
#' for a sample \eqn{{\bf X}_1, \ldots, {\bf X}_n \in S^{p - 1} := \{{\bf x}
#' \in R^p : ||{\bf x}|| = 1\}}{X_1, \ldots, X_n \in S^{p - 1} :=
#' \{x \in R^p : ||x|| = 1\}}, \eqn{p\ge 2}, three important sequences
#' are related to \eqn{S_{n, p}}.
#' \itemize{
#' \item \link[=Gegen_coefs]{Gegenbauer coefficients} \eqn{\{b_{k, p}\}} of
#' \eqn{\psi_p} (see, e.g., the \link[=Pn]{projected-ecdf statistics}), given
#' by
#' \deqn{b_{k, p} := \frac{1}{c_{k, p}}\int_0^\pi \psi_p(\theta)
#' C_k^{p / 2 - 1}(\cos\theta)\,\mathrm{d}\theta.}{
#' b_{k, p} := \frac{1}{c_{k, p}} \int_0^\pi \psi_p(\theta)
#' C_k^(p / 2 - 1)(\cos\theta) d\theta.}
#' \item Weights \eqn{\{v_{k, p}^2\}} of the
#' \link[=Sobolev]{asymptotic distribution} of the Sobolev statistic,
#' \eqn{\sum_{k = 1}^\infty v_k^2 \chi^2_{d_{p, k}}}, given by
#' \deqn{v_{k, p}^2 = \left(1 + \frac{2k}{p - 2}\right)^{-1} b_{k, p},
#' \quad p \ge 3.}{v_{k, p}^2 = (1 + 2k / (p - 2))^{-1} b_{k, p}, p \ge 3.}
#' \item Gegenbauer coefficients \eqn{\{u_{k, p}\}} of the
#' \link[=locdev]{local projected alternative} associated to \eqn{S_{n, p}},
#' given by
#' \deqn{u_{k, p} = \left(1 + \frac{2k}{p - 2}\right) v_{k, p},
#' \quad p \ge 3.}{u_{k, p} = (1 + 2k / (p - 2)) v_{k, p}, p \ge 3.}
#' }
#' For \eqn{p = 2}, the factor \eqn{(1 + 2k / (p - 2))} is replaced by \eqn{2}.
#'
#' @param bk coefficients \eqn{b_{k, p}} associated to the indexes
#' \code{1:length(bk)}, a vector.
#' @param vk2 \bold{squared} coefficients \eqn{v_{k, p}^2} associated to the
#' indexes \code{1:length(vk2)}, a vector.
#' @param uk coefficients \eqn{u_{k, p}} associated to the indexes
#' \code{1:length(uk)}, a vector.
#' @inheritParams r_unif_sph
#' @param signs signs of the coefficients \eqn{u_{k, p}}, a vector of the
#' same size as \code{vk2} or \code{bk}, or a scalar. Defaults to \code{1}.
#' @return
#' The corresponding vectors of coefficients \code{vk2}, \code{bk}, or
#' \code{uk}, depending on the call.
#' @details
#' See more details in Prentice (1978) and García-Portugués et al. (2020). The
#' adequate signs of \code{uk} for the \code{"PRt"} \link[=Pn]{Rothman test}
#' can be retrieved with \code{\link{akx}} and \code{sqr = TRUE}, see the
#' examples.
#' @references
#' García-Portugués, E., Navarro-Esteban, P., Cuesta-Albertos, J. A. (2020)
#' On a projection-based class of uniformity tests on the hypersphere.
#' \emph{arXiv:2008.09897}. \url{https://arxiv.org/abs/2008.09897}
#'
#' Prentice, M. J. (1978). On invariant tests of uniformity for directions and
#' orientations. \emph{The Annals of Statistics}, 6(1):169--176.
#' \doi{10.1214/aos/1176344075}
#' @examples
#' # bk, vk2, and uk for the PCvM test in p = 3
#' (bk <- Gegen_coefs_Pn(k = 1:5, type = "PCvM", p = 3))
#' (vk2 <- bk_to_vk2(bk = bk, p = 3))
#' (uk <- bk_to_uk(bk = bk, p = 3))
#'
#' # vk2 is the same as
#' weights_dfs_Sobolev(K_max = 10, thre = 0, p = 3, type = "PCvM")$weights
#'
#' # bk and uk for the Rothman test in p = 3, with adequate signs
#' t <- 1 / 3
#' (bk <- Gegen_coefs_Pn(k = 1:5, type = "PRt", p = 3, Rothman_t = t))
#' (ak <- akx(x = drop(q_proj_unif(t, p = 3)), p = 3, k = 1:5, sqr = TRUE))
#' (uk <- bk_to_uk(bk = bk, p = 3, signs = ak))
#' @name Sobolev_coefs
#' @rdname Sobolev_coefs
#' @export
bk_to_vk2 <- function(bk, p) {
  # Dimension must be an integer >= 2
  p <- as.integer(p)
  stopifnot(p >= 2)
  # v_{k, p}^2 = b_{k, p} / (1 + 2k / (p - 2)); the factor degenerates
  # to the constant 2 in the circular case p = 2
  k <- seq_along(bk)
  if (p == 2) bk / 2 else bk / (1 + 2 * k / (p - 2))
}
#' @rdname Sobolev_coefs
#' @export
bk_to_uk <- function(bk, p, signs = 1) {
  # Dimension must be an integer >= 2
  p <- as.integer(p)
  stopifnot(p >= 2)
  # signs is either a scalar or one sign per coefficient
  stopifnot(length(signs) %in% c(1, length(bk)))
  # u_{k, p} = sign * sqrt((1 + 2k / (p - 2)) * b_{k, p}); the factor
  # degenerates to 2 when p = 2
  k <- seq_along(bk)
  fac <- if (p == 2) 2 else 1 + 2 * k / (p - 2)
  sign(signs) * sqrt(fac * bk)
}
#' @rdname Sobolev_coefs
#' @export
vk2_to_bk <- function(vk2, p) {
  # Dimension must be an integer >= 2
  p <- as.integer(p)
  stopifnot(p >= 2)
  # b_{k, p} = (1 + 2k / (p - 2)) * v_{k, p}^2; the factor degenerates
  # to 2 when p = 2
  mult <- if (p == 2) 2 else 1 + 2 * seq_along(vk2) / (p - 2)
  mult * vk2
}
#' @rdname Sobolev_coefs
#' @export
vk2_to_uk <- function(vk2, p, signs = 1) {
  # Dimension must be an integer >= 2
  p <- as.integer(p)
  stopifnot(p >= 2)
  # signs is either a scalar or one sign per coefficient
  stopifnot(length(signs) %in% c(1, length(vk2)))
  # u_{k, p} = (1 + 2k / (p - 2)) * sign * sqrt(v_{k, p}^2); the factor
  # degenerates to 2 when p = 2
  mult <- if (p == 2) 2 else 1 + 2 * seq_along(vk2) / (p - 2)
  mult * sign(signs) * sqrt(vk2)
}
#' @rdname Sobolev_coefs
#' @export
uk_to_vk2 <- function(uk, p) {
  # Dimension must be an integer >= 2
  p <- as.integer(p)
  stopifnot(p >= 2)
  # v_{k, p}^2 = (u_{k, p} / (1 + 2k / (p - 2)))^2; the factor degenerates
  # to 2 when p = 2
  div <- if (p == 2) 2 else 1 + 2 * seq_along(uk) / (p - 2)
  (uk / div)^2
}
#' @rdname Sobolev_coefs
#' @export
uk_to_bk <- function(uk, p) {
  # Dimension must be an integer >= 2
  p <- as.integer(p)
  stopifnot(p >= 2)
  # b_{k, p} = u_{k, p}^2 / (1 + 2k / (p - 2)); the factor degenerates
  # to 2 when p = 2
  div <- if (p == 2) 2 else 1 + 2 * seq_along(uk) / (p - 2)
  uk^2 / div
}
#' @title Sample non-uniformly distributed spherical data
#'
#' @description Simple simulation of prespecified non-uniform spherical
#' distributions: von Mises--Fisher (vMF), Mixture of vMF (MvMF),
#' Angular Central Gaussian (ACG), Small Circle (SC), Watson (W), or
#' Cauchy-like (C).
#'
#' @inheritParams r_unif
#' @param alt alternative, must be \code{"vMF"}, \code{"MvMF"},
#' \code{"ACG"}, \code{"SC"}, \code{"W"}, or \code{"C"}. See details below.
#' @param kappa non-negative parameter measuring the strength of the deviation
#' with respect to uniformity (obtained with \eqn{\kappa = 0}).
#' @param nu projection along \eqn{{\bf e}_p}{e_p} controlling the modal
#' strip of the small circle distribution. Must be in (-1, 1). Defaults to
#' \code{0.5}.
#' @param F_inv quantile function returned by \code{\link{F_inv_from_f}}. Used
#' for \code{"SC"}, \code{"W"}, and \code{"C"}. Computed by internally if
#' \code{NULL} (default).
#' @inheritParams F_inv_from_f
#' @param axial_MvMF use a mixture of vMF that is axial (i.e., symmetrically
#' distributed about the origin)? Defaults to \code{TRUE}.
#' @details
#' The parameter \code{kappa} is used as \eqn{\kappa} in the following
#' distributions:
#' \itemize{
#' \item \code{"vMF"}: von Mises--Fisher distribution with concentration
#' \eqn{\kappa} and directional mean \eqn{{\bf e}_p = (0, 0, \ldots, 1)}{
#' e_p = (0, 0, \ldots, 1)}.
#' \item \code{"MvMF"}: equally-weighted mixture of \eqn{p} von Mises--Fisher
#' distributions with common concentration \eqn{\kappa} and directional means
#' \eqn{\pm{\bf e}_1, \ldots, \pm{\bf e}_p}{±e_1, \ldots, ±e_p} if
#' \code{axial_MvMF = TRUE}. If \code{axial_MvMF = FALSE}, then only means
#' with positive signs are considered.
#' \item \code{"ACG"}: Angular Central Gaussian distribution with diagonal
#' shape matrix with diagonal given by
#' \deqn{(1, \ldots, 1, 1 + \kappa) / (p + \kappa).}
#' \item \code{"SC"}: Small Circle distribution with axis mean
#' \eqn{{\bf e}_p = (0, 0, \ldots, 1)}{e_p = (0, 0, \ldots, 1)} and
#' concentration \eqn{\kappa} about the projection along the mean, \eqn{\nu}.
#' \item \code{"W"}: Watson distribution with axis mean
#' \eqn{{\bf e}_p = (0, 0, \ldots, 1)}{e_p = (0, 0, \ldots, 1)} and
#' concentration \eqn{\kappa}. The Watson distribution is a particular case
#' of the Bingham distribution.
#' \item \code{"C"}: Cauchy-like distribution with directional mode
#' \eqn{{\bf e}_p = (0, 0, \ldots, 1)}{e_p = (0, 0, \ldots, 1)} and
#' concentration \eqn{\kappa = \rho / (1 - \rho^2)}. The circular Wrapped
#' Cauchy distribution is a particular case of this Cauchy-like distribution.
#' }
#' @return An \bold{array} of size \code{c(n, p, M)} with \code{M} random
#' samples of size \code{n} of non-uniformly-generated directions on
#' \eqn{S^{p-1}}.
#' @details
#' Much faster sampling for \code{"SC"}, \code{"W"}, and \code{"C"} is achieved
#' providing \code{F_inv}, see examples.
#' @examples
#' ## Simulation with p = 2
#'
#' p <- 2
#' n <- 200
#' kappa <- 20
#' nu <- 0.5
#' rho <- ((2 * kappa + 1) - sqrt(4 * kappa + 1)) / (2 * kappa)
#' F_inv_SC_2 <- F_inv_from_f(f = function(z) exp(-kappa * (z - nu)^2), p = 2)
#' F_inv_W_2 <- F_inv_from_f(f = function(z) exp(kappa * z^2), p = 2)
#' F_inv_C_2 <- F_inv_from_f(f = function(z) (1 - rho^2) /
#' (1 + rho^2 - 2 * rho * z)^(p / 2), p = 2)
#' x1 <- r_alt(n = n, p = p, alt = "vMF", kappa = kappa)[, , 1]
#' x2 <- r_alt(n = n, p = p, alt = "MvMF", kappa = kappa)[, , 1]
#' x3 <- r_alt(n = n, p = p, alt = "ACG", kappa = kappa)[, , 1]
#' x4 <- r_alt(n = n, p = p, alt = "SC", F_inv = F_inv_SC_2)[, , 1]
#' x5 <- r_alt(n = n, p = p, alt = "W", F_inv = F_inv_W_2)[, , 1]
#' x6 <- r_alt(n = n, p = p, alt = "C", F_inv = F_inv_C_2)[, , 1]
#' r <- runif(n, 0.95, 1.05) # Radius perturbation to improve visualization
#' plot(r * x1, pch = 16, xlim = c(-1.1, 1.1), ylim = c(-1.1, 1.1), col = 1)
#' points(r * x2, pch = 16, col = 2)
#' points(r * x3, pch = 16, col = 3)
#' points(r * x4, pch = 16, col = 4)
#' points(r * x5, pch = 16, col = 5)
#' points(r * x6, pch = 16, col = 6)
#'
#' ## Simulation with p = 3
#'
#' n <- 200
#' p <- 3
#' kappa <- 20
#' nu <- 0.5
#' rho <- ((2 * kappa + 1) - sqrt(4 * kappa + 1)) / (2 * kappa)
#' F_inv_SC_3 <- F_inv_from_f(f = function(z) exp(-kappa * (z - nu)^2), p = 3)
#' F_inv_W_3 <- F_inv_from_f(f = function(z) exp(kappa * z^2), p = 3)
#' F_inv_C_3 <- F_inv_from_f(f = function(z) (1 - rho^2) /
#' (1 + rho^2 - 2 * rho * z)^(p / 2), p = 3)
#' x1 <- r_alt(n = n, p = p, alt = "vMF", kappa = kappa)[, , 1]
#' x2 <- r_alt(n = n, p = p, alt = "MvMF", kappa = kappa)[, , 1]
#' x3 <- r_alt(n = n, p = p, alt = "ACG", kappa = kappa)[, , 1]
#' x4 <- r_alt(n = n, p = p, alt = "SC", F_inv = F_inv_SC_3)[, , 1]
#' x5 <- r_alt(n = n, p = p, alt = "W", F_inv = F_inv_W_3)[, , 1]
#' x6 <- r_alt(n = n, p = p, alt = "C", F_inv = F_inv_C_3)[, , 1]
#' s3d <- scatterplot3d::scatterplot3d(x1, pch = 16, xlim = c(-1.1, 1.1),
#' ylim = c(-1.1, 1.1), zlim = c(-1.1, 1.1))
#' s3d$points3d(x2, pch = 16, col = 2)
#' s3d$points3d(x3, pch = 16, col = 3)
#' s3d$points3d(x4, pch = 16, col = 4)
#' s3d$points3d(x5, pch = 16, col = 5)
#' s3d$points3d(x6, pch = 16, col = 6)
#' @export
r_alt <- function(n, p, M = 1, alt = "vMF", kappa = 1, nu = 0.5, F_inv = NULL,
                  K = 1e3, axial_MvMF = TRUE) {
  # Common axis / directional mean for all alternatives: e_p (North pole)
  mu <- c(rep(0, p - 1), 1)
  # Validate concentration parameter and sample size
  stopifnot(kappa >= 0)
  stopifnot(n >= 1)
  # kappa = 0 recovers the uniform distribution for every alternative
  if (kappa == 0) {
    return(r_unif_sph(n = n, p = p, M = M))
  }
  # Tangent-normal sampler shared by the rotationally symmetric
  # alternatives "SC", "W", and "C": the tangent part is uniform on
  # S^{p - 2} and the projection along mu is drawn via inversion of F_inv
  r_tang <- function(n_samp, F_inv) {
    r_U <- function(n) r_unif_sph(n = n, p = p - 1, M = 1)[, , 1]
    r_V <- function(n) F_inv(runif(n = n))
    rotasym::r_tang_norm(n = n_samp, theta = mu, r_U = r_U, r_V = r_V)
  }
  # Draw the n * M observations at once; they are split into M samples below
  if (alt == "vMF") {
    # von Mises--Fisher with mean mu and concentration kappa
    long_samp <- rotasym::r_vMF(n = n * M, mu = mu, kappa = kappa)
  } else if (alt == "MvMF") {
    # Equally-weighted mixture: assign each observation to one of the p
    # vMF components with means e_1, ..., e_p
    comp <- sample(x = seq_len(p), size = n * M, replace = TRUE)
    nM_comp <- tabulate(bin = comp, nbins = p)
    mu_comp <- diag(1, nrow = p, ncol = p)
    # Sample each non-empty component
    long_samp <- matrix(nrow = n * M, ncol = p)
    for (k in which(nM_comp > 0)) {
      long_samp[comp == k, ] <- rotasym::r_vMF(n = nM_comp[k],
                                               mu = mu_comp[k, ],
                                               kappa = kappa)
    }
    # Random sign flips turn the mixture into an axial one about
    # +-e_1, ..., +-e_p
    if (axial_MvMF) {
      long_samp <- sample(x = c(-1, 1), size = n * M, replace = TRUE) *
        long_samp
    }
    # Shuffle rows so that components are not blocked together
    long_samp <- long_samp[sample(x = n * M), , drop = FALSE]
  } else if (alt == "ACG") {
    # Angular Central Gaussian with diagonal shape matrix
    # diag(1, ..., 1, 1 + kappa) / (p + kappa)
    Lambda <- diag(c(rep(1 / (p + kappa), p - 1),
                     (1 + kappa) / (p + kappa)), nrow = p, ncol = p)
    long_samp <- rotasym::r_ACG(n = n * M, Lambda = Lambda)
  } else if (alt == "SC") {
    # Small Circle: concentration kappa about the projection nu along mu
    if (is.null(F_inv)) {
      stopifnot(-1 < nu & nu < 1)
      F_inv <- F_inv_from_f(f = function(z) exp(-kappa * (z - nu)^2),
                            p = p, K = K)
    }
    long_samp <- r_tang(n_samp = n * M, F_inv = F_inv)
  } else if (alt == "W") {
    # Watson: axial density proportional to exp(kappa * z^2)
    if (is.null(F_inv)) {
      F_inv <- F_inv_from_f(f = function(z) exp(kappa * z^2), p = p, K = K)
    }
    long_samp <- r_tang(n_samp = n * M, F_inv = F_inv)
  } else if (alt == "C") {
    # Cauchy-like: rho solves kappa = rho / (1 - rho^2) (kappa > 0 is
    # guaranteed at this point, so the positive root is well defined)
    if (is.null(F_inv)) {
      rho <- ((2 * kappa + 1) - sqrt(4 * kappa + 1)) / (2 * kappa)
      F_inv <- F_inv_from_f(f = function(z) (1 - rho^2) /
                              (1 + rho^2 - 2 * rho * z)^(p / 2),
                            p = p, K = K)
    }
    long_samp <- r_tang(n_samp = n * M, F_inv = F_inv)
  } else {
    # Message lists exactly the implemented alternatives (no "Bing" branch
    # exists in this function)
    stop(paste("Wrong alt; must be \"vMF\", \"MvMF\",",
               "\"ACG\", \"SC\", \"W\", or \"C\"."))
  }
  # Reshape the long sample into an array of M samples of size n x p
  samp <- array(dim = c(n, p, M))
  for (j in seq_len(M)) {
    samp[, , j] <- long_samp[(1 + (j - 1) * n):(j * n), , drop = FALSE]
  }
  return(samp)
}
|
58b84bb5a5d26644c823b15457d4367c3a14f31c
|
ebad9ef7a3678fcf491ee0574377714648e4c0e8
|
/man/officer_prep.Rd
|
18d765dd26430ea825233e3e6f06ea66b2d8e450
|
[
"MIT"
] |
permissive
|
dpowerstp/arrprocess
|
3b7339cd8feb1eda32878715a0ad79683ea57828
|
b1eb48431c6d8588ea80639d349f22c274854c0f
|
refs/heads/main
| 2023-07-07T02:26:43.997908
| 2021-08-09T18:45:19
| 2021-08-09T18:45:19
| 394,312,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 540
|
rd
|
officer_prep.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/officer_prep.R
\name{officer_prep}
\alias{officer_prep}
\title{Function to re-factor anonymous officer values by values present in a given dataset}
\usage{
officer_prep(df, officer_anon_df)
}
\arguments{
\item{df}{df with officer_anon and officer_order columns}
\item{officer_anon_df}{df with officer information}
}
\value{
df with officer names ordered by order
}
\description{
function to re-factor anonymous officer values by values present in given dataset
}
|
dd8397bd2a9bee629c08cd50f110f031721aa7cd
|
0b73cc18412dfad492ab5e8b9f8c2ec0735d8e0c
|
/tests/testthat/test-13-upgrade_old_files.R
|
1708eb472db81b7ab7466da7d29cb2e90a179c08
|
[
"MIT"
] |
permissive
|
UZH-PEG/dmdScheme
|
30f2b38b1d1e292443408d589f3ffdcf4dc51375
|
7441e6fbc8807aa6343a982c22b80dfae7d079bd
|
refs/heads/master
| 2023-04-16T17:08:19.543231
| 2022-08-30T07:29:04
| 2022-08-30T07:29:04
| 188,025,722
| 0
| 0
|
NOASSERTION
| 2022-03-18T14:32:08
| 2019-05-22T11:33:47
|
R
|
UTF-8
|
R
| false
| false
| 709
|
r
|
test-13-upgrade_old_files.R
|
# Tests for upgrade_old_files(), which converts files created with older
# scheme versions to the currently installed one.
context("13-upgrade_old_files()")
# Temporary file with a deliberately unsupported extension (".xxx")
fn <- tempfile(fileext = ".xxx")
file.create(fn)
test_that(
  "upgrade_old_files() raises error if file is of wrong extension",
  {
    # Only 'xls', 'xlsx', and 'xml' inputs are accepted
    expect_error(
      object = upgrade_old_files(file = fn),
      regexp = "x has to have the extension 'xls' 'xlsx' or 'xml'"
    )
  }
)
# Clean up the temporary fixture before the next test
unlink(fn)
test_that(
  "upgrade_old_files() gives warning and returns `NULL` if same version as current",
  {
    # scheme_path_xlsx() points at the package's own (current-version) file,
    # so no conversion is needed: a warning is raised ...
    expect_warning(
      object = upgrade_old_files(file = scheme_path_xlsx()),
      regexp = "File has same version as the installed package. No conversion necessary!"
    )
    # ... and the return value is NULL (warning suppressed to isolate the
    # return-value expectation)
    expect_null(
      object = suppressWarnings(upgrade_old_files(file = scheme_path_xlsx()))
    )
  }
)
|
978c675e4125fa0d6f04c3a994a0ab79fccb2739
|
50c1aa6ee2cde2e58b2e55d014ef33e37383ac66
|
/R/WSPG-DT model R code.R
|
e8def68ac60cc84dfc98ea58a88dbcbeb7903c64
|
[] |
no_license
|
terryferg/Analytic-Notes-examples
|
934533a37391d9150a393b365c76960588abe3c8
|
6ceb2cf10bc04524e303fc99fd7463438a6cf826
|
refs/heads/master
| 2021-01-12T01:15:10.585952
| 2017-01-09T17:16:37
| 2017-01-09T17:16:37
| 78,361,464
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,529
|
r
|
WSPG-DT model R code.R
|
# Exploratory classification of the Red2008Cluster response via a single
# decision tree and a random forest.
# NOTE(review): `data` must be defined beforehand by uncommenting exactly one
# of the subsetting lines below -- confirm which subset is intended.
#data <- Red.Decision.Tree...Associations
#data <- Red.Decision.Tree...Associations[Red.Decision.Tree...Associations$User==2,]
#data <- Red.Decision.Tree...Associations[Red.Decision.Tree...Associations$PricePoint==1,]
#data <- Red.Decision.Tree...Associations[Red.Decision.Tree...Associations$PricePoint==2,]
#data <- Red.Decision.Tree...Associations[Red.Decision.Tree...Associations$PricePoint==3,]
#data <- Red.Decision.Tree...Associations[Red.Decision.Tree...Associations$PricePoint==4,]
#data <- rbind(Red.Decision.Tree...Associations[Red.Decision.Tree...Associations$PricePoint==1,],Red.Decision.Tree...Associations[Red.Decision.Tree...Associations$PricePoint==2,])
#data <- data[data$PricePoint==3,]
library(randomForest)
library(rpart)
# Recode columns 2:28 as factors (they appear to hold categorical codes).
# Assumes the dataset has at least 28 columns and that column 28 is the
# response Red2008Cluster -- TODO confirm against the source data.
for(i in 2:28){
data[,i] <- as.factor(as.factor(as.character(data[,i])))
}
### Tree ############################
# Deep classification tree (very small complexity parameter cp) on the
# predictor block data[, 5:28]; pruning below uses the same cp, so fit2
# equals fit1 up to the pruning bookkeeping
tree.fit1=rpart(Red2008Cluster ~.,data[,5:28],control=rpart.control(cp=0.00005),method="class")
printcp(tree.fit1)
tree.fit2<-prune(tree.fit1,cp=0.00005)
# In-sample predictions: note predictors only (columns 5:27; 28 is the class)
tree.pred <- predict(tree.fit2,data[,5:27],type="class")
table(tree.pred,data[,28])
# Resubstitution (training) error rate of the pruned tree
1-sum(diag(table(tree.pred,data[,28])))/dim(data)[1]
plot(tree.fit2)
text(tree.fit2,use.n=FALSE,all=TRUE,cex=1,xpd=TRUE)
# NOTE(review): importance is taken from the unpruned fit1 while the plot
# shows fit2 -- confirm this mix is intentional
as.data.frame(tree.fit1$variable.importance)
### Random Forest ###################
# Random forest on the same predictor block; importance = TRUE enables the
# permutation importance used by varImpPlot below
rf.fit <- randomForest(Red2008Cluster ~.,data[,5:28],mtry=5,ntree=1000,importance=TRUE)
# In-sample (not out-of-bag) predictions and agreement count
pred.rf <- predict(rf.fit,data[,5:28])
table(data[,28] == pred.rf)
importance(rf.fit)
varImpPlot(rf.fit)
|
4e9a8c8f796dafe8b9db5ee1c3bb2387de998438
|
016d4c8380b71bd9641a4f27bf4825ca66300980
|
/man/virtual_temperature.Rd
|
97564a5bacc81ff0150d8bd25bc98a211d87fcd5
|
[] |
no_license
|
cran/aiRthermo
|
a55926857cea020fd8bd975726f4c284f18b7281
|
3e8289b8068ed38835fc728b99418566cc30de41
|
refs/heads/master
| 2021-01-01T06:03:00.580148
| 2018-09-16T21:40:03
| 2018-09-16T21:40:03
| 97,342,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 921
|
rd
|
virtual_temperature.Rd
|
\name{virtual_temperature}
\alias{virtual_temperature}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Virtual Temperature
}
\description{
This function calculates the virtual temperature from given pressure and mixing ratio.
}
\usage{
virtual_temperature(P, Temp, w, consts = export_constants())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{P}{
A vector with pressure values in Pa.
}
\item{Temp}{
A vector with temperature values in Kelvin.
}
\item{w}{
A vector with mixing ratio values in kg/kg.
}
\item{consts}{
The constants defined in \emph{aiRthermoConstants} data are necessary.
}
}
\value{
This function returns a vector with virtual temperature values.
}
\seealso{
\code{\link{q2e}}
}
\examples{
data(RadiosondeD)
dPs<-RadiosondeD[,1]*100
dTs<-C2K(RadiosondeD[,3])
dws<-RadiosondeD[,6]/1000
virtual_temperature(dPs,dTs,dws)
}
\keyword{Functions}
|
171095fcc8676c3e2d02a70d3674aaded284ff82
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/MGMM/R/05_Mix_MVN.R
|
2441ca19debccb958a7f233eba8239e7fef4b028
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,894
|
r
|
05_Mix_MVN.R
|
# Purpose: Fits a multivariate normal mixture in the presence of missingness.
# Updated: 19/07/18
#------------------------------------------------------------------------------
#' Parameter Initialization for Mixture of Multivariate Normals.
#'
#' @param split_data Data partitioned by missingness.
#' @param k Number of mixture components.
#' @param init_means Optional list of initial mean vectors.
#' @param init_covs Optional list of initial covariance matrices.
#' @param init_props Optional vector of initial cluster proportions.
#'
#' @importFrom stats kmeans
fit.mix.miss.init <- function(
split_data,
k,
init_means,
init_covs,
init_props
) {
# Unpack
theta0 <- list()
n0 <- split_data$n0
d <- split_data$n_col
data_comp <- split_data$data_comp
# Case 1: All parameters provided.
if (!is.null(init_means) & !is.null(init_covs) & !is.null(init_props)) {
theta0$means <- init_means
theta0$covs <- init_covs
theta0$pi <- init_props
# Case 2: Initial values partially missing.
} else {
if (n0 == 0) {
stop("If no observations are complete, initial values are required for all parameters.")
}
# If complete cases are available, apply kmeans.
k_means <- kmeans(data_comp, k, iter.max = 100, nstart = 100)
# Cluster assignments.
cluster_assignment <- k_means$cluster
# Initialize means.
if (is.null(init_means)) {
means <- k_means$centers
theta0$means <- lapply(1:k, function(i){means[i, ]})
} else {
theta0$means <- init_means
}
# Initialize covariances
if (is.null(init_covs)) {
theta0$covs <- lapply(1:k, function(i) {
clust <- data_comp[cluster_assignment == i, , drop = FALSE]
return(matCov(clust, clust))
})
} else {
theta0$covs <- init_covs
}
# Initialize proportions
if (is.null(init_props)) {
theta0$pi <- as.numeric(table(cluster_assignment)) / n0
} else {
theta0$pi <- init_props
}
} # End Case 2.
# Check that estimated covariances are positive definite.
eigen_values <- unlist(lapply(theta0$covs, FUN = eigSym))
if (min(eigen_values) <= 0) {
stop("Initial covariance matrices are not all positive definite.")
}
# Initial responsibilities
theta0$gamma <- Responsibility(
split_data,
theta0$means,
theta0$covs,
theta0$pi
)
# Output
return(theta0)
}
#------------------------------------------------------------------------------
#' Cluster Sizes for a Mixutre of MVNs.
#'
#' @param split_data Data partitioned by missingness.
#' @param gamma List cof component responsibilities.
MixClusterSizes <- function(
split_data,
gamma
) {
# Unpack.
n0 <- split_data$n0
n1 <- split_data$n1
k <- gamma$k
# Cluster sizes.
cluster_sizes <- rep(0, k)
## Complete cases.
if (n0 > 0) {
gamma0 <- gamma$gamma0
cluster_sizes <- cluster_sizes + apply(gamma0, 2, sum)
}
## Incomplete cases.
if (n1 > 0) {
gamma1 <- gamma$gamma1
cluster_sizes <- cluster_sizes + apply(gamma1, 2, sum)
}
# Output.
return(cluster_sizes)
}
#------------------------------------------------------------------------------
#' Expected Residual Outer Product for a Mixutre of MVNs.
#'
#' @param split_data Data partitioned by missingness.
#' @param new_means List of updated means.
#' @param old_means List of previous means.
#' @param covs List of component covariances.
#' @param gamma List cof component responsibilities.
#' @return List of k expected residual outer products.
MixResidOP <- function(
split_data,
new_means,
old_means,
covs,
gamma
) {
# Unpack.
n0 <- split_data$n0
n1 <- split_data$n1
d <- split_data$n_col
k <- gamma$k
# Loop over mixture components.
aux <- function(j){
resid_op <- array(0, dim = c(d, d))
## Complete cases.
if (n0 > 0) {
# Residuals
mean_mat <- matrix(data = new_means[[j]], nrow = n0, ncol = d, byrow = TRUE)
resid <- split_data$data_comp - mean_mat
# Responsibility-weighted OP
resid_op <- resid_op + matIP(resid, gamma$gamma0[, j] * resid)
}
## Incomplete cases
if (n1 > 0) {
# Responsibility-weighted OP
resid_op <- resid_op + ExpResidOP(
split_data$data_incomp,
new_means[[j]],
old_means[[j]],
covs[[j]],
gamma$gamma1[, j])
}
# Return residual outer product.
return(resid_op)
}
# Loop over mixture components.
out <- lapply(1:k, aux)
return(out)
}
#------------------------------------------------------------------------------
#' EM Objective for a Mixture of MVNs.
#'
#' @param cluster_sizes Cluster sizes.
#' @param pi Cluster proportions
#' @param covs List of component covariances.
#' @param resid_ops List of residual outer products.
MixEMObj <- function(
cluster_sizes,
pi,
covs,
resid_ops
) {
# Pi term.
k <- length(pi)
pi_term <- sum(cluster_sizes * log(pi))
# Determinant term.
det_term <- lapply(1:k, function(j) {
cluster_sizes[j] * log(det(covs[[j]]))
})
det_term <- do.call(sum, det_term)
# Trace term.
trace_term <- lapply(1:k, function(j) {
tr(MMP(matInv(covs[[j]]), resid_ops[[j]]))
})
trace_term <- do.call(sum, trace_term)
# Objective.
obj <- pi_term - det_term - trace_term
return(obj)
}
#------------------------------------------------------------------------------
#' Mean Update for Mixture of MVNs with Missingness.
#'
#' @param split_data Data partitioned by missingness.
#' @param means List of component means.
#' @param covs List of component covariances.
#' @param gamma List of component responsibilities.
#' @return List containing the updated component means.
fit.mix.miss.update.means <- function(
split_data,
means,
covs,
gamma
) {
# Unpack.
n0 <- split_data$n0
n1 <- split_data$n1
k <- length(means)
# Cluster sizes.
cluster_sizes <- MixClusterSizes(
split_data,
gamma
)
# Loop over mixture components.
aux <- function(j) {
total <- 0
## Complete cases.
if (n0 > 0) {
total <- total + apply(gamma$gamma0[, j] * split_data$data_comp, 2, sum)
}
## Incomplete cases.
if (n1 > 0) {
working_response <- WorkResp(
split_data$data_incomp,
means[[j]],
covs[[j]],
gamma$gamma1[, j]
)
total <- total + apply(working_response, 2, sum)
}
# Update
new_mean <- (total) / cluster_sizes[j]
names(new_mean) <- split_data$orig_col_names
return(new_mean)
}
# Update means
new_means <- lapply(1:k, aux)
return(new_means)
}
#------------------------------------------------------------------------------
#' Parameter Update for Mixutre of MVNs with Missingness.
#'
#' @param split_data Data partitioned by missingness.
#' @param theta List containing the current `means`, `covs`, `pi`, and `gamma`.
#' @param fix_means Fix the mean to its starting value? Must initialize.
#' @return List containing:
#' \itemize{
#' \item The updated `mean`, `cov`, `pi`, and `gamma`.
#' \item The initial `old_obj` and final `new_obj` EM objective.
#' \item The increase in the EM objective `delta`.
#' }
fit.mix.miss.update <- function(
split_data,
theta,
fix_means
) {
# Previous parameters.
old_means <- theta$means
old_covs <- theta$covs
old_pi <- theta$pi
old_gamma <- theta$gamma
# Cluster sizes.
old_cluster_sizes <- MixClusterSizes(
split_data,
old_gamma
)
# Old residual outer products.
old_resid_ops <- MixResidOP(
split_data,
old_means,
old_means,
old_covs,
old_gamma
)
# Initial objective.
old_obj <- MixEMObj(
old_cluster_sizes,
old_pi,
old_covs,
old_resid_ops
)
# Update means.
if(fix_means){
new_means <- old_means
} else {
new_means <- fit.mix.miss.update.means(
split_data,
old_means,
old_covs,
old_gamma
)
}
# Update covariances.
## Update outer products
new_resid_ops <- MixResidOP(
split_data,
new_means,
old_means,
old_covs,
old_gamma
)
## Normalize.
aux <- function(j) {
# Covariances
new_cov <- new_resid_ops[[j]] / old_cluster_sizes[[j]]
rownames(new_cov) = colnames(new_cov) <- split_data$orig_col_names
return(new_cov)
}
# Update covariances
k <- theta$gamma$k
new_covs <- lapply(1:k, aux)
## Update responsibilities
new_gamma <- Responsibility(
split_data,
new_means,
new_covs,
old_pi
)
# Update cluster proportions.
new_pi <- old_cluster_sizes / sum(old_cluster_sizes)
# New EM objective.
new_obj <- MixEMObj(
old_cluster_sizes,
new_pi,
new_covs,
new_resid_ops
)
# Increment
delta <- new_obj - old_obj
# Output
out <- list()
out$means <- new_means
out$covs <- new_covs
out$pi <- new_pi
out$gamma <- new_gamma
out$new_obj <- new_obj
out$old_obj <- old_obj
out$delta <- delta
return(out)
}
#------------------------------------------------------------------------------
#' Cluster Assignment for Mixutre of MVNs with Missingness.
#'
#' @param split_data Data partitioned by missingness.
#' @param theta List containing the current `means`, `covs`, `pi`, and `gamma`.
#' @return List containing:
#' \itemize{
#' \item Matrix of cluster `Assignments`.
#' \item Matrix of `Density` evaluations.
#' \item Matrix of cluster `Responsibilities`.
#' }
MixClusterAssign <- function(
split_data,
theta
) {
# Unpack.
n2 <- split_data$n2
d <- split_data$n_col
k <- theta$gamma$k
# Responsibilities
resp <- rbind(theta$gamma$gamma0, theta$gamma$gamma1)
# Density evaluations.
dens <- rbind(theta$gamma$dens_eval0, theta$gamma$dens_eval1)
# Assignments.
map_assign <- apply(resp, 1, which.max)
if (n2 > 0) {
map_assign <- c(map_assign, rep(NA, n2))
}
# Recover initial order.
init_order <- split_data$init_order
map_assign <- map_assign[order(init_order)]
names(map_assign) <- split_data$orig_row_names
# Responsibilities.
if (n2 > 0) {
resp <- rbind(resp, array(NA, dim = c(n2, k)))
}
# Recover initial order.
resp <- resp[order(init_order), ]
rownames(resp) <- NULL
# Add entropies
entropy <- aaply(
.data = resp,
.margins = 1,
.fun = function(x){-sum(x * log(x)) / log(k)}
)
# Density evaluations.
if (n2 > 0) {
dens <- rbind(dens, array(NA, dim = c(n2, k)))
}
# Recover initial order.
dens <- dens[order(init_order), ]
rownames(dens) <- NULL
# Assignment matrix.
assign = cbind(
'Assignments' = map_assign,
'Entropy' = entropy
)
rownames(assign) <- split_data$orig_row_names
# Responsibility matrix.
rownames(resp) <- split_data$orig_row_names
# Density matrix
rownames(dens) <- split_data$orig_row_names
# Output.
out <- list()
out$Assignments <- assign
out$Responsibilities <- resp
out$Density <- dens
return(out)
}
#------------------------------------------------------------------------------
#' Imputation for Mixutre of MVNs with Missingness.
#'
#' @param split_data Data partitioned by missingness.
#' @param theta List containing the current `means`, `covs`, `pi`, and `gamma`.
#' @return Data.matrix, in the same order as the original data, with missing values
#' imputed to their expectations.
fit.mix.miss.impute <- function(
split_data,
theta
) {
# Unpack.
n0 <- split_data$n0
n1 <- split_data$n1
n2 <- split_data$n2
d <- split_data$n_col
k <- theta$gamma$k
# Output structure.
out <- matrix(NA, nrow = 0, ncol = d)
## Complete cases.
if (n0 > 0) {
out <- rbind(out, split_data$data_comp)
}
## Incomplete cases.
if (n1 > 0) {
aux <- function(j) {
working_response <- WorkResp(
split_data$data_incomp,
theta$means[[j]],
theta$covs[[j]],
theta$gamma$gamma1[, j]
)
return(working_response)
}
data_imp <- lapply(1:k, aux)
data_imp <- Reduce("+", data_imp)
out <- rbind(out, data_imp)
}
## Empty cases.
if (n2 > 0) {
aux <- function(j) {
return(theta$means[[j]] * theta$pi[j])
}
data_imp <- lapply(1:k, aux)
data_imp <- Reduce("+", data_imp)
data_imp <- matrix(data = data_imp, nrow = n2, ncol = d, byrow = TRUE)
out <- rbind(out, data_imp)
}
# Output
init_order <- split_data$init_order
out <- out[order(init_order), ]
rownames(out) <- split_data$orig_row_names
colnames(out) <- split_data$orig_col_names
return(out)
}
#------------------------------------------------------------------------------
# Main Function
#------------------------------------------------------------------------------
#' Fit Multivariate Mixture Distribution
#'
#' Given a matrix of random vectors, estimates the parameters for a mixture of
#' multivariate normal distributions. Accommodates arbitrary patterns of
#' missingness, provided the elements are missing at random (MAR).
#'
#' @param data Numeric data matrix.
#' @param k Number of mixture components. Defaults to 2.
#' @param init_means Optional list of initial mean vectors.
#' @param fix_means Fix means to their starting values? Must initialize.
#' @param init_covs Optional list of initial covariance matrices.
#' @param init_props Optional vector of initial cluster proportions.
#' @param maxit Maximum number of EM iterations.
#' @param eps Minimum acceptable increment in the EM objective.
#' @param report Report fitting progress?
#' @return Object of class \code{mix} containing the estimated
#'
#' @importFrom methods new
#' @importFrom mvnfast dmvn
#' @importFrom plyr aaply
#' @importFrom stats kmeans
fit.mix <- function(
data,
k = 2,
init_means = NULL,
fix_means = FALSE,
init_covs = NULL,
init_props = NULL,
maxit = 100,
eps = 1e-6,
report = FALSE
) {
# Partition data.
split_data <- PartitionData(data)
# Initialization.
theta0 <- fit.mix.miss.init(split_data, k, init_means, init_covs, init_props)
# Maximzation.
Update <- function(theta){fit.mix.miss.update(split_data, theta, fix_means)}
theta1 <- Maximization(theta0, Update, maxit, eps, report)
# Cluster assignments.
assign <- MixClusterAssign(
split_data,
theta1
)
# Imputation.
imputed <- fit.mix.miss.impute(split_data, theta1)
# Output
out <- new(
Class = "mix",
Assignments = assign$Assignments,
Completed = imputed,
Components = k,
Covariances = theta1$covs,
Density = assign$Density,
Means = theta1$means,
Objective = theta1$new_obj,
Proportions = theta1$pi,
Responsibilities = assign$Responsibilities
)
return(out)
}
|
e94fb9c7ef915fe8a0579fc9f74cc9630ff92c27
|
61f21afe4f78dd93079dd4108ac6e5020eb6e021
|
/modular_classes.R
|
69c3bf2c2f628176e30c448d4995d38de3e87e0d
|
[
"MIT"
] |
permissive
|
diogro/EL-snp_selection
|
35e81c78bc04981f0d2b97e7ad8ed802f1d4b5c6
|
3a05ec689250578874de9eb3b816faf9ccbb84bc
|
refs/heads/master
| 2023-04-04T14:18:19.438842
| 2023-03-29T18:45:18
| 2023-03-29T18:45:18
| 56,320,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,336
|
r
|
modular_classes.R
|
modular_matrix = matrix(
c(1, 1, 0, 0,
-1, -1, 0, 0,
0, 0, 1, 1,
0, 0, -1, -1,
1, 0.5, 0, 0,
0.5, 1, 0, 0,
-0.5, -1, 0, 0,
-1, -0.5, 0, 0,
0, 0, 0.5, 1,
0, 0, 1, 0.5,
0, 0, -0.5, -1,
0, 0, -1, -0.5), ncol = 4, byrow = TRUE)
intra_antagonistic_matrix = matrix(
c(1., 1, 1, -1,
1, 1, -1, 1,
1, 0, 1, -1,
1, 0, -1, 1,
1, -1, 1, 1,
1, -1, 1, 0,
1, -1, 1, -1,
1, -1, 0, 1,
1, -1, 0, 0,
1, -1, 0, -1,
1, -1, -1, 1,
1, -1, -1, 0,
1, -1, -1, -1,
0, 1, 1, -1,
0, 1, -1, 1,
0, 0, 1, -1,
0, 0, -1, 1,
0, -1, -1, 1,
-1, 1, 1, 1,
-1, 1, 1, 0,
-1, 1, 1, -1,
-1, 1, 0, 1,
-1, 1, 0, 0,
-1, 1, 0, -1,
-1, 1, -1, 1,
-1, 1, -1, 0,
-1, 1, -1, -1,
-1, 0, 1, -1,
-1, 0, -1, 1,
-1, -1, -1, 1), ncol = 4, byrow = TRUE)
antagonistic_matrix = matrix(
c(1., 1, 0, -1,
1, 1, -1, 0,
1, 1, -1, -1,
1, 0, 0, -1,
1, 0, -1, 0,
1, 0, -1, -1,
0, 1, 0, -1,
0, 1, -1, 0,
0, 1, -1, -1,
0, -1, 1, 1,
0, -1, 1, 0,
0, -1, 0, 1,
-1, 0, 1, 1,
-1, 0, 1, 0,
-1, 0, 0, 1,
-1, -1, 1, 1,
-1, -1, 1, 0), ncol = 4, byrow = TRUE)
local_matrix = matrix(
c(1., 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
-1, 0, 0, 0,
0, -1, 0, 0,
0, 0, -1, 0,
0, 0, 0, -1), ncol = 4, byrow = TRUE)
integrated_matrix = matrix(
c(1., 1, 1, 1,
1, 1, 1, 0,
1, 1, 0, 1,
1, 0, 1, 1,
-1, -1, -1, -1,
0, 1, 1, 1,
0, -1, -1, -1,
-1, 0, -1, -1,
-1, -1, 0, -1,
-1, -1, -1, 0,
0, 1, 0, 1,
1, 0, 1, 0,
0, 1, 1, 0,
1, 0, 0, 1,
0, -1, 0, -1,
-1, 0, -1, 0,
0, -1, -1, 0,
-1, 0, 0, -1), ncol = 4, byrow = TRUE)
directional_matrices = list(Modular = modular_matrix,
Antagonistic = antagonistic_matrix,
Local = local_matrix,
Integrated = integrated_matrix,
"Intra\nmodule" = intra_antagonistic_matrix)
classifyVector = function(x){
names(which.max(llply(directional_matrices, function(mats) max(abs(apply(mats, 1, vectorCor, x))))))
}
|
7754e81d41a3ab3e5c17202019199a3de64ddb41
|
7cc51784a7a3b1ba46441d1f68d43e82f7d1688b
|
/man/focalExtract.Rd
|
7b94d71d5eb81fa46a0d1a7920aab2cb846c55f9
|
[] |
no_license
|
rvalavi/myspatial
|
39d4e8754c9fdf771fc0e70dbce21acff4ae2c7e
|
d44713ca8d6574b8ffd088ecf8e2db963b4da3e3
|
refs/heads/master
| 2021-06-17T13:50:20.255470
| 2021-05-10T02:29:39
| 2021-05-10T02:29:39
| 204,388,450
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 923
|
rd
|
focalExtract.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extraction.R
\name{focalExtract}
\alias{focalExtract}
\title{Focal extraction from a raster object
A function to extract raster values from neighbourhood of points.}
\usage{
focalExtract(
r,
p,
neighbourhood = c(3, 3),
fun = mean,
na.rm = TRUE,
progress = TRUE
)
}
\arguments{
\item{r}{raster layer}
\item{p}{the spatial point or sf objects}
\item{neighbourhood}{the dimention of the neighourhood kernel}
\item{fun}{function to summarize the extracted values (e.g. mean). If
no function is provided, a list is retuned}
\item{na.rm}{logical. If na.rm=TRUE (the default value), NA values are removed before fun is applied.}
\item{progress}{logical. Show a progress par.}
}
\value{
}
\description{
Focal extraction from a raster object
A function to extract raster values from neighbourhood of points.
}
\author{
Roozbeh Valavi
}
|
2309686398bc109575019d35b5e312f1a0fe629a
|
49e55ac34a33c1fda61bb722657c4531858b4a49
|
/R/order_by.R
|
6c8bdfa2667a63ee1d8cc1f84cdfb736e78baa12
|
[] |
no_license
|
Denis-pereira/rquery
|
c83a8c30a04e8ef53269a6e8638c7c47bb3d4ab2
|
09f76333e0eff4637b8ec229ee9a0e4ee3b5f929
|
refs/heads/master
| 2020-04-24T05:09:08.435940
| 2019-02-19T17:32:55
| 2019-02-19T17:32:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,347
|
r
|
order_by.R
|
#' Make an orderby node (not a relational operation).
#'
#' Order a table by a set of columns (not general expressions) and
#' limit number of rows in that order.
#'
#' Note: this is a relational operator in that it takes a table that
#' is a relation (has unique rows) to a table that is still a relation.
#' However, most relational systems do not preserve row order in storage or between
#' operations. So without the limit set this is not a useful operator except
#' as a last step prior to pulling data to an in-memory \code{data.frame} (
#' which does preserve row order).
#'
#'
#' @param source source to select from.
#' @param cols order by column names.
#' @param ... force later arguments to be bound by name
#' @param reverse character, which columns to reverse ordering of.
#' @param limit number limit row count.
#' @param env environment to look to.
#' @return order_by node.
#'
#' @examples
#'
#' if (requireNamespace("DBI", quietly = TRUE) && requireNamespace("RSQLite", quietly = TRUE)) {
#' my_db <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
#' d <- rq_copy_to(my_db, 'd',
#' data.frame(AUC = 0.6, R2 = 0.2))
#' optree <- orderby(d, cols = "AUC", reverse = "AUC", limit=4)
#' cat(format(optree))
#' sql <- to_sql(optree, my_db)
#' cat(sql)
#' print(DBI::dbGetQuery(my_db, sql))
#' DBI::dbDisconnect(my_db)
#' }
#'
#' @export
#'
orderby <- function(source,
cols = NULL,
...,
reverse = NULL,
limit = NULL,
env = parent.frame()) {
force(env)
UseMethod("orderby", source)
}
#' @export
orderby.relop <- function(source,
cols = NULL,
...,
reverse = NULL,
limit = NULL,
env = parent.frame()) {
force(env)
wrapr::stop_if_dot_args(substitute(list(...)),
"rquery::orderby.relop")
if(length(setdiff(reverse, cols))>0) {
stop("rquery::orderby.relop all reverse columns must be in cols list")
}
have <- column_names(source)
check_have_cols(have, cols, "rquery::orderby.relop")
if(!is.null(limit)) {
if(limit<0) {
stop("rquery::orderby.relop limit must be >=0 or NULL")
}
}
r <- list(source = list(source),
table_name = NULL,
parsed = NULL,
orderby = cols,
reverse = reverse,
limit = limit)
r <- relop_decorate("relop_orderby", r)
r
}
#' @export
orderby.data.frame <- function(source,
cols = NULL,
...,
reverse = NULL,
limit = NULL,
env = parent.frame()) {
force(env)
wrapr::stop_if_dot_args(substitute(list(...)),
"rquery::orderby.data.frame")
if(length(setdiff(reverse, cols))>0) {
stop("rquery::orderby.data.frame all reverse columns must also be orderby columns")
}
tmp_name <- mk_tmp_name_source("rquery_tmp")()
dnode <- mk_td(tmp_name, colnames(source))
enode <- orderby(dnode,
cols = cols,
reverse = reverse,
limit = limit,
env = env)
rquery_apply_to_data_frame(source, enode, env = env)
}
#' @export
format_node.relop_orderby <- function(node) {
ot <- node$orderby
if(length(node$reverse)>0) {
ot[ot %in% node$reverse] <- paste0("desc(", ot[ot %in% node$reverse], ")")
}
paste0("orderby(., ",
ifelse(length(ot)>0,
paste(ot, collapse = ", "),
""),
ifelse((length(node$limit)>0) && (length(node$orderby)>0),
paste0(", LIMIT ",
format(ceiling(node$limit), scientific = FALSE)),
""),
")",
"\n")
}
calc_used_relop_orderby <- function (x, ...,
using = NULL) {
wrapr::stop_if_dot_args(substitute(list(...)),
"rquery:::calc_used_relop_orderby")
if(length(using)<=0) {
using <- column_names(x)
}
consuming <- x$orderby
using <- unique(c(using, consuming))
missing <- setdiff(using, column_names(x$source[[1]]))
if(length(missing)>0) {
stop(paste("rquery::calc_used_relop_orderby unknown columns",
paste(missing, collapse = ", ")))
}
using
}
#' @export
columns_used.relop_orderby <- function (x, ...,
using = NULL) {
wrapr::stop_if_dot_args(substitute(list(...)),
"rquery::columns_used.relop_orderby")
cols <- calc_used_relop_orderby(x,
using = using)
return(columns_used(x$source[[1]],
using = cols))
}
#' @export
to_sql.relop_orderby <- function (x,
db,
...,
limit = NULL,
source_limit = NULL,
indent_level = 0,
tnum = mk_tmp_name_source('tsql'),
append_cr = TRUE,
using = NULL) {
if(length(list(...))>0) {
stop("rquery::to_sql.relop_orderby unexpected arguments")
}
dispatch_to_sql_method(
method_name = "to_sql.relop_orderby",
x = x,
db = db,
limit = limit,
source_limit = source_limit,
indent_level = indent_level,
tnum = tnum,
append_cr = append_cr,
using = using)
}
to_sql_relop_orderby <- function(
x,
db,
...,
limit = NULL,
source_limit = NULL,
indent_level = 0,
tnum = mk_tmp_name_source('tsql'),
append_cr = TRUE,
using = NULL) {
wrapr::stop_if_dot_args(substitute(list(...)),
"rquery::to_sql.relop_orderby")
cols1 <- column_names(x$source[[1]])
cols <- vapply(cols1,
function(ci) {
quote_identifier(db, ci)
}, character(1))
ot <- vapply(x$orderby,
function(ci) {
quote_identifier(db, ci)
}, character(1))
if(length(x$reverse)>0) {
ot[x$orderby %in% x$reverse] <- paste(ot[x$orderby %in% x$reverse], "DESC")
}
subcols <- calc_used_relop_orderby(x, using=using)
subsql_list <- to_sql(x$source[[1]],
db = db,
limit = NULL, # can't pass down limit from order_by
source_limit = source_limit,
indent_level = indent_level + 1,
tnum = tnum,
append_cr = FALSE,
using = subcols)
subsql <- subsql_list[[length(subsql_list)]]
tab <- tnum()
prefix <- paste(rep(' ', indent_level), collapse = '')
q <- paste0(prefix, "SELECT * FROM (\n",
subsql, "\n",
prefix, ") ",
tab,
ifelse(length(ot)>0,
paste0(" ORDER BY ", paste(ot, collapse = ", ")),
""))
if(!is.null(x$limit)) {
limit <- min(limit, x$limit)
}
if(!is.null(limit)) {
q <- paste(q, "LIMIT",
format(ceiling(limit), scientific = FALSE))
}
if(append_cr) {
q <- paste0(q, "\n")
}
c(subsql_list[-length(subsql_list)], q)
}
|
f7fda034a527647c2e0e35b370e9d6697f76ee32
|
c6d22e9bea028bfee229c323d0b501c28fad95dd
|
/man/plot.magree.Rd
|
a079dea479b5554ff7682f24f29ed2b6ae909a07
|
[] |
no_license
|
cran/magree
|
f6b46e954c13999e5fdd4269a01bcef15dd88f70
|
ac3acc89451207f393c06e5586aa35830cc2a8a4
|
refs/heads/master
| 2021-07-09T23:24:13.614475
| 2020-09-03T03:10:02
| 2020-09-03T03:10:02
| 76,046,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,258
|
rd
|
plot.magree.Rd
|
\name{plot.magree}
\alias{plot.magree}
\alias{plot.oconnell}
\alias{plot.schouten}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
plot methods for magree, oconnell and schouten objects
}
\description{
plot methods for magree, oconnell and schouten objects
}
\usage{
\method{plot}{magree}(x, type = c("p1", "kappa by observer"),
xlab = NULL, ylab = NULL, main = NULL, ...)
\method{plot}{oconnell}(x, type = c("p1"), xlab = NULL, ylab = NULL, main = NULL, ...)
\method{plot}{schouten}(x, type = c("kappa by observer"), xlab = NULL,
ylab = NULL,
main = NULL, xdelta = 0.1, axes = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ magree, oconnell or schouten object.
%% ~~Describe \code{obj} here~~
}
\item{type}{Type of plot. For \code{"p1"}, plot the probabilities by observer. For \code{"kappa by observer"}, plot the kappas for each observer.
%% ~~Describe \code{type} here~~
}
\item{xlab}{
%% ~~Describe \code{xlab} here~~
}
\item{ylab}{
%% ~~Describe \code{ylab} here~~
}
\item{main}{
%% ~~Describe \code{main} here~~
}
\item{xdelta}{For plot.schouten and "kappa by observer", specifies the width of
the brackets for the confidence intervals.
%% ~~Describe \code{xdelta} here~~
}
\item{axes}{
Bool for whether to plot the axes.
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
}
}
%% \details{
%% %% ~~ If necessary, more details than the description above ~~
%% }
%% \value{
%% %% ~Describe the value returned
%% %% If it is a LIST, use
%% %% \item{comp1 }{Description of 'comp1'}
%% %% \item{comp2 }{Description of 'comp2'}
%% %% ...
%% }
%% \references{
%% %% ~put references to the literature/web site here ~
%% }
%% \author{
%% %% ~~who you are~~
%% }
%% \note{
%% %% ~~further notes~~
%% }
%% %% ~Make other sections like Warning with \section{Warning }{....} ~
%% \seealso{
%% %% ~~objects to See Also as \code{\link{help}}, ~~~
%% }
\examples{
fit <- schouten(landis)
plot(fit)
fit <- oconnell(landis)
plot(fit,type="p1")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }% use one of RShowDoc("KEYWORDS")
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
bce765d3577b183f131e5f8ea5c3ef72f8619f92
|
6b32948c7241e204753cb88999c76cee323b4736
|
/TransMetaRare/man/x.list.Rd
|
229ed4176677d0d88cab767ff2528d83f6061523
|
[] |
no_license
|
shijingc/TransMetaRare
|
e9a0e74fef68bdfb59f35741b1e777afa23b1e44
|
5206b4a880c07b2d5df3b8d26a4bf0d6f88d77a6
|
refs/heads/master
| 2020-03-19T10:24:07.058000
| 2018-06-06T18:05:12
| 2018-06-06T18:05:12
| 136,367,016
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
rd
|
x.list.Rd
|
\name{x.list}
\alias{x.list}
\docType{data}
\title{Example dataset, a list of non-genetic adjusting covariate matrices}
\description{
a list object of covariates. It has 4 elements for 4 study cohorts. Each element is a matrix of covariates. The first, third and last elements have two covariates (two columns), and the second element has one covariate (one column).
}
\usage{data(x.list)}
\examples{
data(x.list)
length(x.list)
head(x.list[[1]])
}
\keyword{datasets}
|
a91db51b02c6fb26be262fe1318ff1c1508e8c1a
|
f1d78ff9d5603149ffedf9428342fadcccf2cb69
|
/plot4.R
|
d35f48c2cf72e6e2e1e3be67dc33e8cfdb2d12ef
|
[] |
no_license
|
BuiQuocChinh/ExData_Plotting1
|
6eb19e632e8cbdc25d32f3448619da0ef749cf01
|
da4d431ebf5995cf0d79dcdee8f508aac5e5901b
|
refs/heads/master
| 2020-12-28T21:05:58.473776
| 2014-05-11T11:54:15
| 2014-05-11T11:54:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,726
|
r
|
plot4.R
|
# Produces plot4 for Assignment 1: a 2x2 panel of household power plots for
# 2007-02-01 and 2007-02-02, written to plot4.png (480x480, transparent bg).

# Load data from the current working directory; "?" encodes missing values.
Data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   stringsAsFactors = FALSE,
                   colClasses = c("character", "character", "numeric", "numeric",
                                  "numeric", "numeric", "numeric", "numeric",
                                  "numeric"),
                   na.strings = "?")

# Keep only the two days of interest (dates are still d/m/Y strings here).
SubData <- Data[Data[, 1] == "1/2/2007" | Data[, 1] == "2/2/2007", ]

# Build a full timestamp from the date and time columns, then convert Date.
SubData$Time <- strptime(paste(SubData[, 1], SubData[, 2]), format = "%d/%m/%Y %H:%M:%S")
SubData$Date <- as.Date(SubData$Date, format = "%d/%m/%Y")

# Open the output device with a transparent background.
# BUG FIX: the original called par(bg = NA) *before* png(), so the setting
# applied to the previously active device and plot4.png kept its default
# background; the device-level bg argument achieves the intended effect.
png("plot4.png", width = 480, height = 480, bg = "transparent")

# Layout: two rows and two columns, filled column-wise.
par(mfcol = c(2, 2))

# Panel (1,1): global active power over time.
plot(SubData$Time, SubData$Global_active_power, type = "l", lwd = 1,
     ylab = "Global Active Power", xlab = "")

# Panel (2,1): the three sub-metering series with a legend.
plot(SubData$Time, SubData$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(SubData$Time, SubData$Sub_metering_2, type = "l", col = "red")
lines(SubData$Time, SubData$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, bty = "n", col = c("black", "red", "blue"))

# Panel (1,2): voltage over time.
plot(SubData$Time, SubData$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")

# Panel (2,2): global reactive power over time.
plot(SubData$Time, SubData$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")

dev.off()
|
f1a20986ee4cfd1a75b304cd37b8b3adc88b40be
|
35d547a03919cf01465b49bdb273e28a9984d88d
|
/Section 6/Section6_Basic_RNN.R
|
bfda7f068ce7b3c5d17ca05688b591305e267e92
|
[
"MIT"
] |
permissive
|
PacktPublishing/R-Deep-Learning-Solutions
|
be51a80c95cb609d9a5b03f7fbbce685c481ae5d
|
55b82518d8c5111f96286200d76dea5a70b07397
|
refs/heads/master
| 2021-06-25T00:51:23.747975
| 2021-01-19T12:57:13
| 2021-01-19T12:57:13
| 187,345,999
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,342
|
r
|
Section6_Basic_RNN.R
|
# Basic RNN on a down-scaled (16x16) MNIST, fed as 16 time steps of 16 inputs.
library(tensorflow)
require(imager)
require(caret)

# Load MNIST with one-hot labels (downloads to MNIST-data/ on first run).
datasets <- tf$contrib$learn$datasets
mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = TRUE)

# Plot one image stored as a flat vector with pixel.y columns.
plot_mnist <- function(imageD, pixel.y = 16) {
  require(imager)
  actImage <- matrix(imageD, ncol = pixel.y, byrow = FALSE)
  img.col.mat <- imappend(list(as.cimg(actImage)), "c")
  plot(img.col.mat, axes = F)
}

# Downscale one flat 28x28 MNIST image to n.pixel.x by n.pixel.y,
# returning a 1-row matrix (flat vector) again.
reduceImage <- function(actds, n.pixel.x = 16, n.pixel.y = 16) {
  actImage <- matrix(actds, ncol = 28, byrow = FALSE)
  img.col.mat <- imappend(list(as.cimg(actImage)), "c")
  thmb <- resize(img.col.mat, n.pixel.x, n.pixel.y)
  outputImage <- matrix(thmb[, , 1, 1], nrow = 1, byrow = F)
  return(outputImage)
}

# Down-scale every train/test image; keep labels; free the raw dataset.
trainData <- t(apply(mnist$train$images, 1, FUN = reduceImage))
validData <- t(apply(mnist$test$images, 1, FUN = reduceImage))
labels <- mnist$train$labels
labels_valid <- mnist$test$labels
rm(mnist)

tf$reset_default_graph()
sess <- tf$InteractiveSession()

# Hyperparameters: each 16x16 image is 16 time steps of a 16-value input.
n_input <- 16
step_size <- 16
n.hidden <- 64
n.class <- 10
lr <- 0.01
batch <- 500
iteration = 100

# Unstack time steps, run a BasicRNNCell, and project the last output to
# class logits.
# BUG FIX: the parameter was declared as `weight` but the body referenced the
# global `weights`; the parameter is now named `weights` so the value passed
# by the caller is actually the one used (the call site is positional, so
# behavior with the existing call rnn(x, weights, bias) is unchanged).
rnn <- function(x, weights, bias) {
  x = tf$unstack(x, step_size, 1)
  rnn_cell = tf$contrib$rnn$BasicRNNCell(n.hidden)
  cell_output = tf$contrib$rnn$static_rnn(rnn_cell, x, dtype = tf$float32)
  last_vec = tail(cell_output[[1]], n = 1)[[1]]
  return(tf$matmul(last_vec, weights) + bias)
}

# Fraction of predictions whose argmax matches the one-hot label.
# (Defined but not used in the training loop below.)
eval_acc <- function(yhat, y) {
  correct_Count = tf$equal(tf$argmax(yhat, 1L), tf$argmax(y, 1L))
  mean_accuracy = tf$reduce_mean(tf$cast(correct_Count, tf$float32))
  return(mean_accuracy)
}

# Placeholders and trainable output-projection parameters.
with(tf$name_scope('input'), {
  x = tf$placeholder(tf$float32, shape = shape(NULL, step_size, n_input), name = 'x')
  y <- tf$placeholder(tf$float32, shape(NULL, n.class), name = 'y')
  weights <- tf$Variable(tf$random_normal(shape(n.hidden, n.class)))
  bias <- tf$Variable(tf$random_normal(shape(n.class)))
})

# Softmax cross-entropy loss, optimized with Adam.
yhat = rnn(x, weights, bias)
cost = tf$reduce_mean(tf$nn$softmax_cross_entropy_with_logits(logits = yhat, labels = y))
optimizer = tf$train$AdamOptimizer(learning_rate = lr)$minimize(cost)

sess$run(tf$global_variables_initializer())

# Mini-batch training loop: sample a batch, reshape to (batch, steps, input),
# take one optimizer step and report the batch loss.
for (i in 1:iteration) {
  spls <- sample(1:dim(trainData)[1], batch)
  sample_data <- trainData[spls, ]
  sample_y <- labels[spls, ]
  sample_data = tf$reshape(sample_data, shape(batch, step_size, n_input))
  out <- optimizer$run(feed_dict = dict(x = sample_data$eval(), y = sample_y))
  if (i %% 1 == 0) {
    cat("iteration - ", i, "Training Loss - ",
        cost$eval(feed_dict = dict(x = sample_data$eval(), y = sample_y)), "\n")
  }
}
|
9deaa7cd77c8f9b4d6a19599fc8f4023d90efce1
|
6f91ab8f666ed398aed6d5f9a31cd29fe3ce29d8
|
/Cardwell_Wind2DailySummary.R
|
2f210fd96309b7a5efb3c31f31daf4fcf39df614
|
[] |
no_license
|
CassieJames/NESP-processing-scipts
|
366bb10084eef53f48c12289434147c1d0af8315
|
31c735d4dfe32c878ce0b076a143ebe848dae852
|
refs/heads/master
| 2023-01-24T14:55:45.670542
| 2020-12-09T01:01:23
| 2020-12-09T01:01:23
| 277,948,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,883
|
r
|
Cardwell_Wind2DailySummary.R
|
#---
# title: "Wind Data Treatment"
# author: "Eduardo Teixeira da Silva", modified by C James (4/02/2020)
# date: "3 March 2016"
# output: html_document
#---
# Reads 3-hourly wind observations (speed, direction and BOM quality flags)
# from the Access database, decomposes the wind into u (x-axis, E-W) and
# v (y-axis, N-S) components, averages them to daily values, writes a CSV for
# downstream use and stores the table back in the database.
#
# NOTE(review): the original script contained the whole configuration +
# processing section twice (an accidental copy/paste); this version runs it
# once, which produces the identical WW table.
#
# Wind data is collected every three hours from midnight till 9 pm, i.e. at
# 00:00, 03:00, 06:00, 09:00, 12:00, 15:00, 18:00 and 21:00. If no unwanted
# hours are excluded, daily u and v components over all readings are produced.
# Hours to be excluded:
exclude<-c('03','06','18','21') # example: drop the 03:00/06:00/18:00/21:00 readings
# If there is no need for exclusion, use any time not in the normal sampling
# times, e.g. 22 — this assignment overrides the example above:
exclude<-c('22')

# Station of interest and analysis period.
# All possible stations are:
# Station_Number Station_Name Closest_River commence
# 31209 COOKTOWN AIRPORT Daintree 2000-06-01
# 31011 CAIRNS AERO Barron 1941-05-01
# 32004 CARDWELL MARINE PDE Tully 1871-01-01
# 32025 INNISFAIL Johnstone 1881-01-01 # Appears to be closed as of 30th June 2020
# 32197 INNISFAIL AERO Johnstone 2016-06-01
# 32078 INGHAM COMPOSITE Herbert 1968-02-01
# 32141 LUCINDA POINT Herbert 1980-01-01
# 32040 TOWNSVILLE AERO Cleveland Bay 1940-01-01
# 33295 ALVA BEACH Burdekin 1997-02-01
# 33119 MACKAY M.O Pioneer 1959-01-01
# 33294 YEPPOON THE ESPLANADE Fitzroy 1993-11-01
# 39123 GLADSTONE RADAR Fitzroy 1957-01-01
# 39128 BUNDABERG AERO Burnett 1942-01-01
# 40405 HERVEY BAY AIRPORT Mary 1999-03-01
#* 39083 ROCKHAMPTON AERO TOO FAR FROM THE COAST, USE NOT RECOMMENDED 1939-01-01 *
# Station_Number:
WdStn<-c('32004')
# Start date and End date:
datei='2016-06-01'
datef='2020-10-26'

# STARTING UP THE BIG DATA PROCESSING
# NOTE(review): melting below uses reshape2::melt, so the reshape2 package
# must be installed even though only reshape is attached here.
library(reshape)
library(RODBC)
library(doBy)
library(plyr)
library(dplyr)
library(rCAT)
library(data.table)

ch<-odbcConnectAccess("C:/Users/jc246980/OneDrive - James Cook University/Current Projects/MMP/DATA_BASE/ACTFR.mdb")

for (j in 1:length(WdStn)){
  # Hourly wind speed/direction columns plus their QC flags for this station.
  w1<-sqlQuery(ch,paste("SELECT Station_Number, datestamp, Wind_speed_at_00_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_00_hours_Local_Time, Wind_speed_at_03_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_03_hours_Local_Time, Wind_speed_at_06_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_06_hours_Local_Time, Wind_speed_at_09_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_09_hours_Local_Time, Wind_speed_at_12_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_12_hours_Local_Time, Wind_speed_at_15_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_15_hours_Local_Time, Wind_speed_at_18_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_18_hours_Local_Time, Wind_speed_at_21_hours_Local_Time_measured_in_km_h, Quality_of_wind_speed_at_21_hours_Local_Time, Wind_direction_at_00_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_00_hours_Local_Time, Wind_direction_at_03_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_03_hours_Local_Time, Wind_direction_at_06_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_06_hours_Local_Time, Wind_direction_at_09_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_09_hours_Local_Time, Wind_direction_at_12_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_12_hours_Local_Time, Wind_direction_at_15_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_15_hours_Local_Time, Wind_direction_at_18_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_18_hours_Local_Time, Wind_direction_at_21_hours_Local_Time_measured_in_degrees, Quality_of_wind_direction_at_21_hours_Local_Time FROM wind_data WHERE Station_Number = '",WdStn[j],"' AND datestamp >= #",datei,"# AND datestamp <= #",datef,"# ORDER BY datestamp",sep=''))
  # Station metadata (name and coordinates) for labelling the output rows.
  w2<-sqlQuery(ch,paste("SELECT Station_Number, Station_Name, Lat_DD, Log_DD FROM wind_info WHERE Station_Number = '",WdStn[j],"'",sep=''))

  # Melt the wide hourly columns to long format. The observation hour is
  # recovered from a fixed character range of each melted column name.
  wSPEED<-reshape2::melt(w1, id=1:2, measure.var=seq(3,17,2), var='speed_km_h')
  colnames(wSPEED)[3:4]<-c('time','speed_km_h')
  wSPEED$time<-substr(wSPEED$time,15,16)
  wSPEEDqc<-reshape2::melt(w1, id=1:2, measure.var=seq(4,18,2), var='QC_speed')
  colnames(wSPEEDqc)[3:4]<-c('time','QC_speed')
  wSPEEDqc$time<-substr(wSPEEDqc$time,26,27)
  wDIR<-reshape2::melt(w1, id=1:2, measure.var=seq(19,33,2), var='dir_degrees')
  colnames(wDIR)[3:4]<-c('time','dir_degrees')
  wDIR$time<-substr(wDIR$time,19,20)
  wDIRqc<-reshape2::melt(w1, id=1:2, measure.var=seq(20,34,2), var='QC_direction')
  colnames(wDIRqc)[3:4]<-c('time','QC_direction')
  wDIRqc$time<-substr(wDIRqc$time,30,31)
  ws<-merge(wSPEED,wSPEEDqc,by=c('Station_Number','datestamp','time'))
  wd<-merge(wDIR,wDIRqc,by=c('Station_Number','datestamp','time'))
  ww<-merge(ws,wd,by=c('Station_Number','datestamp','time'))

  # QUALITY FLAG DESCRIPTIONS
  # ____________________________________________________________________
  # Y: quality controlled and acceptable
  # N: not quality controlled
  # S: quality controlled and considered suspect
  # I: quality controlled and inconsistent with other known information
  # blank (X): no quality information available
  # ____________________________________________________________________
  # Drop observations flagged I (inconsistent) or S (suspect).
  if(length(which(ww$QC_speed=='I'))>0){ww<-ww[-c(which(ww$QC_speed=='I')),]}
  if(length(which(ww$QC_direction=='I'))>0){ww<-ww[-c(which(ww$QC_direction=='I')),]}
  if(length(which(ww$QC_speed=='S'))>0){ww<-ww[-c(which(ww$QC_speed=='S')),]}
  if(length(which(ww$QC_direction=='S'))>0){ww<-ww[-c(which(ww$QC_direction=='S')),]}

  # Wind data decomposition.
  # Add components u and v (angles must be in radians, hence * pi/180).
  ww$u_comp_EW<-ww$speed_km_h*cos((ww$dir_degrees)*pi/180) # u-component, x-axis, E-W
  ww$v_comp_NS<-ww$speed_km_h*sin((ww$dir_degrees)*pi/180) # v-component, y-axis, N-S

  # Excluding unwanted hours (currently disabled; `exclude` is set to '22'):
  # if (exclude %in% c('03','06','09','12','15','18','21')){ww<-ww[-c(which(ww$time %in% exclude)),]}

  # Daily means of all numeric columns, plus the mean and non-NA count of
  # each wind component per day.
  ww1 <- ddply(ww, ~datestamp, numcolwise(mean), na.rm=T)
  ww1 <- ww1[,-c(5,6)]
  wwU<-summaryBy(u_comp_EW ~ datestamp, data = ww, FUN = function(x) {c(Mean = mean(x, na.rm=T), N = length(which(!is.na(x))))})
  wwV<-summaryBy(v_comp_NS ~ datestamp, data = ww, FUN = function(x) {c(Mean = mean(x, na.rm=T), N = length(which(!is.na(x))))})
  ww2 <- merge(ww1,wwU, by='datestamp')
  ww <- merge(ww2,wwV, by='datestamp')

  # Daily mean vector direction, mapped into [0, 360) degrees.
  # NOTE(review): the original comment here promised to exclude days with
  # fewer than 3 measurements, but no such filter is implemented — confirm
  # whether it should be.
  ww$vect_direction <- atan2(ww$v_comp_NS.Mean,ww$u_comp_EW.Mean)
  ww$vect_direction <- rad2deg(ww$vect_direction)
  convert_360 <- function(x) {
    x<-x[!is.na(x)]
    x[x < 0] <- 360 + x[x < 0]
    return(x)
  }
  ww$vect_direction=lapply(X=ww$vect_direction,convert_360)

  # Tidy decimal places and column names.
  ww$speed_km_h <- round(ww$speed_km_h,4)
  ww$dir_degrees <- round(ww$dir_degrees,4)
  colnames(ww)[c(5,7)]<-c("u_comp_EW","v_comp_NS")

  # Attach station metadata (Lat/Long, number and name) to every row.
  ww$Lat_DD<-rep(w2$Lat_DD,length(ww[,1]))
  ww$Long_DD<-rep(w2$Log_DD,length(ww[,1]))
  ww$StnNum<-rep(w2$Station_Number,length(ww[,1]))
  ww$StnName<-rep(w2$Station_Name,length(ww[,1]))

  # Accumulate results across stations.
  if (j==1){
    WW<-ww
  } else {
    WW<-rbind(WW,ww)
  }
}

# Drop the repeated Station_Number column and the per-day observation counts.
WW<-WW[,-c(2,6,8)]

# Save daily data out for Caro.
# BUG FIX: the original built the path with sep="" and no separator, which
# wrote ".../WindDaily_wind_data_CARDWELL.csv"; use "/" as the separator.
NESP.dir="C:/Users/jc246980/Documents/Current projects/NESP/Wind"
fwrite(WW,paste(NESP.dir,"Daily_wind_data_CARDWELL.csv",sep="/"),dateTimeAs="write.csv")

# ____________________________________________________________________
# Save the table into the database, first removing any existing rows for
# these stations and this date range.
# BUG FIX: the original DELETE used an undefined variable `station`; loop
# over the configured station numbers instead.
for (stn in WdStn) {
  sqlQuery(ch,paste("DELETE * FROM R_wind_components WHERE StnNum='",stn,"' AND (datestamp >= #",datei,"# AND datestamp <= #",datef,"#)",sep=''), errors = F)
}
sqlSave(ch,WW,tablename="R_wind_components",append=T,rownames=F,colnames=F,verbose=F,safer=T,fast=T,test=F,nastring=NA)
# Close the connection to the database.
odbcClose(ch)
|
a71474f5c9fda3612c9368be051f738d3eda6c18
|
2c61bde0bf72a60019eee9add698c7b58c756d46
|
/src/hic_genome_interaction_matrix.R
|
cb221f784e9c669379b58e6e2a0c2fe0ec621a96
|
[] |
no_license
|
sarahinwood/mh-hic-viral
|
09b175eeb17d8f277050e36c88065726a64802c1
|
7ec0e5dbe16c258e5ae1995b7518db8cd4ff847a
|
refs/heads/master
| 2021-07-02T17:59:22.492211
| 2021-06-17T20:39:30
| 2021-06-17T20:39:30
| 241,481,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,376
|
r
|
hic_genome_interaction_matrix.R
|
#!/usr/bin/env Rscript
# Builds a Hi-C interaction-count matrix from matlock output and extracts
# the subsets involving viral contigs and Hi-C scaffolds. Intended to run
# inside a snakemake rule (inputs/outputs/log come from the snakemake object).
#######
# LOG #
#######
# Redirect both messages and regular output into the snakemake log file.
log <- file(snakemake@log[[1]], open = "wt")
sink(log, type = "message")
sink(log, append = TRUE, type = "output")
#############
# LIBRARIES #
#############
library(data.table)
library(dplyr)
###########
# GLOBALS #
###########
# Tabular matlock output (read pairs) and the contig annotation table.
matlock_bam <- snakemake@input[["bam"]]
viral_contigs_file <- snakemake@input[["viral_contigs_file"]]
########
# MAIN #
########
viral_contigs_table <- fread(viral_contigs_file)
viral_contigs <- subset(viral_contigs_table, plot_label == "Viral contig")
matlock_interactions <- fread(matlock_bam)
##make matrix of interactions
# Count read pairs per (V2, V6) contig pair; V2/V6 hold the two contig names.
matlock_interaction_counts <- matlock_interactions[,.N,by=.(V2, V6)]
##write interaction matrix to analyse at later date
fwrite(matlock_interaction_counts, snakemake@output[["interaction_matrix"]])
##self interactions
# Pairs where both ends map to the same contig.
self_interactions <- subset(matlock_interaction_counts, matlock_interaction_counts$V2 == matlock_interaction_counts$V6)
# Extract the numeric scaffold id from names like "Scaffold_<n>_..." and sort.
self_interactions$scaffold_no <- tstrsplit(self_interactions$V2, "_", keep=c(2))
self_interactions$scaffold_no <- as.character(self_interactions$scaffold_no)
self_interactions$scaffold_no <- as.numeric(self_interactions$scaffold_no)
setorder(self_interactions, scaffold_no)
##only hi-c self interactions
# assumes the first 12 rows after sorting are the Hi-C scaffolds — TODO confirm
scaffolds <- self_interactions[c(1,2,3,4,5,6,7,8,9,10,11,12),]
# Total self-interaction count for those scaffolds (printed when interactive).
sum(scaffolds$N)
##filter interaction matrix for viral contigs
# Keep pairs where either end is a viral contig.
V2_viral <- subset(matlock_interaction_counts, V2 %in% viral_contigs$`#Name`)
V6_viral <- subset(matlock_interaction_counts, V6 %in% viral_contigs$`#Name`)
viral_interactions <- full_join(V2_viral, V6_viral)
fwrite(viral_interactions, snakemake@output[['viral_interactions']])
##filter for viral contig interactions with hi-c scaffolds
hic_scaffolds <- subset(viral_contigs_table, plot_label == "Hi-C scaffold")
hic_viral_scaffolds <- subset(viral_contigs_table, plot_label == "Hi-C scaffold and viral")
all_hic<-full_join(hic_scaffolds, hic_viral_scaffolds)
# Viral-involving pairs where either end is a Hi-C scaffold.
viral_hic_V2 <- subset(viral_interactions, V2 %in% all_hic$`#Name`)
viral_hic_V6 <- subset(viral_interactions, V6 %in% all_hic$`#Name`)
viral_hic_interactions <- full_join(viral_hic_V2, viral_hic_V6)
sum(viral_hic_interactions$N)
# Columns 2,3,6,7: contig name and position for each end of every pair.
interaction_locations <- matlock_interactions[,c(2,3,6,7)]
fwrite(interaction_locations, snakemake@output[["interaction_locations"]])
##may need to filter out viral interactions
# write log
sessionInfo()
|
e354a67dd48768dd0a305fc79b28e1e49499a42c
|
4ecc3c39c7c5b4d7d72c881381b3b2af6da50edb
|
/man/BayesPois.Rd
|
9f030099ee22ea75e171b35edefb66d9a8cefff4
|
[] |
no_license
|
cran/Bolstad2
|
e1a089ef39275817172e106e80efb2dd913d6b1f
|
34d0854c3b8af01aac374d6911975ebecdb293b1
|
refs/heads/master
| 2022-04-30T06:26:54.804699
| 2022-04-11T08:22:32
| 2022-04-11T08:22:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,749
|
rd
|
BayesPois.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BayesPois.R
\name{BayesPois}
\alias{BayesPois}
\title{Bayesian Poisson Regression}
\usage{
BayesPois(
y,
x,
steps = 1000,
priorMean = NULL,
priorVar = NULL,
mleMean = NULL,
mleVar,
startValue = NULL,
randomSeed = NULL,
plots = FALSE
)
}
\arguments{
\item{y}{the response vector of counts}
\item{x}{matrix of covariates}
\item{steps}{the number of steps to use in the Metropolis-Hastings updating}
\item{priorMean}{the mean of the prior}
\item{priorVar}{the variance of the prior}
\item{mleMean}{the mean of the matched curvature likelihood}
\item{mleVar}{the covariance matrix of the matched curvature likelihood}
\item{startValue}{a vector of starting values for all of the regression
coefficients including the intercept}
\item{randomSeed}{a random seed to use for different chains}
\item{plots}{Plot the time series and auto correlation functions for each of
the model coefficients}
}
\value{
A list containing the following components:
\item{beta}{a data frame containing the sample of the model coefficients
from the posterior distribution} \item{mleMean}{the mean of the matched
curvature likelihood. This is useful if you've used a training set to
estimate the value and wish to use it with another data set}
\item{mleVar}{the covariance matrix of the matched curvature likelihood. See
mleMean for why you'd want this}
}
\description{
Performs Metropolis Hastings on the Poisson regression model to draw a sample
from posterior. Uses a matched curvature Student's t candidate generating
distribution with 4 degrees of freedom to give heavy tails.
}
\examples{
data(poissonTest.df)
results = BayesPois(poissonTest.df$y, poissonTest.df$x)
}
|
18a903a73294293170a549d1c370b8ee896dab54
|
e039685fc9bdac3a7ffbeedb5aa22e4275f5c6a0
|
/model-evaluation/Finding High Correlation [caret].R
|
cce3ea8b4ab18b4ae4030db12233c4745bf2ce18
|
[] |
no_license
|
cajogos/r-machine-learning
|
fb227124d2a393a612b22c065421a96b16c0cbe8
|
261ebe2c5def39a6db4f31395a9d92fe26a81eda
|
refs/heads/master
| 2020-08-21T05:27:38.660252
| 2019-12-25T19:02:35
| 2019-12-25T19:02:35
| 216,102,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
Finding High Correlation [caret].R
|
# Finding highly correlated features with the caret package
# NOTE: regression/classification models often perform better when highly
# correlated attributes are removed.

rm(list = ls(all = TRUE)) # Clean-up environment (tutorial script; avoid in reusable code)
# BUG FIX: a bare dev.off() errors with "cannot shut down device 1" when no
# graphics device is open; only close a device if one actually exists.
if (dev.cur() > 1) {
  dev.off() # Clean-up any plots
}

# --- The prepared churn dataset --- #
library(C50)
data(churn)
# Drop identifier-like columns with no predictive value.
churnTrain <- churnTrain[, ! names(churnTrain) %in% c("state", "area_code", "account_length")]
# Reproducible 70/30 train/test split.
set.seed(2)
ind <- sample(2, nrow(churnTrain), replace = TRUE, prob = c(0.7, 0.3))
trainset <- churnTrain[ind == 1,]
testset <- churnTrain[ind == 2, ]
# ------ #

library(caret)
# Remove any features that are not coded in numeric characters:
# cor() below requires an all-numeric input.
new_train <- trainset[, ! names(churnTrain) %in% c("churn", "international_plan", "voice_mail_plan")]
# Pairwise correlation matrix of the numeric attributes.
cor_mat <- cor(new_train)
cor_mat
# Indices of attributes whose pairwise correlation exceeds the cutoff.
highlyCorrelated <- findCorrelation(cor_mat, cutoff = 0.75) # Using cut off of 0.75
# Obtain the column names of highly correlated features.
names(new_train)[highlyCorrelated]
# You can consider removing some highly correlated attributes and keep one or two for better accuracy
|
bea9254b3d6741d8fc5c0234ec8f3b4cae381e3e
|
9aa4cc14706efc160068bb205291326b30e9c16d
|
/tests/testthat/test_options.R
|
20925ce2437607ef1e6ad696d5ff18a2494c9850
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
arturochian/parcoords
|
15c01e252b06d40576b2c67fa279605d6834d724
|
a16eec94cd86b92909aa2b02e035bf3970d01961
|
refs/heads/master
| 2021-01-12T19:53:15.650837
| 2015-01-28T23:13:43
| 2015-01-28T23:13:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 961
|
r
|
test_options.R
|
test_that("basic creation", {
  # A data.frame (even empty) yields an htmlwidget; data is mandatory.
  widget <- parcoords(data.frame())
  expect_is(widget, c("parcoords", "htmlwidget"))
  expect_error(parcoords())
})

test_that("options", {
  # use mtcars dataset
  data(mtcars)

  # rownames = TRUE (default): row names become a leading "names" column.
  expected_with_names <- data.frame(names = rownames(mtcars), mtcars, stringsAsFactors = F)
  expect_identical(parcoords(mtcars)$x$data, expected_with_names)

  # rownames = FALSE: data is passed through untouched.
  expect_identical(parcoords(mtcars, rownames = F)$x$data, mtcars)

  # brushMode matching is designed to be flexible and forgiving:
  # unrecognized values are dropped, known aliases are normalized.
  expect_null(parcoords(data.frame(), brushMode = "something")$x$options$brushMode)
  for (alias in c("1d", "1D-axis")) {
    expect_match(parcoords(data.frame(), brushMode = alias)$x$options$brushMode, "1D-axes")
  }
  for (alias in c("2d", "2Dstrum")) {
    expect_match(parcoords(data.frame(), brushMode = alias)$x$options$brushMode, "2D-strums")
  }
})
|
e572ce54f1b0968d41d0d41b629db61ed709585a
|
34d07ad26869ffbb81c534c2fd49209708ffbffa
|
/UserInput2JASON.R
|
ecdc7380eb648589cea7b4d9942c490bfe544903
|
[] |
no_license
|
ericaenjoy3/GRFLoop
|
22ec10b86e5eee527031c49f7d5c6102f3fbd0da
|
3d41f8f4200f569da3ae7c8c5493ad751f0caf67
|
refs/heads/master
| 2021-05-09T19:20:44.279147
| 2018-08-15T23:36:13
| 2018-08-15T23:36:13
| 118,637,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,135
|
r
|
UserInput2JASON.R
|
#!/usr/bin/env Rscript
###
# Collect input data files interactively and write them to a JSON
# configuration for read into H3K27AC_LoopType.R.
# input (prompted; one or more paths per field, empty line moves on):
#   (1) hichip: H3K27ac HiChiP file (minimum 4 columns: locus1, locus2, gene1, gene2)
#   (2) vchip:  ChIP-seq file(s) to overlap with at least one hi-chip anchor for loop validation
#   (3) echip:  ChIP-seq file(s) to overlap at non-gene anchor for anchor to be called enhancers
#   (4) bedout: output bed-like file for downstream analyses
# output:
#   (1) JSON file (path prompted as "output configuration")
###
library(RJSONIO)

# Thin prompt wrapper kept from the original so it can be stubbed in tests.
readInput <- function(prompt){
  str <- readline(prompt = prompt)
  return(str)
}

fs <- c("hichip", "vchip", "echip", "bedout")

# ROBUSTNESS FIX: the original grew fs_list ad hoc, so a field the user
# skipped entirely left a NULL hole (or a too-short list) and the final
# names(fs_list) <- fs could fail. Pre-allocate one (possibly empty)
# character vector per field and fill it with a simple per-field loop.
fs_list <- setNames(vector("list", length(fs)), fs)
for (field in fs) {
  entries <- character(0)
  repeat {
    string <- readInput(prompt = paste0(field, " file: "))
    if (nchar(string) == 0) break  # empty line ends entry for this field
    entries <- c(entries, string)
  }
  fs_list[[field]] <- entries
}

# Keep prompting until a non-empty output path is supplied.
repeat {
  string <- readInput(prompt = "output configuration: ")
  if (nchar(string) > 0) {
    fout <- string
    break
  }
}

# Serialize the collected configuration and write it out.
exportJson <- toJSON(fs_list)
write(exportJson, fout)
|
a195f30178188747d154ef1199c31fad5cd0fd4d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/umx/examples/umx_set_optimization_options.Rd.R
|
e4c98f60cef9d01d84c42f8512e35c971cd68eaf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
umx_set_optimization_options.Rd.R
|
# Extracted example code for umx::umx_set_optimization_options (auto-generated
# example runner; the "###"/"##D" markers are part of the extraction format).
library(umx)
### Name: umx_set_optimization_options
### Title: umx_set_optimization_options
### Aliases: umx_set_optimization_options
### ** Examples
umx_set_optimization_options() # print the existing state(s)
umx_set_optimization_options("mvnRelEps") # show this one
## Not run: 
##D umx_set_optimization_options("mvnRelEps", .01) # update globally
## End(Not run)
|
30d0d6b2c46892043170f049888aa8b9ccb6b08f
|
eb6641b3761be376a4a404456b7694ae58ba19c3
|
/plot2.R
|
b6eeb76789eb32d07b9b0509ccbc2d112f8629c3
|
[] |
no_license
|
Indy275/ExData_Plotting1
|
d9840ca8d1ea3a46ef5eeb21c33e3777ee98bf81
|
a385dc2b45919169a90bf4b1ed45897e18bc0047
|
refs/heads/master
| 2022-07-03T02:27:30.421708
| 2020-05-14T20:11:04
| 2020-05-14T20:11:04
| 264,014,000
| 0
| 0
| null | 2020-05-14T20:06:18
| 2020-05-14T20:06:17
| null |
UTF-8
|
R
| false
| false
| 719
|
r
|
plot2.R
|
# plot2.R: Global active power over time for 2007-02-01/02, saved to plot2.png.
# NOTE(review): the input path below is machine-specific; adjust as needed.
power_consumption <- read.table("rprj/ExplorDataAnalysis/household_power_consumption.txt",
                                sep = ";", header = TRUE, na.strings = "?",
                                nrows = 2075259, check.names = F,
                                stringsAsFactors = F, comment.char = "", quote = '\"')

# Parse the d/m/Y date strings once, then keep just the two days of interest.
# (The original converted Date a second time after subsetting, which is a
# no-op on an already-Date column and has been removed.)
power_consumption$Date <- as.Date(as.character(power_consumption$Date), "%d/%m/%Y")
power_subset <- subset(power_consumption,
                       Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))

# Combine date and time into a single POSIXct timestamp for the x-axis.
power_subset$timestamp <- as.POSIXct(paste(power_subset$Date, power_subset$Time))

png(filename = "plot2.png")
with(power_subset, plot(Global_active_power ~ timestamp, type = "l",
                        ylab = "Global Active Power (kilowatts)", xlab = ""))
dev.off()
|
9419ea4d97afae91e832030140c4095c97f7fef2
|
0121d69e4551d8d1fa755e3b306a345c5a809bb4
|
/7ARIMA.R
|
dc7b7ad4e3da49c0608ceca9dbac5174ff904ef5
|
[] |
no_license
|
omarterp/forecast-learn
|
856bed4b8916a406711b7b049dd284a98691be5c
|
38174d73d21877857b33987e5888bcbc19a07d20
|
refs/heads/master
| 2021-01-10T05:54:59.821986
| 2015-10-23T02:50:12
| 2015-10-23T02:50:12
| 43,187,835
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,449
|
r
|
7ARIMA.R
|
# ARIMA / seasonal-ARIMA modelling of monthly Amtrak ridership, with a
# train/validation split and residual diagnostics (forecast package).
# NOTE(review): the input path is machine-specific; adjust as needed.
Amtrak.data <- read.csv("/Users/jasonmerrick2/Documents/Teaching/Exec Ed/Decision Analytics/Forecasting/Amtrak data.csv")
# Monthly series from Jan 1991 to Mar 2004.
ridership.ts <- ts(Amtrak.data$Ridership, start = c(1991,1), end = c(2004, 3), freq = 12)
library("forecast")
plot(ridership.ts)
# Hold out the last 36 months for validation.
nValid <- 36
nTrain <- length(ridership.ts) - nValid
train.ts <- window(ridership.ts, start = c(1991, 1), end = c(1991, nTrain))
valid.ts <- window(ridership.ts, start = c(1991, nTrain + 1), end = c(1991, nTrain + nValid))
# Time series, ACF and PACF of the training data.
tsdisplay(train.ts)
# AR(1) model; Ljung-Box (fitdf = 1 for the single AR coefficient) checks
# whether the residuals are white noise.
fitARIMA <- arima(train.ts, order = c(1,0,0))
summary(fitARIMA)
Box.test(residuals(fitARIMA), lag=24, fitdf=1, type="Ljung-Box")
residualARIMA <- arima.errors(fitARIMA)
tsdisplay(residualARIMA)
# Two stacked panels for the plots that follow.
par(mfrow = c(2, 1))
forecastARIMA <- forecast(fitARIMA, level=c(80,95), h=12)
plot(forecastARIMA)
# First difference to inspect remaining structure before the seasonal model.
diff.train.ts <- diff(train.ts, lag = 1)
tsdisplay(diff.train.ts)
# Automatic model selection (the manual seasonal spec is kept as a comment).
fitSARIMA <- auto.arima(train.ts)#arima(train.ts, order = c(0,1,0), seasonal=c(1,0,0))
summary(fitSARIMA)
# NOTE(review): fitdf=1 assumes one estimated coefficient; auto.arima may
# fit more — confirm fitdf matches the selected model.
Box.test(residuals(fitSARIMA), lag=24, fitdf=1, type="Ljung-Box")
residualSARIMA <- arima.errors(fitSARIMA)
tsdisplay(residualSARIMA)
# Forecast over the whole validation horizon.
forecastSARIMA <- forecast(fitSARIMA, level=c(80,95), h=nValid)
plot(forecastSARIMA)
# Compare in-sample fit errors with out-of-sample forecast errors.
par(mfrow = c(2, 1))
hist(forecastSARIMA$residuals, ylab = "Frequency", xlab = "Fit Error", bty = "l", main = "")
hist(valid.ts - forecastSARIMA$mean, ylab = "Frequency", xlab = "Forecast Error", bty = "l", main = "")
# Accuracy measures of the forecasts against the validation data.
accuracy(forecastSARIMA$mean, valid.ts)
|
ed1f07d4d8316fa143572e122ff906edd8318dce
|
599d6c8aff53bac7170f6d207f0851eb99cc85ae
|
/man/genotypes_pca.Rd
|
42724d9396a8c39468561bc405cc246c59567443
|
[] |
no_license
|
jinhyunju/icreport
|
6f76f993c7f7ba83676ac4e5cfc18b2c792fb128
|
9f0adca08badf4e47a52a227ee47614946b1f844
|
refs/heads/master
| 2020-04-16T00:22:48.646301
| 2016-09-15T22:08:09
| 2016-09-15T22:08:09
| 27,740,842
| 4
| 2
| null | 2015-10-14T15:42:28
| 2014-12-08T23:45:39
|
R
|
UTF-8
|
R
| false
| false
| 806
|
rd
|
genotypes_pca.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{genotypes_pca}
\alias{genotypes_pca}
\title{Custom PCA function for analyzing genotype SNP data.}
\usage{
genotypes_pca(genotype.mx = NULL, info.df = NULL, check.covars = NULL,
cor.threshold = 0.05)
}
\arguments{
\item{genotype.mx}{Genotype matrix with dimensions g x N}
\item{info.df}{Dataframe that holds sample covariates (ex. population, gender, age, etc...)}
\item{check.covars}{Column names of info.df which hold the covariates
that should be used for association testing with IC coefficients.}
\item{cor.threshold}{Threshold for significant correlation calling. Default is set to 0.05.}
}
\value{
List with the following entries.
}
\description{
Performing PCA on a dataset and create a list object with results.
}
\keyword{keywords}
|
55aaece3dcbd5aaa31bebd82e60faa9455fd799a
|
df301198556ac71ffbe3b56127811b202c605422
|
/9.20.2021_RST_hex_occupancy_and_div_accum_code.R
|
a24264924396b004b286507c5fb2579d0e79cee2
|
[] |
no_license
|
mavolio/BES_StreetTrees_Redlining
|
6fd98dedf2fd56ce35557439cfa6100984b025fb
|
0c26b0c8aff5e2d5637a464053b8cc183fc34fec
|
refs/heads/master
| 2023-04-13T00:27:15.236621
| 2022-09-26T17:41:33
| 2022-09-26T17:41:33
| 349,562,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,072
|
r
|
9.20.2021_RST_hex_occupancy_and_div_accum_code.R
|
#####--------------------------------------------------------------------------------------------
### Baltimore_street_tree_redline ###
#####--------------------------------------------------------------------------------------------
# Last updated: 20 Sept 2021
# Author: Karin Burghardt
# Contact: kburghar@umd.edu
# Section 1: setup and data wrangling -- loads the Baltimore street-tree
# inventory joined to HOLC (redlining) grades and hexagon ids, harmonizes
# species names, converts identifiers to factors, drops park trees, and
# writes intermediate CSVs used downstream.
# NOTE(review): absolute, machine-specific working directory -- this script
# only runs unmodified on the original author's machine.
setwd("/Volumes/GoogleDrive/My Drive/Projects/redlining_analysis/redline_balt_street")
# Date stamp appended to every output filename via sprintf() below.
date<-"2021_09_20"
# Load required libraries
library(readr)
library(car)
library(effects)
library(emmeans)
library(FD)
library(tidyverse)
library(ggplot2)
library('scales')
library(vegan)
library(reshape2)
library(dplyr)
library(lme4)
library(lmerTest)
library(stringr)
library(BiodiversityR)
library(iNEXT)
library(janitor)
library(cowplot)
library(MuMIn)
theme_set(theme_bw(base_family="Helvetica"))
options(scipen = 999, digits = 4)
#function to specify "not in"
`%nin%` <- Negate(`%in%`)
# HOLC custom colors
holc_col <- c("A"='#92BC6B', "B"='#92C7C9', "C"='#E7DC6B', "D"='#E47D67')
#import dataset- 95,119 trees (park trees still included)
st_tree <- read_csv("street_trees_Baltimore_w_HOLC_grades_HEX_2021-03-15.csv")
###This first bit is data wrangling to create CSV for composition analysis
#create neighbor,poly, hex column
st_tree$id_hex_poly<-paste(st_tree$holc_id.hex_id, st_tree$poly_id, sep=".")
#combine needed species column- must be done before character is changed to factor
# Collapse synonymous / cultivar-level names into a single species name.
st_tree$SPP <- str_replace_all(st_tree$SPP, 'Acer tataricum ginnala', 'Acer tataricum')
st_tree$SPP <- str_replace_all(st_tree$SPP, 'Populus nigra Italica', 'Populus nigra')
st_tree$SPP <- str_replace_all(st_tree$SPP, 'persimmon, Japanese', 'Diospyros kaki')
st_tree$SPP <- str_replace_all(st_tree$SPP, 'X Cupressocyparis leylandii', 'Cupressocyparis leylandii')
st_tree$SPP <- str_replace_all(st_tree$SPP, 'Gleditsia triacanthos inermis', 'Gleditsia triacanthos')
st_tree$SPPorig<-st_tree$SPP#column to preserve original species designation
#Replace all trees rated with condition as stump or dead so that they stay in dataset but get removed from SPP column
st_tree$CONDITION<-as.factor(st_tree$CONDITION)
Condition_inHex_percent <- tabyl(st_tree,CONDITION, show_na = FALSE)
st_tree$SPP[st_tree$CONDITION %in% c("Dead", "Stump","Stump w")] <- "Dead"
#name factors
st_tree$holc_id<-as.factor(st_tree$holc_id)
st_tree$holc_id.hex_id<-as.factor(st_tree$holc_id.hex_id)
st_tree$holc_grade<-as.factor(st_tree$holc_grade)
st_tree$poly_id<-as.factor(st_tree$poly_id)
st_tree$hex_id<-as.factor(st_tree$hex_id)
st_tree$id_hex_poly<-as.factor(st_tree$id_hex_poly)
st_tree$SPP<-as.factor(st_tree$SPP)
st_tree$COMMON<-as.factor(st_tree$COMMON)
st_tree$GENUS<-as.factor(st_tree$GENUS)
st_tree$SPACE_TYPE<-as.factor(st_tree$SPACE_TYPE)
st_tree$LOC_TYPE<-as.factor(st_tree$LOC_TYPE)
# Split the binomial "Genus species" into two columns (original SPP kept).
st_tree<-separate(data = st_tree, col = SPP, into = c("Genus", "species"), sep = "\\ ",remove = FALSE)
st_tree$Genus<-as.factor(st_tree$Genus)
st_tree$COLLECTOR<-as.factor(st_tree$COLLECTOR)
st_tree$CULTIVAR<-as.factor(st_tree$CULTIVAR)
summary(st_tree)
levels(st_tree$SPP)
#remove park trees= 5911 trees
st_tree2<-st_tree%>%
filter(LOC_TYPE=="Street")
#check trees per neighborhood- smallest=B7 with 92 trees
numbertrees_per_holcid <- tabyl(st_tree2,holc_id, show_na = FALSE)
#create csv file for Meghan
write.csv(st_tree2,file=sprintf("output/st_tree_inHex%s.csv",date), row.names = TRUE)
#create list of trees per species to check
SPP_list_inHex_with_potential <- tabyl(st_tree2,SPP, show_na = FALSE)
print(SPP_list_inHex_with_potential)
write.csv(SPP_list_inHex_with_potential,file=sprintf("output/SPP_list_inHex_with_potential%s.csv",date), row.names = FALSE)
################Occupancy analysis and figures#############
# Section 2: builds site-by-species matrices (all potential planting sites,
# and size-class subsets small/medium/large/empty), derives per-hexagon
# occupancy proportions in an `env` data frame, and renders/saves the
# occupancy boxplot figures.
#create list of SPP designations to make zero but keep as potential site in hex for abundance analysis as potential sites of trees
undesired <- c('Vacant Site', 'Vacant Potential', 'Stump','Vacant Site Not Suitable','NA',"Z Add 01"," ","Dead")
#create list of designations to keep as living trees but remove for diversity analysis
undesiredSPP <- c('unknown shrub','unknown tree','Ficus spp.','Fraxinus spp.','Hydrangea spp.','Ilex spp.','Ilex x','Juniperus spp.','Magnolia x','Photinia spp.','Populus spp.','Quercus spp.','Quercus x','Salix spp.','Ulmus spp.')
#create dummy abundance column for pivoting later so each potential site is hex is counted
st_tree2$abundance<-1
#create columns to quantify abundance based on size classes (S,M,L,empty)- need to do this to retain potenial sites when pivoting later
st_tree2<-st_tree2%>%
mutate(small=ifelse(SPP %nin% undesired&DBH>=0&DBH<=5, 1, 0))
st_tree2<-st_tree2%>%
mutate(large=ifelse(SPP %nin% undesired&DBH>=20, 1, 0))
st_tree2<-st_tree2%>%
mutate(medium=ifelse(SPP %nin% undesired&DBH>5&DBH<20, 1, 0))
st_tree2<-st_tree2%>%
mutate(empty=ifelse(SPP%in%undesired, 1, 0))
#####add sanity check column to check if we aren't doublecounting any trees and all are in one category only- result= all good!
# NOTE(review): positional indexing -- columns 51:54 are assumed to be the
# small/large/medium/empty indicator columns created just above; this will
# silently break if the input data gains or loses columns. Confirm before
# re-running on modified data.
st_tree2<-st_tree2%>%
mutate(sum = rowSums(.[51:54]))
#####create a series of species x hex matrices for both occupancy analysis- note: this keeps in undesiredSPP because they are living trees!#####
#create holc_id_id_hex_poly x species matrix with all potential sites included
com.pot<-pivot_wider(st_tree2, id_cols=holc_id.hex_id, names_from = SPP, values_from = c(abundance),values_fn = list(abundance = sum),values_fill = 0)
com.pot<-column_to_rownames(com.pot, var = "holc_id.hex_id")
######create matrix with only trees than less than 5dbh for size-based occupancy analysis
com.small<-pivot_wider(st_tree2, id_cols=holc_id.hex_id, names_from = SPP, values_from = c(small),values_fn = list(small = sum),values_fill = 0)
com.small<-column_to_rownames(com.small, var = "holc_id.hex_id")
######create matrix with only trees than more than 5 dbh and less than 20 for size-based occupancy analysis
com.medium<-pivot_wider(st_tree2, id_cols=holc_id.hex_id, names_from = SPP, values_from = c(medium),values_fn = list(medium = sum),values_fill = 0)
com.medium<-column_to_rownames(com.medium, var = "holc_id.hex_id")
######create matrix with only trees than less than more than 20 for size-based occupancy analysis
com.large<-pivot_wider(st_tree2, id_cols=holc_id.hex_id, names_from = SPP, values_from = c(large),values_fn = list(large = sum),values_fill = 0)
com.large<-column_to_rownames(com.large, var = "holc_id.hex_id")
######create matrix of vacancies
com.empty<-pivot_wider(st_tree2, id_cols=holc_id.hex_id, names_from = SPP, values_from = c(empty),values_fn = list(empty = sum),values_fill = 0)
com.empty<-column_to_rownames(com.empty, var = "holc_id.hex_id")
#remove potential spots with no tree- only live trees left- use for abundance values for proportion analysis
com.live <- com.pot %>%
select(-one_of(undesired))
summary(com.live)
# This matrix removes SPP designations that are unclear, repetitive or at genera level when species level ident are present for other trees.Some no longer exist with new hex size. Use: diversity analysis by hex level
com <- com.live %>%
select(-one_of(undesiredSPP))
summary(com)
# Create an environmental dataframe that includes total for occupancy analysis, add holc_grade factor back to enviro dataframe, create columns with totals for size classes and empties
# NOTE(review): this pivot_wider() call has no values_from -- it is used only
# to collapse to one row per holc_id.hex_id; confirm no downstream code
# relies on the pivoted value columns.
env<-pivot_wider(st_tree2, id_cols=holc_id.hex_id, names_from = holc_grade)
# HOLC grade is the first character of the combined "grade#.hex" id.
env$holc_grade<-substr(env$holc_id.hex_id, 1, 1)
env<-separate(data = env, col = holc_id.hex_id, into = c("holc_id", "hex_id"), sep = "\\.",remove = FALSE)
env$holc_grade<-as.factor(env$holc_grade)
env$hex_id<-as.factor(env$hex_id)
env$holc_id<-as.factor(env$holc_id)
env<-column_to_rownames(env, var = "holc_id.hex_id")
# Row sums over the site x species matrices give per-hexagon totals.
env$site.totals.pot <- apply(com.pot,1,sum)
env$site.totals.live <- apply(com.live,1,sum)
env$site.totals <- apply(com,1,sum)
env$site.totals.small <- apply(com.small,1,sum)
env$site.totals.medium <- apply(com.medium,1,sum)
env$site.totals.large <- apply(com.large,1,sum)
env$site.totals.empty <- apply(com.empty,1,sum)
env$proportion_of_possible_sites_occupied <- env$site.totals.live/env$site.totals.pot
env$proportion_of_possible_sites_occupied_by_small <- env$site.totals.small/env$site.totals.pot
env$proportion_of_possible_sites_occupied_by_medium <- env$site.totals.medium/env$site.totals.pot
env$proportion_of_possible_sites_occupied_by_large <- env$site.totals.large/env$site.totals.pot
env$proportion_of_possible_sites_empty <- env$site.totals.empty/env$site.totals.pot
summary(env)
#######Create Occupancy figures
####### figures by totals not proportions#####
########create plot of potential tree spots per hex by grade- illustrates differences in # of potential locations for trees in D- that is why standardization by number of sites is needed####
tree_number_grade_hex_pot<-ggplot(env, aes(x=holc_grade, y=site.totals.pot,fill=holc_grade)) +
geom_jitter(width = 0.2, colour="black",alpha=.2)+
geom_boxplot(notch=TRUE,outlier.shape = NA,weight=5)+scale_fill_manual(values = holc_col)+
labs(x = "HOLC Grade", y = "Potential tree sites/3.3 ha hexagon", colour = "HOLC grade", shape = "HOLC grade")
ggsave(file=sprintf("output/tree_number_grade_hex_pot%s.tiff",date), plot=tree_number_grade_hex_pot, width=6, height=4)
########create plot of living trees per hex by grade- illustrates differences in # of potential locations for trees in D- that is why standardization by number of sites is needed####
tree_number_grade_hex_live<-ggplot(env, aes(x=holc_grade, y=site.totals.live,fill=holc_grade)) +
geom_jitter(width = 0.2, colour="black",alpha=.2)+
geom_boxplot(notch=TRUE,outlier.shape = NA,weight=5)+scale_fill_manual(values = holc_col)+
labs(x = "HOLC Grade", y = "Living street trees/3.3 ha hexagon", colour = "HOLC grade", shape = "HOLC grade")
ggsave(file=sprintf("output/tree_number_grade_hex_live%s.tiff",date), plot=tree_number_grade_hex_live, width=6, height=4)
#######Create multipanel boxplot#######
#create plot of empty tree spots per hex by grade
tree_number_grade_hex_empty<-ggplot(env, aes(x=holc_grade, y=proportion_of_possible_sites_empty,fill=holc_grade))+ geom_jitter(width = 0.2, colour="black",alpha=.2)+
geom_boxplot(notch=TRUE,outlier.shape = NA,weight=5)+scale_fill_manual(values = holc_col)+
labs(x = "HOLC Grade", y = "proportion of potential sites/3.3 ha hexagon", fill = "HOLC grade", shape = "HOLC grade")+ theme(legend.position = "none")
#create plot of proportion small trees per hex by grade
tree_number_grade_hex_small<-ggplot(env, aes(x=holc_grade, y=proportion_of_possible_sites_occupied_by_small,fill=holc_grade))+ geom_jitter(width = 0.2, colour="black",alpha=.2)+
geom_boxplot(notch=TRUE,outlier.shape = NA,weight=5)+scale_fill_manual(values = holc_col)+
labs(x = "HOLC Grade", y = "proportion of potential sites/3.3 ha hexagon", colour = "HOLC grade", shape = "HOLC grade")+ theme(legend.position = "none")
#create plot of proportion medium trees per hex by grade
tree_number_grade_hex_medium<-ggplot(env, aes(x=holc_grade, y=proportion_of_possible_sites_occupied_by_medium,fill=holc_grade))+ geom_jitter(width = 0.2, colour="black",alpha=.2)+
geom_boxplot(notch=TRUE,outlier.shape = NA,weight=5)+scale_fill_manual(values = holc_col)+
labs(x = "HOLC Grade", y = "proportion of potential sites/3.3 ha hexagon", colour = "HOLC grade", shape = "HOLC grade")+ theme(legend.position = "none")
#create plot of proportion large trees per hex by grade
tree_number_grade_hex_large<-ggplot(env, aes(x=holc_grade, y=proportion_of_possible_sites_occupied_by_large,fill=holc_grade))+ geom_jitter(width = 0.2, colour="black",alpha=.2)+
geom_boxplot(notch=TRUE,outlier.shape = NA,weight=5)+scale_fill_manual(values = holc_col)+
labs(x = "HOLC Grade", y = "proportion of potential sites/3.3 ha hexagon", colour = "HOLC grade", shape = "HOLC grade")+ theme(legend.position = "none")
#individual graphs
ggsave(file=sprintf("output/tree_number_grade_hex_empty%s.pdf",date), plot=tree_number_grade_hex_empty, width=3, height=4)
ggsave(file=sprintf("output/tree_number_grade_hex_small%s.pdf",date), plot=tree_number_grade_hex_small, width=3, height=4)
ggsave(file=sprintf("output/tree_number_grade_hex_medium%s.pdf",date), plot=tree_number_grade_hex_medium, width=3, height=4)
ggsave(file=sprintf("output/tree_number_grade_hex_large%s.pdf",date), plot=tree_number_grade_hex_large, width=3, height=4)
####create multipanel plot of proportion all categories of trees per hex by grade- an R update broke this code but individual graphs above can be combined
Fig3boxplot<-plot_grid(tree_number_grade_hex_empty, tree_number_grade_hex_large, tree_number_grade_hex_medium, tree_number_grade_hex_small, labels = c('I. No living tree', 'II. Large tree','III. Medium tree','IV. Small tree'), label_size = 12,ncol = 4, nrow = 1,hjust = 0.01, label_x = 0.24,vjust = -.2)+
theme(plot.margin = unit(c(1,0,0,0), "lines"))
ggsave(file=sprintf("output/Fig3boxplot_proportion%s.pdf",date), plot=Fig3boxplot, width=7.5, height=3.5)
###### Occupancy models####
# Section 3: mixed-effects models of site occupancy by HOLC grade.
# A Poisson GLMM first tests whether the number of potential sites differs
# by grade; then binomial GLMMs model the proportion of potential sites in
# each occupancy class (large / medium / empty / small), with a random
# intercept for neighborhood (holc_id) and weights = number of potential
# sites per hexagon. Pairwise grade contrasts use emmeans when the grade
# effect is significant.
#Do the number of potential sites differ across holc_grades?
#check distribution of data- much more normal than I would think for count data but poisson better still!
ggplot(env,aes(site.totals.pot,fill = holc_grade, colour = holc_grade))+
geom_density(alpha = 0.1) +
xlim(0, 160)+ scale_colour_manual(values = holc_col)+scale_fill_manual(values = holc_col)
# glmer poisson
mod.pot<-glmer(site.totals.pot~holc_grade+(1|holc_id),data=env,family="poisson")
summary(mod.pot)
plot(mod.pot)
ranef(mod.pot)
r.squaredGLMM(mod.pot)
drop1(mod.pot, test="Chisq")
#Yes- potential sites per area (3.3 ha) varies across neighborhoods with D neighborhoods with the most potential locations (likely due to denser roads)
#So- need to use proportional occupancy analysis rather then living tree densities to account for different number of potential sites
####Occupancy analysis
###LARGE TREES
mod.large<-glmer(site.totals.large/site.totals.pot~holc_grade+(1|holc_id),weights=site.totals.pot,
data=env,family="binomial")
summary(mod.large)
ranef(mod.large)
coef(mod.large)
# NOTE(review): summ() is from the jtools package, which is not loaded at
# the top of this script -- this call will error unless jtools is attached.
summ(mod.large)
r.squaredGLMM(mod.large)
#this is the intercept only model I am using drop1() Chisq to compare to:
#mod.large.nul<-glmer(site.totals.large/site.totals.pot~1+(1|holc_id),weights=site.totals.pot,data=env,family="binomial")
mod.large.chi<-drop1(mod.large, test="Chisq")
print(mod.large.chi)
#If holc_grade significant than further do paired comparisons with emmeans: it is
emms.large<-emmeans(mod.large,~holc_grade,type = "response")
summary(emms.large)
emms.large.df = as.data.frame(emms.large)
pairs(emms.large,ratios = TRUE, type="response")
plot(emms.large,comparisons = TRUE) + theme_bw() +
labs(x = "Estimated marginal mean (Large tree in location- back-transformed)", y = "HOLC grade")
####(A&B;C;D) are the groups.
####Medium trees
mod.medium<-glmer(site.totals.medium/site.totals.pot~holc_grade+(1|holc_id),weights=site.totals.pot,
data=env,family="binomial")
summary(mod.medium)
ranef(mod.medium)
coef(mod.medium)
# NOTE(review): summ() requires the jtools package (not loaded above).
summ(mod.medium)
r.squaredGLMM(mod.medium)
#this is the intercept only model I am using drop1() Chisq to compare to:
#mod.medium.nul<-glmer(site.totals.medium/site.totals.pot~1+(1|holc_id),weights=site.totals.pot,data=env,family="binomial")
mod.medium.chi<-drop1(mod.medium, test="Chisq")
mod.medium.chi
#HOLC_GRADE_NOT_SIG- do not run comparisons
#### EMPTY LOCATIONS
mod.empty<-glmer(site.totals.empty/site.totals.pot~holc_grade+(1|holc_id),weights=site.totals.pot,
data=env,family="binomial")
summary(mod.empty)
ranef(mod.empty)
coef(mod.empty)
r.squaredGLMM(mod.empty)
#this is the intercept only model I am using drop1() Chisq to compare to:
#mod.empty.nul<-glmer(site.totals.empty/site.totals.pot~1+(1|holc_id),weights=site.totals.pot,data=env,family="binomial")
mod.empty.chi<-drop1(mod.empty, test="Chisq")
mod.empty.chi
#If holc_grade significant than further do paired comparisons with emmeans: Not different.
####SMALL
mod.small<-glmer(site.totals.small/site.totals.pot~holc_grade+(1|holc_id),weights=site.totals.pot,
data=env,family="binomial")
summary(mod.small)
ranef(mod.small)
coef(mod.small)
r.squaredGLMM(mod.small)
#this is the intercept only model I am using drop1() Chisq to compare to:
#mod.small.nul<-glmer(site.totals.small/site.totals.pot~1+(1|holc_id),weights=site.totals.pot,data=env,family="binomial")
mod.small.chi<-drop1(mod.small, test="Chisq")
mod.small.chi
#If holc_grade significant than further do paired comparisons with emmeans: It is!
emms.small<-emmeans(mod.small,~holc_grade,type = "response")
summary(emms.small)
emms.small.df = as.data.frame(emms.small)
pairs(emms.small,ratios = TRUE, type="response")
plot(emms.small,comparisons = TRUE) + theme_bw() +
labs(x = "Estimated marginal mean (small tree in location- back-transformed)", y = "HOLC grade")
pwpp(emms.small)
#B&D differ; no other pair-wise diffs
###### FIGURE 2: Species accumulation curves-extrapolation with I-Next package:
# Section 4: species diversity by HOLC grade via iNEXT rarefaction /
# extrapolation at Hill numbers q = 0 (richness), 1 (Shannon), 2 (Simpson),
# saving estimate CSVs and the accumulation-curve figures.
# Drop empty sites, dead trees, and ambiguous species before rarefaction.
st_tree_next<-st_tree2%>%
filter(!SPP %in% undesired) %>%
filter(!is.na(SPP))%>%
filter(!SPP %in% undesiredSPP)
#create grade x species matrix for all trees
x<-pivot_wider(st_tree_next, id_cols=SPP, names_from = holc_grade, values_from = c(abundance),values_fn = list(abundance = sum),values_fill = 0)
x<-column_to_rownames(x, var = "SPP")
#ALLTREES FIGURE 1- rarefaction and estimates at all 3 q levels- takes 20 + mins to run
#estimate exact species diversity across holc grades standardized both by number of trees and coverage
size_rare_estD<-estimateD(x, datatype = "abundance", base = "size", level = NULL,conf = 0.95)
print(size_rare_estD)
write.csv(size_rare_estD,file=sprintf("output/size_rare_estD%s.csv",date), row.names = FALSE)
SC_rare_estD<-estimateD(x, "abundance", base="coverage", level=NULL, conf=0.95)
print(SC_rare_estD)
write.csv(SC_rare_estD,file=sprintf("output/SC_rare_estD%s.csv",date), row.names = FALSE)
#create rarefaction/extrapolation curves for figure
out.all<- iNEXT(x, q=c(0,1,2),datatype="abundance")
out.all$DataInfo # showing basic data information.
out.all$AsyEst # showing asymptotic diversity estimates.
out.all$iNextEst # showing diversity estimates with rarefied and extrapolated.
#summary rare/extrapolation figure
# Facet labels mapping Hill-number order to a readable title.
qlabels <- c("0" = "richness (q=0)", "1" = "Shannon's EFN (q=1)","2" = "Simpson's EFN (q=2)")
accum_alltree_all_q<-ggiNEXT(out.all, type=1, facet.var="order") + theme_bw(base_size=10)+ theme_bw(base_size=10)+ xlim(c(0,20000))+scale_colour_manual(values = holc_col,name="HOLC grade")+scale_fill_manual(values = holc_col, name="HOLC grade")+
labs(x = "Number of individual trees sampled", y = "Tree species diversity", colour = "HOLC grade", shape = "HOLC grade", fill="HOLC grade")+
theme(legend.position="bottom",legend.title=element_blank())+facet_wrap(~order, scales="free",labeller=labeller(order = qlabels))
#save summary figure
ggsave(file=sprintf("output/accum_alltree_all_div_pooled_indv%s.pdf",date), plot=accum_alltree_all_q, width=7, height=4)
ggsave(file=sprintf("output/accum_alltree_all_div_pooled_indv%s.tiff",date), plot=accum_alltree_all_q, width=7, height=4)
#summary rarefaction figure- simply ends same curves without extrapolation
# NOTE(review): endpoint=5238 is a hard-coded sample size -- presumably the
# smallest per-grade tree count; confirm if the input data change.
out.all.rare<- iNEXT(x, q=c(0,1,2),datatype="abundance", endpoint=5238)
accum_alltree_all_q_rare<-ggiNEXT(out.all.rare, type=1, facet.var="order") +geom_line(size = .1, alpha=.2)+ theme_bw(base_size=10)+ theme_bw(base_size=10)+ xlim(c(0,6000))+scale_colour_manual(values = holc_col,name="HOLC grade")+scale_fill_manual(values = holc_col, name="HOLC grade")+
labs(x = "Number of individual trees sampled", y = "Tree species diversity", colour = "HOLC grade", shape = "HOLC grade", fill="HOLC grade")+
theme(legend.position="bottom",legend.title=element_blank())+facet_wrap(~order, scales="free",labeller=labeller(order = qlabels))
#save summary figure
ggsave(file=sprintf("output/accum_alltree_all_div_pooled_indv_rare%s.pdf",date), plot=accum_alltree_all_q_rare, width=7, height=4)
ggsave(file=sprintf("output/accum_alltree_all_div_pooled_indv_rare%s.tiff",date), plot=accum_alltree_all_q_rare, width=7, height=4)
#summary SC rarefaction figure
accum_alltree_all_q_rareSC<-ggiNEXT(out.all.rare, type=3, facet.var="order")+geom_line(size = .5, alpha=.9)+ theme_bw(base_size=10)+ theme_bw(base_size=10)+scale_colour_manual(values = holc_col,name="HOLC grade")+scale_fill_manual(values = holc_col, name="HOLC grade")+
labs(x = "Sample coverage", y = "Tree species diversity", colour = "HOLC grade", shape = "HOLC grade", fill="HOLC grade")+
theme(legend.position="bottom",legend.title=element_blank())+facet_wrap(~order, scales="free",labeller=labeller(order = qlabels))
#save summary figure
ggsave(file=sprintf("output/accum_alltree_all_div_pooled_indv_rareSC%s.pdf",date), plot=accum_alltree_all_q_rareSC, width=7, height=4)
ggsave(file=sprintf("output/accum_alltree_all_div_pooled_indv_rareSC%s.tiff",date), plot=accum_alltree_all_q_rareSC, width=7, height=4)
|
c7ac77f4b54b825f509d1412cda5b0999c3f267e
|
ecd10e14781c11f0a934ba6b4b1b758ad2de07a3
|
/resource_code/Untitled.R
|
8e5039edea1f483899c2889b03095cd210aa04b0
|
[] |
no_license
|
scottkelleher/course_project_debate
|
6e445543b2f5c805ff366926d5b63d521ef907f5
|
6239432b98920d8f482c5fade3dda0e773c2cab2
|
refs/heads/master
| 2021-01-18T23:14:23.879428
| 2016-12-12T20:28:33
| 2016-12-12T20:28:33
| 72,665,039
| 0
| 3
| null | 2016-12-05T18:50:39
| 2016-11-02T17:36:03
|
HTML
|
UTF-8
|
R
| false
| false
| 315
|
r
|
Untitled.R
|
# Untitled.R ---------------------------------------------------------------
# Loads the brucellosis dataset and prints quick frequency tables for
# place of origin, disease status, and breed.
getwd()  # echo the current working directory (interactive sanity check)
library(readr)  # NOTE(review): loaded but unused -- read.csv() below is base R
setwd("/Users/Tulsigompo/Desktop/Brucellosis_data_final")
# Read relative to the directory set above. (The original repeated the full
# absolute path, which was redundant once setwd() had run and breaks if the
# folder is ever moved.)
Bruce <- read.csv("Brucelosis_R_data_New.csv", header = TRUE, sep = ",", skip = 0)
list.files()      # show directory contents (sanity check)
colnames(Bruce)   # inspect available columns
table(Bruce$Place_of_Origin)
table(Bruce$Brucelosis_status)
table(Bruce$Breed)
|
5fe66b95bf354a53eadb13488acde8042a2ecaba
|
211b6cc8ab75ae26ea2a5900e1664546796b6c47
|
/featureanalysis.R
|
9222682f6d2a4fd3ec30f94b658df738bbfcb962
|
[] |
no_license
|
justinwang1/rulebreaksaesthetics
|
88230fdd1db19744613f19bd4fc427b78c1f2c14
|
6e3cf8b2d0c21227dd0a164cade9d52e44247b3d
|
refs/heads/master
| 2020-03-31T04:10:52.868347
| 2018-10-07T02:23:11
| 2018-10-07T02:23:11
| 151,894,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,427
|
r
|
featureanalysis.R
|
# featureanalysis.R --------------------------------------------------------
# Feature-level analysis of photo aesthetics data: percentile-transforms
# selected features, builds t-test summary tables for high- vs. low-rated
# photos, and inspects specific feature subsets (saturation, blur, size,
# hue count, NN histogram, 4:3 aspect ratio).
#
# FIX(review): this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... ======= ... >>>>>>> 8cc9ded...) whose two sides were
# byte-identical, which made the file unparseable. The markers were removed
# and a single copy of the code kept.
#
# NOTE(review): all helper functions and data objects used below
# (to.percentile, results.table, plot.aspect, bottom.per, rtable.ids,
# results.table.ids, rtable, highData, lowData, cv, ids, labels, high.ids,
# and the bad*/good* data frames) are expected to come from AesthSetup.R.
setwd("C:/Users/jstwa/Desktop/ML/")
source("AesthSetup.R")
#Percentile some features - OPTIONAL
highData$Blur <- to.percentile(highData$Blur)
highData$Size <- to.percentile(highData$Size)
highData$Avg..S <- to.percentile(highData$Avg..S)
highData$Avg..V <- to.percentile(highData$Avg..V)
#t tests table for High Data vs. Low Data
results.table(highData,lowData)
##Kernel Density Plot for Aspect
plot.aspect(highData,lowData)
#Obtain RF Predictions
rf.preds <- cv$predictions$rf; rf.mc <- abs(rf.preds - labels)
highmc <- sort(ids[which(rf.mc == 1 & labels == 1)])
lowmc <- sort(ids[which(rf.mc == 1 & labels == 0)])
##Specific Features
#S
s.idx <- bottom.per(highData$Avg..S,per=0.3); s.ids <- high.ids[s.idx]
rtable.ids(s.ids)
#Blur
blur.idx <- bottom.per(highData$Blur); blur.ids <- high.ids[blur.idx]
rtable.ids(blur.ids)
plot.aspect(badblur.data,goodblur.data)
#Size
size.idx <- bottom.per(highData$Size); size.ids <- high.ids[size.idx]
results.table.ids(size.ids)
plot.aspect(badsize.data,goodsize.data)
#Hue Count
hue.idx <- bottom.per(highData$Hue.Count,direction='negative'); hue.ids <- high.ids[hue.idx]
results.table.ids(hue.ids)
plot.aspect(badhc.data,goodhc.data)
#NN
nn.idx <- which(highData$NN.Hist <= 0.2); nn.ids <- high.ids[nn.idx]
badnnhist.data <- highData[nn.idx,]; goodnnhist.data <- highData[-nn.idx,]
rtable(badnnhist.data,goodnnhist.data)
plot.aspect(badnnhist.data,goodnnhist.data)
#Aspect 4/3
aspect43.idx <- which(highData$Aspect >= 7/6 & highData$Aspect < 17/12); aspect43.ids <- high.ids[aspect43.idx]
aspect43.data <- highData[aspect43.idx,]; noaspect43.data <- highData[-aspect43.idx,]
results.table(aspect43.data,noaspect43.data)
|
12a19073c1c3f4c6ee0598b1716465551716c146
|
ce236ff46444554634a802672bdcfcf33070bef2
|
/man/classify.frsvm.Rd
|
97281930fbd61c0690a4d49927736e1607a2b0fd
|
[] |
no_license
|
ktargows/netClass
|
ed102f18545d9b58a01676971cd635295041bac1
|
1e3afbb9b798407e9032c5ab881e2fde801a9627
|
refs/heads/master
| 2021-01-20T21:46:05.907127
| 2013-12-03T00:00:00
| 2013-12-03T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,198
|
rd
|
classify.frsvm.Rd
|
\name{classify.frsvm}
\alias{classify.frsvm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Training and predicting using FrSVM
}
\description{
Training and predicting using FrSVM
}
\usage{
classify.frsvm(fold, cuts, x, y, cv.repeat, DEBUG = DEBUG, Gsub = Gsub,
d = d, op = op, aa = aa, Cs = Cs)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fold}{
number of folds to perform
}
\item{cuts}{
list for randomly divide the training set in to x-x-CV
}
\item{x}{
expression data
}
\item{y}{
a factor of length p comprising the class labels.
}
\item{cv.repeat}{
model for one CV training and predicting
}
\item{DEBUG}{
show debugging information in screen more or less.
}
\item{Gsub}{
an adjacency matrix that represents the underlying biological network.
}
\item{d}{
damping factor for GeneRank, defaults value is 0.5
}
\item{op}{
The upper bound of top-ranked genes.
}
\item{aa}{
The lower bound of top-ranked genes.
}
\item{Cs}{
soft-margin tuning parameter of the SVM. Defaults to \code{10^c(-3:3)}.
}
}
%\details{
%% ~~ If necessary, more details than the description in \name{FrSVM.cv}
%}
\value{
%% ~Describe the value returned
\item{fold }{the recored for test fold}
\item{auc }{The AUC values of test fold}
\item{train }{The tranined models for traning folds}
\item{feat }{The feature selected by each by the train}
%% ...
}
\references{
Yupeng Cun, Holger Frohlich (2012) Integrating Prior Knowledge Into Prognostic Biomarker Discovery Based on Network Structure.arXiv:1212.3214 \cr
Winter C, Kristiansen G, Kersting S, Roy J, Aust D, et al. (2012) Google Goes Cancer: Improving Outcome Prediction for Cancer Patients by Network-Based Ranking of Marker Genes. PLoS Comput Biol 8(5): e1002511. doi:10.1371/journal.pcbi.1002511 \cr
}
\author{
Yupeng Cun \email{yupeng.cun@gmail.com}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See Also as cv.frsvm
}
\examples{
#see cv.frsvm
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ frsvm }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
40725b9543aa7f3285ead4dbef20e356e64097ce
|
543f91156d72a12796a2fe34ebb2f2ed03058326
|
/r/src/calc_trajectory.r
|
39535f286dcafbe28cc9ba925af1cc88e3262a32
|
[] |
no_license
|
ddlddl58/stilt
|
1bc8a8308a158189d07f06b28700e074bc3aace1
|
bb16f2f3a438fccdb0332d5321e24e3afbcdc00a
|
refs/heads/master
| 2020-09-25T17:12:00.912210
| 2019-10-28T22:37:35
| 2019-10-28T22:38:23
| 226,051,456
| 1
| 0
| null | 2019-12-05T08:21:12
| 2019-12-05T08:21:11
| null |
UTF-8
|
R
| false
| false
| 6,045
|
r
|
calc_trajectory.r
|
#' calc_trajectory runs a single hymodelc trajectory simulation
#' @author Ben Fasoli
#'
#' Writes SETUP.CFG and CONTROL (and optionally ZICONTROL) into \code{rundir},
#' executes the compiled hymodelc model there with a wall clock timeout, and
#' reads the resulting PARTICLE.DAT into a particle data frame. Returns NULL
#' (after logging to \code{rundir}/ERROR) if the model times out or produces
#' no usable trajectory output.
#'
#' For documentation, see https://uataq.github.io/stilt/
#'
#' @export
calc_trajectory <- function(varsiwant,
                            conage,
                            cpack,
                            delt,
                            dxf,
                            dyf,
                            dzf,
                            emisshrs,
                            frhmax,
                            frhs,
                            frme,
                            frmr,
                            frts,
                            frvs,
                            hnf_plume,
                            hscale,
                            ichem,
                            iconvect,
                            initd,
                            isot,
                            ivmax,
                            kbls,
                            kblt,
                            kdef,
                            khmax,
                            kmix0,
                            kmixd,
                            kmsl,
                            kpuff,
                            krnd,
                            kspl,
                            kzmix,
                            maxdim,
                            maxpar,
                            met_files,
                            mgmin,
                            ncycl,
                            ndump,
                            ninit,
                            numpar,
                            nturb,
                            n_hours,
                            outdt,
                            outfrac,
                            output,
                            p10f,
                            qcycle,
                            random,
                            splitf,
                            tkerd,
                            tkern,
                            rm_dat,
                            timeout,
                            tlfrac,
                            tratio,
                            tvmix,
                            veght,
                            vscale,
                            winderrtf,
                            w_option,
                            zicontroltf,
                            ziscale,
                            z_top,
                            rundir) {

  # Enable manual rescaling of mixed layer height
  if (as.logical(zicontroltf)) {
    write_zicontrol(ziscale, file.path(rundir, 'ZICONTROL'))
  }

  # Write SETUP.CFG and CONTROL files to control model
  write_setup(varsiwant, conage, cpack, delt, dxf, dyf, dzf, frhmax, frhs, frme,
              frmr, frts, frvs, hscale, ichem, iconvect, initd, isot, kbls, kblt,
              kdef, khmax, kmix0, kmixd, kmsl, kpuff, krnd, kspl, kzmix, maxdim,
              maxpar, mgmin, ncycl, ndump, ninit, numpar, nturb, outdt, outfrac,
              p10f, qcycle, random, splitf, tkerd, tkern, tlfrac, tratio, tvmix,
              veght, vscale, winderrtf, zicontroltf,
              file.path(rundir, 'SETUP.CFG'))
  write_control(output$receptor, emisshrs, n_hours, w_option, z_top, met_files,
                file.path(rundir, 'CONTROL'))

  # Simulation timeout ---------------------------------------------------------
  # Monitors time elapsed running hymodelc. If elapsed time exceeds timeout
  # specified in run_stilt.r, kills hymodelc and moves on to next simulation
  #
  # TODO: as of R 3.5, system() and system2() have introduced a timeout arg that
  # may enable this to be deprecated in the future. For now, most linux package
  # libraries are not up to date so waiting to implement edge requirements
  eval_start <- Sys.time()
  # Launch hymodelc in the background and capture its shell PID
  cmd <- paste('(cd', rundir, '&& (./hymodelc > hymodelc.out & echo $!))')
  pid <- system(cmd, intern = TRUE)
  on.exit(tools::pskill(pid))

  repeat {
    elapsed <- as.double.difftime(Sys.time() - eval_start, units = 'secs')
    if (!pid_is_active(pid)) {
      # Model finished on its own; cancel the kill handler
      on.exit()
      break
    } else if (elapsed > timeout) {
      msg <- paste('hymodelc timeout after', elapsed, ' seconds\n')
      warning(msg)
      cat(msg, '\n', file = file.path(rundir, 'ERROR'))
      return()
    }
    Sys.sleep(1)
  }

  # Error check hymodelc output
  pf <- file.path(rundir, 'PARTICLE.DAT')
  if (!file.exists(pf)) {
    msg <- paste('Failed to output PARTICLE.DAT in', basename(rundir),
                 'Check for errors in hymodelc.out')
    warning(msg)
    cat(msg, '\n', file = file.path(rundir, 'ERROR'))
    return()
  }

  n_lines <- count_lines(pf)
  if (n_lines < 2) {
    msg <- paste(pf, 'does not contain any trajectory data.',
                 'Check for errors in hymodelc.out')
    warning(msg)
    cat(msg, '\n', file = file.path(rundir, 'ERROR'))
    return()
  }

  # Read particle file, optionally remove PARTICLE.DAT in favor of compressed
  # .rds file, and return particle data frame
  p <- read_particle(file = pf, varsiwant = varsiwant)
  # FIX: unlink() replaces system('rm ...') - portable and safe for paths
  # containing spaces or shell metacharacters
  if (rm_dat) unlink(pf)

  # For column trajectories, preserve release height as xhgt
  if (length(output$receptor$zagl) > 1) {
    x_heights <- output$receptor$zagl
    px <- data.frame(indx = 1:numpar)
    # Assign each particle its release height, assuming particles are evenly
    # divided across release heights in index order
    px$xhgt <- rep(x_heights, each = length(px$indx) / length(x_heights))
    p <- merge(p, px, by = 'indx', sort = FALSE)
  }

  # Calculate near-field dilution height based on gaussian plume width
  # approximation and recalculate footprint sensitivity for cases when the
  # plume height is less than the PBL height scaled by veght
  if (hnf_plume)
    p <- calc_plume_dilution(p, numpar, output$receptor$zagl, veght)

  p
}
|
e0ece2bb5ac9f453cf4bb5661d6de1a41869be72
|
e907785f763bcfabec58dd01278d6996cd2525ab
|
/man/inputInterestGene.Rd
|
ef123b3977067b8198c8192e7c0aef4aa528463a
|
[] |
no_license
|
cran/SPMS
|
2a8a9796677f908cae849e3a61c183394ef0c980
|
7f337e242f6f743ec4d6b88bfb1a264b63bd1ea8
|
refs/heads/master
| 2016-09-05T16:19:08.045425
| 2013-05-08T00:00:00
| 2013-05-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,198
|
rd
|
inputInterestGene.Rd
|
\name{inputInterestGene}
\alias{inputInterestGene}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Input interestGene.
}
\description{
Users can input their genes of interest according to the path. The input genes can be prepared in one file, with each line representing one gene. The gene IDs identified by this function are the same as the "graphics name" in xml files and they may be the gene symbol ID or ORF name ID, etc.
}
\usage{
inputInterestGene()
}
%- maybe also 'usage' for other objects documented here.
\references{
None
}
\author{
Xiaomeng Ni
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
##This function's returned value must be named interestOfGene.
#--interestOfGene<-inputInterestGene()
## The function is currently defined as
function ()
{
interestOfGene <- read.table(file.choose(), sep = ",", header = FALSE)
return(interestOfGene)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ interest }
\keyword{ gene }% __ONLY ONE__ keyword per line
|
7786bd8b8e88c90dfa7567784dd59c15ad276153
|
d825e8fa597121edb8ad38329e45b675f60a3c33
|
/inst/plumber.R
|
01f0ca25f23cf0a82b8ccd2a9edbbdc51f5d7d36
|
[] |
no_license
|
AndreGuerra123/BSSEmsembleR
|
559b05e75ee1e926f63774d0b1836532dc8efc4e
|
3d5a825f010ffd21f1b1787385129617d5a14a9d
|
refs/heads/master
| 2020-03-27T14:32:25.365818
| 2018-09-06T03:46:52
| 2018-09-06T03:46:52
| 146,668,332
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,451
|
r
|
plumber.R
|
#* @apiTitle BSSEnsembleR
#* @apiDescription a plumber back-end for real-time ensemble modelling
# ---- GENERICS ------ #
# Returns TRUE only for a single, non-NA, non-empty character value.
# FIX: the previous version applied `||` to is.na(x), which errors for
# length > 1 inputs on R >= 4.3, and compared against the reassignable `T`.
# Behavior for scalar inputs is unchanged.
isValidString <- function(x){
  is.character(x) && length(x) == 1L && !is.na(x) && !identical(x, "")
}
# Builds a 12-byte MongoDB-style ObjectId rendered as a hex string:
# 4-byte epoch seconds + 3-byte machine id + 2-byte process id +
# 3-byte random counter.
OBID <- function(){
  epoch_hex   <- as.hexmode(as.integer(Sys.time()))        # 4-byte timestamp
  machine_hex <- as.hexmode(6666666)                       # 3-byte fixed machine id
  process_hex <- as.hexmode(Sys.getpid())                  # 2-byte process id
  counter_hex <- as.hexmode(sample(1048576:16777215, 1))   # 3-byte random counter
  paste0(epoch_hex, machine_hex, process_hex, counter_hex)
}
# Assertion helper used by the validation functions.
# Evaluates `exp`; if it is not exactly TRUE, raises an error:
#   - result FALSE     -> "Asserted that <msg>"
#   - evaluation error -> "Fail to assert: <msg>, cause: <error text>"
# FIX: corrects the "asssert" typo in the failure message and removes a
# pointless assignment inside the tryCatch error handler (the handler's
# return value is what matters).
assim <- function(exp, msg){
  a <- tryCatch(exp, error = function(e) as.character(e))
  if(!identical(a, TRUE)){
    if(identical(a, FALSE)){
      stop(paste0("Asserted that ", msg))
    }else{
      stop(paste0("Fail to assert: ", msg, ", cause: ", as.character(a)))
    }
  }
}
# TRUE when x's class is "numeric" or "integer" (inherits() accepts a
# vector of candidate classes and matches any of them).
classNumber <- function(x){
  inherits(x, c("numeric", "integer"))
}
# ----- FILTERS ------ #
#* Attach a permissive CORS header to every response, then continue routing
#* @filter cors
cors <- function(res) { #Done
  # Allow requests from any origin; forward() hands the request on to the
  # next filter/endpoint in the plumber pipeline.
  res$setHeader("Access-Control-Allow-Origin", "*")
  plumber::forward()
} #Done
#* Authenticate every request via a userid/token pair in the JSON body
#* @filter tokenizer
tokenizer <- function(req, res){
  # Endpoints that opt out use `#* @preempt tokenizer`.
  # FIX: `res` was missing from the signature, so the rejection branch
  # crashed with "object 'res' not found" instead of returning a 404.
  body <- jsonlite::fromJSON(req$postBody)
  assertion <- getTokenValidation(body)
  if(assertion$Valid){
    plumber::forward()
  }else{
    out <- list(error = assertion$Message)
    res$status <- 404
    return(out)
  }
}
# ----- QUERIES ---- #
# Builds a JSON query document matching `field` against a Mongo ObjectId,
# e.g. '{"_id":{"$oid":"..."}}'.
# FIX: unbox() is namespace-qualified (jsonlite::) for consistency with the
# rest of this file; the bare call relied on jsonlite being attached.
queryByID <- function(obid, field = '_id'){
  q <- list(list("$oid" = jsonlite::unbox(obid)))
  names(q) <- field
  return(jsonlite::toJSON(q))
}
# Builds a JSON query document matching a plain scalar field,
# e.g. '{"username":"bob"}'.
# FIX: unbox() namespace-qualified (jsonlite::) as in createNewUser().
queryByField <- function(obj, field){
  q <- list(jsonlite::unbox(obj))
  names(q) <- field
  return(jsonlite::toJSON(q))
}
# Convenience wrapper: JSON query document matching on the "username" field.
queryByUsername <- function(username){
  queryByField(username, "username")
}
# ----- GETTERS ---- #
# Looks up a user document by username in the global `users` collection.
getUserByUsername <- function(username){
  .GlobalEnv$users$find(queryByUsername(username), '{}')
}
# Looks up a user document by its ObjectId in the global `users` collection.
getUserByID <- function(userid){
  .GlobalEnv$users$find(queryByID(userid), '{}')
}
# Given a document id in collection `col`, returns the GridFS file id stored
# in that document's "file" field (projection keeps only "file").
getFileIDByObjectID<- function(col,obid){
  col$find(queryByID(obid),'{"file":1,"_id":0}')$file
} #Done
# Downloads a GridFS file to a temp file and returns the temp file path.
# The caller is responsible for unlink()-ing the returned path.
getFileGridFS <- function(grid, fileID){
  t <- tempfile()
  # read() is called for its side effect of writing to `t`; the original
  # captured its return value in an unused local, removed here.
  grid$read(paste0("id:", fileID), t, progress = FALSE)
  return(t)
}
# -- HELPERS -- #
# Creates a user document (ObjectId, username, bcrypt hash) in the global
# `users` collection and returns it as a list. Only the bcrypt hash of the
# password is persisted, never the plaintext.
createNewUser<-function(username,password){
  id<-OBID()
  hash<-bcrypt::hashpw(password)
  .GlobalEnv$users$insert(jsonlite::toJSON(list("_id"=list("$oid" = jsonlite::unbox(id)),"username"=username,"hash"=hash)))
  out<-list("_id"=id,"username"=username,"hash"=hash)
  return(out)
} #Done #Verified
# TRUE when exactly one user row was found, it carries a usable hash string,
# and the supplied password matches that bcrypt hash.
authorizeUser <- function(user, password){
  if (nrow(user) != 1) return(FALSE)
  stored_hash <- user$hash[[1]]
  isValidString(stored_hash) && bcrypt::checkpw(password, stored_hash)
}
# TRUE when exactly one user row was found and `token` is a bcrypt hash of
# that user's stored password hash.
authorizeToken <- function(user, token){
  if (nrow(user) != 1) return(FALSE)
  bcrypt::checkpw(user$hash[[1]], token)
}
# Links an uploaded GridFS file to a user by inserting an
# {_id, user, file} document (all ObjectIds) into `col`.
# Returns the new document's id.
# FIX: unbox() namespace-qualified (jsonlite::) as elsewhere in this file.
registerUserFile <- function(col, userid, fileid){
  obid <- OBID()
  q <- list(list("$oid" = jsonlite::unbox(obid)),
            list("$oid" = jsonlite::unbox(userid)),
            list("$oid" = jsonlite::unbox(fileid)))
  names(q) <- c("_id", "user", "file")
  data <- jsonlite::toJSON(q)
  col$insert(data)
  return(obid)
}
# -- VALIDATIONs -- #
# Validates a registration request body (username, password, confirmation,
# invitation key, uniqueness). Returns list(Valid = TRUE/FALSE, Message = chr).
# Fixes vs original: TRUE/FALSE instead of reassignable T/F, redundant
# `== T` comparisons removed, "passord" message typo corrected, and the
# failure Message is stringified with as.character() like the other
# validators (it previously stored the raw condition object).
getRegistrationValidation <- function(body) {
  tryCatch({
    assim({isValidString(body$username)}, "username is not valid.")
    assim({isValidString(body$password)}, "password is not valid.")
    assim({isValidString(body$validation)}, "password confirmation is not valid.")
    assim({body$password == body$validation}, "passwords don't match.")
    assim({body$invitation == .GlobalEnv$BSSEInvitation}, "invitation key don't match.")
    assim({length(getUserByUsername(body$username)) == 0}, "username already exists.")
    list(Valid = TRUE, Message = '')
  }, error = function(e) {
    list(Valid = FALSE, Message = as.character(e))
  })
}
# Validates a login request body against the stored user record.
# Returns list(Valid = TRUE/FALSE, Message = chr).
# FIX: T/F literals replaced with TRUE/FALSE (T and F are reassignable).
getLoginValidation <- function(body) {
  tryCatch({
    assim({isValidString(body$username)}, 'username is invalid.')
    assim({isValidString(body$password)}, 'password is invalid.')
    user <- getUserByUsername(body$username)
    assim({authorizeUser(user, body$password)}, 'username does not exist or password is wrong.')
    list(Valid = TRUE, Message = '')
  }, error = function(e) {
    list(Valid = FALSE, Message = as.character(e))
  })
}
# Validates a {userid, token} pair: the token must be a bcrypt hash of the
# stored password hash for that user. Returns list(Valid = TRUE/FALSE,
# Message = chr).
# FIX: T/F literals replaced with TRUE/FALSE.
getTokenValidation <- function(body){
  tryCatch({
    assim({isValidString(body$userid)}, 'userid is missing, token is invalid.')
    assim({isValidString(body$token)}, 'token is invalid.')
    user <- getUserByID(body$userid)
    assim({authorizeToken(user, body$token)}, 'token is invalid.')
    list(Valid = TRUE, Message = '')
  }, error = function(e) {
    list(Valid = FALSE, Message = as.character(e))
  })
}
# Validates an uploaded .RData dataset. load() is expected to bind a
# predictor table X and a response table Y into this function's environment
# (assumes every uploaded file defines both -- a missing object errors and
# is caught below). Returns list(Valid = TRUE/FALSE, Message = chr).
# Fixes vs original: TRUE/FALSE instead of T/F; "Firts" message typo.
getDatasetValidation <- function(file){
  tryCatch({
    load(file)
    X <- as.data.frame(X)
    Y <- as.data.frame(Y)
    # X validation: col 1 = integer ids, col 2 = batch factor, rest numeric
    assim({ncol(X) > 2}, paste0('X has insufficient number of predictors inputs:', as.character(ncol(X))))
    assim({nrow(X) > 0}, paste0('X has insufficient number of observations:', as.character(nrow(X))))
    assim({is.integer(X[,1])}, paste0('First column of X is class ', class(X[,1]), ', and not integer class.'))
    assim({is.factor(X[,2])}, paste0('Second column of X is class ', class(X[,2]), ', and not factor class.'))
    assim({all(sapply(X[,3:ncol(X)], classNumber))}, 'All supplied predictors inputs, except for column one and two, should be of integer or numeric class.')
    # Y validation: single numeric/integer output column
    assim({ncol(Y) > 0}, paste0('Y has insufficient number of predictors outputs:', as.character(ncol(Y))))
    assim({nrow(Y) > 0}, paste0('Y has insufficient number of observations:', as.character(nrow(Y))))
    assim({classNumber(Y[,1])}, 'The Supplied predictor output should be of integer or numeric class.')
    # mutual validation: matched rows, at least one jointly complete case
    assim({nrow(X) == nrow(Y)}, paste0('X number of observations (', as.character(nrow(X)), ') differs from Y (', as.character(nrow(Y)), ').'))
    assim({sum((complete.cases(X) & complete.cases(Y))) > 0}, 'X and Y have independent number of NA or null observations.')
    list(Valid = TRUE, Message = '')
  }, error = function(e) {
    list(Valid = FALSE, Message = as.character(e))
  })
}
# -- AUTHENTICATION -- #
#* Register a new user and return a userid/token pair (passwords are never stored, only bcrypt hashes)
#* @preempt tokenizer
#* @post /register
function(req, res) {
  body <- jsonlite::fromJSON(req$postBody)
  assertion <- getRegistrationValidation(body)
  if (assertion$Valid) {
    # Create the user document, then hand back a token derived from the
    # stored hash (see authorizeToken for how it is verified later).
    newuser <- createNewUser(body$username, body$password)
    out <- list(userid = newuser$'_id' ,token = bcrypt::hashpw(newuser$'hash'))
    res$status <- 202
    return(out)
  } else{
    out <- list(error = assertion$Message)
    res$status <- 404
    return(out)
  }
} #Done Verified
#* Initial login validation
#* @preempt tokenizer
#* @post /login
function(req, res) {
  body <- jsonlite::fromJSON(req$postBody)
  assertion <- getLoginValidation(body)
  if (assertion$Valid) {
    # Issue a fresh token: a bcrypt hash of the stored password hash.
    user <- getUserByUsername(body$username)
    out <-
      list(userid = user$"_id",
           token = bcrypt::hashpw(user$hash[[1]]))
    res$status <- 202
    return(out)
  } else{
    out <- list(error = assertion$Message)
    res$status <- 404
    return(out)
  }
} #Done Verified
# -------------------------------------------------- DATASET ---------------------------------------------------------- #
# -- Available -- #
#* Get list of available datasets for a user
#* @post /datasets/available
function(req,res){
  # Return the ids of all dataset documents owned by this user.
  # (Runs behind the tokenizer filter, so the body was already validated.)
  body<-jsonlite::fromJSON(req$postBody)
  query<-queryByID(body$userid, field="user")
  fields<-'{"_id":1}'
  return(.GlobalEnv$datasets$find(query,fields)$'_id')
} #Done Verified
# -- Load -- #
#* Loads dataset file in BSSEmsembler
#* @preempt tokenizer
#* @param userid
#* @param token
#* @post /datasets/load
function(req,userid,token){
  # Multipart upload: the token is re-checked manually because this endpoint
  # preempts the tokenizer filter (the body is multipart, not JSON).
  val<-getTokenValidation(list('userid'=userid,'token'=token))
  if(val$Valid){
    # Store the file in GridFS, then link it to the user's datasets.
    fileid <- MultipartDataset2GridFS(req)
    obid<-registerUserFile(.GlobalEnv$datasets,userid,fileid)
    return(obid)
  }else{
    stop(val$Message)
  }
} #Done
# Parses a multipart upload, checks it is a valid .RData dataset, stores it
# in GridFS and returns the GridFS file id; stops with the validation
# message otherwise.
MultipartDataset2GridFS <- function(req){
  form <- Rook::Multipart$parse(req)
  # FIX: fixed = TRUE makes the extension check literal; the bare pattern
  # treated '.' as a regex wildcard (e.g. "xRData" would have matched).
  assim({grepl(".RData", form$file$filename, fixed = TRUE)}, "Input file is not a valid .RData file.")
  val <- getDatasetValidation(form$file$tempfile)
  if(val$Valid){
    upload <- .GlobalEnv$gridFS$write(form$file$tempfile, form$file$filename)
    return(upload$id)
  }else{
    stop(val$Message)
  }
}
#-- Delete -- #
#* Deletes a dataset document and its GridFS file
#* @post /datasets/delete
function(req){
  body <- jsonlite::fromJSON(req$postBody)
  # Remove the dataset document, then its GridFS payload.
  # NOTE(review): gridFS$remove is given the dataset document id, not the
  # GridFS file id (cf. getFileIDByObjectID) -- verify this is intentional.
  .GlobalEnv$datasets$remove(queryByID(body$datasetid), just_one = TRUE)
  .GlobalEnv$gridFS$remove(body$datasetid)
}
#-- Info -- #
#* Gets dataset information in BSSEmsembler
#* @post /datasets/info
function(req){
  # FIX: the parameter was `datasetid` but the body reads `req$postBody`;
  # `req` was never in scope, so the endpoint always crashed.
  body <- jsonlite::fromJSON(req$postBody)
  fileid <- getFileIDByObjectID(.GlobalEnv$datasets, body$datasetid)
  file <- getFileGridFS(.GlobalEnv$gridFS, fileid)
  met <- getFileMetaInfo(fileid)      # GridFS metadata document
  sum <- getDatasetSummary(file)      # HTML summaries of X / Y
  val <- getDatasetValidation(file)   # structural validation result
  pls <- getDatasetPlots(file)        # plots (currently unimplemented)
  unlink(file)                        # remove the temp copy from GridFS
  return(list('Meta'=met,'Summary'=sum,'Validation'=val,'Plots'=pls))
}
# Builds HTML summaries for X and Y, overall and per batch (batch = X[,2]).
# Each step is wrapped in tryDo so one failure does not abort the rest.
# NOTE(review): tryDo is not defined in this file -- assumed to be a
# project-level try wrapper; confirm it exists at runtime.
# FIX: the per-batch Y summary was assigned to YSummary (clobbering the
# overall Y summary and leaving YBatchSummary NULL); it now fills
# YBatchSummary as intended.
getDatasetSummary <- function(file){
  XSummary <- NULL
  XBatchSummary <- NULL
  YSummary <- NULL
  YBatchSummary <- NULL
  tryDo({load(file)})
  tryDo(X <- as.data.frame(X))
  tryDo(Y <- as.data.frame(Y))
  tryDo({XSummary <- getHtmlSummary(X)})
  tryDo({XBatchSummary <- getHtmlBatchSummary(X, X[,2])})
  tryDo({YSummary <- getHtmlSummary(Y)})
  tryDo({YBatchSummary <- getHtmlBatchSummary(Y, X[,2])})
  lst <- list(XSummary, XBatchSummary, YSummary, YBatchSummary)
  names(lst) <- c('XSummary', 'XBatchSummary', 'YSummary', 'YBatchSummary')
  return(lst)
}
# Renders a summarytools dfSummary of `df` as a standalone HTML fragment.
getHtmlSummary <- function(df){
  st<- summarytools::dfSummary(df, round.digits = 3)
  stv<- summarytools::view(st,method='render',transpose =T,style="rmarkdown")
  html<- htmltools::renderTags(stv)$html
  return(html)
}
# Renders summarytools descriptive statistics of `df` as an HTML fragment.
getHtmlDescriptive <-function(df){
  st<- summarytools::descr(df)
  stv<- summarytools::view(st,method='render',transpose =T,style="rmarkdown")
  return( htmltools::renderTags(stv)$html)
}
# Splits `df` by the grouping vector `cla` (batch labels) and renders one
# descriptive HTML fragment per group; returns a list named by group.
getHtmlBatchSummary <-function(df,cla){
  lapply(split(df,cla),getHtmlDescriptive)
}
# Fetches the GridFS metadata document for a file id.
getFileMetaInfo<-function(fileid){
  .GlobalEnv$gridFS$find(queryByID(fileid),'{}')
}
# TODO: plot generation for /datasets/info is not implemented yet; the
# function currently returns NULL invisibly.
getDatasetPlots<-function(file){
}
|
3f32b9c5f1d0ed9892bf0f12f76f7e8cb606b280
|
988a0594b826dc860629d8a9138acab52c29acb5
|
/Asst9.2.r
|
dc76974a082f942e8abad126a259fd45bada3eca
|
[] |
no_license
|
vasanthi72/DARET_Assignment9.2
|
c48b110947be357b8fb06e0778cf5b81eff57f32
|
93b3e62b672218d197387d641639ba5f8ecf883a
|
refs/heads/master
| 2020-03-24T23:55:11.515102
| 2018-08-01T13:59:26
| 2018-08-01T13:59:26
| 143,160,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 734
|
r
|
Asst9.2.r
|
# Saved console history (savehistory transcript): one-sample z-test and
# prop.test on the share of manual transmissions (am) in mtcars against a
# hypothesised proportion of 0.4. Exploratory re-runs are kept verbatim
# since this file is the record of the session.
data(mtcars)
table(mtcars$am)
trans <- as.data.frame(table(mtcars$am))
trans
SamProp <- trans[1,2]/sum(trans$Freq)  # sample proportion (first am level)
PopProp <- 0.4                         # hypothesised population proportion
n <- sum(trans$Freq)                   # sample size
# NOTE(review): this first z divides sqrt(p*(1-p)) by n instead of taking
# sqrt(p*(1-p)/n); it is recomputed correctly a few lines below.
z <- (SamProp - PopProp) / (sqrt(PopProp*(1-PopProp))/n)
z
((19/32)-0.4)/sqrt((0.4*(1-0.4))/32)   # z recomputed by hand with 19/32
SamProp
19/32
z <- (SamProp - PopProp) / sqrt((PopProp*(1-PopProp))/n)  # corrected z statistic
z
qnorm(1-(0.5/2))
qnorm(1-(0.05/2))                      # two-sided critical value at alpha = 0.05
# NOTE(review): pnorm(z, FALSE) passes FALSE positionally as `mean`; the
# intended lower.tail=FALSE call appears further down.
pvalue <- 2 * pnorm(z,FALSE)
pvalue
z
pnorm(z)
prop.test(trans[1,2],sum(trans$Freq),0.4,two.sided,0.95,TRUE)
prop.test(trans[1,2],sum(trans$Freq),0.4,"two.sided",0.95,TRUE)
pvalue <- 2 * pnorm(z,lower.tail=FALSE)  # two-sided p-value from z
pvalue
1-pnorm(z)
2 * (1-pnorm(z))
prop.test(trans[1,2],sum(trans$Freq),0.4,"two.sided",0.95,FALSE)  # no continuity correction
savehistory("E:/kamagyana/Computing/DARET/Submissions/Asst9.2.r")
|
b678cfe12e44763cb4f291e52e682de11faa452e
|
0cd9371fd96cb4ec5aaf816520d4ca64b2a82e49
|
/run_gsea.R
|
f4443905b6962eff32959b5075fb17ae478df720
|
[] |
no_license
|
JManc2003/gsea
|
c018a682e1a66db5d1c5168f1b94ec7cf19d1bd9
|
39e189d63a66951599381873adfca96b91ca7097
|
refs/heads/master
| 2020-06-14T06:12:02.682673
| 2014-07-13T17:52:02
| 2014-07-13T17:52:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,143
|
r
|
run_gsea.R
|
# run_gsea.R reads parameters from an input file "inputParam.txt", creates
# SLURM submission scripts that run the java version of GSEA on sherlock,
# and submits them.
# USAGE RSCRIPT gsea.R gseadir inputprefix inputdata inputchip inputcls inputdb outdir
disorder <- c("BRA")
# Path to main GSEA program
gseadir <- "/share/PI/dpwall/SOFTWARE/GSEA-P-R/gsea2-2.0.14.jar"
# Path to input parameter file (one row per normalized dataset); a separate
# name avoids reusing one variable for both the path and the data frame
param_path <- paste("/scratch/PI/dpwall/DATA/GENE_EXPRESSION/gsea/", disorder, "/inputParam.txt", sep = "")
inputdb <- c("/scratch/PI/dpwall/DATA/GENE_EXPRESSION/gsea/GENE_DATABASE/brainTerms.gmt")
# Top level output directory - subdirectories will be made inside
outdirtop <- paste("/scratch/PI/dpwall/DATA/GENE_EXPRESSION/gsea/", disorder, "/gsea", sep = "")
# Job/output scripts are written relative to this directory (.job/, .out/)
setwd('/scratch/PI/dpwall/SCRIPT/R/gsea')

# Read in input parameter file - create job script and submit for each entry
# FIX: header= was only partially matched as head=; spelled out fully
inputfile <- read.csv(param_path, sep = "\t", header = TRUE)
for (i in seq_len(nrow(inputfile))) {
  normdata <- as.character(inputfile$NORMDATA[i])
  inputchip <- inputfile$CHIP[i]
  inputcls <- inputfile$CLASS[i]
  for (db in inputdb) {
    # Database name = gene-set file name minus its .gmt extension.
    # FIX: fixed = TRUE makes the gsub patterns literal; '.' was previously
    # a regex wildcard in '.gmt' / '.gct'
    dbname <- strsplit(db, '/')[[1]]
    dbname <- gsub('.gmt', '', dbname[length(dbname)], fixed = TRUE)
    folder <- strsplit(as.character(normdata), "/")[[1]]
    folder <- folder[length(folder)]
    folder <- paste(gsub(".gct", "", folder, fixed = TRUE), "_", dbname, sep = "")
    inputprefix <- folder
    #outdir = paste(outdirtop,"/",folder,"/",sep="")
    #dir.create(outdir, showWarnings = FALSE)
    jobby <- paste(folder, "96.job", sep = "")
    # Write the SLURM batch script for this dataset/database pair
    sink(file = paste(".job/", jobby, sep = ""))
    cat("#!/bin/bash\n")
    cat("#SBATCH --job-name=",jobby,"\n",sep="")
    cat("#SBATCH --output=.out/",jobby,".out\n",sep="")
    cat("#SBATCH --error=.out/",jobby,".err\n",sep="")
    cat("#SBATCH --time=2-00:00\n",sep="")
    cat("#SBATCH --mem=8000\n",sep="")
    #cat("java -cp",gseadir,"xtools.gsea.Gsea -res",normdata,"-cls",as.character(inputcls),"-gmx",db,"-collapse false -mode Max_probe -norm meandiv -nperm 1000 -permute phenotype -rnd_type no_balance -scoring_scheme weighted -rpt_label",inputprefix,"-metric Signal2Noise -sort real -order descending -include_only_symbols true -make_sets true -median false -num 100 -plot_top_x 20 -rnd_seed timestamp -save_rnd_lists false -set_max 500 -set_min 15 -zip_report false -out",outdirtop,"-gui false\n")
    cat("java -cp",gseadir,"xtools.gsea.Gsea -res",normdata,"-cls",as.character(inputcls),"-gmx",db,"-chip",as.character(inputchip),"-collapse true -mode Max_probe -norm None -nperm 1000 -permute genes -rnd_type no_balance -scoring_scheme weighted -rpt_label",inputprefix,"-metric Signal2Noise -sort real -order descending -include_only_symbols true -make_sets true -median false -num 100 -plot_top_x 525 -rnd_seed timestamp -save_rnd_lists false -set_max 500 -set_min 15 -zip_report false -out",outdirtop,"-gui false\n")
    #cat("Rscript /scratch/PI/dpwall/SCRIPT/R/gsea/gsea.R",gseadir,inputprefix,normdata,as.character(inputchip),as.character(inputcls),db,outdir,"\n")
    sink()
    # SUBMIT R SCRIPT TO RUN ON CLUSTER
    system(paste("sbatch", paste(".job/", jobby, sep = "")))
  }
}
|
161b8f50005f2a57cce394b265e09a6f9b40874c
|
757c2db95bc362678b15b4f23968f7e3f6ebabac
|
/데이터정제_지자체관련자료.R
|
3f0db5ed4d71222558797b50d8b3191cdc488c7e
|
[] |
no_license
|
key9187/Dart-crawling
|
86071df6cc63a867151d2b0ba977677c277d53b6
|
ce7ea964adc0561d7617de06e38b5dc845ea0099
|
refs/heads/main
| 2023-07-08T06:32:43.477513
| 2021-08-05T06:38:09
| 2021-08-05T06:38:09
| 390,225,208
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,536
|
r
|
데이터정제_지자체관련자료.R
|
######################################## 0. Environment setup ########################################
# Cleans Korean local-government expenditure spreadsheets (2006+) into one
# long data_set and writes it to CSV. Comments translated from Korean.
# NOTE(review): read_excel()/excel_sheets() below require readxl, whose
# library() call is commented out here -- confirm it is attached elsewhere.
# if(!require(readxl)){install.packages('readxl')}; library(readxl) # version changed
# if(!require(XLConnect)){install.packages('XLConnect')}; library(XLConnect) # can read xls, but requires Java
if(!require(openxlsx)){install.packages('openxlsx')}; library(openxlsx) # can only read xlsx
if(!require(dplyr)){install.packages('dplyr')}; library(dplyr)
if(!require(stringr)){install.packages('stringr')}; library(stringr)
if(!require(zoo)){install.packages('zoo')}; library(zoo)
setwd("")
######################################## 1. Clean and merge files #######################################
file_list <- list.files(pattern="xls") # list the xls files in the control-variable folder
data_set <- data.frame()
for(i in 1:length(file_list)){
  list_number <- file_list[i] # store the i-th file name in list_number
  ### different cleaning steps are needed per year
  if(substr(list_number,1,2) %in% c("06", "07")){
    ### cleaning for 2006 and 2007 data
    tmp_data <- read_excel(list_number) %>% as.data.frame() # readxl: breaks under newer versions
    # tmp_data <- readWorksheet(loadWorkbook(list_number), sheet=1) # XLConnect: can read xls, but requires Java
    # tmp_data <- openxlsx::read.xlsx(list_number, sheet = 1) # openxlsx: load file into tmp_data
    tmp_nrow <- nrow(tmp_data); tmp_ncol <- ncol(tmp_data) # compute dimensions
    row_name <- str_extract(gsub("\\s", "", tmp_data[,1]), "[가-힣]+") # clean column 1 (keep Hangul only) and store row titles in row_name
    col_name <- str_extract(gsub("\\s", "", tmp_data[3,]), "[가-힣]+") # clean row 3 (keep Hangul only) and store column titles in col_name
    tmp_data <- tmp_data[12:tmp_nrow,2:tmp_ncol] # keep the block that holds the amounts
    rownames(tmp_data) <- row_name[12:tmp_nrow] # set row titles of tmp_data
    colnames(tmp_data) <- col_name[2:tmp_ncol] # set column titles of tmp_data
    tmp_data <- tmp_data[,!is.na(colnames(tmp_data))] # drop columns whose name is NA
  } else if(substr(list_number,1,2) %in% c("08")){
    ### cleaning for 2008 data
    # tmp_data <- read_excel(list_number) %>% as.data.frame() # readxl: breaks under newer versions
    # tmp_data <- readWorksheet(loadWorkbook(list_number), sheet=1) # XLConnect: can read xls, but requires Java
    tmp_data <- openxlsx::read.xlsx(list_number, sheet = 1) # openxlsx: load file into tmp_data
    tmp_data <- tmp_data[!is.na(tmp_data[,1]),] # drop rows whose first cell is NA
    tmp_data <- tmp_data[,!is.na(tmp_data[1,])] # drop columns whose first cell is NA
    tmp_nrow <- nrow(tmp_data); tmp_ncol <- ncol(tmp_data) # compute dimensions
    row_name <- str_extract(gsub("\\s", "", tmp_data[,1]), "[가-힣]+") # clean column 1 (keep Hangul only) and store row titles in row_name
    col_name <- str_extract(gsub("\\s", "", tmp_data[1,]), "[가-힣]+") # clean row 1 (keep Hangul only) and store column titles in col_name
    tmp_data <- tmp_data[10:tmp_nrow,2:tmp_ncol] # keep the block that holds the amounts
    rownames(tmp_data) <- row_name[10:tmp_nrow] # set row titles of tmp_data
    colnames(tmp_data) <- col_name[2:tmp_ncol] # set column titles of tmp_data
    tmp_data <- tmp_data[,!is.na(colnames(tmp_data))] # drop columns whose name is NA
  } else if(as.numeric(substr(list_number,1,2)) >= 09){
    ### cleaning for 2009 and later data
    sheets <- excel_sheets(list_number) # breaks under newer versions
    tmp_list <- lapply(sheets, function(x) read_excel(list_number, sheet = x)) # breaks under newer versions
    # sheets = openxlsx::getSheetNames(list_number) # discover sheet names
    # tmp_list <- lapply(sheets, function(x) openxlsx::read.xlsx(list_number, sheet = x)) # one list entry per sheet (list length = sheet count)
    tmp_data <- do.call("cbind", tmp_list) # cbind the list into a data.frame in tmp_data
    tmp_data <- tmp_data[-1,] # drop the first row
    tmp_data <- tmp_data[!is.na(tmp_data[,1]),] # drop rows whose first cell is NA
    tmp_data <- tmp_data[,!is.na(tmp_data[1,])] # drop columns whose first cell is NA
    row_name <- str_extract(gsub("\\s", "", tmp_data[,1]), "[가-힣]+") # clean column 1 (keep Hangul only) and store row titles in row_name
    col_name <- str_extract(gsub("\\s", "", tmp_data[1,]), "[가-힣]+") # clean row 1 (keep Hangul only) and store column titles in col_name
    tmp_nrow <- nrow(tmp_data) # compute dimensions
    start_n <- which(str_detect(tmp_data[,1], "인건비")) # rows from 인건비 (personnel costs) through 예비비및기타 are needed, so find the row containing 인건비
    tmp_data <- tmp_data[start_n:tmp_nrow,!col_name %>% str_detect("단체별")] # keep only the needed rows and columns
    tmp_ncol <- ncol(tmp_data) # compute dimensions
    rownames(tmp_data) <- row_name[start_n:tmp_nrow] # set row titles of tmp_data
    colnames(tmp_data) <- col_name[!col_name %>% str_detect("단체별")] # set column titles of tmp_data
    tmp_data <- tmp_data[,!is.na(colnames(tmp_data))] # drop columns whose name is NA
  }
  ### first-pass cleaning
  tmp_data <- tmp_data %>% select(-one_of("합계","시계","군계","구계")) %>% t() # drop the total columns, then transpose
  연도 <- paste0("20", substr(list_number, 1, 2)) # derive the year from the file name
  if(str_detect(list_number, "14-2")){
    ### runs when the file is 'metropolitan city' (14-2)
    수준 <- "광역시" # level = metropolitan city
    name1 <- rownames(tmp_data) # set name1
    name2 <- rownames(tmp_data) # set name2
  } else if(str_detect(list_number, "19-2")){
    ### runs when the file is 'province' (19-2)
    수준 <- "도" # level = province
    name1 <- rownames(tmp_data) # set name1
    name2 <- rownames(tmp_data) # set name2
  } else if(str_detect(list_number, "24-2")){
    ### runs when the file is 'city' (24-2)
    수준 <- "시" # level = city
    name1 <- rownames(tmp_data) # set name1
    col_name <- (col_name[!is.na(col_name)])[-1]
    tmp_tf <- str_detect(col_name, "시계")
    name2 <- na.locf(ifelse(tmp_tf == FALSE, NA, col_name), fromLast = FALSE)
    name2 <- substr(name2[!tmp_tf], 1, 2) # set name2 (parent region, carried forward)
  } else if(str_detect(list_number, "29-2")){
    ### runs when the file is 'county' (29-2)
    수준 <- "군" # level = county
    name1 <- rownames(tmp_data) # set name1
    col_name <- (col_name[!is.na(col_name)])[-1]
    tmp_tf <- str_detect(col_name, "군계")
    name2 <- na.locf(ifelse(tmp_tf == FALSE, NA, col_name), fromLast = FALSE)
    name2 <- substr(name2[!tmp_tf], 1, 2) # set name2 (parent region, carried forward)
  } else if(str_detect(list_number, "34-2")){
    ### runs when the file is 'autonomous district' (34-2)
    수준 <- "구" # level = district
    name1 <- rownames(tmp_data) # set name1
    col_name <- (col_name[!is.na(col_name)])[-1]
    tmp_tf <- str_detect(col_name, "구계")
    name2 <- na.locf(ifelse(tmp_tf == FALSE, NA, col_name), fromLast = FALSE)
    name2 <- gsub("특별", "서울", name2)
    name2 <- substr(name2[!tmp_tf], 1, 2) # set name2 (parent region, carried forward)
  }
  name3 <- paste0(name1, "_", name2) # set name3 (name1_name2 composite key)
  final_data <- cbind(연도, 수준, name1, name2, name3, tmp_data) # assemble tmp_data into the desired shape in final_data
  colnames(final_data)[6:ncol(final_data)] <-
    c("인건비", "물건비", "경상이전", "자본지출", "융자및출자", "보전재원", "내부거래", "예비비및기타") # clean column names (revisit later...)
  ### accumulate the merged data
  data_set <- rbind(data_set, final_data)
}
### second-pass cleaning _ handle NA in name2
data_set <- as.data.frame(lapply(data_set, function(x) as.character(x)), stringsAsFactors = F) # convert factors to character
# handle the Gwangju region first
# data_set%>% filter(name1=="광주") # for checking
# NOTE(review): the original comment said this writes into name2, but
# column 3 of data_set is name1 -- verify the intended column.
data_set[(data_set$수준=="시") & (data_set$name1 == "광주"), 3] <- "광주.1" # when level is city and name1 is Gwangju, write "광주.1"
data_set$name2[(data_set$name1 == "통합청주")] <- "충북" # when name1 is 통합청주 (unified Cheongju), set name2 to "충북"
match_list_u <- data_set %>% filter(name2 != "NA") %>% select(starts_with("name")) %>% unique() # unique c(name1, name2, name3) combinations stored in match_list_u
match_list_o <- data_set %>% select(starts_with('name')) # the c(name1, name2, name3) values to be fixed, stored in match_list_o
match_list <- left_join(match_list_o, match_list_u, by="name1") # join on name1 to fill in the missing values
match_list <- match_list %>% select(name1, name2.y) # keep only the needed variables
colnames(match_list) <- c("name1", "name2") # rename match_list columns
match_list$name1 <- str_extract(match_list$name1, "[가-힣]+") # clean name1: keep Hangul only
match_list$name3 <- paste0(match_list$name2, "_", match_list$name1) # rebuild name3 (name1 changed above)
data_set <- cbind(data_set, match_list) # rebuild data_set
data_set <- data_set[,c(1,2,14:16,6:13)] # reorder data_set columns
rownames(data_set) <- NULL # drop data_set row names
######################################## 2. Save the final data #######################################
write.csv(data_set, file = "data_set.csv")
|
e97ba938ab52ed326b8fca6abd3b01627bd6a2f3
|
2764143779bda1d3a777b1311a27506498704cad
|
/R/partialDependence.R
|
963dd81ef3b58aaac6270ab7a14c3f46c9560680
|
[
"MIT"
] |
permissive
|
hansvomkreuz/autoML
|
7ed716170c8d3e3ea33f2f813455c28be0ceb04d
|
36af7614f63a4ecb21da735591c7ff509193d33b
|
refs/heads/master
| 2021-01-26T00:17:22.648913
| 2020-02-20T15:46:42
| 2020-02-20T15:46:42
| 243,238,443
| 1
| 0
|
MIT
| 2020-02-26T10:44:50
| 2020-02-26T10:44:49
| null |
UTF-8
|
R
| false
| false
| 1,845
|
r
|
partialDependence.R
|
#' Generate partial dependence plots
#'
#' Creates a list of partial dependence plots for each feature used by the model. Partial dependence is simply the average prediction path a model takes whilst iterating through unique values of a feature and keeping the rest of the features static
#'
#' @param train [data.frame | Required] Training set on which the model was trained
#' @param trainedModel [mlr obj | Required] MLR trained model object
#' @param sample [numeric | Optional] A number between 0 - 1 to sub-sample the training set for faster computational time. Default of 0.1
#' @param seed [integer | Optional] Random seed number for reproducible results. Default of 1991
#'
#' @return List object containing a plot for each feature in the dataset.
#' @export
#' @examples
#' mod <- mlr::train(makeLearner("classif.ranger"), iris.task)
#' partialDependence(train = iris, mod)
#' @author
#' Xander Horn
partialDependence <- function(train, trainedModel, sample = 0.1, seed = 1991){
  # NOTE(review): library() inside a function is a package-dev anti-pattern;
  # kept for backward compatibility with the rest of this package.
  library(iml)
  library(caret)
  library(mlr)

  if(missing(train)){
    stop("Provide training set")
  }
  if(missing(trainedModel)){
    stop("Provide trained mlr model obj")
  }

  set.seed(seed)  # reproducible sub-sampling
  feats <- trainedModel$features
  y <- trainedModel$task.desc$target
  # Stratified sub-sample of the training data for faster PDP computation
  temp <- train[caret::createDataPartition(y = train[,y], p = sample, list = FALSE), ]
  predObj <- Predictor$new(model = trainedModel, data = temp[,feats], y = temp[,y])

  # FIX: preallocate the list and use seq_along() (safe when feats is empty;
  # 1:length(feats) would iterate over c(1, 0))
  plots <- vector("list", length(feats))
  for(i in seq_along(feats)){
    pd <- FeatureEffect$new(predObj, feature = feats[i], method = "pdp")
    plots[[i]] <- plot(pd) +
      theme_bw() +
      ggtitle(paste0(feats[i], " Partial Dependence")) +
      geom_line(size = 1, col = "#3A48C5")
  }
  names(plots) <- feats

  return(list(plots = plots))
}
|
6e48472a8027c10b9e37feefb744689048f07080
|
32811ffa5097c963274e6eb74ca2fd28fa7320ee
|
/tests/test-shared_residues.R
|
4a8143a3f370a4b6a5bd44d68e3a64476ea0973c
|
[
"MIT"
] |
permissive
|
Arcadia-Science/2022-actin-prediction
|
bf978ad5d54c4a4a53aae169f7dabc91ce428778
|
03425fca69fe45288f5f6e90be84c91564634459
|
refs/heads/main
| 2023-06-22T08:35:54.115985
| 2023-06-20T18:31:08
| 2023-06-20T18:31:08
| 545,442,889
| 4
| 0
|
MIT
| 2023-06-20T18:31:09
| 2022-10-04T11:33:25
|
R
|
UTF-8
|
R
| false
| false
| 768
|
r
|
test-shared_residues.R
|
# Regression test: summary counts recomputed from the full per-residue table
# must match the stored summary file for the Giardia actin homolog (P51775).
test_that("check that giardia residue calculations match expectations", {
  source("../R/shared_residues.R")  # provides calculate_shared_residues()
  df <- readr::read_tsv("P51775_ACT_GIAIN-longitudinal_actin_contact_full.tsv")
  df_summary <- calculate_shared_residues(df)
  test_summary <- readr::read_tsv("P51775_ACT_GIAIN-longitudinal_actin_contact_summary.tsv")
  expect_equal(df_summary$num_matching, test_summary$num_matching)
})
# Regression test: same check for the bovine gamma-actin ATP-binding
# residues (P63258) against its stored summary file.
test_that("check that bovine residue calculations match expectations", {
  source("../R/shared_residues.R")  # provides calculate_shared_residues()
  df <- readr::read_tsv("P63258_ACTG_BOVIN-atp_binding_full.tsv")
  df_summary <- calculate_shared_residues(df)
  test_summary <- readr::read_tsv("P63258_ACTG_BOVIN-atp_binding_summary.tsv")
  expect_equal(df_summary$num_matching, test_summary$num_matching)
})
|
e7e907dabfa7cdf7569c777b8027090fb0f1bd65
|
86888c1d5a4e086590cc51d6826bc845ddb56b9a
|
/Actividad-1 y 2-Trabajo-Final.R
|
378e069086b39d6cecc762f4fc8ff73de206ef80
|
[] |
no_license
|
BrandonNarvaez/ClaseAlgoritmosyLenguajes
|
ad2c48248f2ac8883a6360a0637bcf5f0d5ba610
|
c9a20b5039fcbeadb432f41cb94c7c5c692f3e24
|
refs/heads/master
| 2020-03-26T22:47:52.334931
| 2018-12-07T16:53:17
| 2018-12-07T16:53:17
| 145,487,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 213
|
r
|
Actividad-1 y 2-Trabajo-Final.R
|
# Prints `nombre` followed by a + b * c (note: * binds tighter than +),
# and returns the printed string invisibly (print()'s return value).
santiago <- function(nombre, a, b, c) {
  print(paste(nombre, a + b * c))
}

santiago("santiago", 4, 5, 6)

# Call santiago five times with arguments 1..5 (x, y, z move in lockstep),
# printing the loop counter before each call.
x <- 1
y <- 1
z <- 1
for (h in 1:5) {
  print(h)
  santiago("santiago", x, y, z)
  x <- x + 1
  y <- y + 1
  z <- z + 1
}
|
8823d07468d177090795d7ac720d6d5303cdce1e
|
53f27fb6f7699cfcadeb1b01eb8965a6c7187c16
|
/Paddy_Lab.R
|
22021160bd03635cff1975825a65fff3273dc287
|
[] |
no_license
|
mllimesha/DPA-Assessment
|
c27c5bb4bf7250e9b7bbb47be2e84ee0e38d0ce6
|
c4031f4e4dc2de9c2359dff6cad12fc46b7be0c3
|
refs/heads/master
| 2022-10-21T15:32:45.598512
| 2020-06-13T15:09:50
| 2020-06-13T15:09:50
| 272,031,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 364
|
r
|
Paddy_Lab.R
|
# Exploratory plots of the paddy dataset (sown acreage and production).
# NOTE(review): `view` (lowercase) and ggplot() require tibble/ggplot2 to be
# attached; no library() calls appear in this file -- confirm they are
# loaded elsewhere (e.g. via tidyverse).
paddyData = read.csv("PaddyNew.csv", header = TRUE)
view(paddyData)
plot (paddyData [2:3])  # scatter of columns 2 and 3
par(mfrow=c(1, 2))      # two panels side by side
hist(paddyData$Sown_Acres, cex.main = 0.75)
p = ecdf(paddyData$Sown_Acres)  # empirical CDF of sown acreage
plot(p, cex.main = 0.75)
# Stacked histogram #
# NOTE(review): position="dodge" draws grouped (side-by-side) bars, not
# stacked ones, despite the heading above.
ggplot(paddyData, aes(fill=Season, y=Production_Bushels, x=Year)) +
  geom_bar(position="dodge", stat="identity")
|
eb662c51410c73b22f7ba3351e116d2abdeace8e
|
0733ee5d0081e6bda5a86214e85dd9e9582c4225
|
/R/5_get_compound_data.R
|
2c6cc4a70d9c517e8751813eff98ffb4e8893194
|
[
"MIT"
] |
permissive
|
gjgetzinger/monaR
|
26c83a9adc06e47a245de1d5736ee03e23d76cd0
|
ccca6dd98318c638cf66fbaa85c90b7fb20f68a7
|
refs/heads/master
| 2022-11-06T06:29:51.896774
| 2020-06-21T23:18:39
| 2020-06-21T23:18:39
| 258,864,327
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,534
|
r
|
5_get_compound_data.R
|
#' Extract chemical data from MONA queries
#'
#' S3 generic; dispatch happens on the class of `df`
#' (mona_id_query or mona_spec_query).
#'
#' @param df A tibble returned by a MoNA query
#' @param var The variable to return from the meta data
#'
#' @return a tibble containing extracted chemical data
#' @export
#'
mona_getChem <- function(df, var) {
  UseMethod("mona_getChem")
}
#' @describeIn mona_getChem Get values from meta data from ID query
#' @export
#' @examples
#' mona_getChem(example_id_query, var = "inchi")
#' mona_getChem(example_id_query, var = "inchiKey")
#' mona_getChem(example_id_query, var = "external id")
mona_getChem.mona_id_query <-
  function(df,
           var = c("inchi", "inchiKey", "molFile", "names", "SMILES",
                   "compound class", "molecular formula", "external id",
                   "computed")
  ) {
    # Exactly one variable per call; match.arg() validates the choice.
    var <- match.arg(var, several.ok = FALSE)
    # Branch 1: simple per-compound columns -- select the column when present,
    # otherwise emit a single NA column under the requested name. Results are
    # named by df$id and stacked with an "id" key column.
    if (var %in% c("inchi", "inchiKey", "molFile")) {
      d <- purrr::map(df$compound,
                      .f = function(x) {
                        xx <- dplyr::as_tibble(x)
                        if (var %in% colnames(xx)) {
                          dplyr::select_at(xx, var)
                        } else {
                          dplyr::transmute(xx, !!var := NA)
                        }
                      }
      ) %>% stats::setNames(nm = df$id) %>% dplyr::bind_rows(.id = "id")
    }
    # Branch 2: chemical names live in a nested list-column (x$names).
    if (var %in% c("names")) {
      d <- purrr::map(df$compound,
                      .f = function(x) {
                        dplyr::as_tibble(x$names[[1]])
                      }
      ) %>% stats::setNames(nm = df$id) %>% dplyr::bind_rows(.id = "id")
    }
    # Branch 3: metaData entries matched by their "name" field; keep one
    # non-NA value per id, then spread to wide form.
    if (var %in% c("SMILES", "compound class", "molecular formula")) {
      d <- purrr::map(df$compound, function(x) {
        dplyr::as_tibble(x$metaData[[1]]) %>%
          dplyr::filter_at(
            .vars = dplyr::vars(name),
            .vars_predicate = dplyr::any_vars(. == !!var)
          )
      }) %>% stats::setNames(nm = df$id) %>% dplyr::bind_rows(.id = "id") %>%
        dplyr::group_by(id) %>% dplyr::filter(!is.na(value)) %>%
        dplyr::distinct(id, .keep_all = TRUE) %>% tidyr::pivot_wider(id)
    }
    # Branch 4: metaData entries matched by their "category" field, spread to
    # one column per metadata name.
    if (var %in% c("external id", "computed")) {
      d <- purrr::map(df$compound, function(x) {
        dplyr::as_tibble(x$metaData[[1]]) %>%
          dplyr::filter_at(dplyr::vars(category), dplyr::any_vars(. == !!var))
      }) %>%
        stats::setNames(nm = df$id) %>% dplyr::bind_rows(.id = "id") %>%
        dplyr::group_by(id) %>% tidyr::pivot_wider(id, name)
    }
    # Tag the result so downstream methods can dispatch on mona_meta.
    class(d) <- append("mona_meta", class(d))
    return(d)
  }
#' @describeIn mona_getChem Extract meta data from spectrum queries
#' @export
#' @examples
#' mona_getChem(example_spec_query, 'inchi')
mona_getChem.mona_spec_query <- function(df,
                                         var = c("inchi", "inchiKey", "molFile",
                                                 "names", "SMILES",
                                                 "compound class",
                                                 "molecular formula",
                                                 "external id", "computed")) {
  # Same extraction logic as the mona_id_query method, but spectrum queries
  # nest the compound records one level deeper, under df$hit.
  var <- match.arg(var, several.ok = FALSE)
  # Case 1: top-level compound fields; missing columns become a single NA
  # column so all records bind into one tibble keyed by "id".
  if (var %in% c("inchi", "inchiKey", "molFile")) {
  d <- purrr::map( df$hit$compound,
                   .f = function(x) {
                     xx <- dplyr::as_tibble(x)
                     if (var %in% colnames(xx)) {
                       dplyr::select_at(xx, var)
                     } else {
                       # tidy-eval: create a column literally named `var`, all NA
                       dplyr::transmute(xx, !!var := NA)
                     }
                   }
  ) %>% stats::setNames(nm = df$hit$id) %>% dplyr::bind_rows(.id = "id")
  }
  # Case 2: the nested names table of each compound.
  if (var %in% c("names")) {
  d <- purrr::map(df$hit$compound,
                  .f = function(x) {
                    dplyr::as_tibble(x$names[[1]])
                  }
  ) %>% stats::setNames(nm = df$hit$id) %>% dplyr::bind_rows(.id = "id")
  }
  # Case 3: metaData rows matched by `name`; one non-NA value per record,
  # widened so each field is a column.
  if (var %in% c( "SMILES", "compound class", "molecular formula")) {
    d <- purrr::map(df$hit$compound, function(x) {
      dplyr::as_tibble(x$metaData[[1]]) %>%
        dplyr::filter_at(
          .vars = dplyr::vars(name),
          .vars_predicate = dplyr::any_vars(. == !!var)
        )
    }) %>% stats::setNames(nm = df$hit$id) %>% dplyr::bind_rows(.id = "id") %>%
      dplyr::group_by(id) %>% dplyr::filter(!is.na(value)) %>%
      dplyr::distinct(id, .keep_all = TRUE) %>% tidyr::pivot_wider(id)
    # NOTE(review): result remains grouped by id (no ungroup()) -- confirm.
  }
  # Case 4: metaData rows matched by `category`, widened by entry name.
  if (var %in% c("external id", "computed")) {
    d <- purrr::map(df$hit$compound, function(x) {
      dplyr::as_tibble(x$metaData[[1]]) %>%
        dplyr::filter_at(dplyr::vars(category), dplyr::any_vars(. == !!var))
    }) %>%
      stats::setNames(nm = df$hit$id) %>% dplyr::bind_rows(.id = "id") %>%
      dplyr::group_by(id) %>% tidyr::pivot_wider(id, name)
  }
  # Tag the result so downstream mona_meta methods can dispatch on it.
  class(d) <- append("mona_meta", class(d))
  return(d)
}
|
bd41230aa5887682f7a4befe56e5adceba35af85
|
f2a982ef2ad5d0a1086830a59f2700bc7e0c668a
|
/man/read_csv_sampled.Rd
|
94780b36fd41403c125af348227f39d409c471b3
|
[] |
no_license
|
jimsforks/cleanser
|
6f87363fefd5c0223c17d349ffa19f8d5ff1956c
|
1597f2bfcf58a0084c2810fea236e38a51385e43
|
refs/heads/master
| 2022-03-16T23:49:13.342589
| 2019-09-27T07:43:25
| 2019-09-27T07:43:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,465
|
rd
|
read_csv_sampled.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chunked_sampled.R
\name{read_csv_sampled}
\alias{read_csv_sampled}
\title{Read in a file with randomly sampled lines.}
\usage{
read_csv_sampled(file, size = 100, header = TRUE, sep = ",",
dec = ".", nlines = NULL, skip = 0, fill = TRUE, ...)
}
\arguments{
\item{file}{The name of the file which the data are to be read from.}
\item{size}{integer Number of rows to import.}
\item{header}{logical A logical value indicating whether the file contains the names of the variables as its first line. If missing, the value is determined from the file format: header is set to TRUE if and only if the first row contains one fewer field than the number of columns.}
\item{sep}{character The field separator character.}
\item{dec}{character The character used in the file for decimal points.}
\item{nlines}{integer Total number of lines in the file}
\item{skip}{integer The number of lines of the data file to skip before beginning to read data.}
\item{fill}{logical If \code{TRUE}, rows of unequal length are implicitly padded with blank fields (passed to \code{read.table}).}
\item{...}{Other read.table parameters.}
}
\description{
Read in a file with randomly sampled lines.
}
\examples{
big_iris <- system.file("dataset","big_iris.csv",package = "cleanser")
big_iris2 <- system.file("dataset","big_iris2.csv",package = "cleanser")
guess_separator(big_iris)
guess_separator(big_iris2)
read_csv_sampled(big_iris,size=5)
read_csv_sampled(big_iris2,size=5,sep = ";",dec=",")
}
|
f70758ea428a7f3f113c0e218e08de76cda674eb
|
0900287305c2a1c8e3ea89ce22dd86b42d3146e1
|
/man/seabirds.Rd
|
f834216a420093c49bfe6b249f4676b66c1b28b5
|
[] |
no_license
|
cran/GLMsData
|
29ff0d385b86d8b69b647a4a3cdde59c3fa995ef
|
adf13787011fc2603433c97edc40940a7408b48e
|
refs/heads/master
| 2022-09-11T10:58:41.904753
| 2022-08-22T05:20:08
| 2022-08-22T05:20:08
| 128,936,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,324
|
rd
|
seabirds.Rd
|
\name{seabirds}
\alias{seabirds}
\docType{data}
\title{Counts of seabirds}
\description{
Counts of four species of seabirds observed in sampling quadrats
}
\usage{data(seabirds)}
\format{
A data frame with 40 observations on the following 3 variables.
\describe{
\item{\code{Quadrat}}{the quadrat;
a numeric factor with levels \code{0} through \code{10}}
\item{\code{Species}}{the species;
a factor with levels \code{M} (murre),
\code{CA} (crested auklet), \code{LA} (least auklet)
and \code{P} (puffin)}
\item{\code{Count}}{the number of seabirds of the given species
in the given quadrat;
a numeric vector}
}
}
\details{
The data are counts of four seabird species in ten 0.25 square-km quadrats in the
Anadyr Strait (off the Alaskan coast)
during summer, 1998.
}
\source{
Andrew R. Solow and Woollcott Smith (1991)
Cluster in a heterogeneous community sampled by quadrats.
\emph{Biometrics},
\bold{47}(1),
311--317.
}
\references{
D. J. Hand, F. Daly, A. D. Lunn, K. J. McConway, and E. Ostrowski (1994)
\emph{A Handbook of Small Data Sets}, London: Chapman and Hall.
Dataset 215.
}
\examples{
data(seabirds)
summary(seabirds)
}
\keyword{datasets}
|
bab45081d56bf71ddced57fcd55448296a1c7c44
|
61aba3afa4f16ce312aa5b0b8d5c939e54afc6d2
|
/Influencer _analysis.R
|
b8bc2ccb54959ca534c66ac4a9f511cbaf362567
|
[] |
no_license
|
Vivek-Sakthivel/SentimentAnalysis
|
4e06a94ffa3725599e70b18fe4c947db9b34e5aa
|
d3e18083bfcd3b99d144aae30077a88676eb638c
|
refs/heads/master
| 2020-04-21T01:51:37.221584
| 2019-08-17T19:49:27
| 2019-08-17T19:49:27
| 169,236,614
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
r
|
Influencer _analysis.R
|
# To extract the top 20 tweeters of the extracted hashtag in the collected corpus.
# BUG FIX: the assignment operators (`<-`) and string quotes were stripped from
# this file (likely HTML-extraction damage), leaving every line a syntax error;
# they are restored below. Requires twitteR (twListToDF) and shiny
# (reactive/renderPlot/renderTable/output) from the surrounding app.
toptweeters <- function(tweetDataset) {
  sampleTweets <- twListToDF(tweetDataset)
  sampleTweets <- unique(sampleTweets)
  # Make a table of the number of Tweets per user
  tweeterData <- as.data.frame(table(sampleTweets$screenName))
  # descending order of tweeters according to frequency of sampleTweets
  tweeterData <- tweeterData[order(tweeterData$Freq, decreasing = TRUE), ]
  names(tweeterData) <- c("User", "Tweets")
  return(tweeterData)
}

# Tabular representation of the Top 20 tweeters details
tweeterData <- reactive({ tweeterData <- toptweeters(extracted_TweetsList()) })
output$top20TweetersGraph <- renderPlot(
  barplot(head(tweeterData()$Tweets, 20), names = head(tweeterData()$User, 20),
          horiz = FALSE, las = 2, main = "Top Tweeters", col = 1)
)
output$top20TweetersTable <- renderTable(head(tweeterData(), 20))
|
9f4092eaa760b659cc969d725a8dfcea22f5a89e
|
8c5c5530bdcabed4ca47875f5bb2cb355cf93100
|
/run_analysis.r
|
ea875d25d70a6de47d4ff7119f7f1fdf2e747e7d
|
[] |
no_license
|
JadfGitHub/GettingAndCleaningDataCourse
|
b69aeb2512b2006de19919992627caa6ca5cd2c8
|
5c0c4e43279bc49ce62e22c7afa30e99752d1a0c
|
refs/heads/master
| 2021-01-23T19:36:07.914844
| 2014-05-25T20:17:08
| 2014-05-25T20:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,566
|
r
|
run_analysis.r
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test sets,
# label columns from features.txt, and export mean/std measurements.
# Requires the "UCI HAR Dataset" folder in the working directory.
#load the various datasets
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt", quote="\"")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", quote="\"")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt", quote="\"")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt", quote="\"")
features <- read.table("./UCI HAR Dataset/features.txt", quote="\"")
#name the variables in X_test dataset using features
# NOTE(review): feature names are not unique in this dataset, so the
# resulting data frame has duplicated column names -- confirm acceptable.
names(X_test)<-features$V2
#add subjects variable to X_test using cbind function and subject_test dataset
X_test_s<-cbind(subject_test,X_test)
#name V1 of X_test_s
names(X_test_s)[1]<-"Subject"
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", quote="\"")
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt", quote="\"")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt", quote="\"")
#name the variables in X_train dataset using features
names(X_train)<-features$V2
#add subjects variable to X_train using cbind function and subject_train dataset
X_train_s<-cbind(subject_train,X_train)
#name V1 of X_train_s
names(X_train_s)[1]<-"Subject"
#merge X_test and X_train to form "X_complete" using rbind function
X_complete<-rbind(X_train_s,X_test_s)
#extract only measurements on mean and standard deviation
# NOTE(review): in a regex, "()" is an empty group, so "mean()" matches any
# name containing "mean" (including meanFreq); use fixed = TRUE in grep()
# if only the literal "mean()"/"std()" columns are wanted -- confirm intent.
meanVars<-grep("mean()",names(X_complete))
stdVars<-grep("std()",names(X_complete))
# Column 1 is Subject; mean columns then std columns follow (original order
# within X_complete is not preserved across the two groups).
RelevantVars<-c(1,meanVars,stdVars)
X_tidy<-X_complete[,RelevantVars]
#export tidy dataset text file
write.table(X_tidy, "./Course_Project_TidyData.txt", sep="\t")
|
af6374022b26dea7d9c930d41df30a4d02863603
|
a9c565654a27a7013dd7d5323bd9d018111211e5
|
/R/yml_to_pkg.R
|
279d6ec713cbe5636c286fa94749a08cf9195065
|
[] |
no_license
|
c5sire/yml2shiny
|
ee956ae6788571820a2386c030c2e7c91c50c782
|
e4723746ef8bdcf615719c1a21c47ca2192a84b3
|
refs/heads/master
| 2016-09-05T12:14:15.407786
| 2015-05-04T20:47:29
| 2015-05-04T20:47:29
| 35,058,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,188
|
r
|
yml_to_pkg.R
|
# Map a named list/vector of yml column types (e.g. list(id = "integer"))
# to an SQLite column-definition fragment: "id INTEGER,\n  name TEXT".
# BUG FIX: the original used `%>%` without magrittr guaranteed in scope;
# rewritten with a plain nested call (behaviour unchanged).
# NOTE(review): unknown type strings make switch() return NULL, which is
# silently dropped by unlist() and misaligns names -- confirm inputs are valid.
cols_to_sl3 <- function(columns){
  cols <- unlist(columns)
  types <-as.vector(cols)
  # Translate a single yml type keyword to its SQLite storage class.
  ff <- function(type) {switch(type,
                               integer = "INTEGER",
                               real = "REAL",
                               string = "TEXT",
                               logical = "TEXT",
                               date = "TEXT",
                               time = "TEXT",
                               blob = "BLOB"
  )}
  tt <- unlist(lapply(types, ff))
  paste(paste(names(cols), tt), collapse = ",\n  ")
}
# Build the output file path for a rendered template: the literal word
# "table" in the template filename is replaced by the table name.
# BUG FIX: replaced stringr::str_replace with base sub(fixed = TRUE), removing
# a third-party dependency; both replace only the first literal occurrence.
get_out_name <- function (tpl_file, table_name, target_dir) {
  tbl_fil <- sub("table", table_name, tpl_file, fixed = TRUE)
  file.path(target_dir, tbl_fil)
}
# Extract the ui widget type from a yml ui declaration such as
# "select; a,b,c" -> "select" (part before the first ";", trimmed).
# BUG FIX: replaced stringr::str_split/str_trim with base strsplit/trimws,
# removing a third-party dependency.
get_ui_type <- function(ui_el){
  trimws(strsplit(ui_el, ";", fixed = TRUE)[[1]][1])
}
# Extract the detail string from a yml ui declaration such as
# "select; a,b,c" -> "a,b,c" (part after the first ";", trimmed).
# Returns NA if the declaration has no ";" part (same as the original).
# BUG FIX: replaced stringr::str_split/str_trim with base strsplit/trimws,
# removing a third-party dependency.
get_ui_details <- function(ui_el){
  trimws(strsplit(ui_el, ";", fixed = TRUE)[[1]][2])
}
# Render the language-lookup (ll) section of a yml table definition as R
# source text: each language id becomes "ID  = list( \nkey = 'value' ... \n)".
# BUG FIXES: removed the dead `i=1` assignment, and replaced `1:length(ids)`
# with seq_along(ids) so an empty list returns "" instead of erroring.
get_dict <- function(ll){
  ids <- names(ll)
  ent <- character(length(ids))
  for(i in seq_along(ids)){
    # One "key = 'value'" fragment per dictionary entry, newline-prefixed.
    z <- paste("\n", names(ll[[i]]), " = '", unlist(ll[[i]]), "'", sep="")
    v <- paste(z, collapse=", ")
    ent[i] <- paste(toupper(ids[i]), " = list(", v, "\n)")
  }
  paste(ent, collapse=",\n  ")
}
# Parse slider details of the form "min=1, max=10, default=5" into an
# integer vector c(1L, 10L, 5L): split on ",", then on "=", keep the values.
# BUG FIX: removed stringr/%>% dependencies (base strsplit/trimws/matrix);
# values are identical to the original data.frame-based extraction.
get_ui_details_slider <- function(ui_el){
  details <- get_ui_details(ui_el)
  pairs <- strsplit(details, ",", fixed = TRUE)[[1]]
  tokens <- trimws(unlist(strsplit(pairs, "=", fixed = TRUE)))
  # tokens alternate name/value; fold into a 2-column (name, value) matrix.
  kv <- matrix(tokens, ncol = 2, byrow = TRUE)
  as.integer(kv[, 2])
}
# Generate database/table init scripts and shiny ui fragments from a yml
# database definition, rendering whisker templates once per table.
# NOTE(review): `name` and `path` are currently unused; all paths are
# hard-coded relative to the package source tree -- confirm intended.
yml_to_pkg <- function(name="", path=getwd()){
  # read yaml file
  yml <- yaml::yaml.load_file("inst/examples/db.yml")
  # read function templates to init database, tables, etc
  # database files are directly under the directory
  tpl_files <- list.files("inst/templates", "*.R")
  target_dir <- "."
  # BUG FIX: `n` was assigned inside the loop *after* its first use in
  # `for (i in 1:n)`, which fails unless a global `n` happens to exist.
  n <- length(names(yml))
  for(i in seq_len(n)){
    table_name <- names(yml)[i]
    # `col_defs`/`table_name` are picked up by whisker.render() from this
    # environment, so their names must not change.
    col_defs <- cols_to_sl3(yml[[i]]$columns)
    # Render every table-level template for this table.
    for(j in seq_along(tpl_files)){
      tpl <- readLines(file.path("inst/templates", tpl_files[j]))
      out <- whisker::whisker.render(tpl)
      out_id <- get_out_name(tpl_files[j], table_name, target_dir)
      writeLines(out, con = out_id)
    }
    # create language lookup dictionary
    dict_defs <- get_dict(yml[[i]]$ll)
    tpl <- readLines(file.path("inst/templates/ui", "ui_dict.R"))
    out_id <- file.path(target_dir, paste("ui_",table_name,"_dict.R", sep="" ))
    out <- whisker::whisker.render(tpl)
    writeLines(out, con = out_id)
    # create ui elements based on yml declarations in ui
    columns <- names(yml[[i]]$ui)
    for(j in seq_along(columns)) {
      column <- columns[j]
      ui_type <- get_ui_type(yml[[i]]$ui[[column]])
      if(ui_type == "select"){
        choices <- get_ui_details(yml[[i]]$ui[[column]])
        tpl <- readLines(file.path("inst/templates/ui", "select.R"))
        out_id <- file.path(target_dir, paste0("ui_", column,".R" ))
        out <- whisker::whisker.render(tpl)
        writeLines(out, con = out_id)
      }
      if(ui_type == "slider"){
        choices <- get_ui_details(yml[[i]]$ui[[column]])
        tpl <- readLines(file.path("inst/templates/ui", "slider.R"))
        out_id <- file.path(target_dir, paste0("ui_", column,".R" ))
        # BUG FIX: was `get_ui_detais_slider` (typo) -- an undefined function.
        y <- get_ui_details_slider(yml[[i]]$ui[[column]])
        column_min <- y[1]
        column_max <- y[2]
        column_default <- y[3]
        out <- whisker::whisker.render(tpl)
        writeLines(out, con = out_id)
      }
    }
  }
  # read shiny templates to create shiny fragment and shiny app
}
# standard functions: add one record, update one record
# add dictionary function
# internally: check on import that the excel table headers match the definition file!
# also on import: option to batch replace or add from an excel file!
# Next create shiny functions by ui_element, grouped into forms, editable table and a whole example
# with linked lookup tables.
# other features: import/export to excel files. One table per sheet.
# use devtools::in_dir to create a new package per database!
|
050571b1d5b407ba046a3c1123cb1fa26f8e028b
|
ebbe08d58a57ae2e9d308a12df500e1e0ef8d098
|
/microbiome/figures/fig2_colorbar.R
|
2f2ccded9c21f4dbb99d794b947700b345e8f176
|
[] |
no_license
|
Drizzle-Zhang/bioinformatics
|
a20b8b01e3c6807a9b6b605394b400daf1a848a3
|
9a24fc1107d42ac4e2bc37b1c866324b766c4a86
|
refs/heads/master
| 2022-02-19T15:57:43.723344
| 2022-02-14T02:32:47
| 2022-02-14T02:32:47
| 171,384,799
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,174
|
r
|
fig2_colorbar.R
|
library(ggplot2)
# color bar
# Dummy 5-tile data whose fill encodes `pval` (0..4); the plots exist only
# to render standalone colour legends, which are saved as PNGs below.
df_plot <- data.frame(pval = c(0, 1, 2, 3, 4), TF = as.character(c(0, 1, 2, 3, 4)))
df_plot$NUL <- rep('1', nrow(df_plot))
# Red gradient legend (dark red at high values).
plot_bar_up <-
  ggplot(data = df_plot, aes(x = TF, y = NUL, fill = pval)) +
  geom_tile() +
  scale_fill_gradient(low = 'transparent', high = '#8B0000', breaks = c(0, 2, 4)) +
  labs(fill = '') +
  theme(legend.title = element_text(size = 6, color = "black"),
        legend.text = element_text(size = 9, color = "black"))
# NOTE(review): hard-coded absolute output paths -- portable only on the
# original author's machine.
file.up <- '/home/drizzle_zhang/microbiome/result/Figs/ColorBar_up.png'
ggsave(plot = plot_bar_up, filename = file.up,
       height = 5, width = 5, units = 'cm')
# Blue gradient legend (dark blue at high values); identical apart from colour.
plot_bar_down <-
  ggplot(data = df_plot, aes(x = TF, y = NUL, fill = pval)) +
  geom_tile() +
  scale_fill_gradient(low = 'transparent', high = '#00008B', breaks = c(0, 2, 4)) +
  labs(fill = '') +
  theme(legend.title = element_text(size = 6, color = "black"),
        legend.text = element_text(size = 9, color = "black"))
file.down <- '/home/drizzle_zhang/microbiome/result/Figs/ColorBar_down.png'
ggsave(plot = plot_bar_down, filename = file.down,
       height = 5, width = 5, units = 'cm')
|
8113eddeb804d9ff3f7c1179d68bb940a25383a0
|
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
|
/inst/snippet/pheno-weight01.R
|
ec40bf1ec515db16385c032ac3e97befb9448b61
|
[] |
no_license
|
rpruim/fastR2
|
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
|
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
|
refs/heads/main
| 2022-05-05T23:24:55.024994
| 2022-03-15T23:06:08
| 2022-03-15T23:06:08
| 3,821,177
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 92
|
r
|
pheno-weight01.R
|
# Multiple regression of log body weight on log waist and log height
# (log-log model, so coefficients are elasticities).
# NOTE(review): `Pheno` and msummary() (mosaic package) come from the
# surrounding fastR2 context -- this snippet is not self-contained.
pheno.lm <-
  lm(log(weight) ~ log(waist) + log(height), data = Pheno)
msummary(pheno.lm)
|
892aaad6b21217a2c479aabf36adb6de9e4c0bab
|
c44188038f65d2e665ad8f7ee138a1cb48256efa
|
/man/kmeans.Rd
|
056898c37afb8e11760353935c60dbe7e5aef5f5
|
[] |
no_license
|
astamm/game
|
91972389d0e7dca54f1705d5f6fc7222c44a9071
|
353bc1f9ed8333f47933928feca0119f81a57f75
|
refs/heads/master
| 2020-05-04T23:00:19.845264
| 2019-06-04T08:55:48
| 2019-06-04T08:55:48
| 179,530,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,771
|
rd
|
kmeans.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kmeans.R
\name{kmeans}
\alias{kmeans}
\alias{kmeans.default}
\alias{kmeans.sgd}
\alias{kmeans.gmd}
\title{K-Means Clustering}
\usage{
kmeans(x, ...)
\method{kmeans}{default}(x, centers, iter.max = 10L, nstart = 1L,
algorithm = c("Hartigan-Wong", "Lloyd", "Forgy", "MacQueen"),
trace = FALSE)
\method{kmeans}{sgd}(x, k = 2, iter.max = 50L)
\method{kmeans}{gmd}(x, k = 2, iter.max = 50L, d2 = NULL,
method = "ward.D", rule = 2, shift = FALSE,
avoid_mean_computation = FALSE)
}
\arguments{
\item{x}{A numeric matrix where each row is a data point or an object that
can be coerced to such a matrix (such as a numeric vector or a data frame
with all numeric columns), an \code{\link{sgd}} object or a
\code{\link{gmd}} object.}
\item{...}{not used.}
\item{centers}{either the number of clusters, say \eqn{k}, or a set of
initial (distinct) cluster centres. If a number, a random set of
(distinct) rows in \code{x} is chosen as the initial centres.}
\item{iter.max}{the maximum number of iterations allowed.}
\item{nstart}{if \code{centers} is a number, how many random sets
should be chosen?}
\item{algorithm}{character: may be abbreviated. Note that
\code{"Lloyd"} and \code{"Forgy"} are alternative names for one
algorithm.}
\item{trace}{logical or integer number, currently only used in the
default method (\code{"Hartigan-Wong"}): if positive (or true),
tracing information on the progress of the algorithm is
produced. Higher values may produce more tracing information.}
\item{k}{The number of clusters to look for (default: \code{2L}).}
\item{method}{character: may be abbreviated. \code{"centers"} causes
\code{fitted} to return cluster centers (one for each input point) and
\code{"classes"} causes \code{fitted} to return a vector of class
assignments.}
}
\value{
An object of class \code{"kmeans"} which as a \code{print} and a
\code{fitted} methods. It is a list with at least the following components:
\describe{
\item{\code{cluster}}{A vector of integers (among \code{1:k}) indicating
the cluster to which each point is allocated.}
\item{\code{centers}}{A matrix of cluster centres.}
\item{\code{totss}}{The total sum of squares.}
\item{\code{withinss}}{Vector of within-cluster sum of squares, one
component per cluster.}
\item{\code{tot.withinss}}{Total within-cluster sum of squares.}
\item{\code{betweenss}}{The between-cluster sum of squares.}
\item{\code{size}}{The number of points in each cluster.}
\item{\code{iter}}{The number of (outer) iterations.}
\item{\code{ifault}}{integer: indicator of a possible algorithm problem –
for experts.}
}
}
\description{
This function performs k-means clustering of the data points in a data set.
}
\section{Methods (by class)}{
\itemize{
\item \code{default}: This is the \code{\link[stats]{kmeans}} function of the
\pkg{stats} package. We refer the user to the corresponding documentation
for more details on the available algorithms and examples.
\item \code{sgd}: Implementation for Single Gaussian Data (stored in objects
of class \code{\link{sgd}}).
\item \code{gmd}: Implementation for Gaussian Mixture Data (stored in objects
of class \code{\link{gmd}}).
}}
\examples{
x <- sgd(
c(mean = 0, precision = 1 ),
c(mean = 3, precision = 0.5),
c(mean = -1, precision = 2 )
)
kmeans(x)
N <- 100
M <- 4
w <- matrix(runif(N * M), N, M)
w <- w / rowSums(w)
samp <- tidyr::crossing(
observation = paste0("O", 1:N),
component = paste0("C", 1:M)
) \%>\%
dplyr::mutate(mixing = as.numeric(t(w)))
dict <- tibble::tibble(
component = paste0("C", 1:M),
mean = numeric(M),
precision = 1:M
)
x <- gmd(samp, dict)
kx <- kmeans(x)
}
|
821bc2b0efcd26f6e7f8e78aa5b7ec526fbc2e20
|
f5d2dd91994929a25bd36dc78b246bee85202adf
|
/man/getWeights.Rd
|
d783975ac07ee60a9c5078104e67e44f9b536bb2
|
[] |
no_license
|
environmentalinformatics-marburg/Reot
|
1350feb80c342aa6c94172d68c58d5e55ae8ad1c
|
1a3e09b08e960b80b236d571d3c637b8e29272fd
|
refs/heads/master
| 2020-04-24T22:32:13.185940
| 2014-08-25T09:29:07
| 2014-08-25T09:29:07
| 11,943,730
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 597
|
rd
|
getWeights.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{getWeights}
\alias{getWeights}
\title{Calculate weights from latitude}
\usage{
getWeights(x)
}
\arguments{
\item{x}{a Raster* object}
}
\value{
a numeric vector of weights
}
\description{
Calculate weights using the cosine of latitude to compensate for area
distortion of non-projected lat/lon data
}
\examples{
data("australiaGPCP")
wghts <- getWeights(australiaGPCP)
wghts.rst <- australiaGPCP[[1]]
wghts.rst[] <- wghts
opar <- par(mfrow = c(1,2))
plot(australiaGPCP[[1]], main = "data")
plot(wghts.rst, main = "weights")
par(opar)
}
|
69c35fc26bbd4c30c5464e2f22de203460004a57
|
6c9474206ce5cdd33adca1308ec9686ae6ca8b77
|
/phase1/daySummary.r
|
93f88746e23ea12c74ffd0e52a5b3143ce56febf
|
[
"MIT"
] |
permissive
|
rohbockn/tickerFeed
|
69a35cf824fa0720b3e41327a4f984d58a76768c
|
de85034e9423916c80445725e6b16cb22be9d957
|
refs/heads/master
| 2021-09-05T04:22:40.434570
| 2018-01-24T06:31:33
| 2018-01-24T06:31:33
| 116,171,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,963
|
r
|
daySummary.r
|
# Title: daySummary.r
# Objective: Gather data on positions and report at the end of day
# Created by: NR
# Additional editors:
########################################################################
# Preamble
########################################################################
# Set options
options(stringsAsFactors=FALSE)
# libraries
library(lattice)
library(quantmod)
library(optparse)
library(reshape2)
# Structure/document how arguments are to be passed to this script.
# BUG FIX: the original list() had a trailing comma after make_option(...),
# which is a syntax error in R ("argument 2 is empty").
option_list = list(
  make_option(c("-w", "--working_directory"), type="character", default=NULL,
              help="Path to the base level of the local repository instance", metavar="character")
)
opt_parser = OptionParser(option_list=option_list)
opt = parse_args(opt_parser)
working.dir <- opt$working_directory
setwd(working.dir)
getwd()
# Source common Fns
source('tickerFns.r')
source("tactFns.r")
########################################################################
# Gather data:
########################################################################
# Transaction-level position history from the simulation output.
history <- read.csv(file=file.path(working.dir,'sim001','simPos.csv'),header=TRUE)
history$timestamp <- with(history,as.POSIXct(timestamp))
# Determine if positions are closed:
# wd01 = share counts per transaction type, wd02 = cash per transaction type.
history.wd01 <- dcast(formula=basis.id + position ~ type, value.var='count', fun.aggregate=sum, data=history)
history.wd02 <- dcast(formula=basis.id + position ~ type, value.var='cash.transaction', fun.aggregate=sum, data=history)
# start here tomorrow, finish this and use as duration of hold
# NOTE(review): this hold-duration table is overwritten a few lines below,
# so the computation is currently dead code.
history.wd03 <- dcast(formula=basis.id + position ~ type, value.var='timestamp',fun.aggregate=function(x) max(x)-min(x),data=history)
history.wd01 <- history.wd01[which(!is.na(history.wd01$basis.id)),]
history.wd02 <- history.wd02[which(!is.na(history.wd02$basis.id)),]
# A position is closed when shares acquired equal shares sold.
history.wd01$closed <- with(history.wd01, ifelse(acquire==sell,1,0))
history.wd03 <- merge(history.wd01,history.wd02,by=c('basis.id','position'),all=TRUE, suffixes=c(".count",".total_price"))
# NOTE(review): if no "Deposit*" columns exist, -integer(0) selects zero
# columns; guard this if that case can occur.
history.wd <- history.wd03[,-grep(names(history.wd03),pattern="^Deposit.*")]
history.wd$date <- as.POSIXct(gsub(x=history.wd$basis.id,pattern="^(.*)_\\d",replacement="\\1"))
# Per-position profit (sell cash is positive, acquisition cash negative).
history.wd$delta <- with(history.wd, acquire.total_price+sell.total_price)
history.wd$success <- with(history.wd, ifelse(delta>0,1,0))
history.wd$percent.return <- with(history.wd,delta/abs(acquire.total_price)*closed)
# Daily aggregates: capital outlay, realized profit, turned capital.
full.outlay <- with(history.wd,aggregate(x=list(day.outlay=abs(acquire.total_price)),by=list(date=date),FUN=sum))
history.wd <- merge(history.wd,full.outlay,by='date',all=TRUE)
day.profit <- with(history.wd,aggregate(x=list(day.profit=delta*closed), by=list(date=date),FUN=sum))
history.wd <- merge(history.wd,day.profit,by='date',all=TRUE)
day.utilization <- with(history.wd,aggregate(x=list(day.turned=abs(acquire.total_price)*closed),by=list(date=date),FUN=sum))
history.wd <- merge(history.wd, day.utilization,by='date',all=TRUE)
history.wd$day.utilization <- with(history.wd, day.turned/day.outlay)
history.wd$day.return <- with(history.wd, day.profit/day.outlay)
history.wd$day.turned.return <- with(history.wd, day.profit/day.turned)
# Next steps include
# accountability. Script running every day to take note of deposits and attribute percentages of fund to deposit source/designation
# Maybe look at position reports and rebalance designation percentages at the end of the day from deposits and current levels. Use bases instead of current value for non-closed positions as no profit has yet been realized.
# Install latex or markdown language to output a 'whitepaper' report at the end of each day
# When doing the day summary, use the major indices (dia, nasdaq, s&p500, etc) as baselines to judge performance by.
# Make sure reports will work for prod as well as sim.
# Have daily report identify candidate positions for start of next day
# Consider having any profits rebalance every day proportionate to all designations. Consider having a designation for tax, tithes, etc.
|
d250f9f442329def7e25d4755d65d5e3231fbad2
|
51863f0eec5d4536ae91f63e4366f6f361eff1b9
|
/Assignment for homework- Lecture 2.R
|
9c97bc4a779726dfac9b144252528f94e709bd65
|
[] |
no_license
|
kaoriito/BKN-599--Introduction-to-statistical-learning-for-Biological-Science-in-R
|
1745e77e3e147cc257aad855bbf2e4b910910b9d
|
264649f0c18620a761594c6aede9f7a7155f0c51
|
refs/heads/master
| 2020-04-30T19:51:02.148254
| 2019-03-21T21:04:01
| 2019-03-21T21:04:01
| 177,050,093
| 1
| 0
| null | 2019-03-22T01:24:24
| 2019-03-22T01:24:23
| null |
UTF-8
|
R
| false
| false
| 816
|
r
|
Assignment for homework- Lecture 2.R
|
# BKN 599- Lecture 2- Jan 17th 2019
# clear workspace variables
rm(list = ls());
# clear the console window (equivalent of Ctrl+L)
cat("\014")
# close all plots
graphics.off()
library(ISLR)
# Use the lm() function to perform a simple linear regression with
# mpg as the response and horsepower as the predictor. Use the
# summary() function to print the results. Comment on the output.
lm.fit =lm(mpg~horsepower ,data=Auto )
summary(lm.fit)
# Predictions
# Confidence interval: uncertainty in the mean response at horsepower = 98.
# Prediction interval: uncertainty for a single new observation (wider).
predict (lm.fit ,data.frame(horsepower=98), interval ="confidence")
predict (lm.fit ,data.frame(horsepower=98), interval ="prediction")
# Plot
# NOTE(review): attach() is discouraged; plot(Auto$horsepower, Auto$mpg)
# would avoid polluting the search path.
attach(Auto)
plot(horsepower,mpg)
abline (lm.fit, col= "red") #abline(a,b) draws any line with intercept a and slope b
# Splitting the screen into 4
# The four lm diagnostic plots: residuals vs fitted, Q-Q, scale-location, leverage.
par(mfrow=c(2,2))
plot(lm.fit)
par(mfrow=c(1,1))
|
991ba3c7da93aedd16ca181045932b6b14f250ef
|
414dcb572c9f3c417c505b0a2a0bb05596bab5c5
|
/R/stepwiseReversible.R
|
8bc0e99c1f4e46cdca275d7fa038baeea81804a9
|
[] |
no_license
|
magnusdv/pedmut
|
e053c69c4296ec60c5a33ba5eb810451cb1f1d0d
|
f02631674e4848ddde92504f1d3c1f1bce202d58
|
refs/heads/master
| 2023-06-02T16:04:14.385448
| 2023-05-25T12:49:53
| 2023-05-25T12:49:53
| 147,926,814
| 2
| 0
| null | 2023-05-22T20:32:50
| 2018-09-08T11:17:08
|
R
|
UTF-8
|
R
| false
| false
| 3,975
|
r
|
stepwiseReversible.R
|
#' Reversible stepwise mutation model
#'
#' A reversible stepwise mutation model is created following the approach of
#' Dawid et al. (2002).
#'
#' For the stepwise reversible model, the mutation rate \eqn{r_{i,j},\, i\neq
#' j} is proportional to the overall mutation rate \eqn{\lambda} for given
#' values of the range, the allele frequency \eqn{p_i} and n, the number of
#' alleles. Hence, one can determine bounds UW and UB so that the model is well
#' defined if \eqn{\lambda \leq UW} and bounded, i.e., \eqn{r_{i,j} \leq p_j,\,
#' i\neq j}, if \eqn{\lambda \leq UB}, The bounds UW and UB are computed.
#'
#' @param alleles A vector of integer integers.
#' @param afreq A numeric vector of allele frequencies.
#' @param rate A numeric mutation rate.
#' @param range A positive number.
#' @param maxRateOnly A logical, by default FALSE. See Value.
#'
#' @return A reversible stepwise mutation model with overall mutation rate equal
#' to `rate`.
#'
#' If `maxRateOnly` is TRUE, the function returns a vector of two numbers
#' named `UW` and `UB`. The first of these is the maximum overall mutation
#' rate for a well-defined stepwise reversible mutation matrix with the given
#' input. The latter (UB) is the maximum rate under the additional restraint
#' that the model is bounded by `afreq`.
#'
#' @author Thore Egeland.
#'
#' @export
#'
#' @examples
#' stepwiseReversible(alleles = 1:3,
#' afreq = c(0.2, 0.3, 0.5),
#' rate = 0.001,
#' range = 0.1)
#'
#' stepwiseReversible(alleles = 1:3,
#' afreq = c(0.2, 0.3, 0.5),
#' range = 0.1,
#' maxRateOnly = TRUE)
#'
#' # Model not well defined:
#' \dontrun{
#' stepwiseReversible(alleles = 1:3,
#' afreq = c(0.2, 0.3, 0.5),
#' rate = 0.7,
#' range = 0.1)
#' }
stepwiseReversible = function(alleles, afreq, rate, range, maxRateOnly = FALSE) {
  # Alleles must be integer-valued: rates depend on the step distance |i - j|.
  # `stop2` is a package-internal error helper defined elsewhere in pedmut.
  if(!is.integer(alleles) && !(is.numeric(alleles) && all(alleles == as.integer(alleles))))
    stop2("Non-integer alleles detected")
  if(!is.numeric(range) || (range <= 0 || range >= 1))
    stop2("`range` must be in the interval (0,1): ", range)
  # Upper limits UW (well-defined) / UB (bounded) for the overall rate.
  mxr = maxRate(alleles, afreq, range)
  if(maxRateOnly)
    return(mxr)
  if(mxr[["UW"]] < rate)
    stop2("Model not well defined; max `rate` for the given input is: ", mxr[["UW"]])
  # remaining checking will be taken care of by `mutationModel` below
  n = length(afreq)
  # Geometric-series constant a = sum_{k=0}^{n-1} range^k.
  a = (1 - range^n)/(1 - range)
  R = matrix(ncol = n, nrow = n, 0)
  for (i in 1:n){
    for(j in (1:n)[-i]) {
      # Off-diagonal rate r_{i,j}: proportional to `rate`, decaying
      # geometrically with |i - j| and scaled by 1/p_i (Dawid et al. 2002).
      R[i,j] = rate * (1 - range) * range^{abs(i-j)}/
        (2*range*(n - a))*(1/afreq[i])
    }
    # Diagonal chosen so every row sums to 1 (valid transition matrix).
    R[i,i] = 1 - sum(R[i,-i])
  }
  dimnames(R) = list(alleles, alleles)
  # `mutationModel` (package-internal) validates the matrix/frequencies and
  # wraps the result as a mutation model object.
  mutationModel(matrix = R, model = "custom", afreq = afreq, alleles = alleles)
}
#' Upper limits for overall mutation rate for the stepwise reversible model.
#'
#' @param alleles A character vector with allele labels.
#' @param afreq A numeric vector of allele frequencies.
#' @param range A positive number.
#'
#' @return A vector of two numbers named `UW` and `UB`. The first of these is
#' the maximum overall mutation rate for a well-defined stepwise reversible
#' mutation matrix with the given input. The latter (UB) is the upper limit of
#' the overall mutation rate under the additional restraint that the model is
#' bounded by `afreq`.
#'
#' @author Thore Egeland.
#'
# Compute the upper limits c(UW = ..., UB = ...) for the overall mutation
# rate of the stepwise reversible model (see roxygen above).
# Only length(afreq) and the frequencies enter the computation; `alleles`
# is accepted for interface symmetry with stepwiseReversible().
# Improvements: hoisted the loop-invariant constant `a` out of the loops,
# replaced apply(R1, 1, sum) with rowSums(), and used seq_len() sequences.
maxRate = function(alleles, afreq, range){
  n = length(afreq)
  # Geometric-series constant a = sum_{k=0}^{n-1} range^k (loop-invariant).
  a = (1 - range^n)/(1 - range)
  # R1[i, j] = r_{i,j} / lambda, the per-unit-rate off-diagonal entries.
  R1 = matrix(0, nrow = n, ncol = n)
  for (i in seq_len(n)){
    for(j in setdiff(seq_len(n), i)){
      R1[i,j] = (1 - range) * range^abs(i - j)/
        (2*range*(n - a))*(1/afreq[i])
    }
  }
  # Essential that diag(R1) = 0: row sums are the total off-diagonal rate,
  # so rows stay valid (<= 1) exactly when lambda <= 1/max(row sums).
  linesums = rowSums(R1)
  boundDefined = 1/max(linesums)
  # Boundedness r_{i,j} <= p_j requires lambda <= p_j / max_i R1[i,j].
  maks = apply(R1, 2, max)
  c(UW = boundDefined,
    UB = min(afreq/maks))
}
|
f0b19e99bb49fd2b4f0046b4ef7b107185e6544b
|
01c7b98d9d798c0346701fee1d221c51b324080e
|
/R/device_query.R
|
93d2968483f3824c14507b98892f40f7cb03c801
|
[
"MIT"
] |
permissive
|
strategist922/RCUDA-1
|
a59f153fc3bbcfd184d1320a3bbed97d3ce419af
|
7f9cd49dc075bcfbc4422c6eebd150c291fd6397
|
refs/heads/master
| 2020-09-20T05:42:11.415829
| 2016-11-21T17:21:27
| 2016-11-21T17:21:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
device_query.R
|
#' gpuquery
#'
#' This function returns the information of available GPU device in system
#' @seealso \code{\link{creategpu}}
#' @export
#' @examples
#' gpuquery()
gpuquery <- function()
{
  # Delegates to the registered C routine "devicequery".
  # NOTE(review): PACKAGE = "supplement" must match the shared library the
  # routine is compiled into -- confirm, since this file ships in RCUDA.
  # NOTE(review): the value of the assignment is returned invisibly; add an
  # explicit return/print if callers expect visible output.
  ext <- .Call(
    "devicequery",
    PACKAGE = "supplement"
  )
}
|
feacd6369e7beff5b2e2fcff99d76d0fb25eb025
|
cfbb6b9abcfc107153a5f24e58823384e37ccc6d
|
/scripts/UKB_gxdrug/cv_overfit.R
|
ef45d8be2bafc47a0bdce20a5e94596c45360cfc
|
[] |
no_license
|
drewmard/druggene
|
63f6cca52d4bd0237b02363a14666d7397e4035e
|
b0339ec688645bacae7d34dd5be7fdbac99216e6
|
refs/heads/master
| 2023-05-22T16:57:53.280916
| 2021-06-14T19:29:03
| 2021-06-14T19:29:03
| 331,698,407
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,871
|
r
|
cv_overfit.R
|
# 10-fold cross-validation comparing three polygenic scores (PGS000007, a
# clumped score, and their scaled average) as predictors of disease status.
# NOTE(review): `type.lst`, `i`, `dataf.mg`, `null.mod`, `param.vec` and
# nagelkerke() are defined elsewhere in the project -- this fragment is not
# self-contained.
pgs_name <- type.lst[i]
# NOTE(review): the assignment above is immediately overwritten -- dead code.
pgs_name <- 'PGS000007'
# Combined score: mean of the two z-scored PGS columns, re-scaled.
dataf.mg$combined <- scale(apply(data.frame(scale(dataf.mg[,'PGS000007']),scale(dataf.mg[,'P_0.00001.R2_0.1.KB_250'])),1,mean))
# Random fold assignment (1..10) per row; folds are unbalanced by design.
ind <- sample(1:10,nrow(dataf.mg),replace=TRUE)
res <- list()
for (ind.sub in 1:10) {
  print(ind.sub)
  # Hold out fold ind.sub; train logistic models on the remaining folds.
  df.train <- dataf.mg[ind!=ind.sub,]
  df.test <- dataf.mg[ind==ind.sub,]
  # Model 1: published PGS000007 plus covariates.
  mod1 <- glm(disease ~ `PGS000007`+bmi+age+menopause+
                number_live_birth+one_birth+
                PC1+PC2+PC3+PC4+PC5+PC6+PC7+PC8+PC9+PC10, data = df.train,family = binomial(link='logit'))
  pred1 <- predict(mod1,newdata = df.test,type='response')
  # Model 2: clumped/thresholded score plus covariates.
  mod2 <- glm(disease ~ P_0.00001.R2_0.1.KB_250+bmi+age+menopause+
                number_live_birth+one_birth+
                PC1+PC2+PC3+PC4+PC5+PC6+PC7+PC8+PC9+PC10, data = df.train,family = binomial(link='logit'))
  pred2 <- predict(mod2,newdata = df.test,type='response')
  # Model 3: the combined score plus covariates.
  mod3 <- glm(disease ~ combined+bmi+age+menopause+
                number_live_birth+one_birth+
                PC1+PC2+PC3+PC4+PC5+PC6+PC7+PC8+PC9+PC10, data = df.train,family = binomial(link='logit'))
  pred3 <- predict(mod3,newdata = df.test,type='response')
  # Out-of-fold correlation between observed disease and predicted risk.
  res[[ind.sub]] <- data.frame(r1=cor(df.test$disease,pred1),r2=cor(df.test$disease,pred2),r3=cor(df.test$disease,pred3))
}
res.df <- do.call(rbind,res)
# Paired t-tests of per-fold correlations between the three models.
t.test(res.df[,1],res.df[,3],paired=TRUE)
t.test(res.df[,2],res.df[,3],paired=TRUE)
t.test(res.df[,1],res.df[,2],paired=TRUE)
# Full-data Nagelkerke pseudo-R^2 for every score in type.lst vs null.mod.
for (i in 1:length(type.lst)) {
  print(paste0(i,'/',length(type.lst)))
  pgs_name <- type.lst[i]
  mod <- glm(disease ~ dataf.mg[,type.lst[i]]+bmi+age+menopause+
               number_live_birth+one_birth+
               PC1+PC2+PC3+PC4+PC5+PC6+PC7+PC8+PC9+PC10, data = dataf.mg,family = binomial(link='logit'))
  param <- nagelkerke(fit=mod,null=null.mod)$Pseudo.R.squared.for.model.vs.null[3,]
  param.vec <- c(param.vec,param)
}
|
a812007e83d8fe29e49ce8b36da8b47497d7f60d
|
7aa114908d996c3e873c78bce8b17afe08cb7816
|
/man/uniqueTargets.Rd
|
04b400922135be7afe80afb4bef1fe91ae0aba30
|
[] |
no_license
|
peiwen18/miRNAmRNA
|
9c5f861d97c77cacc5089c50cb08df2313965a11
|
0756073a2ebb57e75543fe745f54e75b44747b31
|
refs/heads/master
| 2021-06-21T12:20:10.396855
| 2017-01-24T11:55:03
| 2017-01-24T11:55:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 607
|
rd
|
uniqueTargets.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/db_functions.R
\name{uniqueTargets}
\alias{uniqueTargets}
\title{Unique mirs}
\usage{
uniqueTargets(path, dbName, mir, tables = NULL)
}
\arguments{
\item{path}{path to database}
\item{dbName}{database name}
\item{mir}{microRNA for which unique targets will be obtained}
\item{tables}{NULL or one or more table names in the database}
}
\value{
vector of unique targets
}
\description{
function to get all unique targets for a specific mir.
}
\details{
Details follow.
}
\author{
Maarten van Iterson, Sander Bervoets
}
|
c8c4ff2647651761b98a94dff38fb542db1bb824
|
a46fe604555ba1016139daec6710758512ec2c89
|
/man/getClinicalByGene.Rd
|
e8da9b7f13fcc87938e96cc4c604533ef5761249
|
[] |
no_license
|
melsiddieg/cellbaseR
|
201898d272734b0a97218d305c0d32987929036f
|
8b724bb65402ed8b445829f57fe58fbf09d4a7c1
|
refs/heads/master
| 2021-07-30T12:52:15.888698
| 2021-07-28T06:07:50
| 2021-07-28T06:07:50
| 56,924,006
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 275
|
rd
|
getClinicalByGene.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user.R
\name{getClinicalByGene}
\alias{getClinicalByGene}
\title{getClinicalByGene}
\usage{
getClinicalByGene()
}
\description{
A convenience method to fetch clinical variants for specific gene/s
}
|
da0f2a083e29f0ad2fac77b1c18f499f17f6afff
|
91c07d83227fe6d47c4937baa24a7d57b6a8b4e4
|
/CCreplicationsV1.R
|
6db6a9764aa93f6b31334efc7c70b51bc4498030
|
[] |
no_license
|
BGFarrar/P-value-simulations
|
ca14462e2bdd913560f73dcdb9642c8fdcdf4a0f
|
ffec0eab51d0db9e732b74a32a472192195214c2
|
refs/heads/master
| 2021-07-16T22:24:34.395882
| 2020-08-23T09:29:42
| 2020-08-23T09:29:42
| 203,995,022
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,223
|
r
|
CCreplicationsV1.R
|
## Simulation code accompanying "What can comparative cognition expect from replication studies?"
## Ben Farrar
## NOTE(review): require() + install.packages() installs missing packages at
## run time -- acceptable for a standalone script, but plain library() is
## preferred in shared code.
if (!require("effsize")) install.packages("effsize")
library(effsize)
if (!require("ggplot2")) install.packages("ggplot2")
library(ggplot2)
#### Section 1 Simulation Study ####
## Compute required mean differences between groups for a one-sided two-sample
## t test with n = 10 per group, sd = 5 and alpha = 0.05, at three power levels ####
power.t.test(10, delta= NULL, 5, power = 0.8, sig.level=0.05, type="two.sample", alternative = "one.sided")
## 80% power requires 5.781487 difference between groups
power.t.test(10, delta= NULL, 5, power = 0.5, sig.level=0.05, type="two.sample", alternative = "one.sided")
## 50% power requires 3.822626 difference between groups
power.t.test(10, delta= NULL, 5, power = 0.2, sig.level=0.05, type="two.sample", alternative = "one.sided")
## 20% power requires 1.865756 difference between groups
#### Original Study Simulations #####
## Run 10,000 simulations at 80% power: two groups of n = 10 drawn from
## Normal(50, 5) and Normal(50 - 5.781487, 5); negative draws are floored at 0.
## Stores the one-sided t-test p value and the observed mean difference.
p <- NULL
diff <- NULL
set.seed(22071996)
for(i in 1:10000){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50-5.781487, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
p[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
diff[i] <- mean(sample1) - mean(sample2)
}
## NOTE(review): this re-tests only the LAST simulated sample pair (and with
## var.equal = TRUE, unlike the Welch tests inside the loop); it looks like an
## interactive sanity check rather than part of the simulation -- confirm.
t.test(sample1, sample2, alternative = "greater", var.equal = TRUE)
## proportion significant (empirical power; should be near 0.8)
sum(p<0.05)/10000
## Compute mean difference between groups across all samples
mean(diff)
sd(diff)
## Compute mean difference between groups in samples leading to p<0.05
d <- data.frame(p, diff)
d1 <- subset(d, p<0.05)
mean(d1$diff)
## Compute inflation of difference estimate in p<0.05 samples vs all samples
(mean(d1$diff)-mean(diff))/mean(diff)
## Run 10,000 simulations for 50% power
p1 <- NULL
diff1 <- NULL
for(i in 1:10000){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50-3.822626, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
p1[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
diff1[i] <- mean(sample1) - mean(sample2)
}
## proportion significant
sum(p1<0.05)/10000
## Compute mean difference between groups in all samples
mean(diff1)
## Compute mean difference between groups in samples leading to p<0.05
e <- data.frame(p1, diff1)
e1 <- subset(e, p1<0.05)
mean(e1$diff1)
## Compute inflation of difference estimate in p<0.05 samples vs all samples
(mean(e1$diff1)-mean(diff1))/mean(diff1)
## Run 10,000 simulations at 20% power
p2 <- NULL
diff2 <- NULL
for(i in 1:10000){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50-1.865756, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
p2[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
diff2[i] <- mean(sample1) - mean(sample2)
}
## total significant
sum(p2<0.05)/10000
## Compute mean difference between groups in all samples
mean(diff2)
## Compute mean difference between groups in samples leading to p<0.05
f <- data.frame(p2, diff2)
f1 <- subset(f, p2<0.05)
mean(f1$diff2)
## Compute inflation of difference estimate in p<0.05 samples vs all samples
(mean(f1$diff2)-mean(diff2))/mean(diff2)
## Run 10,000 simulations at 5% power - only false positives
p3 <- NULL
diff3 <- NULL
for(i in 1:10000){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
p3[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
diff3[i] <- mean(sample1) - mean(sample2)
}
## total significant
sum(p3<0.05)/10000
## Compute mean difference between groups in all samples
mean(diff3)
## Compute mean difference between groups in samples leading to p<0.05
g <- data.frame(p3, diff3)
g1 <- subset(g, p3<0.05)
mean(g1$diff3)
## Compute inflation of difference estimate in p<0.05 samples vs all samples
(mean(g1$diff3)-mean(diff3))/mean(diff3)
#### Caculate expected number of successfull replications ####
## Mathematically derived using power * proportion / total replication attempts
(0.05*0.05+0.2*0.2 + 0.5*0.5 + 0.8*0.8)/(0.8+0.5+0.2+0.05)
## Number of successful replications by p-value
fivep1 <- sum(p3 <= 0.01)
fivep2 <- sum(0.01 < p3 & p3 <= 0.02)
fivep3 <- sum(0.02 < p3 & p3 <= 0.03)
fivep4 <- sum(0.03 < p3 & p3 <= 0.04)
fivep5 <- sum(0.04 < p3 & p3 <= 0.05)
twentyp1 <- sum(p2 <= 0.01)
twentyp2 <- sum(0.01 < p2 & p2 <= 0.02)
twentyp3 <- sum(0.02 < p2 & p2 <= 0.03)
twentyp4 <- sum(0.03 < p2 & p2 <= 0.04)
twentyp5 <- sum(0.04 < p2 & p2 <= 0.05)
fiftyp1 <- sum(p1 <= 0.01)
fiftyp2 <- sum(0.01 < p1 & p1 <= 0.02)
fiftyp3 <- sum(0.02 < p1 & p1 <= 0.03)
fiftyp4 <- sum(0.03 < p1 & p1 <= 0.04)
fiftyp5 <- sum(0.04 < p1 & p1 <= 0.05)
eightyp1 <- sum(p <= 0.01)
eightyp2 <- sum(0.01 < p & p <= 0.02)
eightyp3 <- sum(0.02 < p & p <= 0.03)
eightyp4 <- sum(0.03 < p & p <= 0.04)
eightyp5 <- sum(0.04 < p & p <= 0.05)
## mathematicaly expected replication success for this simulation
(pless01 <- (fivep1*0.05 + twentyp1*0.2 + fiftyp1*0.5 + eightyp1*0.8)/(fivep1 + twentyp1 + fiftyp1 + eightyp1))
(p0102 <- (fivep2*0.05 + twentyp2*0.2 + fiftyp2*0.5 + eightyp2*0.8)/(fivep2 + twentyp2 + fiftyp2 + eightyp2))
(p0203 <- (fivep3*0.05 + twentyp3*0.2 + fiftyp3*0.5 + eightyp3*0.8)/(fivep3 + twentyp3 + fiftyp3 + eightyp3))
(p0304 <- (fivep4*0.05 + twentyp4*0.2 + fiftyp4*0.5 + eightyp4*0.8)/(fivep4 + twentyp4 + fiftyp4 + eightyp4))
(p0405 <- (fivep5*0.05 + twentyp5*0.2 + fiftyp5*0.5 + eightyp5*0.8)/(fivep5 + twentyp5 + fiftyp5 + eightyp5))
(p0405noeighty <- (fivep5*0.05 + twentyp5*0.2 + fiftyp5*0.5)/(fivep5 + twentyp5 + fiftyp5))
## Proportion of successfull replications overall
(sum(p <= 0.05)*0.8 + sum(p1 <= 0.05)*0.5 + sum(p2 <= 0.05)*0.2 + sum(p3 <= 0.05)*0.05) / (sum(p <= 0.05) + sum(p1 <= 0.05) + sum(p2 <= 0.05) + sum(p3 <= 0.05))
## mathematically expected replication success for studies like these in general
0.8*(0.8/(0.8+0.5+0.2+0.05)) + 0.5*(0.5/(0.8+0.5+0.2+0.05)) + 0.2*(0.2/(0.8+0.5+0.2+0.05)) + 0.05*(0.05/(0.8+0.5+0.2+0.05))
##### plot p value distributions and simulate replication studies #####
## plot p values from original studies
d1.1 <- d1$p
e1.1 <- e1$p1
f1.1 <- f1$p2
g1.1 <- g1$p3
sigresults <- c(d1.1, e1.1, f1.1, g1.1)
power <- as.factor(c(rep(80, nrow(d1)), rep(50, nrow(e1)), rep(20, nrow(f1)), rep(5, nrow(g1))))
sigres <- data.frame("p" = sigresults, "power" = power)
ggplot(sigres, aes(x = power, y = p, color = power)) + geom_jitter() + geom_violin(draw_quantiles = c(0.25, 0.5, 0.75)) + theme(legend.position = "none")
## Run replication studies
sum(p<0.05)
## Run replication simulations for 80% power
ap <- NULL
adiff <- NULL
for(i in 1:nrow(d1)){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50-5.781487, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
ap[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
adiff[i] <- mean(sample1) - mean(sample2)
}
## proportion significant
sum(ap<0.05)/nrow(d1)
## Compute mean difference between groups in all samples
mean(adiff)
## Run replication simulations for 50% power
ap1 <- NULL
adiff1 <- NULL
for(i in 1:nrow(e1)){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50-3.822626, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
ap1[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
adiff1[i] <- mean(sample1) - mean(sample2)
}
## proportion significant
sum(ap1<0.05)/nrow(e1)
## Compute mean difference between groups in all samples
mean(adiff1)
## Run replication simulations for 20% power
ap2 <- NULL
adiff2 <- NULL
for(i in 1:nrow(f1)){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50-1.865756, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
ap2[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
adiff2[i] <- mean(sample1) - mean(sample2)
}
## total significant
sum(ap2<0.05)/nrow(f1)
## Compute mean difference between groups in all samples
mean(adiff2)
## Run replication simulations for 5% power
ap3 <- NULL
adiff3 <- NULL
for(i in 1:nrow(g1)){
sample1 <- rnorm(10, 50, 5)
sample1 <- ifelse(sample1 < 0, 0, sample1)
sample2 <- rnorm(10, 50, 5)
sample2 <- ifelse(sample2 < 0, 0, sample2)
ap3[i] <- t.test(sample1, sample2, alternative = "greater")$p.value
adiff3[i] <- mean(sample1) - mean(sample2)
}
## total significant
sum(ap3<0.05)/nrow(g1)
## Compute mean difference between groups in all samples
mean(adiff3)
#### Plot overall p value distributions #####
## Combine replication p values from all four power levels; row order matches
## `sigresults`/`power` (80%, 50%, 20%, 5%).
replicationresults <- c(ap, ap1, ap2, ap3)
allresults <- data.frame("p" = sigresults, "power" = power, "replicationp" = replicationresults)
## Code provided by Manuel Bohn
library(tidyverse)
library(ggthemes)
## Long format: one row per (study type, p value), faceted by original power.
allresults2 <- allresults %>%
  gather(study, p, - power)%>%
  mutate(study = ifelse(study == "p", "Original", "Replication"))
## Density of original vs replication p values per power level.
## FIX: the original chain contained a stray "+ ylim()" line that broke the
## plot expression (unary + on an empty scale) and left scale_color_ptol() /
## scale_fill_ptol() as a disconnected statement that was never added to the
## plot. The scales are now chained onto the plot and the stray line removed.
ggplot(data = allresults2, aes(x = p, col = study, fill = study))+
  geom_density(alpha = .75)+
  geom_vline(xintercept = 0.05, lty = 2)+
  theme_bw() + xlim(0, 1) + facet_grid(power~.)+
  scale_color_ptol()+
  scale_fill_ptol()
## End Manuel Bohn code
setwd("~/PhD/Thesis/What can CC expect from replication studies")  # NOTE(review): hard-coded user path; prefer relative paths
## Jittered original p values by power level with quartile violins.
## NOTE(review): this reassigns `p`, previously the vector of 80%-power p
## values; harmless here because that vector is not read again, but renaming
## the plot object would be safer.
(p <- ggplot(allresults, aes(x = power, y = p, colour = power)) + geom_jitter() +
    geom_violin(draw_quantiles = c(0.25, 0.5, 0.75)) + theme_bw() + ylim(0,1) +
    theme(legend.position = "none") + ylab("Original p value") + theme(plot.margin = unit(c(1,1,1,1),"cm")))
ggsave("repp1.png", plot = p)
## Same layout for the replication p values.
(repp <- ggplot(allresults, aes(x = power, y = replicationp, colour = power)) + geom_jitter() +
    geom_violin(draw_quantiles = c(0.25, 0.5, 0.75)) + theme_bw() + theme(legend.position = "none") +
    ylab("Replication p value") + theme(plot.margin = unit(c(1,1,1,1),"cm")))
ggsave("repp2.png", plot = repp)
#### Secion 2 Stimulation Study ####
### Data simulation function - DeBruine and Barr 2019 - doi: 10.31234/osf.io/xp5cy
#### Load packages and set up functions
library("lme4") # model specification / estimation
library("afex") # anova and deriving p-values from lmer
library("broom.mixed") # extracting data from model fits
library("Rmisc")
# set up the custom data simulation function
## Simulate one looking-time data set with crossed random effects, following
## DeBruine & Barr (2019). Items are effect-coded (possible = -0.5,
## impossible = +0.5); by-subject intercept/slope pairs are drawn from a
## bivariate normal with correlation `scor`. Returns a data.frame with one
## row per subject x item containing the simulated looking time `LT`.
my_sim_data <- function(nsubj = 7, # number of subjects
nitem = c(possible = 5, impossible = 5), # number of items per category
b0 = 1000, # grand mean looking time
b1 = 200, # fixed effect of category (impossible minus possible)
I0i_sd = 5, # by-item random intercept sd
S0s_sd = 100, # by-subject random intercept sd
S1s_sd = 40, # by-subject random slope sd
scor = 0.2, # correlation between intercept and slope
err_sd = 200 # residual (standard deviation)
) {
# simulate items: ids, category labels, effect codes, random intercepts
items <- data.frame(
item_id = 1:sum(nitem),
category = rep(c("possible", "impossible"), nitem),
cat = rep(c(-0.5, +0.5), nitem), # effect-code category
I0i = rnorm(sum(nitem), 0, I0i_sd)
)
# simulate subjects: correlated (intercept, slope) pairs via 2x2 covariance
Sigma <- matrix(c(S0s_sd^2, S0s_sd * S1s_sd * scor,
S0s_sd * S1s_sd * scor, S1s_sd^2),
nrow = 2, byrow = TRUE)
S <-MASS::mvrnorm(nsubj, c(0, 0), Sigma)
subjects <- data.frame(
subj_id = 1:nsubj,
S0s = S[, 1],
S1s = S[, 2]
)
# simulate trials: full subject x item crossing plus residual error
trials <- expand.grid(subj_id = subjects$subj_id,
item_id = items$item_id)
trials$err = rnorm(nrow(trials), mean = 0, sd = err_sd)
# join subject and item tables (note: merge() re-sorts rows by the join keys)
joined <- merge(trials, subjects, by = "subj_id")
dat_sim <- merge(joined, items, by = "item_id")
# linear predictor: grand mean + random intercepts + (fixed + random) slope
dat_sim$LT <- b0 + dat_sim$I0i + dat_sim$S0s +
(b1 + dat_sim$S1s) * dat_sim$cat + dat_sim$err
dat_sim
}
# set up the power function
my_lmer_power <- function(...) {
  ## Simulate a single data set (every argument is forwarded unchanged to
  ## my_sim_data()) and fit a mixed model to it, returning the tidied fit
  ## so p values and estimates can be pooled across replicate simulations.
  simulated <- my_sim_data(...)
  ## Random intercepts by subject only (no random slopes), matching the
  ## reduced model used throughout the power simulations.
  fit <- lmer(LT ~ cat + (1 | subj_id), data = simulated, REML = FALSE)
  broom.mixed::tidy(fit)
}
#### Run simulations ####
## Running simulations for 1, 5 and 100 trials in each condition, with a large (200 msec), medium (100 msec)
## or small (50 msec), effect size (my definitions, not Cohen's).
set.seed(20)
## Number of simulations
nSims <- 10000
alpha <- 0.05
#### One trial per condition ####
## 200 msec effect size, 1 trial each condition
## here, using a paired t test as most lmers were singular/failed to converge
# set up power function for single trial
p1.200 <- NULL
est1.200 <- NULL
## p1.200lmer <- NULL code to run and get p vals from lmer not run for time purposes
## est1.200lmer <- NULL
for(i in 1:nSims){
dat_sim <- my_sim_data(b1=200, nitem = c(possible = 1, impossible = 1))
## mod_sim <- lmer(LT ~ cat + (1 | subj_id),
## dat_sim, REML = FALSE)
## p1.200lmer[i] <- broom.mixed::tidy(mod_sim)$p.value[2]
## est1.200lmer[i] <- broom.mixed::tidy(mod_sim)$estimate[2]
b <- spread(dat_sim[ , c(2, 6, 9)], category, LT)
p1.200[i] <- t.test(b$impossible, b$possible, paired = TRUE)$p.value
est1.200[i] <- t.test(b$impossible, b$possible, paired = TRUE)$estimate
}
## data.frame(est1.200lmer, est1.200)
## pwr.1.200lmer <- sum(p1.200lmer <0.05)/nSims
(pwr1.200 <- sum(p1.200<alpha)/nSims)
## number significant
sum(p1.200<alpha)
## nummber significant in wrong direction
sum(p1.200<alpha & est1.200 < 0 )
## plot an example
dat_sim <- my_sim_data(b1=200, nitem = c(possible = 1, impossible = 1))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot1.200 <- ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) + xlab(paste("power =", round(pwr1.200, digits = 2))) + ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
## 100 msec effect size, 1 trial each condition
## here, using a paired t test as most lmers were singular/failed to converge
# set up power function for single trial
p1.100 <- NULL
est1.100 <- NULL
for(i in 1:nSims){
dat_sim <- my_sim_data(b1=100, nitem = c(possible = 1, impossible = 1))
b <- spread(dat_sim[ , c(2, 6, 9)], category, LT)
p1.100[i] <- t.test(b$impossible, b$possible, paired = TRUE)$p.value
est1.100[i] <- t.test(b$impossible, b$possible, paired = TRUE)$estimate
}
(pwr1.100 <- sum(p1.100<alpha)/nSims)
mean(est1.100)
## plot an example
dat_sim <- my_sim_data(b1=100, nitem = c(possible = 1, impossible = 1))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot1.100 <- ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) + xlab(paste("power =", round(pwr1.100, digits = 2))) + ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
## 50 msec effect size, 1 trial each condition
## here, using a paired t test as most lmers were singular/failed to converge
# set up power function for single trial
p1.50 <- NULL
est1.50 <- NULL
for(i in 1:nSims){
dat_sim <- my_sim_data(b1=50, nitem = c(possible = 1, impossible = 1))
b <- spread(dat_sim[ , c(2, 6, 9)], category, LT)
p1.50[i] <- t.test(b$impossible, b$possible, paired = TRUE)$p.value
est1.50[i] <- t.test(b$impossible, b$possible, paired = TRUE)$estimate
}
(pwr1.50 <- sum(p1.50<alpha)/nSims)
mean(est1.50)
## plot an example
dat_sim <- my_sim_data(b1=50, nitem = c(possible = 1, impossible = 1))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot1.50 <-ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) + xlab(paste("power =", round(pwr1.50, digits = 2 ))) + ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
#### Five trials per condition ####
## 200 msec effect size
sims <- replicate(nSims, my_lmer_power(b1=200, nitem = c(possible = 5, impossible = 5)), simplify = FALSE)
sims <- lapply(sims, as.data.frame)
sims <- do.call("rbind", sims)
fcat <- sims[sims$effect == "fixed" & sims$term == "cat", ]
(pwr5.200 <- mean(fcat$p.value < alpha))
mean_estimate5.200 <- mean(fcat$estimate)
mean_se5.200 <- mean(fcat$std.error)
## plot an example
dat_sim <- my_sim_data(b1=200, nitem = c(possible = 5, impossible = 5))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot5.200 <-ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) + xlab(paste("power =", round(pwr5.200, digits = 2))) + ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
## 100 msec effect size
sims <- replicate(nSims, my_lmer_power(b1=100, nitem = c(possible = 5, impossible = 5)), simplify = FALSE)
sims <- lapply(sims, as.data.frame)
sims <- do.call("rbind", sims)
fcat <- sims[sims$effect == "fixed" & sims$term == "cat", ]
(pwr5.100 <- mean(fcat$p.value < alpha))
mean_estimate5.100 <- mean(fcat$estimate)
mean_se5.100 <- mean(fcat$std.error)
## plot an example
dat_sim <- my_sim_data(b1=100, nitem = c(possible = 5, impossible = 5))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot5.100 <-ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) + xlab(paste("power =", round(pwr5.100, digits = 2))) + ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
## 50 msec effect size
sims <- replicate(nSims, my_lmer_power(b1=50, nitem = c(possible = 5, impossible = 5)), simplify = FALSE)
sims <- lapply(sims, as.data.frame)
sims <- do.call("rbind", sims)
fcat <- sims[sims$effect == "fixed" & sims$term == "cat", ]
(pwr5.50 <- mean(fcat$p.value < alpha))
mean_estimate5.50 <- mean(fcat$estimate)
mean_se5.50 <- mean(fcat$std.error)
## plot an example
dat_sim <- my_sim_data(b1=50, nitem = c(possible = 5, impossible = 5))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot5.50 <-ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) + xlab(paste("power =", round(pwr5.50, 2))) + ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
#### One-hundred trials per condition ####
## 200 msec effect size
sims <- replicate(nSims, my_lmer_power(b1=200, nitem = c(possible = 100, impossible = 100)), simplify = FALSE)
sims <- lapply(sims, as.data.frame)
sims <- do.call("rbind", sims)
fcat <- sims[sims$effect == "fixed" & sims$term == "cat", ]
(pwr100.200 <- mean(fcat$p.value < alpha))
mean_estimate100.200 <- mean(fcat$estimate)
mean_se100.200 <- mean(fcat$std.error)
## plot an example
dat_sim <- my_sim_data(b1=200, nitem = c(possible = 100, impossible = 100))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot100.200 <- ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) +
xlab(paste("power =", round(pwr100.200, digits = 2))) +
ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
## 100 msec effect size
sims <- replicate(nSims, my_lmer_power(b1=100, nitem = c(possible = 100, impossible = 100)), simplify = FALSE)
sims <- lapply(sims, as.data.frame)
sims <- do.call("rbind", sims)
fcat <- sims[sims$effect == "fixed" & sims$term == "cat", ]
(pwr100.100 <- mean(fcat$p.value < alpha))
mean_estimate100.100 <- mean(fcat$estimate)
mean_se100.100 <- mean(fcat$std.error)
## plot an example
dat_sim <- my_sim_data(b1=100, nitem = c(possible = 100, impossible = 100))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
plot100.100 <- ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
geom_line() +
geom_point()+
geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
position=position_dodge(0.05)) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, size = 20),
strip.text = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.title.x = element_text(size = 20),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14)) + xlab(paste("power =", round(pwr100.100, digits =2 ))) + ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
## 50 msec effect size
sims <- replicate(nSims, my_lmer_power(b1=50, nitem = c(possible = 100, impossible = 100)), simplify = FALSE)
sims <- lapply(sims, as.data.frame)
sims <- do.call("rbind", sims)
fcat <- sims[sims$effect == "fixed" & sims$term == "cat", ]
(pwr100.50 <- mean(fcat$p.value < alpha))
mean_estimate100.50 <- mean(fcat$estimate)
mean_se100.50 <- mean(fcat$std.error)
## Plot an example data set: 50 msec effect, 100 trials per condition.
dat_sim <- my_sim_data(b1=50, nitem = c(possible = 100, impossible = 100))
dat_sim$subj_id <- factor(dat_sim$subj_id)
df <- summarySE(dat_sim, measurevar="LT", groupvars=c("category","subj_id"))
## Per-subject mean looking time by category with 95% CIs; the empirical power
## from the simulations above appears in the x-axis label.
plot100.50 <- ggplot(df, aes(x=category, y=LT, group=subj_id, color=subj_id)) +
  geom_line() +
  geom_point()+
  geom_errorbar(aes(ymin=LT-ci, ymax=LT+ci), width=.2,
                position=position_dodge(0.05)) +
  theme_bw() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, size = 20),
        strip.text = element_text(size = 20),
        axis.title.y = element_text(size = 20),
        axis.title.x = element_text(size = 20),
        axis.text.x = element_text(size = 14),
        axis.text.y = element_text(size = 14)) +
  xlab(paste("power =", round(pwr100.50, digits = 2))) +
  ## FIX: coord_cartesian(ylim = c(600, 1500)) was added twice in the
  ## original; the second call just replaced the first, so one call suffices.
  ylab("Looking Time") + coord_cartesian(ylim=c(600,1500))
#### Plot all ####
## FIX: removed par(mfrow = c(3, 3)) -- base-graphics par() has no effect on
## grid/ggplot2 output and only mutated global graphics state; the 3 x 3
## layout is produced by grid.arrange(nrow = 3) below.
library(gridExtra)
## Rows: 50, 100, 200 msec effects; columns: 1, 5, 100 trials per condition.
grid.arrange(plot1.50, plot5.50, plot100.50, plot1.100, plot5.100, plot100.100, plot1.200, plot5.200, plot100.200, nrow=3)
|
cc35489e7905385b9282e05e841fffb1a29c72b0
|
412d7ac8d78ee6eac43787b8a8f1883ebcffd6da
|
/R/S1_eval_embedding.R
|
0c5fdcc745138af6bf91298a7a32833eba2152de
|
[] |
no_license
|
fhaertner/GeometricAnalysisDMs
|
76669248c750ce7a919f545d046854c75d559140
|
d0c23d650a3bfa7f695df94e04689c07b59e8a27
|
refs/heads/master
| 2020-03-10T02:02:20.435952
| 2019-03-12T21:05:32
| 2019-03-12T21:05:32
| 129,126,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,709
|
r
|
S1_eval_embedding.R
|
# Given an igraph network, its temperature and coordinates, evaluates the
# embedding to H2
library(NetHypGeom)
library(cowplot)
library(dplyr)
# Load network and coordinates --------------------------------------------
outname <- "emb_eval_hpin"
#net <- readRDS("data/hint.rds")
#load("results/coords_hPIN_150k.RData")
#coords_lh <- coords
load("data/hPIN_150k.RData")
load("data/coords_hPIN_150k.RData")
net <- hPIN
coords_lh <- coords
# Network properties ------------------------------------------------------
N <- vcount(net)
avg_k <- mean(degree(net))
gma <- fit_power_law(degree(net))$alpha
beta <- 1 / (gma - 1) # Parameter controlling popularity fading
m <- round(avg_k/2) # Parameter controlling average node degree
# Connection probability --------------------------------------------------
conn <- get_conn_probs(net, coords_lh, bins = 20)
theo <- get_theoretical_conn_probs(conn$dist, N, avg_k, gma, Temp)
conn <- rbind(conn, theo)
conn$label <- rep(c("LaBNE+HM", "Theory"), each = 20)
p_conn <- ggplot(conn, aes(dist, prob+0.00001, colour = label, shape = label)) +
geom_point(size = 3) + geom_line() +
scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format(10^.x))) +
annotation_logticks(sides = "l") +
scale_colour_manual(values = c("#339e2b", "#e3196a")) +
labs(x = "Hyperbolic distance", y = "Connection probability") +
theme_bw() + theme(legend.title = element_blank(),
legend.background = element_blank(),
legend.justification = c(0, 0), legend.position = c(0, 0))
# Real degrees vs expected degrees ----------------------------------------
## Compare each node's observed degree with the degree expected under the
## PS/Fermi-Dirac model given the inferred hyperbolic coordinates.
degs <- tibble(k = degree(net), exp_k = numeric(N))
## Radius of the hyperbolic disk implied by the model parameters
## (N, half-average-degree m, popularity fading beta, temperature Temp).
R <- 2*log(N) -
  2*log((2*Temp*(1 - exp(-0.5*(1 - beta)*2*log(N))))/(sin(Temp*pi)*m*(1 - beta)))
## seq_len(N) rather than 1:N (safe if N were ever 0).
for(i in seq_len(N)){
  # Hyperbolic distance between node i and every node (including itself)
  d <- hyperbolic_dist(coords_lh[i, ], coords_lh)
  # Fermi-Dirac connection probability at temperature Temp
  prob <- 1 / (1 + exp((d - R)/(2*Temp)))
  # Expected degree = sum of connection probabilities.
  # NOTE(review): the sum includes node i itself (d = 0 contributes ~1 when
  # R > 0); confirm whether the self term should be excluded.
  degs$exp_k[i] <- sum(prob)
}
p_deg <- ggplot(degs, aes(k, round(exp_k))) + geom_point(size = 0.2) +
scale_x_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format())) +
scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format())) +
annotation_logticks() + labs(x = "Node degree", y = "Expected node degree") +
theme_bw()
# Clustering and routeing -------------------------------------------------
epochs <- 3
cc <- transitivity(net, "average")
theo_cc <- numeric(epochs)
# Source and target nodes based on linear indexing
st <- 1000
idx <- sample(N * N, st)
src <- ((idx - 1) %% N) + 1
trg <- floor((idx - 1) / N) + 1
stretches <- greedy_route_packets(net, coords_lh, src, trg)
gr <- sum(stretches > 0)/st
hs <- mean(stretches[stretches > 0])
theo_gr <- numeric(epochs)
theo_hs <- numeric(epochs)
for(i in 1:epochs){
ps_net <- ps_model(N = N, avg.k = avg_k, gma = gma, Temp = Temp)
theo_cc[i] <- transitivity(ps_net$network, "average")
stretches <- greedy_route_packets(ps_net$network, ps_net$polar, src, trg)
theo_gr[i] <- sum(stretches > 0)/st
theo_hs[i] <- mean(stretches[stretches > 0])
}
clust <- tibble(label = c("Real", "Theory"), cc = c(cc, mean(theo_cc)),
err = c(0, sd(theo_cc)))
lbl <- factor(c("Greedy routing (GR)", "GR Theory",
"Hop stretch (HS)", "HS Theory"),
levels = c("Greedy routing (GR)", "GR Theory",
"Hop stretch (HS)", "HS Theory"),
ordered = TRUE)
routeing <- tibble(label = lbl,
gr = c(gr, mean(theo_gr), hs, mean(theo_hs)),
err = c(0, sd(theo_gr), 0, sd(theo_hs)))
dodge <- position_dodge(width = 0.9)
p_cc <- ggplot(clust, aes(label, cc)) + geom_col(position = dodge, width = 0.5) +
geom_errorbar(aes(ymin = cc - err, ymax = cc + err),
position = dodge, width = 0.25) +
labs(x = "", y = "Clustering coefficient") + theme_bw()
p_gr <- ggplot(routeing, aes(label, gr)) + geom_col(position = dodge, width = 0.5) +
geom_errorbar(aes(ymin = gr - err, ymax = gr + err),
position = dodge, width = 0.25) +
labs(x = "", y = "Success rate (%) / Hop stretch") + theme_bw()
fig <- plot_grid(p_conn, p_deg, p_cc, p_gr, nrow = 2, ncol = 2, labels = letters[1:4])
save(fig, conn, degs, clust, routeing,
file = paste0("results/", outname, ".RData"))
|
523db8105675db8d47be179f06b90918195b5ae4
|
a816bcd3416d393ef0b933c968ed100124dee404
|
/BG_model/herb_resist_proccess_functions.R
|
d74fd2ebafc9b7861efa0ca0d9bf956e821855d3
|
[] |
no_license
|
ShaunCoutts/BG_herb_res
|
c97ec21f07d864faaaed2a1243a99d8b74a39113
|
8056b1320d262efe8208753b9c15922494a4e99b
|
refs/heads/master
| 2021-01-17T04:02:14.378459
| 2018-07-31T15:43:15
| 2018-07-31T15:43:15
| 42,309,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,202
|
r
|
herb_resist_proccess_functions.R
|
#set of functions for herbicide resitance model covering various processes like genotype production and seed bank dynamics
#working_loc = '/home/shauncoutts/Dropbox/projects/MHR_blackgrass/BG_model'
#setwd(working_loc)
#test_obj_name <- load('nonspatial_model_test_answer_key.Rdata') #load the test key
## EVAL_POINTS_BUILDER(lower_eval_point, upper_eval_point, resolution, seed_expantion)
## Build the evaluation grids over breeding value g. Returns a list with:
##   above_ground       - grid for evaluating above-ground plants
##   seed               - wider grid for seeds: the above-ground grid with both
##                        ends stretched by the factor seed_expantion
##   above_ground_index - positions of the above-ground grid inside the seed grid
## lower_eval_point / upper_eval_point = range of g for above-ground plants
## resolution = spacing between adjacent evaluation points
## seed_expantion = multiplier applied to both end points to widen the seed grid
eval_points_builder <- function(lower_eval_point, upper_eval_point, resolution, seed_expantion){
  ag_grid <- seq(lower_eval_point, upper_eval_point, resolution)
  lo <- ag_grid[1]
  hi <- ag_grid[length(ag_grid)]
  # extension below the above-ground window; drop its last point so the
  # boundary value is not duplicated when the pieces are concatenated
  lower_ext <- seq(lo * seed_expantion, lo, resolution)
  lower_ext <- lower_ext[1:(length(lower_ext) - 1)]
  # extension above the window; drop its first (duplicate) point
  upper_ext <- seq(hi, hi * seed_expantion, resolution)
  upper_ext <- upper_ext[2:length(upper_ext)]
  seed_grid <- c(lower_ext, ag_grid, upper_ext)
  list(above_ground = ag_grid,
    seed = seed_grid,
    above_ground_index = which(seed_grid %in% ag_grid))
}
## EVAL_POINTS_UPDATE()
## Narrow the above-ground evaluation window to where the above-ground
## population is non-negligible. Takes an eval-points object and a distribution
## of above-ground plants (evaluated on the seed grid) and returns the object
## with above_ground and above_ground_index updated.
## eval_points_object = list from EVAL_POINTS_BUILDER() or a previous call
## above_ground_dist = above-ground density on the seed grid (e.g. from EMERGENCE())
## density_cutoff = density above which a seed grid point is kept above ground
eval_points_update <- function(eval_points_object, above_ground_dist, density_cutoff){
  keep <- which(above_ground_dist > density_cutoff)
  ag <- eval_points_object$seed[keep]
  if (length(ag) < 10) {
    # too few points exceed the cutoff: fall back to the 10 highest densities
    top10 <- tail(sort(above_ground_dist), 10)
    ag <- eval_points_object$seed[which(above_ground_dist %in% top10)]
  }
  eval_points_object$above_ground <- ag
  eval_points_object$above_ground_index <- which(eval_points_object$seed %in% ag)
  eval_points_object
}
## QUANT_GEN_OFFSPRING_DISTRIBUTION(N_f, eval_points, additive_variance, seed_eval_points)
## Quantitative-genetics offspring kernel. Returns a matrix with one row per
## maternal breeding value in eval_points; row i is the distribution (over
## seed_eval_points) of offspring breeding values from a mother with breeding
## value eval_points[i], averaged over the paternal frequency distribution N_f.
## The conditional offspring distribution is normal, centred on the mid-parent
## breeding value with variance additive_variance.
## N_f = paternal population, already evaluated on eval_points (indexes must match)
## eval_points = breeding values at which parents are evaluated (evenly spaced)
## additive_variance = variance of the conditional offspring distribution
## seed_eval_points = breeding values at which offspring (seeds) are evaluated
quant_gen_offspring_distribution <- function(N_f, eval_points, additive_variance, seed_eval_points){
# grid spacing; assumes eval_points are evenly spaced
dN = eval_points[2] - eval_points[1]
additive_sd = sqrt(additive_variance)
# column 1 cycles fastest (father), column 2 is constant within each mother-block
eval_grid = cbind(rep.int(eval_points, length(eval_points)), rep(eval_points, each = length(eval_points)))#make every combination of evaluation points
N_fathers = N_f / sum(N_f) #turns the distribution of fathers into a frequency distribution
# flatten (parent pair) x (seed point) so a single dnorm call evaluates everything
vect_seed_eval_points = rep(seed_eval_points, times = length(eval_grid[,1]))
# offspring mean = mid-parent breeding value
vect_breed_val_means = rep(eval_grid[,1] * 0.5 + eval_grid[,2] * 0.5, each = length(seed_eval_points))
# one row per parental pair, one column per seed evaluation point
cond_offspring_dist = matrix(dnorm(vect_seed_eval_points, vect_breed_val_means, additive_sd), ncol = length(seed_eval_points), byrow = TRUE)
cond_offspring_dist = cond_offspring_dist * dN #scales the distribtuion so that it sums to one, the assumption being that all offspring produced by a parental combination have to have a breeding value
# N_fathers recycles down the rows, matching the fast-cycling father column
offspring_3D_kernel = cond_offspring_dist * N_fathers
# row i of summing_grid indexes the parental pairs that share mother i
summing_grid = matrix(1:(length(eval_points) * length(eval_points)), ncol = length(eval_points), byrow = TRUE)
# sum over fathers for each mother -> one offspring distribution per mother
t(apply(summing_grid, MARGIN = 1, FUN = function(x) colSums(offspring_3D_kernel[x, ])))
}
## SURVIVAL(N_0, eval_points, herb_rate, sur0, sur_cost_resist, herb_effect, survive_resist, max_sur, ceiling_pop)
## Survival of emerged plants, including the herbicide effect and a population
## ceiling. Returns the distribution of survivors over eval_points.
## N_0 = distribution of emerged individuals over g (evaluated on eval_points)
## eval_points = g values the population is evaluated on
## herb_rate = 0/1 flag: was herbicide applied
## sur0 = survival (in logits) when g = 0 and no herbicide
## sur_cost_resist = survival cost per unit of resistance score g
## herb_effect = herbicide kill effect (in logits)
## survive_resist = protective effect of one unit of g against herbicide
## max_sur = maximum attainable survival probability
## ceiling_pop = density ceiling; establishment is rescaled to this total if exceeded
survival <- function(N_0, eval_points, herb_rate, sur0, sur_cost_resist, herb_effect, survive_resist, max_sur, ceiling_pop){
  # logit-scale survival: baseline - resistance cost - herbicide kill, where
  # the kill is reduced by resistance but capped so protection cannot make
  # herbicide beneficial
  logit_sur <- sur0 - sur_cost_resist * eval_points -
    herb_rate * (herb_effect - pmin(herb_effect, survive_resist * eval_points))
  sur_prob <- max_sur / (1 + exp(-logit_sur))
  established <- N_0 * sur_prob
  # density dependence: rescale to the ceiling when total establishment exceeds it
  total <- sum(established)
  if (total > ceiling_pop) {
    established <- established * (ceiling_pop / total)
  }
  return(established)
}
## FECUNDITY(N_m, eval_points, fec_max, fec0, fec_cost, N_f, additive_variance, seed_eval_points)
## Seed production. Returns the distribution of new seeds over seed_eval_points
## produced by the maternal population N_m, with offspring breeding values drawn
## from QUANT_GEN_OFFSPRING_DISTRIBUTION().
## N_m = maternal distribution over g (on eval_points); N_f = paternal distribution
## eval_points = g values for above-ground plants; seed_eval_points = g values for seeds
## fec_max = maximum seeds per mother
## fec0 = fecundity (in logits) at g = 0; fec_cost = logit reduction per unit of g
## additive_variance = variance of the conditional offspring distribution
## Caller must ensure N_m and N_f are evaluated on eval_points.
fecundity <- function(N_m, eval_points, fec_max, fec0, fec_cost, N_f, additive_variance, seed_eval_points){
  step <- eval_points[2] - eval_points[1]
  # per-mother seed output is logistic in g: resistance reduces fecundity
  logit_fec <- fec0 - fec_cost * eval_points
  seeds_by_g <- N_m * (fec_max / (1 + exp(-logit_fec)))
  offspring_kernel <- quant_gen_offspring_distribution(N_f, eval_points, additive_variance, seed_eval_points)
  # weight each mother's offspring distribution by her seed output and
  # integrate over mothers (step = grid spacing)
  colSums(seeds_by_g * offspring_kernel) * step
}
## SEEDBANK()
## Next year's seed bank over the seed evaluation grid: seeds that survive and
## do not germinate, plus newly produced seeds from FECUNDITY().
## seedbank0 = last timestep's seed-bank distribution over g
## seed_survival = yearly survival probability of a seed in the bank
## germination = germination probability
## eval_object = grids from EVAL_POINTS_BUILDER() (above_ground and seed)
## N_m / N_f = maternal / paternal above-ground distributions (often identical)
## fec_max, fec0, fec_cost, additive_variance = fecundity parameters (see FECUNDITY())
seedbank <- function(seedbank0, seed_survival, germination, eval_object, N_m, fec_max, fec0, fec_cost, N_f, additive_variance){
  # seeds that persist: survive the year and stay dormant
  persisting <- seedbank0 * seed_survival * (1 - germination)
  # new seeds, evaluated on the wider seed grid
  new_seeds <- fecundity(N_m = N_m, eval_points = eval_object$above_ground,
    fec_max = fec_max, fec0 = fec0, fec_cost = fec_cost, N_f = N_f,
    additive_variance = additive_variance, seed_eval_points = eval_object$seed)
  persisting + new_seeds
}
## EMERGENCE()
## Distribution of individuals that germinate out of the seed bank this year.
## seedbank_current = seed-bank distribution over g (from SEEDBANK())
## germination = germination probability
emergence <- function(seedbank_current, germination){
  # germinating fraction of the seed bank, per evaluation point
  germination * seedbank_current
}
## SINGLE_INTERATION()
## Advance the seed bank one timestep: germination -> update the above-ground
## evaluation window -> survival (herbicide + density ceiling) -> reproduction
## back into the seed bank. Returns next timestep's seed-bank distribution over
## the seed evaluation grid.
## seedbank_current = seed-bank distribution over g (from SEEDBANK() or a previous call)
## germination = germination probability
## seed_survival = yearly seed-bank survival probability
## eval_object = grids from EVAL_POINTS_BUILDER() (above ground and seed)
## herb_rate = 0/1 flag: herbicide applied this timestep
## sur0, sur_cost_resist, herb_effect, survive_resist, max_sur, ceiling_pop = survival parameters (see SURVIVAL())
## fec_max, fec0, fec_cost, additive_variance = fecundity parameters (see FECUNDITY())
## density_cutoff = density above which seed grid points stay in the above-ground window
single_iteration <- function(seedbank_current, germination, eval_object, herb_rate, sur0, sur_cost_resist, herb_effect, survive_resist, max_sur, ceiling_pop, seed_survival, fec_max, fec0, fec_cost, additive_variance, density_cutoff){
new_plants = emergence(seedbank_current = seedbank_current, germination = germination)
# narrow the above-ground window to where emergent density is non-negligible;
# must happen before survival so the index below matches the new window
eval_object = eval_points_update(eval_points_object = eval_object, new_plants, density_cutoff = density_cutoff) #update evaluation window
# survival is evaluated only on the (narrower) above-ground grid
survivors = survival(N_0 = new_plants[eval_object$above_ground_index], eval_points = eval_object$above_ground, herb_rate = herb_rate, sur0 = sur0, sur_cost_resist = sur_cost_resist,
herb_effect = herb_effect, survive_resist = survive_resist, max_sur = max_sur, ceiling_pop = ceiling_pop)
# survivors serve as both maternal and paternal populations (N_m = N_f)
new_seedbank = seedbank(seedbank0 = seedbank_current, seed_survival = seed_survival, germination = germination, eval_object = eval_object,
N_m = survivors, fec_max = fec_max, fec0 = fec0, fec_cost = fec_cost, N_f = survivors, additive_variance = additive_variance)
new_seedbank
}
## MULTI_ITERATION()
## Run the simulation for num_iter timesteps. Returns a num_iter x
## length(eval_object$seed) matrix; row t is the seed-bank distribution over g
## after timestep t.
## num_iter = number of timesteps to simulate (must be >= 1)
## initial_seedbank = starting seed-bank distribution over g
## herb_schedual = 0/1 vector of length num_iter: herbicide application per timestep
## germination, seed_survival = seed-bank transition probabilities
## eval_object = grids from EVAL_POINTS_BUILDER()
## sur0, sur_cost_resist, herb_effect, survive_resist, max_sur, ceiling_pop = survival parameters (see SURVIVAL())
## fec_max, fec0, fec_cost, additive_variance = fecundity parameters (see FECUNDITY())
## density_cutoff = density above which seed grid points stay in the above-ground window
multi_iteration <- function(num_iter, initial_seedbank, herb_schedual, germination, eval_object, sur0, sur_cost_resist, herb_effect, survive_resist, max_sur, ceiling_pop, seed_survival, fec_max, fec0, fec_cost, additive_variance, density_cutoff){
results = matrix(NA, nrow = num_iter, ncol = length(eval_object$seed))
# first timestep seeds from the supplied initial distribution
results[1, ] = single_iteration(seedbank_current = initial_seedbank, germination = germination, eval_object = eval_object, herb_rate = herb_schedual[1], sur0 = sur0, sur_cost_resist = sur_cost_resist,
herb_effect = herb_effect, survive_resist = survive_resist, max_sur = max_sur, ceiling_pop = ceiling_pop, seed_survival = seed_survival, fec_max = fec_max, fec0 = fec0,
fec_cost = fec_cost, additive_variance = additive_variance, density_cutoff = density_cutoff)
# seq_len(num_iter - 1) + 1 rather than 2:num_iter: when num_iter == 1 the
# old form iterated over c(2, 1) and indexed past the results matrix
for(i in seq_len(num_iter - 1) + 1){
results[i, ] = single_iteration(seedbank_current = results[i - 1, ], germination = germination, eval_object = eval_object, herb_rate = herb_schedual[i], sur0 = sur0, sur_cost_resist = sur_cost_resist,
herb_effect = herb_effect, survive_resist = survive_resist, max_sur = max_sur, ceiling_pop = ceiling_pop, seed_survival = seed_survival, fec_max = fec_max, fec0 = fec0,
fec_cost = fec_cost, additive_variance = additive_variance, density_cutoff = density_cutoff)
}
results
}
## SEEDBANK_ANIMATOR()
## Animate the seed-bank distribution over time: one frame per row of
## results_matrix (from MULTI_ITERATION()), drawn as a filled polygon over the
## seed evaluation grid. Frames are red when herbicide was applied that
## timestep, blue otherwise. Side effects only (plots); returns nothing useful.
## results_matrix = timestep x seed-grid matrix of seed-bank densities
## eval_object = grids from EVAL_POINTS_BUILDER() (x axis is eval_object$seed)
## herb_schedual = 0/1 herbicide schedule, one entry per row of results_matrix
## pause = seconds to sleep between frames
## ... = further arguments forwarded to plot()
seedbank_animator <- function(results_matrix, eval_object, herb_schedual, pause = 1, ...){
# common y limit so the axis does not jump between frames
max_value = max(results_matrix)
for(i in 1:dim(results_matrix)[1]){
plot(eval_object$seed, eval_object$seed, type = 'n', ylim = c(0, max_value), bty = 'n', xlab = 'resistance score',
main = paste0(ifelse(herb_schedual[i] == 0, 'No herbicide applied', 'Herbicide being applied'), '\nturn ', i), ...)
polygon(x = eval_object$seed, y = results_matrix[i, ], col = ifelse(herb_schedual[i] == 0, 'skyblue', 'red'))
Sys.sleep(pause)
}
}
#Check the test results
#Regression-test the model functions against a saved answer key.
#file_loc = directory containing 'nonspatial_model_test_answer_key.Rdata'.
#Each key element stores $question (setup code as a text string -- hence the
#eval(parse()) calls, which the harness format requires) and $answer (the
#expected output). Prints one pass/fail message per function.
test_functions_broken <- function(file_loc){
old_wd <- getwd()
setwd(file_loc)
on.exit(setwd(old_wd), add = TRUE) #restore the caller's working directory on exit
test_obj_name <- load('nonspatial_model_test_answer_key.Rdata') #load the test key
eval(parse(text = nonspace_test_answer_key[[1]]$question))#set parameters for the test run
test1 = quant_gen_offspring_distribution(N_f = N_f, eval_points = eval_all$above_ground, additive_variance = additive_variance, seed_eval_points = eval_all$seed) #get the output form the current version of the function
eval(parse(text = nonspace_test_answer_key[[2]]$question))#set parameters for the test run
test2 = survival(N_0 = N_0, eval_points = eval_points, herb_rate = herb_rate, sur0 = sur0, sur_cost_resist = sur_cost_resist, herb_effect = herb_effect, survive_resist = survive_resist, max_sur = max_sur, ceiling_pop = ceiling_pop)
eval(parse(text = nonspace_test_answer_key[[3]]$question))#set parameters for the test run
test3 = eval_points_builder(lower_eval_point = lower_eval_point, upper_eval_point = upper_eval_point, resolution = resolution, seed_expantion = seed_expantion)
eval(parse(text = nonspace_test_answer_key[[4]]$question))#set parameters for the test run
test4 = fecundity(N_m = N_m, eval_points = eval_all$above_ground, fec_max = fec_max, fec0 = fec0, fec_cost = fec_cost, N_f = N_f, additive_variance = additive_variance, seed_eval_points = eval_all$seed)
eval(parse(text = nonspace_test_answer_key[[5]]$question))#set parameters for the test run
test5 = seedbank(seedbank0 = seedbank0 * 100, seed_survival = seed_survival, germination = germination, eval_object = eval_object, N_m = N_m, fec_max = fec_max, fec0 = fec0, fec_cost = fec_cost, N_f = N_f, additive_variance = additive_variance)
eval(parse(text = nonspace_test_answer_key[[6]]$question))#set parameters for the test run
test6 = emergence(seedbank_current = seedbank_current, germination = germination)
eval(parse(text = nonspace_test_answer_key[[7]]$question))#set parameters for the test run
test7 = single_iteration(seedbank_current = seedbank_current, germination = germination, eval_object = eval_object, herb_rate = herb_rate, sur0 = sur0, sur_cost_resist = sur_cost_resist,
herb_effect = herb_effect, survive_resist = survive_resist, max_sur = max_sur, ceiling_pop = ceiling_pop, seed_survival = seed_survival, fec_max = fec_max, fec0 = fec0,
fec_cost = fec_cost, additive_variance = additive_variance, density_cutoff = density_cutoff)
eval(parse(text = nonspace_test_answer_key[[8]]$question))#set parameters for the test run
test8 = eval_points_update(eval_points_object = eval_points_object, above_ground_dist = above_ground_dist, density_cutoff = density_cutoff)
eval(parse(text = nonspace_test_answer_key[[9]]$question))#set parameters for the test run
test9 = multi_iteration(num_iter = num_iter, initial_seedbank = initial_seedbank, germination = germination, eval_object = eval_object, herb_schedual = herb_schedual, sur0 = sur0, sur_cost_resist = sur_cost_resist,
herb_effect = herb_effect, survive_resist = survive_resist, max_sur = max_sur, ceiling_pop = ceiling_pop, seed_survival = seed_survival, fec_max = fec_max, fec0 = fec0,
fec_cost = fec_cost, additive_variance = additive_variance, density_cutoff = density_cutoff)
#isTRUE() is required here: on a mismatch all.equal() returns a character
#vector (not FALSE), which made the old ifelse(all.equal(...)) error out
#instead of reporting the failure
test_results <- ifelse(isTRUE(all.equal(test1, nonspace_test_answer_key[[1]]$answer)), 'QUANT_GEN_OFFSPRING_DISTRIBUTION() still fine', 'Something you did broke the function QUANT_GEN_OFFSPRING_DISTRIBUTION()')
test_results[2] <- ifelse(identical(test2, nonspace_test_answer_key[[2]]$answer), 'SURVIVAL() still fine', 'Something you did broke the function SURVIVAL()')
test_results[3] <- ifelse(identical(test3, nonspace_test_answer_key[[3]]$answer), 'EVAL_POINTS_BUILDER() still fine', 'Something you did broke the function EVAL_POINTS_BUILDER()')
test_results[4] <- ifelse(identical(test4, nonspace_test_answer_key[[4]]$answer), 'FECUNDITY() still fine', 'Something you did broke the function FECUNDITY()')
test_results[5] <- ifelse(identical(test5, nonspace_test_answer_key[[5]]$answer), 'SEEDBANK() still fine', 'Something you did broke the function SEEDBANK()')
test_results[6] <- ifelse(identical(test6, nonspace_test_answer_key[[6]]$answer), 'EMERGENCE() still fine', 'Something you did broke the function EMERGENCE()')
test_results[7] <- ifelse(identical(test7, nonspace_test_answer_key[[7]]$answer), 'SINGLE_INTERATION() still fine', 'Something you did broke the function SINGLE_INTERATION()')
test_results[8] <- ifelse(identical(test8, nonspace_test_answer_key[[8]]$answer), 'EVAL_POINTS_UPDATE() still fine', 'Something you did broke the function EVAL_POINTS_UPDATE()')
test_results[9] <- ifelse(identical(test9, nonspace_test_answer_key[[9]]$answer), 'MULTI_ITERATION() still fine', 'Something you did broke the function MULTI_ITERATION()')
print(test_results)
}
#NOTE TO SELF add non-heritable variance in resitance in the fecundity function so individuals can be resistant through their life time, basically n(g) should be n(g, z) and resistance should then
#be a function of g and z, so that survival is actually a distribtuion for each element of eval_points do simple version for now.
##area to test things with throw away code#######################################################################################################################
#library(microbenchmark)
#speed_test = microbenchmark(quant_gen_offspring_distribution(N_f = N_f, eval_points = eval_all$above_ground, additive_variance = additive_variance, seed_eval_points = eval_all$seed),
# quant_gen_offspring_distribution_vect(N_f = N_f, eval_points = eval_all$above_ground, additive_variance = additive_variance, seed_eval_points = eval_all$seed),
# times = 100)
#speed_test #turns out the fully vectorised version is much a bit slower which was unexpected but possibly due to large number of multiplications and additions required
#out1 = quant_gen_offspring_distribution(N_f = N_f, eval_points = eval_all$above_ground, additive_variance = additive_variance, seed_eval_points = eval_all$seed)
#out2 = quant_gen_offspring_distribution_vect(N_f = N_f, eval_points = eval_all$above_ground, additive_variance = additive_variance, seed_eval_points = eval_all$seed)
#all.equal(out1, out2)
|
9c0cfdaae553c14a37d24f0780af6cb24c501c8b
|
dbc0c9c17314a709824f167225cdd45839e4f56c
|
/chap3/tf_idf_phy.R
|
3290f1a0cdf812dfbff783d52fe69e9b465bb561
|
[] |
no_license
|
trisgelar/tidytextmining
|
3c608b5aaf741ccbab3309f4e65d4a9e7bc86f86
|
46ef954e7a74987ffd0d4d0e68ac2bba9a43a85d
|
refs/heads/master
| 2020-03-25T06:26:56.170153
| 2018-08-06T09:34:01
| 2018-08-06T09:34:01
| 143,502,140
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,059
|
r
|
tf_idf_phy.R
|
# tf-idf analysis of four classic physics texts from Project Gutenberg.
# Assumes dplyr, tidytext and the pipe are already attached by the session
# (this chunk only loads the packages it introduces).
library(gutenbergr)
# Download the four books, keeping the author column for grouping.
physics <- gutenberg_download(
  c(37729, 14725, 13476, 5001),
  meta_fields = "author"
)
# One row per (author, word) with its count, most frequent first.
physics_words <- physics %>%
  unnest_tokens(word, text) %>%
  count(author, word, sort = TRUE) %>%
  ungroup()
physics_words
# Attach tf-idf scores; order authors chronologically for the facets.
plot_physics <- physics_words %>%
  bind_tf_idf(word, author, n) %>%
  arrange(desc(tf_idf)) %>%
  mutate(word = factor(word, levels = rev(unique(word)))) %>%
  mutate(author = factor(author, levels =
    c(
      "Galilei, Galileo",
      "Huygens, Christiaan",
      "Tesla, Nikola",
      "Einstein, Albert"
    )))
# Top 15 tf-idf words per author.
plot_physics %>%
  group_by(author) %>%
  top_n(15, tf_idf) %>%
  ungroup() %>%
  mutate(word = reorder(word, tf_idf)) %>%
  ggplot(aes(word, tf_idf, fill = author)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~author, ncol = 2, scales = "free") +
  coord_flip()
library(stringr)
# Inspect suspicious high tf-idf tokens: equation labels and figure tags.
physics %>%
  filter(str_detect(text, "eq\\.")) %>%
  select(text)
physics %>%
  filter(str_detect(text, "K1")) %>%
  select(text)
physics %>%
  filter(str_detect(text, "AK")) %>%
  select(text)
# Custom stop words: typesetting artefacts, not physics vocabulary.
# tibble() replaces the deprecated data_frame().
mystopwords <- tibble(word =
  c(
    "eq", "co", "rc", "ac", "ak", "bn",
    "fig", "file", "cg", "cb", "cm"
  ))
physics_words <- anti_join(physics_words, mystopwords, by = "word")
# Recompute tf-idf on the cleaned counts and re-plot.
plot_physics <- physics_words %>%
  bind_tf_idf(word, author, n) %>%
  arrange(desc(tf_idf)) %>%
  mutate(word = factor(word, levels = rev(unique(word)))) %>%
  group_by(author) %>%
  top_n(15, tf_idf) %>%
  ungroup() %>%
  mutate(author = factor(author, levels =
    c(
      "Galilei, Galileo",
      "Huygens, Christiaan",
      "Tesla, Nikola",
      "Einstein, Albert"
    )))
library(ggplot2)
ggplot(plot_physics, aes(word, tf_idf, fill = author)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~author, ncol = 2, scales = "free") +
  coord_flip()
|
439122625063ffb40ae84ebaa1a1d61b9f22119a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/powdR/examples/powdRlib.Rd.R
|
38d5678e053a391790022a412b89896b259f5088
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
powdRlib.Rd.R
|
# Extracted example for powdR::powdRlib(): builds an X-ray diffraction (XRD)
# reference library from a table of patterns plus a matching phase table.
library(powdR)
### Name: powdRlib
### Title: Create an XRD reference library
### Aliases: powdRlib
### ** Examples
#load an example xrd_table
data(minerals_xrd)
#load an example phases_table
data(minerals_phases)
#Create a reference library object
xrd_lib <- powdRlib(xrd_table = minerals_xrd,
phases_table = minerals_phases)
|
2a720e39aa91816ef1d5d0f936fe718718d7b73d
|
b82635a880c39fed94e77ad850968a32db1f603a
|
/Constrained_Optimization_Lagrange_Multiplier.R
|
ae4a536e22db69bba9eb04b0b454acd29d492110
|
[] |
no_license
|
btemovska/Machine_Learning_OSU_class
|
b70b84602d64fa6e4d43dd02ba423ce2d961a5a7
|
836205e27771447e7980898b3323dae51514d3b7
|
refs/heads/master
| 2023-01-08T02:13:51.247815
| 2020-11-13T03:11:52
| 2020-11-13T03:11:52
| 306,176,695
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 958
|
r
|
Constrained_Optimization_Lagrange_Multiplier.R
|
# Constrained optimisation via Lagrange multipliers with Rsolnp::solnp():
# minimise f(x) = x1^2 + 2*x2^2 + 5*x3^2  subject to  x1 + x2 + x3 = 2.
# Only install the package when missing -- an unconditional install.packages()
# re-downloads on every run of the script.
if (!requireNamespace("Rsolnp", quietly = TRUE)) {
  install.packages("Rsolnp")
}
library(Rsolnp)
# fn1 returns the objective value for an input vector x (length 3)
fn1 <- function(x) {
  x[1]^2 + 2 * x[2]^2 + 5 * x[3]^2
}
# eqn1 returns the equality-constraint value; solnp drives it to eqB (= 2)
eqn1 <- function(x) {
  z1 <- x[1] + x[2] + x[3]
  return(c(z1))
}
# initial values
x0 <- c(0, 0, 0)
x_optimal <- solnp(x0, fun = fn1, eqfun = eqn1, eqB = c(2))
# Saved solver output for reference:
##Iter: 1 fn: 1.9097	 Pars: 1.06946 0.51417 0.21782
#Iter: 2 fn: 2.3529	 Pars: 1.17647 0.58824 0.23529
#Iter: 3 fn: 2.3529	 Pars: 1.17647 0.58824 0.23529
#solnp--> Completed in 3 iterations
x_optimal$pars
#1.1764706 0.5882353 0.2352941
x_optimal$convergence
# 0
x_optimal$values
#0.000000 1.909693 2.352941 2.352941
x_optimal$lagrange
#         [,1]
#[1,] 2.352942
x_optimal$hessian
#           [,1]      [,2]       [,3]
#[1,]  1.9084214 -1.019843 -0.5157249
#[2,] -1.0198432  2.209799 -1.3718505
#[3,] -0.5157249 -1.371851  9.0934124
x_optimal$elapsed
#Time difference of 0.04440808 secs
|
fedd9e065269d408fa3ad370339859375083381c
|
8940282cdb736e66f8a48b3533ba180630172f17
|
/c/hello/src/Object.r
|
ebf8f76913ad0631509a775864fbe673fb6ceca1
|
[] |
no_license
|
bsdelf/research
|
306e2fccb122bd2956aaa5219ac779cd07a6dc8c
|
a9f6739603e9e804f9079f29183ec777be4bd714
|
refs/heads/master
| 2021-01-02T23:12:16.842153
| 2012-05-02T05:21:26
| 2012-05-02T05:21:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
Object.r
|
#ifndef Object_r
#define Object_r
/*
 * Type descriptor for a hand-rolled object system in C. The function
 * pointers act as the type's virtual table; which code fills these slots
 * is defined elsewhere -- confirm against the implementation file.
 */
struct Class
{
/* presumably the size in bytes of one instance, used by the allocator -- TODO confirm */
size_t size;
/* constructor: app carries the caller's variadic argument list */
void* (* ctor)(void* self, va_list* app);
/* destructor */
void* (* dtor)(void* self);
/* copy an existing instance */
void* (* clone)(const void* self);
/* compare self with b; int result (semantics defined by each type) */
int (* differ)(const void* self, const void* b);
};
#endif
|
00b8d1c99576a7dc867fd2efbca7fe361fd8532f
|
c92d5cb354087582a6ee725e789d891c2d6e5cc7
|
/man/remove_bottom.Rd
|
f7732daace719f17d2056679355ffeba5aa11151
|
[
"MIT"
] |
permissive
|
josesamos/flattabler
|
bc6063389362843f8181e1724c1f18304529d9d0
|
6ac67dc39d52653a286bd0a0cb15f545de75cdc4
|
refs/heads/master
| 2023-09-04T09:01:00.265314
| 2023-08-13T09:05:08
| 2023-08-13T09:05:08
| 275,351,769
| 3
| 0
|
NOASSERTION
| 2023-08-09T19:17:23
| 2020-06-27T10:47:12
|
R
|
UTF-8
|
R
| false
| true
| 1,412
|
rd
|
remove_bottom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pivot_table.R
\name{remove_bottom}
\alias{remove_bottom}
\alias{remove_bottom.pivot_table}
\title{Remove bottom rows from a pivot table}
\usage{
remove_bottom(pt, n)
\method{remove_bottom}{pivot_table}(pt, n)
}
\arguments{
\item{pt}{A \code{pivot_table} object.}
\item{n}{A number, number of rows to remove.}
}
\value{
A \code{pivot_table} object.
}
\description{
Remove bottom rows from the pivot table represented by the object.
}
\details{
A pivot table should only contain label rows and columns, and an array of
values, usually numeric data.
All rows not belonging to the pivot table must be removed. It is common to
find rows with footer information, which must be removed.
This function is very useful because it is not necessary to know the number
of rows in the table.
}
\examples{
pt <- pt_ex |> remove_bottom(3)
}
\seealso{
\code{\link{pivot_table}}
Other pivot table transformation functions:
\code{\link{extract_labels}()},
\code{\link{fill_labels}()},
\code{\link{fill_values}()},
\code{\link{remove_agg}()},
\code{\link{remove_cols}()},
\code{\link{remove_empty}()},
\code{\link{remove_k}()},
\code{\link{remove_left}()},
\code{\link{remove_right}()},
\code{\link{remove_rows}()},
\code{\link{remove_top}()},
\code{\link{replace_dec}()},
\code{\link{unpivot}()}
}
\concept{pivot table transformation functions}
|
5118d574e4dfa922fd138a58b30dd4069d49b2ac
|
56d196a66c8bbbd195478446d48b299eaa0cbb64
|
/Chapter8/Problem62.R
|
cb284258939567a24f9914dec196f9b529cc165d
|
[] |
no_license
|
aulchen/rice_stats
|
bbe8370faae2ead11b68fad3fc489abb571bac53
|
13705602642b5c470d80f2edd23384b30092786d
|
refs/heads/master
| 2023-07-16T04:30:34.610651
| 2021-08-25T20:31:53
| 2021-08-25T20:31:53
| 271,953,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,129
|
r
|
Problem62.R
|
# Grid of lambda values at which the densities are evaluated.
distSeq <- seq(0, 0.6, by = .01)
# Prior/posterior density pairs for two gamma priors on lambda.
partA <- list(
  prior = function(x) dgamma(x, shape = .25, rate = .5),
  posterior = function(x) dgamma(x, shape = 20.25, rate = 102.5)
)
partB <- list(
  prior = function(x) dgamma(x, shape = .25, rate = .025),
  posterior = function(x) dgamma(x, shape = 20.25, rate = 102.025)
)
# Plot prior vs. posterior for each gamma prior and write each figure to a
# PNG. NOTE(review): the output path is machine-specific (relative to a
# particular home layout) -- confirm/parameterise before running elsewhere.
png('../Desktop/Projects/Stat135/Chapter8/output/Problem62Plot_a.png', type = 'cairo',
width = 640, height = 480)
plot(distSeq, partA$prior(distSeq), type = 'l', ylim = c(0, 10),
xlab = 'Lambda', ylab = 'Density', main = 'Gamma(.25, .5) Prior')
# dashed line = posterior, solid = prior (matches the legend below)
lines(distSeq, partA$posterior(distSeq), type = 'l', lty = 2)
legend(x = 'topright', legend = c('Prior', 'Posterior'), lty = c(1, 2))
dev.off()
# Same plot for the second (more diffuse) prior.
png('../Desktop/Projects/Stat135/Chapter8/output/Problem62Plot_b.png', type = 'cairo',
width = 640, height = 480)
plot(distSeq, partB$prior(distSeq), type = 'l', ylim = c(0, 10),
xlab = 'Lambda', ylab = 'Density', main = 'Gamma(.25, .025) Prior')
lines(distSeq, partB$posterior(distSeq), type = 'l', lty = 2)
legend(x = 'topright', legend = c('Prior', 'Posterior'), lty = c(1, 2))
dev.off()
|
e45c922196153c7bfb6511d098751ba02785efb8
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/MultivariateRandomForest/man/Imputation.Rd
|
a939bde280c6fcf8313a7a20033e1fab3d8097a8
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 578
|
rd
|
Imputation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Imputation.R
\name{Imputation}
\alias{Imputation}
\title{Imputation of a numerical vector}
\usage{
Imputation(XX)
}
\arguments{
\item{XX}{a vector of size N x 1}
}
\value{
Imputed vector of size N x 1
}
\description{
Imputes the values of the vector that are NaN
}
\details{
If a value is missing, it will be replaced by an imputed value that is an average of previous and
next value. If previous or next value is also missing, the closest value is used as the imputed value.
}
|
acaf61879d62f2c7672c6950d96ceceb0bff3a72
|
00eb5271deba1c20f9445643085bf7dc89566661
|
/Activity5.R
|
d5b096c9d842d422cbbafa5979fa7aaccf40ca34
|
[] |
no_license
|
mmcgrawcolgate/GEOG331
|
c6ab13a8380af562ce1fadb071e23073494c670d
|
25e833cbecc858f3d03c51d153774d4e430eb9f6
|
refs/heads/master
| 2023-02-06T03:51:33.214733
| 2020-12-18T18:26:04
| 2020-12-18T18:26:04
| 292,890,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,691
|
r
|
Activity5.R
|
#ACTIVITY 5 CODE
#by: Matt McGraw
##READING IN DATA AND MAKING DATAFRAMES##
library(lubridate)
#read in streamflow data file; "Eqp" cells are treated as missing
datH <- read.csv("stream_flow_data.csv",
na.strings = c("Eqp"))
#read in precip data file (NOAA station download, presumably -- confirm)
datP <- read.csv("2049867.csv")
#keep only quality-approved discharge records (flag "A")
datD <- datH[datH$discharge.flag == "A",]
###QUESTION 1###
#no code
###QUESTION 2 CODE###
#defining time for streamflow
#convert date string (m/d/Y) to Date
datesD <- as.Date(datD$date, "%m/%d/%Y")
#get day of year
datD$doy <- yday(datesD)
#calculate year
datD$year <- year(datesD)
#parse the hours:minutes time-of-day column
timesD <- hm(datD$time)
#defining time for precipitation: DATE holds both date and time
dateP <- ymd_hm(datP$DATE)
#get day of year
datP$doy <- yday(dateP)
#get year
datP$year <- year(dateP)
#decimal formats for datD
#convert time from a string to a more usable format with a decimal hour
datD$hour <- hour(timesD ) + (minute(timesD )/60)
#get full decimal time (day of year + fraction of the day)
datD$decDay <- datD$doy + (datD$hour/24)
#calculate a decimal year, but account for leap year (366 vs 365 days)
datD$decYear <- ifelse(leap_year(datD$year),datD$year + (datD$decDay/366),
datD$year + (datD$decDay/365))
#decimal formats for datP (same transformation as for datD)
datP$hour <- hour(dateP ) + (minute(dateP )/60)
#get full decimal time
datP$decDay <- datP$doy + (datP$hour/24)
#calculate a decimal year, but account for leap year
datP$decYear <- ifelse(leap_year(datP$year),datP$year + (datP$decDay/366),
datP$year + (datP$decDay/365))
###QUESTION 3 CODE###
#inspect the length and sampling frequency of both dataframes
#(use head() below to compare observation intervals)
length(datP$STATION)
length(datD$site_no)
head(datP, 15)
head(datD, 15)
###QUESTION 4 CODE###
#plot the full discharge record against decimal year
plot(datD$decYear, datD$discharge, type="l", xlab="Year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
#help pages consulted for the axis-label expression syntax
help("expression")
help("paste")
###QUESTION 5 CODE###
#daily mean and standard deviation of discharge across all years
aveF <- aggregate(datD$discharge, by=list(datD$doy), FUN="mean")
colnames(aveF) <- c("doy","dailyAve")
sdF <- aggregate(datD$discharge, by=list(datD$doy), FUN="sd")
colnames(sdF) <- c("doy","dailySD")
#using 2017 as year to add line: compute daily means for 2017 only
d2017 <- datD[datD$year==2017,]
average2017 <- aggregate(d2017$discharge, by=list(d2017$doy), FUN="mean")
colnames(average2017) <- c("doy", "dailyAve")
#format of plot, extra line added
#bigger margins
par(mai=c(1,1,1,1))
#make plot of the all-year daily mean
plot(aveF$doy,aveF$dailyAve,
type="l",
xlab="Month",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")),
lwd=2,
ylim=c(0,90),
xaxs="i", yaxs ="i",#remove gaps from axes
axes=FALSE)#no axes
#shaded band = daily mean +/- 1 standard deviation
polygon(c(aveF$doy, rev(aveF$doy)), #x coordinates
c(aveF$dailyAve-sdF$dailySD,rev(aveF$dailyAve+sdF$dailySD)), #ycoord
col=rgb(0.392, 0.584, 0.929,.2), #color that is semi-transparent
border=NA #no border
)
#line added based on 2017 averages, made purple for contrast
lines(average2017$doy, average2017$dailyAve, col="purple")
#month ticks every 31 days are approximate, so later ticks drift slightly
axis(1, seq(1,365, by=31), #tick intervals
lab = c("jan","feb","mar","apr","may","jun","jul","aug",
"sep","oct","nov","dec")) #tick labels for month
axis(2, seq(0,80, by=20),
seq(0,80, by=20),
las = 2) #show ticks at 90 degree angle
#NOTE(review): lwd/col/pch are recycled across the 3 legend items;
#pch=c(NA,15) recycles to c(NA,15,NA) -- confirm this matches the intent
legend("topright", c("mean","1 standard deviation","2017 mean"), #legend items
lwd=c(2,NA,2),#lines
col=c("black",rgb(0.392, 0.584, 0.929,.2),"purple"),#colors
pch=c(NA,15),#symbols
bty="n")#no legend border
###QUESTION 6 CODE###
#no code
###QUESTION 7 CODE###
library(dplyr)
#identify precipitation days that have all 24 hourly observations
total_hours <- datP %>% group_by(year, doy) %>%
count()
fulldays <- total_hours[total_hours$n == 24,]
#build a "doy_year" key so days can be matched across data frames
datP <- datP %>% mutate(doy_year = paste(doy, year, sep= "_"))
fulldays <- fulldays %>% mutate(doy_year = paste(doy, year, sep= "_"))
#complete == 1 when the observation falls on a fully-observed day
datP$complete <- ifelse(datP$doy_year %in% fulldays$doy_year, 1, 0)
#create plot with days with 24 hours added
par(mai=c(1,1,1,1))
plot(datD$decYear,datD$discharge,
type="l",
xlab="Year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")),
lwd=2,
ylim=c(0,400),
xaxs="i", yaxs ="i")
#add one point at y=350 for each day with 24 hours of precipitation
#NOTE(review): m = i + 24*(i-1) + 12 indexes the midday row of the i-th
#complete day; this assumes the rows of `good` form contiguous 24-hour
#blocks in day order -- confirm, otherwise points land on wrong days.
#Also `good` is recomputed on every iteration; it could be hoisted out.
for (i in 1:nrow(fulldays)){
good <- datP[datP$complete == 1,]
m <- i + 24*(i-1) + 12
points(good[m,"decYear"], 350, pch=20, col="maroon", cex=0.7)
}
title("Yearly Discharge with Days with 24 Hours of Precip")
###QUESTION 8 CODE###
##FIRST HYDROGRAPH CODE##
#subset a two-day window (doy 248-249) of 2011 for the hydrograph
hydroD <- datD[datD$doy >= 248 & datD$doy < 250 & datD$year == 2011,]
hydroP <- datP[datP$doy >= 248 & datP$doy < 250 & datP$year == 2011,]
#get minimum and maximum range of discharge to plot
#go outside of the range so that it's easy to see high/low values
#floor rounds down the integer
yl <- floor(min(hydroD$discharge))-1
#ceiling rounds up to the integer
yh <- ceiling(max(hydroD$discharge))+1
#minimum and maximum range of precipitation to plot
pl <- 0
pm <- ceiling(max(hydroP$HPCP))+.5
#scale precipitation linearly to fit on the discharge axis
hydroP$pscale <- (((yh-yl)/(pm-pl)) * hydroP$HPCP) + yl
par(mai=c(1,1,1,1))
#make plot of discharge
plot(hydroD$decDay,
hydroD$discharge,
type="l",
ylim=c(yl,yh),
lwd=2,
xlab="Day of year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
#add bars to indicate precipitation (one narrow rectangle per observation)
for(i in 1:nrow(hydroP)){
polygon(c(hydroP$decDay[i]-0.017,hydroP$decDay[i]-0.017,
hydroP$decDay[i]+0.017,hydroP$decDay[i]+0.017),
c(yl,hydroP$pscale[i],hydroP$pscale[i],yl),
col=rgb(0.392, 0.584, 0.929,.2), border=NA)
}
###QUESTION 9 CODE###
#load in ggplot
library(ggplot2)
#day-of-year on which each season starts
spring <- 60
summer <- 152
fall <- 244
winter <- 335
#subset each year into its own dataframe
datD2k16 <- datD[datD$year==2016,]
datD2k17 <- datD[datD$year==2017,]
#label each observation with its season; days before `spring` fall through
#to the final "Winter" branch together with days >= `winter`
datD2k16$seasons <- ifelse(datD2k16$decDay >= spring & datD2k16$decDay < summer, "Spring",
ifelse(datD2k16$decDay >= summer & datD2k16$decDay < fall, "Summer",
ifelse(datD2k16$decDay >= fall & datD2k16$decDay < winter, "Fall",
"Winter")))
datD2k17$seasons <- ifelse(datD2k17$decDay >= spring & datD2k17$decDay < summer, "Spring",
ifelse(datD2k17$decDay >= summer & datD2k17$decDay < fall, "Summer",
ifelse(datD2k17$decDay >= fall & datD2k17$decDay < winter, "Fall",
"Winter")))
#create violin plots of discharge by season, one per year
plot2k16 <- ggplot(data = datD2k16, aes(seasons, discharge)) +geom_violin()
plot2k17 <- ggplot(data = datD2k17, aes(seasons, discharge)) +geom_violin()
###QUESTION 10 CODE###
#no code, see doc
|
a0954921d81899e8a8a06b724fb8ac2bb01cfd4b
|
e1e27fea18045b209988f932d8bcf47f1135a546
|
/Hurricane-Ike.R
|
cc6c1048202384443547cb010168da5fba5e3fec
|
[] |
no_license
|
GabrielAlejandra/Build-a-New-Geom
|
a909236083dc0430650b89efd7fde1dc25e31789
|
87284977788095bd2c2efdf4665f9ea14405c32d
|
refs/heads/master
| 2021-09-01T17:14:45.962273
| 2017-12-28T02:47:44
| 2017-12-28T02:47:44
| 115,577,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,420
|
r
|
Hurricane-Ike.R
|
# NOTE(review): install.packages() on every run re-installs dependencies;
# consider guarding with requireNamespace() or moving out of the script.
install.packages("tidyverse")
install.packages("geosphere")
install.packages("ggmap")
install.packages("ggplot2")
library("readr")
#library("tidyverse")
#library("geosphere")
library("ggplot2")
library("ggmap")
library("magrittr")
#' Read the data base: NOAA extended best-track file in fixed-width format.
#' Column widths and names follow the ebtrk file layout; -99 encodes NA.
ext_tracks_widths <- c(7, 10, 2, 2, 3, 5, 5, 6, 4, 5, 4, 4, 5, 3, 4, 3, 3, 3,
4, 3, 3, 3, 4, 3, 3, 3, 2, 6, 1)
ext_tracks_colnames <- c("storm_id", "storm_name", "month", "day",
"hour", "year", "latitude", "longitude",
"max_wind", "min_pressure", "rad_max_wind",
"eye_diameter", "pressure_1", "pressure_2",
paste("radius_34", c("ne", "se", "sw", "nw"), sep = "_"),
paste("radius_50", c("ne", "se", "sw", "nw"), sep = "_"),
paste("radius_64", c("ne", "se", "sw", "nw"), sep = "_"),
"storm_type", "distance_to_land", "final")
ext_tracks <- read_fwf("ebtrk_atlc_1988_2015.txt",
fwf_widths(ext_tracks_widths, ext_tracks_colnames),
na = "-99")
ext_tracks
#' Data cleaning: combine storm name and year into storm_id; convert longitude
#' so the W hemisphere is negative (0 to -180) and the E hemisphere positive
#' (0 to 180); combine the date/time columns into a single POSIXct date-time.
ext_tracks <- ext_tracks %>%
dplyr::mutate(storm_id=paste(storm_name,"-",year,sep=""),
longitude=ifelse(longitude>180,360-longitude,-longitude),
date = as.POSIXct(paste(year,month,day,hour),format="%Y %m %d %H")
)
ext_tracks
#' Convert the data to a "long" format: one row per wind-speed threshold
#' (34/50/64 kt) with one column per quadrant radius (ne/nw/se/sw).
ext_tracks_34 <- ext_tracks %>%
dplyr::mutate(wind_speed=34) %>%
dplyr::rename(ne=radius_34_ne,nw=radius_34_nw,se=radius_34_se,sw=radius_34_sw) %>%
dplyr::select(storm_id,date,latitude,longitude,wind_speed,ne,nw,se,sw)
ext_tracks_50 <- ext_tracks %>%
dplyr::mutate(wind_speed=50) %>%
dplyr::rename(ne=radius_50_ne,nw=radius_50_nw,se=radius_50_se,sw=radius_50_sw) %>%
dplyr::select(storm_id,date,latitude,longitude,wind_speed,ne,nw,se,sw)
ext_tracks_64 <- ext_tracks %>%
dplyr::mutate(wind_speed=64) %>%
dplyr::rename(ne=radius_64_ne,nw=radius_64_nw,se=radius_64_se,sw=radius_64_sw) %>%
dplyr::select(storm_id,date,latitude,longitude,wind_speed,ne,nw,se,sw)
ext_tracks<-rbind(ext_tracks_34,ext_tracks_50,ext_tracks_64)
ext_tracks
#' Filter out the single Hurricane Ike observation used for the wind map.
Ike_obs<-dplyr::filter(ext_tracks,storm_id=="IKE-2008",date=="2008-09-13 12:00:00")
#' Stat for creating wind radii chart data
#'
#' This stat takes the hurricane observation data containing the latitude,
#' longitude and ne/nw/se/sw wind radii and, via compute_group, creates the
#' x and y polygon coordinates for a wind radius chart: one polygon per wind
#' speed threshold (34, 50 and 64 knots).
#'
#' @param data A data frame containing the required aesthetics passed from the
#'   geom associated with the stat. The first row's x/y is taken as the storm
#'   center; the fill aesthetic carries the wind speed threshold.
#'
#' @param scales A list with the range for the x and y axes
#'
#' @return compute_group returns a data frame with the wind radii map x and y
#'   coordinates by group for each wind speed group
StatHurr <- ggproto("StatHurr", Stat,
  compute_group = function(data, scales) {
    # Storm-center coordinates, shared by all three polygons.
    xob <- data$x[1]
    yob <- data$y[1]

    # Build the quadrant-wise polygon for one wind-speed threshold.
    # Radii are in nautical miles; destPoint() wants meters, hence * 1852.
    # The center point is prepended as the first row so the geom can
    # recover it for rscale-based shrinking.
    wind_poly <- function(speed, grp) {
      obs <- dplyr::filter(data, fill == speed)
      ne <- geosphere::destPoint(c(xob, yob), b = 0:90, d = obs$ne * 1852)
      se <- geosphere::destPoint(c(xob, yob), b = 90:180, d = obs$se * 1852)
      sw <- geosphere::destPoint(c(xob, yob), b = 180:270, d = obs$sw * 1852)
      nw <- geosphere::destPoint(c(xob, yob), b = 270:360, d = obs$nw * 1852)
      cbind(group = grp,
            colour = as.character(speed),
            fill = as.character(speed),
            as.data.frame(rbind(c(xob, yob), ne, se, sw, nw)))
    }

    # One polygon per threshold; replaces the original three copy-pasted
    # 34/50/64 blocks with a single helper (identical output).
    wind_cht <- data.frame(rbind(wind_poly(34, 1L),
                                 wind_poly(50, 2L),
                                 wind_poly(64, 3L)))
    # destPoint() names its columns lon/lat; the geom expects x/y.
    colnames(wind_cht)[4] <- "x"
    colnames(wind_cht)[5] <- "y"
    wind_cht
  },
  required_aes = c("x", "y", "fill", "ne", "nw", "se", "sw")
)
#' Layer constructor for the wind-radii stat
#'
#' Thin wrapper around ggplot2's layer() that attaches StatHurr to a plot.
#' The arguments mirror the standard ggplot2 stat_*() interface.
#'
#' @param mapping Aesthetic mapping created by aes(); combined with the plot
#'   mapping when inherit.aes = TRUE.
#' @param data Data for this layer; defaults to the plot data.
#' @param geom Name of the geom used to render the stat's output.
#' @param position Position adjustment, either a string or the result of a
#'   position adjustment function.
#' @param na.rm If FALSE (default) missing values are removed with a warning;
#'   if TRUE they are removed silently.
#' @param show.legend Should this layer appear in the legend? NA (default)
#'   means "if any aesthetics are mapped".
#' @param inherit.aes If FALSE, use only the aesthetics supplied here instead
#'   of combining with the plot defaults.
#' @param ... Additional parameters forwarded to the layer.
#'
#' @return A ggplot2 layer object.
stat_hurr <- function(mapping = NULL, data = NULL, geom = "polygon",
                      position = "identity", na.rm = FALSE, show.legend = NA,
                      inherit.aes = TRUE, ...) {
  hurr_layer <- layer(
    stat = StatHurr,
    geom = geom,
    data = data,
    mapping = mapping,
    position = position,
    params = list(na.rm = na.rm, ...),
    show.legend = show.legend,
    inherit.aes = inherit.aes
  )
  hurr_layer
}
#' Geom for creating polygon group with wind radii charts
#'
#' Based on the polygon geom: takes the data produced by StatHurr, shrinks it
#' by the optional rscale aesthetic, and renders it with grid::polygonGrob()
#' in draw_group. rscale scales the wind radii to a fraction of the maximum
#' radii (1.0, the default, is full size).
#'
#' @param data a data frame passed by StatHurr; the first row of each group is
#'   the storm center (StatHurr prepends it), which is what makes the rscale
#'   shrinking below possible.
#'
#' @param panel_scales a list containing information about the scales in the
#'   current panel.
#'
#' @param coord a coordinate specification
#'
#' @return a grid polygonGrob for the group
# Fix: the ggproto class name was "GeomPolygon", which mislabelled this
# object's class; per ggplot2 extension conventions it should match the
# object, i.e. "GeomHurricane".
GeomHurricane <- ggproto("GeomHurricane", Geom,
                         required_aes = c("x", "y"),
                         default_aes = aes(
                           colour = "black", fill = "grey20", size = 0.5,
                           linetype = 1, alpha = .6, rscale = 1.0
                         ),
                         draw_key = draw_key_polygon,
                         draw_group = function(data, panel_scales, coord) {
                           # Linear interpolation of every vertex towards the
                           # center (row 1): rscale = 1 keeps the full chart,
                           # rscale = 0 collapses it onto the center point.
                           data$x <- data$x[1] * (1 - data$rscale) + data$x * data$rscale
                           data$y <- data$y[1] * (1 - data$rscale) + data$y * data$rscale
                           coords <- coord$transform(data, panel_scales)
                           grid::polygonGrob(
                             coords$x, coords$y,
                             default.units = "native",
                             gp = grid::gpar(
                               col = coords$colour,
                               lwd = coords$size * .pt,
                               fill = scales::alpha(coords$fill, coords$alpha),
                               lty = coords$linetype
                             )
                           )
                         }
)
#' Layer constructor for the wind-radii geom
#'
#' Thin wrapper around ggplot2's layer() that attaches GeomHurricane to a
#' plot. The arguments mirror the standard ggplot2 geom_*() interface.
#'
#' @param mapping Aesthetic mapping created by aes(); combined with the plot
#'   mapping when inherit.aes = TRUE.
#' @param data Data for this layer; defaults to the plot data.
#' @param stat The statistical transformation to use on the data for this
#'   layer, as a string (defaults to the wind-radii stat "hurr").
#' @param position Position adjustment, either a string or the result of a
#'   position adjustment function.
#' @param na.rm If FALSE (default) missing values are removed with a warning;
#'   if TRUE they are removed silently.
#' @param show.legend Should this layer appear in the legend? NA (default)
#'   means "if any aesthetics are mapped".
#' @param inherit.aes If FALSE, use only the aesthetics supplied here instead
#'   of combining with the plot defaults.
#' @param ... Additional parameters forwarded to the layer.
#'
#' @return A ggplot2 layer object.
geom_hurricane <- function(mapping = NULL, data = NULL, stat = "hurr",
                           position = "identity", na.rm = FALSE, show.legend = NA,
                           inherit.aes = TRUE, ...) {
  hurr_layer <- layer(
    geom = GeomHurricane,
    stat = stat,
    data = data,
    mapping = mapping,
    position = position,
    params = list(na.rm = na.rm, ...),
    show.legend = show.legend,
    inherit.aes = inherit.aes
  )
  hurr_layer
}
# Build the base map and overlay the Ike wind-radii chart on it.
map_data <- get_map("Louisiana", zoom = 6, maptype = "toner-background")
base_map <- ggmap(map_data, extent = "device")
#NOTE(review): the manual colour/fill values are matched to the wind_speed
#levels in their sorted order (34, 50, 64) -- confirm red/orange/yellow is
#the intended mapping.
Ike_Map <- (base_map +
geom_hurricane(data = Ike_obs, aes(x = longitude, y = latitude,
ne = ne, se = se,
nw = nw, sw = sw,
fill = wind_speed,
color = wind_speed)) +
ggtitle("Ike windmap for 2008-09-13 12:00:00") +
theme(plot.margin=grid::unit(c(0.5,0.5,0.5,0.5), "in")) +
scale_color_manual(name = "Wind speed (kts)",
values = c("red", "orange", "yellow")) +
scale_fill_manual(name = "Wind speed (kts)",
values = c("red", "orange", "yellow")))
# Print the map, then save it (ggsave() without plot= saves the last plot)
Ike_Map
ggsave("Hurricane Ike_Map.pdf")
|
7c4875c364a56798673eb03855a68769561f98d9
|
280302213d20c55dae9b555a6ec2890767ffd5c8
|
/Project 1/plot3.R
|
3b6be623da089cff5b75c36ab235d17cfde72ff3
|
[] |
no_license
|
amiles2233/ExData
|
7b34ff81da75b91f8e29f91f5ae0449778f4aea9
|
39113bec0c5eca7d169c8df08aa61b4b8ef5ef0a
|
refs/heads/master
| 2021-01-20T21:29:22.861741
| 2015-09-27T19:53:46
| 2015-09-27T19:53:46
| 41,609,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 665
|
r
|
plot3.R
|
# Set Working Directory
# NOTE(review): hard-coded absolute path; prefer running the script from the
# project root instead of calling setwd() in shared code.
wkdir <- "C:/Users/amile_000/Documents/Coursera Data Science/Exploratory Data Analysis/Project 1"
setwd(wkdir)

# Read in Data
# Fix: source() takes a file path string; the bare name `read_data.R` was
# evaluated as an (undefined) object and errored at runtime.
source("read_data.R")

# Plot 3: energy sub-metering over time, written to a 480x480 PNG
png(filename = "plot3.png",
    width = 480, height = 480, units = "px",
    bg = "white")
plot(x=dat$DateTime, y=dat$Sub_metering_1, col="black", type="l",
     ylab="Energy sub metering", xlab="")
lines(x=dat$DateTime, y=dat$Sub_metering_2, col="red", type="l")
lines(x=dat$DateTime, y=dat$Sub_metering_3, col="blue", type="l")
# Fix: legend colours now match the plotted line colours
# (Sub Metering 2 is drawn red, Sub Metering 3 blue; the legend previously
# listed them as blue and red respectively).
legend("topright",c("Sub Metering 1","Sub Metering 2", "Sub Metering 3"),
       lty=c(1,1,1),lwd=c(2.5,2.5,2.5),col=c("black","red","blue"))
dev.off()
|
3cea7969281f8dc2ade2a163cd565c52dbf2475a
|
48d3c07dbcfd56902fd73f2b436f180703b565bf
|
/wk2/corr-with-apply.R
|
4f81934d6bd32dfef38bf322a399e724496216c2
|
[] |
no_license
|
fosdick/datasciencecoursera
|
445798c60ff91967ce125c417d1568dc79bb3bb4
|
8091a52040224287d4705c5155ca153f98423915
|
refs/heads/master
| 2021-09-15T01:26:09.209577
| 2018-03-04T20:14:49
| 2018-03-04T20:14:49
| 118,483,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,032
|
r
|
corr-with-apply.R
|
#' Count complete observation rows per monitor file.
#'
#' @param directory Path to the folder holding the monitor csv files,
#'   named 001.csv, 002.csv, ...
#' @param id Integer vector of monitor ids to read (default 1:332).
#' @return A data.frame with columns `id` (space-padded-to-width-3 string)
#'   and `nobs`, the number of rows with no missing values in that
#'   monitor's file.
complete <- function(directory, id = 1:332) {
  # Build one path per requested monitor, e.g. "specdata/001.csv".
  csvfiles <- sprintf("%s/%03d.csv", directory, id)
  # Count fully observed rows once per file. (The original read every file
  # twice -- once for `nrows`, once inside data.frame() -- and then called
  # nrow() on that vector, which returns NULL.)
  nobs <- vapply(csvfiles,
                 function(f) sum(complete.cases(read.csv(f)), na.rm = TRUE),
                 integer(1),
                 USE.NAMES = FALSE)
  # NOTE(review): "%3d" space-pads the id (e.g. "  1") while the filenames
  # use "%03d"; kept as-is to preserve the original output format.
  data.frame(id = sprintf("%3d", id),
             nobs = nobs,
             row.names = NULL)
}
#' Correlation between sulfate and nitrate per monitor file.
#'
#' @param directory Path to the folder holding the monitor csv files.
#' @param threshold Minimum number of complete rows a file must have for its
#'   correlation to be included (default 0).
#' @return Numeric vector with one correlation per qualifying file.
corr <- function(directory, threshold = 0) {
  # Every monitor file in the directory, in list.files() (sorted) order.
  fileList <- list.files(directory)
  # Correlation for one file, or NA when below the completeness threshold.
  file_cor <- function(f) {
    data <- read.csv(f)
    if (sum(complete.cases(data), na.rm = TRUE) > threshold) {
      cor(data$sulfate, data$nitrate, use = "complete.obs")
    } else {
      NA_real_
    }
  }
  tr <- vapply(file.path(directory, fileList), file_cor, numeric(1),
               USE.NAMES = FALSE)
  # Keep only qualifying monitors. The original preallocated zeros and used
  # `if (d)` on the numeric correlation, which (a) left a 0 placeholder for
  # every non-qualifying file in the returned vector, and (b) errored
  # ("argument is not interpretable as logical") whenever cor() returned NA,
  # e.g. for a constant column.
  tr[!is.na(tr)]
}
|
48f9ac8faec26f23b9b14553ca88c68f2919118f
|
dae6befcea92b6171d6e592d58ecb7c499a2ae9a
|
/R/is-inconsistent-terms.R
|
0f6576df3b13c6eb63a9275f0af78865053ca2d1
|
[
"MIT"
] |
permissive
|
krlmlr/term
|
7b85ba675bbdff76e28e89d3c20c6726bd253303
|
f46b5b47455330ce3130ad858ac36055663dfa3d
|
refs/heads/master
| 2020-12-27T08:22:42.952071
| 2020-02-01T23:46:58
| 2020-02-01T23:46:58
| 237,830,909
| 0
| 0
|
NOASSERTION
| 2020-02-02T20:20:03
| 2020-02-02T20:20:02
| null |
UTF-8
|
R
| false
| false
| 542
|
r
|
is-inconsistent-terms.R
|
#' Is Inconsistent Terms
#'
#' Checks a term vector for elements that are inconsistent with each other.
#'
#' @inheritParams params
#' @return A logical scalar: `TRUE` if at least one element is inconsistent,
#'   `FALSE` otherwise (including for an empty vector).
#' @seealso [term-vector()] and [consistent_term()]
#' @export
#'
#' @examples
#' is_inconsistent_terms(as.term("b[2]"))
#' is_inconsistent_terms(as.term(c("b[2]", "b[1]")))
#' is_inconsistent_terms(as.term(c("b[2]", "b[1,1]")))
is_inconsistent_terms <- function(x, ...) {
  # An empty vector has nothing to disagree with itself.
  if (length(x) == 0L) {
    return(FALSE)
  }
  # Equivalent to any(!consistent_term(x)), NA propagation included.
  !all(consistent_term(x))
}
|
87bea30d94a7329e9826657c4036e0b354264192
|
909d7b3eee81c7c55f1a6e40a8e466ebb29e59cb
|
/PCA_IRIS.R
|
0dd24a16534e72b7148e46f4b623afe0ac494e71
|
[
"MIT"
] |
permissive
|
malunjkarusa/DataAnalytics-R
|
2046248c4a38cf34533c923cd134a5a5a675e43f
|
c5b3eff41d1fd9e96d72609007fdbd656d5da32c
|
refs/heads/master
| 2020-07-29T17:31:47.786249
| 2020-02-01T21:06:31
| 2020-02-01T21:06:31
| 209,902,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,827
|
r
|
PCA_IRIS.R
|
# pacman is a package manager package - you just need to install pacman once,
# and load it in every new session. Then p_load(), from pacman, installs
# (if not already installed) and loads all the packages passed as arguments.
install.packages("pacman") # install the package pacman
library(pacman) # load the package in the current session
p_load("factoextra") # install (if not already) and load the package factoextra. if already installed,
# this will only load the package without installing again(redundant)
# PCA on IRIS dataset
# NOTE(review): this masks the built-in `iris` dataset with the csv copy;
# the code below assumes the csv has the same layout (4 numeric feature
# columns followed by Species) -- confirm.
iris <- read.csv("iris.csv")
# Only the first four columns are the features and we will use them in our PCA
# prcomp is available in base package, we also need to center and scale our variables- recommended whenever
# we use a distance measure in our algorithm
pca_iris <- prcomp(iris[,-5], center = TRUE, scale. = TRUE) # we remove the fifth column for PCA
summary(pca_iris) # Examine the results of PCA model
# We note that the first component explains almost 73% and 2nd component explains around 23%, first two explain almost 96%
P_components <- pca_iris$x #extract principal components
fviz_pca_ind(pca_iris, # This will plot individual observations in a scatter plot where PC1 and PC2 are the two axes.
label = "none", # hide individual labels
habillage = iris$Species, # color by groups
palette = c("#00AFBB", "#E7B800", "#FC4E07"),
addEllipses = TRUE # Concentration ellipses
)
|
fc05096ec1bb0495e2c5c6f936f044e1c73aae2c
|
1482c0c2e994197d04c2149eb19ce2f313cd7a45
|
/man/selectControlsHier.Rd
|
3d502a9efb4409fc439bd1e68c37e9c11d6c35bb
|
[
"MIT"
] |
permissive
|
alexloboda/SVDFunctions
|
4adffe4b7e101a68b5cf756d8fefee45610303c5
|
666dbc820f81a3ab03e706fea380deaeb1d6f4f5
|
refs/heads/master
| 2023-05-11T13:44:28.623205
| 2023-03-28T15:12:38
| 2023-03-28T15:12:38
| 153,036,232
| 6
| 1
| null | 2019-05-14T17:17:20
| 2018-10-15T01:28:35
|
C++
|
UTF-8
|
R
| false
| true
| 1,179
|
rd
|
selectControlsHier.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hier.R
\name{selectControlsHier}
\alias{selectControlsHier}
\title{Select a set of controls that populationally matches a set of cases.}
\usage{
selectControlsHier(
controlGMatrix,
originalControlGMatrix,
cases,
SVDReference,
controlsMean,
clusterMergeCoef = 1.1,
softMinLambda = 0.9,
softMaxLambda = 1.05,
...
)
}
\arguments{
\item{controlGMatrix}{numeric matrix(0 - ref, 1 - het, 2 - both alt).
Intermediate values are allowed, NAs are not.}
\item{originalControlGMatrix}{integer matrix(0 - ref, 1 - het, 2 - both alt)
Missing values are allowed.}
\item{cases}{result of calling function readInstanceFromYml.}
\item{SVDReference}{reference basis of the left singular vectors.}
\item{controlsMean}{mean value of the reference genotypes.}
\item{clusterMergeCoef}{numeric coefficient of preference of merging clusters.}
\item{softMinLambda}{desirable minimum for lambda.}
\item{softMaxLambda}{desirable maximum for lambda.}
\item{...}{parameters to be passed to selectControls function.}
}
\description{
Select a set of controls that populationally matches a set of cases.
}
|
7687737a4e7522422380e3a041833df73aad1b5f
|
feb501c5f88cc204b6617ddddbbe4aa1b914ef63
|
/man/as.triangle.Rd
|
34f813fe6836152d2cfc01352847b85c694f8454
|
[] |
no_license
|
stjordanis/trinalysis
|
50ddc9c0251abc524470b6bc4d2bc40e178a3ba1
|
a8dfbe66cae49854832611aa63e84e9a4a1dbb43
|
refs/heads/master
| 2020-06-28T02:06:34.002320
| 2019-05-06T14:34:13
| 2019-05-06T14:34:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 898
|
rd
|
as.triangle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.triangle.R
\name{as.triangle}
\alias{as.triangle}
\title{as.triangle}
\usage{
as.triangle(triangleDT, valueCol = "Transactions",
descriptiveHeaders = TRUE, idCols = "Cohort")
}
\arguments{
\item{triangleDT}{A triangle in tall, data.table format}
\item{valueCol}{Name of the column to convert to triangular format}
\item{descriptiveHeaders}{Should headers be descriptive?}
\item{idCols}{Name(s) of the identifier column(s) (default \code{"Cohort"})}
}
\description{
Convert a single triangle from tall format to triangular format
}
\details{
Returns a matrix object with rows representing origin periods and columns representing valuation ages
}
\examples{
library(data.table)
set.seed(2357)
transactions <- sample_transactions(10)
triangles <- make_triangles(transactions, format="tall")
as.triangle(triangles, valueCol="ActiveCustomers")
as.triangle(triangles, valueCol="Transactions")
}
|
92b5a4113ee2288b70fc3c80bb74a77b3d6db02f
|
90cb71d7d5e4b0169d5b3fe8272c11d59471dff7
|
/R/utils/OsmDataApiDownload.R
|
7f6b4a582330d8b6fc5e1a457ebb3ea6c215b4eb
|
[] |
no_license
|
davibicudo/matsim-toy
|
3958201c3330782b4e202ca525054a4152a179ee
|
69866f3f23859bc42390e7a2a23ba99f40e8c650
|
refs/heads/master
| 2022-12-21T05:55:55.677946
| 2022-12-16T23:51:57
| 2022-12-16T23:51:57
| 189,230,803
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,334
|
r
|
OsmDataApiDownload.R
|
# Download the OSM "highway" network for a city via the Overpass API and
# write it to "osm_data.osm" in the working directory (side effect only;
# nothing useful is returned).
#   city_name    city used for the bounding-box lookup
#   country_name country appended to the lookup (e.g. "Zurich, Switzerland")
get_osm_data <- function(city_name, country_name) {
# print variables used in function (everything local except opt/option_list
# and the function's own name)
print(mget(setdiff(ls(), c("opt", "option_list", match.call()[[1]]))))
# load needed libraries
pacman::p_load("osmdata")
# get bounding box for city
bbox <- getbb(place_name=paste0(city_name, ", ", country_name), featuretype = "city", limit = 1)
#bbox [1, ] <- bbox [1, ] + c (-0.05, 0.05) * diff (bbox [1, ])
#bbox [2, ] <- bbox [2, ] + c (-0.05, 0.05) * diff (bbox [2, ])
# build an Overpass query for all "highway"-tagged features in the bbox;
# generous timeout/memsize because city-wide extracts can be large
osm_query <- opq(bbox, timeout = 6000, memsize = 1e9) %>%
add_osm_feature("highway")
osm_query$suffix <- gsub(">;", "<;>;", osm_query$suffix) # up and down OSM member recursion
osm_query$features <- ""
osm_query <- opq_string(osm_query)
# run query to get osm data from overpass api
cat("Downloading OSM data...")
osmdata_xml(osm_query, "osm_data.osm", encoding = 'utf-8')
#doc <- osmdata:::overpass_query (query = osm_query, quiet = T, encoding = 'utf-8')
#fileConn <- file("osm_data.osm")
#writeLines(doc, fileConn)
#close(fileConn)
cat("Done")
}
# Command-line entry point: parse --city_name / --country_name and run the
# download.  Surrounding single or double quotes are stripped from the values.
pacman::p_load("optparse")
option_list <- list(
  make_option("--city_name"),
  make_option("--country_name")
)
opt <- parse_args(OptionParser(option_list=option_list))
get_osm_data(gsub("(\")|(')","", opt$city_name), gsub("(\")|(')","", opt$country_name))
|
dd3bf97fa60c56bd2523a547621b4f15615a193f
|
5c68fc959e3be7f6b77151774d1d05b9a5e359be
|
/R/3_customize.R
|
674b17ec32b1388cca50718cf0708ffd3aadaae8
|
[] |
no_license
|
mmontesinosanmartin/itoiz_article
|
26717eda5e58b35d2e1bdb9b10877978255de6e5
|
26821b2d6a05f58d8150f577e83076b59c926960
|
refs/heads/master
| 2021-05-26T02:43:30.794613
| 2020-06-12T14:33:48
| 2020-06-12T14:33:48
| 254,020,006
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
3_customize.R
|
###############################################################################
# R Code: Using RGISTools to estimate the water levels in reservoirs and lakes
###############################################################################
# Militino, A.F., Montesino-SanMartin, Pérez-Goya, U.,M., Ugarte, M.D.
# Public University of Navarre
# License: Availability of material under
# [CC-BY-SA](https://creativecommons.org/licenses/by-sa/2.0/).
###############################################################################
# MOSAIC
###############################################################################
# Mosaic the downloaded Landsat-8 and Sentinel-2 scenes over the region of
# interest.  `roi.sf`, `wdir.ls8` and `wdir.sn2` are defined in the earlier
# scripts of this pipeline; each step is timed and the observed run times
# are recorded as comments.
t.st <- Sys.time()
# Landsat - 8
wdir.ls8.untar <- file.path(wdir.ls8, "untar")
t.st.ls8 <- Sys.time()
lsMosaic(src = wdir.ls8.untar,
         region = roi.sf,
         out.name = "ls8_itoiz",
         gutils = TRUE,
         AppRoot = wdir.ls8)
t.mos.ls8 <- Sys.time() - t.st.ls8
print(t.mos.ls8)
# Time difference of 1.535575 mins
# Sentinel-2
wdir.sn2.unzip <- file.path(wdir.sn2, "unzip")
t.st.sn2 <- Sys.time()
senMosaic(src = wdir.sn2.unzip,
          region = roi.sf,
          out.name = "sn2_itoiz",
          gutils = TRUE,
          AppRoot = wdir.sn2)
t.mos.sn2 <- Sys.time() - t.st.sn2
print(t.mos.sn2)
# Time difference of 11.78283 mins
t.mos <- Sys.time() - t.st
print(t.mos)
# Time difference of 13.31863 mins
# Remove the original files to free memory space in the disk
# unlink(wdir.ls8.untar, recursive = TRUE)
# unlink(wdir.sn2.unzip, recursive = TRUE)
|
fd8a49875d5978ed6164d54f32857430bc7b5e99
|
21f8a43099e22dbb00a670fcbede1697d17c34ce
|
/# 03 - Getting and Cleaning Data/Project 1/run_analysis.R
|
6e918c0a1ec2e138bf006f8383913ddb5138c949
|
[] |
no_license
|
thefier/datasciencecoursera
|
75bd4ab50a3f7869dacd931704604b1a925402f5
|
473b16cbbf6ba87e66022aa24522647205c7ab05
|
refs/heads/master
| 2021-01-19T10:57:45.879059
| 2014-07-20T22:00:18
| 2014-07-20T22:00:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,638
|
r
|
run_analysis.R
|
## Produce a tidy summary of the UCI HAR ("Human Activity Recognition")
## dataset: the mean of every mean()/std() feature, averaged per activity
## and per subject.
##
## Expects the "UCI Har Dataset" folder inside the working directory.
## Side effect: writes the tidy table to "run_analysis.txt" (comma-separated).
## Returns the tidy data.frame (180 rows = 6 activities x 30 subjects).
run_analysis <- function(){
  ## 1. Dataset locations
  ir.ma <- "./UCI Har Dataset"
  ir.te <- "/test"
  ir.tr <- "/train"
  ## 1.2 Feature (column) names
  names.x <- read.table(paste0(ir.ma, "/features.txt"),
                        colClasses = "character")
  ## 1.3 Activity id -> descriptive label lookup
  labels.y <- read.table(paste0(ir.ma, "/activity_labels.txt"),
                         colClasses = "character")
  ## Helper: read one split ("test"/"train") into a data.frame of
  ## Activity_Labels, Subject and the feature columns.
  read_split <- function(dir, suffix) {
    ## activity ids, converted to a factor with descriptive labels
    y <- read.table(paste0(dir, "/y_", suffix, ".txt"))
    names(y) <- "Activity_Labels"
    y[, 1] <- as.factor(as.character(y[, 1]))
    levels(y[, 1]) <- labels.y[1:6, 2]
    ## subject ids (1..30)
    s <- read.table(paste0(dir, "/subject_", suffix, ".txt"))
    names(s) <- "Subject"
    ## feature matrix; vectorised name assignment replaces the original
    ## per-column loop
    x <- read.table(paste0(dir, "/X_", suffix, ".txt"))
    names(x) <- names.x[, 2]
    data.frame(y, s, x)
  }
  ## 2-3. Read both splits
  read.te <- read_split(paste0(ir.ma, ir.te), "test")
  read.tr <- read_split(paste0(ir.ma, ir.tr), "train")
  ## 4. Merge train and test (train first, as in the original)
  read.all <- rbind(read.tr, read.te)
  ## 5. Keep only mean()/std() features, plus the two id columns in front
  names.test <- grepl("-mean\\(\\)|-std\\(\\)", names.x[, 2])
  names.test <- c(TRUE, TRUE, names.test)
  read.subset <- read.all[, names.test]
  ## 6. For every kept feature, a 30 x 6 matrix of means (Subject x Activity)
  read.list <- lapply(seq_len(ncol(read.subset) - 2), function(i) {
    tapply(read.subset[, i + 2],
           list(read.subset[, 2], read.subset[, 1]), mean)
  })
  ## 7. Assemble the tidy table: activities vary slowest, subjects fastest,
  ## matching the column-major layout of the matrices above.
  read.final <- data.frame(Activity = rep(labels.y[, 2], each = 30),
                           Subject = rep(1:30, length.out = 180))
  for (i in seq_along(read.list)) {
    ## as.vector() flattens column-major, i.e. c(col1, col2, ..., col6),
    ## exactly like the original per-activity concatenation loop
    read.final[, i + 2] <- as.vector(read.list[[i]])
  }
  names(read.final) <- names(read.subset)
  ## 8. Write and return the tidy data
  write.table(read.final, "run_analysis.txt", sep = ",", row.names = FALSE)
  read.final
}
|
d4df6a401351d16ff527605f0e8e28315c226602
|
1dc1a1a4e717c20112517501c43f9a966ab0c0e5
|
/R/od-funs.R
|
d830e75ca7d0d347d7b4515ff47092fcd4102851
|
[
"MIT"
] |
permissive
|
Robinlovelace/stplanr
|
5d11640b9b644e40d81b97ee1a2debb77ffb4e26
|
d1f10fe2335c2494ba153fd09675756e2c1572b3
|
refs/heads/master
| 2021-01-23T09:02:00.128500
| 2018-09-13T08:19:36
| 2018-09-13T08:19:36
| 30,063,520
| 14
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,392
|
r
|
od-funs.R
|
#' Extract coordinates from OD data
#'
#' @section Details:
#' Origin-destination ('OD') flow data is often provided
#' in the form of 1 line per flow with zone codes of origin and destination
#' centroids. This can be tricky to plot and link-up with geographical data.
#' This function makes the task easier.
#'
#' @param flow A data frame representing the flow between two points
#' or zones. The first two columns of this data frame should correspond
#' to the first column of the data in the zones. Thus in \code{\link{cents}},
#' the first column is geo_code. This corresponds to the first two columns
#' of \code{\link{flow}}.
#' @param zones A SpatialPolygonsDataFrame or SpatialPointsDataFrame
#' representing origins and destinations of travel flows.
#' @references
#' Rae, A. (2009). From spatial interaction data to spatial interaction information?
#' Geovisualisation and spatial structures of migration from the 2001 UK census.
#' Computers, Environment and Urban Systems, 33(3). doi:10.1016/j.compenvurbsys.2009.01.007
#' @export
#' @examples
#' data(flow)
#' data(zones)
#' od2odf(flow, zones)
od2odf <- function(flow, zones){
  # Zone id plus centroid coordinates of every zone.
  # `dplyr::data_frame()` and `rename_()` are deprecated; use tibble()
  # and the standard rename() instead.
  coords <- dplyr::tibble(code = as.character(zones[[1]]),
                          fx = coordinates(zones)[, 1],
                          fy = coordinates(zones)[, 2])
  flowcode <- dplyr::tibble(code_o = as.character(flow[[1]]),
                            code_d = as.character(flow[[2]]))
  # Attach origin coordinates, then reuse the same lookup (renamed to
  # tx/ty) for the destinations.
  odf <- dplyr::left_join(flowcode, coords, by = c("code_o" = "code"))
  coords <- dplyr::rename(coords, tx = fx, ty = fy)
  odf <- dplyr::left_join(odf, coords, by = c("code_d" = "code"))
  data.frame(odf) # return data.frame as more compatible with spatial data
}
#' Convert flow data to SpatialLinesDataFrame
#'
#' Origin-destination ('OD') flow data is often provided
#' in the form of 1 line per flow with zone codes of origin and destination
#' centroids. This can be tricky to plot and link-up with geographical data.
#' This function makes the task easier.
#'
#' @details
#' The function expects zone codes to be in the 1st column of the zones/destinations
#' datasets and the 1st and 2nd columns of the flow data, respectively.
#'
#' \code{\link{od2line2}} is a faster implementation
#' (around 6 times faster on large datasets)
#' that returns a \code{SpatialLines} object, omitting the data and working
#' only when there is no destinations dataset (i.e. when the geography of
#' origins is the same as that of destinations).
#'
#' @param flow A data frame representing the flow between two points
#' or zones. The first two columns of this data frame should correspond
#' to the first column of the data in the zones. Thus in \code{\link{cents}},
#' the first column is geo_code. This corresponds to the first two columns
#' of \code{\link{flow}}.
#' @param zones A SpatialPolygonsDataFrame or SpatialPointsDataFrame
#' representing origins (and destinations if no separate destinations object is provided)
#' of travel flows.
#' @param destinations A SpatialPolygonsDataFrame or SpatialPointsDataFrame
#' representing destinations of travel flows.
#' @param zone_code Name of the variable in \code{zones} containing the ids of the zone.
#' By default this is the first column names in the zones.
#' @param origin_code Name of the variable in \code{flow} containing the ids of the zone of origin.
#' By default this is the first column name in the flow input dataset.
#' @param dest_code Name of the variable in \code{flow} containing the ids of the zone of destination.
#' By default this is the second column name in the flow input dataset or the first column name in the
#' destinations if that is set.
#' @param zone_code_d Name of the variable in \code{destinations} containing the ids of the zone.
#' By default this is the first column names in the destinations.
#' @param silent TRUE by default, setting it to TRUE will show you the matching columns
#' @export
#' @examples
#' data(flow) # load example data - see ?flow for mor information
#' data(cents)
#' newflowlines <- od2line(flow = flow, zones = cents)
#' newflowlines2 <- od2line2(flow = flow, zones = cents)
#' plot(cents)
#' lines(newflowlines, lwd = 3)
#' lines(newflowlines2, col = "white")
#' nfl_sldf <- sp::SpatialLinesDataFrame(newflowlines, flow, match.ID = FALSE)
#' identical(nfl_sldf, newflowlines)
#' # When destinations are different
#' data(destinations)
#' head(flow_dests[1:5]) # check data
#' head(destinations@data[1:5])
#' flowlines_dests = od2line(flow_dests, cents, destinations = destinations, silent = FALSE)
#' plot(flowlines_dests)
#' nfl_sf <- od2line(flow, zones_sf)
#' @name od2line
NULL
#' @rdname od2line
#' @export
od2line <- function(flow, zones, destinations = NULL,
                    zone_code = names(zones)[1],
                    origin_code = names(flow)[1],
                    dest_code = names(flow)[2],
                    zone_code_d = NA, silent = TRUE) {
  # S3 generic: dispatch on the class of `zones` (sf or Spatial*).
  UseMethod("od2line", object = zones)
}
#' @export
od2line.sf <- function(flow, zones, destinations = NULL,
                       zone_code = names(zones)[1],
                       origin_code = names(flow)[1],
                       dest_code = names(flow)[2],
                       zone_code_d = NA, silent = TRUE){
  # Polygon geographies are reduced to centroids so each OD pair becomes
  # a single point-to-point segment.
  if(grepl(pattern = "POLYGON", x = unique(sf::st_geometry_type(zones)))) {
    zones <- sf::st_centroid(zones)
  }
  coords_o <- sf::st_coordinates(zones)[, 1:2]
  # Origin coordinates: match flow origin ids against the zone id column.
  origin_points <- coords_o[match(flow[[origin_code]], zones[[zone_code]]), ]
  if(is.null(destinations)){
    if(!silent){
      message(paste("Matching", zone_code, "in the zones to", origin_code, "and", dest_code,
                    "for origins and destinations respectively"))
    }
    dest_points <- coords_o[match(flow[[dest_code]], zones[[zone_code]]), ]
  } else {
    # Default the destination id column, as od2line.Spatial does.
    if(is.na(zone_code_d)){
      zone_code_d <- names(destinations)[1]
    }
    if(grepl(pattern = "POLYGON", x = unique(sf::st_geometry_type(destinations)))) {
      destinations <- sf::st_centroid(destinations)
    }
    # Bug fix: destination coordinates must be taken from `destinations`.
    # The original indexed the *zones* coordinate matrix with positions
    # matched against the destinations' ids, yielding wrong endpoints
    # whenever the two geographies differ (od2line.Spatial gets this right).
    coords_d <- sf::st_coordinates(destinations)[, 1:2]
    dest_points <- coords_d[match(flow[[dest_code]], destinations[[zone_code_d]]), ]
  }
  # One LINESTRING per flow row, in the CRS of the zones.
  l <- lapply(seq_len(nrow(flow)), function(x)
    sf::st_linestring(rbind(origin_points[x, ], dest_points[x, ]))) %>%
    sf::st_sfc(crs = sf::st_crs(zones))
  sf::st_sf(flow, geometry = l)
}
#' @export
od2line.Spatial <- function(flow, zones, destinations = NULL,
                            zone_code = names(zones)[1],
                            origin_code = names(flow)[1],
                            dest_code = names(flow)[2],
                            zone_code_d = NA, silent = TRUE){
  # Build one sp::Lines object per OD pair, origin -> destination.
  l <- vector("list", nrow(flow))
  if(is.null(destinations)){
    if(!silent){
      message(paste("Matching", zone_code, "in the zones to", origin_code, "and", dest_code,
                    "for origins and destinations respectively"))
    }
    for(i in 1:nrow(flow)){
      # Logical mask of the matching zone(s) for this flow's origin id.
      from <- zones@data[[zone_code]] %in% flow[[origin_code]][i]
      if(sum(from) == 0)
        warning(paste0("No match for line ", i))
      to <- zones@data[[zone_code]] %in% flow[[dest_code]][i]
      if(sum(to) == 0 & sum(from) == 1)
        warning(paste0("No match for line ", i))
      x <- sp::coordinates(zones[from, ])
      y <- sp::coordinates(zones[to, ])
      l[[i]] <- sp::Lines(list(sp::Line(rbind(x, y))), as.character(i))
    }
  } else {
    # Separate destination geography: default its id column to the first.
    if(is.na(zone_code_d)){
      zone_code_d <- names(destinations)[1]
    }
    if(!silent){
      message(paste("Matching", zone_code, "in the zones and", zone_code_d, "in the destinations,\nto",
                    origin_code, "and", dest_code,
                    "for origins and destinations respectively"))
    }
    for(i in 1:nrow(flow)){
      from <- zones@data[[zone_code]] %in% flow[[origin_code]][i]
      if(sum(from) == 0)
        warning(paste0("No match for line ", i))
      to <- destinations@data[[zone_code_d]] %in% flow[[dest_code]][i]
      if(sum(to) == 0 & sum(from) == 1)
        warning(paste0("No match for line ", i))
      x <- sp::coordinates(zones[from, ])
      y <- sp::coordinates(destinations[to, ])
      l[[i]] <- sp::Lines(list(sp::Line(rbind(x, y))), as.character(i))
    }
  }
  # Attach the flow attributes and inherit the CRS of the zones.
  l <- sp::SpatialLines(l)
  l <- sp::SpatialLinesDataFrame(l, data = flow, match.ID = FALSE)
  sp::proj4string(l) <- sp::proj4string(zones)
  l
}
#' @rdname od2line
#' @export
od2line2 <- function(flow, zones){
  # Straight-line endpoints (fx/fy -> tx/ty) for every OD pair.
  odf <- od2odf(flow, zones)
  l <- vector("list", nrow(odf))
  for(i in seq_len(nrow(odf))){
    l[[i]] <- sp::Lines(list(sp::Line(rbind(c(odf$fx[i], odf$fy[i]),
                                            c(odf$tx[i], odf$ty[i])))),
                        as.character(i))
  }
  # Fix: the original ended with `l <- sp::SpatialLines(l)`, an assignment,
  # so the SpatialLines object was returned *invisibly*.  Return it visibly.
  sp::SpatialLines(l)
}
#' Convert geographic line objects to a data.frame with from and to coords
#'
#' This function returns a data frame with fx and fy and tx and ty variables
#' representing the beginning and end points of spatial line features respectively.
#'
#' @param l A spatial lines object
#' @export
#' @examples
#' data(flowlines)
#' line2df(flowlines[5,]) # beginning and end of a single straight line
#' line2df(flowlines) # on multiple lines
#' line2df(routes_fast[5:6,]) # beginning and end of routes
#' line2df(routes_fast_sf[5:6,]) # beginning and end of routes
line2df <- function(l) {
  # S3 generic: dispatches on the class of `l` (sf or Spatial*).
  UseMethod("line2df")
}
#' @export
line2df.sf <- function(l){
  # Quosures naming the columns produced by st_coordinates(): X/Y are the
  # vertex coordinates, L1 identifies the parent linestring.
  X = rlang::quo(X)
  Y = rlang::quo(Y)
  L1 = rlang::quo(L1)
  # One row per vertex; take the first and last vertex of each line.
  ldf_geom = sf::st_coordinates(l)
  dplyr::group_by(dplyr::as_data_frame(ldf_geom), !!L1) %>%
    dplyr::summarise(fx = dplyr::first(!!X), fy = dplyr::first(!!Y),
                     tx = dplyr::last(!!X), ty = dplyr::last(!!Y))
}
#' @export
line2df.Spatial <- function(l){
  # One row per vertex; the `object` column identifies the parent line.
  ldf_geom = raster::geom(l)
  # The underscored dplyr verbs (group_by_/summarise_) are deprecated;
  # use the standard NSE verbs, mirroring line2df.sf.
  dplyr::group_by(dplyr::as_data_frame(ldf_geom), object) %>%
    dplyr::summarise(fx = dplyr::first(x), fy = dplyr::first(y),
                     tx = dplyr::last(x), ty = dplyr::last(y))
}
#' Convert a SpatialLinesDataFrame to points
#' The number of points will be double the number of lines with \code{line2points}.
#' A closely related function, \code{line2pointsn} returns all the points that were line vertices.
#' The points corresponding with a given line, \code{i}, will be \code{(2*i):((2*i)+1)}.
#' @param l A SpatialLinesDataFrame
#' @param ids Vector of ids (by default \code{1:nrow(l)})
#' @export
#' @examples
#' l <- routes_fast[2:4,]
#' lpoints <- line_to_points(l)
#' lpoints2 <- line2pointsn(l)
#' plot(lpoints, pch = lpoints$id, cex = lpoints$id)
#' points(lpoints2, add = TRUE)
#' line_to_points(routes_fast_sf[2:4,])
#' @aliases line2points
#' @export
line_to_points <- function(l, ids = rep(1:nrow(l), each = 2)){
  # S3 generic; by default each line contributes two consecutive ids
  # (its start and end point).
  UseMethod("line_to_points")
}
#' @export
line_to_points.sf <- function(l, ids = rep(1:nrow(l), each = 2)){
  y_coords <- x_coords <- double(length = length(ids)) # initiate coords
  # Interleaved layout: odd slots hold line starts, even slots line ends.
  d_indices <- 1:nrow(l) * 2
  o_indices <- d_indices - 1
  # sfg LINESTRING matrices are stored column-major (all x, then all y),
  # so element 1 is the first x, length/2 the last x, length/2 + 1 the
  # first y, and the final element the last y.
  x_coords[o_indices] <- sapply(l$geometry, `[[`, 1) # first (x) element of each line
  x_coords[d_indices] <- sapply(l$geometry, function(x) x[length(x) / 2]) # last (x) element of each line
  y_coords[o_indices] <- sapply(l$geometry, function(x) x[length(x) / 2 + 1]) # first (y) element of each line
  y_coords[d_indices] <- sapply(l$geometry, tail, n = 1) # last (y) element of each line
  # Build one MULTIPOINT, split it into POINTs, and attach the ids.
  p_multi <- sf::st_multipoint(cbind(x_coords, y_coords))
  p <- sf::st_cast(sf::st_sfc(p_multi), "POINT")
  sf::st_sf(data.frame(id = ids), p)
}
#' @export
line_to_points.Spatial <- function(l, ids = rep(1:nrow(l), each = 2)){
  # Collect the first and last vertex of every line.  NOTE(review): the
  # result is grown with raster::bind() inside the loop, which is O(n^2)
  # for many lines.
  for(i in 1:length(l)){
    lcoords <- sp::coordinates(l[i,])[[1]][[1]]
    # First and last rows of the coordinate matrix = the line's endpoints.
    pmat <- matrix(lcoords[c(1, nrow(lcoords)),], nrow = 2)
    lpoints <- sp::SpatialPoints(pmat)
    if(i == 1){
      out <- lpoints
    } else {
      out <- raster::bind(out, lpoints)
    }
  }
  # Inherit the CRS of the input and attach the point ids.
  sp::proj4string(out) <- sp::proj4string(l)
  out <- sp::SpatialPointsDataFrame(coords = out, data = data.frame(id = ids))
  out
}
#' @export
line2points <- function(l){
  # Like line_to_points.Spatial, but returns bare SpatialPoints without an
  # id column.  NOTE(review): grows the result with raster::bind() in the
  # loop and re-sets the CRS on every intermediate object.
  for(i in 1:length(l)){
    l1 <- l[i,]
    lcoords <- sp::coordinates(l1)[[1]][[1]]
    # First and last vertex of each line become two points.
    lpoints <- sp::SpatialPoints(matrix(lcoords[c(1, nrow(lcoords)),], nrow = 2))
    sp::proj4string(lpoints) <- sp::proj4string(l)
    if(i == 1){
      out <- lpoints
    } else {
      out <- raster::bind(out, lpoints)
    }
  }
  out
}
#' @rdname line_to_points
#' @export
line2pointsn <- function(l){
  # Every vertex of every line, returned as SpatialPoints in the CRS of `l`.
  vertices <- raster::geom(l)
  pts <- sp::SpatialPoints(coords = vertices[, c("x", "y")])
  raster::crs(pts) <- raster::crs(l)
  pts
}
#' Convert straight OD data (desire lines) into routes
#'
#' @section Details:
#'
#' See \code{\link{route_cyclestreet}} and other route functions for details.
#'
#' A parallel implementation of this was available until version 0.1.8.
#' See \href{https://github.com/ropensci/stplanr/blob/18a598674bb378d5577050178da1561489496157/R/od-funs.R}{github.com/ropensci/stplanr} for details.
#'
#'
#' @param l A SpatialLinesDataFrame
#' @param route_fun A routing function to be used for converting the straight lines to routes
#' \code{\link{od2line}}
#' @param n_print A number specifying how frequently progress updates
#' should be shown
#' @param list_output If FALSE (default) assumes SpatialLinesDataFrame output. Set to TRUE to save output as a list.
#' @param l_id Character string naming the id field from the input lines data,
#' typically the origin and destination ids pasted together. If absent, the row name of the
#' straight lines will be used.
#' @param ... Arguments passed to the routing function, e.g. \code{\link{route_cyclestreet}}
#' @inheritParams route_cyclestreet
#' @export
#' @examples
#' \dontrun{
#' l = flowlines[2:5,]
#' r <- line2route(l, "route_osrm")
#' rf <- line2route(l = l, "route_cyclestreet", plan = "fastest")
#' rq <- line2route(l = l, plan = "quietest", silent = TRUE)
#' plot(r)
#' plot(rf, col = "red", add = TRUE)
#' plot(rq, col = "green", add = TRUE)
#' plot(l, add = T)
#' line2route(flowlines_sf[2:3, ], route_osrm)
#' # Plot for a single line to compare 'fastest' and 'quietest' route
#' n = 2
#' plot(l[n,])
#' lines(rf[n,], col = "red")
#' lines(rq[n,], col = "green")
#' # Example with list output
#' l <- l[1:3,]
#' rf_list <- line2route(l = l, list_output = TRUE)
#' line2route(l[1,], route_graphhopper)
#' }
line2route <- function(l, route_fun = "route_cyclestreet", n_print = 10, list_output = FALSE, l_id = NA, ...){
  # sf input is converted to sp for routing and converted back at the end.
  return_sf <- is(l, "sf")
  if(return_sf) {
    l <- as(l, "Spatial")
  }
  FUN <- match.fun(route_fun)
  ldf <- line2df(l)  # endpoint coordinates (fx, fy, tx, ty) per desire line
  n_ldf <- nrow(ldf)
  # On routing failure, warn and return the condition object; `i` is read
  # from the enclosing loop via lexical scoping.
  error_fun <- function(e){
    warning(paste("Fail for line number", i))
    e
  }
  rc <- as.list(rep(NA, length(l)))
  for(i in 1:n_ldf){
    # Route each line's endpoints; failures are captured, not fatal.
    rc[[i]] <- tryCatch({
      FUN(from = c(ldf$fx[i], ldf$fy[i]), to = c(ldf$tx[i], ldf$ty[i]), ...)
    }, error = error_fun)
    perc_temp <- i %% round(n_ldf / n_print)
    # print % of distances calculated
    if(!is.na(perc_temp) & perc_temp == 0){
      message(paste0(round(100 * i/n_ldf), " % out of ", n_ldf, " distances calculated"))
    }
  }
  if(list_output) {
    r <- rc
  } else {
    # Set the names based on the first non failing line (then exit loop)
    for(i in 1:n_ldf){
      if(grepl("Spatial.*DataFrame", class(rc[[i]]))[1]) {
        # Template result: input geometry with an empty attribute table
        # shaped like a successful route plus an "error" column.
        rdata <- data.frame(matrix(nrow = nrow(l), ncol = ncol(rc[[i]]) + 1))
        names(rdata) <- c(names(rc[[i]]), "error")
        r <- l
        r@data <- rdata
        break
      }
    }
    # Copy rc into r including the data or copy the error into r
    for(i in 1:n_ldf){
      if(grepl("Spatial.*DataFrame", class(rc[[i]]))[1]) {
        # Success: adopt the routed geometry, keep the input line's ID.
        r@lines[[i]] <- Lines(rc[[i]]@lines[[1]]@Lines, row.names(l[i,]))
        r@data[i,] <- c(rc[[i]]@data, error = NA)
      } else {
        # Failure: keep the straight geometry, record the error text.
        r@data[i, "error"] <- rc[[i]][1]
      }
    }
    # Set the id in r: use l_id if given and present in l, else row names.
    l_ids <- c(l_id, "id")
    l_id <- l_ids[!is.na(l_ids)][1]
    r$id <- if(l_id %in% names(l)){
      l@data[[l_id]]
    } else {
      row.names(l)
    }
  }
  if(return_sf) {
    r <- sf::st_as_sf(r)
  }
  r
}
#' Convert straight SpatialLinesDataFrame from flow data into routes retrying
#' on connection (or other) intermittent failures
#'
#' @section Details:
#'
#' See \code{\link{line2route}} for the version that is not retried on errors.
#' @param lines A SpatialLinesDataFrame
#' @param pattern A regex that the error messages must not match to be retried, default "^Error: " i.e. do not retry errors starting with "Error: "
#' @param n_retry Number of times to retry
#' @inheritParams line2route
#' @export
#' @examples
#' \dontrun{
#' data(flowlines)
#' rf_list <- line2routeRetry(flowlines[1:2,], pattern = "nonexistanceerror", silent = F)
#' }
line2routeRetry <- function(lines, pattern = "^Error: ", n_retry = 3, ...) {
  routes <- line2route(lines, reporterrors = T, ...)
  # When the time is NA then the routing failed,
  # if there is no error message or the message matches the pattern select line to be retried
  failed_to_route <- lines[is.na(routes$time) & (is.na(routes$error) | !grepl(pattern, routes$error)),]
  if (nrow(failed_to_route) > 0 && n_retry > 0){
    # NOTE(review): `ids` is assigned but never used (and `routes$ids`
    # is probably a typo for `routes$id`) — confirm and remove.
    ids <- routes$ids
    # Recurse on the failed subset with one fewer retry remaining.
    routes_retry <- line2routeRetry(failed_to_route, pattern = pattern, n_retry = n_retry-1, ...)
    for (idx_retry in 1:nrow(routes_retry)) {
      # Merge in retried routes if they are Spatial DataFrames
      if(grepl("Spatial.*DataFrame", class(routes_retry[[idx_retry]]))) {
        idx_to_replace <- which(routes$id == routes_retry$id[idx_retry])
        # Replace both the attribute row and the geometry of the failed route.
        routes@data[idx_to_replace,] <- routes_retry@data[idx_retry,]
        routes@lines[[idx_to_replace]] <- Lines(routes_retry@lines[[idx_retry]]@Lines, row.names(routes_retry[idx_retry,]))
      }
    }
  }
  routes
}
#' Convert a series of points into a dataframe of origins and destinations
#'
#' Takes a series of geographical points and converts them into a data.frame
#' representing the potential flows, or 'spatial interaction', between every combination
#' of points.
#'
#' @param p A spatial points object
#' @export
#' @examples
#' data(cents)
#' df <- points2odf(cents)
#' cents_centroids <- rgeos::gCentroid(cents, byid = TRUE)
#' df2 <- points2odf(cents_centroids)
#' df3 <- points2odf(cents_sf)
points2odf <- function(p) {
  # S3 generic: dispatches on sf vs Spatial* input.
  UseMethod("points2odf")
}
#' @export
points2odf.sf <- function(p){
  # Cartesian product of the id column (first column of `p`) with itself:
  # one row per origin-destination pair.  expand.grid() varies its first
  # argument fastest, so the columns are swapped ([2:1]) to make the
  # origin id vary slowest.
  ids <- p[[1]]
  pairs <- expand.grid(ids, ids)
  odf <- data.frame(pairs[2:1])
  names(odf) <- c("O", "D")
  odf
}
#' @export
points2odf.Spatial <- function(p){
  # Zone ids: first data column for Spatial*DataFrame classes, positional
  # index for bare SpatialPoints, first column otherwise.
  if(grepl(pattern = "DataFrame", class(p))){
    geo_code <- p@data[,1]
  } else if(is(p, "SpatialPoints")){
    geo_code <- 1:length(p)
  } else {
    geo_code <- p[,1]
  }
  # Cartesian product of ids; [2:1] makes the origin id vary slowest.
  odf = data.frame(
    expand.grid(geo_code, geo_code)[2:1]
  )
  names(odf) <- c("O", "D")
  odf
}
#' Convert a series of points into geographical flows
#'
#' Takes a series of geographical points and converts them into a SpatialLinesDataFrame
#' representing the potential flows, or 'spatial interaction', between every combination
#' of points.
#'
#' @param p SpatialPointsDataFrame
#'
#' @export
#' @examples
#' data(cents)
#' plot(cents)
#' flow <-points2flow(cents)
#' plot(flow, add = TRUE)
#' flow_sf <- points2flow(cents_sf)
#' plot(flow_sf)
points2flow <- function(p){
  # Enumerate every OD pair between the points, then draw a straight
  # desire line for each pair.
  od2line(flow = points2odf(p), zones = p)
}
#' Update line geometry
#'
#' Take two SpatialLines objects and update the geometry of the former with that of the latter,
#' retaining the data of the former.
#'
#' @param l A SpatialLines object, whose geometry is to be modified
#' @param nl A SpatialLines object of the same length as \code{l} to provide the new geometry
#'
#' @export
#' @examples
#' data(flowlines)
#' l <- flowlines[2:5,]
#' nl <- routes_fast
#' nrow(l)
#' nrow(nl)
#' l <- l[!is_linepoint(l),]
#' names(l)
#' names(routes_fast)
#' l_newgeom <- update_line_geometry(l, nl)
#' plot(l, lwd = l$All / mean(l$All))
#' plot(l_newgeom, lwd = l$All / mean(l$All))
#' names(l_newgeom)
update_line_geometry <- function(l, nl){
  # Overwrite each geometry in `l` with the matching one from `nl`, keeping
  # the IDs (row names) and attribute data of `l`.  Assumes both objects
  # have the same number of features in the same order.
  for(i in 1:nrow(l)){
    l@lines[[i]] <- Lines(nl@lines[[i]]@Lines, row.names(l[i,]))
  }
  l
}
#' Quickly calculate Euclidean distances of od pairs
#'
#' It is common to want to know the Euclidean distance between origins and destinations
#' in OD data. You can calculate this by first converting OD data to SpatialLines data,
#' e.g. with \code{\link{od2line}}. However this can be slow and overkill if you just
#' want to know the distance. This function is a few orders of magnitude faster.
#'
#' Note: this function assumes that the zones or centroids in \code{cents} have a geographic
#' (lat/lon) CRS.
#'
#' @inheritParams od2line
#' @export
#' @examples
#' data(flow)
#' data(cents)
#' od_dist(flow, cents)
od_dist <- function(flow, zones){
  # Match origin/destination ids (first two columns of `flow`) against the
  # zone id column, then pull the corresponding centroid coordinates.
  omatch = match(flow[[1]], zones@data[[1]])
  dmatch = match(flow[[2]], zones@data[[1]])
  cents_o = zones@coords[omatch,]
  cents_d = zones@coords[dmatch,]
  # Great-circle distance in metres; assumes lon/lat coordinates (see the
  # roxygen note above).
  geosphere::distHaversine(p1 = cents_o, p2 = cents_d)
}
#' Convert a series of points, or a matrix of coordinates, into a line
#'
#' This is a simple wrapper around \code{\link{spLines}} that makes the creation of
#' \code{SpatialLines} objects easy and intuitive
#'
#' @param p A SpatialPoints obect or matrix representing the coordinates of points.
#' @export
#' @examples
#' p = matrix(1:4, ncol = 2)
#' library(sp)
#' l = points2line(p)
#' plot(l)
#' l = points2line(cents)
#' plot(l)
#' p = line2points(routes_fast)
#' l = points2line(p)
#' plot(l)
#' l_sf = points2line(cents_sf)
#' plot(l_sf)
points2line <- function(p) {
  # S3 generic: dispatches on sf, Spatial* or plain matrix input.
  UseMethod("points2line")
}
#' @export
points2line.sf <- function(p){
  # For sf points, the pairwise desire lines from points2flow() are exactly
  # the required result.
  points2flow(p)
}
#' @export
points2line.Spatial <- function(p){
  # Extract the coordinate matrix (and CRS) from SpatialPoints input, then
  # delegate to the matrix method and restore the CRS on the result.
  crs_string <- NA
  if (is(p, "SpatialPoints")) {
    crs_string <- sp::proj4string(p)
    p <- sp::coordinates(p)
  }
  out <- points2line(p)
  raster::crs(out) <- crs_string
  out
}
#' @export
points2line.matrix <- function(p){
  # Rows of `p` are point coordinates; join them into a single SpatialLines.
  raster::spLines(p)
}
|
bfa48e5f648ce4a01354629e7984f91fe1f48565
|
71c95568a68e1d956b2ae5e4ef01e5cd94a039d2
|
/daily.R
|
137adf26ca025902f671b54d00dbdf6512859da3
|
[] |
no_license
|
EmilHvitfeldt/paletteerbot
|
2c7dd70e2caed9514d77c2aba0768b61dff5c85d
|
7a1c69bf3af153de02d74f4eabdfb5bb5ade4b9d
|
refs/heads/master
| 2023-02-06T06:02:23.384313
| 2021-01-03T21:09:32
| 2021-01-03T21:09:32
| 286,087,969
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,434
|
r
|
daily.R
|
# Daily paletteer bot run: look up today's scheduled palette, render it to
# chart.png (tweeted further below).
library(paletteer)
library(tidyverse)
library(lubridate)
library(patchwork)
library(tidygraph)
library(ggraph)
library(rtweet)
library(glue)
library(emo)
# Source all helper functions from ./R (plot_discrete, plot_continuous,
# daily_tweet, ...).
lapply(list.files("./R", full.names = TRUE), source)
# Pre-computed palette/colour tables and the posting schedule.
continuous_colors <- readRDS("data/continuous_colors.rds")
discrete_colors <- readRDS("data/discrete_colors.rds")
pokemon_colors <- readRDS("data/pokemon_colors.rds")
schedule <- readRDS("data/schedule.rds")
usa_plot <- readRDS("data/usa_plot.rds")
Today <- today()
# The schedule row for today selects the palette slug and plot type.
Today_palette <- schedule %>%
  filter(date == Today)
# Branch on the day type: discrete and pokemon palettes share a plot style,
# continuous palettes get their own.
chart <- if (Today_palette$daytype == "discrete") {
  discrete_colors %>%
    filter(slug == Today_palette$slug) %>%
    plot_discrete()
} else if (Today_palette$daytype == "pokemon") {
  pokemon_colors %>%
    filter(slug == Today_palette$slug) %>%
    plot_discrete()
} else if (Today_palette$daytype == "continuous") {
  continuous_colors %>%
    filter(slug == Today_palette$slug) %>%
    plot_continuous()
}
ggsave("chart.png", chart, width = 4, height = 2, dpi = 300, scale = 2)
paletteer_token <- function() {
  # Build the rtweet OAuth token for the @paletteerbot account from the
  # four PALBOT_* environment variables (unset variables come back as "").
  keys <- Sys.getenv(c("PALBOT_CONSUMER_KEY", "PALBOT_CONSUMER_SECRET",
                       "PALBOT_ACCESS_TOKEN", "PALBOT_ACCESS_SECRET"))
  rtweet::create_token(
    "paletteerbot",
    consumer_key = keys[["PALBOT_CONSUMER_KEY"]],
    consumer_secret = keys[["PALBOT_CONSUMER_SECRET"]],
    access_token = keys[["PALBOT_ACCESS_TOKEN"]],
    access_secret = keys[["PALBOT_ACCESS_SECRET"]],
    set_renv = FALSE
  )
}
# Post today's chart with the generated caption (daily_tweet() is sourced
# from ./R above).
post_tweet(daily_tweet(Today_palette), media = "chart.png", token = paletteer_token())
|
b1f681fac9e7fd2dfa93992d431e0cac3e8a2d84
|
a53c1be476315400ceba9a42651019b09d7a7fee
|
/assets/scripts/4_MMA/_functions/bootstrap.condor.r
|
33e67dc7c76c2083230354d1befee28fdc126c90
|
[
"CC0-1.0"
] |
permissive
|
BPRC-EcoInformatics/BPRC-EcoInformatics.github.io
|
d8535043c8d4ae9cfba207efc99b43d8377a56b8
|
9f3b2b2e44271d547dd1af33323fd033eaf41b0e
|
refs/heads/master
| 2021-01-13T00:56:32.704123
| 2017-12-14T22:50:19
| 2017-12-14T22:50:19
| 54,073,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 633
|
r
|
bootstrap.condor.r
|
# Make bootstrap files for Condor directory "boot"
# Load functions
#
# Writes `nreps` bootstrap indicator files (Boot1.txt ... Boot<nreps>.txt)
# for the given species into C:\MMA\<species>\_condor\boot.  Relies on a
# `resample()` helper defined elsewhere -- assumed to return a list with a
# logical/0-1 `test.ind` component (TODO confirm).
bootstrap.condor <- function(species.name, nreps) {
  # Presence/absence data is read relative to the *caller's* working dir.
  data <- read.table(paste0(species.name, "_data_pres_abs_best.txt"),
                     header = TRUE)
  data <- data[data$pres == 1, ]  # keep presence records only
  # Fix: restore the caller's working directory on exit; the original
  # setwd() changed it permanently.
  old_wd <- setwd(paste0("C:\\MMA\\", species.name, "\\_condor\\boot"))
  on.exit(setwd(old_wd), add = TRUE)
  boot.mat <- matrix(NA, nrow(data), nreps)
  colnames(boot.mat) <- paste0("Boot", seq_len(nreps))
  for (i in seq_len(nreps)) {
    # Each column records which rows were NOT in the bootstrap test set.
    boot.mat[, i] <- !resample(data, method = "boot")$test.ind
    write.table(as.data.frame(boot.mat[, i]), paste0("Boot", i, ".txt"),
                row.names = FALSE, col.names = paste0("Boot", i))
  }
}
|
7cc49edbfa068229f032ba09af69b4d2e38bb011
|
dd15d6f7bc83ac75695b64866e02f7ed81b75b7a
|
/man/plot_genedrop_lm_slopes.Rd
|
da799d9bcc1a157687c8719f3fd652dcd7c15533
|
[] |
no_license
|
susjoh/genedroppeR
|
fe2a912e065ca9fbd45bbd10107e832389c1af2a
|
3d9aae940707ba0316b9f2069c4f0794956501c9
|
refs/heads/master
| 2022-12-11T20:18:14.059881
| 2022-11-25T20:32:20
| 2022-11-25T20:32:20
| 178,249,087
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,154
|
rd
|
plot_genedrop_lm_slopes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genedrop_plot_functions.R
\name{plot_genedrop_lm_slopes}
\alias{plot_genedrop_lm_slopes}
\title{Plot a histogram of Gene-Drop Simulation linear regression slopes}
\usage{
plot_genedrop_lm_slopes(genedrop_object_summary,
n_founder_cohorts = NULL, remove_founders = T, method = "lm",
obs_line_col = "red")
}
\arguments{
\item{genedrop_object_summary}{Gene-Drop summary object from the function
`summary_genedrop()`}
\item{n_founder_cohorts}{integer. The number of cohorts at the top of the
pedigree that will sample from the true allele frequencies (these are
defined as "sampled"). All cohorts following these ones are "simulated" and
are used for comparisons of changes in allele frequency.}
\item{remove_founders}{logical. Default = TRUE. Remove the founders from the
data before calculating the linear regression slopes.}
\item{method}{Default = "lm". Currently the only option.}
\item{obs_line_col}{line colour to use for the observed data.}
}
\description{
Plot a histogram of Gene-Drop Simulation linear regression slopes and return
true distribution values.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.