blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0107c401718aa403d3a8eb3c3b6b7f99f11e09b
|
00c39406f6b66c24629dbcba4aeb7dccbc908119
|
/man/gx.get.Rd
|
b5e5c0caee4bb4c8590a4da16fe22edfa8c7101d
|
[
"MIT"
] |
permissive
|
genexplain/geneXplainR
|
cca196ed558ed985c5b0e14716ce497697ce370e
|
3a5358f3d5f73a7a40ce7a15eb4b2846c7e01ac1
|
refs/heads/master
| 2022-08-27T23:12:52.705562
| 2021-12-14T08:16:43
| 2021-12-14T08:16:43
| 88,882,877
| 5
| 4
|
MIT
| 2022-08-09T14:56:12
| 2017-04-20T15:41:47
|
R
|
UTF-8
|
R
| false
| true
| 421
|
rd
|
gx.get.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geneXplain.R
\name{gx.get}
\alias{gx.get}
\title{Gets a table from the platform workspace}
\usage{
gx.get(path)
}
\arguments{
\item{path}{path of object to load into a data.frame}
}
\value{
a list containing the table
}
\description{
Returns a list containing the specified table.
}
\seealso{
\code{\link[rbiouml]{biouml.get}}
}
\keyword{get}
|
e5512789a85e2dc4ffffbe834190df1ca1b2c624
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HMP/examples/tonsils.Rd.R
|
871e1e910759485af92b0575c8216df0156e0eb1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 149
|
r
|
tonsils.Rd.R
|
# Extracted example code for the 'tonsils' data set shipped with the HMP
# package. Running this script loads the package and attaches the data.
library(HMP)
### Name: tonsils
### Title: Palatine Tonsil Data Set
### Aliases: tonsils
### Keywords: datasets
### ** Examples
# Load the palatine tonsil data set into the workspace as `tonsils`.
data(tonsils)
|
780bcf12153ef1072011348c362a40260c49a1bb
|
a0668f6fd7e370227443ef993d158efe6b20ae74
|
/General_Genomic_Structure_GenoplotR.R
|
d45044f2a15a1eb19536153a73cd4580d3a2f334
|
[] |
no_license
|
RobinHofmeister/SAGE_2
|
a646d1ab2a930d498cbbde6d33c83223a9b695fa
|
27c057ae84859ac2af1fbe260d52274e36f04837
|
refs/heads/master
| 2020-03-19T06:11:28.730376
| 2018-06-04T14:13:06
| 2018-06-04T14:13:06
| 135,998,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,375
|
r
|
General_Genomic_Structure_GenoplotR.R
|
# Synteny plots of Mauve whole-genome alignments using genoPlotR.
library(genoPlotR)
# 1) The following lines produce the alignment of all Bombella and Commensalibacter genomes and represent the synteny across all these genomes.
bbone <- read_mauve_backbone("alignment_all.backbone", ref=2)
# Human-readable genome labels, in the same order as the backbone segments.
name<-c("PacBio-0368", "Bom_368","Bom_378","Bom_380","Bom_385","Bom_387", "Com_392", "Com_379", "Com_390", "Com_382", "Com_367", "Com_366")
names(bbone$dna_segs) <- name
# Sum of aligned block lengths for each pairwise comparison.
# NOTE(review): the computed value is neither assigned nor printed, so this
# loop currently has no observable effect -- confirm whether it was meant
# to be printed or stored.
for (i in 1:length(bbone$comparisons)){
cmp<- bbone$comparisons[[i]]
abs(cmp$end1- cmp$start1)+ abs(cmp$end2-cmp$start2)
}
plot_gene_map(dna_segs=bbone$dna_segs, comparisons=bbone$comparisons, override_color_schemes = T , global_color_scheme = c("auto","increasing", "red_blue", 0.5))
# 2) the following lines produce the alignment of three genomes and represent only syntenic domains larger than 2.5 kb to better identify the position of those conserved domains.
bbone <- read_mauve_backbone("alignment_reduced.backbone", ref=2, filter_low = 2500)
name<-c("PacBio_284", "Bombella_387","Commensalibacter_392")
names(bbone$dna_segs) <- name
# Same per-comparison length computation as above; result also discarded.
for (i in 1:length(bbone$comparisons)){
cmp<- bbone$comparisons[[i]]
abs(cmp$end1- cmp$start1)+ abs(cmp$end2-cmp$start2)
}
plot_gene_map(dna_segs=bbone$dna_segs, comparisons=bbone$comparisons, override_color_schemes = T , global_color_scheme = c("auto","increasing", "red_blue", 0.5), cex.lab=5)
bbone$comparisons
|
365bde8ef5938d550702ecc345728d61ddb897e2
|
9b7888b0b9ecab83ac55e020d2c59917d6452f39
|
/R/addGeneIDs.R
|
91e131addde9c13a0c569372542f38c16ddc9e7d
|
[] |
no_license
|
jianhong/ChIPpeakAnno
|
703580b9ce6a7708f60d92a78a3714bc9d82a562
|
d2136538718c58881a420c9985c53c6e89e223f4
|
refs/heads/devel
| 2023-08-22T15:29:29.888828
| 2023-07-25T14:57:28
| 2023-07-25T14:57:28
| 186,652,664
| 10
| 6
| null | 2023-09-01T20:48:22
| 2019-05-14T15:41:28
|
R
|
UTF-8
|
R
| false
| false
| 12,483
|
r
|
addGeneIDs.R
|
#' Add common IDs to annotated peaks such as gene symbol, entrez ID,
#' ensemble gene id and refseq id.
#' @description Add common IDs to annotated peaks such as gene symbol,
#' entrez ID, ensemble gene id and refseq id leveraging organism annotation
#' dataset. For example, org.Hs.eg.db is the dataset from orgs.Hs.eg.db
#' package for human, while org.Mm.eg.db is the dataset from the org.Mm.eg.db
#' package for mouse.
#' @param annotatedPeak GRanges or a vector of feature IDs.
#' @param orgAnn organism annotation dataset such as org.Hs.eg.db.
#' @param IDs2Add a vector of annotation identifiers to be added
#' @param feature_id_type type of ID to be annotated, default is
#' ensembl_gene_id
#' @param silence TRUE or FALSE. If TRUE, will not show unmapped entrez id
#' for feature ids.
#' @param mart mart object, see \link[biomaRt:useMart]{useMart} of biomaRt
#' package for details
#' @details One of orgAnn and mart should be assigned.
#' \itemize{
#' \item If orgAnn is given, parameter feature_id_type should be
#' ensemble_gene_id, entrez_id, gene_symbol, gene_alias or refseq_id.
#' And parameter IDs2Add can be set to any combination of identifiers
#' such as "accnum", "ensembl", "ensemblprot", "ensembltrans", "entrez_id",
#' "enzyme", "genename", "pfam", "pmid", "prosite", "refseq", "symbol",
#' "unigene" and "uniprot". Some IDs are unique to an organism,
#' such as "omim" for org.Hs.eg.db and "mgi" for org.Mm.eg.db.
#'
#' Here is the definition of different IDs :
#' \itemize{
#' \item accnum: GenBank accession numbers
#' \item ensembl: Ensembl gene accession numbers
#' \item ensemblprot: Ensembl protein accession numbers
#' \item ensembltrans: Ensembl transcript accession numbers
#' \item entrez_id: entrez gene identifiers
#' \item enzyme: EC numbers
#' \item genename: gene name
#' \item pfam: Pfam identifiers
#' \item pmid: PubMed identifiers
#' \item prosite: PROSITE identifiers
#' \item refseq: RefSeq identifiers
#' \item symbol: gene abbreviations
#' \item unigene: UniGene cluster identifiers
#' \item uniprot: Uniprot accession numbers
#' \item omim: OMIM(Mendelian Inheritance in Man) identifiers
#' \item mgi: Jackson Laboratory MGI gene accession numbers
#' }
#'
#' \item If mart is used instead of orgAnn, for valid parameter
#' feature_id_type and IDs2Add parameters, please refer to
#' \link[biomaRt:getBM]{getBM} in bioMart package.
#' Parameter feature_id_type should be one valid filter name listed by
#' \link[biomaRt:listFilters]{listFilters(mart)} such as ensemble_gene_id.
#' And parameter IDs2Add should be one or more valid attributes name listed
#' by \link[biomaRt:listAttributes]{listAttributes(mart)} such as
#' external_gene_id, entrezgene, wikigene_name, or mirbase_transcript_name.
#'
#' }
#' @return GRanges if the input is a GRanges or dataframe if input is a vector.
#' @references http://www.bioconductor.org/packages/release/data/annotation/
#' @author Jianhong Ou, Lihua Julie Zhu
#' @seealso \link[biomaRt:getBM]{getBM}, AnnotationDb
#' @export
#' @importFrom AnnotationDbi mget
#' @importFrom biomaRt getBM
#' @importFrom utils installed.packages
#' @examples
#' data(annotatedPeak)
#' library(org.Hs.eg.db)
#' addGeneIDs(annotatedPeak[1:6,],orgAnn="org.Hs.eg.db",
#' IDs2Add=c("symbol","omim"))
#' ##addGeneIDs(annotatedPeak$feature[1:6],orgAnn="org.Hs.eg.db",
#' ## IDs2Add=c("symbol","genename"))
#' if(interactive()){
#' mart <- useMart("ENSEMBL_MART_ENSEMBL",host="www.ensembl.org",
#' dataset="hsapiens_gene_ensembl")
#' ##mart <- useMart(biomart="ensembl",dataset="hsapiens_gene_ensembl")
#' addGeneIDs(annotatedPeak[1:6,], mart=mart,
#' IDs2Add=c("hgnc_symbol","entrezgene"))
#' }
#' @keywords misc
addGeneIDs<-function(annotatedPeak, orgAnn, IDs2Add=c("symbol"),
feature_id_type="ensembl_gene_id",
silence=TRUE,
mart){
# -- Validate arguments: annotatedPeak is required and exactly one of the
# two annotation sources (orgAnn or mart) must be usable.
if (missing(annotatedPeak))
{
stop("Missing required argument annotatedPeak!",
call.=FALSE)
}
if(missing(orgAnn) & missing(mart)){
stop('no annotation database selected',
call.=FALSE)
}
# -- Collect the unique feature identifiers to annotate, either from the
# GRanges 'feature' metadata column or from a plain character vector.
if(is(annotatedPeak, "GRanges")){
feature_ids <- unique(annotatedPeak$feature)
}else{
if(is.character(annotatedPeak)){
feature_ids <- unique(annotatedPeak)
}else{
stop("annotatedPeak needs to be GRanges type with
feature variable holding the feature id or a
character vector holding the IDs of the features
used to annotate the peaks!",call.=FALSE)
}
}
# Drop missing/empty ids before any lookup.
feature_ids <- feature_ids[!is.na(feature_ids)]
feature_ids <- feature_ids[feature_ids!=""]
if (length(feature_ids) == 0)
{
stop("There is no feature column in annotatedPeak or
annotatedPeak has size 0!",call.=FALSE)
}
# -- Branch 1: annotate through an org.*.eg.db organism annotation package.
if(!missing(orgAnn)){
if(is(orgAnn, "OrgDb")){
# Accept an OrgDb object by falling back to its symbol name.
orgAnn <- deparse(substitute(orgAnn))
}
if(!is(orgAnn, "character")){
stop("orgAnn must be a character.")
}
if(!grepl(".eg.db",orgAnn,ignore.case=TRUE)){
stop('Annotation database must be *.eg.db',call.=FALSE)
}
is.installed <- function(orgpkg)
is.element(orgpkg, installed.packages()[,1])
# Install the annotation package on the fly if it is not available.
if(!is.installed(orgAnn)){
BiocManager::install(pkgs=orgAnn,
update=FALSE,
ask=FALSE)
}
if(!library(orgAnn,
character.only=TRUE,
logical.return=TRUE)){
if(!silence)
message("No valid gene mapping package as
argument orgAnn is passed in!")
stop("Please refer
http://www.bioconductor.org/packages/release/data/annotation/
for available org.xx.eg.db packages")
}
# require(orgAnn,character.only = TRUE)
# Strip the trailing ".db" so the name can be used as a bimap prefix,
# e.g. "org.Hs.eg.db" -> "org.Hs.eg".
orgAnn<-sub("\\.db$","",orgAnn,ignore.case=TRUE)
#get Entrez ID::entrezIDs
if(feature_id_type=="entrez_id"){
# Inputs are already entrez ids; no mapping step needed.
m_ent<-as.data.frame(feature_ids, stringsAsFactors=FALSE)
colnames(m_ent)<-c("entrez_id")
}else{
# Translate feature_id_type into the corresponding bimap name prefix.
prefix<-switch(feature_id_type,
gene_alias = "ALIAS",
gene_symbol = "SYMBOL",
ensembl_gene_id = "ENSEMBL",
refseq_id = "REFSEQ",
"UNKNOWN"
)
if(prefix=="UNKNOWN"){
stop("Currently only the following type of IDs are supported:
entrez_id, gene_alias, ensembl_gene_id, refseq_id and gene_symbol!",
call.=FALSE)
}
# Fetch the <orgAnn><PREFIX>2EG bimap, e.g. org.Hs.egENSEMBL2EG.
tryCatch(env<-get(paste(orgAnn,prefix,"2EG",sep="")),
error = function(e){
stop(paste("Annotation database ",
orgAnn,
"2EG does not exist!\n
\tPlease try to load annotation
database by library(",
orgAnn,".db)",sep=""),call.=FALSE)
})
# Map every feature id to (possibly several) entrez ids and build a
# two-column table: feature_id -> entrez_id. Unmapped ids keep NA.
entrez <- AnnotationDbi::mget(feature_ids,env,ifnotfound=NA)
gene_ids <- names(entrez)
m_ent <- do.call(rbind,lapply(gene_ids,function(.ele){
r = entrez[[.ele]]
if(!is.na(r[1])) cbind(rep(.ele,length(r)),r)
else {
if(!silence) message(paste("entrez id for '",
.ele, "' not found\n",
sep = ""))
c(.ele, NA)
}
}))
m_ent<-as.data.frame(m_ent, stringsAsFactors=FALSE)
m_ent<-m_ent[!is.na(m_ent[,1]), , drop=FALSE]
colnames(m_ent)<-c(feature_id_type, "entrez_id")
}
entrezIDs<-as.character(m_ent$entrez_id)
entrezIDs<-unique(entrezIDs)
entrezIDs<-entrezIDs[!is.na(entrezIDs)]
if(length(entrezIDs)==0){
stop("No entrez identifier can be mapped by input data based on
the feature_id_type.\nPlease consider to use correct
feature_id_type, orgAnn or annotatedPeak\n",
call.=FALSE);
}
# For each requested identifier type, look up the matching bimap and
# merge its values (";"-collapsed when one entrez id has several hits)
# into m_ent. Unsupported types are removed from IDs2Add so the final
# column selection below stays consistent.
IDs2Add <- unique(IDs2Add)
IDs2Add <- IDs2Add[IDs2Add!=feature_id_type]
IDs <- unique(entrezIDs[!is.na(entrezIDs)])
for(IDtoAdd in IDs2Add){
x<-NULL
if(!silence) message(paste("Adding",IDtoAdd,"... "))
if(IDtoAdd!="entrez_id"){
orgDB<-NULL
tryCatch(orgDB<-get(paste(orgAnn,toupper(IDtoAdd),sep="")),
error = function(e){
if(!silence){
message(paste("The IDs2Add you input, \"",
IDtoAdd,
"\", is not supported!\n",
sep=""))
}
})
if(is.null(orgDB)){
IDs2Add<-IDs2Add[IDs2Add!=IDtoAdd]
next
}
if(!is(orgDB, "AnnDbBimap") & !is(orgDB, "IpiAnnDbMap")){
if(!silence){
message(paste("The IDs2Add you input, \"",
IDtoAdd,
"\", is not supported!\n",
sep=""))
}
IDs2Add<-IDs2Add[IDs2Add!=IDtoAdd]
next
}
x <- AnnotationDbi::mget(IDs, orgDB,ifnotfound=NA)
x <- sapply(x,base::paste,collapse=";")
x <- as.data.frame(x, stringsAsFactors=FALSE)
m_ent <- merge(m_ent, x,
by.x="entrez_id",
by.y="row.names",
all.x=TRUE)
colnames(m_ent)[length(colnames(m_ent))]<-IDtoAdd
}
if(!silence) message("done\n")
}
m_ent<-m_ent[, c(feature_id_type,IDs2Add), drop=FALSE]
}else{
# -- Branch 2: annotate through a biomaRt Mart object using getBM().
if(missing(mart) || !is(mart, "Mart")){
stop('No valid mart object is passed in!',call.=FALSE)
}
IDs2Add<-unique(IDs2Add)
IDs2Add<-IDs2Add[IDs2Add!=feature_id_type]
tryCatch(m_ent<-
getBM(attributes=c(feature_id_type,IDs2Add),
filters = feature_id_type,
values = feature_ids, mart=mart),
error = function(e){
stop(paste("Get error when calling getBM:", e, sep="\n"),
call.=FALSE)
})
if(any(colnames(m_ent)!=c(feature_id_type, IDs2Add)))
colnames(m_ent) <- c(feature_id_type, IDs2Add)
}
if(!silence) message("prepare output ... ")
#dealing with multiple entrez_id for single feature_id
if(ncol(m_ent)==1) stop("None of IDs could be appended. Please double check IDs2Add.")
# Collapse rows that share a feature id into a single row so the result
# can be aligned 1:1 with the input features.
duplicated_ids <-
m_ent[duplicated(m_ent[,feature_id_type]), feature_id_type]
if(length(duplicated_ids)>0){
m_ent.duplicated <- m_ent[m_ent[,feature_id_type] %in% duplicated_ids,]
m_ent.duplicated <- condenseMatrixByColnames(as.matrix(m_ent.duplicated),
feature_id_type)
m_ent<-m_ent[!(m_ent[,feature_id_type] %in% duplicated_ids),]
m_ent<-rbind(m_ent,m_ent.duplicated)
}
if (is(annotatedPeak, "GRanges")){
#rearrange m_ent by annotatedPeak$feature
#data.frame is very important for order...
# Merge, then restore the original peak order via a temporary order-id
# column, and copy each added column onto the GRanges metadata.
orderlist <- data.frame(annotatedPeak$feature)
orderlist <- cbind(1:nrow(orderlist),orderlist)
colnames(orderlist) <- c("orderid___",feature_id_type)
m_ent <- merge(orderlist, m_ent, by=feature_id_type, all.x=TRUE)
m_ent <- m_ent[order(m_ent[,"orderid___"]),
c(feature_id_type, IDs2Add)]
for(IDtoAdd in IDs2Add){
mcols(annotatedPeak)[,IDtoAdd] <- m_ent[,IDtoAdd]
}
}else{
# For plain character input, return the annotation table itself.
annotatedPeak <- m_ent
}
if(!silence) message("done\n")
annotatedPeak
}
|
8bc853cc1253fcb62a231804baf5ed84416242cd
|
2d9fb03feb8626c67ba5d3f1a0815710b621c5f6
|
/R/activity_type_frequence.R
|
570bf7049b4cc31f7bf4c069036d6b359e76daf7
|
[] |
no_license
|
bbrewington/edeaR
|
4c8916bad4c54521764574770ae941983363dc0a
|
02b31d133b5cec68caa6e0c5fa446a6a6275d462
|
refs/heads/master
| 2021-01-19T18:32:49.442081
| 2016-08-27T17:31:36
| 2016-08-27T17:31:36
| 66,726,375
| 0
| 0
| null | 2016-08-27T17:17:51
| 2016-08-27T17:17:51
| null |
UTF-8
|
R
| false
| false
| 1,186
|
r
|
activity_type_frequence.R
|
#' @title Metric: Activity Type Frequency
#'
#' @description Provides summary statistics about the frequency of activity
#' types at the level of traces, cases, activities, resources or
#' resource-activity pairs.
#'
#' @param eventlog The event log to be used. An object of class
#' \code{eventlog}.
#'
#' @param level_of_analysis At which level the analysis of activity type
#' frequency should be performed: trace, case, activity, resource or
#' resource-activity.
#'
#' @export activity_type_frequency
activity_type_frequency <- function(eventlog,
                                    level_of_analysis) {
  # Validate the event log before dispatching.
  stop_eventlog(eventlog)
  allowed <- c("trace", "activity", "case", "resource", "resource-activity")
  if (!(level_of_analysis %in% allowed))
    stop("Level of analysis should be one of the following: trace, case, activity, resource, resource-activity.")
  # Dispatch to the level-specific implementation.
  switch(level_of_analysis,
         "trace" = activity_type_frequency_trace(eventlog),
         "case" = activity_type_frequency_case(eventlog),
         "activity" = activity_type_frequency_activity(eventlog),
         "resource" = activity_type_frequency_resource(eventlog),
         "resource-activity" = activity_type_frequency_resource_activity(eventlog))
}
|
4edeba171661f402a0b5f4a36d5f5a79b4a43c79
|
678a532bc05214556abb1f993b867b390ed5f7ab
|
/man/plot_heatmap_for_k.Rd
|
72cbceb6bc8663e5bd5f8578a9e942799aaa93b0
|
[] |
no_license
|
rdocking/amlpmpsupport
|
d744aa67c8cb882e8fd022b6d2c1ba402c0210c4
|
b1f843ab41b59ca1fc54b044f23cfbfa741f2ae0
|
refs/heads/main
| 2023-02-07T19:00:58.035482
| 2021-01-02T21:56:53
| 2021-01-02T21:56:53
| 271,644,653
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 806
|
rd
|
plot_heatmap_for_k.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.r
\name{plot_heatmap_for_k}
\alias{plot_heatmap_for_k}
\title{Plot a pheatmap for a single value of k}
\usage{
plot_heatmap_for_k(
cluster_assignments.df,
annotation_col.df,
ann_colours.lst,
cluster_input.mat,
annotation_legend = FALSE
)
}
\arguments{
\item{cluster_assignments.df}{A data frame containing cluster assignments for samples}
\item{annotation_col.df}{A data frame containing sample annotations}
\item{ann_colours.lst}{A named list containing annotation colours}
\item{cluster_input.mat}{A matrix of expression values for the heatmap body.}
\item{annotation_legend}{Boolean - whether or not to include an annotation legend.}
}
\description{
Plot a pheatmap for a single value of k
}
|
631b69dd6df2c56b7d44ed1c62a17cadca2718f2
|
898b2aac5e970486cca3c45ed2ffeece244d6731
|
/R/etl_transform.R
|
8137d7e3c57cf546bc433b392d220d3cfeb23204
|
[] |
no_license
|
homerhanumat/airlines
|
d332c4058cc43690ac84a749fde27d6e961636e9
|
84fd964ef6de272bcd2a0ec7e7dbea912e639cae
|
refs/heads/master
| 2021-09-01T22:52:51.966622
| 2017-12-29T02:12:09
| 2017-12-29T02:12:09
| 113,721,910
| 0
| 0
| null | 2017-12-10T04:09:49
| 2017-12-10T04:09:48
| null |
UTF-8
|
R
| false
| false
| 5,342
|
r
|
etl_transform.R
|
# Declare "." (the magrittr pipe placeholder) as a known global so that
# R CMD check does not flag it as an undefined variable.
globalVariables(".")
#' @rdname etl_load.etl_airlines
#' @inheritParams etl_transform.etl_airlines
#' @export
etl_transform.etl_airlines <- function(obj, years = 2015, months = 1:12, ...) {
# Zip archives present in raw_dir for the requested year/month combinations.
must_unzip <- match_files_by_year_months(list.files(attr(obj, "raw_dir")),
pattern = "On_Time_On_Time_Performance_%Y_%m.zip", years, months)
# CSVs already present in load_dir; used to skip months already processed.
unzipped <- match_files_by_year_months(list.files(attr(obj, "load_dir")),
pattern = "flights_%Y_%m.csv", years, months)
# cat(unzipped)
# A zip still needs cleaning when no matching flights_*.csv exists yet;
# both name sets are normalized to a common form before comparison.
missing <- !gsub("On_Time_On_Time_Performance", "flights", must_unzip) %in%
gsub("\\.csv", "\\.zip", unzipped)
tounzip <- must_unzip[missing]
if (length(tounzip) > 0) {
# clean_flights() reads each zip and writes the cleaned CSV to load_dir.
lapply(paste0(attr(obj, "raw_dir"), "/", tounzip), clean_flights)
}
invisible(obj)
}
#' @importFrom readr read_csv
#' @importFrom lubridate make_datetime
# Transform one monthly On-Time Performance zip into a cleaned CSV under the
# sibling load directory (flights_<year>_<month>.csv).
# NOTE(review): the pipeline uses dplyr verbs (mutate/select/arrange) and the
# magrittr pipe that are not imported in this block -- presumably attached
# elsewhere in the package; verify the NAMESPACE.
clean_flights <- function(path_zip) {
# rename the CSV to match the ZIP
load_dir <- gsub("/raw", "/load", dirname(path_zip))
path_csv <- basename(path_zip) %>%
gsub("On_Time_On_Time_Performance", "flights", x = .) %>%
paste0(load_dir, "/", x = .) %>%
gsub("\\.zip", "\\.csv", x = .)
# col_types <- readr::cols(
# DepTime = col_integer(),
# ArrTime = col_integer(),
# CRSDepTime = col_integer(),
# CRSArrTime = col_integer(),
# Carrier = col_character()
# )
# can't get col_types argument to work!
# readr::read_csv(path_zip, col_types = col_types) %>%
# Some tailnumbers are in Latin-1 encoding, for 2001 months 1-12
# and for 2002 months 1 and 2
# detect these months:
latin_month <- function(path_zip) {
# Extract year and month from the "_YYYY_M" part of the file name.
matches <- unlist(regmatches(path_zip,
m = regexec("_(\\d{4})_(\\d{1,2})",
text = path_zip)))
year <- as.numeric(matches[2])
month <- as.numeric(matches[3])
year == 2001 || (year == 2002 & month %in% 1:2)
}
# Move away from deprecated SE versions of data verbs.
# Also, write_csv writes out 1000 (and presumably also x000)
# in scientific notation. Hence (for example) a 10am scheduled
# departure time will be written as 1e3, which cannot be interpreted as
# smallint by PostgreSQL. Hence we apply format() to any numerical
# variables that could take values x000.
# Seems to work with MySQL, too.
flights_df <- readr::read_csv(path_zip) %>%
mutate(year = format(Year, scientific = FALSE),
dep_time = format(DepTime, scientific = FALSE),
dep_delay = format(DepDelay, scientific = FALSE),
sched_dep_time = format(CRSDepTime, scientific = FALSE),
arr_time = format(ArrTime, scientific = FALSE),
arr_delay = format(ArrDelay, scientific = FALSE),
sched_arr_time = format(CRSArrTime, scientific = FALSE)) %>%
# format() turns NA into the string "NA"; restore real missing values.
mutate(dep_time = ifelse(grepl("NA", dep_time), NA, dep_time),
dep_delay = ifelse(grepl("NA", dep_delay), NA, dep_delay),
sched_dep_time = ifelse(grepl("NA", sched_dep_time), NA, sched_dep_time),
arr_time = ifelse(grepl("NA", arr_time), NA, arr_time),
sched_arr_time = ifelse(grepl("NA", sched_arr_time), NA, sched_arr_time),
arr_delay = ifelse(grepl("NA", arr_delay), NA, arr_delay)) %>%
# Rename columns to the nycflights13-style snake_case schema.
select(
year, month = Month, day = DayofMonth,
dep_time, sched_dep_time, dep_delay = dep_delay,
arr_time, sched_arr_time, arr_delay = arr_delay,
carrier = Carrier, tailnum = TailNum, flight = FlightNum,
origin = Origin, dest = Dest, air_time = AirTime, distance = Distance,
cancelled = Cancelled, diverted = Diverted
) %>%
# filter(origin %in% c("JFK", "LGA", "EWR")) %>%
# Derive hour/minute from the HHMM-encoded scheduled departure time.
mutate(hour = as.numeric(sched_dep_time) %/% 100,
minute = as.numeric(sched_dep_time) %% 100,
time_hour = lubridate::make_datetime(as.numeric(year),
month, day, hour, minute, 0))
if ( latin_month(path_zip) ) {
message("Addressing failed parses in tail number:\n")
message("converting Latin-1 tail numbers to UTF-8 so data will load... \n")
flights_df <-
flights_df %>%
mutate(tailnum = iconv(tailnum, "latin1", "UTF-8"))
}
# Write the cleaned, sorted CSV; empty string encodes NA for DB loading.
flights_df %>%
arrange(year, month, day, dep_time) %>%
readr::write_csv(path = path_csv, na = "")
}
## deprecated
#
# unzip_month <- function(path_zip) {
# files <- unzip(path_zip, list = TRUE)
# # Only extract biggest file
# csv <- files$Name[order(files$Length, decreasing = TRUE)[1]]
# message(paste("Unzipping", csv))
# load_dir <- gsub("/raw", "/load", dirname(path_zip))
# unzip(path_zip, exdir = load_dir, overwrite = TRUE, junkpaths = TRUE, files = csv)
#
# # fix unprintable charater bug. See:
# # https://github.com/beanumber/airlines/issues/11
# # UPDATE: this doesn't seem to be an issue since readr uses UTF-8 by default
# path_csv <- paste0(load_dir, "/", csv)
# if (grepl("2001_3.csv", path_csv)) {
# bad <- readLines(path_csv)
# good <- gsub("[^[:print:]]", "", bad)
# writeLines(good, path_csv)
# }
#
# # rename the CSV to match the ZIP
# path_csv_new <- gsub(".zip", ".csv", paste0(load_dir, "/", basename(path_zip)))
# file.rename(path_csv, path_csv_new)
# return(path_csv_new)
# }
|
0362555735c4cbe178a55ce7123fbb676b28530d
|
2e00a38e30564bd1818e76e3f337095afe7aa7a7
|
/man/likert2prop.Rd
|
3f55ed8dd5dfa50df420729389565ba2b733242e
|
[
"MIT"
] |
permissive
|
dtitone-lab/languageEntropy
|
612955f8f5659c0136b26a16c2ee0b89fb8cdcec
|
6e08d9550b3923f7df2d4ebf26d1122fc2bf56c9
|
refs/heads/master
| 2020-04-27T19:26:41.867214
| 2020-04-09T01:26:17
| 2020-04-09T01:26:17
| 174,618,279
| 0
| 0
|
NOASSERTION
| 2019-07-26T16:42:42
| 2019-03-08T22:19:54
|
R
|
UTF-8
|
R
| false
| true
| 1,869
|
rd
|
likert2prop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likert2prop.R
\name{likert2prop}
\alias{likert2prop}
\title{Function to convert Likert scale data to proportions}
\usage{
likert2prop(data, id, ..., colsList = NULL, minLikert = 1)
}
\arguments{
\item{data}{Your dataset}
\item{id}{ID column corresponding to the unique subject ID.}
\item{...}{Columns of the dataset that contain the Likert data to be
converted to proportions. If using this argument, proportions for each column will be
computed by summing together all columns. If you want to specify groups of
columns to be converted independently, you may specify them in the colsList
argument instead}
\item{colsList}{A list of grouped columns. E.g.,
list(c("L1Home","L2Home","L3Home"), c("L1Work","L2Work","L3Work")). Totals
will be computed separately for each group.}
\item{minLikert}{The minimum possible value of your Likert index / scale.
Typically 1. This is used in rebaselining Likert values to 0.}
}
\description{
Function to convert Likert scale data to proportions
}
\examples{
library(languageEntropy)
data(entropyExData) # load example data
# convert Likert scale data to proportions
## first for the home context
entropyExData <- likert2prop(entropyExData, sub, L1Home, L2Home, L3Home)
print(entropyExData)
# next for the work context
entropyExData <- likert2prop(entropyExData, sub, L1Work, L2Work, L3Work)
print(entropyExData)
entropyExData <- likert2prop(entropyExData, sub, L1PercentUse, L2PercentUse, L3PercentUse)
print(entropyExData)
# alternatively, you can convert home and work at the same time
# by passing home and work as separate vectors within a list
data(entropyExData) # reload example data
entropyExData <- likert2prop(entropyExData, sub,
colsList = list(c("L1Home", "L2Home", "L3Home"),
c("L1Work", "L2Work", "L3Work")))
print(entropyExData)
}
|
a22193b56d5fdc646e3b57ba0cdcd9ad2768d96e
|
74b0bd9a78628a9a7c529bc4306940f13e333fa7
|
/CoopGame/man-roxygen/param/n.R
|
1caefbf2daa1b5829d4798d1e29c9da82dff4756
|
[] |
no_license
|
anwanjohannes/CoopGame
|
b5129301a05591704e25b303d86c874af3eb6853
|
69c697dad8e3cd79b5cca62c5f1913a03b9ccd52
|
refs/heads/master
| 2020-05-03T16:52:29.265390
| 2019-04-04T20:03:38
| 2019-04-04T20:03:38
| 178,733,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47
|
r
|
n.R
|
#' @param n represents the number of players.
|
8369c8a7abe203eb1a45a7e4c174514278dbe198
|
97f1e3e6e908a83489e4243268ba539316196176
|
/man/usePkg.Rd
|
f94ab952071f9b1674884f6abafd7027f9644b06
|
[
"Apache-2.0"
] |
permissive
|
ANTsX/ANTsRCore
|
1c3d1da3bea84859da7d18f54c34ae13d2af8619
|
8e234fd1363c0d618f9dc21c9566f3d5464655a2
|
refs/heads/master
| 2023-05-24T23:53:30.886217
| 2023-05-22T02:52:39
| 2023-05-22T02:52:39
| 83,897,912
| 8
| 22
| null | 2023-05-22T02:52:40
| 2017-03-04T14:09:48
|
C++
|
UTF-8
|
R
| false
| true
| 648
|
rd
|
usePkg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/usePkg.R
\name{usePkg}
\alias{usePkg}
\title{Use any package. If package is not installed, this will install from CRAN.}
\usage{
usePkg(packageName, allowInstall = FALSE)
}
\arguments{
\item{packageName}{Name of package as *string*.}
\item{allowInstall}{let the package be installed from CRAN}
}
\value{
TRUE if package successfully loaded, FALSE otherwise.
}
\description{
Use any package. If package is not installed, this will install from CRAN.
}
\examples{
usePkg("randomForest")
usePkg("stats", allowInstall = TRUE)
}
\author{
Benjamin M. Kandel, BB Avants
}
|
871cf5f727d5690b72a2e05b03d02c29a56deabb
|
53d1a914d59666b0b79b0b689f9ca83b6789ca86
|
/Rmds/knit_rmd.R
|
5a6847b7c309b56ce5a85790ad97934b90321982
|
[] |
no_license
|
sheridar/spt5-mutants
|
6c4d71bf45b9e21bfbd6712a88fb151638ea71db
|
80d58ef497b1ae42396c2d4399582d6b6c17721e
|
refs/heads/main
| 2023-04-08T06:00:31.528713
| 2022-08-21T21:31:17
| 2022-08-21T21:31:17
| 527,275,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 359
|
r
|
knit_rmd.R
|
# Command-line wrapper to render an R Markdown file to an output document.
# Usage: Rscript knit_Rmd.R --input <file.Rmd> --output <out.html>
library(rmarkdown)
library(docopt)
# docopt usage string: defines the accepted command-line interface.
doc <- "Usage: knit_Rmd.R [--help] [--input INPUT] [--output OUT]

-i --input INPUT path to rmarkdown
-o --output OUTPUT name of output html file to write
-h --help display this help message"
# Parse the command-line arguments against the usage string.
opts <- docopt(doc)
print(opts)
# Render Rmd
render(
input = opts$input,
output_file = opts$output
)
|
9244207944908e83e76ab6bca8d72e6ec546a13a
|
35f938ce60457589f150e3270be88c600acbff2f
|
/R/EQAO.R
|
8ce6c276c1a6d856f59546cb43590cbdff70186e
|
[] |
no_license
|
cconley/DDSB
|
3cfba05de4316115b6d6ae30fdbaa26f698d1175
|
59652b30c6705e55de19528d4c013e659de9ab22
|
refs/heads/master
| 2021-01-25T14:26:38.146141
| 2018-03-05T18:51:25
| 2018-03-05T18:51:25
| 123,450,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,243
|
r
|
EQAO.R
|
#' For use with EQAO ISD files
#'
#' Collapsing IEP categories into a single column
#'
#' Use: df <- ISD_IEPrecode(df)
#'
#' Function determines whether the ISD data frame is grade 3, 6, 9 or 10 and creates a new IEPcode column of IEP labels
#' Adapted from drsimonj's blog post: https://drsimonj.svbtle.com/creating-corporate-colour-palettes-for-ggplot2
ISD_IEPrecode <- function(x){
# Collapse the per-exceptionality IPRC flag columns into a single IEPcode
# label column. The ISD file format is detected from marker columns:
# "ROverallLevel" (elementary), "MathClassWhen" (grade 9), and
# "OSSLTOutcome" (grade 10/OSSLT); each format uses differently named
# flag columns. Within a format, the first flag equal to "1" wins
# (checked in a fixed priority order); rows with no flag set get "No IEP".
# NOTE(review): the scalar format dispatch is done with nested ifelse();
# the branch bodies assign to x as a side effect. Also note the column
# name x$SIFIPRC_Autism (no underscore) in the second branch -- confirm it
# matches the actual grade 9 ISD layout rather than being a typo.
ifelse("ROverallLevel" %in% colnames(x),
{x$IEPcode <- ifelse(x$SIF_IPRC_Behaviour == "1", "Behaviour",
ifelse(x$SIF_IPRC_Autism == "1", "Autism",
ifelse(x$SIF_IPRC_Deaf == "1", "Deaf",
ifelse(x$SIF_IPRC_Language == "1", "Language",
ifelse(x$SIF_IPRC_Speech == "1", "Speech",
ifelse(x$SIF_IPRC_Learning == "1", "Learning",
ifelse(x$SIF_IPRC_Giftedness == "1", "Giftedness",
ifelse(x$SIF_IPRC_MildIntellectual == "1", "MildIntellectual",
ifelse(x$SIF_IPRC_Developmental == "1", "Development",
ifelse(x$SIF_IPRC_Physical == "1", "Physical",
ifelse(x$SIF_IPRC_Blind =="1", "Blind",
ifelse(x$SIF_IPRC_Multiple == "1", "Multiple","No IEP")
)
)
)
)
)
)
)
)
)
)
)},
ifelse("MathClassWhen" %in% colnames(x),
{x$IEPcode <- ifelse(x$SIF_IPRCBehaviour == "1", "Behaviour",
ifelse(x$SIFIPRC_Autism == "1", "Autism",
ifelse(x$SIF_IPRCDeaf == "1", "Deaf",
ifelse(x$SIF_IPRCLanguage == "1", "Language",
ifelse(x$SIF_IPRCSpeech == "1", "Speech",
ifelse(x$SIF_IPRCLearning == "1", "Learning",
ifelse(x$SIF_IPRCGiftedness == "1", "Giftedness",
ifelse(x$SIF_IPRCMildIntellectual == "1", "MildIntellectual",
ifelse(x$SIF_IPRCDevelopmental == "1", "Development",
ifelse(x$SIF_IPRCPhysical == "1", "Physical",
ifelse(x$SIF_IPRCBlind =="1", "Blind",
ifelse(x$SIF_IPRCMultiple == "1", "Multiple","No IEP")
)
)
)
)
)
)
)
)
)
)
)},
ifelse("OSSLTOutcome" %in% colnames(x),
{x$IEPcode <- ifelse(x$IPRCExBehaviour == "1", "Behaviour",
ifelse(x$IPRCEx_Autism == "1", "Autism",
ifelse(x$IPRCExDeaf == "1", "Deaf",
ifelse(x$IPRCExLanguage == "1", "Language",
ifelse(x$IPRCExSpeech == "1", "Speech",
ifelse(x$IPRCExLearning == "1", "Learning",
ifelse(x$IPRCExGiftedness == "1", "Giftedness",
ifelse(x$IPRCExMildIntellectual == "1", "MildIntellectual",
ifelse(x$IPRCExDevelopmental == "1", "Development",
ifelse(x$IPRCExPhysical == "1", "Physical",
ifelse(x$IPRCExBlind =="1", "Blind",
ifelse(x$IPRCExMultiple == "1", "Multiple","No IEP")
)
)
)
)
)
)
)
)
)
)
)}, x$IEPcode <- "Unknown File Format"
)
)
)
return(x)
}
#' For use with all raw secondary EQAO ISD files
#'
#' Use: df <- ISD_SecCourse(df)
#'
#' Cleaning all Secondary Course type labels
#'
#' Function determines whether the ISD is elementary or secondary assessments and recodes
ISD_SecCourse <- function(x) {
  # Recode secondary course-type codes into readable labels in x$re.course.
  # Two secondary ISD layouts are supported (detected by marker column);
  # elementary files get a placeholder label. Unknown codes become
  # "BadCode"; NA codes stay NA, matching the original nested-ifelse logic.
  if ("LevelOfStudyLanguage" %in% colnames(x)) {
    course_labels <- c("-2" = "Ambiguous", "-1" = "Missing", "0" = "NA",
                       "1" = "Academic", "2" = "Applied",
                       "3" = "Locally Developed", "4" = "ESL/ELD",
                       "5" = "Other")
    recoded <- unname(course_labels[as.character(x$LevelOfStudyLanguage)])
    recoded[is.na(recoded) & !is.na(x$LevelOfStudyLanguage)] <- "BadCode"
    x$re.course <- recoded
  } else if ("Program" %in% colnames(x)) {
    program_labels <- c("1" = "Applied", "2" = "Academic")
    recoded <- unname(program_labels[as.character(x$Program)])
    recoded[is.na(recoded) & !is.na(x$Program)] <- "BadCode"
    x$re.course <- recoded
  } else {
    x$re.course <- "Elementary - Not Applicable"
  }
  return(x)
}
#' For use with all raw EQAO ISD file
#'
#' Cleaning ELL field names
#'
#' Use: df <- ISD_ELL(df)
#'
#' Function determines whether the ISD is elementary or secondary assessments and recodes
ISD_ELL <- function(x){
# Build an ELL (English Language Learner) status label from the ESL/ELD
# flag column, handling the two known ISD column layouts.
# NOTE(review): the first branch writes x$ELLcode while the second branch
# and the fallback write x$EQAOcode -- the output column name therefore
# depends on the input layout. This looks like a copy-paste inconsistency;
# confirm which column name downstream code expects before changing it.
# NOTE(review): codes "2" and "3" map to NULL, which would make ifelse()
# fail ("replacement has length zero") if such codes ever occur -- verify
# whether those codes exist in real data.
ifelse("Background_ESLELD_ALFPDF" %in% colnames(x),
{x$ELLcode <- ifelse(x$Background_ESLELD_ALFPDF == "-1", "Missing",
ifelse(x$Background_ESLELD_ALFPDF == "0", "Not ELL",
ifelse(x$Background_ESLELD_ALFPDF == "1", "ELL",
ifelse(x$Background_ESLELD_ALFPDF == "2", NULL,
ifelse(x$Background_ESLELD_ALFPDF == "3", NULL,"BadCode"
)
)
)
)
)},
ifelse("ESLELD_ALFPDF" %in% colnames(x),
{x$EQAOcode <- ifelse(x$ESLELD_ALFPDF == "-1", "Missing",
ifelse(x$ESLELD_ALFPDF == "0", "Not ELL",
ifelse(x$ESLELD_ALFPDF == "1", "ELL",
ifelse(x$ESLELD_ALFPDF == "2", NULL,
ifelse(x$ESLELD_ALFPDF == "3", NULL,"BadCode"
)
)
)
)
)}, x$EQAOcode <- "Unknown File Format"
)
)
return(x)
}
#' For use with all raw Elementary EQAO ISD file
#'
#' Cleaning FI labels
#'
#' Use: df <- ISD_FI(df)
#'
#' French Immersion is an elementary distinction. Secondary is addressed through course types
#'
#' Function determines whether the ISD is elementary or secondary assessments and recodes
ISD_FI <- function(x) {
  # Recode the elementary French Immersion flag into labels in x$FIcode.
  # Secondary files (no Background_FrenchImmersion column) receive a
  # placeholder label. Unknown codes become "BadCode"; NA codes stay NA,
  # matching the original nested-ifelse behaviour.
  if ("Background_FrenchImmersion" %in% colnames(x)) {
    fi_labels <- c("-1" = "Missing", "0" = "Not FI", "1" = "FI (A)",
                   "2" = "FI (B)", "3" = "FI (C)", "4" = "FI (G6)")
    recoded <- unname(fi_labels[as.character(x$Background_FrenchImmersion)])
    recoded[is.na(recoded) & !is.na(x$Background_FrenchImmersion)] <- "BadCode"
    x$FIcode <- recoded
  } else {
    x$FIcode <- "Secondary - Not Applicable"
  }
  return(x)
}
#For use with all raw EQAO ISD file
#
#Use: df <- EQAO_Gender(df)
#
#Gender is a standard field used in all ISDs and it not assessment specific
ISD_Gender <- function(x){
  # Recode the standard Gender field ("-1"/"1"/"2") into readable labels.
  # Any other value is flagged as "BadCode"; NA propagates unchanged.
  g <- x$Gender
  x$Gendercod <- ifelse(g == "1", "Male",
                 ifelse(g == "2", "Female",
                 ifelse(g == "-1", "Missing", "BadCode")))
  return(x)
}
ISD_Achieve <- function(x) {
  # Recode EQAO overall achievement levels for Reading, Writing and Math.
  # Special letter codes become labels; numeric levels ("1".."4") pass through.
  #
  # Bug fixes: the Writing branch previously tested/fell back on
  # `ROverallLevel` instead of `WOverallLevel`, and the Math fallback used
  # `ROverallLevel` instead of `MOverallLevel` (copy-paste errors).
  #
  # One subject's recode, shared by all three columns.
  recode_level <- function(lvl) {
    ifelse(lvl == "B", "No Data",
    ifelse(lvl == "P", "Pending",
    ifelse(lvl == "Q", "Not Required",
    ifelse(lvl %in% c("R", "W"), "Withheld",
    ifelse(lvl == "X", "Exempt", lvl)))))
  }
  x$Reading <- recode_level(x$ROverallLevel)
  x$Writing <- recode_level(x$WOverallLevel)
  x$Math    <- recode_level(x$MOverallLevel)
  return(x)
}
ISD_AchieveFctr <- function(x) {
  # As ISD_Achieve(), but returns each subject column as a factor with a
  # fixed, ordered-by-convention set of levels (special codes first, then
  # the numeric achievement levels 1-4).
  #
  # Bug fixes: the Writing branch previously tested/fell back on
  # `ROverallLevel` instead of `WOverallLevel`, and the Math fallback used
  # `ROverallLevel` instead of `MOverallLevel` (copy-paste errors).
  achievement_levels <- c("Not Required", "No Data", "Exempt", "Withheld",
                          "Pending", "1", "2", "3", "4")
  # One subject's recode, shared by all three columns.
  recode_level <- function(lvl) {
    ifelse(lvl == "B", "No Data",
    ifelse(lvl == "P", "Pending",
    ifelse(lvl == "Q", "Not Required",
    ifelse(lvl %in% c("R", "W"), "Withheld",
    ifelse(lvl == "X", "Exempt", lvl)))))
  }
  x$Reading <- factor(recode_level(x$ROverallLevel), levels = achievement_levels)
  x$Writing <- factor(recode_level(x$WOverallLevel), levels = achievement_levels)
  x$Math    <- factor(recode_level(x$MOverallLevel), levels = achievement_levels)
  return(x)
}
#' Examples
#' ggplot(iris, aes(Sepal.Width, Sepal.Length, color = Species)) +
#' geom_point(size = 4) +
#' DDSB_scale_color()
#'
#'
#' ggplot(iris, aes(Sepal.Width, Sepal.Length, color = Sepal.Length)) +
#' geom_point(size = 4, alpha = .6) +
#' DDSB_scale_color(discrete = FALSE, palette = "cool")
#'
#' ggplot(mpg, aes(manufacturer, fill = manufacturer)) +
#' geom_bar() +
#' theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
#' DDSB_scale_fill(palette = "mixed", guide = "none")
|
6ff5c29d0e34d09734fe411f0cc320a46bb764bd
|
b99abf9744dc0fb59684470d4994120c8577562b
|
/Sous_Routine_MOBITC/MOBITC_Export_Rapport_Trace.R
|
e380d926b577289cfe622ec3e90e262946160a7d
|
[] |
no_license
|
ctrmal/dtermed.MobiTC-1
|
e7ba094f79927f0aa429bec7f2cde0fac7a48fef
|
96cbedfb33687d6394e0473486288ed4a257a5d0
|
refs/heads/master
| 2020-06-26T11:37:03.569225
| 2019-06-13T09:29:36
| 2019-06-13T09:29:36
| 199,621,400
| 0
| 0
| null | 2019-07-30T09:36:03
| 2019-07-30T09:36:02
| null |
UTF-8
|
R
| false
| false
| 2,962
|
r
|
MOBITC_Export_Rapport_Trace.R
|
# Example/default run parameters for MOBITC_Export_Rapport_Trace below.
# Hard-coded Windows working directory and file names -- edit before sourcing.
chemin_rep_travail="C:\\0_ENCOURS\\TPM\\Erosion\\MobiTC_rivages"
dirr=R.home()
# MobiTC helper scripts live under the R home directory, in Cerema/MOBITC.
chem_mobitc=paste(dirr,"/Cerema/MOBITC",sep="")
# Intersections file (V1 = retained limits), skeleton and transect base names.
fichier_intersectionv1="20190517T152421-TPM-Sque-cont-Tra-P50-L0100sel-lisse-filtre3-mod-IntersTDC-v1.txt"
fichier_sque="20190517T152421-TPM-Sque-cont"
fichier_trace="20190517T152421-TPM-Sque-cont-Tra-P50-L0100sel-lisse-filtre3-mod"
MOBITC_Export_Rapport_Trace<-function(chem_mobitc,chemin_rep_travail,fichier_sque,fichier_trace,fichier_intersectionv1)
{
# Render one HTML report (Rivages_report-6.Rmd) per (axis, transect) pair
# that has more than one retained shoreline intersection in the V1 file.
# Reports are written to <chemin_rep_travail>/Rapport/. Returns a one-element
# list with the completion message "Export fini".
# NOTE(review): require()+install.packages() at call time is fragile;
# consider declaring rmarkdown/flexdashboard as proper dependencies.
if(!require(rmarkdown)){install.packages("rmarkdown")}
library(rmarkdown)
if(!require(flexdashboard)){install.packages("flexdashboard")}
library(flexdashboard)
# Read the run parameters: the init file stores values at fixed line numbers.
fichier_init=paste(chem_mobitc,"\\Init_Routine_MobiTC.txt",sep="")
fid=file(fichier_init, open = "r+")
lignes=readLines(fid)
produc=lignes[14]       # producer -- TODO confirm meaning from init-file layout
ICtx=lignes[16]         # presumably confidence-interval level; verify
datedebgraph=lignes[18] # graph start date
datefingraph=lignes[20] # graph end date
dateprosp=lignes[22]    # prospective date
close(fid)
# Path to the V1 intersections file (normally the retained limits).
chem_intersectionv1=paste(chemin_rep_travail,"\\",fichier_intersectionv1,sep="")
# All intersections: the V0 file name is derived from the V1 file name
# by replacing the "-v1.txt" suffix (last 7 characters) with "-v0.txt".
fichier_intersectionv0=paste(substr(fichier_intersectionv1,1,nchar(fichier_intersectionv1)-7),"-v0.txt",sep="")
chem_intersectionv0=paste(chemin_rep_travail,"\\",fichier_intersectionv0,sep="")
tab00=read.table(chem_intersectionv0,sep="\t",header=TRUE,row.names = NULL)
tab11=read.table(chem_intersectionv1,sep="\t",header=TRUE,row.names = NULL)
# Convert the date columns of both tables with the project helper.
source(paste(chem_mobitc,"/Sous_Routine_MOBITC/MOBITC_Convertdate_2.R",sep=""))
tab=MOBITC_Convertdate_2(tab00) # all intersections
# For AOR.
tab1=MOBITC_Convertdate_2(tab11) # retained intersections only
nomdirrapport=paste(chemin_rep_travail,"\\Rapport",sep="")
# NOTE(review): comparing a logical to the string "FALSE" works only by
# accidental coercion; `if (!file.exists(...))` would be the safe form.
if (file.exists(nomdirrapport)=="FALSE"){dir.create(nomdirrapport)}
for (iaxe in 1 : length(unique(tab$NAxe)))
{
itemp=which(tab$NAxe ==unique(tab$NAxe)[iaxe])
# Number of transects on this axis.
NbTrace=length(unique(tab$NTrace[itemp]))
for (itr in 1:NbTrace)
{
# Current axis/transect identifiers.
NAxe=unique(tab$NAxe)[iaxe]
NTrace=unique(tab$NTrace[itemp])[itr]
extraittab1=tab1[which(tab1$NAxe == NAxe & tab1$NTrace == NTrace),]
# Only render when at least two retained intersections exist for this pair.
if (length(extraittab1$Distance)>1)
{
chem_rmd=paste(chem_mobitc,"/Sous_Routine_MOBITC/Rivages_report-6.Rmd",sep="")
chem_rapport=paste0(chemin_rep_travail,"/Rapport/Rapport-MobiTC-Naxe",NAxe, "-Ntrace", NTrace, ".html")
rmarkdown::render(chem_rmd, params = list(chemin_rep_travail = chemin_rep_travail,chem_mobitc = chem_mobitc,fichier_sque=fichier_sque,fichier_intersectionv1=fichier_intersectionv1,fichier_trace=fichier_trace,iaxe=iaxe,itr=itr),output_file = chem_rapport )
}
}
}
textexportrapport="Export fini"
return(list(textexportrapport))
}
|
2c07558f3c0a923174df8538c79b52b4122ba946
|
73f1718e2d96d126ca8e5c54efcf66984daaab3d
|
/MutSpot_Rpackage/man/mutPredict.snv.find.motif.Rd
|
a9f3a02a678e41529663ed80dc1f25e2fa050128
|
[] |
no_license
|
skandlab/MutSpot
|
f28e496eabeb526500a2820d13de973cb0cc9749
|
1189234c1fcffc5cfacec805a1e43e20374dc995
|
refs/heads/master
| 2023-05-11T02:54:15.479497
| 2023-05-08T02:54:45
| 2023-05-08T02:54:45
| 96,533,637
| 10
| 11
| null | 2019-03-15T12:23:16
| 2017-07-07T11:50:06
|
R
|
UTF-8
|
R
| false
| true
| 565
|
rd
|
mutPredict.snv.find.motif.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutPredict.snv.find.motif.R
\name{mutPredict.snv.find.motif}
\alias{mutPredict.snv.find.motif}
\title{Extract nucleotide contexts for each site in SNV hotspots.}
\usage{
mutPredict.snv.find.motif(seq, motifs)
}
\arguments{
\item{seq}{Dataframe containing DNA sequences of each site in the SNV hotspot.}
\item{motifs}{Selected nucleotide contexts to be extracted.}
}
\value{
Nucleotide context feature matrix.
}
\description{
Extract nucleotide contexts for each site in SNV hotspots.
}
|
256c702062e2913d7e570c8ccfca91e9cde6eaca
|
539f352d0959cd134a2846728281d43c5266e121
|
/staging/recurrent/src/helper.R
|
83f25819d49753460a42ea490f5582357e6562ba
|
[
"MIT"
] |
permissive
|
chendaniely/multidisciplinary-diffusion-model-experiments
|
56420f065de42f4fe080bc77988fcfe9592182c1
|
04edf28bb1bfadaff7baf82b8e3af02a3f34bf6d
|
refs/heads/master
| 2016-09-06T10:03:27.798846
| 2016-02-28T19:01:26
| 2016-02-28T19:01:26
| 24,729,632
| 2
| 0
| null | 2015-11-24T17:04:32
| 2014-10-02T17:46:19
|
R
|
UTF-8
|
R
| false
| false
| 5,170
|
r
|
helper.R
|
################################################################################
#
# Preping data functions
#
################################################################################
#' parse columns from the link.values dataframe such that
#' a separate column represents the influencing node type, influencing node
#' index, and link (weight) value between the link
#' note: links unidirectional
parse_link_values_file <- function(link_values_file_df){
  # Parse link strings of the form "jType:jValue->iType:iValue" held in
  # column V1 into separate columns (from/to endpoints, then type and index
  # of each endpoint). Column V1 is renamed `original` and V2 `weights`.
  # Links are unidirectional: j influences i.
  #
  # Improvements: removed dead `dim()` statements (their values were
  # discarded); replaced the stringr dependency with base strsplit(), with an
  # explicit as.character() so factor columns are handled too.
  link_values <- link_values_file_df
  # Split each string on `sep` (taken literally) and keep the part at `idx`.
  split_part <- function(strings, sep, idx) {
    sapply(strsplit(as.character(strings), sep, fixed = TRUE), "[[", idx)
  }
  link_values$from <- split_part(link_values$V1, "->", 1)
  link_values$to   <- split_part(link_values$V1, "->", 2)
  link_values$j_type  <- split_part(link_values$from, ":", 1)
  link_values$j_value <- split_part(link_values$from, ":", 2)
  link_values$i_type  <- split_part(link_values$to, ":", 1)
  link_values$i_value <- split_part(link_values$to, ":", 2)
  names(link_values)[1] <- 'original'
  names(link_values)[2] <- 'weights'
  return(link_values)
}
get_same_bank_df <- function(link_values_df){
  # Keep only links whose two endpoints are both in the Input bank.
  both_input <- link_values_df$j_type == 'Input' &
    link_values_df$i_type == 'Input'
  link_values_df[both_input, ]
}
get_same_bank_sub_df <- function(same_bank_df,
                                 cols = c('j_value', 'i_value', 'weights')){
  # Project the same-bank link table down to the requested columns
  # (by default the endpoint indices and the link weight).
  same_bank_df[, cols]
}
reshape_weights_df <- function(df, value_var = 'weights'){
  # Pivot the long (i_value, j_value, weight) table into a wide matrix-like
  # data frame: rows = i_value, columns = j_value, cells = value_var.
  wide_form <- dcast(df, i_value ~ j_value, value.var = value_var)
  row.names(wide_form) <- wide_form$i_value
  # Drop the now-redundant i_value column, keeping the others in order.
  wide_form[, setdiff(names(wide_form), 'i_value')]
}
sort_rows_columns_df <- function(df){
  # Reorder columns, then rows, into ascending numeric order of their names
  # (names are numeric strings; see order_vector_index).
  df <- df[, order_vector_index(names(df))]
  df <- df[order_vector_index(row.names(df)), ]
  df
}
sort_rows_columns_matrix <- function(m){
  # Reorder a named matrix so that both its columns and its rows appear in
  # lexically sorted dimname order.
  m <- m[, sort(colnames(m))]
  m[sort(rownames(m)), ]
}
order_vector_index <- function(unsorted){
  # Given a vector of numeric strings, return the positions that would put
  # it into ascending numeric order (anchored exact match per value, so
  # "1" does not match "10").
  ascending <- sort(as.numeric(unsorted))
  sapply(ascending, function(val) grep(paste0("^", val, "$"), unsorted))
}
randomize_weights <- function(link_values_df,
                              name_of_weight_column,
                              randomize_min,
                              randomize_max){
  # Replace every value in `name_of_weight_column` with an independent draw
  # from Uniform(randomize_min, randomize_max), rounded to 4 decimal places.
  #
  # Bug fix: the bounds were previously hard-coded to runif(1, -10, 10),
  # silently ignoring the randomize_min/randomize_max arguments. Also
  # replaced the row-wise apply() with a single vectorized runif() call.
  n_links <- nrow(link_values_df)
  link_values_df[, name_of_weight_column] <-
    round(runif(n_links, randomize_min, randomize_max), 4)
  return(link_values_df)
}
get_opposite_bank_df <- function(link_values_df,
                                 weight_col_name = 'weights',
                                 keep = 'odd',
                                 randomize_weights = FALSE,
                                 randomize_min = -10,
                                 randomize_max = 10){
  # Extract InputMirror -> Input links, keep only the value/weight columns,
  # optionally re-draw the weights from U(randomize_min, randomize_max), and
  # return only the odd- (or even-) numbered rows.
  #
  # Bug fix: the function previously filtered the global `link_values`
  # instead of its `link_values_df` argument. Also: empty input no longer
  # errors (seq(1, 0, 2) used to fail when no rows matched), and the
  # row-wise apply() was replaced by one vectorized runif() call.
  opposite_bank <- link_values_df[link_values_df$j_type == 'InputMirror' &
                                    link_values_df$i_type == 'Input', ]
  opposite_bank_sub <- opposite_bank[, c('j_value', 'i_value', 'weights')]
  if (randomize_weights == TRUE) {
    opposite_bank_sub[, weight_col_name] <-
      runif(nrow(opposite_bank_sub), min = randomize_min, max = randomize_max)
  }
  idx <- seq_len(nrow(opposite_bank_sub))
  if (keep == 'odd') {
    keep_rows <- idx[idx %% 2 == 1]
  } else {
    keep_rows <- idx[idx %% 2 == 0]
  }
  weights_opposite_bank <- opposite_bank_sub[keep_rows, ]
  return(weights_opposite_bank)
}
get_hidden_bank_df <- function(link_values_df,
                               weight_col_name = 'weights',
                               randomize_weights = FALSE,
                               randomize_min = -10,
                               randomize_max = 10){
  # Extract Hidden -> Input links, keep only the value/weight columns, and
  # optionally re-draw the weights from U(randomize_min, randomize_max).
  #
  # Bug fixes: the filter previously mixed the argument with the global
  # `link_values` (link_values$i_type); the result was also returned
  # invisibly via a trailing assignment -- it is now returned explicitly.
  hidden_bank <- link_values_df[link_values_df$j_type == 'Hidden' &
                                  link_values_df$i_type == 'Input', ]
  hidden_bank_sub <- hidden_bank[, c('j_value', 'i_value', 'weights')]
  if (randomize_weights == TRUE) {
    hidden_bank_sub[, weight_col_name] <-
      runif(nrow(hidden_bank_sub), min = randomize_min, max = randomize_max)
  }
  return(hidden_bank_sub)
}
abr_type <- function(type){
  # Two-letter abbreviation for a node-bank type name. Unknown types yield
  # NULL, matching the original if/else chain falling through with no match.
  switch(type,
         Hidden = "hi",
         InputMirror = "im",
         Input = "in",
         HiddenMirror = "hm",
         bias = "bi",
         ExternalInput = "ei",
         ExternalMirror = "em")
}
|
5897cef45303b7b8e666abd2ba9c89798a3fedd8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pastecs/examples/stat.pen.Rd.R
|
9e7dc2666ee572b7fec78c3879edd9f900c46bee
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229
|
r
|
stat.pen.Rd.R
|
library(pastecs)
### Name: stat.pen
### Title: Pennington statistics on a data frame or time series
### Aliases: stat.pen
### Keywords: ts
### ** Examples
# Compute Pennington statistics (with basic and descriptive summaries) for
# selected columns of the bundled marbio plankton data set.
data(marbio)
stat.pen(marbio[,c(4, 14:16)], basic=TRUE, desc=TRUE)
|
676d1c52f16dd13dc6ad2f8f73750be257c1439d
|
99d0e8a1a8dae971ab6ebb4a15c8c6a3620a7c50
|
/do_MethSig.R
|
cc27de9c9be16af9d7f8b323d8a3a2d7039e31e1
|
[] |
no_license
|
microtsiu/WGBSSuite
|
93ce2d6f00d1d91e2b884671ce098dc7910dceed
|
b775f4328f50f0fdb298f664f8bc328e315dcf52
|
refs/heads/master
| 2020-09-16T21:52:40.022409
| 2016-07-05T13:31:13
| 2016-07-05T13:31:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,178
|
r
|
do_MethSig.R
|
library(methylSig)
par_methsig_benchmark <- function(methsig_cutoffs,methsig_qvals,locs,dfs,max_distance,min_CpG,methdiff,methdiff_cutoff){
# Parallel benchmark over a grid of q-value cutoffs: for each cutoff,
# predicted differentially-methylated blocks are built from the per-site
# q-values (extract_blocks_methsig), scored against the reference blocks
# `dfs` (score_overlap -- presumably the simulated truth; confirm), and the
# per-cutoff score vectors are column-bound by foreach.
# Requires a registered %dopar% backend (e.g. doParallel).
#methsig_cutoffs<-c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1)
#methsig_ROC <- matrix(data=0,nrow=2,ncol=length(methsig_cutoffs))
foreach(i=1:length(methsig_cutoffs),.combine=cbind) %dopar% {
#methsig_points<-extract_predicted_methsig_cDMR_points(methsig_qvals,methsig_cutoffs[i],8,locs,10)
methsig_dfs<-extract_blocks_methsig(methsig_qvals,methsig_cutoffs[i],min_CpG,locs,max_distance,methdiff,methdiff_cutoff)#
# print() both logs progress and yields the value foreach combines.
x<-score_overlap(dfs,methsig_dfs[[2]],methsig_cutoffs[i])
print(x)
}
}
par_methsig_benchmark_DMRs <- function(methsig_cutoffs,methsig_qvals,locs,dfs_locs,max_distance,min_CpG,methdiff,methdiff_cutoff){
# Like par_methsig_benchmark(), but scores at the DMR (region) level:
# the predicted start/end pairs (element [[4]] of extract_blocks_methsig's
# return) are compared to the reference region locations `dfs_locs` via
# score_overlap_DMR. Requires a registered %dopar% backend.
foreach(i=1:length(methsig_cutoffs),.combine=cbind) %dopar% {
#for(i in 1:length(methsig_cutoffs)) {
#methsig_points<-extract_predicted_methsig_cDMR_points(methsig_qvals,methsig_cutoffs[i],8,locs,10)
methsig_dfs<-extract_blocks_methsig(methsig_qvals,methsig_cutoffs[i],min_CpG,locs,max_distance,methdiff,methdiff_cutoff)#
x<-score_overlap_DMR(dfs_locs, methsig_dfs[[4]],methsig_cutoffs[i])
# print() both logs progress and yields the value foreach combines.
print(x)
}
}
methsig_benchmark <- function(methsig_cutoffs,methsig_qvals,locs,dfs){
  # Serial benchmark over a grid of q-value cutoffs: for each cutoff,
  # predicted differentially-methylated sites are derived from the per-site
  # q-values (extract_predicted_methsig_cDMR) and scored against the
  # reference blocks `dfs` via score_overlap. Returns the list of scores,
  # one element per cutoff.
  #
  # Bug fix: the `methsig_cutoffs` argument was previously overwritten by a
  # hard-coded sequence c(0, 0.1, ..., 1), so caller-supplied cutoffs were
  # silently ignored.
  s <- list()
  for (i in seq_along(methsig_cutoffs)) {
    methsig_dfs <- extract_predicted_methsig_cDMR(methsig_qvals, methsig_cutoffs[i], 1, locs, 10)
    x <- score_overlap(dfs, methsig_dfs)
    s[[length(s) + 1]] <- x
  }
  return(s)
}
methsig_for_bench <- function(data_set,number_of_samples,number_of_replicas,locs){
# Build a methylSig result for benchmarking from a simulated data set.
#load a methsig object from file
# "meth.rda" (expected in the working directory) provides a saved methylSig
# object `meth` used as a template whose slots are overwritten downstream.
load("meth.rda")
reformatted_for_methsig <- reformat_for_methsig(data_set,number_of_samples,number_of_replicas,locs,meth)
return(reformatted_for_methsig)
}
reformat_for_methsig <- function (data_set,number_of_samples,number_of_replicas,locs,meth){
# Repack a simulated data set into the template methylSig object `meth`
# (its slots are overwritten in place) and run methylSigCalc on it.
# The simulated table is assumed to hold, per sample/replicate, a group of
# 4 columns from which numC, numT and coverage are derived -- TODO confirm
# the exact column layout against the simulator's output.
data_set<-t(data_set)
l<-length(data_set[[1]][,1])
size_of_set <- number_of_samples*number_of_replicas
# Preallocate coverage / T-count / C-count matrices: one column per
# sample-replicate combination, one row per CpG site.
cov <- matrix(data=0,nrow=l,ncol=size_of_set)
numT <- matrix(data=0,nrow=l,ncol=size_of_set)
numC <- matrix(data=0,nrow=l,ncol=size_of_set)
ids <- data_set[[2]]
start <- data_set[[2]]
end <-data_set[[2]]+1
# Single chromosome, unstranded placeholder annotation.
chr <- matrix(data=1,nrow=l,ncol=1)
strand <- matrix(data=F,nrow=l,ncol=1)
sample.ids <- matrix(data='A',nrow=size_of_set,ncol=1)
# NOTE(review): nrow=6 is hard-coded here (unlike the other size_of_set
# allocations) and `sample.filenames` is never used afterwards.
sample.filenames <- matrix(data=0,nrow=6,ncol=1)
treatment <- matrix(data=1,nrow=size_of_set,ncol=1)
#destranded
destranded <- TRUE
#resolution
resolution <- "base"
#options
# NOTE(review): destranded/resolution/options are computed but never written
# into `meth`; they appear to be leftovers.
options <- "maxCount=500 & minCount=1 & assembly=hg18 & context=CpG"
for(i in 1:number_of_samples){
for(j in 1:number_of_replicas){
# Each (sample i, replicate j) occupies 4 consecutive source columns,
# offset by 2 header columns: `row` is the first of them.
row <- (((i-1)*(number_of_replicas*4))+((j-1)*4)+1)+2
index <- ((i-1)*number_of_replicas)+j
cov[,index]<-data_set[[1]][,(row+2)]
numT[,index]<-data_set[[1]][,row+1]-data_set[[1]][,row]
numC[,index]<-data_set[[1]][,row]
treatment[index]<-i
sample.ids[index]<-paste0(i,j)
}
}
# Overwrite the template's slots with the simulated data.
meth@data.numCs <- numC
meth@data.coverage <- cov
meth@data.numTs <- numT
meth@data.ids <- ids
meth@data.start <- start
meth@data.end <- end
meth@treatment <- as.numeric(treatment)
meth@sample.ids <- as.character(sample.ids)
meth@data.chr <- meth@data.chr[1:l]
meth@data.strand <- meth@data.strand[1:l]
gs<-seq(1,number_of_samples)
# Run the differential-methylation test (local dispersion, 2kb window).
# NOTE(review): num.cores=12 is hard-coded; consider parameterizing.
myDiffSigboth = methylSigCalc(meth, groups=gs, min.per.group=number_of_samples,local.disp=TRUE, winsize.disp=2000,num.cores=12)
return(myDiffSigboth)
}
extract_blocks_methsig <-function (prob_diff,thresh,cutoff,locs,max_dist,methdiff,methdiff_cutoff){
# Group consecutive significant sites into predicted blocks ("DMRs").
# A site is significant when prob_diff < thresh AND |methdiff| > methdiff_cutoff;
# runs longer than `cutoff` sites become blocks.
# Returns list(points, test, cutoff, preds):
#   points: list of c(start_index, end_index, n_sites) per block
#   test:   1xN matrix flagging individually significant sites
#   cutoff: the minimum-run-length used
#   preds:  list of c(start_index, end_index) per block
# NOTE(review): `locs` and `max_dist` are accepted but never used here
# (unlike extract_predicted_methsig_cDMR, which gaps on distance).
preds<-list()
preds_ind<-1
diff_meth <- matrix(data=0,nrow=1,ncol=length(prob_diff))
test <- matrix(data=0,nrow=1,ncol=length(prob_diff))
meth_sites <- matrix(data=0,nrow=1,ncol=length(prob_diff))
# Pass 1: store at each significant site its position within the current
# run of consecutive significant sites (counter resets on a miss).
# NOTE(review): `c` shadows base::c as a variable here; calls like
# c(on, off, c) below still resolve to the function, but renaming would
# be clearer.
c<-1
for(i in 2:length(prob_diff)){
if((prob_diff[i] < thresh)&&(abs(methdiff[i]) > methdiff_cutoff)){
diff_meth[i] = c;
test[i] = 1
c<-c+1
}else{
c<-1
}
}
# Pass 2 (backwards): where a run counter exceeds `cutoff`, backfill the
# whole run with 1s using a "grace" countdown; everything else becomes 0.
grace<-0
for(i in length(diff_meth):1){
if(grace > 0){
diff_meth[i] = 1;
grace<-grace-1;
}else if(diff_meth[i] > cutoff){
grace<-diff_meth[i]-1
diff_meth[i] = 1;
}else{
diff_meth[i] <- 0;
grace<-0
}
}
# Pass 3: scan the 0/1 mask and emit (start, end, count) per block.
on<-0
off<-0
points=list()
c<-0
for(i in 1:length(diff_meth)){
if(diff_meth[i] == 1){
if(on != 0){
off <- i
c<-c+1
}else{
on <- i
c<-c+1
}
}else{
if(on != 0){
if(off != 0){
points[[(length(points)+1)]]<-c(on,off,c)
preds[[preds_ind]]<-c(on,off)
preds_ind<-preds_ind+1
meth_sites[on:off]<-1
on<-0
off<-0
c<-0
}
}else{
on<-0
off<-0
c<-0
}
}
}
# Flush a block still open at the end of the vector.
if(on != 0){
if(off != 0){
points[[(length(points)+1)]]<-c(on,off,c)
meth_sites[on:off]<-1
}
}
return(list(points,test,cutoff,preds))
}
extract_predicted_methsig_cDMR <-function (prob_diff,thresh,cutoff,locs,max){
# Produce a per-site 0/1 mask of predicted differentially-methylated sites.
# A site counts toward a run when prob_diff < thresh AND the gap to the
# previous site (locs[i] - locs[i-1]) is below `max`; runs longer than
# `cutoff` sites are kept (mask = 1), everything else is 0.
diff_meth <- matrix(data=0,nrow=1,ncol=length(prob_diff))
# Pass 1: run-length counter over consecutive qualifying sites.
c<-1
for(i in 2:length(prob_diff)){
if((prob_diff[i] < thresh)&((locs[i]-locs[i-1]) < max)){
diff_meth[i] = c;
c<-c+1
}else{
c<-1
}
}
# Pass 2 (backwards): backfill runs longer than `cutoff` with 1s via a
# "grace" countdown; all other entries become 0.
grace<-0
for(i in length(diff_meth):1){
if(grace > 0){
diff_meth[i] = 1;
grace<-grace-1;
}else if(diff_meth[i] > cutoff){
grace<-diff_meth[i]-1
diff_meth[i] = 1;
}else{
diff_meth[i] <- 0;
grace<-0
}
}
# Pass 3: re-scan the mask and re-stamp each closed run as 1s.
# NOTE(review): this pass appears redundant with pass 2 (the mask is
# already 0/1) and also never flushes a run still open at the end;
# kept as-is since only mask values of 1 are re-written.
on<-0
off<-0
points=list()
for(i in 1:length(diff_meth)){
if(diff_meth[i] == 1){
if(on != 0){
off <- i
}else{
on <- i
}
}else{
if(on != 0){
if(off != 0){
diff_meth[on:off]<-1
on<-0
off<-0
}
}else{
on<-0
off<-0
}
}
}
return(diff_meth)
}
|
c32b127cc0e6f79f59489cc2fd804502c428d75d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GenomicMating/examples/getGaSolutions.rd.R
|
bcee82f0973b63337fedbc9bb951c6739497e672
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,407
|
r
|
getGaSolutions.rd.R
|
library(GenomicMating)
### Name: getGaSolutions
### Title: getGaSolutions
### Aliases: getGaSolutions Kmatfunc calculatecrossvalue getstatsM1
### getstatsM2 getstatsM3 getstatsfromsim pairs3d par.name mapfunct
### par.position tails
### ** Examples
## Not run:
##D library(GenomicMating)
##D
##D ###Create 100 markers for two sets of populations of size 20.
##D N=20
##D nmarkers=100
##D Markers<-c()
##D for (i in 1:N){
##D Markers<-rbind(Markers,rbinom(nmarkers, 2,.1)-1)
##D }
##D
##D
##D Markers2<-c()
##D for (i in 1:N){
##D Markers2<-rbind(Markers2,rbinom(nmarkers, 2,.1)-1)
##D }
##D
##D ###Marker effects for a trait.
##D markereffects<-rep(0,nmarkers)
##D markereffects[sample(1:nmarkers,nmarkers/2)]<-rnorm(nmarkers/2)
##D Markers[1:5,1:5]
##D
##D #######Relationship matrices (K only for the first population.
##D ##K2 for both populations together.)
##D #library(parallel)
##D K=Amat.pieces(rbind(Markers), pieces=5)
##D
##D K2=Amat.pieces(rbind(Markers,Markers2), pieces=5)
##D K[1:5,1:5]
##D
##D ####putting names
##D rownames(Markers)<-paste("l", 1:nrow(Markers),sep="_")
##D rownames(Markers2)<-paste("l", (nrow(Markers)+1):(nrow(Markers)+
##D nrow(Markers2)),sep="_")
##D rownames(K2)<-colnames(K2)<-c(rownames(Markers),rownames(Markers2))
##D rownames(K)<-colnames(K)<-c(rownames(Markers))
##D
##D
##D ###Best genotype in pop 1
##D which.max(Markers%*%markereffects)
##D markermap=as.matrix(data.frame(chr=rep(1,nmarkers),
##D pos=seq(0,1,length=nmarkers)))
##D
##D colnames(Markers)<-1:nmarkers
##D
##D ########Mating within pop 1, using method 1.
##D ########Adjust genetic algorithm paparmeters for convergence.
##D
##D gasols<-getGaSolutions(Markers=Markers,Markers2=NULL, K=K,
##D markereffects=markereffects,markermap=markermap,nmates=10,
##D minparents=3, impinbreedstepsize=.02, impvar=.01,
##D impforinbreed=.01,npopGA=50, nitGA=10, miniters=10,minitbefstop=20,
##D plotiters=TRUE,mc.cores=1,nelite=20, mutprob=0.8, noself=TRUE,
##D method=1, type=0L, generation=0L)
##D
##D gasols
##D
##D
##D ######Mating between pop1 and pop2. Method 1.
##D
##D gasols1<-getGaSolutions(Markers=Markers,Markers2=Markers2, K=K2,
##D markereffects,markermap=markermap,nmates=10,
##D minparents=3,
##D impinbreedstepsize=.02, impvar=.02,
##D impforinbreed=.07,
##D npopGA=50, nitGA=10, miniters=10,minitbefstop=20,
##D plotiters=TRUE,
##D mc.cores=2,nelite=20, mutprob=0.8, noself=F, method=1,
##D type=0L, generation=0L)
##D
##D
##D ######Mating between pop1 and pop2. Method 2.
##D
##D gasols2<-getGaSolutions(Markers=Markers,Markers2=Markers2, K=K2,
##D markereffects,markermap=markermap,nmates=10,
##D minparents=3,
##D impinbreedstepsize=.02, impvar=.02,
##D impforinbreed=.07,
##D npopGA=50, nitGA=10, miniters=10,minitbefstop=20,
##D plotiters=TRUE,
##D mc.cores=2,nelite=20, mutprob=0.8, noself=F, method=2,
##D type=0L, generation=0L)
##D
##D ####for method 3 polyploid. Markers need to be coded between 0 and 1.
##D N=20
##D nmarkers=100
##D Markers<-c()
##D for (i in 1:N){
##D Markers<-rbind(Markers,runif(nmarkers))
##D }
##D
##D
##D Markers2<-c()
##D for (i in 1:N){
##D Markers2<-rbind(Markers2,runif(nmarkers))
##D }
##D
##D markereffects<-rep(0,nmarkers)
##D markereffects[sample(1:nmarkers,nmarkers/2)]<-rnorm(nmarkers/2)
##D Markers[1:5,1:5]
##D #library(parallel)
##D K=Amat.pieces(rbind(Markers)*2-1, pieces=5)
##D
##D K2=Amat.pieces(rbind(Markers,Markers2)*2-1, pieces=5)
##D K[1:5,1:5]
##D rownames(Markers)<-paste("l", 1:nrow(Markers),sep="_")
##D rownames(Markers2)<-paste("l", (nrow(Markers)+1):(nrow(Markers)+nrow(Markers2)),sep="_")
##D rownames(K2)<-colnames(K2)<-c(rownames(Markers),rownames(Markers2))
##D rownames(K)<-colnames(K)<-c(rownames(Markers))
##D
##D which.max(Markers%*%markereffects)
##D markermap=as.matrix(data.frame(chr=rep(1,nmarkers),pos=seq(0,1,length=nmarkers)))
##D
##D colnames(Markers)<-1:nmarkers
##D
##D
##D gasols3<-getGaSolutions(Markers=Markers,Markers2=Markers2, K=K2,
##D markereffects,markermap=markermap,nmates=10,
##D minparents=1,
##D impinbreedstepsize=.02, impvar=.02,
##D impforinbreed=.07,
##D npopGA=50, nitGA=10, miniters=10,minitbefstop=20,plotiters=TRUE,
##D mc.cores=1,nelite=20, mutprob=0.8, noself=F, method=3,
##D type=0L, generation=0L)
##D
##D
##D gasols3
##D
## End(Not run)
|
246135fc22bc5f44ab9f9979b6b455d1958d2383
|
c09007f15f05d68f99650619d4bf5c6ed5471295
|
/R/version.R
|
e67cdf0b3a760726142a1a1716b8be4f0944501b
|
[
"MIT"
] |
permissive
|
isabella232/zones
|
7dbb0ef410f0f08c685583f2a05351c02c03820d
|
80e379b416b1c58bc48ea1e8abebf5c4d1a1f911
|
refs/heads/master
| 2023-04-02T06:03:10.610755
| 2021-04-19T19:02:06
| 2021-04-19T19:02:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
r
|
version.R
|
#' Time zone database version
#'
#' @description
#' `zone_database_version()` returns the version of the time zone database
#' currently in use.
#'
#' @return
#' A single string of the database version.
#'
#' @export
#' @examples
#' zone_database_version()
zone_database_version <- function() {
# Thin wrapper: delegates to the compiled helper registered by this package.
zone_database_version_cpp()
}
|
0bc2dac629ceb07b0f0c3977d13f82149c389286
|
ed7c017dd9a4e256db9284b8b4219fd8f8ade2f0
|
/pkg_VariantCallinginR/man/AlignQual.Rd
|
67fd6d257a8fc1dcb6a7b339855ea6592b2b8bfb
|
[] |
no_license
|
DRemarque/VariantCallinginR
|
2c9bf42dd47e7f2547c6c33f1736a5f7adaa4558
|
f14cec98dfdf7eb3ab6f951107090068e9743c0b
|
refs/heads/master
| 2022-11-16T19:02:01.387893
| 2020-07-06T18:02:12
| 2020-07-06T18:02:12
| 275,776,774
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,384
|
rd
|
AlignQual.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AlignQual.R
\name{AlignQual}
\alias{AlignQual}
\title{AlignQual()}
\usage{
AlignQual(
Alignment_File_Name = "No Default",
Reference_Length = "No Default",
Read_Length = "No Default",
server = F
)
}
\arguments{
\item{Alignment_File_Name}{The file name of the alignment to be assessed (including .bam file extension)}
\item{Reference_Length}{The length of the utilised reference genome in basepairs. This is used to calculate the coverage based on the Lander/Waterman equation (Coverage=Length*Number of mapped reads/ Genome Length)}
\item{Read_Length}{The general length of the sequenced reads in basepairs. This is generally between 100 and 150 bp for Illumina.}
\item{server}{To save the quality plots on a server without gpu access, set this variable to TRUE to use an alternative save method.}
}
\description{
Based on Rsamtools and ggplot2, this function is used to collect quality data from an alignment BAM file, which is then reported in a mapQ graph and a table of (un)mapped read counts; optionally, coverage and alignment rates are also printed to the console.
}
\examples{
# Plot the quality of a file
AlignQual("myalignment.bam")
# Calculate coverage as well
AlignQual("myalignment.bam",Reference_Length=1000,Read_Length=150)
}
\keyword{alignment}
\keyword{bowtie2}
\keyword{fastq}
|
838f1d1829d89a0ba012baaf87e184e3bc0260de
|
8e01589320ce5bfcc5992acc4447225ed0e4b806
|
/R/glmfence.R
|
ff597580287e448605861bd44936aed09b21086e
|
[] |
no_license
|
garthtarr/mplot
|
c58a50650c05cfe6827ab41cb1bdf5171f29d4d5
|
3d6072a8249ca2607f2bfe5eb464abfd3cdec526
|
refs/heads/master
| 2021-07-17T01:57:32.116579
| 2021-07-10T10:36:39
| 2021-07-10T10:36:39
| 19,841,699
| 12
| 9
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,627
|
r
|
glmfence.R
|
#' The fence procedure for generalised linear models
#'
#' This function implements the fence procedure to
#' find the best generalised linear model.
#'
#' @param mf an object of class \code{\link[stats]{glm}}
#' specifying the full model.
#' @param cstar the boundary of the fence, typically found
#' through bootstrapping.
#' @param nvmax the maximum number of variables that will be
#' be considered in the model.
#' @param adaptive logical. If \code{TRUE} the boundary of the fence is
#' given by cstar. Otherwise, it the original (non-adaptive) fence
#' is performed where the boundary is cstar*hat(sigma)_{M,tildeM}.
#' @param trace logical. If \code{TRUE} the function prints out its
#' progress as it iterates up through the dimensions.
#' @param ... further arguments (currently unused)
#' @seealso \code{\link{af}}, \code{\link{lmfence}}
#' @references Jiming Jiang, Thuan Nguyen, J. Sunil Rao,
#' A simplified adaptive fence procedure, Statistics &
#' Probability Letters, Volume 79, Issue 5, 1 March 2009,
#' Pages 625-629, http://dx.doi.org/10.1016/j.spl.2008.10.014.
#' @export
#' @keywords Internal
#' @family fence
glmfence = function(mf,
                    cstar,
                    nvmax,
                    adaptive=TRUE,
                    trace=TRUE,...){
# Fence idea: a submodel M is a candidate when Qm(M) <= Qm(full) + boundary,
# where the boundary depends on cstar and (for the non-adaptive fence) on
# sigMM for the model size. Model sizes are searched in increasing order via
# bestglm; the first size with candidates returns all candidate formulas,
# ordered by their Qm score.
method="ML"  # lack-of-fit measured on the ML scale throughout
if(any(class(mf)=="glm")!=TRUE){
stop("The argument to mf needs to be a glm object.")
}
if(attr(mf$terms,"intercept")==0){
stop("Please allow for an intercept in your model.")
}
# mextract() (package helper) pulls the ingredients out of the full fit.
m = mextract(mf)
kf = m$k          # size of the full model
fixed = m$fixed
family = m$family
yname = m$yname   # response variable name
Xy = m$X          # data frame of predictors + response
n = m$n
wts = m$wts
if(missing(nvmax)) nvmax=kf
null.ff = stats::as.formula(paste(yname,"~1")) # null formula
m0 = stats::glm(null.ff, data = Xy, family=family, weights = wts) # null model
Qmf = Qm(mf, method=method) # Qm for the full model
Qm0 = Qm(m0, method=method) # Qm for the null model
ret = met = list()
# Null model
# If the intercept-only model already falls inside the fence it is the
# single candidate and we return immediately.
if(trace) cat(paste("Null model "))
UB = Qmf + cstar*sigMM(k.mod=1, method, k.full=kf,adaptive=adaptive)
if(Qm0<=UB){
if(trace) txt.fn(Qm0,UB,m0)
ret[[1]] = null.ff
return(ret)
} else if(trace) cat("(Not a candidate model) \n")
if(cstar<5){ # avoids having to add variables to get the full model
nvmax = kf
prev.nvmax = nvmax
} else if(nvmax<5){
prev.nvmax = nvmax
nvmax = nvmax+5
} else prev.nvmax = nvmax
# look around for the best model at each model size
while(prev.nvmax<=kf){
prev.nvmax = nvmax
# bestglm screens subsets by BIC; its top models per size are then
# tested against the fence below.
bg = bestglm::bestglm(Xy=Xy, family=family,
                      IC = "BIC",
                      TopModels = 5*kf,
                      nvmax = nvmax, weights=wts)
lc = bg$Subsets[,1:kf]+0 # 'leaps' candidates
for(i in 2:nvmax){
if(trace) cat(paste("Model size:",i,""))
# Fence boundary for models of size i.
UB = Qmf + cstar*sigMM(k.mod=i, method, k.full=kf, adaptive=adaptive)
# Best subset of this size (row i of the 0/1 inclusion matrix).
mnames = colnames(lc)[which(lc[i,]==1)]
ff = stats::as.formula(paste(yname," ~ ",paste(mnames[-1],collapse="+"),sep=""))
em = stats::glm(formula=ff, data=Xy, family=family, weights=wts)
hatQm = Qm(em,method=method)
if(hatQm<=UB){
if(trace){
cat("\n")
cat("Candidate model found via bestglm. \n")
cat("Exploring other options at this model size. ")
txt.fn(hatQm,UB,em)
}
pos = 1
environment(ff) = globalenv()
ret[[pos]] = ff # record the result
met[[pos]] = hatQm #record its score
# look for others at this model size:
lfm = bg$BestModels[,1:(kf-1)]+0
lfm.sum = apply(lfm,1,sum)
lfm = lfm[lfm.sum==i-1,]
# remove already estimated model from lfm:
check.fn = function(x) !all(x==lc[i,-1])
lfm = lfm[apply(lfm,1,check.fn),]
if(dim(lfm)[1]>0){
for(j in 1:dim(lfm)[1]){
mnames = colnames(lfm)[which(lfm[j,]==1)]
ff = stats::as.formula(paste(yname," ~ ",
                             paste(mnames,collapse="+"),
                             sep=""))
em = stats::glm(ff, data=Xy, family=family, weights=wts)
hatQm = Qm(em,method=method)
if(hatQm<=UB){
if(trace) txt.fn(hatQm,UB,em)
pos = pos+1
environment(ff) = globalenv()
ret[[pos]] = ff
met[[pos]] = hatQm
}
}
# Return all candidates at this size, ordered by score (random ties).
return(ret[rank(unlist(met),ties.method="random")])
} else return(ret)
}
if(trace) cat("(No candidate models found) \n")
}
# No candidates at any size up to nvmax: enlarge the search and retry.
if(trace) cat(" (No candidate models found: increasing nvmax) \n", cstar)
nvmax = nvmax+5
}
}
|
fb9983ac02dd6164b985a73f4b12d9078aa0b241
|
7aeadc8bcade91cb6c50bd57b525ea968a42a43c
|
/code/stepwise.R
|
337fd2958e9881ab521126bc3712eff86ab95d74
|
[] |
no_license
|
Miamiamiamyt/Econ725_finalproject_group7
|
5e62e02df3c61984325813912bed8f0184eb954d
|
932f5707634d555e2d1e9dd51d00c2f9a2573ffb
|
refs/heads/main
| 2023-01-23T06:02:20.719806
| 2020-12-11T07:18:46
| 2020-12-11T07:18:46
| 313,625,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,830
|
r
|
stepwise.R
|
stepwise = function(train,test,name,y,crit) {
# Backward stepwise selection (leaps::regsubsets) followed by prediction.
#   train, test: data.tables (the `..name` / `:=` syntax requires data.table)
#   name: character vector of candidate predictor columns
#   y:    response selector -- one of 'single_price', 'single_sellerprice',
#         'single_buyerprice', 'att' (which uses column `expatt`)
#   crit: model-size criterion, 'bic' (min BIC) or 'adjr2' (max adj. R^2)
# Returns predictions on `test` from the selected linear model.
# NOTE(review): the four response branches are near-identical copy-paste;
# a lookup from `y` to the response column would remove the duplication.
train_temp <- train[,..name]
# Fit a full lm per response and drop perfectly collinear predictors
# (those whose coefficients come back NA) before running regsubsets.
if(y == 'single_price') {
model <- lm(data.frame(train$single_price,train_temp))
colinear <- names(model$coefficients[which(is.na(model$coefficients))])
#print(typeof(colinear))
#train_temp <- data.frame(train_temp)
train_temp <- train_temp[, (colinear) := NULL]
#print(train_temp)
regfit.bwd <- regsubsets(y=train$single_price,x=train_temp, method="backward")
}
if(y == 'single_sellerprice') {
model <- lm(data.frame(train$single_sellerprice,train_temp))
colinear <- names(model$coefficients[which(is.na(model$coefficients))])
#print(typeof(colinear))
#train_temp <- data.frame(train_temp)
train_temp <- train_temp[, (colinear) := NULL]
#print(train_temp)
regfit.bwd <- regsubsets(y=train$single_sellerprice,x=train_temp, method="backward")
}
if(y == 'single_buyerprice') {
model <- lm(data.frame(train$single_buyerprice,train_temp))
colinear <- names(model$coefficients[which(is.na(model$coefficients))])
#print(typeof(colinear))
#train_temp <- data.frame(train_temp)
train_temp <- train_temp[, (colinear) := NULL]
#print(train_temp)
regfit.bwd <- regsubsets(y=train$single_buyerprice,x=train_temp, method="backward")
}
if(y == 'att') {
# NOTE(review): lm(lm(...)) double-wraps the fit -- almost certainly a
# typo for lm(data.frame(train$expatt,train_temp)); verify.
model <- lm(lm(data.frame(train$expatt,train_temp)))
colinear <- names(model$coefficients[which(is.na(model$coefficients))])
#print(typeof(colinear))
#train_temp <- data.frame(train_temp)
train_temp <- train_temp[, (colinear) := NULL]
#print(train_temp)
regfit.bwd <- regsubsets(y=train$expatt,x=train_temp, method="backward")
}
summary_bwd <- summary(regfit.bwd)
# Pick the model size by the requested criterion and keep its predictor
# names (dropping the intercept, element 1).
if(crit == 'bic'){
#which.min(summary_bwd$bic)
print(round(coef(regfit.bwd,which.min(summary_bwd$bic)),10))
#print(names(coef(regfit.bwd,which.min(summary_bwd$bic))))
name1 <- names(coef(regfit.bwd,which.min(summary_bwd$bic)))
name1 <- name1[-1]
print(name1)
}
if(crit == 'adjr2'){
#which.min(summary_bwd$adjr2)
print(round(coef(regfit.bwd,which.max(summary_bwd$adjr2)),10))
#print(names(coef(regfit.bwd,which.min(summary_bwd$adjr2))))
name1 <- names(coef(regfit.bwd,which.max(summary_bwd$adjr2)))
name1 <- name1[-1]
print(name1)
print(max(summary_bwd$adjr2))
}
# Refit the selected model on the training data for the chosen response.
if(y == 'single_price') {
best <- lm(data.frame(train$single_price,train[,..name1]))
}
if(y == 'single_sellerprice') {
best <- lm(data.frame(train$single_sellerprice,train[,..name1]))
}
if(y == 'single_buyerprice') {
best <- lm(data.frame(train$single_buyerprice,train[,..name1]))
}
if(y == 'att') {
best <- lm(data.frame(train$expatt,train[,..name1]))
}
prediction <- predict(best, data.frame(test[,..name1]))
#print(test[,..name1])
return(prediction)
}
|
254b64601511f5c3f0415e07668897b051ff29f8
|
fa771d50093e1e01b07dfbf8c3d6cc4cbe6006a3
|
/cachematrix.R
|
953d5b961f1aaae58ba5d8050f354524391391e3
|
[] |
no_license
|
azzurres/ProgrammingAssignment2
|
43d2b81fdc284c41ac7b05d8897d7b0f8e76356c
|
89d488aee0ff503fe78b58792c96c12cd3dfa64b
|
refs/heads/master
| 2021-01-21T19:45:40.969078
| 2014-12-18T17:38:19
| 2014-12-18T17:38:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,192
|
r
|
cachematrix.R
|
## The object of this assignment is to cache the inverse of a matrix in
## order to call upon it later instead of recalculating it, resulting in a gain
## of time. The following two functions will do this.
## makeCacheMatrix wraps a matrix together with a cache slot for its
## inverse. The returned list exposes four closures sharing one
## environment:
##   set        - replace the matrix (and invalidate the cached inverse)
##   get        - return the current matrix
##   setinverse - store a computed inverse in the cache
##   getinverse - return the cached inverse, or NULL if not yet cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # matrix changed: any cached inverse is stale
  }
  get <- function() {
    x
  }
  setinverse <- function(value) {
    cached_inverse <<- value
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the matrix wrapped by
## makeCacheMatrix(). If an inverse is already cached it is returned
## immediately (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # First request: compute, cache, and return the inverse.
    cached <- solve(x$get())
    x$setinverse(cached)
  } else {
    message("Getting cached data.")
  }
  cached
}
## Sample test
## > x<- matrix(rnorm(16),4)
## > m=makeCacheMatrix(x)
## > m$get()
## [,1] [,2] [,3] [,4]
## [1,] -0.23940112 -0.7990949 -0.5018423 0.9560346
## [2,] -0.04909691 -0.7173924 1.1542377 0.1002856
## [3,] -0.36436440 0.7416197 -0.9177909 0.2550885
## [4,] -0.76250456 2.4287415 -0.3571589 1.0358884
## > cacheSolve(m)
## [,1] [,2] [,3] [,4]
## [1,] 0.6947519 -2.8560683 -4.2357352 0.6783577
## [2,] -0.2123333 -0.4660022 -0.6235214 0.3946220
## [3,] -0.1845769 0.5271896 -0.4115036 0.2206437
## [4,] 0.9455949 -0.8279616 -1.7978455 0.6155304
## > cacheSolve(m)
## getting cached data.
## [,1] [,2] [,3] [,4]
## [1,] 0.6947519 -2.8560683 -4.2357352 0.6783577
## [2,] -0.2123333 -0.4660022 -0.6235214 0.3946220
## [3,] -0.1845769 0.5271896 -0.4115036 0.2206437
## [4,] 0.9455949 -0.8279616 -1.7978455 0.6155304
|
190fcb21b7182d419b103f9300ebd72365c1b176
|
bd5b0f364019a59f296674b444a448a29d7ab55c
|
/R/make_gazetteer_example.R
|
856c1f58ae819990da654f5167a5b30849057436
|
[] |
no_license
|
ramarty/Unique-Location-Extractor
|
7adc08879e9c6597e48fa601223a3bb81de10d55
|
e3a8c3775656603b013d6a185d533cfa88dcd664
|
refs/heads/master
| 2023-07-04T02:39:22.948217
| 2021-07-27T15:35:08
| 2021-07-27T15:35:08
| 234,098,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
make_gazetteer_example.R
|
# Build a small example gazetteer: motorway geometries from OpenStreetMap
# clipped to Nairobi's bounding box.
# (Removed a stray argument-less `library()` call, which merely opens the
# installed-packages browser and was clearly a leftover.)
library(osmdata)
library(raster)
#### Define Area of Interest
# GADM level-3 administrative boundaries for Kenya, then keep Nairobi.
kenya <- getData('GADM', country='KEN', level=3)
nairobi <- kenya[kenya$NAME_1 %in% "Nairobi",]
#### OSM Prep
# Overpass query over Nairobi's extent; generous timeout for slow mirrors.
q <- opq(bbox = nairobi %>% extent() %>% as.vector(),
         timeout = 9999)
roads <- q %>%
  add_osm_feature(key = 'highway', value = 'motorway') %>%
  osmdata_sf()
|
6b5b92d4b4ed2c42f200cc3705b36fdeab091930
|
faca9fb310e0f5d25206dd7fbd8bd059e6facefb
|
/man/verbatimize.Rd
|
d93b3bc404a9efd65f0d3bc9e36c38d0dd80f289
|
[] |
no_license
|
imbs-hl/imbs
|
505f534fb68cd2d8fc6a3847f36784245cab3111
|
2d3ec95b81ea84623f007c5364ab19789a85715c
|
refs/heads/master
| 2023-08-11T08:33:42.695944
| 2019-09-05T20:01:22
| 2019-09-05T20:01:22
| 66,840,758
| 1
| 1
| null | 2018-01-29T15:02:18
| 2016-08-29T12:13:16
|
R
|
UTF-8
|
R
| false
| true
| 448
|
rd
|
verbatimize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/character-vectors.R
\name{verbatimize}
\alias{verbatimize}
\title{Verbatimize string}
\usage{
verbatimize(x, sep = ", ")
}
\arguments{
\item{x}{Character vector to be encapsulated}
\item{sep}{String; how pasted elements should be separated. Default is ", ".}
}
\value{
String of pasted values
}
\description{
Encapsulate elements of vector in LaTeX verbatim commands
}
|
33e87634486395d31e478656756792bf661151a5
|
4af263043663e462f6d20a4e85eda3e59c26df1f
|
/man/create_data_churn.Rd
|
4db6229063533dfb8359e061c252e1cce870ca19
|
[] |
no_license
|
rolkra/explore
|
de4398f17610be00fd4b181b0ffb4bf896335749
|
afb33da1cc80e2135ed08440eee61494e3300c4a
|
refs/heads/master
| 2023-09-01T06:19:24.895608
| 2023-08-29T08:12:14
| 2023-08-29T08:12:14
| 128,469,432
| 171
| 21
| null | 2023-09-07T06:32:18
| 2018-04-06T20:54:05
|
R
|
UTF-8
|
R
| false
| true
| 727
|
rd
|
create_data_churn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create-data.R
\name{create_data_churn}
\alias{create_data_churn}
\title{Create data churn}
\usage{
create_data_churn(
obs = 1000,
target_name = "churn",
factorise_target = FALSE,
target1_prob = 0.4,
add_id = FALSE,
seed = 123
)
}
\arguments{
\item{obs}{Number of observations}
\item{target_name}{Variable name of target}
\item{factorise_target}{Should target variable be factorised?}
\item{target1_prob}{Probability that target = 1}
\item{add_id}{Add an id-variable to data?}
\item{seed}{Seed for randomization (integer)}
}
\value{
A dataset as tibble
}
\description{
Artificial data that can be used for unit-testing or teaching
}
|
5cd606d4f82d94f8498a709cb2ead3dead80bcd0
|
f7993d98c9effe1e7ab6bbd855ac2808b41cae16
|
/Parte1-Rfundamentos/R08Listas.R
|
af7c3115022b6620b222dcd666493aa9adee8048
|
[
"MIT"
] |
permissive
|
alletsc/R-Scripts-para-Introducao
|
db71da50b8d42cddc2d8cef54dcae9167ccc6541
|
89eef96131c6dc3d7cd566d4bc1817b288ec480a
|
refs/heads/master
| 2023-01-04T16:58:48.961433
| 2020-10-27T04:21:56
| 2020-10-27T04:21:56
| 290,594,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,566
|
r
|
R08Listas.R
|
# Script created to demonstrate lists in R
# List of strings
liststr = list('Bem-vindo', 'ao estudo', 'de R')
liststr
# List of integers
listint = list(2, 3, 4)
listint
# List of floats (doubles)
floatlist = list(1.90, 45.3, 300.5)
floatlist
# List of complex numbers
compllist = list(5.2+3i, 2.4+8i)
compllist
# List of logical values
logiclist = list(T, F, F)
logiclist
# Mixed (heterogeneous) lists
compostlist = list("A", 3, TRUE)
compostlist
lista1 <- list(1:10, c("Maria", "Selvino", "Sther"), rnorm(10))
lista1
?rnorm # normal distribution function
# Slicing lists
lista1[1]
lista1[c(1,2)]
lista1[2]
lista1[[2]][1]
lista1[[2]][1] = "Monica" # Replacing an element in a list
lista1
# Named lists
names(lista1) <- c("int", "str", "num")
lista1
vectnum <- 1:4
vectnum
vectstr <- c("A", "B", "C", "D")
vectstr
# Naming lists
lista2 <- list(Numeros = vectnum, Letras = vectstr)
lista2
# Naming elements
lista2 <- list(elemento1=3:5, elemento2=c(7.2,3.5))
lista2
# Specifying elements by name
names(lista1) <- c("inteiros", "strings", "numericos")
lista1
lista1$strings # $ accesses a list element by name
length(lista1$inteiros)
lista1$inteiros
# Check the length of the list
length(lista1)
# We can extract a single element
lista1$strings[2]
# Mode (storage type) of the elements
mode(lista1$numericos)
mode(lista1$strings)
# Combining 2 lists
lista3 <- c(lista1, lista2)
lista3
# Converting a vector to a list
v = c(1:3)
v
l = as.list(v)
l
# Collecting objects into a list
mat = matrix(1:4, nrow = 2)
mat
vec = c(1:9)
vec
lst = list(mat, vec)
lst
|
e9433dd0169dee61dc9e64377d677ffe48074a23
|
58b507613f6d4390ce0bcd2d61597343c32fdd28
|
/man/apply_offset.Rd
|
91a3be9b15d726af60768b581296822103b47ef9
|
[
"MIT"
] |
permissive
|
Weiming-Hu/EITrans
|
1409017da7850c29f509edc22e122cd7fe3bd12a
|
d7877918088433e57ba058b697da8a5361661fcf
|
refs/heads/master
| 2022-06-17T06:31:12.102028
| 2021-03-24T01:32:34
| 2021-03-24T01:32:34
| 203,261,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 900
|
rd
|
apply_offset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply_offset.R
\name{apply_offset}
\alias{apply_offset}
\title{EITrans::apply_offset}
\usage{
apply_offset(ens, offset, pre_sorted = F, verbose = F)
}
\arguments{
\item{ens}{A 4-dimensional array for ensemble forecasts. Dimensions should be
\verb{[stations, times, lead times, members]}.}
\item{offset}{Member offset values used to calibrate ensembles.}
\item{pre_sorted}{Whether the ensemble members are pre-sorted from the lowest to
the highest. Using pre-sorted ensembles can save runtime.}
\item{verbose}{Whether to print messages.}
}
\value{
A calibrated ensemble
}
\description{
EITrans::apply_offset adds the offset values to a forecast ensemble. Offset values
are calculated from EITrans::member_offset. Or you could simply use EITrans::EITrans
which has built in a complete workflow for EITrans calibration.
}
|
b080753849ff32f7b91b3c59403b4b96f4c9f8a3
|
42ac78fed8e8494cc54a533e6cb9b4c18ca51369
|
/branches/Matrix-APIchange/tests/matr-exp.R
|
112b41c32f4781637084432a05fc79d09b37132a
|
[] |
no_license
|
LTLA/Matrix
|
8a79cac905cdb820f95190e99352cd9d8f267558
|
2b80087cfebc9f673e345000aeaf2170fc15b506
|
refs/heads/master
| 2020-08-07T20:22:12.075155
| 2019-09-28T21:21:10
| 2019-09-28T21:21:10
| 213,576,484
| 0
| 1
| null | 2019-10-13T00:56:38
| 2019-10-08T07:30:49
|
C
|
UTF-8
|
R
| false
| false
| 1,575
|
r
|
matr-exp.R
|
library(Matrix)
## Matrix Exponential
## Regression tests for Matrix::expm() against closed-form results.
source(system.file("test-tools.R", package = "Matrix"))
## e ^ 0 = 1 - for matrices:
assert.EQ.mat(expm(Matrix(0, 3,3)), diag(3), tol = 0)# exactly
## e ^ diag(.) = diag(e ^ .):
assert.EQ.mat(expm(as(diag(-1:4), "dgeMatrix")), diag(exp(-1:4)))
set.seed(1)
## Relative error of expm() on 100 random positive diagonal matrices;
## for a diagonal matrix the exact exponential is diag(exp(x)).
rE <- replicate(100,
                { x <- rlnorm(12)
                  relErr(as(expm(as(diag(x), "dgeMatrix")),
                            "matrix"),
                         diag(exp(x))) })
stopifnot(mean(rE) < 1e-15,
          max(rE) < 1e-14)
summary(rE)
## Some small matrices
m1 <- Matrix(c(1,0,1,1), nc = 2)
e1 <- expm(m1)
assert.EQ.mat(e1, cbind(c(exp(1),0), exp(1)))
m2 <- Matrix(c(-49, -64, 24, 31), nc = 2)
e2 <- expm(m2)
## The true matrix exponential is 'te2':
e_1 <- exp(-1)
e_17 <- exp(-17)
te2 <- rbind(c(3*e_17 - 2*e_1, -3/2*e_17 + 3/2*e_1),
             c(4*e_17 - 4*e_1, -2 *e_17 + 3 *e_1))
assert.EQ.mat(e2, te2, tol = 1e-13)
## See the (average relative) difference:
all.equal(as(e2,"matrix"), te2, tol = 0) # 1.48e-14 on "lynne"
## The ``surprising identity'' det(exp(A)) == exp( tr(A) )
## or log det(exp(A)) == tr(A) :
stopifnot(all.equal(c(determinant(e2)$modulus), sum(diag(m2))))
## Nilpotent sparse matrix: expm() must agree with the dense path.
m3 <- Matrix(cbind(0,rbind(6*diag(3),0)), nc = 4)# sparse
e3 <- expm(m3)
E3 <- expm(Matrix(m3, sparse=FALSE))
stopifnot(identical(e3, E3))
## Hand-computed exponential of the nilpotent m3 (series terminates).
e3. <- rbind(c(1,6,18,36),
             c(0,1, 6,18),
             c(0,0, 1, 6),
             c(0,0, 0, 1))
assert.EQ.mat(e3, e3.)
cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons''
|
5450063870b4771dd22b638d570fb8bb331a32a1
|
468b5f731ac921e570fc50360e427726bdec17bd
|
/app.R
|
0a749808589f45c235fbb4f57b0fa116a4eaaf82
|
[
"MIT"
] |
permissive
|
josephd8/balancer-shiny
|
c8c94dd8656924ad6e188c2e6a3f0cd3f1463a72
|
7691ea3e6a98da88a71fa10721854278585c9a60
|
refs/heads/main
| 2023-01-25T04:33:01.146826
| 2020-12-10T22:27:21
| 2020-12-10T22:27:21
| 320,367,584
| 0
| 0
|
MIT
| 2020-12-10T22:27:22
| 2020-12-10T19:16:57
|
R
|
UTF-8
|
R
| false
| false
| 1,306
|
r
|
app.R
|
library(shiny)
# helpers.R is expected to define the pool tables `smart`, `shared`,
# `private` plus get_pool_names() and get_pool_stats() — not visible here.
source("helpers.R")
# UI: a pool-type selector, a pool dropdown populated server-side, and a
# text panel showing the selected pool's stats.
ui <- fluidPage(
    # Application title
    titlePanel("Balancer Pools"),
    sidebarLayout(
        sidebarPanel(
            selectInput(inputId = 'type',
                        label = 'Pool Type',
                        choices = c("Smart", "Shared", "Private"),
                        selected = 1),
            # Rendered by the server so choices track the chosen pool type.
            uiOutput("pool_selection")
        ),
        mainPanel(
            textOutput("pool_stats")
        )
    )
)
# Server: resolve the chosen pool type to its data table and render the
# dropdown and stats for it.
server <- function(input, output) {
    # Reactive lookup of the table backing the selected pool type.
    ref <- reactive({
        if(input$type == "Smart"){
            return(smart)
        } else if (input$type == "Shared"){
            return(shared)
        } else if (input$type == "Private"){
            return(private)
        }
    })
    output$pool_selection <- renderUI({
        selectInput(inputId = "pool",
                    label = "Pool",
                    choices = get_pool_names(ref()),
                    selected = 1)
    })
    # NOTE(review): this reactive is never used — output$pool_stats below
    # calls get_pool_stats() directly. Consider removing one or the other.
    stats <- reactive({
        get_pool_stats(ref(), input$pool)
    })
    output$pool_stats <- renderText(
        get_pool_stats(ref(), input$pool)
    )
}
# Run the application
shinyApp(ui = ui, server = server)
|
aba65a348af854c200a771e80c2cf7b42b8f04f9
|
59938d0e27d9d4eb28c2cfab7b05f98b61d35886
|
/DSC/Q2_Firstn.R
|
05010043d09f854bd7c65280d068454fb94585ff
|
[] |
no_license
|
codepractice97/SemSix
|
910114e33e711f93edab11845f08f9aadbb4e509
|
a7f361af98fcd28fb720585ceece68ddbde8c22a
|
refs/heads/master
| 2020-12-29T20:44:24.111971
| 2020-05-13T18:21:46
| 2020-05-13T18:21:46
| 238,725,305
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 67
|
r
|
Q2_Firstn.R
|
# Read an integer N from the console and print the sum 1 + 2 + ... + N.
N <- as.integer(readline("Enter Nth number"))
# seq_len(N) is empty for N = 0 (sum 0), whereas the original c(1:N)
# counted down c(1, 0) and reported 1.
print(sum(seq_len(N)))
|
427803704207e932efd061feb4d1e08e1805f941
|
0751e430a7b3ccb9a4417ab8a1d05cda5be3c96e
|
/wrangling_rasters.R
|
efb879c2ecd4a0634cc3318e46176a0633744d9a
|
[] |
no_license
|
bilgecansen/MAPSvsGBIF
|
544666463ed4092e50b5527ff03addba62872189
|
7ae003f6ce15c18667f5a4013f149b544bb426f2
|
refs/heads/master
| 2023-04-13T21:40:00.667137
| 2022-10-03T13:05:42
| 2022-10-03T13:05:42
| 269,141,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,757
|
r
|
wrangling_rasters.R
|
# Aggregate per-year Maurer weather rasters into 17-year averages and into
# a year x cell x variable array.
rm(list = ls())  # NOTE(review): clearing the global workspace in a script is discouraged
library(tidyverse)
library(raster)
# Average of 17 years between 1992-2008 ----------------------------------
years <- 1992:2008
# Variable names are taken from the first year's file listing.
# NOTE(review): the "." in gsub(".asc", ...) is a regex wildcard — works
# here but fixed = TRUE would be safer.
variables <- list.files("weather/single_year/1992")[1:10] %>%
  gsub(".asc", "", .)
if (!"average" %in% list.files("weather")) dir.create("weather/average")
pb <- txtProgressBar(min = 0, max = length(variables), style = 3)
for (k in 1:length(variables)) {
  # Stack the k-th variable across all years and average cell-wise.
  r <- list()
  for (i in 1:length(years)) {
    folder <- paste("weather/single_year", years[i], sep = "/")
    r[[i]] <- raster(paste(folder, list.files(folder)[k], sep = "/"))
  }#i
  rs <- stack(r) %>% mean(.)
  # NOTE(review): this names the temporary list `r`, not the averaged
  # raster `rs`, and `i` still holds its last loop value — likely meant
  # names(rs) <- variables[k]; confirm intent.
  names(r) <- paste(variables[k], years[i], sep = "_")
  filename <- paste(variables[k], "asc", sep = ".") %>%
    paste("weather/average", ., sep = "/")
  writeRaster(rs, filename = filename, overwrite = T)
  setTxtProgressBar(pb, k)
}#k
# Save as data frame
files <- paste0("weather/average/", list.files("weather/average"))
r <- map(files, raster) %>%
  do.call(raster::stack, .)
names(r) <- gsub(".asc", "", list.files("weather/average"))
df <- rasterToPoints(r)
saveRDS(df, file = "data_maurer.rds")
# Every year and variable in an array -------------------------------------
a <- list()
for (i in 1:length(years)) {
  folder <- paste("weather/single_year", years[i], sep = "/")
  r <- paste(folder, list.files(folder)[1:10], sep = "/") %>%
    stack(.)
  names(r) <- gsub(".asc", "", list.files(folder))[1:10]
  # Keep only the 10 value columns (drop x/y coordinates).
  a[[i]] <- rasterToPoints(r)[,3:12]
  gc()
}
# NOTE(review): abind() is used below but library(abind) is never loaded —
# this fails unless abind is attached elsewhere.
a <- lapply(a, function(x) array(x, dim = c(1,53097,10)))
a2 <- do.call(abind, list(a, along = 1))
dimnames(a2) <- list(years = as.character(years), NULL, variables = names(r))
saveRDS(a2, file = "maurer_array.rds")
|
bf52cc26d0368959c5008b0a0b4dbc7437c1acd0
|
7c759c26de0788d39c0cbbffb9a601c406a0547f
|
/R Projects/R Shiny Visualizations/Lineup Assessment Tool/app.R
|
980dd0c8a58e48ae286dc8257eb148fba0295d5b
|
[] |
no_license
|
akash424/Data-Science-Portfolio
|
a648b3c1b460f685551dc521de1853780c17ec62
|
6498419353021d78259604c67a38fc96c450e486
|
refs/heads/master
| 2021-06-24T11:51:52.568635
| 2021-04-09T06:17:20
| 2021-04-09T06:17:20
| 212,435,479
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,497
|
r
|
app.R
|
# Load packages
library(readxl)
library(dplyr)
library(stringr)
library(ggplot2)
library(plotly)
library(shiny)
library(DT)
library(tidyverse)
library(rstudioapi)
# NOTE(review): setwd()-to-script-dir only works inside RStudio; consider
# here::here() for portability.
current_path = getActiveDocumentContext()$path
setwd(dirname(current_path))
# Read data: one sheet per lineup size (2- to 5-man lineups).
TwoMan <- read_excel("data/LINEUP DATA.xlsx", sheet = "2Man")
ThreeMan <- read_excel("data/LINEUP DATA.xlsx", sheet = "3Man")
FourMan <- read_excel("data/LINEUP DATA.xlsx", sheet = "4Man")
FiveMan <- read_excel("data/LINEUP DATA.xlsx", sheet = "5Man")
# Quick check: team plus/minus must mirror opponent plus/minus exactly.
# NOTE(review): any NA in these columns makes the sum NA and neither
# message prints — confirm the sheets are NA-free.
for (i in list(TwoMan, ThreeMan, FourMan, FiveMan)) {
  if (sum(i$`+/-` != -1*i$`OPP +/-`) == 0 ) {
    print("data looks good")
  }
  else {
    print("data has some errors")
  }
}
# Calculating possessions from points and off/def ratings.
# The get()/assign() pattern rewrites each named data frame in place.
# NOTE(review): funs() is deprecated in dplyr >= 0.8; list(~ ...) is the
# modern equivalent.
for (i in c("TwoMan", "ThreeMan", "FourMan", "FiveMan")) {
  assign(i, get(i) %>%
           mutate(POSS = round(100*PTS/OFFRTG),
                  `OPP POSS` = round(100*`OPP PTS`/DEFRTG)) %>%
           mutate_all(funs(ifelse(is.nan(.), 0, .))))
}
# Dropping advanced stats and deriving shooting efficiency (eFG%, TS%)
# for both team and opponent; NaN from 0/0 divisions is mapped to 0.
for (i in c("TwoMan", "ThreeMan", "FourMan", "FiveMan")) {
  assign(paste(i,"cleaned", sep = "_"), get(i) %>%
           select(-(OFFRTG:PIE), -`+/-`, -`OPP +/-`) %>%
           mutate(`EFG%` = round((FGM + 0.5*`3PM`)/FGA, 3)*100,
                  `TS%` = round(PTS/(2*(FGA+(.44*FTA))), 3)*100,
                  `OPP EFG%` = round((`OPP FGM` + 0.5*`OPP 3PM`)/`OPP FGA`, 3)*100,
                  `OPP TS%` = round(`OPP PTS`/(2*(`OPP FGA`+(.44*`OPP FTA`))), 3)*100) %>%
           mutate_all(funs(ifelse(is.nan(.), 0, .))) %>%
           select(LINEUPS:`OPP PITP`,POSS:`OPP TS%`,RESULT))
}
# Summing stats across wins & losses into a RESULT == "Combined" view,
# then recomputing the percentage columns from the summed counts.
for (i in c("TwoMan", "ThreeMan", "FourMan", "FiveMan")) {
  assign(paste(i,"combined", sep = "_"),
         get(paste(i,"cleaned", sep = "_")) %>%
           select(-RESULT) %>%
           group_by(LINEUPS, SEASON, TEAM) %>%
           summarise_at(vars(GP:`OPP POSS`), funs(sum(.))) %>%
           mutate(`FG%` = 100*round(FGM/FGA,3),
                  `3P%` = 100*round(`3PM`/`3PA`,3),
                  `FT%` = 100*round(FTM/FTA,3),
                  `OPP FG%` = 100*round(`OPP FGM`/`OPP FGA`,3),
                  `OPP 3P%` = 100*round(`OPP 3PM`/`OPP 3PA`,3),
                  `OPP FT%` = 100*round(`OPP FTM`/`OPP FTA`,3),
                  `EFG%` = round((FGM + 0.5*`3PM`)/FGA, 3)*100,
                  `TS%` = round(PTS/(2*(FGA+(.44*FTA))), 3)*100,
                  `OPP EFG%` = round((`OPP FGM` + 0.5*`OPP 3PM`)/`OPP FGA`, 3)*100,
                  `OPP TS%` = round(`OPP PTS`/(2*(`OPP FGA`+(.44*`OPP FTA`))), 3)*100) %>%
           mutate_all(funs(ifelse(is.nan(.), 0, .))) %>%
           mutate(RESULT = "Combined"))
}
# Combining dataframes: per-game W/L rows stacked with the combined rows.
for (i in c("TwoMan", "ThreeMan", "FourMan", "FiveMan")) {
  assign(paste(i, "final", sep = "_"),
         rbind(as.data.frame(get(paste(i, "cleaned", sep = "_"))), as.data.frame(get(paste(i, "combined", sep = "_")))))
}
# Clearing up workspace: keep only the four *_final tables.
rm(list=setdiff(ls(), c("TwoMan_final", "ThreeMan_final", "FourMan_final", "FiveMan_final")))
# Rearranging columns into a fixed display order.
for (i in c("TwoMan", "ThreeMan", "FourMan", "FiveMan")) {
  assign(paste(i, "final", sep = "_"),
         get(paste(i,"final", sep = "_")) %>%
           select(LINEUPS:MIN, POSS:`OPP POSS`, PTS:FGA, `3PM`:`3PA`, FTM:FTA, OREB:PFD, `OPP PTS`, `OPP FGM`:`OPP FGA`, `OPP 3PM`:`OPP 3PA`, `OPP FTM`:`OPP FTA`, `OPP OREB`:`OPP PITP`,
                  `FG%`, `3P%`, `FT%`, `EFG%`, `TS%`, `OPP FG%`, `OPP 3P%`, `OPP FT%`, `OPP TS%`, RESULT))
}
rm(i)
# Setting table template: split column names into opponent vs team stats;
# drop identifier columns (first five) and RESULT from the team list.
opp_stats <- names(TwoMan_final)[grepl( "OPP" , names( TwoMan_final ) )]
tm_stats <- names(TwoMan_final)[!grepl( "OPP" , names( TwoMan_final ) )]
tm_stats <- tm_stats[-c(1:5,33)]
# UI for app
# UI: sidebar controls (lineup size, per-mode, game result, lineup picker,
# comparison stat) plus a two-tab main panel (single-lineup breakdown and
# cross-lineup comparison).
ui <- fluidPage(
  titlePanel("Lineup Assessment Tool"),
  sidebarLayout(
    sidebarPanel(width = 4,
                 helpText('This application allows users to perform in-depth analysis on a single lineup, as well as compare lineups across a number of different statistical measures.'),
                 br(), br(),
                 selectInput("num", "Number of players in lineup:", width = "50%",
                             choices = as.character(2:5)),
                 br(),
                 radioButtons("data", "Per Mode:", choices = c("Totals", "Per Game", "Per 100 Poss", "Per 48 Min")),
                 br(),
                 radioButtons("result", "Game Result:", choices = c("All", "Wins", "Losses")),
                 br(),
                 "The selection below controls what is shown on the 'Lineup Breakdown' tab.",
                 # Rendered server-side so the lineup choices track input$num.
                 uiOutput("lineup"),
                 br(),
                 "The selection below controls what is shown on the 'Lineup Comparison' tab.",
                 selectInput("stat", h3("Area of focus:"), choices = tm_stats)
    ),
    # Hide transient reactive errors while inputs initialise.
    mainPanel( tags$style(type="text/css",
                          ".shiny-output-error { visibility: hidden; }",
                          ".shiny-output_error:before { visibility: hidden; }"),
               tabsetPanel(
                 tabPanel("Lineup Breakdown",
                          br(),
                          p("On/off court values in each statistical category are provided for the selected lineup. These can be used to identify strengths and weaknesses of the lineup.",
                            style = "font-family: 'times'; font-size:14pt"),
                          br(),br(),br(),
                          fluidRow(align='center', htmlOutput("info")),
                          br(),br(),
                          fluidRow(align='center', dataTableOutput("summary"))
                 ),
                 tabPanel("Lineup Comparison",
                          br(),
                          p("On/off court values for lineups that have played 100 minutes or more are shown. This can be used to quickly compare different lineup combinations within a particular statistical category.",
                            style = "font-family: 'times'; font-size:14pt"),
                          br(),br(),br(),
                          fluidRow(align = 'center', dataTableOutput("compare")))
               )
    )
  )
)
# Server: resolves the selected lineup-size table, computes on-court vs
# off-court splits (off-court = all 5-man rows NOT containing the lineup's
# players), and renders the breakdown/comparison tables.
server <- function(input, output, session) {
  # Table for the chosen lineup size (2..5 players).
  dataInput <- reactive({
    switch (input$num,
            "2" = TwoMan_final,
            "3" = ThreeMan_final,
            "4" = FourMan_final,
            "5" = FiveMan_final
    )
  })
  # Map the UI game-result choice onto the RESULT column's coding.
  gameRes <- reactive({
    switch(input$result,
           "All" = "Combined",
           "Wins" = "W",
           "Losses" = "L")
  })
  # Lineup dropdown, repopulated whenever the lineup size changes.
  output$lineup <- renderUI({
    dataset <- dataInput()
    selectInput("players", h3("Lineup:"), choices = sort(unique(dataset$LINEUPS)))
  })
  # Header card: record, minutes, and a low-minutes warning (< 100 min).
  output$info <- renderUI({
    dataset <- dataInput()
    dataset <- dataset %>% filter(LINEUPS %in% input$players)
    if(sum(dataset$RESULT=='W')>0) {W <- dataset$GP[dataset$RESULT=='W']} else {W <- 0}
    if(sum(dataset$RESULT=='L')>0) {L <- dataset$GP[dataset$RESULT=='L']} else {L <- 0}
    MIN <- dataset$MIN[dataset$RESULT=='Combined']
    if(MIN > 100) {
      x <- paste0("<font size=5 color=blue>","<strong>",input$players,"</strong>","</font>",
                  "<br>",
                  "<font size=4>",W,"W - ",L,"L","</font>",
                  "<br>",
                  "<font size=4>",MIN," minutes","</font>")
      HTML(x)
    } else {
      x <- paste0("<font size=5 color=blue>","<strong>",input$players,"</strong>","</font>",
                  "<br>",
                  "<font size=4>",W,"W - ",L,"L","</font>",
                  "<br>",
                  "<font size=4>",MIN," minutes","</font>",
                  "<br>", "<br>",
                  "<font size=3 color=red>","This lineup has not seen much playing time, which affects results.")
      HTML(x)
    }
  })
  # Breakdown tab: per-stat on-court vs off-court values and swings for
  # the selected lineup. Off-court rows come from FiveMan_final (full
  # 5-man data) excluding any row containing all selected players —
  # presumably intentional even for 2/3/4-man lineups, so the complement
  # is built from complete-lineup data; confirm.
  output$summary <- renderDataTable({
    dataset <- dataInput()
    dataset <- dataset %>% filter(LINEUPS %in% input$players & RESULT %in% gameRes())
    players <- unlist(str_split(dataset$LINEUPS, ", "))
    others_lineups <- FiveMan_final[!apply(sapply(players, grepl, FiveMan_final$LINEUPS), 1, all) , ] %>%
      filter(RESULT %in% gameRes()) %>%
      group_by(SEASON, TEAM) %>%
      summarise_at(vars(GP:`OPP TS%`), funs(sum(., na.rm = T))) %>%
      mutate(`FG%` = 100*round(FGM/FGA,3),
             `3P%` = 100*round(`3PM`/`3PA`,3),
             `FT%` = 100*round(FTM/FTA,3),
             `OPP FG%` = 100*round(`OPP FGM`/`OPP FGA`,3),
             `OPP 3P%` = 100*round(`OPP 3PM`/`OPP 3PA`,3),
             `OPP FT%` = 100*round(`OPP FTM`/`OPP FTA`,3),
             `EFG%` = round((FGM + 0.5*`3PM`)/FGA, 3)*100,
             `TS%` = round(PTS/(2*(FGA+(.44*FTA))), 3)*100,
             `OPP EFG%` = round((`OPP FGM` + 0.5*`OPP 3PM`)/`OPP FGA`, 3)*100,
             `OPP TS%` = round(`OPP PTS`/(2*(`OPP FGA`+(.44*`OPP FTA`))), 3)*100)
    table <- as.data.frame(matrix(NA, nrow = length(tm_stats), ncol = 7))
    names(table) <- c('Tm On-Court', 'Tm Off-Court', 'Tm Swing', 'Opp On-Court', 'Opp Off-Court', 'Opp Swing', 'Difference in Swing')
    rownames(table) <- tm_stats
    # Normalise both on-court and off-court frames to the chosen per-mode.
    if (input$data == "Totals") {
      dataset <- dataset
      others_lineups <- others_lineups
    } else if (input$data == "Per Game") {
      dataset <- dataset %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(./GP, 2)))
      others_lineups <- others_lineups %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(./GP, 2)))
    } else if (input$data == "Per 100 Poss") {
      dataset <- dataset %>%
        mutate(POSS_2 = POSS) %>%
        mutate_at(vars(POSS:`OPP PITP`), funs(round(100*./POSS_2, 2)))
      others_lineups <- others_lineups %>%
        mutate(POSS_2 = POSS) %>%
        mutate_at(vars(POSS:`OPP PITP`), funs(round(100*./POSS_2, 2)))
    } else if (input$data == "Per 48 Min") {
      dataset <- dataset %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(48*./MIN, 2)))
      others_lineups <- others_lineups %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(48*./MIN, 2)))
    }
    # Fill team/opponent on/off values and the swing columns per stat.
    for (i in 1:length(tm_stats)) {
      table[i, 1] <- unname(unlist(dataset[tm_stats[i]]))
      table[i, 2] <- unname(unlist(others_lineups[tm_stats[i]]))
      table[i, 3] <- round(table[i, 1] - table[i, 2], 1)
      table[i, 4] <- unname(unlist(dataset[opp_stats[i]]))
      table[i, 5] <- unname(unlist(others_lineups[opp_stats[i]]))
      table[i, 6] <- round(table[i, 4] - table[i, 5], 1)
      table[i, 7] <- round(table[i, 3] - table[i, 6], 1)
    }
    table <- table %>%
      rownames_to_column('stat') %>%
      arrange(desc(`Difference in Swing`)) %>%
      column_to_rownames('stat')
    # Two-row grouped header for the DT widget. NOTE(review): inside
    # withTags(), `table` here is the HTML <table> tag, shadowing the data
    # frame of the same name above — works, but a rename would be clearer.
    sketch = htmltools::withTags(table(
      class = 'display',
      thead(
        tr(
          th(rowspan = 2, ''),
          th(colspan = 3, 'TEAM'),
          th(colspan = 3, 'OPPONENT')
        ),
        tr(
          lapply(c(rep(c('On-Court', 'Off-Court', 'Swing'), 2), 'Difference in Swing'), th)
        )
      )
    ))
    datatable(table, container = sketch, rownames = TRUE)
  })
  # Comparison tab: same on/off computation, but for every lineup with
  # >= 100 minutes in the chosen size/result, focused on one statistic.
  output$compare <- renderDataTable({
    tmStat <- input$stat
    oppStat <- paste('OPP', tmStat)
    dataset <- dataInput()
    dataset <- dataset %>% filter(RESULT %in% gameRes() & MIN >= 100)
    table <- as.data.frame(matrix(NA, nrow = nrow(dataset), ncol = 10))
    names(table) <- c('Lineup', 'GP', 'MIN', 'Tm On-Court', 'Tm Off-Court', 'Tm Swing', 'Opp On-Court', 'Opp Off-Court', 'Opp Swing', 'Difference in Swing')
    table$Lineup <- dataset$LINEUPS
    table$GP <- dataset$GP
    table$MIN <- dataset$MIN
    # Build each lineup's off-court aggregate (one element per lineup).
    others_lineups <- vector("list", length = nrow(dataset))
    for (i in 1:nrow(dataset)) {
      players <- unlist(str_split(dataset$LINEUPS[i], ", "))
      others_lineups[[i]] <- FiveMan_final[!apply(sapply(players, grepl, FiveMan_final$LINEUPS), 1, all) , ] %>%
        filter(RESULT %in% gameRes()) %>%
        group_by(SEASON, TEAM) %>%
        summarise_at(vars(GP:`OPP TS%`), funs(sum(., na.rm = T))) %>%
        mutate(`FG%` = 100*round(FGM/FGA,3),
               `3P%` = 100*round(`3PM`/`3PA`,3),
               `FT%` = 100*round(FTM/FTA,3),
               `OPP FG%` = 100*round(`OPP FGM`/`OPP FGA`,3),
               `OPP 3P%` = 100*round(`OPP 3PM`/`OPP 3PA`,3),
               `OPP FT%` = 100*round(`OPP FTM`/`OPP FTA`,3),
               `EFG%` = round((FGM + 0.5*`3PM`)/FGA, 3)*100,
               `TS%` = round(PTS/(2*(FGA+(.44*FTA))), 3)*100,
               `OPP EFG%` = round((`OPP FGM` + 0.5*`OPP 3PM`)/`OPP FGA`, 3)*100,
               `OPP TS%` = round(`OPP PTS`/(2*(`OPP FGA`+(.44*`OPP FTA`))), 3)*100)
    }
    others_lineups <- do.call('rbind', others_lineups)
    # Apply the per-mode normalisation (same logic as the breakdown tab).
    if (input$data == "Totals") {
      dataset <- dataset
      others_lineups <- others_lineups
    } else if (input$data == "Per Game") {
      dataset <- dataset %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(./GP, 2)))
      others_lineups <- others_lineups %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(./GP, 2)))
    } else if (input$data == "Per 100 Poss") {
      dataset <- dataset %>%
        mutate(POSS_2 = POSS) %>%
        mutate_at(vars(POSS:`OPP PITP`), funs(round(100*./POSS_2, 2)))
      others_lineups <- others_lineups %>%
        mutate(POSS_2 = POSS) %>%
        mutate_at(vars(POSS:`OPP PITP`), funs(round(100*./POSS_2, 2)))
    } else if (input$data == "Per 48 Min") {
      dataset <- dataset %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(48*./MIN, 2)))
      others_lineups <- others_lineups %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(48*./MIN, 2)))
    }
    for (i in 1:nrow(dataset)) {
      table[i, 4] <- unname(unlist(dataset[i, tmStat]))
      table[i, 5] <- unname(unlist(others_lineups[i, tmStat]))
      table[i, 6] <- round(table[i, 4] - table[i, 5], 1)
      table[i, 7] <- unname(unlist(dataset[i, oppStat]))
      table[i, 8] <- unname(unlist(others_lineups[i, oppStat]))
      table[i, 9] <- round(table[i, 7] - table[i, 8], 1)
      table[i, 10] <- round(table[i, 6] - table[i, 9], 1)
    }
    # For "bad" stats (turnovers, fouls) a smaller swing is better, so
    # sort ascending instead of descending.
    if (input$stat %in% c('TOV', 'PF')) {
      table <- table %>%
        arrange(`Difference in Swing`)
    } else {
      table <- table %>%
        arrange(desc(`Difference in Swing`))
    }
    sketch = htmltools::withTags(table(
      class = 'display',
      thead(
        tr(
          th(rowspan = 2, 'LINEUP'),
          th(rowspan = 2, 'GP'),
          th(rowspan = 2, 'MIN'),
          th(colspan = 3, 'TEAM'),
          th(colspan = 3, 'OPPONENT')
        ),
        tr(
          lapply(c(rep(c('On-Court', 'Off-Court', 'Swing'), 2), 'Difference in Swing'), th)
        )
      )
    ))
    datatable(table, container = sketch, rownames = FALSE)
  })
}
shinyApp(ui, server)
# dataset <- dataInput() %>% filter(LINEUPS %in% input$players)
#
# if (input$data == "Totals") {
# dataset <- dataset
# } else if (input$data == "Per Game") {
# dataset <- dataset %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(./GP, 2)))
# } else if (input$data == "Per 100 Poss") {
# dataset <- dataset %>%
# mutate(POSS_2 = POSS) %>%
# mutate_at(vars(POSS:`OPP PITP`), funs(round(100*./POSS_2, 2)))
# } else if (input$data == "Per 48 Min") {
# dataset <- dataset %>% mutate_at(vars(POSS:`OPP PITP`), funs(round(48*./MIN, 2)))
# }
#
#
# for (i in 1:nrow(results)) {
# results[i,1] <- dataset[dataset$RESULT=="Combined", which(names(dataset)==tm_stats[i])]
# results[i,4] <- dataset[dataset$RESULT=="Combined", which(names(dataset)==opp_stats[i])]
# results[i,7] <- results[i,1] - results[i,4]
#
# if (sum(dataset$RESULT=='W')>0) {
# results[i,2] <- dataset[dataset$RESULT=="W", which(names(dataset)==tm_stats[i])]
# results[i,5] <- dataset[dataset$RESULT=="W", which(names(dataset)==opp_stats[i])]
# results[i,8] <- results[i,2] - results[i,5]
# }
#
# if (sum(dataset$RESULT=='L')>0) {
# results[i,3] <- dataset[dataset$RESULT=="L", which(names(dataset)==tm_stats[i])]
# results[i,6] <- dataset[dataset$RESULT=="L", which(names(dataset)==opp_stats[i])]
# results[i,9] <- results[i,3] - results[i,6]
# }
#
# results[i,10] <- results[i,8] - results[i,9]
# }
#
#
# for (j in 1:ncol(results)) set(results, which(is.infinite(results[[j]])), j, NA)
# results["TOV", 10] <- results["TOV", 10] * -1
# results["PF", 10] <- results["PF", 10] * -1
# results <- results[order(-results$`Win-to-Loss Change`) , ]
#
# formattable(results, align='c',
# list(
# `Win-to-Loss Change` = formatter("span",
# style = ~ style(color = ifelse(`Win-to-Loss Change` > 0, "green", "red")))))
|
265557d64c4da518785a480bb083b6a8c1a9fd87
|
57dc9106303ae7d4749626b8929e47ebff5fdfbb
|
/makeplot-square.R
|
156d3b17075ba9f4b5991343f51a3d46e3e2c27a
|
[] |
no_license
|
storaged/simulation
|
effe97b9d0428b8c5402e9e7850c942b8ea10a4a
|
2228ad2e43a6dbc2d6bd60b0ba93e2148b3c0a4e
|
refs/heads/master
| 2016-09-05T10:32:46.405316
| 2014-07-22T22:45:09
| 2014-07-22T22:45:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 661
|
r
|
makeplot-square.R
|
# Plot TE-simulation results read from 'square.Rdata' (whitespace table:
# column 1 = stress-level index, columns 2..101 = 100 simulation runs).
pdf('square-plot.pdf')
# Renamed from `T`, which masked the built-in TRUE alias.
res <- as.matrix(read.table('square.Rdata'))
boxplot.matrix(res[,2:101], use.cols=FALSE, xlab='Environmental stress level (phenotypic units/generation)', ylab='Number of autonomous transposons', names=format(res[,1]*0.002, scientific=FALSE), cex.lab=1.3)
dev.off()
pdf('perc-plot.pdf')
v <- res[,1]
# For each of the first 50 stress levels, count runs whose final transposon
# count is zero — vectorised replacement for the original double loop.
# NOTE(review): as in the original, rows beyond 50 keep their raw
# first-column values in `v`; confirm the table has exactly 50 rows.
v[seq_len(50)] <- rowSums(res[seq_len(50), 2:101, drop = FALSE] == 0)
options(scipen=5)
plot(y=v, xlab='Environmental stress level (phenotypic units/generation)', ylab='Numer of runs in which TEs have been excised (out of a total of 100)', x=res[,1]*0.002, cex.lab=1.3)
lines(y=v, x=res[,1]*0.002)
abline(h=100, lty=2)  # reference: all 100 runs excised
dev.off()
|
61013189a87eaef24088820ce05b72cfc7caa68c
|
8a8269eb23d9580db8eaeb184e5a9de790059a01
|
/ui.R
|
dd0330e57de3c1ba41743e22ce24faa98c08e240
|
[] |
no_license
|
yizhouthu/DataProductsProject
|
93c96ce0ba189f5e322fba7fd3d36915ea49a910
|
287cfe3d5e09a7ac201aa7f74b023f40d6d619c6
|
refs/heads/master
| 2021-01-12T17:07:22.676285
| 2016-10-15T16:47:21
| 2016-10-15T16:47:21
| 69,980,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,188
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# UI for the "guess the value of x" game: a problem selector and guess
# input on the left, instructions and feedback on the right. The
# problem text and feedback outputs are rendered in server.R (not shown).
shinyUI(pageWithSidebar(
  headerPanel("Computing Without A Calculator"),
  sidebarPanel(
    radioButtons(
      "problemNum", "Problem Number:",
      c("1" = "1", "2" = "2", "3" = "3")
    ),
    # Problem statement rendered server-side for the selected number.
    uiOutput("problemContent"),
    numericInput("guess", "Your guess:", value = 5, min = 0, max = 10,
                 step = 0.01),
    actionButton("Submit", "Submit")
  ),
  mainPanel(
    h2("Guess the value of x"),
    p("The object of this game is to guess the unknown x without a calculator.
       The result should be rounded to 2 decimal places, so your guess should
       also be a number with 2 decimal places. After each wrong guess, the
       fitted value of your guess will be shown."),
    h2("Result of your guess"),
    h4("You entered"),
    textOutput("inputValue"),
    uiOutput("fittedValueOrSuccess")
  )
))
|
9feb31c906794df95f14c440d395e0f227c86d3b
|
35e9bba795e44aba136ce5cbbc4be37869555973
|
/example_mpg.R
|
64cf418c03f762c3ca262e5643579c34e7ef9faf
|
[] |
no_license
|
hyunyouchoi/Programming-Machine-Learning-Application
|
fbe1f7cc174fe6c7891a79732aeac08842f41a17
|
07cb8678fceb98d39a2e05b42e93dd97abd19a73
|
refs/heads/master
| 2021-01-19T03:36:39.693581
| 2017-04-13T00:20:07
| 2017-04-13T00:20:07
| 87,329,206
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,689
|
r
|
example_mpg.R
|
library(shiny)
library(datasets)
# Shiny app: boxplots of mtcars mpg against a user-chosen grouping variable.
ui <- fluidPage (
  pageWithSidebar(
    # Application title
    headerPanel("Miles Per Gallon"),
    # Sidebar with controls to select the variable to plot against mpg
    # and to specify whether outliers should be included
    sidebarPanel(
      selectInput("variable", "Variable:",
                  list("Cylinders" = "cyl",
                       "Transmission" = "am",
                       "Gears" = "gear")),
      checkboxInput("outliers", "Show outliers", FALSE)
    ),
    # Show the caption and plot of the requested variable against mpg
    mainPanel(
      h3(textOutput("caption")),
      plotOutput("mpgPlot")
    )
  ))
# We tweak the "am" field to have nicer factor labels. Since this doesn't
# rely on any user inputs we can do this once at startup and then use the
# value throughout the lifetime of the application
# NOTE(review): the factor relabelling below is commented out, so "am"
# currently plots as 0/1 rather than Automatic/Manual.
mpgData <- mtcars
#mpgData$am <- factor(mpgData$am, labels = c("Automatic", "Manual"))
# Define server logic required to plot various variables against mpg
server <- function(input,output) {
  # Compute the forumla text in a reactive expression since it is
  # shared by the output$caption and output$mpgPlot expressions
  formulaText <- reactive({
    paste("mpg ~", input$variable)
  })
  #Return the formula text for printing as a caption
  output$caption <- renderText({
    formulaText()
  })
  # Generate a plot of the requested variable against mpg and only
  # include outliers if requested
  output$mpgPlot <- renderPlot({
    boxplot(as.formula(formulaText()),
            data = mpgData,
            outline = input$outliers)
  })
}
shinyApp(ui, server)
|
8caeaf639c3e3593d537418688673d851a861912
|
20fb140c414c9d20b12643f074f336f6d22d1432
|
/man/NISTukThUnthPerCubFtTOjoulePerCubMeter.Rd
|
b48212c07379e0722420fe096e450d9a122a6139
|
[] |
no_license
|
cran/NISTunits
|
cb9dda97bafb8a1a6a198f41016eb36a30dda046
|
4a4f4fa5b39546f5af5dd123c09377d3053d27cf
|
refs/heads/master
| 2021-03-13T00:01:12.221467
| 2016-08-11T13:47:23
| 2016-08-11T13:47:23
| 27,615,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
rd
|
NISTukThUnthPerCubFtTOjoulePerCubMeter.Rd
|
\name{NISTukThUnthPerCubFtTOjoulePerCubMeter}
\alias{NISTukThUnthPerCubFtTOjoulePerCubMeter}
\title{Convert British thermal unitth per cubic foot to joule per cubic meter }
\usage{NISTukThUnthPerCubFtTOjoulePerCubMeter(ukThUnthPerCubFt)}
\description{\code{NISTukThUnthPerCubFtTOjoulePerCubMeter} converts from British thermal unitth per cubic foot (Btuth/ft3) to joule per cubic meter (J/m3) }
\arguments{
\item{ukThUnthPerCubFt}{British thermal unitth per cubic foot (Btuth/ft3) }
}
\value{joule per cubic meter (J/m3) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTukThUnthPerCubFtTOjoulePerCubMeter(10)
}
\keyword{programming}
|
2282c32dbdb8255635dcb23a2cfd4105565e3764
|
1e0198da60d1cddb803ecee7c21f21e7ee51e53d
|
/run_analysis.R
|
f967102f18242060557fbc385add38a9549a2c33
|
[] |
no_license
|
ptoctoi/datasciencecoursera
|
d599c28da705942933b31824cb40a45b8cbf034d
|
709f7b76c69229fcc9352c9831e2dffb685127ab
|
refs/heads/master
| 2021-01-10T04:46:04.231495
| 2016-02-25T11:00:34
| 2016-02-25T11:06:18
| 48,130,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,630
|
r
|
run_analysis.R
|
## Read files
# Activity labels: V1 = numeric activity code, V2 = activity name
activity <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\activity_labels.txt")
# Feature names: V1 = column index into X_*.txt, V2 = feature name
features <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\features.txt")
# Train files
xtrain <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\train\\X_train.txt")
ytrain <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\train\\Y_train.txt")
subjtrain <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\train\\subject_train.txt")
# Test files
xtest <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\test\\X_test.txt")
ytest <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\test\\Y_test.txt")
subjtest <- read.table("C:\\Users\\ptocto\\Documents\\UCI HAR Dataset\\test\\subject_test.txt")

# Attach activity descriptions with a positional lookup instead of merge():
# merge() sorts its result, which would misalign the activity labels with the
# corresponding rows of X_train/X_test.  (read.table names the columns V1/V2,
# so the original by.x = "activity" also referenced a nonexistent column.)
ytraindesc <- data.frame(activity = ytrain$V1,
                         desc.activity = activity$V2[match(ytrain$V1, activity$V1)])
ytestdesc <- data.frame(activity = ytest$V1,
                        desc.activity = activity$V2[match(ytest$V1, activity$V1)])

# Indices of feature columns whose name contains "mean" or "std".
# Grep must run on the name column features$V2, not the data.frame itself.
# NOTE: as a regex, "mean()" matches the literal substring "mean" (the
# parentheses form an empty group), so meanFreq() columns are included as
# well -- 79 columns in total, matching the original selection.
featuresm <- grep("mean()", features$V2)
featuress <- grep("std()", features$V2)
featuresf <- c(featuresm, featuress)

# Keep only the mean/std columns (base subsetting; no dplyr needed) and
# name them after the features, stripping "-" (vectorized, no index loop).
xtrainsub <- xtrain[, featuresf]
xtestsub <- xtest[, featuresf]
names(xtrainsub) <- gsub("-", "", features$V2[featuresf])
names(xtestsub) <- gsub("-", "", features$V2[featuresf])

# Assemble per-set frames: Activity code, description, subject id, features.
# The subject column is required by the aggregate() step below.
filefinal <- data.frame(Activity = ytestdesc$activity,
                        desc.activity = ytestdesc$desc.activity,
                        subject = subjtest$V1,
                        xtestsub)
filefinal2 <- data.frame(Activity = ytraindesc$activity,
                         desc.activity = ytraindesc$desc.activity,
                         subject = subjtrain$V1,
                         xtrainsub)

# Tag each row with its data set of origin before combining
filefinaltest <- data.frame("Type Data" = "Test", filefinal, check.names = FALSE)
filefinaltrain <- data.frame("Type Data" = "Train", filefinal2, check.names = FALSE)

# Combine data of train and test
FileTrainTest <- rbind(filefinaltest, filefinaltrain)

# Generate mean of every feature column (columns 5+) by subject and activity
tidydata <- aggregate(FileTrainTest[, 5:ncol(FileTrainTest)],
                      by = list(subject = FileTrainTest$subject,
                                desc.activity = FileTrainTest$desc.activity),
                      mean)

# write final data to disk
write.table(tidydata, file = "./tidy_data.txt", row.names = FALSE)
|
0c623baa481063de885412d8fede68ba599b3c63
|
d31b057957591ec4f8ea01289279e62ffdc4cfc8
|
/plot1.R
|
181143d083a9cefe54fb32c5da653c3c38446baa
|
[] |
no_license
|
dajit/ExData_Plotting1
|
d752706518837f1f535f4a2dbece1e522a4d5c81
|
b240e8c340890cb8596cf01fc2db4f973524d666
|
refs/heads/master
| 2021-01-18T14:29:15.072297
| 2016-08-14T22:34:58
| 2016-08-14T22:34:58
| 65,675,812
| 0
| 0
| null | 2016-08-14T16:30:50
| 2016-08-14T16:30:48
| null |
UTF-8
|
R
| false
| false
| 1,142
|
r
|
plot1.R
|
# Load required library (base is always attached; the old library(base)
# call was redundant and has been removed)
library(data.table)

# Download the zipped data set into the working directory and unpack it
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipName <- file.path(getwd(), "input_data.zip")
download.file(url, zipName)
unzip(zipName, overwrite = TRUE)

# Read the ";"-separated input file (large: ~150 MB in memory)
idt <- fread("household_power_consumption.txt", sep = ";")

# Keep only the two days of interest.  A single %in% subset replaces the
# original filter-each-day-then-rbind sequence.
td <- idt[idt$Date %in% c("1/2/2007", "2/2/2007"), ]

# Plot 1: histogram of Global Active Power, drawn directly to a PNG device
png(file = "plot1.png", width = 480, height = 480, units = "px")
# Coerce to numeric because fread leaves the column as character when it
# contains non-numeric missing-value markers
hist(as.numeric(td$Global_active_power), col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
# Close the PNG device so the file is flushed to disk.  (The original's
# trailing dev.set(which = 2) after dev.off() was a leftover and is gone.)
dev.off()
|
f090d67e26b90df4686733632a5e53b7e3354bda
|
e4b593c614b6388e9c28eb50ea0379d62e0d8b3a
|
/G3_loanAnalysis_ModelBuilding2.R
|
25060cf09dc53298bfc6235c2785e0c8b98567bd
|
[] |
no_license
|
demonxy468/R-LoanAnalysis
|
bef594d3541cb8187aafa262e13931cb2648ebd2
|
8ded8585f24afb91107b274bb54502df8d16f02f
|
refs/heads/master
| 2021-01-17T00:41:51.777214
| 2016-07-09T16:28:47
| 2016-07-09T16:28:47
| 62,958,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,218
|
r
|
G3_loanAnalysis_ModelBuilding2.R
|
# IST 718 - Final Project
# Group 3 - Ha Nguyen - Jaydeep - Ye Xu
# File: G3_loanAnalysis_ModelBuilding2.R
# Purpose: Predict Good/Bad loan from lendingclub data
# Pre-processing data
# Run the file at working directory
source("G3_loanAnalysis_PreProcess.R")
# Output is transformed data loan3c_Reduced and loan3d_Reduced
# NOTE(review): loan3c_Reduced appears to be the training data and
# loan3d_Reduced the 2015 hold-out set (inferred from usage below) --
# confirm against the sourced pre-process script.
################################# MODEL BUILDING ###########################################
############# METHOD 2: Use loan3c_Reduced as training, sample good/bad loan ratio as 50/50 ####
#########################################################################################################
# Split training data by outcome: loan_status 0 = good, 1 = bad
loan3Good <- loan3c_Reduced[loan3c_Reduced$loan_status == 0,] # No of row = 31369
loan3Bad <- loan3c_Reduced[loan3c_Reduced$loan_status == 1,] # No of row = 13025
# Take a sample of 50% from the Good Loan
# (seed fixed so the down-sampled training set is reproducible)
set.seed(1)
GoodSample <- sample(nrow(loan3Good), nrow(loan3Good)/2)
loan3GoodSample <- loan3Good[GoodSample,] # No of row = 15684
# Merging GoodLoan Sample with Bad Loan so that the combined dataset has distribution good/bad ~ 50/50
loan3Sample <- rbind(loan3GoodSample, loan3Bad)
# Use the combined dataset for training models and predict data year 2015
##### LOGISTIC REGRESSION ############################################
# Fit on all predictors except grade and pub_rec (excluded throughout)
glm3.2 <- glm(loan_status ~.-grade-pub_rec, data = loan3Sample, family = binomial)
summary(glm3.2)
#Predict data year 2015
glm.predProb2 <- predict(glm3.2, loan3d_Reduced, type = "response")
summary(glm.predProb2)
# Threshold predicted probabilities at 0.5 to get class labels
glm.pred2 = ifelse(glm.predProb2 > 0.5, 1, 0)
# Check how well the model predict
table(glm.pred2, loan3d_Reduced$loan_status) #Accuracy increased
##### RANDOM FOREST ############################################
library(randomForest)
library(caret)
# na.roughfix imputes missing predictor values by column median/mode
rf2.2 <- randomForest(loan_status ~.-grade-pub_rec, data = loan3Sample, mtry=5, ntree=400, na.action=na.roughfix)
rf2.pred2 <- predict(rf2.2, loan3d_Reduced)
confusionMatrix(rf2.pred2, loan3d_Reduced$loan_status)
##### NAIVE BAYES ############################################
library(e1071)
nb2 <- naiveBayes(loan_status ~.-grade-pub_rec, data = loan3Sample)
nb2.pred <- predict(nb2, loan3d_Reduced)
confusionMatrix(nb2.pred, loan3d_Reduced$loan_status)
##### CALCULATE AUC ############################################
# NOTE(review): roc() comes from the pROC package, which is not loaded in
# this file -- presumably attached by the sourced pre-process script; verify.
glmROC2 <- roc(loan_status ~ as.numeric(glm.pred2), data = loan3d_Reduced, plot = T, col = "red", lty = 1)
nbROC2 <- roc(loan_status ~ as.numeric(nb2.pred), data = loan3d_Reduced, plot = T, add = T, col= "green", lty = 1)
rfROC2 <- roc(loan_status ~ as.numeric(rf2.pred2), data = loan3d_Reduced, plot = T, add = T, col = "blue", lty = 1)
legend(0.6,0.6, c('logistic regression','naive-bayes', 'random forest'),col=c('red','green', 'blue'),lwd=3, xjust = -0.5)
#AUC score
cbind(glmAUC = glmROC2$auc, nbAUC = nbROC2$auc, rfAUC = rfROC2$auc)
###################################### Visulize predicting results of random forest #####################################
# Class probabilities from the random forest (one column per class)
pre <- predict(rf2.2, loan3d_Reduced, type = "prob")
prediction <- data.frame(list(pre)) #,row.names = NULL
colnames(prediction) <- c("Good.Loan.Rate","Bad.Loan.Rate")
# Attach selected predictors for plotting against the predicted rates
prediction$loan_amnt <- loan3d_Reduced$loan_amnt
# substr(.., 0, 3) behaves like start = 1: keeps the first 3 characters
prediction$term <- substr(loan3d_Reduced$term, 0, 3)
prediction$home_ownership <- loan3d_Reduced$home_ownership
prediction$installment <- loan3d_Reduced$installment
prediction$annual_inc <- loan3d_Reduced$annual_inc
prediction$open_acc <- loan3d_Reduced$open_acc
prediction$total_acc <- loan3d_Reduced$total_acc
#=============== Visualize random forest prediction==============================
library(ggplot2)
#Visulization with loan amount and default probabilities, group by loan term
p1 <- ggplot(data=prediction, aes(x=loan_amnt, y=Bad.Loan.Rate))
p1 + geom_point(position = "jitter",aes(colour = prediction$home_ownership)) + geom_smooth(method = loess,aes(colour = factor(prediction$home_ownership)))
#Visualize with account opened and default probabilities, group by home ownership
p1 <- ggplot(data=prediction, aes(x=total_acc, y=Bad.Loan.Rate))
p1 + geom_bar( aes(fill=term),position="dodge",stat="identity") +facet_wrap("home_ownership")
|
b6d735553e11e7caa55b38789b4086398d8eea04
|
d44d9e695583b08a6dc6b41f16479406ae12444c
|
/inst/doc/bmassIntro.2.RealData.R
|
a6125556e3ad02d0bc239bd76fd4c12d405c0a3d
|
[] |
no_license
|
cran/bmass
|
70aeaae4dadda909b9eace7b96398c589fdb4d95
|
a7e29d68c846db10377893741a387148a63fe99d
|
refs/heads/master
| 2020-05-24T08:14:51.982486
| 2019-05-17T06:20:12
| 2019-05-17T06:20:12
| 187,180,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,882
|
r
|
bmassIntro.2.RealData.R
|
## ----eval=FALSE----------------------------------------------------------
# library("bmass");
# HDL <- read.table("jointGwasMc_HDL.formatted.QCed.txt.gz", header=T);
# LDL <- read.table("jointGwasMc_LDL.formatted.QCed.HDLMatch.txt.gz", header=T);
# TG <- read.table("jointGwasMc_TG.formatted.QCed.HDLMatch.txt.gz", header=T);
# TC <- read.table("jointGwasMc_TC.formatted.QCed.HDLMatch.txt.gz", header=T);
# load("bmassDirectory/data/GlobalLipids2013.GWASsnps.rda");
# Phenotypes <- c("HDL", "LDL", "TG", "TC");
# bmassResults <- bmass(Phenotypes, GlobalLipids2013.GWASsnps);
## ----eval=FALSE----------------------------------------------------------
# > summary(bmassResults)
# Length Class Mode
# MarginalSNPs 3 -none- list
# PreviousSNPs 4 -none- list
# NewSNPs 3 -none- list
# LogFile 20 -none- character
# ZScoresCorMatrix 16 -none- numeric
# Models 324 -none- numeric
# ModelPriors 1134 -none- numeric
# GWASlogBFMinThreshold 1 -none- numeric
## ----eval=FALSE----------------------------------------------------------
# > summary(bmassResults$NewSNPs)
# Length Class Mode
# SNPs 30 data.frame list
# logBFs 5427 -none- numeric
# Posteriors 5427 -none- numeric
## ----eval=FALSE----------------------------------------------------------
# > head(bmassResults$NewSNPs$SNPs, n=3)
# ChrBP Chr BP Marker MAF A1 HDL_A2 HDL_Direction
# 1704 10_101902054 10 101902054 rs2862954 0.4631 C T +
# 72106 10_5839619 10 5839619 rs2275774 0.1781 G A +
# 118903 11_109521729 11 109521729 rs661171 0.2876 T G +
# HDL_pValue HDL_N HDL_ZScore LDL_Direction LDL_pValue LDL_N
# 1704 1.287e-06 186893 4.841751 + 5.875e-01 172821.0
# 72106 7.601e-07 179144 4.945343 - 7.773e-05 165198.0
# 118903 1.705e-06 186946 4.785573 + 1.653e-02 172877.9
# LDL_ZScore TG_Direction TG_pValue TG_N TG_ZScore TC_Direction
# 1704 0.5424624 + 0.013930 177587.1 2.459063 +
# 72106 -3.9512933 - 0.001035 169853.0 -3.280836 -
# 118903 2.3969983 - 0.155800 177645.0 -1.419340 +
# TC_pValue TC_N TC_ZScore GWASannot mvstat mvstat_log10pVal unistat
# 1704 2.526e-04 187083 3.659609 0 48.70946 9.173067 23.44255
# 72106 1.911e-02 179333 -2.343378 0 38.26288 7.004804 24.45642
# 118903 1.785e-05 187131 4.290215 0 37.84098 6.917765 22.90171
# unistat_log10pVal Nmin logBFWeightedAvg
# 1704 5.890421 172821.0 7.068306
# 72106 6.119129 165198.0 5.447201
# 118903 5.768276 172877.9 5.438490
## ----eval=FALSE----------------------------------------------------------
# > dim(bmassResults$NewSNPs$logBFs)
# [1] 81 67
# > bmassResults$NewSNPs$logBFs[1:5,1:10]
# HDL LDL TG TC 10_101902054 10_5839619 11_109521729 11_13313759
# [1,] 0 0 0 0 0.000000 0.0000000 0.0000000 0.00000000
# [2,] 1 0 0 0 -233.831047 -235.0781367 -234.6670299 -234.15472067
# [3,] 2 0 0 0 0.000000 0.0000000 0.0000000 0.00000000
# [4,] 0 1 0 0 0.165855 0.3172489 -0.1100418 0.06393596
# [5,] 1 1 0 0 -64.774919 -66.2959645 -69.2388829 -69.93309528
# 11_45696596 11_47251202
# [1,] 0.00000000 0.00000000
# [2,] -231.68478695 -219.10321932
# [3,] 0.00000000 0.00000000
# [4,] -0.04838241 0.04997886
# [5,] -65.59917325 -45.04269241
## ----eval=FALSE----------------------------------------------------------
# > summary(bmassResults$PreviousSNPs)
# Length Class Mode
# logBFs 12069 -none- numeric
# SNPs 30 data.frame list
# DontPassSNPs 30 data.frame list
# Posteriors 12069 -none- numeric
## ----eval=FALSE----------------------------------------------------------
# > summary(bmassResults$MarginalSNPs)
# Length Class Mode
# SNPs 30 data.frame list
# logBFs 20493 -none- numeric
# Posteriors 20493 -none- numeric
## ----eval=FALSE----------------------------------------------------------
# > bmassResults$ZScoresCorMatrix
# HDL_ZScore LDL_ZScore TG_ZScore TC_ZScore
# HDL_ZScore 1.0000000 -0.0872789 -0.3655508 0.1523894
# LDL_ZScore -0.0872789 1.0000000 0.1607208 0.8223175
# TG_ZScore -0.3655508 0.1607208 1.0000000 0.2892982
# TC_ZScore 0.1523894 0.8223175 0.2892982 1.0000000
## ----eval=FALSE----------------------------------------------------------
# > dim(bmassResults$Models)
# [1] 81 4
# > head(bmassResults$Models)
# HDL LDL TG TC
# [1,] 0 0 0 0
# [2,] 1 0 0 0
# [3,] 2 0 0 0
# [4,] 0 1 0 0
# [5,] 1 1 0 0
# [6,] 2 1 0 0
## ----eval=FALSE----------------------------------------------------------
# > length(bmassResults$ModelPriors)
# [1] 1134
# > bmassResults[c("ModelPriorMatrix", "LogFile")] <- GetModelPriorMatrix(Phenotypes, bmassResults$Models, bmassResults$ModelPriors, bmassResults$LogFile)
# > head(bmassResults$ModelPriorMatrix)
# HDL LDL TG TC Prior Cumm_Prior OrigOrder
# 1 1 2 1 1 0.32744537 0.3274454 44
# 2 1 2 2 1 0.13788501 0.4653304 53
# 3 1 1 1 2 0.11727440 0.5826048 68
# 4 1 1 1 1 0.07801825 0.6606230 41
# 5 1 2 1 2 0.06210658 0.7227296 71
# 6 2 1 2 2 0.05698876 0.7797184 78
## ----eval=FALSE----------------------------------------------------------
# > bmassResults$GWASlogBFMinThreshold
# [1] 4.289906
|
8d7b06f4d27cef60b57a0dbb667b7008846b79e5
|
d0d916e9c0624bd92544754db5d92df57c477df7
|
/scripts/read_na_data.R
|
4d398e5cd1e12fad16169a762251a247fbd65521
|
[] |
no_license
|
keyes-timothy/med-pride
|
156c97a8041a1365827bc13cad595b502fb062a1
|
59cbc245d3178071b776bef4498bc785118034f8
|
refs/heads/master
| 2023-04-08T18:12:44.733368
| 2023-03-24T03:15:59
| 2023-03-24T03:15:59
| 244,046,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,167
|
r
|
read_na_data.R
|
### Description
### This script reads and recodes data from the 2018-2020 MSPA National
### Needs-assessment of LGBTQ+ medical students in the United States.
# Author: Timothy Keyes
# Version: 2020-03-23
# Libraries
library(tidyverse)
# Parameters
in_path <- here::here("data-raw", "mspa_na_raw.csv")
names_path <- here::here("data-raw", "school_names.csv")
out_path <- here::here("data", "mspa_na_data.rds")
# Column names assigned to the raw survey export (one entry per column of
# mspa_na_raw.csv, in file order -- this vector must stay aligned with the
# CSV layout, since it is passed to read_csv(col_names = ...) below).
var_names <-
  c(
    "participant_id",
    "survey_id",
    "timestamp",
    "consent",
    "school_attend",
    "med_school_year",
    "med_school_year_other",
    "is_lgbtq",
    "sab_is_male",
    "sab_is_female",
    "gender_man",
    "gender_woman",
    "gender_agender",
    "gender_genderqueer",
    "gender_transman",
    "gender_transwoman",
    "gender_another",
    "gender_another_description",
    "so_asexual",
    "so_bisexual",
    "so_gay",
    "so_lesbian",
    "so_pansexual",
    "so_queer",
    "so_questioning",
    "so_heterosexual",
    "so_another",
    "so_another_description",
    "race_native",
    "race_asian",
    "race_black",
    "race_pi",
    "race_white",
    "race_hispanic",
    "race_another",
    "race_another_explanation",
    "interaction_agree",
    "interaction_satisfaction",
    "personal_benefit_mspa",
    "community_benefit_mspa",
    "enhanced_activity_mspa_lgbtq_meded",
    "enhanced_activity_mspa_social",
    "enhanced_activity_mspa_di_training",
    "enhanced_activity_mspa_discrim_bias_reduction",
    "enhanced_activity_mspa_mentorship",
    "enhanced_activity_mspa_advocacy",
    "enhanced_activity_mspa_global_health",
    "enhanced_activity_mspa_other",
    "enhanced_activity_mspa_other_explanation",
    "school_affinity_group_exist",
    "school_affinity_group_benefit",
    "school_affinity_group_involved",
    "why_not_involved_time",
    "why_not_involved_value",
    "why_not_involved_opportunities",
    "why_not_involved_uninterested",
    "why_not_involved_not_queer",
    "why_not_involved_another",
    "why_not_involved_another_explanation",
    "school_activities_advocacy",
    "school_activities_social",
    "school_activities_mentorship",
    "school_activities_educational",
    "school_activities_research",
    "school_activities_intercollegiate",
    "school_activities_other",
    "school_activities_other_explanation",
    "school_affinity_group_mission",
    "school_affinity_group_supported",
    "school_affinity_group_identify",
    "interest_lgbtq_meded",
    "interest_lgbtq_social",
    "interest_lgbtq_bias_training",
    "interest_lgbtq_advocacy",
    "interest_lgbtq_global_health",
    "interest_lgbtq_other",
    "importance_lgbtq_meded",
    "importance_lgbtq_social",
    "importance_lgbtq_bias_training",
    "importance_lgbtq_mentorship",
    "importance_lgbtq_advocacy",
    "importance_lgbtq_global_health",
    "satisfaction_lgbtq_meded",
    "satisfaction_lgbtq_social",
    "satisfaction_bias_training",
    "satisfaction_lgbtq_mentorship",
    "satisfaction_lgbtq_advocacy",
    "satisfaction_lgbtq_global_health",
    "out_classmates_peers",
    "out_labmates_coworkers_team",
    "out_mentors",
    "out_medical_school_app",
    "out_residency_app",
    "out_other",
    "out_other_explanation",
    "ability_out_classmates_peers",
    "ability_out_labmates_coworkers_team",
    "ability_out_mentors",
    "ability_out_medical_school_app",
    "ability_out_residency_app",
    "ability_out_other",
    "ability_out_other_explanation",
    "protections_out_classmates_peers",
    "protections_out_labmates_coworkers_team",
    "protections_out_mentors",
    "protections_out_medical_school_app",
    "protections_out_residency_app",
    "protections_out_other",
    "protections_out_other_explanation",
    "intersectionality",
    "complete"
  )
#function and variables for recoding "yes/no" variables
# Recode a checkbox-style 0/1 variable to "no"/"yes".
#
# Base-R replacement for the original dplyr::recode() call: 0 maps to "no",
# 1 maps to "yes", any other non-missing value becomes NA (the original's
# .default = NA_character_), and NA stays NA.  Dropping the dplyr call makes
# the helper dependency-free, and match() coerces before comparing, so
# character "0"/"1" input is handled as well.
#
# my_var: vector of 0/1 indicator values (numeric or character)
# returns: character vector of "no"/"yes"/NA, same length as my_var
recode_checked <- function(my_var) {
  c("no", "yes")[match(my_var, c(0, 1))]
}
# Columns treated as 0/1 "checked" indicators, recoded to "no"/"yes" via
# recode_checked() in the mutate_at() step below.
# NOTE(review): this list contains `enhanced_activity_mspa_other_explanation`,
# which by its name looks like a free-text column; recode_checked() would
# turn any text in it into NA -- confirm it is really a 0/1 indicator.
checked_vars <-
  vars(
    consent,
    sab_is_male,
    sab_is_female,
    gender_agender,
    gender_man,
    gender_woman,
    gender_genderqueer,
    gender_transman,
    gender_transwoman,
    gender_another,
    so_asexual,
    so_bisexual,
    so_gay,
    so_lesbian,
    so_pansexual,
    so_queer,
    so_questioning,
    so_heterosexual,
    so_another,
    race_native,
    race_asian,
    race_black,
    race_pi,
    race_white,
    race_hispanic,
    race_another,
    enhanced_activity_mspa_lgbtq_meded,
    enhanced_activity_mspa_social,
    enhanced_activity_mspa_di_training,
    enhanced_activity_mspa_discrim_bias_reduction,
    enhanced_activity_mspa_mentorship,
    enhanced_activity_mspa_advocacy,
    enhanced_activity_mspa_global_health,
    enhanced_activity_mspa_other,
    enhanced_activity_mspa_other_explanation,
    why_not_involved_time,
    why_not_involved_value,
    why_not_involved_opportunities,
    why_not_involved_uninterested,
    why_not_involved_not_queer,
    why_not_involved_another,
    school_activities_advocacy,
    school_activities_social,
    school_activities_mentorship,
    school_activities_educational,
    school_activities_research,
    school_activities_intercollegiate,
    school_activities_other,
    importance_lgbtq_meded,
    importance_lgbtq_social,
    importance_lgbtq_bias_training,
    importance_lgbtq_mentorship,
    importance_lgbtq_advocacy,
    importance_lgbtq_global_health,
    out_classmates_peers,
    out_labmates_coworkers_team,
    out_mentors,
    out_medical_school_app,
    out_residency_app,
    out_other,
    ability_out_classmates_peers,
    ability_out_labmates_coworkers_team,
    ability_out_mentors,
    ability_out_medical_school_app,
    ability_out_residency_app,
    ability_out_other,
    protections_out_classmates_peers,
    protections_out_labmates_coworkers_team,
    protections_out_mentors,
    protections_out_medical_school_app,
    protections_out_residency_app,
    protections_out_other
  )
# Lookup vector for decoding school codes: deframe() turns the two-column
# CSV into a named vector (first column becomes the names), which is spliced
# into recode() with !!! below.  Rows with missing values are dropped first.
school_names <-
  names_path %>%
  read_csv() %>%
  drop_na() %>%
  deframe()
#===============================================================================
# Read the raw survey export, apply our own column names (skipping the
# header row), recode all checkbox columns to "no"/"yes", then recode the
# single-choice items to their human-readable labels and save the result.
# NOTE(review): mutate_at()/vars() are superseded in dplyr >= 1.0 by
# mutate(across(...)); kept as-is here to preserve behavior.
na_data <-
  in_path %>%
  read_csv(col_names = var_names, skip = 1) %>%
  mutate_at(checked_vars, recode_checked) %>%
  mutate(
    # Training stage: numeric code -> descriptive label
    med_school_year =
      recode(
        med_school_year,
        `1` = "Pre-Clinical Student (prior to clerkships)",
        `2` = "Clinical Student (on clerkships)",
        `3` = "Research (PhD, Masters, or other)",
        `4` = "Other"
      ),
    is_lgbtq =
      recode(
        is_lgbtq,
        `1` = "LGBTQ+",
        `2` = "Non-LGBTQ+"
      ),
    # These two items use 1 = yes / 2 = no (unlike the 0/1 checkboxes above)
    school_affinity_group_exist =
      recode(
        school_affinity_group_exist,
        `1` = "yes",
        `2` = "no"
      ),
    school_affinity_group_involved =
      recode(
        school_affinity_group_involved,
        `1` = "yes",
        `2` = "no"
      ),
    # Splice the code -> name lookup into recode(); unknown codes become NA
    school_attend =
      recode(
        school_attend,
        !!! school_names,
        .default = NA_character_
      )
  )

# NOTE(review): `path` is deprecated in readr 2.x in favor of `file`;
# kept for compatibility with the readr version this project pins.
write_rds(x = na_data, path = out_path)
|
a43f2e8e716508873d1ac9c003ecfc8fe596ec98
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/man/initFgeneric.Rd
|
88d98135d3ebb0cc139bb2b0ae85eec23cc04f37
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 523
|
rd
|
initFgeneric.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FLtestLib.R
\name{initFgeneric}
\alias{initFgeneric}
\title{initF.default helps to return a list of lists.
Can be used for comparing results of R and FL functions which require two objects.}
\usage{
initFgeneric(specs = list(numberattribute = 5, featureattribute = TRUE, ...),
class = "FLMatrix")
}
\description{
initF.default helps to return a list of lists.
Can be used for comparing results of R and FL functions which require two objects.
}
|
10418b79a10f3c459078586d91d71ca65c193a29
|
ee54d7727df60cc156251f3b2c479a68ce376402
|
/man/make_mgsm_psqn_obj.Rd
|
850cdc1cbae681fc4a6a17af4163d2cde067c3ed
|
[] |
no_license
|
boennecd/survTMB
|
ee108a47e8ca9e46095312a9afe779cfbe73351f
|
6a691e1e94e22c0a948b86cb0aca6ed850e77278
|
refs/heads/master
| 2023-01-05T22:59:40.057713
| 2020-11-04T08:25:13
| 2020-11-04T08:25:13
| 250,332,290
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,384
|
rd
|
make_mgsm_psqn_obj.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mgsm-psqn.R
\name{make_mgsm_psqn_obj}
\alias{make_mgsm_psqn_obj}
\title{Create mgsm Object to Pass to psqn Method}
\usage{
make_mgsm_psqn_obj(
formula,
data,
df,
tformula = NULL,
Z,
cluster,
method = c("SNVA", "GVA"),
n_nodes = 20L,
param_type = c("DP", "CP_trans", "CP"),
link = c("PH", "PO", "probit"),
theta = NULL,
beta = NULL,
opt_func = .opt_default,
n_threads = 1L,
skew_start = -1e-04,
kappa = .MGSM_default_kappa,
dtformula = NULL
)
}
\arguments{
\item{formula}{two-sided \code{\link{formula}} where the left-hand side is a
\code{\link{Surv}} object and the right-hand side is the
fixed effects.}
\item{data}{\code{\link{data.frame}} with variables used in the model.}
\item{df}{integer with the degrees of freedom used for the baseline
spline.}
\item{tformula}{\code{\link{formula}} with baseline survival function.
The time variable must be the same
symbol as used in the left-hand-side of \code{formula}.
\code{NULL} implies that \code{df} is passed to
\code{\link{nsx}}.}
\item{Z}{one-sided \code{\link{formula}} where the right-hand side are
the random effects.}
\item{cluster}{vector with integers or factors for group identifiers
(one for each observation).}
\item{method}{Method character vector indicating which approximation to setup.
See \code{\link{make_mgsm_ADFun}}.}
\item{n_nodes}{integer with the number of nodes to use in (adaptive)
Gauss-Hermite quadrature.}
\item{param_type}{characters for the parameterization used with the SNVA.
See \code{\link{make_mgsm_ADFun}}.}
\item{link}{character specifying the link function.}
\item{theta}{starting values for covariance matrix.}
\item{beta}{starting values for fixed effect coefficients.}
\item{opt_func}{general optimization function to use. It
needs to have an interface like \code{\link{optim}}.}
\item{n_threads}{integer with number of threads to use.}
\item{skew_start}{starting value for the Pearson's moment coefficient of
skewness parameter when a SNVA is used. Currently,
a somewhat arbitrary value.}
\item{kappa}{numeric scalar with the penalty in the relaxed problem
ensuring the monotonicity of the survival curve.}
\item{dtformula}{\code{\link{formula}} with the derivative of the
baseline survival function.}
}
\description{
Create mgsm Object to Pass to psqn Method
}
|
e0233b11f4decdeea3734e581b9ef612bd08b97b
|
8592e76d1aa97406f6b99f327d56ad9d9b553379
|
/M09 - W04/jh_final_dataproduct/ui.R
|
886f1453128f0208f32bdff13e223f7a28282d53
|
[] |
no_license
|
shostiou/JohnsHopkins
|
527c75a4bfb670c090ab666fe687abc375de3052
|
f18b7b489b3f788294d24a8882817502a607d044
|
refs/heads/master
| 2021-07-10T05:28:02.585274
| 2021-04-10T13:08:20
| 2021-04-10T13:08:20
| 238,987,739
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,802
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

# Libraries required for the project
library(shiny)
library(ggplot2)
library(tidyverse)

# calling the trees dataset
data(trees)

# trees variables list (axis choices offered to the user)
trees_var <- colnames(trees)
#trees_var <- c("Girth" = "Girth",
#               "Height" = "Height",
#               "Volume" = "Volume")

# list of colors for user choice
color_var <- c("black", "green", "blue", "orange")

# UI: a "Main" tab with plot controls and the scatter plot, plus an
# "Instructions" tab with usage help.
shinyUI(fluidPage(

    # Application title
    titlePanel("Exploration of the trees dataset"),

    tabsetPanel(
        # Main tab panel
        tabPanel("Main",
                 sidebarLayout(
                     sidebarPanel(
                         p(''),
                         p('please select "Instructions" tab for help'),
                         p(''),
                         selectInput("x_var", "Select the x-axis variable", choices = trees_var),
                         selectInput("y_var", "Select the y-axis variable", choices = trees_var),
                         radioButtons("radio_color", "Select color of the plot", color_var),
                         sliderInput("pt_size", "Adjust points size:", min = 1, max = 5, value = 1)
                     ), # sidebarPanel
                     # Show a plot of the generated distribution
                     mainPanel(
                         h4("Trees Dataset scatter plot"),
                         plotOutput("treesplot")
                     ) # mainPanel
                 ) # sidebarLayout
        ), # tabPanel
        # instructions tab panel
        tabPanel("Instructions",
                 p(""),
                 h2("User instructions"),
                 p("This application is used to do a basic exploration of the trees data set."),
                 p("It will automatically build a scatter plot between 2 variables of the data set"),
                 p(""),
                 h3('Step by step approach : '),
                 p("1. Please start by selecting the variable to be displayed on the x axis"),
                 p("2. Please start by selecting the variable to be displayed on the y axis"),
                 p("3. Please adjust the color of the plot according to your preferences"),
                 p("4. Please adjust the size of the scatter plot points according to your preferences")
                 # BUG FIX: removed the trailing comma that followed the last
                 # p() call -- a trailing comma inside a call creates an empty
                 # argument and makes tabPanel() fail at startup with
                 # "argument is missing, with no default".
        ) # tabPanel
    ) # tabsetPanel
)) # fluidPage
|
d340b09582dde24ede12a1dabcf738467e1b9bb7
|
cdb0d033a36e2c2ea1d19a770ec6ef405a0a9543
|
/R/defaultpost.R
|
e1749312342199b0a6e16e1cac5315e1e3630ce4
|
[] |
no_license
|
cran/rjmcmc
|
f40ce283ae608c2fb3a0f5b86d85392e8b76b162
|
0cc1714f6301a0e3e828fc058803a1b42e5bf7aa
|
refs/heads/master
| 2021-01-22T22:24:04.328438
| 2019-07-09T13:20:02
| 2019-07-09T13:20:02
| 85,538,473
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,316
|
r
|
defaultpost.R
|
#' Perform Post-Processing Using Default Bijections
#'
#' Performs Bayesian multimodel inference, estimating Bayes factors and
#' posterior model probabilities for N candidate models. Unlike
#' \code{\link{rjmcmcpost}}, this function uses a default bijection scheme based
#' on approximating each posterior by a multivariate normal distribution. The
#' result is reminiscent of the algorithm of Carlin & Chib (1995) with a
#' multivariate normal pseudo-prior. Transformation Jacobians are computed using
#' automatic differentiation so do not need to be specified.
#'
#' @param posterior A list of N matrices containing the posterior distributions
#' under each model. Generally this will be obtained from MCMC output. Note
#' that each parameter should be real-valued so some parameters may need to be
#' transformed, using logarithms for example.
#' @param likelihood A list of N functions specifying the log-likelihood
#' functions for the data under each model.
#' @param param.prior A list of N functions specifying the prior distributions
#' for each model-specific parameter vector.
#' @param model.prior A numeric vector of the prior model probabilities. Note
#' that this argument is not required to sum to one as it is automatically
#' normalised.
#' @param chainlength How many iterations to run the Markov chain for.
#' @param TM.thin How regularly to calculate transition matrices as the chain
#' progresses.
#' @param save.all A logical determining whether to save the value of the
#' universal parameter at each iteration, as well as the corresponding
#' likelihoods, priors and posteriors. If \code{TRUE}, the output object
#' occupies significantly more memory.
#' @param progress A logical determining whether a progress bar is drawn.
#' @return Returns an object of class \code{rj} (see \code{\link{rjmethods}}).
#' If \code{save.all=TRUE}, the output has named elements \code{result},
#' \code{densities}, \code{psidraws}, \code{progress} and \code{meta}. If
#' \code{save.all=FALSE}, the \code{densities} and \code{psidraws} elements
#' are omitted.
#'
#' \code{result} contains useful point estimates, \code{progress} contains
#' snapshots of these estimates over time, and \code{meta} contains
#' information about the function call.
#'
#' @importFrom stats var
#' @references Carlin, B. P. and Chib, S. (1995) Bayesian Model Choice via
#' Markov Chain Monte Carlo Methods. \emph{Journal of the Royal Statistical
#' Society, Series B, 473-484}.
#' @references Barker, R. J. and Link, W. A. (2013) Bayesian multimodel
#' inference by RJMCMC: A Gibbs sampling approach. \emph{The American
#' Statistician, 67(3), 150-156}.
#'
#' @seealso \code{\link{adiff}} \code{\link{rjmcmcpost}}
#'
#' @examples
#' ## Comparing two binomial models -- see Barker & Link (2013) for further details.
#'
#' y=c(8,16); sumy=sum(y)
#' n=c(20,30); sumn=sum(n)
#'
#' L1=function(p){if((all(p>=0))&&(all(p<=1))) sum(dbinom(y,n,p,log=TRUE)) else -Inf}
#' L2=function(p){if((p[1]>=0)&&(p[1]<=1)) sum(dbinom(y,n,p[1],log=TRUE)) else -Inf}
#'
#' p.prior1=function(p){sum(dbeta(p,1,1,log=TRUE))}
#' p.prior2=function(p){dbeta(p[1],1,1,log=TRUE)+dbeta(p[2],17,15,log=TRUE)}
#'
#' draw1=matrix(rbeta(2000,y+1,n-y+1), 1000, 2, byrow=TRUE) ## full conditional posterior
#' draw2=matrix(c(rbeta(1000,sumy+1,sumn-sumy+1),rbeta(1000,17,15)), 1000, 2)
#'
#' out=defaultpost(posterior=list(draw1,draw2), likelihood=list(L1,L2),
#' param.prior=list(p.prior1,p.prior2), model.prior=c(1,1), chainlength=1000)
#'
#' @export
defaultpost=function(posterior, likelihood, param.prior, model.prior, chainlength=10000, TM.thin=chainlength/10, progress=TRUE, save.all=TRUE){
  n.models = length(posterior)
  nTM = chainlength/TM.thin   # number of transition-matrix snapshots taken
  TM = BF = rep(list(matrix(NA, n.models, n.models)), nTM)
  n.par = rep(NA, n.models)   # parameter count for each model
  for(j in 1:n.models){
    posterior[[j]] = as.matrix(posterior[[j]])
    # Drop a "deviance" column if present (common in BUGS/JAGS output).
    if(any(colnames(posterior[[j]])=="deviance")){
      posterior[[j]] = posterior[[j]][, -which(colnames(posterior[[j]])=="deviance")]
    }
    n.par[j] = ncol(posterior[[j]])
  }
  dim.psi = sum(n.par)   # dimension of the universal parameter vector psi
  p.bar = psi = rep(NA, dim.psi)
  modlab = c(); covar = list()
  # Posterior mean and covariance of each model's block of psi; these define
  # the multivariate-normal pseudo-prior used for the other models' blocks.
  for(j in 1:n.models){
    modlab = c(modlab, rep(j, n.par[j]))
    post = posterior[[j]]
    p.bar[which(modlab==j)] = apply(post, 2, mean)
    covar[[j]] = var(post) # covariance matrix for each model
  }
  u.prior = cbind(modlab, p.bar)
  psistore = rep(list(matrix(NA, chainlength, dim.psi)), n.models)
  store = rep(list(matrix(NA, chainlength, n.models*3, dimnames=list(NULL, c(paste0("Posterior M", 1:n.models), paste0("Likelihood M", 1:n.models), paste0("Prior M", 1:n.models))))), n.models)
  message('Post-Processing Based on Normal Pseudo-Prior')
  for(j in 1:n.models){
    message('Row ', j, appendLF=FALSE)
    wuse = trunc(getOption("width")-20L) # Set up progress bar
    if(progress){ pb = utils::txtProgressBar(min=0, max=chainlength, initial=0, char="*", style=3, width=wuse) }
    term = matrix(NA,chainlength,n.models)
    is = which(modlab==j)
    for(i in 1:chainlength){
      # Draw model j's block from its posterior sample and every other
      # model's block from its normal pseudo-prior.
      psi[is] = posterior[[j]][sample(dim(posterior[[j]])[1], 1),]
      for(m in 1:n.models){
        if(m==j){ next }
        mis = which(modlab==m)
        psi[mis] = mvtnorm::rmvnorm(1, u.prior[mis,2], covar[[m]])
      }
      psistore[[j]][i,] = psi
      # Unnormalised log posterior of each candidate model given psi.
      for(k in 1:n.models){
        ind = which(modlab==k)
        like = likelihood[[k]](psi[ind])
        prior = param.prior[[k]](psi[ind])
        for(m in 1:n.models){
          if(m==k){ next }
          mis = which(modlab==m)
          # Was `log=T`: use TRUE, never the reassignable alias T.
          prior = prior + sum(mvtnorm::dmvnorm(psi[mis], u.prior[mis,2], covar[[m]], log = TRUE))
        }
        term[i,k] = like + prior + log(model.prior[k])
        store[[j]][i, k+n.models*(0:2)] = c(term[i,k], like, prior)
      }
      # Normalise on the log scale (subtract the max for numerical stability).
      term[i,] = term[i,] - max(term[i,])
      term[i,] = exp(term[i,])/sum(exp(term[i,]))
      if(any(is.na(term[i,]))){ warning(paste("NAs in chain for model",j)); break }
      if(progress){ utils::setTxtProgressBar(pb, value=i) }
      if(i%%TM.thin == 0){
        TM[[i/TM.thin]][j,]=apply(term[1:i,], 2, mean)
      }
    }
    if(progress){ close(pb) }
  }
  # Posterior model probabilities: the stationary distribution of each
  # transition matrix, i.e. the eigenvector of t(TM) with eigenvalue 1.
  # FIX: use eigen()'s documented component names `vectors`/`values`
  # instead of relying on partial matching via ev$vector / ev$value.
  prob = matrix(NA,nTM,n.models)
  for(i in 1:nTM){
    ev = eigen(t(TM[[i]]))
    prob.us = ev$vectors[,which(abs(ev$values-1) < 1e-8)]
    prob[i,] = prob.us/sum(prob.us)
    for(j in 1:n.models){
      BF[[i]][,j] = prob[i,]/prob[i,j] * model.prior[j]/model.prior
    }
  }
  if(save.all){ return(rj(list(result=list("Transition Matrix" = TM[[nTM]], "Posterior Model Probabilities"=prob[nTM,],
                                           "Bayes Factors" = BF[[nTM]], "Second Eigenvalue" = ev$values[2]),
                               densities = store, psidraws = psistore, progress=list(TM=TM, prb=prob),
                               meta=list(chainlength=chainlength, TM.thin=TM.thin))))
  } else {
    return(rj(list(result=list("Transition Matrix" = TM[[nTM]], "Posterior Model Probabilities"=prob[nTM,],
                               "Bayes Factors" = BF[[nTM]], "Second Eigenvalue" = ev$values[2]),
                   progress=list(TM=TM, prb=prob), meta=list(chainlength=chainlength, TM.thin=TM.thin))))
  }
}
|
dedccdfbaedf2422fe5067b4e83eded1d31c98e5
|
a5a46c8f1109456f2cb063be8646c25bd89d7a34
|
/Quiz4/Question2.R
|
58604a608d12d3d44e7324122d3ebc1bcd578d50
|
[] |
no_license
|
SteveLi90/GettingAndCleaningData
|
22a60bc989d436326fb008b8d9b85b1c082168ff
|
59f4fb777eb839ded999612de6e69eadb3cf78f6
|
refs/heads/master
| 2021-01-18T07:38:59.992269
| 2014-07-30T06:35:58
| 2014-07-30T06:35:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 968
|
r
|
Question2.R
|
# Load the Gross Domestic Product data for the 190 ranked countries in this data set:
#   https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
# Remove the commas from the GDP numbers in millions of dollars and average them.
# Original data source: http://data.worldbank.org/data-catalog/GDP-ranking-table

downloadDir <- "./data"
if (!file.exists(downloadDir)) {
  dir.create(downloadDir)
}

# BUG FIX: the URL previously had a trailing space, which gets percent-encoded
# into the request and breaks the download.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
dataFile <- file.path(downloadDir, "getdata_data_GDP.csv")
# NOTE(review): method = "curl" assumes the curl binary is installed; on a
# modern R the default method also handles https.
download.file(fileUrl, destfile = dataFile, method = "curl")

# Skip the 4 header lines; the ranking table holds 215 data rows.
gdpData <- read.csv(dataFile, stringsAsFactors = FALSE, skip = 4, nrows = 215,
                    col.names = c("CountryCode", "Rank", "Empty1", "Fullname", "GDP",
                                  "Empty3", "Empty4", "Empty5", "Empty6", "Empty7"))

# Strip the thousands separators before converting to numeric; unranked rows
# become NA.
gdpData$GDP <- as.numeric(gsub(",", "", gdpData$GDP))

# Average GDP (millions of dollars) across the ranked countries.
mean(gdpData$GDP, na.rm = TRUE)
|
35b443986c015daaf5f8f128fe8fd7e7a4a8f821
|
5d1db2e131d3d6ad2833800fe58c8c637b31ac9a
|
/man/divergence.Rd
|
73630368309ddab03ef2a76d8cc8f8b12f72de84
|
[] |
no_license
|
cran/calculus
|
ff9bb3676aeb9c43f8bfb80b9464cfa40d08fb90
|
1ef6b6e778cd845389b99860148db23c88f2af3e
|
refs/heads/master
| 2023-03-16T15:12:14.523441
| 2023-03-09T22:00:02
| 2023-03-09T22:00:02
| 236,567,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,572
|
rd
|
divergence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/operators.R
\name{divergence}
\alias{divergence}
\alias{\%divergence\%}
\title{Numerical and Symbolic Divergence}
\usage{
divergence(
f,
var,
params = list(),
coordinates = "cartesian",
accuracy = 4,
stepsize = NULL,
drop = TRUE
)
f \%divergence\% var
}
\arguments{
\item{f}{array of \code{characters} or a \code{function} returning a \code{numeric} array.}
\item{var}{vector giving the variable names with respect to which the derivatives are to be computed and/or the point where the derivatives are to be evaluated. See \code{\link{derivative}}.}
\item{params}{\code{list} of additional parameters passed to \code{f}.}
\item{coordinates}{coordinate system to use. One of: \code{cartesian}, \code{polar}, \code{spherical}, \code{cylindrical}, \code{parabolic}, \code{parabolic-cylindrical} or a vector of scale factors for each variable.}
\item{accuracy}{degree of accuracy for numerical derivatives.}
\item{stepsize}{finite differences stepsize for numerical derivatives. It is based on the precision of the machine by default.}
\item{drop}{if \code{TRUE}, return the divergence as a scalar and not as an \code{array} for vector-valued functions.}
}
\value{
Scalar for vector-valued functions when \code{drop=TRUE}, \code{array} otherwise.
}
\description{
Computes the numerical divergence of \code{functions} or the symbolic divergence of \code{characters}
in arbitrary \href{https://en.wikipedia.org/wiki/Orthogonal_coordinates#Table_of_orthogonal_coordinates}{orthogonal coordinate systems}.
}
\details{
The divergence of a vector-valued function \eqn{F_i} produces a scalar value
\eqn{\nabla \cdot F} representing the volume density of the outward flux of the
vector field from an infinitesimal volume around a given point.
The \code{divergence} is computed in arbitrary orthogonal coordinate systems using the
scale factors \eqn{h_i}:
\deqn{\nabla \cdot F = \frac{1}{J}\sum_i\partial_i\Biggl(\frac{J}{h_i}F_i\Biggl)}
where \eqn{J=\prod_ih_i}. When \eqn{F} is an \code{array} of vector-valued functions
\eqn{F_{d_1\dots d_n,i}}, the \code{divergence} is computed for each vector:
\deqn{(\nabla \cdot F)_{d_1\dots d_n} = \frac{1}{J}\sum_i\partial_i\Biggl(\frac{J}{h_i}F_{d_1\dots d_n,i}\Biggl)}
}
\section{Functions}{
\itemize{
\item \code{f \%divergence\% var}: binary operator with default parameters.
}}
\examples{
### symbolic divergence of a vector field
f <- c("x^2","y^3","z^4")
divergence(f, var = c("x","y","z"))
### numerical divergence of a vector field in (x=1, y=1, z=1)
f <- function(x,y,z) c(x^2, y^3, z^4)
divergence(f, var = c(x=1, y=1, z=1))
### vectorized interface
f <- function(x) c(x[1]^2, x[2]^3, x[3]^4)
divergence(f, var = c(1,1,1))
### symbolic array of vector-valued 3-d functions
f <- array(c("x^2","x","y^2","y","z^2","z"), dim = c(2,3))
divergence(f, var = c("x","y","z"))
### numeric array of vector-valued 3-d functions in (x=0, y=0, z=0)
f <- function(x,y,z) array(c(x^2,x,y^2,y,z^2,z), dim = c(2,3))
divergence(f, var = c(x=0, y=0, z=0))
### binary operator
c("x^2","y^3","z^4") \%divergence\% c("x","y","z")
}
\references{
Guidotti E (2022). "calculus: High-Dimensional Numerical and Symbolic Calculus in R." Journal of Statistical Software, 104(5), 1-37. \doi{10.18637/jss.v104.i05}
}
\seealso{
Other differential operators:
\code{\link{curl}()},
\code{\link{derivative}()},
\code{\link{gradient}()},
\code{\link{hessian}()},
\code{\link{jacobian}()},
\code{\link{laplacian}()}
}
\concept{differential operators}
|
5a50fe92642e761dc3f9970e5eb7d6ca0020f48e
|
c0230aa5c15c37a7cec0174f7e7bd04953fd04f8
|
/R/packages.R
|
1f09da5b4f31daaa130af5fa773a5ed5efd6c857
|
[] |
no_license
|
diegovalle/IRAG
|
0e52b1c3e3accc2d91059ba2ad99d0fd6ec29abb
|
aea0ca21de8cdcdd4a4ae8b3ac71721c8ef63974
|
refs/heads/master
| 2021-04-19T05:33:32.771716
| 2020-06-02T00:00:48
| 2020-06-02T00:00:48
| 249,584,367
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 562
|
r
|
packages.R
|
## Auto-install and attach the following packages.
.packs <- c("ggplot2", "tabulizer", "dplyr", "stringr",
            "rvest", "hrbrthemes", "scales", "tidyr",
            "directlabels", "readxl",
            "readr", "ggrepel", "mgcv", "tidybayes", "tsibble",
            "lubridate", "forcats")
# Try to attach each package; require() returns FALSE instead of erroring.
# vapply (not sapply) guarantees a named logical vector even for edge cases.
.success <- suppressWarnings(
  vapply(.packs, require, logical(1), character.only = TRUE)
)
.missing <- names(.success)[!.success]
# Install and attach whatever failed to load the first time.
if (length(.missing) > 0) {
  install.packages(.missing)
  vapply(.missing, require, logical(1), character.only = TRUE)
}
options(stringsAsFactors = FALSE)
|
75c0fbe3d0b1c890e286202258d099a956e63b16
|
17a7f2333706ad280247d187f4aedbeb32714714
|
/weather-functions.R
|
4e4fc22e4f61581b0d14ba066b9b57ce7c941353
|
[] |
no_license
|
t707722/city-weather
|
90c3599d55f8ad0104dc2686ffacc132187a0eea
|
ef1eafcd9e246f8518b8ee0054c63be661f9d68b
|
refs/heads/master
| 2020-09-08T02:13:14.001145
| 2018-09-04T13:26:26
| 2018-09-04T13:26:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,617
|
r
|
weather-functions.R
|
library(shiny)
library(shinythemes)
library(highcharter)
library(readr)
library(dplyr)
library(tidyr)
library(tibble)
library(forcats)
require(readxl)
require(stringr)
require(data.table)
library(rlist)
## input data
#rm(list=ls()) # remove all vars
#rm(list = setdiff(ls(), lsf.str())) remove all variables except functions
#
# AVG_YEARS = 10
# YEAR = as.numeric(format(Sys.Date(), '%Y'))
# ID = 27612
# CITY = 'Москва'
# n_m = 12
# 1981-2010 monthly precipitation normals, one row per station code.
# NOTE(review): the functions below also rely on the globals AVG_YEARS, YEAR,
# ID, CITY (see the commented example values above) being defined elsewhere.
precip_df = read.table('19812010_rus_prcp_normals.txt', skip = 2, sep = '')
# Column 1 is the station code, then the 12 months (Russian column names are
# part of the data contract, do not translate).
names(precip_df) <- c('code',
"январь" , "февраль", "март" , "апрель" , "май",
"июнь" , "июль", "август", "сентябрь", "октябрь",
"ноябрь" , "декабрь" )
# Station codes are matched as strings downstream (get_precip_data).
precip_df$code <- as.character(precip_df$code)
###
# Read the yearly CSV exports for one weather station, keep the last
# AVG_YEARS years up to YEAR (globals), stack them into one table and
# re-index it against the full calendar of YEAR (missing days become NA).
#
# code: station identifier; data files live in data_out/<code>/.
# Returns a data frame with one row per calendar day of YEAR.
munge_weather_data <- function(code){
  files = list.files(paste0('data_out/', code))
  # 4-digit year embedded in each file name.
  years = as.numeric(str_extract_all(files, '[0-9]{4}')) # extract years from characters
  files = files[(years<=YEAR)&(years>=YEAR-AVG_YEARS)]
  # BUG FIX: the paths were previously built with the global ID instead of
  # the `code` argument, so the function partially ignored its own parameter.
  files = paste0('data_out/', code, '/', files[files %like% "csv"])
  xlsx_tables <- lapply(files, fread)
  city <- xlsx_tables %>% list.stack(data.table = TRUE)
  # Third column of the exports is the daily mean temperature.
  names(city)[3] = 'temp_avg'
  city$date <- as.Date(ISOdate(as.numeric(city$year), as.numeric(city$month), as.numeric(city$day)))
  city = city[order(city$date),]
  # Full calendar of YEAR, merged in so every day has a row (NA when absent).
  dates = seq(from = as.Date(paste0(YEAR,"-01-01")), to = as.Date(paste0(YEAR,"-12-31")),by = 'day')
  dates = data.frame(date = dates)
  city = merge(dates,city, by = 'date', all = TRUE)
  # Recompute calendar columns so merge-introduced rows are filled too.
  city$month <- as.numeric(format(city$date, '%m'))
  city$day <- as.numeric(format(city$date, '%d'))
  city$year <- as.numeric(format(city$date, '%Y'))
  return(city)
}
## Normal (1981-2010) monthly precipitation for one station.
## Returns a 12-row data frame with columns `precip_normal` (mm) and `date`
## (first day of each month of the global YEAR). Reads the globals
## precip_df and YEAR.
get_precip_data <- function(code){
  # Row of monthly normals for this station; drop the leading code column.
  monthly <- precip_df[precip_df$code == code, ][-1]
  out <- data.frame(precip_normal = as.numeric(monthly))
  # Anchor each monthly value to the first day of its month.
  out$date <- seq(as.Date(paste0(YEAR, "-01-01")), length = 12, by = "months")
  out
}
# Attach per-calendar-day record highs/lows and multi-year averages to `city`,
# then return the rows for `year` with millisecond timestamps (dt), cumulative
# monthly precipitation (precip_value) and the normal-precipitation table
# merged in. Reads the globals AVG_YEARS and precip_normal.
arrange_weather_data <- function(city, year = 2018){
  # Record high per (month, day) across all years; ties keep duplicate rows.
  bb = city %>%
    group_by(month, day) %>%
    filter(temp_max == max(temp_max, na.rm=TRUE)) %>%
    rename(temp_rec_high = temp_max) %>%
    select(day, month, date, temp_rec_high)
  city = merge(city, bb[,c('date', 'temp_rec_high')], by = 'date', all.x = TRUE)
  # Record low per (month, day), same mechanism.
  bb = city %>%
    group_by(month, day) %>%
    filter(temp_min == min(temp_min, na.rm=TRUE)) %>%
    rename(temp_rec_low = temp_min) %>%
    select(day, month, date, temp_rec_low)
  city = merge(city, bb[,c('date', 'temp_rec_low')], by = 'date', all.x=TRUE)
  # NOTE(review): inside filter() both `year`s refer to the data column, so
  # `year >= year - AVG_YEARS` is always TRUE and no rows are dropped here;
  # presumably the function argument was intended — confirm before changing.
  avgs = city %>%
    filter(year >= year - AVG_YEARS) %>%
    group_by(day, month) %>%
    summarise(temp_avg_max = mean(temp_max, na.rm=TRUE),
              temp_avg_min = mean(temp_min, na.rm=TRUE),
              temp_rec_max = max(temp_max, na.rm=TRUE),
              temp_rec_min = min(temp_min, na.rm=TRUE))
  city = merge(city, avgs, by = c('month', 'day'))
  city = city[order(city$date,decreasing = TRUE),]
  # Keep only the requested year (base subsetting, so `year` here is the arg).
  data = city[city$year == year,]
  #data = city %>% filter(year == year)
  #data = city[city$year == year,]
  # Millisecond timestamps for highcharter's datetime axis.
  data <- mutate(data, dt = highcharter::datetime_to_timestamp(date))
  data = data[order(data$date),]
  # Cumulative precipitation within each month.
  data = data %>%
    group_by(month) %>%
    mutate(precip_value = cumsum(precip_mm))
  data = merge(data, precip_normal, by = 'date', all.x = TRUE)
  return(data)
}
# Build a Tufte-style highcharter weather chart for one year: record/average/
# actual temperature bands, daily-mean lines for this and the previous year,
# this-year record markers, and cumulative precipitation on a second axis.
# NOTE(review): reads the globals CITY, YEAR, city and precip_normal, and
# mutates the global precip_normal via the rbind below — confirm intended.
chart_tufte_weather <- function(data){
  data$dt <- datetime_to_timestamp(data$date)
  # Previous year's daily mean temperature, aligned on (day, month).
  data_1 = arrange_weather_data(city, year = YEAR - 1)
  data_1 = data_1[!is.na(data_1$date),]
  data_1 <- data_1[,c('day', "month", 'temp_avg')]
  names(data_1) <- c('day',"month", 'temp_avg_prev')
  data_1 = merge(data[,c('day',"month", "dt")], data_1, by = c('day', 'month'), all.y=TRUE)
  # Long format of the temperature columns (per-day record columns and the
  # daily mean are excluded; they get their own series below).
  dtempgather <- data %>%
    dplyr::select(dt,date,starts_with("temp")) %>%
    dplyr::select(-temp_rec_high, -temp_rec_low, -temp_diff, -temp_avg) %>%
    #dplyr::select( -temp_diff, -temp_avg) %>%
    gather(key, value, -dt, -date) %>%
    mutate(key = str_replace(key, "temp_", ""),
           value = as.numeric(value))
  dtempgather$value = round(dtempgather$value,1)
  # "avg_max" "avg_min"  "max"  "min"  "rec_max" "rec_min"
  #summary(as.factor(dtempgather$key))
  dtempgather$key[dtempgather$key == 'max'] = 'actual_max'
  dtempgather$key[dtempgather$key == 'min'] = 'actual_min'
  # First split (overwritten just below once duplicates are removed).
  dtempspread <- dtempgather %>%
    separate(key, c("serie", "type"), sep = "_")
  dtempgather = unique(dtempgather)
  # Wide format: one row per (dt, serie) with min/max columns.
  dtempspread <- dtempgather %>%
    separate(key, c("serie", "type"), sep = "_") %>%
    spread(type, value) %>%
    filter(!is.na(max) | !is.na(min))
  # Order the series record -> normal -> actual and relabel them (Russian
  # labels are user-facing strings; do not translate).
  temps <- dtempspread %>%
    mutate(serie = factor(serie, levels = c("rec", "avg", "actual")),
           serie = fct_recode(serie, Рекордная = "rec", Нормальная = "avg", Фактическая = "actual"))
  temps = temps[!is.na(temps$dt),]
  # Band colours, matched by position to whichever series are present.
  colors <- c("#ECEBE3", "#C8B8B9", "#C85C8A")
  #colors <- c("#ECEBE3", "#C8B8B9", "#A90048")
  colors <- colors[which(levels(temps$serie) %in% unique(temps$serie))]
  # Chart scaffold: title, datetime x-axis, shared tooltip, thin columns.
  hc <- highchart() %>%
    hc_title(text = paste0(CITY, " - погода в ", YEAR, ' году'), style = list(fontSize = '14px', fontWeight = "bold"), align = "left") %>%
    hc_xAxis(type = "datetime", showLastLabel = FALSE,
             dateTimeLabelFormats = list(month = "%B")) %>%
    hc_tooltip(shared = TRUE, useHTML = TRUE,
               headerFormat = as.character(tags$small("{point.x: %b %d}", tags$br()))) %>%
    hc_plotOptions(series = list(borderWidth = 0, pointWidth = 4))
  # Temperature bands as column-range series.
  hc <- hc %>%
    hc_add_series(temps, type = "columnrange",
                  hcaes(dt, low = min, high = max, group = serie),
                  color = colors)
  #hc
  data = data[complete.cases(data$temp_avg),]
  data_1 = data_1[order(data_1$dt),]
  # Daily mean lines: solid black (this year), dashed grey (previous year).
  hc <- hc %>%
    hc_add_series(data, type = "line", hcaes(x = dt, y = temp_avg),
                  name = 'Среднедневная',lineWidth=2, color = 'black') %>%
    hc_add_series(data_1, type = "line", hcaes(x = dt, y = temp_avg_prev),
                  name = 'Среднедневная год назад',lineWidth=1.5, dashStyle = 'Dash', color = 'grey')
  #hc
  # This year's record highs/lows in long format for the point markers.
  records <- data %>%
    select(dt, temp_rec_high, temp_rec_low) %>%
    filter(!is.na(temp_rec_high) | !is.na(temp_rec_low)) %>%
    #dmap_if(is.character, str_extract, "\\d+") %>%
    #dmap_if(is.character, as.numeric) %>%
    gather(type, value, -dt) %>%
    filter(!is.na(value)) %>%
    mutate(type = str_replace(type, "temp_rec_", ""),
           type = paste("Рекорд этого года", type))
  # Marker styles: red outline for highs, blue for lows.
  pointsyles_high <- list(
    symbol = "circle",
    lineWidth= 1,
    radius= 4,
    fillColor= "#FFFFFF",
    lineColor= "#bc0909"
  )
  pointsyles_low <- list(
    symbol = "circle",
    lineWidth= 1,
    radius= 4,
    fillColor= "#FFFFFF",
    lineColor= "#0099CC"
  )
  if(nrow(records) > 0) {
    hc <- hc %>%
      hc_add_series(dplyr::filter(records, type == "Рекорд этого года high"), "point", hcaes(x = dt, y = value, group = type),
                    marker = pointsyles_high, showInLegend = FALSE) %>%
      hc_add_series(dplyr::filter(records, type == "Рекорд этого года low"), "point", hcaes(x = dt, y = value, group = type),
                    marker = pointsyles_low, showInLegend = FALSE)
  }
  #hc
  # Two stacked y axes: temperature (3/4 of the height) above precipitation.
  axis <- create_yaxis(
    naxis = 2,
    heights = c(3,1),
    sep = 0.05,
    turnopposite = FALSE,
    showLastLabel = FALSE,
    startOnTick = FALSE)
  axis[[1]]$title <- list(text = "Температура, °C")
  axis[[1]]$labels <- list(format = "{value}°C")
  axis[[2]]$title <- list(text = "Осадки, мм")
  axis[[2]]$min <- 0
  hc <- hc_yAxis_multiples(hc, axis)
  # Cumulative precipitation: one area series per month, linked into a single
  # legend entry via id/linkedTo.
  precip <- select(data, dt, precip_value, month)
  n_months = max(data$month)
  hc <- hc %>%
    hc_add_series(precip, type = "area", hcaes(dt, precip_value, group = month),
                  name = "Осадки", color = "#008ED0", lineWidth = 1,
                  yAxis = 1, fillColor = "#EBEAE2",
                  id = c("p", rep(NA, n_months-1)), linkedTo = c(NA, rep("p", n_months-1)))
  # Normal precipitation: duplicate each monthly value at the month's last day
  # so every monthly line spans its whole month.
  n_months = 12
  precip_normal$month = 1:12
  bb = seq(min(data$date),length=13,by="months")-1
  bb = bb[2:13]
  buff = precip_normal
  buff$date = bb
  precip_normal = rbind(buff, precip_normal)
  precip_normal$dt = datetime_to_timestamp(precip_normal$date)
  hc <- hc %>%
    hc_add_series(precip_normal, "line", hcaes(x = dt, y = precip_normal, group = month),
                  name = "Normal Precipitation", color = "#008ED0", yAxis = 1, showInLegend = FALSE,
                  id = c("np", rep(NA, n_months - 1)), linkedTo = c(NA, rep("np", n_months - 1)),
                  lineWidth = 2, marker = FALSE)
  #hc %>% hc_plotOptions(series = list(marker = list(enabled = FALSE)))
  # Russian month names for the axis labels (user-facing configuration).
  hc$x$conf_opts$lang$months = c("Январь" ,"Февраль", "Март" ,"Апрель" ,"Май", "Июнь",
                                 "Июль", "Август", "Сентябрь", "Октябрь", "Ноябрь", "Декабрь")
  hc$x$conf_opts$lang$shortMonths = c("Янв", "Фев", "Мар", "Апр", "Май", "Июн", "Июл", "Авг", "Сен", "Окт", "Ноя", "Дек")
  return(hc)
}
|
17697116f991927c61ad2d8553d958f612c2637e
|
38cb70928b8cc03fbf13c653bd1f091b43d190e8
|
/inst/example/shinyColorPal/global.R
|
7affc619d500c0be3d2177a1bb94a4fb6b372cda
|
[] |
no_license
|
anhnguyendepocen/shinyCanvas
|
99f0dd81fbd526443de0fe70972efb7eed3ee54c
|
e035cc31b139c997792c86e7d7fd8cf030c1d1b6
|
refs/heads/master
| 2020-04-30T02:08:50.109469
| 2017-11-17T14:49:24
| 2017-11-17T14:49:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 945
|
r
|
global.R
|
library(shiny)
library(viridis)
library(shinyCanvas)
library(dplyr)
# Demo data for the canvas example.
# NOTE(review): `data` shadows utils::data() in this session; consider a
# more specific name.
data=data.frame(x=1:1000,y=1:1000)
# Concave hull of a 2-D data frame, computed by the JavaScript "concaveman"
# library running in an embedded V8 context.
# NOTE(review): sources the JS bundle from the network on every call and
# attaches V8 inside the function body — requires internet access; confirm
# this is acceptable, otherwise vendor the bundle locally.
#
# d: data frame of points (two numeric columns).
# Returns a data frame of hull vertices with the same column names as `d`.
concaveman <- function(d){
  library(V8)
  ctx <- v8()
  ctx$source('https://www.mapbox.com/bites/00222/concaveman-bundle.js')
  # Embed the points as a JSON array of [x, y] pairs and compute the hull.
  jscode <- sprintf(
    "var points = %s;var polygon = concaveman(points);",
    jsonlite::toJSON(d, dataframe = 'values')
  )
  ctx$eval(jscode)
  # Pull the polygon back into R, restoring the original column names.
  setNames(as.data.frame(ctx$get('polygon')), names(d))
}
# Draw n points uniformly from the interior of the convex hull of x, by
# rejection sampling within the hull's bounding box.
#
# n: number of points to draw.
# x: matrix or data frame of 2-D points defining the hull.
# Returns an n x 2 matrix of sampled points.
rhull <- function(n,x) {
  # Hull vertices in hull order.
  hull_pts <- x[chull(x),]
  #hull_pts <- concaveman(x)
  x_range <- range(hull_pts[,1])
  y_range <- range(hull_pts[,2])
  # Row 1 is a placeholder for the candidate point: the candidate lies inside
  # the hull exactly when it is NOT itself a hull vertex.
  hull_pts <- rbind(c(NA,NA), hull_pts)
  samples <- matrix(NA, n, 2)
  for (k in seq_len(n)) {
    repeat {
      # Uniform candidate in the bounding box.
      hull_pts[1,1] <- runif(1, x_range[1], x_range[2])
      hull_pts[1,2] <- runif(1, y_range[1], y_range[2])
      if (!(1 %in% chull(hull_pts))) {
        # Candidate is interior: accept it.
        samples[k,] <- hull_pts[1,]
        break
      }
    }
  }
  samples
}
|
55d67eadefaece0dfe40a90a1d37cededfd23790
|
793b7407415860b23f113f748b3f5187f1b72eed
|
/core/svm.R
|
04090fdaf3c006d290ad638b5549f17376d70bd9
|
[] |
no_license
|
Triskae/projet-r
|
05218f6f62b11f8fa281fbfff9aabd50e563a86b
|
335ac90eb0295a41ab6eb5e8d6f154bd2523e901
|
refs/heads/main
| 2023-05-03T23:04:26.761444
| 2021-05-01T14:14:27
| 2021-05-01T14:14:27
| 358,350,921
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,336
|
r
|
svm.R
|
#--------------------------------------#
# LOAD THE REQUIRED LIBRARIES          #
#--------------------------------------#
# NOTE(review): attach() puts the first input element's columns on the search
# path; prefer passing data explicitly — left unchanged here.
attach(input[[1]])
needs(e1071)   # svm()
needs(ROCR)    # prediction()/performance() for the ROC curve
#-------------------------#
# DATA PREPARATION        #
#-------------------------#
# NOTE(review): relative setwd() calls assume a fixed launch directory.
setwd('data')
data <- read.csv("dataset.csv", header = TRUE, sep = ",", dec = ".", stringsAsFactors = T)
data_new <- read.csv("predict.csv", header = TRUE, sep = ",", dec = ".", stringsAsFactors = T)
setwd('../images')
# Shuffle the rows, then split: rows 1-800 train, rows 801-1200 test.
data_shuffle <- data[sample(seq_along(data[, 1])),]
data_ea <- data_shuffle[1:800,]
data_et <- data_shuffle[801:1200,]
# The ROC plot below is written to images/svm.jpg.
jpeg('svm.jpg')
#-------------------------#
# SUPPORT VECTOR MACHINES #
#-------------------------#

# Train an SVM with the given kernel on the global training set (data_ea),
# evaluate it on the global test set (data_et), draw the ROC curve, score
# the global prediction set (data_new) and return the AUC, the annotated
# data sets and the confusion matrix.
#
# arg1: kernel name passed to e1071::svm() ("linear", "radial", ...).
# arg2: colour used for the ROC curve.
test_svm <- function(arg1, arg2){
  # Train the classifier, keeping probability estimates.
  svm <- svm(default~., data_ea, probability=TRUE, kernel = arg1)
  # Test the classifier: predicted classes.
  svm_class <- predict(svm, data_et, type="response")
  # Test the classifier: probabilities for each prediction.
  svm_prob <- predict(svm, data_et, probability=TRUE)
  # Extract the probabilities attached to the predictions.
  svm_prob <- attr(svm_prob, "probabilities")
  # ROC curve (true-positive rate vs false-positive rate).
  svm_pred <- prediction(svm_prob[,1], data_et$default)
  svm_perf <- performance(svm_pred,"tpr","fpr")
  plot(svm_perf, main = "Support vector machines svm()", add = FALSE, col = arg2)
  dev.off()
  # Area under the ROC curve.
  svm_auc <- performance(svm_pred, "auc")
  # BUG FIX: removed a trailing comma inside the as.matrix() call, which made
  # R raise an "argument 2 is empty" error.
  confusionMatrix <- as.matrix(
    table(data_et$default, svm_class)
  )
  # This is a security to ensure a 2-dimensional confusion matrix even when
  # only one class was predicted.
  if(length(confusionMatrix[1,])==1){
    confusionMatrix <- cbind(c(confusionMatrix[1,],0), c(confusionMatrix[2,],0))
  }
  # Score the new/unlabelled data set.
  svm.class <- predict(svm, data_new, type="response" )
  svm.prob <- attr(predict(svm, data_new, probability = TRUE),"probabilities")
  data_new$default <- svm.class
  data_new$probability<-svm.prob[,1]
  data_et$prediction <- svm_class
  data_et$probability <- svm_prob[,1]
  return(list("AUC"=as.character(attr(svm_auc, "y.values")),
              "dataEtPrediction"=data_et,
              "dataNewPrediction"=data_new,
              "confusionMatrix"=
                list("predictedPositive"=confusionMatrix[1,]
                     ,"predictedNegative"=confusionMatrix[2,])
  ))
}
test_svm(arg1,arg2)
|
69af200b04f45fcc1e94f70f7d675cbfec71046e
|
35c947850f680289cc537d04df2da3e476bd5f25
|
/man/pedigree.format.genedrop.Rd
|
b45970f35f76d0dbced275b62408a86bb933705d
|
[] |
no_license
|
susjoh/pedantry
|
21c09022619ce81bee0a3311f92179f339ea8a0d
|
cac7b2b83b50dbda3e9da296603d40a7481ef332
|
refs/heads/master
| 2021-01-21T06:24:31.773899
| 2019-08-23T11:29:48
| 2019-08-23T11:29:48
| 83,225,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 410
|
rd
|
pedigree.format.genedrop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pedigree.format.R
\name{pedigree.format.genedrop}
\alias{pedigree.format.genedrop}
\title{Format Pedigree for genedrop analysis}
\usage{
pedigree.format.genedrop(ped)
}
\arguments{
\item{ped}{Pedigree object. Run simple.ped.name.rules() for an example.}
}
\description{
This function formats the pedigree for downstream analysis.
}
|
61b7b356c02e92b50358cbca7e21c5c2c4465019
|
01ddc13e55f3c1338ecfa86281e24f19bbfd205a
|
/man/Hill2oQV.Rd
|
91cdb464aec1c9cb24db4d476d9b28438ce83a93
|
[] |
no_license
|
TReynkens/ReIns
|
8618af18b88dc1b004f68b42312aceca4cbf979d
|
257c145ed364658597f20546bee741f4ed0cabd6
|
refs/heads/master
| 2023-03-19T08:03:16.967323
| 2023-03-08T08:30:09
| 2023-03-08T08:30:09
| 39,773,959
| 18
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,871
|
rd
|
Hill2oQV.Rd
|
\name{Hill.2oQV}
\alias{Hill.2oQV}
\title{
Bias-reduced MLE (Quantile view)
}
\description{
Computes bias-reduced ML estimates of gamma based on the quantile view.
}
\usage{
Hill.2oQV(data, start = c(1,1,1), warnings = FALSE, logk = FALSE,
plot = FALSE, add = FALSE, main = "Estimates of the EVI", ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
Vector of \eqn{n} observations.
}
\item{start}{
A vector of length 3 containing starting values for the first numerical optimisation (see Details). The elements
are the starting values for the estimators of \eqn{\gamma}, \eqn{\mu} and \eqn{\sigma}, respectively. Default is \code{c(1,1,1)}.
}
\item{warnings}{
Logical indicating if possible warnings from the optimisation function are shown, default is \code{FALSE}.
}
\item{logk}{
Logical indicating if the estimates are plotted as a function of \eqn{\log(k)} (\code{logk=TRUE}) or as a function of \eqn{k}. Default is \code{FALSE}.
}
\item{plot}{
Logical indicating if the estimates of \eqn{\gamma} should be plotted as a function of \eqn{k}, default is \code{FALSE}.
}
\item{add}{
Logical indicating if the estimates of \eqn{\gamma} should be added to an existing plot, default is \code{FALSE}.
}
\item{main}{
Title for the plot, default is \code{"Estimates of the EVI"}.
}
\item{\dots}{
Additional arguments for the \code{plot} function, see \code{\link[graphics:plot.default]{plot}} for more details.
}
}
\details{
See Section 4.2.1 of Albrecher et al. (2017) for more details.
}
\value{
A list with following components:
\item{k}{Vector of the values of the tail parameter \eqn{k}.}
\item{gamma}{Vector of the ML estimates for the EVI for each value of \eqn{k}.}
\item{b}{Vector of the ML estimates for the parameter \eqn{b} in the regression model for each value of \eqn{k}.}
\item{beta}{Vector of the ML estimates for the parameter \eqn{\beta} in the regression model for each value of \eqn{k}.}
}
\references{
Albrecher, H., Beirlant, J. and Teugels, J. (2017). \emph{Reinsurance: Actuarial and Statistical Aspects}, Wiley, Chichester.
Beirlant J., Dierckx, G., Goegebeur Y. and Matthys, G. (1999). "Tail Index Estimation and an Exponential Regression Model." \emph{Extremes}, 2, 177--200.
Beirlant J., Goegebeur Y., Segers, J. and Teugels, J. (2004). \emph{Statistics of Extremes: Theory and Applications}, Wiley Series in Probability, Wiley, Chichester.
}
\author{
Tom Reynkens based on \code{S-Plus} code from Yuri Goegebeur and \code{R} code from Klaus Herrmann.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%}
\examples{
data(norwegianfire)
# Plot bias-reduced MLE (QV) as a function of k
Hill.2oQV(norwegianfire$size[norwegianfire$year==76],plot=TRUE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
057a014bbe5369b85f72e870768d88c84d461a24
|
17780cb424c0fe6d9e0f5d6fe845abf77bb8a422
|
/server.R
|
e166bdacd774fb16861bcae8c8e0cc361b05309e
|
[
"MIT"
] |
permissive
|
AdamSpannbauer/rPackedBarDemo
|
331d69b94f5c07daff399df5fc523dbfd5ed5347
|
ada8ad859b6481d2c831571c1cf273ceae0d00b1
|
refs/heads/master
| 2020-06-04T03:13:21.218705
| 2019-06-16T20:04:53
| 2019-06-16T20:04:53
| 191,850,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,606
|
r
|
server.R
|
# Shiny server for the rPackedBar demo: fetches a Twitter user's tweets,
# draws a plotly packed-bar chart of favourite/retweet totals, and shows the
# tweet behind whichever bar the user clicks.
# NOTE(review): DEFAULT_USER, safe_get_user_tweet_info(), guess_bar_count(),
# plot_guess(), unescape_html() and packedBarOutput() are defined elsewhere
# in the app (global.R / ui.R) -- not visible here.
shinyServer(function(input, output, session) {
  # Username input, pre-filled from the "?user=" URL query parameter when
  # present, otherwise DEFAULT_USER.
  output$user_name_input_ui = renderUI({
    queried_user = parseQueryString(session$clientData$url_search)[["user"]]
    default_user = if (is.null(queried_user)) DEFAULT_USER else queried_user
    div(
      textInput(
        inputId = "user_name_input",
        label = "Twitter Username",
        value = default_user,
        placeholder = default_user
      ),
      actionButton(
        inputId = "get_tweet_button",
        label = "Search Twitter",
        icon = icon("twitter"),
        style = "color: #fff; background-color: #00aced; border-color: #2e6da4"
      )
    )
  })
  # Tweet table for the requested user, refreshed on button click.
  # ignoreNULL = FALSE makes this fire once at startup for the default user.
  # setTimeLimit() bounds the Twitter fetch to ~15 s of elapsed time.
  tweet_dt = eventReactive(input$get_tweet_button, {
    user_name = gsub("^\\@", "", input$user_name_input)  # strip a leading "@"
    setTimeLimit(elapsed = 15)
    tweet_info_dt = safe_get_user_tweet_info(user_name, 1000)
    setTimeLimit(elapsed = Inf)
    if (nrow(tweet_info_dt) == 0) return(NULL)
    # Keep only tweets with at least one favourite or retweet.
    tweet_info_dt[total_fav_rt > 0, ]
  }, ignoreNULL = FALSE)
  # Single-row table: the tweet with the highest favourite+retweet total.
  most_popular_tweet = reactive({
    req(tweet_dt())
    tweet_dt()[order(-total_fav_rt), ][1, ]
  })
  # Row count for the packed-bar chart, recomputed when the user presses
  # "Re-Draw Plot": either guessed from the data or taken from the slider.
  plot_n_color_bar = eventReactive(input$replot_packed_bars, {
    if (isTRUE(input$guess_bar_count)) {
      guess_bar_count(tweet_dt()$total_fav_rt,
                      input$guess_bar_count_range[1],
                      input$guess_bar_count_range[2])
    } else {
      input$select_bar_count
    }
  })
  # Build the packed-bar plotly chart (seeded so bar packing is reproducible).
  packed_bar_plot = reactive({
    req(tweet_dt())
    set.seed(42)
    if (nrow(tweet_dt()) < 10) return(plotly::plotly_empty())
    plot_n_row = guess_bar_count(tweet_dt()$total_fav_rt, max_bar = 7)
    # Prefer the user's fine-tuned row count when available; try() swallows
    # the error raised before the re-draw button has ever been pressed.
    try({plot_n_row = plot_n_color_bar()})
    p = rPackedBar::plotly_packed_bar(input_data = tweet_dt(),
                                      label_column = "text_preview",
                                      value_column = "total_fav_rt",
                                      number_rows = plot_n_row,
                                      plot_title = "Tweet Interactions<br><sub>(click a bar to view more about tweet)</sub>",
                                      xaxis_label = "Favorites & RTs",
                                      hover_label = "Favs & RTs",
                                      min_label_width = .03,
                                      color_bar_color = "#00aced",
                                      label_color = "white")
    plotly::config(p, displayModeBar = FALSE)
  })
  output$rendered_packed_bar = plotly::renderPlotly({
    packed_bar_plot()
  })
  # Chart container, or an error message when too few usable tweets came back.
  output$packed_bar_ui = renderUI({
    req(tweet_dt())
    if (nrow(tweet_dt()) < 10) {
      HTML("<h3>Error getting tweets ¯\\_(ツ)_/¯</h3>")
    } else {
      fluidRow(packedBarOutput("rendered_packed_bar"))
    }
  })
  # Detail panel for the clicked bar; falls back to the most popular tweet
  # when nothing has been clicked or the clicked label no longer matches.
  output$clicked_tweet_ui = renderUI({
    req(packed_bar_plot())
    filter_text = input$rendered_packed_bar_clicked
    filter_text = if (is.null(filter_text)) NA_character_ else filter_text
    filter_text = unescape_html(filter_text)
    tweet = tweet_dt()[text_preview == filter_text, .(text, rt_n, fav_n)][1, ]
    if (nrow(tweet) == 0 | is.na(filter_text)) {
      tweet = most_popular_tweet()[, .(text, rt_n, fav_n)]
    }
    if (anyNA(tweet)) return(HTML(""))
    wellPanel(
      HTML(
        sprintf(
          paste(
            "<h4 align='left'>%s</h4>",
            "<h5 align='right'>",
            "%s<i class='fa fa-retweet'></i>",
            "&nbsp;&nbsp;",
            "%s<i class='fa fa-heart'></i>",
            "</h5>"
          ),
          tweet$text, tweet$rt_n, tweet$fav_n
        )
      ) # HTML
    ) # wellPanel
  })
  # Plot visualising how the automatic row-count guess behaves over the
  # selected guess range.
  output$guess_bar_plot = plotly::renderPlotly({
    req(input$guess_bar_count_range)
    min_bar = input$guess_bar_count_range[1]
    max_bar = input$guess_bar_count_range[2]
    plot_guess(tweet_dt()$total_fav_rt, min_bar, max_bar)
  })
  # Fine-tuning controls: row-count slider, guess-range slider, guess toggle,
  # re-draw button, and the guess visualisation plot.
  output$select_n_bars = renderUI({
    req(nrow(tweet_dt()) > 0)
    column(
      width = 8,
      offset = 2,
      wellPanel(
        h3("Fine Tune Plot"),
        p(
          "These controls allow you to adjust the number of ",
          "rows in the Packed Bar Chart.",
          br(),
          "This corresponds to the",
          code("number_rows"),
          "option in",
          code("rPackedBar::plotly_packed_bar()"),
          br(),
          "The default chart uses the guess option."
        ),
        hr(),
        fluidRow(
          sliderInput(inputId = "select_bar_count",
                      label = "Select Chart Row Count",
                      min = 1,
                      max = 50,
                      step = 1,
                      value = guess_bar_count(tweet_dt()$total_fav_rt),
                      width = "50%"),
          sliderInput(inputId = "guess_bar_count_range",
                      label = "Guess Range Bounds",
                      min = 1,
                      max = 50,
                      step = 1,
                      value = c(3, 7),
                      width = "50%")
        ), # fluidRow
        br(),
        fluidRow(
          checkboxInput(inputId = "guess_bar_count",
                        label = "Guess Row Count",
                        value = TRUE)
        ), # fluidRow
        fluidRow(
          actionButton(inputId = "replot_packed_bars",
                       label = "Re-Draw Plot",
                       icon = icon("bar-chart"))
        ), # fluidRow
        br(),
        hr(),
        br(),
        fluidRow(
          plotly::plotlyOutput("guess_bar_plot")
        ) # fluidRow
      ) # wellPanel
    ) # column
  })
}) # shinyServer
|
22719f641f02193ff76960ce5e543ed27b340981
|
cde1c8e53ba3b32693dc55543318d5b7d76d5bed
|
/wk4 proj/abalone_script.R
|
7c6e11352649c8b64c2a72abcf2c2b8824b454e0
|
[] |
no_license
|
chanovin/dev_data_prod
|
3d845deefc6a7ee6e041e6356bce38d286956e73
|
2f8bd54890e8df8f35ba4edb4902f683f95f9953
|
refs/heads/master
| 2021-01-18T04:03:57.944334
| 2017-03-30T18:30:37
| 2017-03-30T18:30:37
| 85,768,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 515
|
r
|
abalone_script.R
|
# Fit a bagged-tree classifier predicting abalone ring count (a proxy for age)
# from three shell dimensions, then evaluate it on the training data itself.
library(caret)
# Abalone dataset from the UCI ML repository; the raw file has no header row.
abalone <- read.csv(url("http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data"),
                    header=FALSE)
names(abalone) <- c("sex","length","diameter","height","whole_wt","shuck_wt","visc_wt","shell_wt","rings")
# Treat ring count as a class label (classification, not regression).
abalone$rings <- as.factor(abalone$rings)
fit <- train(rings ~ length + diameter + height, data=abalone, method="treebag")
pred <- predict(fit, abalone)
# Difference in factor *level codes* between prediction and truth; since
# levels are the sorted ring counts, 0 means an exact match.
acc <- as.numeric(pred) - as.numeric(abalone$rings)
boxplot(acc)
# NOTE(review): this is in-sample accuracy (predicting on the training data),
# so it overstates real performance.
confusionMatrix(pred,abalone$rings)
|
9b450df4e9f902b214090152541153ec42e1bc76
|
90fa7bd362d2d35492e76ab25e6caf1066ed6bbf
|
/cachematrix.R
|
2d5846d6ef494b3ad68ff829c29f7d2174729575
|
[] |
no_license
|
srsmith13/ProgrammingAssignment2
|
4203db22c4ddca030cb4da1008d7a2535f02659f
|
7c8ec356737ec8686348397d90078cb42d59130d
|
refs/heads/master
| 2021-01-21T00:00:54.269331
| 2015-02-21T14:59:34
| 2015-02-21T14:59:34
| 30,810,624
| 0
| 0
| null | 2015-02-14T22:21:25
| 2015-02-14T22:21:25
| null |
UTF-8
|
R
| false
| false
| 1,706
|
r
|
cachematrix.R
|
## Takes a matrix as input, calculates the inverse and caches its value.
## If the inverse has already been calculated, retrieves the value from cache.
## If the inverse has not already been calculated, computes, returns, and caches the inverse.
## makeCacheMatrix creates a special matrix of functions that
## Set the value of the matrix
## Get the value of the matrix
## Set the inverse of the matrix
## Get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix with getter/setter closures plus a cache slot for its
  # inverse. Returns a list of four accessor functions.
  #
  # Fixes vs. the original: the accessor list was assigned to a local variable
  # named `list` (shadowing base::list) as the last expression, so the result
  # was returned invisibly; and an unused `y <- NULL` initialiser was dropped.
  m <- NULL  # cached inverse; NULL until cacheSolve() stores one
  set <- function(y) {
    x <<- y    # replace the stored matrix
    m <<- NULL # invalidate any previously cached inverse
  }
  get <- function() x
  setinv <- function(solve) m <<- solve # cache a computed inverse
  getinv <- function() m                # retrieve the cached inverse (or NULL)
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve calls the functions stored in the matrix created by makeCacheMatrix
## If inverse already calculated, retrieve inverse from cache
## If inverse not already calculated, compute, cache, and return inverse
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object), reusing the cached value when one is available. Extra arguments
  # are forwarded to solve().
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, store it for next time, and return it.
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
    return(inverse)
  }
  message("getting cached matrix")
  cached
}
|
7b5103e9112fb98f925c1ecb99e3a0e310663f38
|
1b01ee568a3a4d660aab12bca8854dc6326893ce
|
/plot2.R
|
5f6bd41d8e6de296c9766edff57f961aba0ba2e5
|
[] |
no_license
|
Verunka/ExData_Plotting1
|
d45190f1bd56c9dad184fc4c41213fcd95ad2bf8
|
606f54eb6742854a8ad73eb582f006d1407c92f6
|
refs/heads/master
| 2021-01-09T09:04:46.559623
| 2015-10-11T11:48:30
| 2015-10-11T11:48:30
| 44,023,053
| 0
| 0
| null | 2015-10-10T18:33:48
| 2015-10-10T18:33:48
| null |
UTF-8
|
R
| false
| false
| 840
|
r
|
plot2.R
|
# Reproduce "plot 2" of the Electric Power Consumption assignment:
# Global Active Power over time for 2007-02-01 and 2007-02-02, written to
# plot2.png.
Sys.setlocale("LC_ALL", "C")  # C locale so weekday axis labels are in English
#Read in the data
#Please make sure that the household_power_consumption.txt file is in your working directory
hcp <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
#Convert the Date variable into date class
hcp$Date <- as.character(hcp$Date)
hcp$Date <- as.Date(hcp$Date, format = "%d/%m/%Y")
#Subset only for values 2007/02/01 and 2007/02/02
hcp <- subset(hcp, Date =="2007-02-01" | Date == "2007-02-02")
#Create new variable DT = Date and Time together
hcp$DateTime <- as.POSIXct(paste(hcp$Date, hcp$Time), format="%Y-%m-%d %H:%M:%S")
#Plot the graph directly into png file
# NOTE(review): type="windows" makes this call Windows-only; on other
# platforms the default device type would be needed -- confirm target platform.
png(file = "plot2.png", width = 480, height=480, type="windows")
plot(hcp$DateTime, hcp$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
a0b1814761abf2dbd9b1e763064667cfef8bfc48
|
1c355458ec6afe873276128a169d04305e789522
|
/man/bin_mean.Rd
|
8f250838ed1de3d1e2ad07e3942cf28cb14ccdc0
|
[] |
no_license
|
Agatemei/binomial
|
5a6d5f408a5ebfa2102b8f4a7dc4de8158cd8a82
|
174456a4d1912d07dbc16c0b7780b7373cccfaf7
|
refs/heads/master
| 2020-05-18T10:05:03.306971
| 2019-05-01T23:54:49
| 2019-05-01T23:54:49
| 184,187,043
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 398
|
rd
|
bin_mean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_of_measures.R
\name{bin_mean}
\alias{bin_mean}
\title{bin_mean}
\usage{
bin_mean(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{value of head probability}
}
\value{
computed mean value
}
\description{
calculates the mean of binomial random variable
}
\examples{
bin_mean(10, 0.3)
}
|
dba8793e463c1c7cc1fdc03180336f7c62070a72
|
f0489c47853fc78a49bfbc28ca3cf39798b17431
|
/man/minfit-NMFfitXn-method.Rd
|
5ace55563748028fc572a8968e928e03b9ed3945
|
[] |
no_license
|
pooranis/NMF
|
a7de482922ea433a4d4037d817886ac39032018e
|
c9db15c9f54df320635066779ad1fb466bf73217
|
refs/heads/master
| 2021-01-17T17:11:00.727502
| 2019-06-26T07:00:09
| 2019-06-26T07:00:09
| 53,220,016
| 0
| 0
| null | 2016-03-05T19:46:24
| 2016-03-05T19:46:24
| null |
UTF-8
|
R
| false
| true
| 400
|
rd
|
minfit-NMFfitXn-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NMFSet-class.R
\docType{methods}
\name{minfit,NMFfitXn-method}
\alias{minfit,NMFfitXn-method}
\title{Returns the best NMF model in the list, i.e. the run that achieved the lower
estimation residuals.}
\usage{
\S4method{minfit}{NMFfitXn}(object)
}
\description{
The model is selected based on its \code{deviance} value.
}
|
124c0ad474f6d9c03e11342b3adb2ad13464d0ed
|
4846b5b3748b6724d7c379dae7572e9fa90a798d
|
/man/runStereogeneOnCapR.Rd
|
e43e687382be08d87726c92191992a88e8a9563d
|
[] |
no_license
|
vbusa1/nearBynding
|
d225bcbdb1541b65c3f01604a1affd8ff51b068a
|
9ccf2b0e7fec87c426cf37fe45077d67abef210a
|
refs/heads/master
| 2023-04-07T19:01:47.323219
| 2021-07-30T17:39:58
| 2021-07-30T17:39:58
| 278,680,217
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,796
|
rd
|
runStereogeneOnCapR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runStereogeneOnCapR.R
\name{runStereogeneOnCapR}
\alias{runStereogeneOnCapR}
\title{runStereogeneOnCapR}
\usage{
runStereogeneOnCapR(
dir_CapR_bg = ".",
input_prefix,
protein_file,
output_prefix = input_prefix,
name_config = "config.cfg",
chrom_size,
nShuffle = 100,
get_error = FALSE,
...
)
}
\arguments{
\item{dir_CapR_bg}{Directory of lifted-over CapR bedGraph files. Default
current directory}
\item{input_prefix}{Prefix string appended to input files; same as
input_prefix argument in processCapRout. Required}
\item{protein_file}{Name of protein file in bedGraph format. Required}
\item{output_prefix}{Prefix string to be appended to all output files.
Default to be same as input_prefix}
\item{name_config}{Name of output config file. Default config.cfg}
\item{chrom_size}{Name of chromosome size file. File must be in two-column
format without a header where first column is chromosome name and second
column is chromosome length, as from getChainChrSize. Required}
\item{...}{includes all other parameters acceptable to write_config and
write_stereogene}
\item{nShuffle}{Permutations used to estimate error. Default 100.}
\item{get_error}{Whether to calculate the standard error of background
permutations from nShuffle. FALSE will save calculation time. Default FALSE}
}
\value{
generates StereoGene output files, including *.dist files
}
\description{
Writes a configuration file and Stereogene script and runs
Stereogene for all CapR tracks
}
\examples{
runStereogeneOnCapR(protein_file = "chr4and5_liftOver.bedGraph",
chrom_size = "chr4and5_3UTR.size",
name_config = "chr4and5_3UTR.cfg",
input_prefix = "chr4and5_3UTR")
}
|
42f1d1e9a0a4030ec3790e5b0efd7619f38ad6d8
|
18347ef9bc1f489e63e83cf03338b7211d21b7c8
|
/man/mutate_variables.Rd
|
61bff0f2a13cd3eb0b8e452550ea3d75ce10d331
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
stan-dev/posterior
|
cd1e0778f5b930b7ef97b9c1f09167f162fb9d7e
|
55e92336c2984be1a2487cdd489552a07e273d70
|
refs/heads/master
| 2023-08-18T07:53:15.023052
| 2023-08-07T08:13:36
| 2023-08-07T08:13:36
| 212,145,446
| 105
| 20
|
NOASSERTION
| 2023-08-07T08:13:37
| 2019-10-01T16:30:28
|
R
|
UTF-8
|
R
| false
| true
| 2,176
|
rd
|
mutate_variables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutate_variables.R
\name{mutate_variables}
\alias{mutate_variables}
\alias{mutate_variables.draws_matrix}
\alias{mutate_variables.draws_array}
\alias{mutate_variables.draws_df}
\alias{mutate_variables.draws_list}
\alias{mutate_variables.draws_rvars}
\title{Mutate variables in \code{draws} objects}
\usage{
mutate_variables(.x, ...)
\method{mutate_variables}{draws_matrix}(.x, ...)
\method{mutate_variables}{draws_array}(.x, ...)
\method{mutate_variables}{draws_df}(.x, ...)
\method{mutate_variables}{draws_list}(.x, ...)
\method{mutate_variables}{draws_rvars}(.x, ...)
}
\arguments{
\item{.x}{(draws) A \code{\link{draws}} object.}
\item{...}{Name-value pairs of expressions, each with either length 1 or the
same length as in the entire input (i.e., number of iterations or draws).
The name of each argument will be the name of a new variable, and the value
will be its corresponding value. Use a \code{NULL} value in \code{mutate_variables}
to drop a variable. New variables overwrite existing variables of the same
name.}
}
\value{
Returns a \code{\link{draws}} object of the same format as \code{.x}, with variables mutated
according to the expressions provided in \code{...}.
}
\description{
Mutate variables in a \code{\link{draws}} object.
}
\details{
In order to mutate variables in \code{\link{draws_matrix}} and \code{\link{draws_array}} objects,
they are transformed to \code{\link{draws_df}} objects first and then transformed back
after mutation. As those transformations are quite expensive for larger
number of draws, we recommend using \code{mutate_variables} on \code{\link{draws_df}} and
\code{\link{draws_list}} objects if speed is an issue.
In \code{\link{draws_rvars}} objects, the output of each expression in \code{...} is
coerced to an \code{\link{rvar}} object if it is not already one using \code{as_rvar()}.
}
\examples{
x <- as_draws_df(example_draws())
x <- subset(x, variable = c("mu", "tau"))
mutate_variables(x, tau2 = tau^2)
mutate_variables(x, scale = 1.96 * tau, lower = mu - scale)
}
\seealso{
\code{\link{variables}}, \code{\link{rename_variables}}
}
|
fd3cca8fcf75c4c9bfc7304143275dcc38e46481
|
4990db0abc12c9d4e4d09f89a897441b68395312
|
/[프로젝트]src/상관관계.R
|
47016dafd151e28e2d8085069de04c00f5c78514
|
[] |
no_license
|
leegangho/BigDataCampus
|
6993b8a934871e50279a393af8994e032c175ed6
|
4c080bdd7351ef65b0fe1709932f30d73d1b9728
|
refs/heads/master
| 2020-06-17T01:00:27.825531
| 2019-08-30T06:55:59
| 2019-08-30T06:55:59
| 195,749,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
상관관계.R
|
# Correlation checks between hydrogen-car counts and two regional covariates
# (income level, housing price), read from local CSV files.
# NOTE(review): absolute setwd() path makes this script machine-specific.
setwd("C:/Users/fkaus/OneDrive/바탕 화면/데이터캠퍼스 프로젝트/데이터")
cor.buy.car<- read.csv("소득수준상관관계.csv",header=T)
head(cor.buy.car)
# Observed correlation: -0.0748
# Weak negative relationship; essentially no meaningful correlation.
cor(cor.buy.car$소득수준,cor.buy.car$수소차)
# p-value: 0.7829
# alternative hypothesis: true correlation is not equal to 0
cor.test(cor.buy.car$소득수준,cor.buy.car$수소차)
# Same check against housing prices.
cor.house.car <- read.csv("집값상관관계.csv",header=T)
cor(cor.house.car$집값,cor.house.car$수소차)
|
c33dcefad4dc038f1edcebbe791db1df0c648953
|
69798ccc364cb158606eb72c7bf5024b49cf82f8
|
/code_correlation_analysis/encRNA_triplets_putatative_interaction.R
|
0ce7d3632b0ea3a8f78fd10c05ed19202536d282
|
[] |
no_license
|
cwt1/encRNA
|
522da7028bc7cfa9f95c00f61274839ae19f6374
|
c8f20e5a63cdbd738395b2b0694d4035fabefc97
|
refs/heads/master
| 2021-06-03T00:29:24.580932
| 2016-10-24T18:48:04
| 2016-10-24T18:48:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,896
|
r
|
encRNA_triplets_putatative_interaction.R
|
# Correlation analysis of putative lncRNA-miRNA-mRNA (ceRNA) triplets:
# compares sensitivity-matrix triplets against putative binding pairs from
# miRcode (lncRNA-miRNA) and starBase (mRNA-miRNA).
# NOTE(review): absolute setwd()/source() paths make this machine-specific,
# and the calls below use helper functions defined *later* in this file --
# the script only works if those definitions are evaluated first.
setwd("/media/ducdo/UUI/Bioinformatics/Summer Research/Cancer_Survival/encRNA_methylation_260616")
source("/media/ducdo/UUI/Bioinformatics/Summer Research/Cancer_Survival/encRNA_methylation_260616/code_correlation_analysis/helper_functions.R")
load("data_Saved_R_Objects/corr_matrices/normal_tumor_encRNA_triplets.rda")
load("mircode_objects.rda")
load("starbase_mRNA_miRNA_interactions.rda")
### -------------- some stats from sensitivity matrix ----------------------------------
dim(normal_encRNA) # [1] 29169    12
unique_normal_lncRNAs = unique(normal_encRNA$lncRNA); length(unique_normal_lncRNAs) # 200
unique_normal_mRNAs = unique(normal_encRNA$mRNA); length(unique_normal_mRNAs) # 1456
unique_normal_miRNAs = unique(normal_encRNA$miRNA); length(unique_normal_miRNAs) # 33
length(unique(normal_encRNA$lncRNA)) # [1] 200
## --------------- identify triple ----------------------------------------------------
## idea: starting from overlapped lncRNAs between sensitivity matrix and miRcode, then
# get all possible lncRNA-miRNA-mRNA from miRcode and starbase, put them into a putative matrix.
# then compare that matrix with the triplets in the sensitivity matrix
normal_encRNA_sensitivity_matched = get_matched_enRNA_sensitivity_with_putative_binding(normal_encRNA)
dim(normal_encRNA_sensitivity_matched) # 188 12
length(which(normal_encRNA_sensitivity_matched$lncRNA_miRNA_corr < 0 & normal_encRNA_sensitivity_matched$mRNA_miRNA_corr < 0)) # 3
tumor_encRNA_sensitivity_matched = get_matched_enRNA_sensitivity_with_putative_binding(tumor_encRNA)
dim(tumor_encRNA_sensitivity_matched) # 48 12
length(which(tumor_encRNA_sensitivity_matched$lncRNA_miRNA_corr < 0 & tumor_encRNA_sensitivity_matched$mRNA_miRNA_corr < 0)) # 1
# Overlap of matched features between normal and tumor results.
intersect(normal_encRNA_sensitivity_matched$mRNA, tumor_encRNA_sensitivity_matched$mRNA) # 0
intersect(normal_encRNA_sensitivity_matched$miRNA, tumor_encRNA_sensitivity_matched$miRNA) # [1] "hsa-mir-22"
intersect(normal_encRNA_sensitivity_matched$lncRNA, tumor_encRNA_sensitivity_matched$lncRNA)
# [1] "ENSG00000229645.4" "ENSG00000228639.2" "ENSG00000249042.1"
# [4] "ENSG00000245812.2"
## --------------- helper function -----------------------------------------------------
# Filter an encRNA sensitivity matrix down to triplets whose lncRNA-miRNA pair
# is supported by miRcode AND whose mRNA-miRNA pair is supported by starBase.
# Expects columns: lncRNA, mRNA, miRNA, lncRNA_miRNA_pair, mRNA_miRNA_pair.
# Side effect: load() pulls mircode_lncRNA into this function's environment.
get_matched_enRNA_sensitivity_with_putative_binding = function(encRNA_sensitivity){
  load("mircode_objects.rda")
  # lncRNAs present in both miRcode and the sensitivity matrix.
  lncRNAs_overlapped = intersect(unique(mircode_lncRNA$gene_id), unique(encRNA_sensitivity$lncRNA))
  # subset the encRNA_sensivivity to include only lncRNAs matches lncRNAs in miRcode
  encRNA_sensitivity_subset1 = encRNA_sensitivity[which(encRNA_sensitivity$lncRNA %in% lncRNAs_overlapped),]
  # similarly, subset the mircode_lncRNA to include only lncRNAs matches lncRNAs in encRNA_sensivivity
  mircode_lncRNA_subset1 = mircode_lncRNA[which(mircode_lncRNA$gene_id %in% lncRNAs_overlapped),
                                          c("gene_id","microrna")]
  mircode_lncRNA_subset1 = get_putative_lncRNA_miRNA(mircode_lncRNA_subset1) # divide miRNAs familily into individual miRNAs
  # now, subset encRNA_sensivivity_subset1 to include only the lncRNA-miRNA pairs which also shows up in mircode_lncRNA_subset1
  intersected_lncRNA_miRNA_pairs = intersect(unique(encRNA_sensitivity_subset1$lncRNA_miRNA_pair), unique(mircode_lncRNA_subset1$lncRNA_miRNA_pair))
  encRNA_sensitivity_subset2 = encRNA_sensitivity_subset1[which(encRNA_sensitivity_subset1$lncRNA_miRNA_pair %in% intersected_lncRNA_miRNA_pairs),]
  # miRcode's job is done; now restrict by starBase mRNA-miRNA support.
  starbase = process_starBase()
  intersected_miRNAs = intersect(unique(starbase$miRNA), unique(encRNA_sensitivity_subset2$miRNA))
  # subset starbase to include only miRNA shown up in encRNA_sensitivity_subset2;
  # similarly, subset encRNA_sensitivity_subset2
  starbase_subset = starbase[which(starbase$miRNA %in% intersected_miRNAs),]
  encRNA_sensitivity_subset3 = encRNA_sensitivity_subset2[which(encRNA_sensitivity_subset2$miRNA %in% intersected_miRNAs),]
  # now, find all intersected miRNA_mRNA pairs between encRNA_sensitivity_subset3 and starbase_subset
  # NOTE(review): this variable name is reused from the lncRNA step above but
  # actually holds mRNA-miRNA pairs here.
  intersected_lncRNA_miRNA_pairs = intersect(unique(encRNA_sensitivity_subset3$mRNA_miRNA_pair), unique(starbase_subset$mRNA_miRNA_pair))
  # NOTE(review): likely bug -- the which() indices are computed on subset3
  # but applied to subset2, whose rows do not align with subset3's (subset3 is
  # a row-subset of subset2). This presumably should index subset3; confirm
  # against the intended row counts (e.g. the "188 12" result above).
  encRNA_sensitivity_subset4 = encRNA_sensitivity_subset2[which(encRNA_sensitivity_subset3$mRNA_miRNA_pair %in% intersected_lncRNA_miRNA_pairs),]
  return(encRNA_sensitivity_subset4)
}
process_starBase = function(){
  # Load the starBase mRNA-miRNA interaction table and normalise it:
  # lower-case miRNA ids with their -3p/-5p arm suffixes stripped, plus a
  # "mRNA-miRNA" pair key, de-duplicated. Side effect: load() brings
  # starbase_mrna_mirna_interaction into this function's environment.
  load("starbase_mRNA_miRNA_interactions.rda")
  sb <- starbase_mrna_mirna_interaction
  colnames(sb)[1:2] <- c("miRNA", "putative_mRNA")
  # Normalise miRNA identifiers so they match the sensitivity matrix naming.
  sb$miRNA <- gsub("-5p", "", gsub("-3p", "", tolower(sb$miRNA)))
  # Pair key used for matching mRNA-miRNA interactions downstream.
  sb$mRNA_miRNA_pair <- paste(sb$putative_mRNA, sb$miRNA, sep = "-")
  unique(sb[, c("miRNA", "putative_mRNA", "mRNA_miRNA_pair")])
}
# Expand a table of (gene_id, miRNA-family) rows into individual
# lncRNA-miRNA pairs.
#
# @param dataframe data.frame whose first column is the lncRNA gene id and
#   whose second column is a miRNA family label understood by getMiRNAs().
# @return data.frame with columns miRNA, putative_lncRNAs and the combined
#   key lncRNA_miRNA_pair ("<gene_id>-<miRNA>").
get_putative_lncRNA_miRNA = function(dataframe){
  # One vector of expanded miRNA ids per input row.  lapply() replaces the
  # previous apply()/list.append()/<<- pattern, which grew a list through
  # global assignment; as.character() matches apply()'s row coercion when
  # the column is a factor.
  l <- lapply(as.character(dataframe[[2]]), getMiRNAs)
  names(l) <- dataframe$gene_id
  # melt() on a named list of character vectors yields columns (value, L1)
  df <- reshape2::melt(l)
  colnames(df) <- c("miRNA", "putative_lncRNAs")
  df$lncRNA_miRNA_pair <- paste(df$putative_lncRNAs, df$miRNA, sep = "-")
  return(df)
}
# for each lncRNA Ensembl id, find the potential miRNA interactions
# Expand a miRcode miRNA family label into individual mature miRNA ids.
#
# Example: "miR-106abc" -> c("hsa-mir-106a", "hsa-mir-106b", "hsa-mir-106c").
#
# @param miRNA_family family label such as "miR-106abc" or "miR-17-5p/20ab";
#   characters 1-3 are the "miR" prefix and everything from character 5
#   onwards is a "/"-separated list of member suffixes.
# @return character vector of "hsa-mir-<suffix>" identifiers.
getMiRNAs = function(miRNA_family = NULL){
  if (is.null(miRNA_family)){
    # Originally print() followed by `break`; `break` outside a loop is an
    # error in R ("no loop for break/next"), so signal a proper condition.
    stop("must provide miRNA_family")
  }
  # "miR" -> "hsa-mir" species/prefix part
  part1 = substr(miRNA_family, start = 1, stop = 3)
  part1 = paste("hsa", tolower(part1), sep = "-")
  # substring anything after "miR-" till the end, divided by "/"
  part2 = substr(miRNA_family, start = 5, stop = nchar(miRNA_family))
  part2 = unlist(strsplit(x = part2, split = "/"))
  # for each element, remove the -3p and -5p arm markers
  part2 = gsub("-3p", "", part2)
  part2 = gsub("-5p", "", part2)
  # return individual miRNAs:
  # example: 106abc is deconstructed into 106a, 106b, 106c
  part2 = sapply(part2, function(element){
    if (grepl("\\D", element)){
      digit_part = gsub(pattern = "\\D", replacement = "", x = element)
      character_parts = gsub(pattern = "\\d", replacement = "", x = element)
      character_parts = unlist(strsplit(x = character_parts, split = ""))
      paste(digit_part, character_parts, sep = "")
    } else {
      element
    }
  })
  part2 = unname(unlist(part2))
  return(paste(part1, part2, sep = "-"))
}
|
a7c17e904b5d62bdbf112ad9e34385e93bc0e0d9
|
484fe2afb7904ed2c9b090de6302a4a0eb177a3b
|
/man/abbReviations-package.Rd
|
8c6fa7af0006e684ecc5499e04a7f87d0b676726
|
[] |
no_license
|
johnmyleswhite/abbReviations
|
13e97c45decaaf7f2a1e9c0c5f766522033563b9
|
8a6355e25a534454cf38e7beb3067dd761e0e235
|
refs/heads/master
| 2021-01-21T17:45:44.344004
| 2012-05-13T02:45:05
| 2012-05-13T02:45:05
| 4,175,256
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 584
|
rd
|
abbReviations-package.Rd
|
\name{abbReviations-package}
\alias{abbReviations-package}
\alias{abbReviations}
\docType{package}
\title{
Translate back-and-forth between abbreviations.
}
\description{
Translate back-and-forth between abbreviations.
}
\details{
\tabular{ll}{
Package: \tab abbReviations\cr
Type: \tab Package\cr
Version: \tab 0.1-2\cr
Date: \tab 2012-04-29\cr
License: \tab Artistic-2.0\cr
}
}
\author{
John Myles White
Maintainer: John Myles White <jmw@johnmyleswhite.com>
}
\keyword{ package }
\examples{
library('abbReviations')
state_to_abbreviation('New Jersey')
abbreviation_to_state('NJ')
}
|
ab33e9c936f067b6555ccc53df16ff46ca90cd3b
|
2f52c876827a32bb9f1e18b82cf78c5ef644f073
|
/rbind.R
|
38164acc01a005c6441b28c0708413fef7e06d9e
|
[] |
no_license
|
Lina900904/R-lang
|
65a4b93298d222e72f625e725196665b4cea3555
|
1ac6cbbda666765014f8a856dc92bf36032268b7
|
refs/heads/master
| 2020-03-22T06:55:07.848748
| 2018-07-10T08:13:49
| 2018-07-10T08:13:49
| 139,666,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
rbind.R
|
# Return one of three hard-coded numeric matrices keyed by `ssss`.
# Any key other than 1, 2 or 3 falls through switch() and yields NULL.
rrr <- function(ssss){
  key <- toString(ssss)
  switch(key,
    "1" = matrix(c(1, 0, 0, 0,
                   2, 3, 0, 0,
                   4, 5, 6, 0,
                   7, 8, 9, 10),
                 nrow = 4, ncol = 4, byrow = TRUE),
    "2" = matrix(c(1, 2, 3, 4,
                   8, 7, 6, 5,
                   9, 10, 11, 12,
                   16, 15, 14, 13),
                 nrow = 4, ncol = 4, byrow = TRUE),
    "3" = matrix(c(0, 0, 1, 0, 0,
                   0, 2, 3, 4, 0,
                   5, 6, 7, 8, 9,
                   0, 10, 11, 12, 0,
                   0, 0, 13, 0, 0),
                 nrow = 5, ncol = 5, byrow = TRUE)
  )
}
rrr(1)
|
8c40ce3faf1d5f480cba80bd1fe798dcbfdccab8
|
00daf46a1286c20caa103a95b111a815ea539d73
|
/R/otherClasses.R
|
b3756b208c5e191ca17e9ec22fe53ac021415811
|
[] |
no_license
|
duncantl/Rllvm
|
5e24ec5ef50641535895de4464252d6b8430e191
|
27ae840015619c03b2cc6713bde71367edb1486d
|
refs/heads/master
| 2023-01-10T15:12:40.759998
| 2023-01-02T18:05:26
| 2023-01-02T18:05:26
| 3,893,906
| 65
| 14
| null | 2017-03-09T07:59:25
| 2012-04-01T16:57:16
|
R
|
UTF-8
|
R
| false
| false
| 743
|
r
|
otherClasses.R
|
# S4 type hierarchy: wrappers around LLVM Type objects plus R-specific SEXP
# pointer types.  The parent classes ("Type", "PointerType", "StructType")
# are defined elsewhere in the package.
setClass("StructTypeWithNames", representation(names = "character"), contains = "StructType")
setClass("DoubleType", contains = "Type") #XXX Not in LLVM
# A separate class so we can identify a StringType from a generic pointer since now they both use i8*
setClass("StringType", contains = "Type")
# R specific types
setClass("SEXPType", contains = "PointerType")
setClass("LGLSXPType", contains = "SEXPType")
setClass("INTSXPType", contains = "SEXPType")
setClass("REALSXPType", contains = "SEXPType")
setClass("STRSXPType", contains = "SEXPType")
setClass("VECSXPType", contains = "SEXPType")
setClass("CHARSXPType", contains = "SEXPType")
# (a second, byte-identical setClass("StructTypeWithNames", ...) previously
# duplicated the declaration at the top of this block; removed as redundant)
|
6446e9cb0e7b6af06433266b5e8d4fcc11dc2c7a
|
9fecce6f3ef41202cdcc855f4b0baff36131eacc
|
/Analysis/old_analysis/Metiers/bin/05_figures/SI_fig.R
|
ea76d17331363bb33f53e2698d7cd4cc08ba6e25
|
[] |
no_license
|
emfuller/cnh
|
0487e9647837d8fc999850b5951ff6331f9a5159
|
8b36faf8c73607d92e59e392fff3c0094b389d26
|
refs/heads/master
| 2021-05-01T08:02:52.200343
| 2019-04-06T18:25:48
| 2019-04-06T18:25:48
| 28,717,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,156
|
r
|
SI_fig.R
|
# plot composition of fisheries
# S1
# species table
# NOTE(review): this script uses hard-coded absolute paths on the author's
# machine; it is not portable as written.
library(reshape2)
library(dplyr)
tickets <- readRDS("/Users/efuller/Desktop/CNH/Analysis/Metiers/bin/04_data_output/vessel_landings_data.RDS")
# per-(fishery, species) revenue and each species' share of its fishery's total
met_summary <- tickets %>%
  group_by(metier.2010, modified) %>%
  summarize(revenue = sum(adj_revenue, na.rm = T)) %>%
  group_by(metier.2010) %>%
  mutate(total_rev = sum(revenue), per.rev = revenue/total_rev)
# subset to top 30 by revenue
# NOTE(review): comment above says "top 30" but the code below keeps the top
# 10 fisheries (met_rev$metier.2010[1:10]) -- confirm which is intended.
met_rev <- unique(met_summary[,c("metier.2010","total_rev")])
met_rev <- met_rev[order(-met_rev$total_rev),]
species_melt <- melt(met_summary, id.vars = c("modified","metier.2010"), measure.vars = "per.rev")
species_melt <- subset(species_melt, metier.2010 %in% met_rev$metier.2010[1:10])
# wide table: one row per species, one column per fishery, cell = revenue share
species_tab <- dcast(species_melt, modified ~ metier.2010, fun.aggregate = sum)
# remove species that have < 10% across all fisheries
# NOTE(review): comment says "< 10%" but the threshold used is .05 (5%).
library(RColorBrewer)
species_tab <- species_tab[-which(rowSums(species_tab[,2:ncol(species_tab)])<.05),]
rownames(species_tab) <- tolower(species_tab$modified)
# add fishery names and species common names
met_names <- read.csv("/Users/efuller/Desktop/CNH/processedData/catch/3_exploreBuildwebs/ref_tables/metier_descrp.csv", stringsAsFactors = FALSE)
c.names = data.frame(metier = tolower(colnames(species_tab)[2:ncol(species_tab)]),stringsAsFactors = FALSE)
# replace metier codes with "major species\nmajor gear" labels for plotting
for(i in 1:nrow(c.names)){
  c.names$common_name[i] = paste0(met_names$Major_species[which(met_names$Metier == c.names$metier[i])], "\n",met_names$Major_gear[which(met_names$Metier == c.names$metier[i])])
}
colnames(species_tab)[2:ncol(species_tab)] <- c.names$common_name
# species common names
spid <- read.csv("/Users/efuller/Desktop/CNH/processedData/catch/spid.csv",stringsAsFactors = FALSE)
r.names <- data.frame(spid = rownames(species_tab),stringsAsFactors = FALSE)
for(i in 1:nrow(r.names)){
  r.names$common_name[i] <- tolower(spid$common_name[which(tolower(spid$SPID)==r.names$spid[i])])
}
# newline-separate multi-word names so heatmap labels wrap
r.names$common_name <- gsub(" ","\n",r.names$common_name)
rownames(species_tab) <- r.names$common_name
# write figure S1a: species-by-fishery revenue-share heatmap
pdf("/Users/efuller/Desktop/CNH/Analysis/Metiers/bin/05_figures/S1a.pdf",width = 6, height = 6)
heatmap(t(as.matrix(species_tab[,2:ncol(species_tab)])),col=c("white", brewer.pal(9,"Greys")), scale = "row", margins = c(8,8),cexRow = .75, cexCol = .75)
dev.off()
# figure S2
# effort and revenue plot
library(dplyr)
# 2009-2010 totals per fishery: revenue and number of landed trips
met_summary <- tickets %>%
  filter(year %in% c(2009,2010)) %>%
  group_by(metier.2010) %>%
  summarize(revenue = sum(adj_revenue, na.rm = T),
            n.trips = length(unique(trip_id)))
met_summary$paint <- "black"
met_summary$cex <- .6
# enlarge the five highlighted fisheries that get text labels below
met_summary$cex[which(met_summary$metier.2010=="TWL_1")] <- 1
met_summary$cex[which(met_summary$metier.2010=="POT_1")] <- 1
met_summary$cex[which(met_summary$metier.2010=="TWS_1")] <- 1
met_summary$cex[which(met_summary$metier.2010=="MSC_1")] <- 1
met_summary$cex[which(met_summary$metier.2010=="HKL_2")] <- 1
par(cex.axis = .8, cex.lab = .9 )
# log-log scatter of trips vs revenue; labels placed per highlighted fishery
with(met_summary, plot(n.trips, revenue, type="p", bty="n",pch=19, xlab = "number of trips landed (2009-2010)", ylab="total revenue (2009-2010)",log = "xy", col = paint, cex = met_summary$cex, xlim=c(1,500000), ylim = c(1,1e9)))
text(met_summary$n.trips[which(met_summary$metier.2010=="TWL_1")], met_summary$revenue[which(met_summary$metier.2010=="TWL_1")], labels = "dover sole\nroller trawl", col = "black", cex = .8, pos = 3)
text(met_summary$n.trips[which(met_summary$metier.2010=="TWS_1")], met_summary$revenue[which(met_summary$metier.2010=="TWS_1")], labels = "pink shrimp\ntrawl", col = "black", cex = .8, pos = 2)
text(met_summary$n.trips[which(met_summary$metier.2010=="POT_1")], met_summary$revenue[which(met_summary$metier.2010=="POT_1")], labels = "dungenness\ncrab pots", col = "black", cex = .8, pos = 4)
text(met_summary$n.trips[which(met_summary$metier.2010=="MSC_1")], met_summary$revenue[which(met_summary$metier.2010=="MSC_1")], labels = "red urchin\ndiving", col = "black", cex = .8, pos = 4)
text(met_summary$n.trips[which(met_summary$metier.2010=="HKL_2")], met_summary$revenue[which(met_summary$metier.2010=="HKL_2")], labels = "black rockfish\nhook & line", col = "black", cex = .8, pos = 4)
9efce5710115340187d5874a9c1950574e88ab07
|
0aa63f99a9ebe79e55cc09338d5bb4ce2377fd83
|
/man/exportInstruments.Rd
|
3ea0273cdff745eb3a914a85d86cc603413d7959
|
[] |
no_license
|
nutterb/redcapAPI
|
c13b890b5d33b40c134833155861ee42d44b06c7
|
9b7287106198581c352fc91492d83fc7806d2bd7
|
refs/heads/main
| 2023-09-01T07:41:41.326514
| 2023-08-28T14:02:23
| 2023-08-28T14:02:23
| 11,691,011
| 47
| 31
| null | 2022-11-03T22:49:05
| 2013-07-26T17:31:51
|
R
|
UTF-8
|
R
| false
| true
| 1,886
|
rd
|
exportInstruments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exportInstruments.R
\name{exportInstruments}
\alias{exportInstruments}
\alias{exportInstruments.redcapApiConnection}
\title{Exports the REDCap Instruments}
\usage{
exportInstruments(rcon, ...)
\method{exportInstruments}{redcapApiConnection}(
rcon,
...,
error_handling = getOption("redcap_error_handling"),
config = list(),
api_param = list()
)
}
\arguments{
\item{rcon}{A REDCap connection object as generated by \code{redcapConnection}}
\item{...}{Arguments to be passed to other methods.}
\item{error_handling}{An option for how to handle errors returned by the API.
see \code{\link{redcapError}}}
\item{config}{\code{list} Additional configuration parameters to pass to
\code{\link[httr]{POST}}. These are appended to any parameters in
\code{rcon$config}.}
\item{api_param}{\code{list} Additional API parameters to pass into the
body of the API call. This provides users to execute calls with options
that may not otherwise be supported by \code{redcapAPI}.}
}
\description{
Returns a data frame of instruments, names, etc.
}
\section{REDCap Version}{
6.5.0 +
5.8.2+
}
\section{REDCap API Documentation}{
This function allows you to export a list of the data collection instruments
for a project. This includes their unique instrument name as seen in the second
column of the Data Dictionary, as well as each instrument's corresponding
instrument label, which is seen on a project's left-hand menu when entering data.
The instruments will be ordered according to their order in the project.
}
\section{Known REDCap Limitations}{
None
}
\references{
Please refer to your institution's API documentation.
Additional details on API parameters are found on the package wiki at
\url{https://github.com/vubiostat/redcapAPI/wiki/REDCap-API-Parameters}
}
\author{
Benjamin Nutter
}
|
ac93c09b266f615309ebd211a0ae6e5b49b1e5d7
|
b85cb92935407d40d03405ea09a7f96d005c1954
|
/Functions/cov_func.R
|
46b60ac309d3eb3e374ac69e29fa7d102b6cd015
|
[] |
no_license
|
enerhiya/Spatio-Temporal-Cross-Covariance-Functions-under-the-Lagrangian-Framework
|
0cccffd7a98d13e4f4c7353d9c42e923ae34dbdd
|
5084f24d9b89c9bff2794b0575a44d7ea0ccaf54
|
refs/heads/master
| 2021-06-18T19:50:38.829233
| 2021-02-17T17:09:46
| 2021-02-17T17:09:46
| 177,747,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38,555
|
r
|
cov_func.R
|
#---------STATIONARY------------#
# Frozen-field (Lagrangian / transported) bivariate Matern covariance,
# flattened to a vector for use inside parallel-apply loops.
#
# theta layout: theta[1:2] marginal variances sigma2, theta[3] spatial range
# beta, theta[4:5] smoothness parameters nu, theta[6] colocated correlation
# scaling of the cross-covariance.
#
# NOTE(review): the defaults `LOCS = locs_demean` and `theta = space_params`
# refer to objects that must exist in the calling environment (presumably
# set up by the surrounding analysis script) -- confirm before reuse.
# parDist() is from the parallelDist package; `%>%` is from magrittr.
frozen_matern_cov_parapply <- function(wind, max_time_lag = 1, q = 2, LOCS = locs_demean, theta = space_params){
  ################### ###################
  ################### RETURNS a q * nrow(LOCS) * (max_time_lag + 1) x q * nrow(LOCS) * (max_time_lag + 1) matrix ###################
  ################### ###################
  w <- wind
  locs <- coords <- LOCS
  # stack the locations shifted back along the wind vector, one copy per lag
  if (max_time_lag > 0){
    for (tt in 1:max_time_lag){
      temp_locs <- cbind(coords[, 1] - tt * w[1], coords[, 2] - tt * w[2])
      locs <- rbind(locs, temp_locs)
    }
  }
  nu <- theta[4:5]
  beta <- theta[3]
  sigma2 <- theta[1:2]
  dist0 <- parDist(x = locs, method = "euclidean") %>% as.matrix()
  S <- matrix(NA, q * nrow(dist0), q * nrow(dist0))
  # fill the q x q block structure; block (i, j) is the (cross-)covariance
  # of variables i and j over all space-time locations (block ordering)
  for(i in 1:q){
    for(j in 1:i){
      temp <- (i - 1) * nrow(dist0) + 1:nrow(dist0)
      temp1 <- (j - 1) * nrow(dist0) + 1:nrow(dist0)
      if(i == j){
        # marginal Matern; the ifelse supplies the variance at zero distance
        temp2 <- ifelse(dist0 != 0, sigma2[i] * (dist0 / beta)^nu[i] * besselK(dist0 / beta, nu[i]) / (2^(nu[i] - 1) * gamma(nu[i])), sigma2[i])
        S[temp, temp1] <- temp2
      }
      if(i != j){
        nu1 <- nu[i]
        nu2 <- nu[j]
        nu3 <- (nu1 + nu2)/2
        # cross-correlation rescaled so the bivariate Matern stays valid
        rho <- theta[6] * (gamma(nu1 + 3/2) / gamma(nu1))^(1/2) * (gamma(nu2 + 3/2) / gamma(nu2))^(1/2) * gamma(nu3) / (gamma(nu3 + 3/2))
        temp3 <- (dist0 / beta)^nu3 * besselK(dist0 / beta, nu3)/(2^(nu3 - 1) * gamma(nu3)) * sqrt(sigma2[i] * sigma2[j]) * rho
        # the expression is NaN at zero distance (0 * Inf); patch in the
        # colocated cross-covariance value
        temp3[is.na(temp3)] <- sqrt(sigma2[i] * sigma2[j]) * rho
        S[temp, temp1] <- temp3
        S[temp1, temp] <- t(temp3)
      }
    }
  }
  # returned flattened so parallel callers can re-assemble with matrix()
  return(c(S))
}
# Frozen-field (Lagrangian / transported) bivariate Matern covariance.
# Same computation as frozen_matern_cov_parapply() but returns the matrix
# itself rather than a flattened vector.
#
# theta layout: theta[1:2] variances, theta[3] range, theta[4:5] smoothness,
# theta[6] colocated cross-correlation scaling.
# NOTE(review): `wind = NULL` is only safe with max_time_lag = 0; any
# positive lag dereferences w[1]/w[2].  parDist() is from parallelDist.
frozen_matern_cov <- function(theta, wind = NULL, max_time_lag = 0, q = 2, LOCS){
  ################### ###################
  ################### RETURNS a q * nrow(LOCS) * (max_time_lag + 1) x q * nrow(LOCS) * (max_time_lag + 1) matrix ###################
  ################### ###################
  w <- wind
  locs <- coords <- LOCS
  # stack the locations shifted back along the wind vector, one copy per lag
  if (max_time_lag > 0){
    for (tt in 1:max_time_lag){
      temp_locs <- cbind(coords[, 1] - tt * w[1], coords[, 2] - tt * w[2])
      locs <- rbind(locs, temp_locs)
    }
  }
  nu <- theta[4:5]
  beta <- theta[3]
  sigma2 <- theta[1:2]
  dist0 <- parDist(x = locs, method = "euclidean") %>% as.matrix()
  S <- matrix(NA, q * nrow(dist0), q * nrow(dist0))
  # block ordering: variable i occupies rows (i-1)*n + 1 .. i*n
  for(i in 1:q){
    for(j in 1:i){
      temp <- (i - 1) * nrow(dist0) + 1:nrow(dist0)
      temp1 <- (j - 1) * nrow(dist0) + 1:nrow(dist0)
      if(i == j){
        # marginal Matern; ifelse supplies the variance at zero distance
        temp2 <- ifelse(dist0 != 0, sigma2[i] * (dist0 / beta)^nu[i] * besselK(dist0 / beta, nu[i]) / (2^(nu[i] - 1) * gamma(nu[i])), sigma2[i])
        S[temp, temp1] <- temp2
      }
      if(i != j){
        nu1 <- nu[i]
        nu2 <- nu[j]
        nu3 <- (nu1 + nu2)/2
        # validity-preserving cross-correlation scaling
        rho <- theta[6] * (gamma(nu1 + 3/2) / gamma(nu1))^(1/2) * (gamma(nu2 + 3/2) / gamma(nu2))^(1/2) * gamma(nu3) / (gamma(nu3 + 3/2))
        temp3 <- (dist0 / beta)^nu3 * besselK(dist0 / beta, nu3)/(2^(nu3 - 1) * gamma(nu3)) * sqrt(sigma2[i] * sigma2[j]) * rho
        # NaN at zero distance (0 * Inf); patch in the colocated value
        temp3[is.na(temp3)] <- sqrt(sigma2[i] * sigma2[j]) * rho
        S[temp, temp1] <- temp3
        S[temp1, temp] <- t(temp3)
      }
    }
  }
  return(S)
}
# Frozen bivariate Matern covariance with INTERLEAVED variable ordering:
# unlike frozen_matern_cov(), rows/columns alternate variables per location
# (indices seq(i, n*q, by = q)) instead of using contiguous variable blocks.
#
# theta layout: theta[1:2] variances, theta[3] range, theta[4:5] smoothness,
# theta[6] colocated cross-correlation scaling.  parDist() from parallelDist.
frozen_matern_cov_rep_I <- function(theta, wind, max_time_lag = 0, q = 2, LOCS){
  ################### ###################
  ################### RETURNS a q * nrow(LOCS) * (max_time_lag + 1) x q * nrow(LOCS) * (max_time_lag + 1) matrix ###################
  ################### ###################
  w <- wind
  locs <- coords <- LOCS
  # stack the locations shifted back along the wind vector, one copy per lag
  if (max_time_lag > 0){
    for (tt in 1:max_time_lag){
      temp_locs <- cbind(coords[, 1] - tt * w[1], coords[, 2] - tt * w[2])
      locs <- rbind(locs, temp_locs)
    }
  }
  nu <- theta[4:5]
  beta <- theta[3]
  sigma2 <- theta[1:2]
  dist0 <- parDist(x = locs, method = "euclidean") %>% as.matrix()
  S <- matrix(NA, q * nrow(dist0), q * nrow(dist0))
  for(i in 1:q){
    for(j in 1:i){
      # interleaved index sets: variable i lives at rows i, i+q, i+2q, ...
      temp <- seq(i, nrow(dist0) * q, by = q)
      temp1 <- seq(j, nrow(dist0) * q, by = q)
      if(i == j){
        # marginal Matern; ifelse supplies the variance at zero distance
        temp2 <- ifelse(dist0 != 0, sigma2[i] * (dist0 / beta)^nu[i] * besselK(dist0 / beta, nu[i]) / (2^(nu[i] - 1) * gamma(nu[i])), sigma2[i])
        S[temp, temp1] <- temp2
      }
      if(i != j){
        nu1 <- nu[i]
        nu2 <- nu[j]
        nu3 <- (nu1 + nu2)/2
        # validity-preserving cross-correlation scaling
        rho <- theta[6] * (gamma(nu1 + 3/2) / gamma(nu1))^(1/2) * (gamma(nu2 + 3/2) / gamma(nu2))^(1/2) * gamma(nu3) / (gamma(nu3 + 3/2))
        temp3 <- (dist0 / beta)^nu3 * besselK(dist0 / beta, nu3)/(2^(nu3 - 1) * gamma(nu3)) * sqrt(sigma2[i] * sigma2[j]) * rho
        # NaN at zero distance (0 * Inf); patch in the colocated value
        temp3[is.na(temp3)] <- sqrt(sigma2[i] * sigma2[j]) * rho
        S[temp, temp1] <- temp3
        S[temp1, temp] <- t(temp3)
      }
    }
  }
  return(S)
}
# Frozen (transported) bivariate Matern covariance values against the
# reference location (0, 0), laid out for heatmap plotting.
#
# @param theta parameter vector: theta[1:2] marginal variances, theta[3]
#   spatial range beta, theta[4:5] smoothnesses nu1/nu2, theta[6] colocated
#   cross-correlation scaling.
# @param wind length-2 transport (advection) velocity per unit time lag.
# @param q kept for interface compatibility; the output always has the two
#   marginals plus their cross term as its three columns.
# @param N grid resolution per axis on [-0.5, 0.5] (default 51, as before).
# @param TT number of time lags evaluated, lags 0..TT-1 (default 3).
# @return (N^2 * TT) x 3 matrix: columns are C11, C22 and C12 versus (0,0);
#   row tt*N^2 + l is grid point l at time lag tt.
frozen_matern_cov_for_heatmap <- function(theta, wind, q = 2, N = 51, TT = 3){
  n <- N^2
  grid_x <- seq(from = -0.5, to = 0.5, length.out = N)
  # base-R composition instead of the magrittr pipe (removes the dependency)
  sim_grid_locations <- as.matrix(expand.grid(grid_x, grid_x))
  w <- wind
  nu <- theta[4:5]
  beta <- theta[3]
  sigma2 <- theta[1:2]
  nu1 <- nu[1]
  nu2 <- nu[2]
  nu3 <- (nu1 + nu2)/2
  # colocated cross-correlation, rescaled so the bivariate Matern stays valid
  rho <- theta[6] * (gamma(nu1 + 3/2) / gamma(nu1))^(1/2) * (gamma(nu2 + 3/2) / gamma(nu2))^(1/2) * gamma(nu3) / (gamma(nu3 + 3/2))
  S <- matrix(NA, n * TT, 3)
  for(i in 1:3){
    for(tt in 0:(TT - 1)){
      for(l in 1:nrow(sim_grid_locations)){
        # Lagrangian shift: distance between (0,0) and the grid point
        # transported back by tt * w
        temp_locs <- rbind(cbind(0, 0), cbind(sim_grid_locations[l, 1] - tt * w[1], sim_grid_locations[l, 2] - tt * w[2]))
        dist0 <- as.numeric(dist(temp_locs))
        if (i < 3) {
          # marginal Matern for variable i; variance at zero distance
          temp2 <- ifelse(dist0 != 0, sigma2[i] * (dist0 / beta)^nu[i] * besselK(dist0 / beta, nu[i]) / (2^(nu[i] - 1) * gamma(nu[i])), sigma2[i])
        } else {
          # cross-covariance (column 3) with smoothness nu3
          temp2 <- ifelse(dist0 != 0, (dist0 / beta)^nu3 * besselK(dist0 / beta, nu3)/(2^(nu3 - 1) * gamma(nu3)) * sqrt(sigma2[1] * sigma2[2]) * rho, sqrt(sigma2[1] * sigma2[2]) * rho)
        }
        S[tt * n + l, i] <- temp2
      }
    }
  }
  return(S)
}
#------------------------------- END ----------------------------#
# Frozen bivariate Matern covariance with an asymmetry shift `kap` applied
# to a second copy of the locations.  spDists() is from the sp package.
#
# theta layout here differs from the frozen_matern_* functions above:
# theta[1:2] smoothness nu, theta[3] range beta, theta[4:5] variances,
# theta[6] cross-correlation rho, theta[7:8] optional nuggets.
#
# NOTE(review): loc is built as rbind(loc1, loc1, loc2) and the final S1
# extraction uses blocks 1:n and (3n+1):(4n), i.e. variable 2 evaluated at
# the FIRST copy of loc1 -- the kap-shifted loc2 rows (5n+1 .. 6n) never
# reach the returned matrix.  Compare matern_cov_old(), which uses
# rbind(loc1, loc2) so that the same indices pick loc2.  Confirm whether the
# duplicated loc1 here is intended.
matern_cov_soph <- function(theta, wind, max_time_lag, q, new_locations = locations, meters = T, nug_eff = F, kap = F){
  w <- wind
  loc1 <- coords1 <- new_locations
  # stack the unshifted locations transported back along the wind per lag
  if (max_time_lag == 0){
    loc1 <- loc1
  } else {
    for (tt in 1:max_time_lag){
      temploc <- matrix(, ncol=2, nrow=nrow(coords1))
      for(rr in 1:nrow(coords1)){
        temploc[rr,] <- c(coords1[rr,1] - tt*w[1], coords1[rr,2] - tt*w[2])
      }
      loc1 <- rbind(loc1, temploc)
    }
  }
  # same stacking for the kap-shifted copy of the locations
  loc2 <- coords2 <- cbind(new_locations[,1] - kap[1], new_locations[,2] - kap[2])
  if (max_time_lag == 0){
    loc2 <- loc2
  } else {
    for (tt in 1:max_time_lag){
      temploc <- matrix(, ncol=2, nrow=nrow(coords2))
      for(rr in 1:nrow(coords2)){
        temploc[rr,] <- c(coords2[rr,1] - tt*w[1], coords2[rr,2] - tt*w[2])
      }
      loc2 <- rbind(loc2, temploc)
    }
  }
  loc <- rbind(loc1, loc1, loc2)
  # meters == T converts distances from m to km
  if(meters == T){
    dist0 <- spDists(loc, longlat=F)/1000
  }else{
    dist0 <- spDists(loc, longlat=F)
  }
  nu <- theta[1:2]
  beta <- theta[3]
  var <- theta[4:5]
  rho <- theta[6]
  if(nug_eff == T){
    nug <- theta[7:8]
  }else{
    nug <- c(0, 0)
  }
  # assemble the full q x q block covariance over all stacked locations
  S=matrix(NA, q*dim(dist0)[1], q*dim(dist0)[1])
  for(i in 1:q){
    for(j in 1:i){
      temp=(i-1)*dim(dist0)[1]+1:dim(dist0)[1]
      temp1=(j-1)*dim(dist0)[1]+1:dim(dist0)[1]
      if(i==j){
        # marginal Matern; ifelse supplies variance + nugget at distance 0
        temp2=ifelse(dist0!=0,var[i]*(dist0/beta)^nu[i] * besselK(dist0/beta, nu[i])/(2^(nu[i]-1)*gamma(nu[i])),var[i]+nug[i])
        S[temp,temp1]=temp2
      }
      if(i != j){
        nu1 <- nu[i]
        nu2 <- nu[j]
        nu3 <- (nu1 + nu2)/2
        #rho=Beta[i,j]*(gamma(nu1+3/2)/gamma(nu1))^(1/2) * (gamma(nu2+3/2)/gamma(nu2))^(1/2)*gamma(nu3)/(gamma(nu3+3/2))
        temp3 <- (dist0/beta)^nu3 * besselK(dist0/beta,nu3)/(2^(nu3-1)*gamma(nu3))*sqrt(var[i] * var[j])*rho
        # NaN at zero distance (0 * Inf); patch in the colocated value
        temp3[is.na(temp3)] <- sqrt(var[i] * var[j])*rho
        S[temp,temp1] <- temp3
        S[temp1,temp] <- t(temp3)
      }
    }
  }
  # extract the 2x2 block structure used downstream (see NOTE above)
  S1 <- rbind(cbind(S[1:nrow(loc1), 1:nrow(loc1)], S[1:nrow(loc1), (nrow(loc1)*3 + 1):(nrow(loc1)*4)]),
              cbind(S[(nrow(loc1)*3 + 1):(nrow(loc1)*4), 1:nrow(loc1)], S[(nrow(loc1)*3 + 1):(nrow(loc1)*4), (nrow(loc1)*3 + 1):(nrow(loc1)*4)]))
  return(S1)
}
# Frozen bivariate Matern covariance with opposite asymmetry shifts applied
# to the two location copies (variable 1 shifted by +kap[1,]-kap[2,],
# variable 2 by the negative).  spDists() is from the sp package.
#
# theta layout: theta[1:2] smoothness nu, theta[3] range beta, theta[4:5]
# variances, theta[6] cross-correlation rho, theta[7:8] optional nuggets.
# NOTE(review): the default kap (704400+100, 205700+100, ...) looks like
# site-specific projected coordinates from the original study -- confirm
# before reusing with other data.
matern_cov_old <- function(theta, wind, max_time_lag, q, new_locations = locations, meters = T, nug_eff, kap = matrix(c(704400+ 100, 205700 + 100, 100, 100), ncol = 2, nrow = 2, byrow=T)){
  w <- wind
  # locations for variable 1, shifted by +(kap[1,] - kap[2,])
  loc1 <- coords1 <- cbind(new_locations[,1] + kap[1,1] - kap[2,1], new_locations[,2] + kap[1,2] - kap[2,2])
  if (max_time_lag == 0){
    loc1 <- loc1
  } else {
    # stack copies transported back along the wind, one per time lag
    for (tt in 1:max_time_lag){
      temploc <- matrix(, ncol=2, nrow=nrow(coords1))
      for(rr in 1:nrow(coords1)){
        temploc[rr,] <- c(coords1[rr,1] - tt*w[1], coords1[rr,2] - tt*w[2])
      }
      loc1 <- rbind(loc1, temploc)
    }
  }
  # locations for variable 2, shifted by the opposite amount
  loc2 <- coords2 <- cbind(new_locations[,1] - kap[1,1] + kap[2,1], new_locations[,2] - kap[1,2] + kap[2,2])
  if (max_time_lag == 0){
    loc2 <- loc2
  } else {
    for (tt in 1:max_time_lag){
      temploc <- matrix(, ncol=2, nrow=nrow(coords2))
      for(rr in 1:nrow(coords2)){
        temploc[rr,] <- c(coords2[rr,1] - tt*w[1], coords2[rr,2] - tt*w[2])
      }
      loc2 <- rbind(loc2, temploc)
    }
  }
  loc <- rbind(loc1, loc2)
  # meters == T converts distances from m to km
  if(meters == T){
    dist0 <- spDists(loc, longlat=F)/1000
  }else{
    dist0 <- spDists(loc, longlat=F)
  }
  nu <- theta[1:2]
  beta <- theta[3]
  var <- theta[4:5]
  rho <- theta[6]
  if(nug_eff == T){
    nug <- theta[7:8]
  }else{
    nug <- c(0, 0)
  }
  # full q x q block covariance over the 2n stacked locations (S is 4n x 4n)
  S=matrix(NA, q*dim(dist0)[1], q*dim(dist0)[1])
  for(i in 1:q){
    for(j in 1:i){
      temp=(i-1)*dim(dist0)[1]+1:dim(dist0)[1]
      temp1=(j-1)*dim(dist0)[1]+1:dim(dist0)[1]
      if(i==j){
        # marginal Matern; ifelse supplies variance + nugget at distance 0
        temp2=ifelse(dist0!=0,var[i]*(dist0/beta)^nu[i] * besselK(dist0/beta, nu[i])/(2^(nu[i]-1)*gamma(nu[i])),var[i]+nug[i])
        S[temp,temp1]=temp2
      }
      if(i != j){
        nu1 <- nu[i]
        nu2 <- nu[j]
        nu3 <- (nu1 + nu2)/2
        #rho=Beta[i,j]*(gamma(nu1+3/2)/gamma(nu1))^(1/2) * (gamma(nu2+3/2)/gamma(nu2))^(1/2)*gamma(nu3)/(gamma(nu3+3/2))
        temp3 <- (dist0/beta)^nu3 * besselK(dist0/beta,nu3)/(2^(nu3-1)*gamma(nu3))*sqrt(var[i] * var[j])*rho
        # NaN at zero distance (0 * Inf); patch in the colocated value
        temp3[is.na(temp3)] <- sqrt(var[i] * var[j])*rho
        S[temp,temp1] <- temp3
        S[temp1,temp] <- t(temp3)
      }
    }
  }
  # keep variable 1 at loc1 (rows 1:n) and variable 2 at loc2 (rows 3n+1:4n)
  S1 <- rbind(cbind(S[1:nrow(loc1), 1:nrow(loc1)], S[1:nrow(loc1), (nrow(loc1)*3 + 1):(nrow(loc1)*4)]),
              cbind(S[(nrow(loc1)*3 + 1):(nrow(loc1)*4), 1:nrow(loc1)], S[(nrow(loc1)*3 + 1):(nrow(loc1)*4), (nrow(loc1)*3 + 1):(nrow(loc1)*4)]))
  return(S1)
}
# Bivariate Matern covariance under RANDOM advection: the transport velocity
# has mean `wind` and covariance built from `wind_var`, and the resulting
# covariance is computed by numerically integrating a scale-mixture
# representation (integrate() over y) for every location pair and time lag.
#
# theta layout: theta[1:2] smoothness nu, theta[3] range beta, theta[4]
# cross-correlation rho, theta[5:6] variances, theta[7:8] optional nuggets.
# dmvn() is presumably from the mvnfast package -- confirm; toeplitz_mat()
# is a helper defined elsewhere in this project.
#
# NOTE(review): `loc`/`loc2` are assigned but never used afterwards (the
# loops read coords/coords2 directly), and the cat() calls inside the
# triple loop are leftover debug output -- consider removing both.
# NOTE(review): the nugget `nug` is computed but never applied below.
matern_random_cov <- function(theta, wind, wind_var, max_time_lag, q, new_locations, meters = T, nug_eff, kap){
  nu <- theta[1:2]
  beta <- theta[3]
  rho <- theta[4]
  var <- theta[5:6]
  if(nug_eff == T){
    nug <- theta[7:8]
  }else{
    nug <- c(0, 0)
  }
  # meters == T rescales coordinates, mean wind and wind covariance to km
  if(meters == T){
    w <- wind/1000
    Sigma <- matrix(c(wind_var[1:2], wind_var[2:3]), ncol = 2)/1000
    loc <- coords <- new_locations/1000
    loc2 <- coords2 <- cbind(new_locations[,1] - kap[1], new_locations[,2] - kap[2])/1000
  }else{
    w <- wind
    Sigma <- matrix(c(wind_var[1:2], wind_var[2:3]), ncol=2)
    loc <- coords <- new_locations
    loc2 <- coords2 <- cbind(new_locations[,1] - kap[1], new_locations[,2] - kap[2])
  }
  SS <- list()
  S <- matrix(NA, q*nrow(coords)*(max_time_lag + 1), q*nrow(coords)*(max_time_lag + 1))
  for(i in 1:q){
    for(j in 1:i){
      # row/column index sets of the (i, j) variable block
      temp2 <- (i-1)*(nrow(coords)*(max_time_lag + 1)) + 1:(nrow(coords)*(max_time_lag + 1))
      temp1 <- (j-1)*(nrow(coords)*(max_time_lag + 1)) + 1:(nrow(coords)*(max_time_lag + 1))
      if(i == j){
        # one n x n slice per time lag; toeplitz_mat() then assembles the
        # full block assuming stationarity in time
        for(tt in 0:max_time_lag){
          temploc <- matrix(, ncol=nrow(coords), nrow=nrow(coords))
          for(rr in 1:nrow(coords)){
            for(ss in 1:nrow(coords)){
              cat(tt,rr,ss,'\n')
              h <- c(coords[rr,1]-coords[ss,1],coords[rr,2]-coords[ss,2])
              emp_cov1 <- c(h[1], h[2], tt)
              # integrand of the gamma scale mixture: Gaussian density of the
              # spatial lag centred at tt*w with variance inflated by the lag
              Int.func <- function(c, hvec){
                y.fun <- function(y) y^(nu[i])*exp(-y)*dmvn(X = hvec[1:2], mu = hvec[3]*w, sigma = (hvec[3]^2*Sigma + beta^2*2*y*diag(2)))
                sapply(c, y.fun)
              }
              lai <- function(xxxx) integrate(Int.func, lower = 0, upper = Inf, hvec = xxxx, abs.tol = 1e-18, rel.tol = 1e-18)$val
              temp <- lai(emp_cov1)
              # exact variance at the space-time origin, integral elsewhere
              temploc[rr,ss] <- ifelse(tt == 0 & h[1] == 0 & h[2] == 0, var[i], var[i]*4*pi*temp*beta^2/gamma(nu[i]))
            }
          }
          SS[[tt + 1]] <- temploc
        }
        S2 <- toeplitz_mat(SS)
        S[temp2,temp1] <- S2
      }
      if(i != j){
        nu1 <- nu[i]
        nu2 <- nu[j]
        nu3 <- (nu1 + nu2)/2
        #rho=rot*(gamma(nu1+3/2)/gamma(nu1))^(1/2) * (gamma(nu2+3/2)/gamma(nu2))^(1/2)*gamma(nu3)/(gamma(nu3+3/2))
        # cross block: same mixture with averaged smoothness nu3, evaluated
        # on the kap-shifted coordinates coords2
        for(tt in 0:max_time_lag){
          temploc <- matrix(, ncol = nrow(coords), nrow = nrow(coords))
          for(rr in 1:nrow(coords)){
            for(ss in 1:nrow(coords)){
              cat(tt, rr, ss,'\n')
              h <- c(coords2[rr,1] - coords2[ss,1], coords2[rr,2] - coords2[ss,2])
              emp_cov1 <- c(h[1], h[2], tt)
              Int.func <- function(c, hvec){
                y.fun <- function(y) y^(nu3)*exp(-y)*dmvn(X = hvec[1:2], mu = hvec[3]*w, sigma = (hvec[3]^2*Sigma + beta^2*2*y*diag(2)))
                sapply(c, y.fun)
              }
              lai <- function(xxxx) integrate(Int.func, lower = 0, upper = Inf, hvec = xxxx, abs.tol = 1e-18, rel.tol = 1e-18)$val
              temp <- lai(emp_cov1)
              temploc[rr, ss] <- ifelse(tt == 0 & h[1] == 0 & h[2] == 0, sqrt(var[i] * var[j])*rho, sqrt(var[i] * var[j])*rho*4*pi*temp*beta^2/gamma(nu3))
            }
          }
          SS[[tt + 1]] <- temploc
        }
        S2 <- toeplitz_mat(SS)
        S[temp2, temp1] <- S2
        S[temp1, temp2] <- t(S2)
      }
    }
  }
  return(S)
}
# Linear Model of Coregionalization (LMC) space-time covariance under a
# frozen-field transport, with a possibly different wind vector per latent
# component.  spDists() is from the sp package.
#
# theta layout: theta[1:2] smoothness nu, theta[3:4] ranges beta, theta[5:6]
# latent variances; theta[7:10] fill the 2x2 coregionalization matrix
# `alpha` row-wise.
# NOTE(review): when nug_eff is TRUE the nugget also reads theta[7:8], which
# collides with the alpha entries -- confirm the intended parameter layout
# before using nug_eff = TRUE.
#
# @param wind length-4 vector, reshaped row-wise into 2x2: row i is the
#   advection velocity of latent component i.
# @param new_locations n x 2 coordinate matrix.  BUGFIX: the body previously
#   read the global `locations`, silently ignoring this argument.
# @return (q * n * (max_time_lag + 1)) square covariance matrix.
lmc_cov <- function(theta, wind, max_time_lag, q, new_locations = locations, meters = T, nug_eff){
  nu <- theta[1:2]
  beta <- theta[3:4]
  var <- theta[5:6]
  if(nug_eff == TRUE){
    nug <- theta[7:8]
  }else{
    nug <- c(0, 0)
  }
  alpha <- matrix(c(theta[7], theta[8], theta[9], theta[10]), ncol=2, byrow=T)
  # unit conversion (m -> km) is loop-invariant, so hoist it out of the loop
  if(meters == TRUE){
    w <- matrix(wind, ncol = 2, byrow = T)/1000
    coords <- new_locations/1000   # BUGFIX: was the global `locations`
  }else{
    w <- matrix(wind, ncol = 2, byrow = T)
    coords <- new_locations        # BUGFIX: was the global `locations`
  }
  S <- list()
  for(i in 1:q){
    # stack the coordinates transported back along component i's wind,
    # one copy per time lag
    loc <- coords
    if (max_time_lag > 0){
      for (tt in 1:max_time_lag){
        temploc <- cbind(coords[, 1] - tt*w[i, 1], coords[, 2] - tt*w[i, 2])
        loc <- rbind(loc, temploc)
      }
    }
    dist0 <- spDists(loc, longlat = F)
    # marginal Matern for latent component i; ifelse supplies the variance
    # (+ nugget) at zero distance
    SS <- ifelse(dist0 != 0, var[i]*(dist0/beta[i])^nu[i] * besselK(dist0/beta[i], nu[i])/(2^(nu[i] - 1)*gamma(nu[i])), var[i] + nug[i])
    S[[i]] <- SS
  }
  # assemble the observable 2x2 block covariance A %*% diag(S) %*% t(A)
  S1 <- rbind(cbind(alpha[1,1]^2*S[[1]] + alpha[1,2]^2*S[[2]], alpha[1,1]*alpha[2,1]*S[[1]] + alpha[1,2]*alpha[2,2]*S[[2]]),
              cbind(alpha[1,1]*alpha[2,1]*S[[1]] + alpha[1,2]*alpha[2,2]*S[[2]], alpha[2,1]^2*S[[1]] + alpha[2,2]^2*S[[2]]))
  return(S1)
}
# LMC space-time covariance under RANDOM advection: each latent component is
# transported by a wind with mean w[i,] and covariance sigma[[i]], which
# enters through a Mahalanobis-type distance and a determinant denominator.
# spDists() is from sp; toeplitz_mat() is a helper defined elsewhere here.
#
# theta layout: theta[1:2] smoothness nu, theta[3:4] ranges beta, theta[5:6]
# latent variances; theta[7:10] fill the 2x2 coregionalization matrix
# `alpha` row-wise.
# NOTE(review): when nug_eff is TRUE the nugget also reads theta[7:8], which
# collides with the alpha entries -- confirm the intended layout.
#
# @param wind_var list of two 2x2 wind covariance matrices, one per latent
#   component.
# @param new_locations n x 2 coordinate matrix.  BUGFIX: the body previously
#   read the global `locations`, silently ignoring this argument.
# @return (2 * n * (max_time_lag + 1)) square covariance matrix.
lmc_random_cov <- function(theta, wind, wind_var, max_time_lag, q, new_locations, meters = T, nug_eff){
  nu <- theta[1:2]
  beta <- theta[3:4]
  var <- theta[5:6]
  if(nug_eff == TRUE){
    nug <- theta[7:8]
  }else{
    nug <- c(0, 0)
  }
  alpha <- matrix(c(theta[7], theta[8], theta[9], theta[10]), ncol=2, byrow=T)
  # unit conversion (m -> km) for coordinates, mean winds and wind covariances
  if(meters == TRUE){
    w <- matrix(wind, ncol = 2, byrow = T)/1000
    coords <- new_locations/1000   # BUGFIX: was the global `locations`
    sigma <- wind_var
    sigma[[1]] <- sigma[[1]]/1000
    sigma[[2]] <- sigma[[2]]/1000
  }else{
    w <- matrix(wind, ncol = 2, byrow = T)
    coords <- new_locations        # BUGFIX: was the global `locations`
    sigma <- wind_var
  }
  S <- list()
  temploc <- denom <- list()
  for(i in 1:q){
    # lag 0: plain Euclidean distances, determinant denominator of 1
    temploc[[1]] <- spDists(coords, longlat = F)
    denom[[1]] <- matrix(1, ncol = ncol(temploc[[1]]), nrow = nrow(temploc[[1]]))
    if (max_time_lag > 0){
      for (tt in 1:max_time_lag){
        # Mahalanobis-type distance of the wind-shifted spatial lag under
        # the inflated kernel diag(2) + sigma[[i]]
        temploc.temp <- matrix(, ncol=nrow(coords), nrow=nrow(coords))
        for(rr in 1:nrow(coords)){
          for(ss in 1:nrow(coords)){
            temploc.temp[rr,ss] <- sqrt((coords[rr,] - coords[ss,] - tt*w[i,])%*%solve(diag(2) + sigma[[i]])%*%matrix((coords[rr,] - coords[ss,] - tt*w[i,]), ncol=1))
          }
        }
        temploc[[tt + 1]] <- temploc.temp
        denom[[tt + 1]] <- sqrt(det(diag(2) + tt^2*sigma[[i]]))
      }
    }
    # assemble lag slices into the full space-time blocks
    dist0 <- toeplitz_mat(temploc)
    denom.fin <- toeplitz_mat(denom)
    # marginal Matern for latent component i, scaled by the random-wind
    # normalising denominator
    SS <- ifelse(dist0 != 0, var[i]*(dist0/beta[i])^nu[i] * besselK(dist0/beta[i], nu[i])/(2^(nu[i]-1)*gamma(nu[i])), var[i] + nug[i])
    S[[i]] <- SS/denom.fin
  }
  # observable 2x2 block covariance A %*% diag(S) %*% t(A)
  S1 <- rbind(cbind(alpha[1,1]^2*S[[1]] + alpha[1,2]^2*S[[2]], alpha[1,1]*alpha[2,1]*S[[1]] + alpha[1,2]*alpha[2,2]*S[[2]]),
              cbind(alpha[1,1]*alpha[2,1]*S[[1]] + alpha[1,2]*alpha[2,2]*S[[2]], alpha[2,1]^2*S[[1]] + alpha[2,2]^2*S[[2]]))
  return(S1)
}
# Bivariate Matern with a Gneiting/Allard-style nonseparable space-time
# interaction: spatial distances are damped by (alpha * |u|^(2a) + 1)^(b/2)
# where u is the time lag.  spDists() is from sp; toeplitz_mat() is a
# project helper defined elsewhere.
#
# theta layout: theta[1:2] smoothness nu, theta[3] range beta, theta[4:5]
# variances, theta[6] cross-correlation rho; when nug_eff is FALSE,
# theta[7] = alpha and theta[8] = b (with a fixed to 1).
#
# NOTE(review): `alpha`, `a` and `b` are only assigned in the
# nug_eff-FALSE branch; calling with nug_eff = TRUE leaves them undefined
# and the dist0 computation below errors.  The intended theta slots for
# alpha/b alongside a nugget are unclear, so this is flagged, not changed.
matern_allard <- function(theta, max_time_lag, q, new_locations = locations, meters = T, nug_eff){
  loc <- coords <- new_locations
  tloc <- list()
  # stack unshifted location copies per lag; tloc[[k]] holds the constant
  # time-lag matrix (k - 1) for the same pair grid
  if (max_time_lag == 0){
    loc <- loc
    tloc.temp <- matrix(0, ncol = nrow(coords), nrow = nrow(coords))
    tloc[[1]] <- tloc.temp
  } else {
    tloc.temp <- matrix(0, ncol = nrow(coords), nrow = nrow(coords))
    tloc[[1]] <- tloc.temp
    for (tt in 1:max_time_lag){
      loc <- rbind(loc, coords)
      tloc.temp <- matrix(tt, ncol = nrow(coords), nrow = nrow(coords))
      tloc[[tt + 1]] <- tloc.temp
    }
  }
  # meters == T converts distances from m to km
  if(meters == T){
    dist1 <- spDists(loc, longlat = F)/1000
  }else{
    dist1 <- spDists(loc, longlat = F)
  }
  nu <- theta[1:2]
  beta <- theta[3]
  var <- theta[4:5]
  rho <- theta[6]
  if(nug_eff == T){
    nug <- theta[7:8]
  }else{
    nug <- c(0, 0)
    alpha <- theta[7]
    a <- 1
    b <- theta[8]
  }
  # full space-time lag matrices and the time-damped effective distance
  dist2 <- toeplitz_mat(tloc)
  dist0 <- dist1/(alpha*dist2^(2*a) + 1)^(b/2)
  S=matrix(NA, q*dim(dist0)[1], q*dim(dist0)[1])
  for(i in 1:q){
    for(j in 1:i){
      temp=(i-1)*dim(dist0)[1]+1:dim(dist0)[1]
      temp1=(j-1)*dim(dist0)[1]+1:dim(dist0)[1]
      if(i == j){
        # marginal Matern on the damped distance, divided by the temporal
        # interaction factor; zero-distance branch carries variance + nugget
        temp2=ifelse(dist0 != 0, var[i]*(dist0/beta)^nu[i] * besselK(dist0/beta,nu[i])/(2^(nu[i]-1)*gamma(nu[i]))/((alpha*(dist2)^(2*a)+1)),(var[i]+nug[i])/(alpha*(dist2)^(2*a)+1))
        #temp2=ifelse(dist0!=0,var[i]*(dist0/beta[i])^nu[i] * besselK(dist0/beta[i],nu[i])/(2^(nu[i]-1)*gamma(nu[i])),var[i]+nug[i])
        S[temp,temp1]=temp2
      }
      if(i != j){
        nu1 <- nu[i]
        nu2 <- nu[j]
        nu3 <- (nu1+nu2)/2
        #rho=Beta[i,j]*(gamma(nu1+3/2)/gamma(nu1))^(1/2) * (gamma(nu2+3/2)/gamma(nu2))^(1/2)*gamma(nu3)/(gamma(nu3+3/2))
        # cross-covariance with averaged smoothness, same temporal damping
        lai=ifelse(dist0 != 0, (dist0/beta)^nu3 * besselK(dist0/beta, nu3)/(2^(nu3 - 1)*gamma(nu3))*sqrt(var[i] * var[j])*rho/((alpha*(dist2)^(2*a) + 1)), sqrt(var[i] * var[j])*rho/((alpha*(dist2)^(2*a)+1)))
        S[temp,temp1] <- lai
        S[temp1,temp] <- t(lai)
      }
    }
  }
  return(S)
}
# Lagrangian (transported) bivariate covariance built from a compactly
# supported Askey/Wendland-type kernel (1 - h/scale)_+^power.
# spDists() is from the sp package.
#
# theta layout: theta[1:2] per-variable exponents mu, theta[3] shared
# exponent nu, theta[4] compact-support range `scale`.
# NOTE(review): `nug_eff` is accepted for signature consistency with the
# sibling covariance functions but is not used here.
#
# @return (q * n * (max_time_lag + 1)) square covariance matrix with
#   variable-block ordering, where n = nrow(new_locations).
cov_lagrangian <- function(theta, wind, max_time_lag, q, new_locations = locations, meters = T, nug_eff){
  w <- wind
  loc <- coords <- new_locations
  # stack the locations transported back along the wind, one copy per lag
  if (max_time_lag > 0){
    for (tt in 1:max_time_lag){
      temploc <- matrix(, ncol = 2, nrow = nrow(coords))
      # BUGFIX: the loop previously iterated 1:nrow(coords1); `coords1`
      # does not exist in this function (copy-paste from matern_cov_soph)
      for (rr in 1:nrow(coords)){
        temploc[rr, ] <- c(coords[rr, 1] - tt*w[1], coords[rr, 2] - tt*w[2])
      }
      loc <- rbind(loc, temploc)
    }
  }
  # meters == T converts distances from m to km
  if(meters == T){
    dist0 <- spDists(loc, longlat = F)/1000
  }else{
    dist0 <- spDists(loc, longlat = F)
  }
  mu <- theta[1:2]
  nu <- theta[3]
  scale <- theta[4]
  S <- matrix(NA, q*dim(dist0)[1], q*dim(dist0)[1])
  for(i in 1:q){
    for(j in 1:i){
      temp <- (i - 1)*dim(dist0)[1] + 1:dim(dist0)[1]
      temp1 <- (j - 1)*dim(dist0)[1] + 1:dim(dist0)[1]
      if(i == j){
        # marginal: (1 - h/scale)_+^(nu + mu_i)
        temp2 <- pmax((1 - dist0/scale), 0)^(nu + mu[i])
        S[temp, temp1] <- temp2
      }
      if(i != j){
        mu1 <- mu[i]
        mu2 <- mu[j]
        mu3 <- (mu1 + mu2)/2
        # cross-covariance scaling factor that keeps the bivariate model valid
        beta3 <- (gamma(1 + mu3)/gamma(1 + nu + mu3))*sqrt((gamma(1 + nu + mu1)*gamma(1 + nu + mu2))/(gamma(1 + mu1)*gamma(1 + mu2)))
        lai <- beta3*pmax((1 - dist0/scale), 0)^(nu + mu3)
        S[temp, temp1] <- lai
        S[temp1, temp] <- t(lai)
      }
    }
  }
  return(S)
}
#---------NONSTATIONARY---------#
# Nonstationary bivariate (q = 2) space-time Matern cross-covariance on a
# regular grid. Time is handled Lagrangian-style: the spatial grid at time
# lag tt is the base grid advected by -tt * wind.
#
# Arguments:
#   theta - c(nu1, nu2, beta, rot): Matern smoothness for each process, a
#           common range/scale parameter, and the colocated cross-correlation.
#   wind  - length-2 advection vector (coordinate shift per unit time lag).
#   time  - number of time points replicated into the coordinate list.
#
# Returns the (2 * n * time) x (2 * n * time) covariance matrix S.
#
# The local anisotropy kernels and the pair-wise scaling/distance matrices
# appear to follow the Paciorek-Schervish-style nonstationary construction
# (local 2x2 kernels, averaged per pair, determinant-based scaling) -- see
# the Sigma.mat / Q.mat computation below.
#
# NOTE(review): reads the global `sim_grid_locations` (an n x 2 coordinate
# matrix) instead of taking it as an argument -- TODO pass it in explicitly.
# NOTE(review): `matern_cov_regular_grid_v4` later in this file is a
# byte-identical copy of this function.
matern_cov_regular_grid <-function(theta,wind,time){
w <- wind
t=time   # shadows base::t, but t(x) calls below still resolve to the function
q=2
# Build the advected coordinate list: base grid first, then one wind-shifted
# copy of the grid per additional time point, stacked row-wise.
# coordinate list
loc <- coords <- sim_grid_locations
n <- nrow(sim_grid_locations)
if (t==1){
loc <- loc
} else {
for (tt in 1:(t-1)){
temploc <- matrix(,ncol=2,nrow=nrow(coords))
for(rr in 1:nrow(coords)){
temploc[rr,] <- c(coords[rr,1]-tt*w[1],coords[rr,2]-tt*w[2])
}
loc <- rbind(loc, temploc)
}
}
locations <- loc
# Quadratic surface in the (shifted) coordinates; lets the local kernel
# parameters below vary smoothly over space.
theta2 <- function (n,beta0,beta1,beta2,beta3,beta4) {
theta3 <- beta0 + beta1*(locations[,1] - .5) + beta2*(locations[,2]-.5) +
beta3*(locations[,1] - .5)^2 + beta4*(locations[,2] - .5)^2
theta3 <- matrix(theta3,nrow=nrow(locations),ncol=1)
return(theta3)
}
# Earlier parameterizations kept for reference:
#log.lam1.1<-theta2(n,-3,1,1,-6,-7)
#log.lam1.2<-theta2(n,-5,1,1,6,-4)
#logit.phi.1<-theta2(n,0,1,-2,0,1)
#log.lam2.1<-theta2(n,-1.65,0.5,0.5,0,0)
#log.lam2.2<-theta2(n,-2.8,-1,2,0,-7)
#logit.phi.2<-theta2(n,-3,-1,2,0,-1)
#log.lam2.1<-theta2(n,-3,-1,-1,-6,-7)
#log.lam2.2<-theta2(n,-5,-1,-1,6,-4)
#logit.phi.2<-theta2(n,0,-1,-2,0,1)
# Hard-coded anisotropy surfaces: log kernel eigenvalues (log.lam*.*) and a
# logit-scaled rotation angle (logit.phi.*) for each of the two processes.
log.lam1.1<-theta2(n,-3,1,1,-6,-7)
log.lam1.2<-theta2(n,-5,1,1,2,-12)
logit.phi.1<-theta2(n,0,1,-2,0,1)
log.lam2.1<-theta2(n,-3,-1,-1,-6,-7)
log.lam2.2<-theta2(n,-5,-1,-1,2,-12)
logit.phi.2<-theta2(n,0,-1,-2,0,1)
KERNEL_LIST <- list()
kernel.local <- array(0, dim = c(2, 2, nrow(locations)))
# Local 2x2 kernel at each location for process 1:
# rotation(phi) %*% diag(lam1, lam2) %*% t(rotation(phi)).
for(i in 1:nrow(locations)){
lam1 <- exp(log.lam1.1[i,])
lam2 <- exp(log.lam1.2[i,])
phi <- (pi/2)*exp(logit.phi.1[i,])/(1+exp(logit.phi.1[i,]))   # inverse-logit scaled to (0, pi/2)
Pmat <- matrix(c(cos(phi), -sin(phi), sin(phi), cos(phi)), nrow = 2, byrow = T)
Dmat <- diag(c(lam1, lam2))
Sigma <- Pmat %*% Dmat %*% t(Pmat)
kernel.local[, ,i] <- Sigma
}
KERNEL_LIST[[1]] <- kernel.local
# Same construction for process 2 (kernel.local is reused and overwritten).
for(i in 1:nrow(locations)){
lam1 <- exp(log.lam2.1[i,])
lam2 <- exp(log.lam2.2[i,])
phi <- (pi/2)*exp(logit.phi.2[i,])/(1+exp(logit.phi.2[i,]))
Pmat <- matrix(c(cos(phi), -sin(phi), sin(phi), cos(phi)), nrow = 2, byrow = T)
Dmat <- diag(c(lam1, lam2))
Sigma <- Pmat %*% Dmat %*% t(Pmat)
kernel.local[, ,i] <- Sigma
}
KERNEL_LIST[[2]] <- kernel.local
##Calculate Matern form Nonstationary Covariance function
# For every location pair (i, j):
#   Sigma.mat[i, j] = sqrt( sqrt(|K_i| |K_j|) / |(K_i + K_j)/2| )   (scaling)
#   Q.mat[i, j]     = Mahalanobis distance under the averaged kernel
# Inv_ij is the adjugate of the averaged kernel; dividing the quadratic form
# by det_ij makes it the true inverse.
FIN_Sigma.mat <- list()
dist0 <- list()
for(KK in 1:2){
Sigma.mat <- matrix(rep(NA, (n*t)^2), nrow = n*t)
Q.mat <- matrix(rep(NA, (n*t)^2), nrow = n*t)
Inv_ij <- matrix(rep(NA,4),2,2)
for (i in 1:nrow(locations)) {
#Sigma.mat[i, i] <- 1
#Q.mat[i, i] <- 0
Kernel_i <- KERNEL_LIST[[KK]][, , i]
det_i <- Kernel_i[1,1] * Kernel_i[2,2] - Kernel_i[1,2] * Kernel_i[2,1]
for (j in 1:nrow(locations)) {
Kernel_j <- KERNEL_LIST[[KK]][, , j]
det_j <- Kernel_j[1,1] * Kernel_j[2,2] - Kernel_j[1,2] * Kernel_j[2,1]
Kernel_ij <- 0.5 * (Kernel_i + Kernel_j)
Inv_ij[1,1] <- Kernel_ij[2,2]
Inv_ij[2,2] <- Kernel_ij[1,1]
Inv_ij[2,1] <- - Kernel_ij[2,1]
Inv_ij[1,2] <- - Kernel_ij[1,2]
det_ij <- Kernel_ij[1,1] * Kernel_ij[2,2] - Kernel_ij[1,2] * Kernel_ij[2,1]
x <- c(locations[i,1] - locations[j,1], locations[i,2] - locations[j,2])
Sigma.mat[i, j] <- sqrt(sqrt(det_i * det_j)/det_ij)
Q.mat[i, j] <- sqrt(t(x) %*% Inv_ij %*% x/det_ij)
#Sigma.mat[j, i] <- Sigma.mat[i, j]
#Q.mat[j, i] <- Q.mat[i, j]
}
}
FIN_Sigma.mat[[KK]] <- Sigma.mat
dist0[[KK]] <- Q.mat
}
# Cross terms, process 1 vs process 2 -> slots [[3]].
# NOTE(review): Sigma.mat, Q.mat and Inv_ij here are the objects left over
# from the last iteration of the KK loop above (R for-loops do not scope
# their locals); every entry is overwritten before use, but re-allocating
# them here would be safer.
for (i in 1:nrow(locations)) {
#Sigma.mat[i, i] <- 1
#Q.mat[i, i] <- 0
Kernel_i <- KERNEL_LIST[[1]][, , i]
det_i <- Kernel_i[1,1] * Kernel_i[2,2] - Kernel_i[1,2] * Kernel_i[2,1]
for (j in 1:nrow(locations)) {
Kernel_j <- KERNEL_LIST[[2]][, , j]
det_j <- Kernel_j[1,1] * Kernel_j[2,2] - Kernel_j[1,2] * Kernel_j[2,1]
Kernel_ij <- 0.5 * (Kernel_i + Kernel_j)
Inv_ij[1,1] <- Kernel_ij[2,2]
Inv_ij[2,2] <- Kernel_ij[1,1]
Inv_ij[2,1] <- - Kernel_ij[2,1]
Inv_ij[1,2] <- - Kernel_ij[1,2]
det_ij <- Kernel_ij[1,1] * Kernel_ij[2,2] - Kernel_ij[1,2] * Kernel_ij[2,1]
x <- c(locations[i,1] - locations[j,1], locations[i,2] - locations[j,2])
Sigma.mat[i, j] <- sqrt(sqrt(det_i * det_j)/det_ij)
Q.mat[i, j] <- sqrt(t(x) %*% Inv_ij %*% x/det_ij)
#Sigma.mat[j, i] <- Sigma.mat[i, j]
#Q.mat[j, i] <- Q.mat[i, j]
}
}
FIN_Sigma.mat[[3]] <- Sigma.mat
dist0[[3]] <- Q.mat
# Cross terms, process 2 vs process 1 -> slots [[4]].
for (i in 1:nrow(locations)) {
#Sigma.mat[i, i] <- 1
#Q.mat[i, i] <- 0
Kernel_i <- KERNEL_LIST[[2]][, , i]
det_i <- Kernel_i[1,1] * Kernel_i[2,2] - Kernel_i[1,2] * Kernel_i[2,1]
for (j in 1:nrow(locations)) {
Kernel_j <- KERNEL_LIST[[1]][, , j]
det_j <- Kernel_j[1,1] * Kernel_j[2,2] - Kernel_j[1,2] * Kernel_j[2,1]
Kernel_ij <- 0.5 * (Kernel_i + Kernel_j)
Inv_ij[1,1] <- Kernel_ij[2,2]
Inv_ij[2,2] <- Kernel_ij[1,1]
Inv_ij[2,1] <- - Kernel_ij[2,1]
Inv_ij[1,2] <- - Kernel_ij[1,2]
det_ij <- Kernel_ij[1,1] * Kernel_ij[2,2] - Kernel_ij[1,2] * Kernel_ij[2,1]
x <- c(locations[i,1] - locations[j,1], locations[i,2] - locations[j,2])
Sigma.mat[i, j] <- sqrt(sqrt(det_i * det_j)/det_ij)
Q.mat[i, j] <- sqrt(t(x) %*% Inv_ij %*% x/det_ij)
#Sigma.mat[j, i] <- Sigma.mat[i, j]
#Q.mat[j, i] <- Q.mat[i, j]
}
}
FIN_Sigma.mat[[4]] <- Sigma.mat
dist0[[4]] <- Q.mat
# Assemble the 2 x 2 block covariance. Diagonal blocks: nonstationary Matern
# with smoothness nu[i]; off-diagonal blocks: average smoothness nu3 with
# colocated correlation rho (parsimonious multivariate Matern form).
# ifelse() evaluates both branches, so besselK at zero distance produces
# Inf/NaN values that are computed and then discarded.
nu=theta[1:2]
beta=theta[3]
rot=theta[4]
Beta=matrix(0,q,q)
diag(Beta)=1
Beta[2,1]=rot
Beta[1,2]=rot
S=matrix(NA, q*dim(dist0[[1]])[1], q*dim(dist0[[1]])[1])
for(i in 1:q){
for(j in 1:q){
temp=(i-1)*dim(dist0[[1]])[1]+1:dim(dist0[[1]])[1]
temp1=(j-1)*dim(dist0[[1]])[1]+1:dim(dist0[[1]])[1]
if(i==j){
temp2=ifelse(dist0[[i]]!=0,FIN_Sigma.mat[[i]]*(dist0[[i]]/beta)^nu[i] * besselK(dist0[[i]]/beta,nu[i])/(2^(nu[i]-1)*gamma(nu[i])),FIN_Sigma.mat[[i]])
S[temp,temp1]=temp2
}
if(i!=j & i<j){
nu1=nu[i]
nu2=nu[j]
nu3=(nu[i]+nu[j])/2
rho=Beta[i,j]*(gamma(nu1+3/2)/gamma(nu1))^(1/2) * (gamma(nu2+3/2)/gamma(nu2))^(1/2)*gamma(nu3)/(gamma(nu3+3/2))
#rho <- Beta[i,j]*gamma(nu3)/sqrt(gamma(nu1)*gamma(nu2))
lai=ifelse(dist0[[3]]!=0 ,(dist0[[3]]/beta)^nu3 * besselK(dist0[[3]]/beta,nu3)/(2^(nu3-1)*gamma(nu3))*sqrt(FIN_Sigma.mat[[i]] * FIN_Sigma.mat[[j]])*rho,sqrt(FIN_Sigma.mat[[i]] * FIN_Sigma.mat[[j]])*rho)
S[temp,temp1]=lai
#S[temp1,temp]=t(lai)
}
if(i!=j & i>j){
nu1=nu[i]
nu2=nu[j]
nu3=(nu[i]+nu[j])/2
rho=Beta[i,j]*(gamma(nu1+3/2)/gamma(nu1))^(1/2) * (gamma(nu2+3/2)/gamma(nu2))^(1/2)*gamma(nu3)/(gamma(nu3+3/2))
#rho <- Beta[i,j]*gamma(nu3)/sqrt(gamma(nu1)*gamma(nu2))
lai=ifelse(dist0[[4]]!=0 ,(dist0[[4]]/beta)^nu3 * besselK(dist0[[4]]/beta,nu3)/(2^(nu3-1)*gamma(nu3))*sqrt(FIN_Sigma.mat[[i]] * FIN_Sigma.mat[[j]])*rho,sqrt(FIN_Sigma.mat[[i]] * FIN_Sigma.mat[[j]])*rho)
S[temp,temp1]=lai
#S[temp1,temp]=t(lai)
}
}
}
return(S)
}
# v4 was a byte-identical copy of matern_cov_regular_grid (same hard-coded
# anisotropy surfaces, same parameter layout theta = c(nu1, nu2, beta, rot),
# same reliance on the global `sim_grid_locations`). Delegating to the
# original removes ~180 duplicated lines and guarantees the two entry points
# cannot drift apart.
matern_cov_regular_grid_v4 <-function(theta,wind,time){
matern_cov_regular_grid(theta, wind, time)
}
#--------------------------------------------------------------------------#
matern_cov_regular_grid_v2_for_estimation_sim_step1 <-function(theta,Q.mat1,Q.mat2,Q.mat3){
# Bivariate (q = 2) Matern cross-covariance from precomputed distance
# matrices: Q.mat1 / Q.mat2 are the within-process distance matrices and
# Q.mat3 the cross-process one.
#
# theta = c(nu1, nu2, beta, rot, var1, var2): per-process smoothness, a
# common range parameter, the colocated cross-correlation, and the marginal
# variances.
#
# Returns the (2 * nrow(Q.mat1)) square block covariance matrix.
#
# NOTE(review): the diagonal blocks are pure Matern correlations except that
# zero-distance entries are set to var[i], while the off-diagonal blocks are
# scaled by sqrt(var1 * var2) everywhere -- confirm this asymmetry is
# intentional.
q <- 2L
dmats <- list(Q.mat1, Q.mat2, Q.mat3)
smooth_par <- theta[1:2]
range_par <- theta[3]
sill <- theta[5:6]
colo_cor <- diag(q)
colo_cor[1, 2] <- colo_cor[2, 1] <- theta[4]
n_loc <- nrow(dmats[[1]])
# Matern correlation; at d == 0 this yields Inf/NaN, which ifelse() discards.
mcor <- function(d, nu) {
(d / range_par)^nu * besselK(d / range_par, nu) / (2^(nu - 1) * gamma(nu))
}
S <- matrix(NA_real_, q * n_loc, q * n_loc)
for (a in seq_len(q)) {
ridx <- (a - 1L) * n_loc + seq_len(n_loc)
for (b in seq_len(a)) {
cidx <- (b - 1L) * n_loc + seq_len(n_loc)
if (a == b) {
S[ridx, cidx] <- ifelse(dmats[[a]] != 0, mcor(dmats[[a]], smooth_par[a]), sill[a])
} else {
nu_ab <- (smooth_par[a] + smooth_par[b]) / 2
rho <- colo_cor[a, b] *
(gamma(smooth_par[a] + 3/2) / gamma(smooth_par[a]))^(1/2) *
(gamma(smooth_par[b] + 3/2) / gamma(smooth_par[b]))^(1/2) *
gamma(nu_ab) / (gamma(nu_ab + 3/2))
amp <- sqrt(sill[a] * sill[b]) * rho
blk <- ifelse(dmats[[3]] != 0, mcor(dmats[[3]], nu_ab) * amp, amp)
S[ridx, cidx] <- blk
S[cidx, ridx] <- t(blk)
}
}
}
return(S)
}
matern_cov_regular_grid_for_estimation_sim_v2 <-function(theta,Q.mat){
# Stationary bivariate (q = 2) Matern cross-covariance built from a single
# precomputed distance matrix Q.mat.
#
# theta = c(nu1, nu2, beta, rot, var1, var2):
#   nu1, nu2 - Matern smoothness of each process
#   beta     - common range parameter
#   rot      - colocated cross-correlation between the two processes
#   var1/2   - marginal variances (scale the cross blocks everywhere but the
#              diagonal blocks only at zero distance -- NOTE(review): confirm
#              that asymmetry is intentional)
#
# Returns the (2 * nrow(Q.mat)) square block covariance matrix.
nproc <- 2L
n_pts <- nrow(Q.mat)
smoothness <- theta[1:2]
range_par <- theta[3]
sill <- theta[5:6]
cross_cor <- diag(nproc)
cross_cor[1, 2] <- cross_cor[2, 1] <- theta[4]
# Matern correlation; at d == 0 this yields Inf/NaN, which ifelse() discards.
mcor <- function(d, nu) {
(d / range_par)^nu * besselK(d / range_par, nu) / (2^(nu - 1) * gamma(nu))
}
out <- matrix(NA_real_, nproc * n_pts, nproc * n_pts)
for (a in seq_len(nproc)) {
ridx <- (a - 1L) * n_pts + seq_len(n_pts)
for (b in seq_len(a)) {
cidx <- (b - 1L) * n_pts + seq_len(n_pts)
if (a == b) {
out[ridx, cidx] <- ifelse(Q.mat != 0, mcor(Q.mat, smoothness[a]), sill[a])
} else {
nu_ab <- (smoothness[a] + smoothness[b]) / 2
rho <- cross_cor[a, b] *
(gamma(smoothness[a] + 3/2) / gamma(smoothness[a]))^(1/2) *
(gamma(smoothness[b] + 3/2) / gamma(smoothness[b]))^(1/2) *
gamma(nu_ab) / (gamma(nu_ab + 3/2))
scale_ab <- sqrt(sill[a] * sill[b]) * rho
blk <- ifelse(Q.mat != 0, mcor(Q.mat, nu_ab) * scale_ab, scale_ab)
out[ridx, cidx] <- blk
out[cidx, ridx] <- t(blk)
}
}
}
return(out)
}
|
931bd68adfac1a75c4d01f0350642c716b8ff2a7
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/base/settings/man/read.settings.Rd
|
136aad2d1613c0d35bbfe379a33d23286ab81f1c
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| true
| 1,465
|
rd
|
read.settings.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.settings.R
\name{read.settings}
\alias{read.settings}
\title{Loads PEcAn settings file}
\usage{
read.settings(inputfile = "pecan.xml")
}
\arguments{
\item{inputfile}{the PEcAn settings file to be used.}
}
\value{
list of all settings as loaded from the XML file(s)
}
\description{
This will try and find the PEcAn settings file in the following order:
\enumerate{
\item \verb{--settings <file>} passed as a command-line argument
\item \code{inputfile} passed as an argument to this function
\item the \code{PECAN_SETTINGS} environment variable, pointing to a specific file
\item \code{pecan.xml} in the current folder
}
}
\details{
Once the function finds a valid file, it will not look further.
Thus, if \code{inputfile} is supplied, \code{PECAN_SETTINGS} will be
ignored.
Even if an \code{inputfile} argument is passed, it will be ignored if a file
is passed through a higher priority method.
}
\examples{
\dontrun{
## bash shell:
## example workflow.R and pecan.xml files in pecan/tests
R --vanilla -- --settings path/to/mypecan.xml < workflow.R
## R:
settings <- read.settings()
settings <- read.settings(inputfile = "willowcreek.xml")
test.settings.file <- system.file("tests/test.xml", package = "PEcAn.all")
settings <- read.settings(test.settings.file)
}
}
\author{
Shawn Serbin
Rob Kooper
David LeBauer
Ryan Kelly
Betsy Cowdery
}
|
75b158c502220b2c8874cbfb35628e336096c572
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rfast/man/is_element.Rd
|
8abc762546e4bfb8406f5de60190b039ca7f9332
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 978
|
rd
|
is_element.Rd
|
\name{Find element}
\alias{is_element}
\title{
Find element
}
\description{
Search a value in an unordered vector.
}
\usage{
is_element(x, key)
}
\arguments{
\item{x}{
A vector or matrix with the data.
}
\item{key}{
A value to check if exists in the vector x.
}
}
\details{
Checks whether the key exists in the vector and returns TRUE/FALSE accordingly. It is fast for unordered vectors; if the vector is sorted, use binary_search instead. The function is written in C++ in order to be as fast as possible.
}
\value{
TRUE if the value is found, FALSE otherwise.
}
%\references{
%}
\author{
Manos Papadakis
R implementation and documentation: Manos Papadakis <papadakm95@gmail.com>.
}
\seealso{
\code{ \link{binary_search} (built-in R function)
}
}
\examples{
x <- rnorm(500)
key <- x[50]
b <- is_element(x, key)
}
\keyword{ Find element }
\keyword{ Divide and Conquer }
\keyword{ Linear time }
|
f8dddbbb36e35bf644161175556fb9ec1c6b65ff
|
7f3ebc404f3e2a93a267eec0cc2f536e01bc0cab
|
/app/Transportation.R
|
af104a039682ad84535115f13f475ef1c33f37a7
|
[] |
no_license
|
DDDaiii/Shiny-App-Development---SuperHunt
|
f163650138b8ecbb3a503a416f7eb8bce28fa905
|
16794991929ea5e30292607ff5bbd3a6aace0b25
|
refs/heads/master
| 2020-12-10T05:57:02.698811
| 2020-02-10T01:25:17
| 2020-02-10T01:25:17
| 233,519,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,575
|
r
|
Transportation.R
|
# Shiny app: rate the public-transit convenience of a Manhattan address from
# bus/subway stop counts per ZIP code.
library(shiny)
library(ggmap)
library(dplyr)
library(leaflet)
library(revgeo)
###Load data###
# NOTE(review): absolute, machine-specific paths mean the app only runs on
# the author's machine; ship the CSVs alongside the app and use relative
# paths instead.
bus_count = read.csv('/Users/MacBook/Desktop/Data/bus_count.csv')
subway_count = read.csv('/Users/MacBook/Desktop/Data/subway_count.csv')
bus_stops = read.csv('/Users/MacBook/Desktop/Data/bus_stops.csv')
subway_stops = read.csv('/Users/MacBook/Desktop/Data/subway_stops.csv')
job = read.csv('/Users/MacBook/Desktop/job.csv')
###Boroughs###
# Map each ZIP code to a Manhattan neighborhood name; any ZIP not listed
# falls through to "Inwood and Washington Heights".
# NOTE(review): the same ifelse cascade is duplicated for subway_count and
# bus_count -- a single zipcode -> neighborhood lookup table merged into both
# data frames would avoid the duplication.
subway_count <-
transform(
subway_count,
Boroughs =
ifelse( zipcode %in% c(10026,10027, 10030, 10037, 10039) , 'Central Harlem' ,
ifelse( zipcode %in% c(10001, 10011, 10018, 10019, 10020, 10036) , 'Chelsea and Clinton' ,
ifelse( zipcode %in% c(10029, 10035) , 'East Harlem' ,
ifelse( zipcode %in% c(10010, 10016, 10017, 10022), 'Gramercy Park and Murray Hill',
ifelse( zipcode %in% c(10004, 10005, 10006, 10007, 10038, 10280), 'Lower Manhattan' ,
ifelse( zipcode %in% c(10002, 10003, 10009), 'Lower East Side' ,
ifelse( zipcode %in% c(10021, 10028, 10044, 10065, 10075, 10128), 'Upper East Side' ,
ifelse( zipcode %in% c(10023, 10024, 10025), 'Upper West Side', 'Inwood and Washington Heights')))))))))
# Interactive sanity checks (print when sourced).
str(subway_count)
subway_count$Boroughs
bus_count <-
transform(
bus_count,
Boroughs =
ifelse( zipcode %in% c(10026,10027, 10030, 10037, 10039) , 'Central Harlem' ,
ifelse( zipcode %in% c(10001, 10011, 10018, 10019, 10020, 10036) , 'Chelsea and Clinton' ,
ifelse( zipcode %in% c(10029, 10035) , 'East Harlem' ,
ifelse( zipcode %in% c(10010, 10016, 10017, 10022), 'Gramercy Park and Murray Hill',
ifelse( zipcode %in% c(10004, 10005, 10006, 10007, 10038, 10280), 'Lower Manhattan' ,
ifelse( zipcode %in% c(10002, 10003, 10009), 'Lower East Side' ,
ifelse( zipcode %in% c(10021, 10028, 10044, 10065, 10075, 10128), 'Upper East Side' ,
ifelse( zipcode %in% c(10023, 10024, 10025), 'Upper West Side', 'Inwood and Washington Heights')))))))))
str(bus_count)
bus_count$Boroughs
# UI: one free-text address input plus a verbatim text output that shows the
# transit-convenience classification computed in server().
ui = fluidPage(
# Copy the line below to make a text input box
textInput("text", label = h3("Transportation"), value = "123 West 116th Street"),
hr(),
fluidRow(column(3, verbatimTextOutput("value")))
)
# Server: geocode the entered address, find its ZIP code, and classify the
# ZIP's combined bus + subway stop count as Convenient / Not bad / Terrible.
server = function(input, output) {
  output$value <- renderPrint({
    # BUG FIX: was geocode('input$text'), which geocoded the literal string
    # "input$text" rather than the widget's value.
    geodata = geocode(input$text)
    geodata = as.data.frame(geodata)
    str(geodata)
    # Reverse-geocode the coordinates to a ZIP code.
    # SECURITY NOTE(review): the Google API key is hard-coded in the source;
    # move it to an environment variable before sharing/publishing this app.
    zip = revgeo(longitude=geodata$lon, latitude=geodata$lat, provider = 'google', API = "AIzaSyBiAeAiiRtpYFflQxXa5S9vr6sOM0wZBGQ", output = 'hash', item = 'zip')
    # Look up the stop counts for this ZIP only (the original looped over
    # every (subway, bus) row pair and printed once per pair; it also used
    # `if (i + j == c(2:4))`, a length-3 condition that errors in R >= 4.2).
    subway_n = subway_count$V1[which(subway_count$zipcode == as.numeric(zip))]
    bus_n = bus_count$count[which(bus_count$zipcode == as.numeric(zip))]
    total = sum(subway_n, bus_n)  # sum() tolerates zero-length matches
    if (total >= 5) {
      print('Convenient')
    } else if (total %in% 2:4) {
      print('Not bad')
    } else {
      print('Terrible')
    }
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
bd07e8aa74c8fae5f8309fa4dad43d677e3c0431
|
1adbabec184f1ef6ee5e55462b03e4d19c71dce6
|
/Simulation study code/Old versions/gen_sbatch_mice.R
|
04f18375db65116f9a0b12fa83733e8e4ac6c5e9
|
[] |
no_license
|
mayamathur/multiple_outcomes
|
61434c2cbefaf966bdfe789db4513ab874833fed
|
6a62511807e8aea155de62e4d95f13a893832e9f
|
refs/heads/master
| 2023-04-06T20:09:04.094926
| 2023-03-16T18:28:04
| 2023-03-16T18:28:04
| 117,344,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,341
|
r
|
gen_sbatch_mice.R
|
########################### SET SIMULATION PARAMETERS MATRIX ###########################
# Build the grid of simulation scenarios for the MICE simulation study, write
# it to scen_params.csv, then generate one sbatch script per scenario
# replicate and submit to the Slurm scheduler.
# NOTE(review): sourcing this script has side effects -- it changes the
# working directory and writes files to the cluster filesystem.
# FOR CLUSTER USE
path = "/share/PI/manishad/multTest"
setwd(path)
# FOR LOCAL USE
# path = "~/Dropbox/Personal computer/HARVARD/THESIS/Thesis paper #2 (MO)/Sandbox/2018-1-13"
# setwd(path)
# Per-scenario design parameters: sample size, number of predictors/outcomes,
# and the three correlation settings.
n = 1000
nX = 1
nY = 40
rho.XX = 0
rho.YY = c(0.25, 0)
rho.XY = c(0.05, 0) # null hypothesis: 0
half = c(0) # exchangeable vs. half-correlated matrix
# scenarios from OLS
# rho.YY = c(0.25, 0.5, 0)
# rho.XY = c(0.02, 0.05, 0) # null hypothesis: 0
# half = c(0, 1) # exchangeable vs. half-correlated matrix
# bootstrap iterates and type
boot.reps = 1000
bt.type = c( "MICE.H0" )
# matrix of scenario parameters: full crossing of all settings above
scen.params = expand.grid( bt.type, n, nX, nY, rho.XX, rho.YY, rho.XY, half )
names(scen.params) = c( "bt.type", "n", "nX", "nY", "rho.XX", "rho.YY", "rho.XY", "half" )
# name the scenarios with single letters
# remove letters that are privileged variables in R ("T", "F") or a common
# loop index ("i")
letter.names = c(letters, LETTERS)[ ! c(letters, LETTERS) %in% c("i","T","F") ]
scen.params$scen.name = letter.names[ 1:dim(scen.params)[1] ]
n.scen = length(scen.params[,1])
# we don't need scenarios with rho.XY = 0 and half = 1 because redundant with
# scenarios where rho.XY = 0 and half = 0
scen.to.toss = scen.params$scen.name[ scen.params$rho.XY == 0 & scen.params$half == 1 ]
scen.params = scen.params[ ! scen.params$scen.name %in% scen.to.toss, ]
# write the csv file of params (to Sherlock)
write.csv( scen.params, "scen_params.csv" )
########################### GENERATE SBATCHES ###########################
# load functions for generating sbatch files (defines generateSbatch below)
source("functions.R")
# number of sbatches to generate (i.e., iterations within each scenario)
n.reps.per.scen = 500
n.reps.in.doParallel = 1
n.files = ( n.reps.per.scen / n.reps.in.doParallel ) * n.scen
path = "/share/PI/manishad/multTest"
# one entry per sbatch file; scenarios are repeated so that each scenario
# gets n.files / n.scen replicate jobs
scen.name = rep( scen.params$scen.name, each = ( n.files / n.scen ) )
jobname = paste("job", 1:n.files, sep="_")
outfile = paste("rm_", 1:n.files, ".out", sep="")
errorfile = paste("rm_", 1:n.files, ".err", sep="")
write_path = paste(path, "/sbatch_files/", 1:n.files, ".sbatch", sep="")
runfile_path = paste(path, "/testRunFile.R", sep="")
# was 5 hours with 2K bootstrap and n = 5e4
sbatch_params <- data.frame(jobname,
outfile,
errorfile,
jobtime = "6:00:00",
quality = "normal",
node_number = 1,
mem_per_node = 64000,
mailtype = "NONE",
user_email = "mmathur@stanford.edu",
tasks_per_node = 16,
cpus_per_task = 1,
path_to_r_script = paste(path, "/doParallel_mice.R", sep=""),
args_to_r_script = paste("--args", jobname, scen.name, boot.reps, sep=" "),
write_path,
stringsAsFactors = F,
server_sbatch_path = NA)
generateSbatch(sbatch_params, runfile_path)
# run them all
# NOTE(review): the loop below iterates over 1:1, so only the FIRST of the
# n.files generated scripts is actually submitted -- change to 1:n.files to
# truly submit them all.
setwd( paste(path, "/sbatch_files", sep="") )
for (i in 1:1) {
system( paste("sbatch -p normal,owners /share/PI/manishad/multTest/sbatch_files/", i, ".sbatch", sep="") )
}
|
7045db0bc1ef8c8a08159dce07689d9c31990d21
|
a128e97f5ed10bd283a05b329a1e96d3be4ed7f8
|
/R_Basic&ggplot/R_Base_Review.R
|
bfd987f3dd6be7c385a061aed73e3d2a225ce3df
|
[] |
no_license
|
dandipeng/R_Project
|
4e870f71b1b3ba7b698ab55f163747011bb5e916
|
a6840ef61cd7e14e5d074e256bb1934ae235fe89
|
refs/heads/master
| 2020-06-04T16:20:31.429094
| 2019-07-01T22:29:18
| 2019-07-01T22:29:18
| 192,100,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,352
|
r
|
R_Base_Review.R
|
# R Project Review
# Interactive tutorial/review of base R and ggplot2 basics. Meant to be run
# line by line in a session, not sourced: many lines just print results, and
# the data files ('data/dogs/...', 'mortality.xls') are local to the author.
# get directory
getwd()
# reset directory
setwd('Documents/Programming/R_Project/R_Basic_ggplot/')
# list files/directories in a directory
list.files()
setwd('..') # set directory to the parent one
list.dirs()
# reset to what we want
setwd('R_Basic_ggplot/')
# list all items in current global environment
ls()
# read data inside
dogs <- readRDS('data/dogs/dogs_full.rds')
# read xls
library(readxl)
mortality = read_excel("mortality.xls")
# read from website
bone = read.table("https://web.stanford.edu/~hastie/ElemStatLearn/datasets/bone.data",
header = TRUE)
# dataset structure
head(dogs,5)
nrow(dogs)
ncol(dogs)
dim(dogs)
# first scanning
str(dogs) # most useful one in my opinion
sapply(dogs, typeof)
summary(dogs)
?IQR # interquartile range
IQR(dogs$weight,na.rm = T) # = quantile(dogs$weight,3/4,na.rm=T)-quantile(dogs$weight,1/4,na.rm=T)
dogs[order(dogs$weight),]
sort(dogs$weight)
?order
?na.omit
na.omit(dogs[order(dogs$weight),]$weight)
sort(rivers)
IQR(rivers)
quantile(rivers)
# count numbers for categorical numbers
table(dogs$size)
# select out rows or columns
dogs[, "breed"]
dogs[c(1:3,5),]
dogs[1:3,-c(1:12)]
library(ggplot2)
# Scatter of DataDog score vs. AKC popularity, labeled by breed; y-axis is
# reversed so rank 1 (most popular) sits at the top.
ggplot(data=dogs, aes(x=datadog, y =popularity,color=group))+geom_point()+
geom_text(aes(label=breed),hjust='left',vjust='top') + scale_y_reverse()+
labs(title = "Best in Show", x = "Computed DataDog Score",
y = "AKC Popularity Ranking")+
guides(color = guide_legend(title='Dog Type'))
# The default stat for points is "identity", which doesn't transform the data
# at all.
#
# In this case we want to count up the groups, so instead of "identity", we use
# the default stat from geom_bar, which is "count".
ggplot(dogs, aes(x = group)) + geom_point(stat = "count")
# When you make a vector, R converts to the highest type (see below)
typeof(c(5, "hello", 4.1))
# Type Hierarchy
# --------------
# lists -- list()
# character -- "hello"
# complex -- 1+4i, ...
# double -- 3.1, 5.222, ...
# integers -- 1L, 2L, ...
# logical -- TRUE, FALSE
# ------------
# Outside the hierarchy:
# functions -- mean, median, typeof, ...
# all() : Given a set of logical vectors, are all of the values true?
all(dogs[[4]] == dogs$popularity_all)
dogs[[4]][[3]] # same as dogs[[3, 4]]
# You can see the class(es) of an object with class():
class(dogs)
# You can remove the class(es) of an object with unclass().
#
# unclass() lets you see how the object looks "under the hood".
#
# So if we unclass() a data frame, we can see that it looks like a named list.
unclass(dogs)
# Create A named list, for comparison:
list(x = 1, y = 2)
# rename levels
# Get category names with levels():
levels(dogs$size)
dogs$size[20:30]
# Rename categories:
levels(dogs$size) = c("HUGE", "Medium", "Small") # this will correspond to the output of levels(dogs$size)
levels(dogs$size)
dogs$size[20:30]
# Re-read the data to undo the level renaming above.
dogs <- readRDS('data/dogs/dogs_full.rds')
# change variables from factor to numeric
# Concern! first as.character, then as.numeric
# NOTE(review): the line below is a template only -- as written it has no
# argument and returns numeric(0); wrap a factor, e.g. as.numeric(as.character(f)).
as.numeric(as.character())
# Right way to reorder levels (size_fix is illustrative and not used later):
size_fix = factor(dogs$size, c("small", "medium", "large"))
# Make table() show NA as a category:
table(dogs$kids) # this will ignore NA
table(dogs$kids, useNA = "always")
?fivenum # Returns Tukey's five number summary (minimum, lower-hinge, median, upper-hinge, maximum) for the input data
fivenum(dogs$datadog)
# A boxplot shows Tukey's five number summary graphically:
ggplot(dogs, aes(y = datadog)) + geom_boxplot()
# A histogram cuts the data into bins, and shows the count for each bin.
ggplot(dogs, aes(x = datadog)) + geom_histogram()
# A density plot is a smoothed histogram.
ggplot(dogs, aes(x = datadog)) + geom_density()
ggplot(dogs, aes(x = datadog)) + geom_density(bw = 0.01)
# show skewed situation: mark mean vs. median on the density plot
mu = mean(dogs$datadog, na.rm = T)
m = median(dogs$datadog, na.rm = T)
ggplot(dogs, aes(x = datadog)) + geom_density() +
geom_vline(aes(xintercept = m)) +
geom_vline(aes(xintercept = mu), color = "red") +
geom_text(aes(x = m, y = 0.2, label = 'median'), angle=90, vjust = 1, text=element_text(size=11))+
geom_text(aes(x = mu, y = 0.2, label = 'mean'),color = 'red', angle=90, vjust = -0.4, text=element_text(size=11))
# pivot table
# (categorical, categorical) -> frequencies
#
# Similar to the univariate case!
#
tbl = table(size = dogs$size, group = dogs$group)
tbl
# add sum column and row
addmargins(tbl)
# give proportion
prop.table(tbl) # total proportions
prop.table(tbl, margin = 1) # proportions row-wise
prop.table(tbl, margin = 2) # proportions column-wise
# A 2d density plot shows where most of the points are at.
ggplot(dogs, aes(height, weight)) + geom_density2d() + geom_point()
# We can also check for linear relationship with correlation:
cor(dogs$height, dogs$weight, use = "complete.obs") # to avoid NA
ggplot(dogs, aes(x = size, y = height)) + geom_boxplot()
# Aggregation -- computing statistics on groups
# |-----------|-- relationship of interest
# v v v---- data
aggregate(height ~ size, dogs, mean, na.rm = TRUE)
# ^---- statistic
# You can group by more than one categorical variable:
aggregate(height ~ size + grooming, dogs, mean, na.rm = TRUE)
# ^ ^
# |------|-------- compute statistic for all combinations
# Alternative syntax:
aggregate(dogs$height, list(dogs$size, dogs$grooming), mean)
aggregate(dogs$height, dogs[c("size", "grooming")], mean)
# Dog height distributions, faceted by size:
ggplot(dogs, aes(height)) + geom_histogram() + facet_wrap(~ size)
# Dog height distributions, faceted by grooming needs and size:
ggplot(dogs, aes(height)) + geom_histogram() + facet_grid(grooming ~ size)
# If we use a density plot, how can we display the groups?
ggplot(dogs, aes(color = group, height)) + geom_density()
# Too many lines! We can use a ridge plot instead to show many densities at
# once.
library(ggridges)
ggplot(dogs, aes(x = height, y = group)) + geom_density_ridges()
# Putting ggplots side-by-side:
g1 = ggplot(anscombe, aes(x1, y1)) + geom_point()
g2 = ggplot(anscombe, aes(x2, y2)) + geom_point()
g3 = ggplot(anscombe, aes(x3, y3)) + geom_point()
g4 = ggplot(anscombe, aes(x4, y4)) + geom_point()
library(gridExtra)
grid.arrange(g1, g2, g3, g4, ncol = 2, nrow = 2)
# If we want to make our own bins:
# * cut() -- built-in
# * cut_interval(), cut_number(), cut_width() -- in ggplot2
# cut continuous variables into several intervals
# match(M,N) method: give indexes of M in N
match(c("A","D"),c("A","B","C","D","E")) # 1 4
match(c("A","D"),c("A","B","C","D","E")) # also 1 4 -- NOTE(review): to get "1 NA" the key must be absent from N, e.g. match(c("A","F"), ...)
#read csv
air = read.csv('data/airlines/2018.01_air_delays.csv', header = T)
head(air)
names(air)
dim(air)
summary(air)
str(air)
# quite messy for every variables
# Recode DAY_OF_WEEK (numeric codes) to day names via a lookup table.
day_of_week = factor(air$DAY_OF_WEEK)
class(air$DAY_OF_WEEK)
# NOTE(review): trailing underscore in the filename below looks accidental --
# confirm the file is really named "L_WEEKDAYS.csv_".
days = read.csv("data/airlines/L_WEEKDAYS.csv_")
days
str(days)
levels(day_of_week)
m = match(day_of_week, days$Code)
day_of_week = days$Description[m]
air$DAY_OF_WEEK = day_of_week
# A flight counts as on time when its arrival delay is <= 0 minutes.
air$ontime <- air$ARR_DELAY<=0
ggplot(air,aes(ontime, fill = OP_UNIQUE_CARRIER))+
geom_bar(position = "dodge")
ggplot(air,aes(ontime, fill = OP_UNIQUE_CARRIER))+
geom_bar(position = "stack")
air_complete = air[complete.cases(air), ]
str(air_complete) # no observations
str(air[rowSums(is.na(air['WEATHER_DELAY']))==0,]) # exclude the NA noise
# What kinds of questions can we ask (or answer) with the airlines data?
#
# * What airports are most likely to have delays? Or least likely?
aggregate(height ~ size + grooming, dogs, mean, na.rm = TRUE)
# Number of flights per carrier (DAY_OF_WEEK is just a convenient column to
# count with FUN = length), sorted ascending.
num_delay_air <- aggregate(DAY_OF_WEEK~OP_UNIQUE_CARRIER,air,FUN=length)
num_delay_air <- num_delay_air[order(num_delay_air$DAY_OF_WEEK),]
ggplot(num_delay_air,aes(x=OP_UNIQUE_CARRIER,y=DAY_OF_WEEK))+
geom_bar(stat='identity')
ggplot(air,aes(x=ontime,fill=OP_UNIQUE_CARRIER))+geom_bar(position='dodge')
check_num_delay <- table(air$ontime,air$OP_UNIQUE_CARRIER)
# Carrier with the largest count in the second row (ontime == TRUE) of the
# table; which(order(...) == 1) picks the position ranked first.
colnames(check_num_delay)[which(order(check_num_delay[2,],decreasing = T)==1)]
# * Check for seasonal delays (but we would need data on more months)
# * What area or region is most likely to have delays?
# * What are the main causes of delay?
# * How often does weather cause a delay?
# * Does a delay on one flight cause later delays (for the same plane)?
na.omit(air)
|
a1503369d544a1499957ed6aeda53f16a1b6f728
|
597abe32c64a0c065f99fe3fdc1dcd637549777f
|
/R/check_funs.R
|
9dc5545884f166a71d1bdc0ac4e2ff98835509e5
|
[] |
no_license
|
HuidongTian/nordcanpreprocessing
|
e63a5f0204097f13dfec087f41aa7a0d39263103
|
77a82e31728fba63e4602de14a9b1e433fea6f06
|
refs/heads/master
| 2022-11-01T14:27:51.903385
| 2020-04-16T11:46:18
| 2020-04-16T11:46:18
| 273,481,284
| 0
| 0
| null | 2020-06-19T11:50:25
| 2020-06-19T11:50:25
| null |
UTF-8
|
R
| false
| false
| 1,355
|
r
|
check_funs.R
|
#' @importFrom easyassertions assert_is_data.table_with_required_names
check_nordcan_cancer_case_dataset <- function(
  x,
  check_col_nms = nordcancore::nordcan_col_nms()
) {
  # Validate that the requested column names are a legal set of NORDCAN
  # column names.
  # BUG FIX: the original called this with `test_col_nms`, a name that does
  # not exist anywhere in this scope; the argument is named `check_col_nms`.
  nordcancore::assert_is_set_of_nordcan_col_nms(check_col_nms)
  # `x` must be a data.table that contains (at least) the requested columns.
  easyassertions::assert_is_data.table_with_required_names(
    x,
    required_names = check_col_nms
  )
  # Produce a column-by-column report on the dataset contents.
  report <- report_on_nordcan_cancer_case_dataset(
    x = x,
    report_col_nms = check_col_nms
  )
  # TODO: go through the results, raise errors where tests do not pass
  invisible(NULL)
}
# Compare the observed levels of a categorical column against the level
# space defined for it by nordcancore. Currently only computes the sets of
# unexpected / absent levels; error-raising is still to be implemented.
check_categorical_column <- function(values, col_nm = "sex") {
  allowed <- nordcancore::get_column_level_space(col_nm)[[1L]]
  seen <- sort(unique(values))
  unexpected <- setdiff(seen, allowed)
  absent <- setdiff(allowed, seen)
  # TODO: raise appropriate errors based on `unexpected` / `absent`
  invisible(NULL)
}
# Validate the `sex` column against its defined level space.
check_sex <- function(values) {
  check_categorical_column(values, "sex")
}
# Validate the `region` column against its defined level space.
check_region <- function(values) {
  check_categorical_column(values, "region")
}
# Validate the `nuts` column against its defined level space.
check_nuts <- function(values) {
  check_categorical_column(values, "nuts")
}
# Validate the `agegroup` column against its defined level space.
check_agegroup <- function(values) {
  check_categorical_column(values, "agegroup")
}
# Validate the `year` column.
# NOTE(review): currently a stub -- no validation is performed yet.
check_year <- function(values) {
  # raise appropriate errors
  invisible(NULL)
}
# etc.
|
0849fd3b0bb3aa19bea83e4f2bac45a11e787127
|
25a6eea4150a277e0808203cf31882fd92f78170
|
/man/plot_matrix.Rd
|
97a9f0826c0e38f13f335377d5963250a1babb59
|
[] |
no_license
|
Giappo/jap
|
42e1142546c85f97d262c2409ef5aa0cfd3f94dd
|
0fbfd381b77f2c0f744f4b7cf5726807fb3b186a
|
refs/heads/master
| 2021-08-16T08:17:19.535440
| 2020-08-25T16:07:03
| 2020-08-25T16:07:03
| 215,803,412
| 2
| 2
| null | 2020-08-25T16:07:04
| 2019-10-17T13:46:52
|
R
|
UTF-8
|
R
| false
| true
| 455
|
rd
|
plot_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{plot_matrix}
\alias{plot_matrix}
\title{Just plot a matrix without rotating it}
\usage{
plot_matrix(mat, logs = TRUE, low_triangular = FALSE)
}
\arguments{
\item{mat}{a matrix}
\item{logs}{do you want to plot in log scale?}
\item{low_triangular}{do you want to plot only the low triangular?}
}
\description{
Just plot a matrix without rotating it
}
|
f008195cf1ca703c9b4a65d21b13af24f2d20bb8
|
aee2c11aff6bd0874a03fbd16f852ad785efe5ba
|
/man/getData4haemkrt.Rd
|
c031db340d1a36f5c873e5fe34630ca34d1cde7e
|
[
"MIT"
] |
permissive
|
maciejrosolowski/progressdatenbankderivate
|
bca72eadf47ba2dcffeed80cc120f25458f13581
|
1a2e31ed7e62970a0206883173d32e14d888563d
|
refs/heads/master
| 2021-02-18T18:34:28.100509
| 2020-08-02T13:07:09
| 2020-08-02T13:07:09
| 245,223,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 854
|
rd
|
getData4haemkrt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getData4haemkrt.R
\name{getData4haemkrt}
\alias{getData4haemkrt}
\title{Get data on haematocrit.}
\usage{
getData4haemkrt(FRM_DIL_LABORWERTE)
}
\arguments{
\item{FRM_DIL_LABORWERTE}{data.table containing the table FRM_DIL_LABORWERTE
from the database of the PROGRESS study}
}
\value{
data.table with the ID of the patient (patstuid), and the
information on haematocrit, in the wide format.
}
\description{
Get data on haematocrit.
}
\examples{
\dontrun{
excel_fn <- paste0("/net/ifs1/san_projekte/projekte/",
"PROGRESS/Datenmanagement/Data_freezes/",
"20190320/PROGRESS-freeze_201903_01.xlsx")
FRM_DIL_LABORWERTE <- readxl::read_excel(excel_fn, 'FRM_DIL_LABORWERTE')
data.table::setDT(FRM_DIL_LABORWERTE)
toadd_haemkrt <- getData4haemkrt(FRM_DIL_LABORWERTE)
toadd_haemkrt
}
}
|
85da9f46bf875ae23184ef096e428160ddf71f4b
|
f74aea7ec8f87cadf072872c7dabd75c5bb4deef
|
/man/parse_PROCESSED_CSV.Rd
|
40b40cf9c4bcb331e524ffe7be8e63e7ac632574
|
[
"MIT"
] |
permissive
|
jinshijian/cosore
|
cd00622451f7033cf1647676247787d2cce4a15e
|
bf60add39633b8235afb97bcb816e9e3f34f4a3c
|
refs/heads/master
| 2020-05-31T01:16:31.660552
| 2019-06-03T18:12:51
| 2019-06-03T18:12:51
| 190,047,180
| 1
| 0
|
MIT
| 2019-06-03T17:02:12
| 2019-06-03T17:02:12
| null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
parse_PROCESSED_CSV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse-LI8100.R
\name{parse_PROCESSED_CSV}
\alias{parse_PROCESSED_CSV}
\title{Read processed data in CSV format}
\usage{
parse_PROCESSED_CSV(path)
}
\arguments{
\item{path}{Data directory path, character}
}
\value{
A data frame with all data read from file(s).
}
\description{
Read processed data in CSV format
}
\note{
Processed (in the Licor application) data consists of a tab-delimited
text file with a standard set of columns.
}
|
bf430fe4e18c8e8b140a43a1e002b048cb3b1a5c
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mousetrack/R/pathoffset.R
|
f182c2b5d73c4ae40b6f737e174ce966e3d323ec
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
r
|
pathoffset.R
|
# Calculate path offset and its max
.packageName <- 'mousetrack'
# Compute, for a 2-D path (x, y), the perpendicular distance of every point
# from the straight chord connecting the first and last points, normalise by
# the chord length, and return the maximum of those normalised offsets.
#
# IMPROVEMENT: the original looped point-by-point and used pracma::cross()
# (never actually loaded -- the require("pracma") call was commented out, so
# the function errored unless pracma happened to be attached). For vectors
# embedded in the z = 0 plane the cross product reduces to its z-component,
# so the whole computation vectorises in base R with identical results.
#
# Args:
#   x, y: numeric vectors of equal length, the path coordinates.
# Returns:
#   A single numeric: max perpendicular offset / chord length. NaN when the
#   start and end points coincide (chord length 0), as in the original.
pathoffset <- function(x, y){
    # Chord vector stored as (dy, dx) -- same layout as the original code.
    startend = c((y[length(y)] - y[1]), (x[length(x)] - x[1]))
    startenddistance = sqrt(sum(startend^2))
    # Displacement of every point from the start point.
    dy = y - y[1]
    dx = x - x[1]
    # |z-component of cross((dy_tot, dx_tot, 0), (dy_i, dx_i, 0))| / |chord|
    perpdistance = abs(startend[1] * dx - startend[2] * dy) / startenddistance
    pathoffset = perpdistance / startenddistance
    maxpathoffset = max(pathoffset)
    return(maxpathoffset)
}
|
7e97978f49648137e331c6b0509f361149fbbccb
|
516c650a67e8b3188b52ff54fba410498014a979
|
/man/verhoeff_validate.Rd
|
b76ae9db634784b3bc67576ab12740a7b2f355f5
|
[] |
no_license
|
condwanaland/verhoeff
|
1e3aba2742f3fc7995bae13057a06af16839abaa
|
bf6658d4e48d1a224a9f766522bfbc670e3e5b7b
|
refs/heads/master
| 2021-09-08T16:50:00.198575
| 2021-08-29T04:14:50
| 2021-08-29T04:14:50
| 124,794,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 572
|
rd
|
verhoeff_validate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verhoeff.R
\name{verhoeff_validate}
\alias{verhoeff_validate}
\title{verhoeff_validate}
\usage{
verhoeff_validate(number, check_digit)
}
\arguments{
\item{number}{A numerical input}
\item{check_digit}{An existing check digit for the input number}
}
\value{
Logical vector
}
\description{
Enter a number and an existing check digit. The function returns TRUE if the supplied check digit is a correct Verhoeff check digit for the given number.
}
\examples{
verhoeff::verhoeff_validate(123, 3)
}
|
fd0b382a0f5dc131ce2914f51c150e8519a32614
|
c9ce3de18238db9b2c62f57a2c49128f226bccdd
|
/R/R Shiny App Tutorial - Building Interactive Web Apps in R using Shiny/Tutorial 20/ui.R
|
26295399b3f4d72a4eee4acad62b0c99d2acc0fa
|
[] |
no_license
|
AdamYuWen/LearningProgramming
|
1ebc7d8bb942846dda7b2fff795af157d19fed47
|
34d3276923224db848db818d4322a2dd28ac7be4
|
refs/heads/master
| 2023-05-20T20:37:57.269187
| 2021-06-07T00:46:31
| 2021-06-07T00:46:31
| 203,454,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 649
|
r
|
ui.R
|
library(shiny)
# UI for tutorial 20: three cascading dropdowns (Year -> Month -> Name) whose
# Month/Name choices are filled in server-side, plus a table of the result.
# NOTE(review): `data` is not defined in this file -- presumably loaded in
# global.R (or before the app starts) so that `unique(data$Year)` is
# available when the UI is built; confirm against the rest of the app.
shinyUI(fluidPage(
titlePanel(title = h4("Tutorial 20", align = "center")),
sidebarLayout(
sidebarPanel(
selectInput(inputId = "Year",
label = "Year",
choices = unique(data$Year)),
# The choices are conditional, so it is empty for now.
selectInput(inputId = "Month",
label = "Month",
choices = "",
selected = ""),
selectInput(inputId = "Name",
label = "Name",
choices = "",
selected = "")
),
mainPanel(
tableOutput("dataset")
)
)
))
|
823ee99e4474e74db40f3c69c7a05703d1657fb3
|
2b5747d64097df379b18df4a03bba21971517da1
|
/demo/apps/linked-hover/ui.r
|
03c8865a591811a93253076d454ffa03f8bbd128
|
[] |
no_license
|
Lingbing/ggvis
|
dde1407276b2421bd2b84e62a2e35a8230a3c439
|
4c6f9830ec35e586b59fba819c48b4ac7616a5c2
|
refs/heads/master
| 2020-04-03T08:57:06.298252
| 2013-09-30T22:46:29
| 2013-09-30T22:46:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
r
|
ui.r
|
# Shiny UI for the linked-hover ggvis demo: sidebar holds the ggvis controls,
# main panel shows two plots plus a text area echoing the hover data that the
# client sends back to the server.
shinyUI(pageWithSidebar(
headerPanel("ggvis plot"),
sidebarPanel(
uiOutput("ggvis_ui"),
ggvisControlGroup("plot1")
),
mainPanel(
ggvis_output("plot1"),
ggvis_output("plot2"),
h3("Hover data (sent from client to server)"),
verbatimTextOutput("hover_data")
)
))
|
39d9655f0f8d8f3a1ede446cf55b00d1d310cac6
|
93feed59a42c1edd9b8979bb26acaa598477c9c1
|
/functions/importAirbase.R
|
9c9b83922e55fbc573194f18ea915414f3b02f01
|
[] |
no_license
|
eliavs/Wind
|
6dc0693ac95f0f3e78706af9747cf0dbe59c8f3d
|
bd2f2b221d01cee76b61a4724dbad12ba7f9a5c2
|
refs/heads/master
| 2021-01-19T18:10:33.391593
| 2017-02-27T14:25:44
| 2017-02-27T14:25:44
| 11,786,834
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,397
|
r
|
importAirbase.R
|
##' Import European Environment Agency airbase hourly air quality data
##'
##' The European Environment Agency (EEA) makes available hourly air
##' pollution data from across Europe (see
##' \url{http://acm.eionet.europa.eu/databases/airbase/}). The EEA go
##' to great lengths to compile, check and make available a huge
##' amount of air quality data. The EEA provide a range of interactive
##' maps and make all data available as csv files. These csv files are
##' split by country and can be very large.
##'
##' The aim of the \code{importAirbase} function is to provide an
##' alternative and hopefully complementary approach to accessing
##' airbase data with a specific focus on integration with R and the
##' openair package.
##'
##' Similar to other import functions in openair (see links), the
##' \code{importAirbase} function works with sites and combines all
##' species into one data frame. Rather than having year-specific
##' files there is only one file (data frame) per site covering all
##' years.
##'
##' There are many potential issues that need to be dealt with,
##' although for the most part everything should be compiled in a
##' straightforward way. One of the key issues is the use of different
##' instrument techniques measuring the same species at a site, or an
##' instrument that was replaced at some point. The EEA usefully
##' record this information. Rather than attempt to combine several
##' potential time series for the same pollutant, they have been kept
##' separate. Examples include these use of different methods to
##' measure PM10 e.g. TEOM and FDMS. Because different instruments can
##' provide very different concentrations it is probably wise to keep
##' them separate and analyse them as separate species. In other cases
##' e.g. ozone or NO2, if an instrument was replaced half way through
##' a time series it would be reasonable to combine the time series
##' into a single set. There is a function \code{airbaseSplice} that
##' will combine pollutants once imported using \code{importAirbase}.
##'
##' NOTE! This function should be considered as provisional and the
##' author would appreciate any feedback on its use.
##'
##'
##' @title Import hourly data from the European Environment Agency airbase database
##' @param site Site code(s) of the sites to be imported. Can be upper or lower case.
##' @param year The year or years of interest. For example to select
##' 2010 to 2012 use \code{year = 2010:2012}.
##' @param pollutant The pollutant(s) to be selected. See the list in
##' \code{airbaseStats}.
##' @param add Additional fields to add to the returned data frame. By
##' default the country and site type are returned. Other useful options
##' include \dQuote{city}, \dQuote{site} (site name),
##' \dQuote{EMEP_station}, \dQuote{lat}, \dQuote{lon} and \dQuote{altitude}.
##' @param splice Should the pollutant fields be consolidated when
##' multiple measurements of individual pollutants are available? See
##' \code{airbaseSplice} for details.
##' @param local Used for tesing local imports.
##' @export
##' @return Returns an hourly data frame with POSIXct date, EEA site
##' code and each individual species.
##' @seealso \code{\link{airbaseSplice}},
##' \code{\link{airbaseFindCode}}, \code{\link{airbaseStats}}, \code{\link{airbaseInfo}}
##' @author David Carslaw
# Import hourly airbase data for one or more sites, either from the remote
# KCL mirror or from a local directory, filter to the requested years and
# pollutants, and merge in per-site metadata. See the roxygen block above
# for the full contract.
importAirbase <- function(site = "gb0620a", year = 1969:2012, pollutant = NA,
                          add = c("country", "site.type"), splice = FALSE, local = NA) {
    ## get rid of R check annoyances
    dat <- NULL
    # Site codes are stored upper-case on the server.
    site <- toupper(site)
    files <- site
    # Load one site's .RData file (which defines `dat`), filter it, and
    # return the resulting data frame; on failure report and return NULL.
    loadData <- function(x) {
        tryCatch({
            if (is.na(local)) {
                fileName <- paste("http://www.erg.kcl.ac.uk/downloads/Policy_Reports/airbase/", x, ".RData", sep = "")
                con <- url(fileName)
                load(con)
                # NOTE(review): close() is skipped if load() errors; an
                # on.exit(close(con)) would be safer -- confirm before changing.
                close(con)
            } else { ## load from local file system
                con <- paste(local, x, ".RData", sep = "")
                load(con)
            }
            ## select years
            dat <- selectByDate(dat, year = year)
            ## pollutant
            if (splice)
                dat <- airbaseSplice(dat) ## combine to get single names
            # Keep only the requested pollutant columns (case-insensitive).
            if (any(!is.na(pollutant))) {
                dat <- dat[, c("date", "code", "site",
                               names(dat)[which(toupper(names(dat)) %in% toupper(pollutant))])]
            }
            dat
        },
        error = function(ex) {cat(x, "does not exist - ignoring that one.\n")})
    }
    thedata <- lapply(files, loadData)
    thedata <- thedata[!sapply(thedata, is.null)] ## remove NULL
    # Row-bind all sites, filling mismatched columns with NA (plyr::rbind.fill).
    thedata <- do.call(rbind.fill, thedata)
    if (length(add) > 0 ) {
        ## add other fields
        if (is.na(local)) {
            fileName <- "http://www.erg.kcl.ac.uk/downloads/Policy_Reports/airbase/site.info.RData"
            con <- url(fileName)
            load(con) ## brings in data frame site.info
        } else {
            con <- paste(local, "site.info.RData", sep = "")
            load(con)
        }
        if (!is.null(thedata)) {
            site.info <- site.info[, c("code", add)] ## just the fields needed
            thedata <- merge(thedata, site.info, by = "code")
        }
    }
    thedata
}
|
7cecc9626c3e0a4a48078d7262e68f1c1f956d36
|
fa809b4109757cc3b82923ddd40e4adc2cf3baac
|
/plot3.R
|
13251652742d43227dd1f6364fbdb31eaebd1dd2
|
[] |
no_license
|
cransford-copart/ExData_Plotting1
|
a7f17fe8cf716a9a723c5f5f59b68a7cbefdc340
|
286952b3a9181d8320b3d30e3da7b6aa6e363b7c
|
refs/heads/master
| 2020-03-28T16:49:03.904757
| 2018-09-17T00:28:24
| 2018-09-17T00:28:24
| 148,731,436
| 0
| 0
| null | 2018-09-14T03:34:43
| 2018-09-14T03:34:43
| null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
plot3.R
|
# Plot 3 of the "Exploratory Data Analysis" week-1 assignment: draw the three
# energy sub-metering series for 2007-02-01/02 into plot_3.png (480x480).
library(chron)
library(ggplot2)
# BUG FIX: as.data.table() below belongs to data.table, which was never loaded.
library(data.table)
energyDf <- read.delim("~/exploratory_data_analysis_wk1/household_power_consumption.txt", stringsAsFactors = FALSE, sep = ";")
energyDf$Date <- as.Date(energyDf$Date, format = "%d/%m/%Y")
# Keep only the two days of interest.
energyDfSubset <- as.data.table(energyDf)[Date >= "2007-02-01" & Date <= "2007-02-02"]
energyDfSubset$Time2 <- as.POSIXct(energyDfSubset$Time, tz = "", format = "%H:%M:%S", usetz = FALSE)
energyDfSubset$Time2 <- times(format(energyDfSubset$Time2, "%H:%M:%S"))
# BUG FIX: the argument was misspelt "formatt" (silently swallowed via `...`)
# and its pattern was wrong for the pasted "YYYY-MM-DD HH:MM:SS" string; use
# the correct argument name and pattern explicitly.
energyDfSubset$dateTime <- as.POSIXct(paste(energyDfSubset$Date, energyDfSubset$Time2), format = "%Y-%m-%d %H:%M:%S")
png(filename = "~/exploratory_data_analysis_wk1/plot_3.png", width = 480, height = 480)
# Empty frame first, then one line per sub-meter.
plot(energyDfSubset$dateTime, type = "n", energyDfSubset$Sub_metering_1, xlab = "", ylab = "Energy sub metering")
lines(energyDfSubset$dateTime, energyDfSubset$Sub_metering_1)
lines(energyDfSubset$dateTime, energyDfSubset$Sub_metering_2, col = "red")
lines(energyDfSubset$dateTime, energyDfSubset$Sub_metering_3, col = "blue")
legend("topright", lwd = 1, col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
975b56422754602c83211d200ef82479e08ce8b0
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/valgrind_test_dir/rcpp_kronDBS-test.R
|
c717d33473b37e82fa49d5f48eeb4d06bc435227
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 234
|
r
|
rcpp_kronDBS-test.R
|
# Appears to be an auto-generated capture stub (RcppDeepState-style harness):
# instead of executing the native routine, it records each call's arguments
# into the global `data.env` environment under the "rcpp_kronDBS" key.
function (A, B, p)
{
    e <- get("data.env", .GlobalEnv)
    # Append this call's arguments to the list of recorded calls.
    e[["rcpp_kronDBS"]][[length(e[["rcpp_kronDBS"]]) + 1]] <- list(A = A,
        B = B, p = p)
    # Invisibly return the .Call signature the real wrapper would dispatch to.
    invisible(c(".Call", "_CGGP_rcpp_kronDBS", "CGGP", "A", "B",
        "p"))
}
|
e79531528b3fe183081510ebbc984b5ca8bad5e4
|
b8b3443e3b7021e9ac458bc12166f3e6f470843d
|
/examples/propagation_pipeline_example.R
|
592daa27eb89af25c5d1db70d010b1dc3fc21459
|
[
"MIT"
] |
permissive
|
taylorpourtaheri/nr
|
e745a5734ca244e642ef089d9dfd20b957f00852
|
5c2710c197533ecf8b439d58d3d317bc203ac990
|
refs/heads/main
| 2023-07-14T04:53:48.773417
| 2021-08-11T22:15:55
| 2021-08-11T22:15:55
| 386,658,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
propagation_pipeline_example.R
|
# Example script: run the network-propagation pipeline on the MYC condition
# of the STRING-annotated differential-expression data.
# load relevant packages
library(STRINGdb)
library(igraph)
library(ggplot2)
library(ggnetwork)
library(dplyr)
library(glue)
library(devtools)
# load all package functions (assumes the working directory is the package root)
load_all()
# read differential expression data (annotated with gene symbols)
de_string <- readRDS('data/de_string_v11.RDS')
# select MYC condition as an example
myc_de <- de_string$MYC
# call wrapper; thresholds below select high-confidence edges (>= 950) and
# strongly differentially expressed genes (|logFC| filter, p <= 0.25)
results <- propagation_pipeline(deg = myc_de,
                                edge_conf_score_min = 950,
                                logFC_min = 2.0,
                                pvalue_max = 0.25,
                                method = 'ml',
                                causal_gene_symbol = 'MYC',
                                export_network = FALSE,
                                sim_method = 'jaccard',
                                n_sim = 9999,
                                weighted = TRUE)
# # plot output
# set.seed(4)
# plot_graph(results[['network']], method = 'weighted_score', gene_list = c('MYC'))
# ggsave('test2.png', width = 12, height = 12)
|
1fac8c7bf4d6ba3802cb542cdba08d4913230625
|
bfaa4377e6efac6c40c65a10bc6f125b97d7361b
|
/w7/script7.R
|
4a2a4f2984609da584f90213f461fca3688659f4
|
[] |
no_license
|
ekosovan/RfEX_stud_version
|
2edd0fe532f9531996d8312f2238a29c9667fcb3
|
34b136767665ed6204720de42c8d5b6a3aa28854
|
refs/heads/master
| 2020-07-16T14:21:03.691888
| 2019-12-17T13:15:12
| 2019-12-17T13:15:12
| 205,804,828
| 1
| 13
| null | 2019-12-16T10:51:59
| 2019-09-02T07:53:06
|
HTML
|
UTF-8
|
R
| false
| false
| 2,301
|
r
|
script7.R
|
# Week 7: string handling with stringr, demonstrated on the Project Gutenberg
# text of "Romeo and Juliet".
library('magrittr')
romeo = readLines("http://www.gutenberg.org/cache/epub/1112/pg1112.txt", encoding = "UTF8")
romeo %<>% {.[. != ""]} # drop empty lines (in place via %<>%)
# The Gutenberg header ends at the line containing the year "1595".
first_line = which(romeo[1:100] == "1595")
romeo = romeo[-1:-first_line]
persons = romeo[4:28]
corpus = romeo[-1:-32]
library(stringr)
corpus[1:4] %>%
print() %>%
str_length() # or "nchar()" from base R
(prol = str_sub(corpus[1], 29, 36)) # or "substring()" from base R
paste("the", prol, sep = "_||_") %>% #default sep is whitespace
print() %>%
tolower() %>% # or "stringr::str_to_lower()"
print() %>%
toupper() # or "stringr::str_to_upper()"
paste0("the //__//", prol) # no separator
file.path("w7", "data", "text.csv") # file path wrapper utility
head(persons, 3) %>%
print() %>%
str_split(",",n = 2) # split only once
corpus[1] %>%
print() %>%
str_trim() # take away the preceding and proceeding whitespaces
str_trim(" abc def ")
str_squish(" abc def ")
(jap = readLines("japanese.txt"))
# iconvlist()
iconv(jap, "x-mac-japanese", "UTF-8")
# BUG FIX: this line originally ended with an unbalanced extra ")" which made
# the whole script fail to parse; compare the correct, parenthesised version
# on the last line of the file.
files = paste0("file_", c(0,1,10,100,1000), "_", 2010:2014, ".csv")
str_sub(files, 8, 11)
str_detect("abcd", "a") # grep / grepl
str_locate("abcd", "a")
str_extract("abcd", "a") # gsub
str_replace("abcd", "ab","d") # gsub
str_remove("abcd", "a") # gsub
#"." any character
str_replace("abcd_e",".","g")
# "*" any number of times
str_extract("abc_de","_.*")
# "?" at most once
str_extract("abc_de","_.?")
# "+" at least once
str_extract("abc_de","_.+")
# "^" start of string
str_replace_all("abcd_a","^a","g")
# "$" end of string
str_replace_all("abcd_ab","a$","g")
# "|" RegEx or
str_extract(c("abc","cd"),"ab|c")
# "{}" number of times
str_extract("aaaabcde","a{1,3}")
str_extract("aaaacde","a{2,}")
# "[]" set of possibilities
str_extract(c("aabcde"),"[ab]{3}")
# "()" glue characters
str_extract(c("ababcdab"),"(ab){2}")
str_extract("abc123_ ?!<>","[:alnum:]*") # or [:alpha:] or [:digit:] only
str_replace_all("abc123_ ?!","[:punct:]","X") # special characters
str_extract("abc123_ ?!<>","[:space:]") # whitespace, newlines, ...
str_replace("abc123_ ?!<>","\\s","XX") # special characters (whitespace in this case)
str_extract("$?+.",".")
str_extract("$?+.","\\.")
(files = paste0("file_", c(0,1,10,100,1000), "_", 2010:2014, ".csv"))
|
91c08e45d817681c882b44c4c1acca338b015c7d
|
de71c62e745b048c95c08f7e516d4aaa215a0194
|
/man/mean_beta.Rd
|
479d129909fd1b5b2e0e779e41e51f470656439d
|
[
"MIT"
] |
permissive
|
jjbrehm/BadApple
|
55db2fa7208a5231f06b4aded8b9838dd12fd174
|
0fe9a9742c53fdafa8788c90f905f8d2d3c7d913
|
refs/heads/master
| 2023-02-05T21:42:05.624824
| 2020-12-22T18:45:47
| 2020-12-22T18:45:47
| 277,906,381
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 336
|
rd
|
mean_beta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mean_beta.R
\name{mean_beta}
\alias{mean_beta}
\title{mean_beta}
\usage{
mean_beta(betaobj, testvals, brange = c(0, 1))
}
\arguments{
\item{betaobj}{betamle object}
\item{testvals}{vector}
\item{brange}{vector}
}
\value{
vector
}
\description{
mean_beta
}
|
f42364354e23173f054021a12e9cdc27ede64389
|
9abd7b69ced0b119d3f11eb2f64679c797affeae
|
/R/plp.R
|
0d9d08fb6a86ba2e85e41eace557643b3f4287a3
|
[] |
no_license
|
cran/DJL
|
f90ab82207bfa627a7c8bb6971fd388c9736fe58
|
057a53eac20cb6857c7ebe38735d81f7390e0d12
|
refs/heads/master
| 2023-03-31T10:51:30.750898
| 2023-03-16T14:10:02
| 2023-03-16T14:10:02
| 48,079,064
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,662
|
r
|
plp.R
|
# Pretty-print an lpSolveAPI linear-programme object `x`: objective sense,
# constraint matrix with types/RHS (and LHS when any is finite), plus the
# per-variable kind, type and bounds. Returns `x` invisibly.
plp <-
function (x){
  m <- dim(x)[1]  # number of constraints
  n <- dim(x)[2]  # number of decision variables
  control <- lp.control(x)
  # NOTE(review): the model name is printed here AND again as part of the
  # final cat(ans) below, so it appears twice -- confirm whether intended.
  cat(paste("Model name: ", name.lp(x), "\n", sep = ""))
  # Dense copy of the constraint matrix; row 1 holds the objective row.
  ans <- matrix(0, m + 1, n)
  for(j in 1:n) {
    col <- get.column(x, j)
    col$column -> ans[1 + col$nzrow, j]
  }
  # Abbreviate per-variable kind/type labels for compact display.
  type <- get.type(x); kind <- get.kind(x)
  type[type == "integer"        ] <- "Int"
  type[type == "real"           ] <- "Real"
  kind[kind == "standard"       ] <- "Std"
  kind[kind == "semi-continuous"] <- "S-C"
  bounds <- get.bounds(x)
  upper  <- bounds$upper; lower <- bounds$lower
  # Stack header, matrix and the kind/type/bound rows, right-justified.
  ans   <- format(rbind(dimnames(x)[[2]], ans, kind, type, upper, lower), justify = "right")
  sense <- ifelse(control$sense == "minimize", "Minimize", "Maximize")
  lhs <- get.constr.value(x, side = "lhs")
  rhs <- get.constr.value(x, side = "rhs")
  r.nm  <- format(c("", sense, dimnames(x)[[1]], "Kind", "Type", "Upper", "Lower"))
  const <- format(c("", "", get.constr.type(x), "", "", "", ""), justify = "right")
  rhs   <- format(c("", "", as.character(rhs),  "", "", "", ""), justify = "right")
  # Print the LHS column only when some range constraint has a finite LHS.
  p.lhs <- any(!is.infinite(lhs[is.element(get.constr.type(x, as.char = FALSE), c(1, 2))]))
  lhs   <- format(c("", "", as.character(lhs),  "", "", "", ""), justify = "right")
  if(p.lhs){
    ans <- cbind(r.nm, lhs, const, ans, const, rhs)
  }else{
    ans <- cbind(r.nm, ans, const, rhs)
  }
  # Collapse the matrix of strings into one printable text block.
  ans <- apply(ans, 1, paste, collapse = "  ")
  ans <- paste(ans, collapse = "\n")
  m.nm <- paste("Model name: ", name.lp(x), "\n", sep = "")
  ans <- paste(m.nm, ans, "\n", sep = "")
  cat(ans)
  invisible(x)
}
|
db6885ee69ce0b2d5f84e1a66ad2b91f70a3efec
|
b549bd1a71a999b084b866fd54f850dc3a91104a
|
/src/clustering_functions.R
|
f454ec7667611b03ceafa352855fb132a8b352a5
|
[
"MIT"
] |
permissive
|
graebnerc/trade-typology
|
ba4f850e19194c3ab47b4c1a2400ebad70091036
|
9051a420efa1557fcca933cb2d809b01d99026f9
|
refs/heads/master
| 2022-12-24T03:41:12.998028
| 2022-12-15T12:45:59
| 2022-12-15T12:45:59
| 175,810,261
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,221
|
r
|
clustering_functions.R
|
# Greyscale six-colour palette used by do_clustering() when exactly six
# groups are highlighted (presumably the ICAE house colours -- TODO confirm).
icae_cols <- c("#696969", "#888888", "#A0A0A0",
               "#B0B0B0", "#C8C8C8", "#D8D8D8")
#' Clustering
#'
#' Conducts the clustering
#'
#' @param data_file The file for the clustering, must not contain NA
#' @param clustering_vars Should contain all the variables to be used
#' in the clustering as strings
#' @param nb_groups The number of groups to be highlighted in the plot
#' @param clustering_method The method to be used in the \code{agnes} function:
#' 'ward', 'single', 'complete', 'average' or 'divisive'.
#' @return List with clustering object, the data used, and the plot.
do_clustering <- function(data_file,
                          clustering_vars,
                          nb_groups,
                          clustering_method="ward"){
  # Keep only the country column plus the clustering variables and move the
  # country names into the rownames (as required by cluster::agnes/diana).
  cluster_data <- data_file %>%
    dplyr::select(one_of("country", clustering_vars))
  cluster_data <- as.data.frame(cluster_data)
  rownames(cluster_data) <- cluster_data$country
  cluster_data <- select(cluster_data, -country)
  # Fit either divisive (diana) or agglomerative (agnes) clustering.
  if (clustering_method=="divisive"){
    clustering_object <- cluster_data %>%
      cluster::diana(.)
  } else {
    clustering_object <- cluster_data %>%
      cluster::agnes(method = clustering_method) # Compute hierachical clustering
  }
  # BUG FIX: the original tested the *global* variable `n_groups` instead of
  # the function argument `nb_groups` (in both the condition and the
  # brewer.pal() call), so colour selection silently depended on the caller's
  # workspace.
  if (nb_groups==6){
    colors_clustering <- icae_cols
  } else{
    colors_clustering <- RColorBrewer::brewer.pal(nb_groups, "Dark2")
  }
  # Horizontal dendrogram with one coloured rectangle per cluster.
  cluster_plot <- factoextra::fviz_dend(
    clustering_object,
    k = nb_groups,
    cex = 0.75, # label size
    rect = TRUE, # Add rectangle around groups
    rect_fill = TRUE,
    color_labels_by_k = FALSE, # color labels by groups
    k_colors = "black",
    rect_border = colors_clustering,
    horiz = TRUE
  ) +
    theme(axis.text.x = element_blank(),
          axis.ticks.x = element_blank()
    )
  return_list <- list(
    "cluster_obj" = clustering_object,
    "cluster_data" = cluster_data,
    "cluster_plot" = cluster_plot
  )
  return(return_list)
}
#' Save the clustering dendogram
#'
#' Takes a list of variables to be used for the clustering, then implements
#' the clustering using \code{do_clustering}. Saves the resulting dendogram.
#'
#' @param clustering_variables The original names of the variables to be
#' used for the clustering. Adds 'z' to the variables automatically.
#' @param number_groups The number of clusters to be highlighted.
#' @param vers Optional; adds a version in brackets to the dendogram title
#' and adjusts the name of the resulting pdf file. FALSE by default.
#' @return The resulting ggplot2 object of the dendogram, which is also
#' saved in the output folder.
save_dendogram <- function(clustering_variables, number_groups, vers=FALSE){
  # The clustering variables in the data carry a "z" prefix (z-standardised).
  clustering_variables_coded <- paste0("z", clustering_variables)
  # BUG FIX: the original passed the *global* `n_groups` to do_clustering(),
  # silently ignoring the `number_groups` argument.
  # NOTE(review): `cluster_data` is still read from the global environment --
  # presumably prepared by the calling script; consider passing it explicitly.
  clustering_list <- do_clustering(
    dplyr::mutate(cluster_data, country=ifelse(
      country=="United Kingdom", "UK", country)),
    clustering_variables_coded,
    number_groups)
  clustering_dendogram <- clustering_list$cluster_plot +
    xlab("Countries") + ylab("") +
    theme(axis.title = element_blank())
  # Optionally add a version tag to the title and the output file name.
  if (vers){
    clustering_dendogram <- clustering_dendogram +
      ggtitle(paste0("Result of the hierarchical clustering (", vers, ")"))
  }
  # (A bare `clustering_dendogram` statement here in the original was a
  # no-op inside a function and has been removed.)
  if (vers){
    file_name <- here(paste0("output/fig_2_clustering_", vers, ".pdf"))
  } else {
    file_name <- here("output/fig_2_clustering.pdf")
  }
  ggplot2::ggsave(plot = clustering_dendogram,
                  filename = file_name,
                  width = 7.5, height = 4)
  return(clustering_dendogram)
}
#' Compare clustering algorithms
#'
#' Compares three clustering algorithms by computing their scores and by
#' producing a table.
#'
#' @param raw_dat The data to be used for the clustering
compare_clustering_types <- function(raw_dat,
                                     clustering_vars,
                                     nb_clusters) {
  # Run one clustering with the given method and return the fitted object.
  fit_one <- function(method) {
    do_clustering(raw_dat, clustering_vars, nb_clusters, method)[["cluster_obj"]]
  }
  # Four agglomerative linkages plus divisive clustering, all on the same data.
  agnes_methods <- c("complete", "average", "single", "ward")
  agnes_fits <- lapply(agnes_methods, fit_one)
  diana_fit <- fit_one("divisive")
  # agnes objects expose the agglomerative coefficient as $ac,
  # diana exposes the divisive coefficient as $dc.
  algorithm_names <- c(paste0("agnes_", agnes_methods), "diana_divisive")
  coefficient_values <- c(vapply(agnes_fits, function(f) f$ac, numeric(1)),
                          diana_fit$dc)
  # Tabulate, best (largest) coefficient first.
  data.frame(type_clustering = algorithm_names,
             dif_coef = coefficient_values) %>%
    dplyr::arrange(dplyr::desc(dif_coef)) %>%
    dplyr::rename(Algorithm = type_clustering,
                  `Clust. coef.` = dif_coef)
}
#' Setup data for cluster taxonomy
#'
#' Takes the taxonomy data and returns a data frame that can be used
#' to create figures illustrating the differences among clusters.
#'
#' @param data_used The data used
#' @param cluster considered The name of the cluster (e.g. C1 or C2); must be
#' a character with a leading C, as in data_used.
#' @param cluster_variables A list with information about the variables to
#' be included in the final data. Names of the list should be clusters as
#' in \code{cluster}, the items the names of the variables as strings.
#' @return A data table with the data as to be used by ggplot2.
setup_taxonomy_data <- function(data_used,
                                cluster_considered,
                                cluster_variables){
  # Fail fast if the requested cluster label does not occur in the data.
  if (!cluster_considered %in% data_used[["cluster"]]){
    stop("Cluster considered not present in data set!")
  }
  # Restrict to the country/cluster columns plus this cluster's variables.
  relevant_cols <- dplyr::select(
    data_used,
    one_of("country", "cluster", cluster_variables[[cluster_considered]]))
  # Collapse the cluster labels into a binary split: focal cluster vs "Rest".
  binary_split <- dplyr::mutate(
    relevant_cols,
    cluster = ifelse(cluster == cluster_considered, cluster_considered, "Rest"))
  # Mean of every numeric column within each of the two groups.
  summarised <- binary_split %>%
    dplyr::group_by(cluster) %>%
    dplyr::summarise_if(is.numeric, mean, na.rm=TRUE) %>%
    dplyr::ungroup()
  # TODO check for NA
  summarised
}
#' Make the taxonomy plots
#'
#' Creates plots to visualize the descriptive statistics of the trade models.
#' Takes as input the raw taxonomy data, processes it using the function
#' \code{setup_taxonomy_data} and then creates the plots. Can return both
#' a complete plot, or a list of individual plots.
#'
#' @param data_used The data to be used. Must have a column \code{cluster}
#'  in which 'C1', 'C2', etc. identify the cluster.
#' @param cluster_considered A string in the form 'C1', 'C2' etc. to
#'  identify the cluster to be visualized. (FIX: tag name matched to the
#'  argument; the original read \code{@param cluster considered}.)
#' @param cluster_variables A list in which keys have the same name as the
#'  clusters, and items indicate the relevant variables as strings.
#' @param variable_subset If FALSE (the default) all variables that are
#'  specified in \code{cluster_variables} are used for the visualization.
#'  Otherwise, you can pass a list of variable names as strings to visualize
#'  only those.
#' @param return_full_plot If TRUE (the default) the function combines the
#'  single plots into one full plot (using \code{ggpubr::ggarrange}). If
#'  FALSE a list of single plots is returned.
make_plots <- function(data_used,
                       cluster_considered,
                       cluster_variables,
                       variable_subset=FALSE,
                       return_full_plot=TRUE){
  # A non-FALSE variable_subset replaces the full variable specification.
  if (!(FALSE %in% variable_subset)){
    cluster_variables <- variable_subset
  }
  cluster_data <- setup_taxonomy_data(data_used,
                                      cluster_considered,
                                      cluster_variables)
  # First column is the cluster id; every remaining column gets its own plot.
  plots_to_do <- names(cluster_data)[2:length(names(cluster_data))]
  final_plots <- list()
  for (p in plots_to_do){
    # NOTE: the stray debug `print(p)` from the original was removed.
    final_plots[[p]] <- ggplot(cluster_data,
                               aes_string(x="cluster",
                                          y=p,
                                          fill="cluster",
                                          color="cluster")) +
      geom_bar(stat = "identity") +
      ggtitle(p) +
      scale_y_continuous(expand = c(0, 0)) +
      # FIX: the original chain was broken here (missing '+'), so the
      # theme settings below were evaluated and silently discarded.
      theme_bw() +
      theme(panel.border = element_blank(),
            axis.line = element_line(),
            legend.position = "none")
  }
  if (return_full_plot){
    full_plot <- ggpubr::ggarrange(
      plotlist = final_plots,
      ncol = length(names(final_plots)),
      legend = "none")
    return(full_plot)
  } else {
    return(final_plots)
  }
}
|
78023e6e57d422f40698202c57fe15a089fbfe0a
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051597-test.R
|
95eb2c7091b8d36ab3371eeb08c8ec67ad964de9
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,441
|
r
|
1610051597-test.R
|
testlist <- list(rates = numeric(0), thresholds = c(NaN, 3.56048348188083e-306, 34082834.0004893, NaN, 2.80614289249855e-312, -2.2308331518689e-289, 1.67616711008101e-319, 0, 0, 9.72926582174422e-309, 2.84809453888922e-306, 2.84347436052587e-312, 2.47222686857108e-94, 3.5601616320854e-306, 8.90029544463878e-306, -2.81542935260124e-147, 7.52609340439459e-304, 34056240.0314952, 1.84870553760557e-272, 2.30127027866676e-12, 6.98147722549945e-310, -2.67120456398365e+305, 1.39066109167247e-309, 0, 0, 0, 9.23492568228087e-311, 5.79909433230408e-316, 1.44804023176556e+190, -1.75590191507535e+306, 2.29775642758296e+189, 4.36428616530596e-306, -7.39330507484462e-287, 5.4323092248711e-312, 2.332463439243e-12, 2.12199589323686e-314, -5.48612406879369e+303, -3.1845552891024e-248, 1.80354405063812e-130, 1.12812834827904e-291, 7.2908647453913e-304, 3.24665356080927e-312, 2.72888639290682e-312, NaN, NaN, NaN, NaN, NaN, 33554432.0627937, -3.47040827763403e-147, 2.30126964169804e-12, NaN, 5.43230910471433e-312, 3.56013998772825e-306, 0, 0, 0, 0, 0), x = c(-2.30331110816477e-156, -2.30331110763906e-156, -2.30331110816477e-156, NaN, 3.65365168976753e-306, 2.8480945455619e-306, -5.48612677708818e+303, 2.72888655986149e-312, -Inf, -1.2341419504043e-30, -1.80650535611164e+307, -9.7757963632732e-150, 2.73729184714066e-312, -1.26418156348547e-30, NaN, 7.19929512000783e-310, NaN, NaN, 1.08646184497373e-311, 8.97712454626148e-308, 3.66343140099342e-305, -9.77941371393343e-150, 2.33246235504083e-12, -2.15995069167566e-277, -5.69758255001719e-306, 4.92150736865414e-303, -1.26836452888033e-30, NaN, -1.26836407059221e-30, -Inf, -1.26826829408497e-30, 2.40086640681612e-300, 5.67865049360052e-270, -Inf, -1.26826754177099e-30, 2.40086640681612e-300, 1.11579631478852e-308, -2.11966640218428e-289, -5.81608789291487e-34, -1.26836459270829e-30, -8.81443176193936e-280, 2.47222686855331e-94, 3.5601616320854e-306, 8.90029544463878e-306, 0, 4.53801546776677e+279, -5.48612669704375e+303, 
1.3899072431119e-312, 1.59481413420777e-313, -3.07840103521725e-288, -6.40588384090095e-145, NaN, -7.17064860746532e-310, 2.81199573737869e-312, -9.77586643088729e-150, -2.97619079749159e-288, -9.19490323621235e-186, 8.63073241202175e+115, -2.82116926034698e-277, 3.23785921002061e-319, -2.30331110816311e-156, 0, NaN, 5.59610256518376e-275, -5.48612406879369e+303, 0))
# Replay the argument list above against grattan::IncomeTax and inspect the
# structure of the result. This appears to be an auto-generated fuzzer /
# valgrind regression input (extreme doubles, NaN, Inf) — TODO confirm; the
# point is that the call runs without crashing, not that the output is
# economically meaningful.
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
ac16584b8dcb6ee3010e13d938e29007e798eee4
|
680bf4e601fe744d15e323c598e696e3f79effe1
|
/R/manifold_data.R
|
fc47e2dcb11445d07ad861d6f64b96c833d9a341
|
[
"MIT"
] |
permissive
|
bergloman/ad_examples
|
706da7930af83dd2abcc8760ce904b12c4a478c7
|
ed458c06d4ebfc2b6f5ff3e619cc082d4ce2f795
|
refs/heads/master
| 2023-03-20T03:16:16.945393
| 2021-03-15T08:15:39
| 2021-03-15T08:15:39
| 264,755,130
| 0
| 0
|
MIT
| 2020-05-17T20:54:46
| 2020-05-17T20:54:45
| null |
UTF-8
|
R
| false
| false
| 1,874
|
r
|
manifold_data.R
|
#rm(list=ls())
# Sample usage draw_circle:
# n <- 100
# r <- 40
# center <- c(50, 100)
# width <- 10
# draw_circle(n, r, center, width)
# Draw a circle with noise: n points at uniformly random angles, radius
# jittered uniformly in [r - width/2, r + width/2], centered at `center`
# (numeric length-2, (x, y)). Returns an n x 2 matrix of coordinates.
draw_circle <- function(n, r, center, width) {
  rads <- runif(n, min=0, max=2*pi)
  rr <- runif(n, r - (width/2), r + (width/2))
  # Vectorized polar -> cartesian conversion (the original filled the matrix
  # row by row in a loop); same RNG draws, identical values.
  samples <- cbind(center[1] + rr * cos(rads),
                   center[2] + rr * sin(rads))
  return (samples)
}
# draw line with noise
# Draw a "line" with noise: n points uniformly distributed in the rectangle
# [bottomleft[1], bottomleft[1] + width] x [bottomleft[2], bottomleft[2] + height].
# Returns an n x 2 matrix of (x, y) coordinates.
draw_line <- function(n, bottomleft, width, height) {
  x_jitter <- runif(n, min=0, max=width)
  y_jitter <- runif(n, min=0, max=height)
  cbind(bottomleft[1] + x_jitter, bottomleft[2] + y_jitter)
}
# generate a 'face' with two eyes and a mouth:
# O O
#  -
# Each of the elements will be a different class (1 = left eye, 2 = right
# eye, 3 = mouth). Add noise via draw_circle / draw_line.
set.seed(42)
n <- 100
e1_samples <- draw_circle(n, 40, c(50, 150), 10)
e2_samples <- draw_circle(n, 40, c(170, 150), 10)
m_samples <- draw_line(n, c(50, 50), 120, 10)
# Stack the three point clouds and label each block of n rows with its class.
face <- rbind(e1_samples, e2_samples, m_samples)
label <- rep(c(1,2,3), each=n)
facedf <- data.frame(label=label, x=face[, 1], y=face[, 2])
# NOTE(review): hard-coded absolute output path — only works on the author's
# machine; parameterize before reuse.
write.table(facedf, file="/Users/moy/work/git/adnotes/data/face.csv",
            append=F, quote=F, sep=",",
            row.names=F, col.names=colnames(facedf))
# Manually-toggled sanity-check plots (flip F to TRUE to run interactively).
# NOTE(review): `F`/`T` are reassignable; TRUE/FALSE is the safer spelling.
if (F) {
  facedata <- read.csv("/Users/moy/work/git/adnotes/data/face.csv", header=T)
  plot(facedata$x, facedata$y, typ="p", xlim=c(0, 250), ylim=c(0, 250), main="Face",
       pch=".", col=ifelse(facedata$label==1, "red", ifelse(facedata$label==2, "blue", "green")))
}
if (F) {
  plot(0, typ="n", xlim=c(0, 250), ylim=c(0, 250), main="Face")
  points (e1_samples[, 1], e1_samples[, 2], pch=".", col="red")
  points (e2_samples[, 1], e2_samples[, 2], pch=".", col="red")
  points (m_samples[, 1], m_samples[, 2], pch=".", col="red")
}
|
28019554c6514f57ba9daf4a96bc1eecd78a1c71
|
6ceab1bf9c435b523d2f8e7e9440da39770d741b
|
/R/f7Searchbar.R
|
51ec055516b5a9cac2606ee21866c5656a2097c7
|
[] |
no_license
|
RinteRface/shinyMobile
|
a8109cd39c85e171db893d1b3f72d5f1a04f2c62
|
86d36f43acf701b6aac42d716adc1fae4f8370c6
|
refs/heads/master
| 2023-07-25T16:28:41.026349
| 2022-11-25T17:04:29
| 2022-11-25T17:04:29
| 139,186,586
| 328
| 92
| null | 2023-03-26T05:58:53
| 2018-06-29T19:13:06
|
R
|
UTF-8
|
R
| false
| false
| 6,360
|
r
|
f7Searchbar.R
|
#' Framework 7 searchbar
#'
#' Searchbar to filter elements in a page.
#'
#' @param id Necessary when using \link{f7SearchbarTrigger}. NULL otherwise.
#' @param placeholder Searchbar placeholder.
#' @param expandable Whether to enable the searchbar with a target link,
#' in the navbar. See \link{f7SearchbarTrigger}.
#' @param inline Useful to add an \link{f7Searchbar} in an \link{f7Appbar}.
#' Notice that utilities like \link{f7HideOnSearch} and \link{f7NotFound} are not
#' compatible with this mode.
#' @param options Search bar options.
#' See \url{https://v5.framework7.io/docs/searchbar.html#searchbar-parameters}.
#' If no options are provided, the searchbar will search in list elements by
#' item title. This may be changed by updating the default searchContainer and
#' searchIn.
#' @export
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(shinyMobile)
#'
#' cars <- rownames(mtcars)
#'
#' shinyApp(
#' ui = f7Page(
#' title = "Simple searchbar",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar",
#' hairline = FALSE,
#' shadow = TRUE,
#' subNavbar = f7SubNavbar(
#' f7Searchbar(id = "search1")
#' )
#' ),
#' f7Block(
#' "This block will be hidden on search.
#' Lorem ipsum dolor sit amet, consectetur adipisicing elit."
#' ) %>% f7HideOnSearch(),
#' f7List(
#' lapply(seq_along(cars), function(i) {
#' f7ListItem(cars[i])
#' })
#' ) %>% f7Found(),
#'
#' f7Block(
#' p("Nothing found")
#' ) %>% f7NotFound()
#'
#' )
#' ),
#' server = function(input, output) {}
#' )
#'
#' # Expandable searchbar with trigger
#' cities <- names(precip)
#'
#' shinyApp(
#' ui = f7Page(
#' title = "Expandable searchbar",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar with trigger",
#' hairline = FALSE,
#' shadow = TRUE,
#' subNavbar = f7SubNavbar(
#' f7Searchbar(id = "search1", expandable = TRUE)
#' )
#' ),
#' f7Block(
#' f7SearchbarTrigger(targetId = "search1")
#' ) %>% f7HideOnSearch(),
#' f7List(
#' lapply(seq_along(cities), function(i) {
#' f7ListItem(cities[i])
#' })
#' ) %>% f7Found(),
#'
#' f7Block(
#' p("Nothing found")
#' ) %>% f7NotFound()
#'
#' )
#' ),
#' server = function(input, output) {}
#' )
#'
#' # Searchbar in \link{f7Appbar}
#' shinyApp(
#' ui = f7Page(
#' title = "Searchbar in appbar",
#' f7Appbar(
#' f7Searchbar(id = "search1", inline = TRUE)
#' ),
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar in f7Appbar",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' f7List(
#' lapply(seq_along(cities), function(i) {
#' f7ListItem(cities[i])
#' })
#' ) %>% f7Found()
#' )
#' ),
#' server = function(input, output) {}
#' )
#' }
f7Searchbar <- function(id, placeholder = "Search", expandable = FALSE, inline = FALSE,
                        options = NULL) {
  # Default behaviour: search within list elements, matching on item titles.
  if (is.null(options)) {
    options <- list(searchContainer = ".list", searchIn = ".item-title")
  }

  # Input wrapper shared by both the inline and the full (form) variants:
  # search field, magnifier icon and the clear ("x") button.
  inputWrap <- shiny::tags$div(
    class = "searchbar-input-wrap",
    shiny::tags$input(type = "search", placeholder = placeholder),
    shiny::tags$i(class = "searchbar-icon"),
    shiny::tags$span(class = "input-clear-button")
  )

  rootTag <- if (inline) {
    # Inline flavour (e.g. inside f7Appbar): a plain div without the
    # "Cancel" button; `expandable` does not apply here.
    shiny::tags$div(
      class = "searchbar searchbar-inline",
      id = id,
      inputWrap
    )
  } else {
    rootClass <- if (expandable) "searchbar searchbar-expandable" else "searchbar"
    shiny::tags$form(
      class = rootClass,
      id = id,
      shiny::tags$div(
        class = "searchbar-inner",
        inputWrap,
        shiny::tags$span(class = "searchbar-disable-button", "Cancel")
      )
    )
  }

  # JSON configuration blob consumed by the client-side initialisation
  # script; `data-for` ties it to this searchbar's id.
  configTag <- shiny::tags$script(
    type = "application/json",
    `data-for` = id,
    jsonlite::toJSON(x = options, auto_unbox = TRUE, json_verbatim = TRUE)
  )

  shiny::tagList(rootTag, configTag)
}
#' Framework 7 searchbar trigger
#'
#' Element that triggers the searchbar.
#'
#' @param targetId Id of the \link{f7Searchbar}.
#' @export
f7SearchbarTrigger <- function(targetId) {
  # The data-searchbar attribute is a CSS selector pointing Framework7 at
  # the searchbar to enable when this link is tapped.
  target <- paste0("#", targetId)
  shiny::tags$a(
    class = "link icon-only searchbar-enable",
    `data-searchbar` = target,
    # One icon per theme: iOS (f7-icons) and material design.
    shiny::tags$i(class = "icon f7-icons if-not-md", "search"),
    shiny::tags$i(class = "icon material-icons md-only", "search")
  )
}
#' Utility to hide a given tag on search
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to hide.
#' @export
f7HideOnSearch <- function(tag) {
  # Appending this class makes Framework7 hide the element while a search
  # is in progress.
  hiddenClass <- paste0(tag$attribs$class, " searchbar-hide-on-search")
  tag$attribs$class <- hiddenClass
  tag
}
#' Utility to hide a given tag when \link{f7Searchbar} is enabled.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to hide.
#' @export
f7HideOnEnable <- function(tag) {
  # Appending this class makes Framework7 hide the element as soon as the
  # searchbar is enabled (before any query is typed).
  hiddenClass <- paste0(tag$attribs$class, " searchbar-hide-on-enable")
  tag$attribs$class <- hiddenClass
  tag
}
#' Utility to display an item when the search is unsuccessful.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to use.
#' @export
f7NotFound <- function(tag) {
  # Framework7 shows elements with this class only when a search yields
  # no matches.
  notFoundClass <- paste0(tag$attribs$class, " searchbar-not-found")
  tag$attribs$class <- notFoundClass
  tag
}
#' Utility to display an item when the search is successful.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to display. When using \link{f7Searchbar}, one must
#' wrap the items to search in inside \link{f7Found}.
#' @export
f7Found <- function(tag) {
  # Marks the container whose children are searched; Framework7 shows it
  # while the search has matches.
  foundClass <- paste0(tag$attribs$class, " searchbar-found")
  tag$attribs$class <- foundClass
  tag
}
#' Utility to ignore an item from search.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to ignore.
#' @export
f7SearchIgnore <- function(tag) {
  # Elements carrying this class are excluded from the search entirely.
  ignoreClass <- paste0(tag$attribs$class, " searchbar-ignore")
  tag$attribs$class <- ignoreClass
  tag
}
|
aec6d739489ebeb3a49ae49a21c3d94b1e7d65e5
|
9f2dda9828688a2155c0d6da22b3270ce8f05e86
|
/plot3.R
|
dd2a20669196f5efc9f6a73dac641a70932ea125
|
[] |
no_license
|
nickfaelnar/ExData_Plotting1
|
d736bf8e20f35dd8224c97ca27b659a370850da0
|
d60a446756d216e0782f6e24f57d736ad53ad65e
|
refs/heads/master
| 2020-03-07T05:06:20.090735
| 2018-03-29T14:28:13
| 2018-03-29T14:28:13
| 127,286,016
| 0
| 0
| null | 2018-03-29T12:07:48
| 2018-03-29T12:07:48
| null |
UTF-8
|
R
| false
| false
| 1,049
|
r
|
plot3.R
|
library("data.table")
# Download and unpack the UCI household power consumption data set into the
# current working directory. NOTE(review): re-downloads on every run.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = paste(getwd(),"data.zip", sep="/"))
unzip("data.zip")
# Read the text file; "?" marks missing values in this data set.
hpcTable <- data.table::fread(input = "household_power_consumption.txt", na.strings="?")
# Add a new column with the combined Date and Time values parsed as POSIXct.
hpcTable[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Include only records between 2007-02-01 and 2007-02-02 (two full days).
hpcTable <- hpcTable[(dateTime >= "2007-02-01 00:00:00") & (dateTime <= "2007-02-02 23:59:59")]
# Render to a 480x480 PNG, as required by the assignment.
png("plot3.png", width=480, height=480)
# Draw the three sub-metering series over time on one set of axes:
# Sub_metering_1 in black, 2 in red, 3 in blue, with a matching legend.
plot(hpcTable[, dateTime], hpcTable[, Sub_metering_1], type="l", xlab="", ylab="Energy sub metering")
lines(hpcTable[, dateTime], hpcTable[, Sub_metering_2],col="red")
lines(hpcTable[, dateTime], hpcTable[, Sub_metering_3],col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty=c(1,1), lwd=c(1,1))
dev.off()
|
211493834918cf58ae8cc84281fdf3c3bf4ecb91
|
fc3b1aa8da7c4c57a12cdb8e8895c428a0d16ce8
|
/ManDrought/dat.r
|
b5bb526b3588f331953f9e642b702a2de46967d5
|
[] |
no_license
|
christopear/UoA-Studentship
|
97618fe7260253bb4d2dc70383401e42e4f34297
|
d4b8dbd024f3052a5f8cacc79e7ad7824d64790c
|
refs/heads/master
| 2021-05-30T08:33:59.733929
| 2016-01-24T23:48:34
| 2016-01-24T23:48:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
dat.r
|
# Configuration for the ManDrought analysis: input locations and the title
# label used downstream. (FIX: use `<-` for assignment, per R convention.)
CSV.loc <- "~/Studentship/ManDrought/thirdset.csv"  # path to the CSV data set
GIS.loc <- "~/Studentship/ManDrought/GIS"           # path to the GIS data
title   <- "Age"                                    # presumably the plot/variable title — TODO confirm
|
59baf898ec1ac13635102832d09a8e2fa558b8b6
|
d37c2dea2f709120e6c97dd7f07f8f5443d46f9d
|
/analysis/step2_gpANOVA.R
|
64a45e11ccb1ee8733a93dfcfbe3a5f7a5efcd22
|
[] |
no_license
|
yuchinchiu/decodeCC
|
4cf3fd5970f957cc28972b39b18c0e5c8ed3ee2d
|
0a0b6b99af5d7132df4702ff6b9becec145aaba5
|
refs/heads/master
| 2021-01-25T04:14:56.696702
| 2017-08-11T18:59:35
| 2017-08-11T18:59:35
| 93,412,358
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,426
|
r
|
step2_gpANOVA.R
|
################################
# This file performs group stats - repeated measure ANOVA
# Loads step1_allData_v1.Rda (gpM1-gpM3, workerIdList)
################################
# NOTE(review): clearing the workspace inside a script is discouraged; kept
# for compatibility with the original workflow.
rm(list=ls())
library(tidyverse)
library(ez) # ezANOVA
# FIX: removed the original `currentDir <- getwd(); setwd(currentDir)`,
# which set the working directory to itself (a no-op).
load("step1_allData_v1.Rda")
# Columns that should be treated as ordered categorical factors.
catVblList <- c("stimCat", "trialType", "blockType", "memCond", "swProb", "task","half", "sbjId")
# Convert every matching column of a data frame to an ordered factor.
# (FIX: the original repeated this loop verbatim for gpM1, gpM2 and gpM3.)
to_ordered_factors <- function(df, vbls) {
  idx <- match(vbls, colnames(df))
  for (i in idx[!is.na(idx)]) {
    df[[i]] <- factor(df[[i]], ordered = TRUE)
  }
  df
}
gpM1 <- to_ordered_factors(gpM1, catVblList)
gpM2 <- to_ordered_factors(gpM2, catVblList)
gpM3 <- to_ordered_factors(gpM3, catVblList)
## Stroop Task : Repeated measure ANOVA 2 blockType x 2 trialType
# Mark valid RTs: correct responses with RT inside the 200-1000 ms window.
gpM1$validRT <- 1
gpM1[which(gpM1$sbjACC==0) ,"validRT"] <- 0
gpM1[which(gpM1$sbjRT<200) ,"validRT"] <- 0
gpM1[which(gpM1$sbjRT>1000),"validRT"] <- 0
ezPrecis(gpM1)
# Relabel factor levels for readable output.
# NOTE(review): this assumes the existing level ORDER matches these labels —
# verify against the coding in step1.
levels(gpM1$trialType) <- c("Congruent", "Incongruent")
levels(gpM1$blockType) <- c("Rarely InCongruent", "Freq. InCongruent")
# Stroop: RT (valid trials only)
rt_anova = ezANOVA(data = gpM1[gpM1$validRT==1,]
                   , dv = sbjRT
                   , wid = .(sbjId)
                   , within = .(blockType, trialType)
)
print(rt_anova)
# Stroop: ACC (all trials; accuracy is the DV, so no RT-based exclusion)
acc_anova = ezANOVA(data = gpM1
                    , dv = sbjACC
                    , wid = .(sbjId)
                    , within = .(blockType, trialType)
)
print(acc_anova)
# descriptive stats M, SD and FLSD (this is not within-subject SE...)
rt_CondMeans = ezStats(data = gpM1[gpM1$validRT==1,]
                       , dv = sbjRT
                       , wid = .(sbjId)
                       , within = .(blockType, trialType)
)
print(rt_CondMeans)
# Plotting: RT panel uses valid trials only, ACC panel uses all trials.
stroop = ezPlot(
  data = list(gpM1[gpM1$validRT==1,], gpM1)
  , dv = .(sbjRT, sbjACC)
  , wid = sbjId
  , within = .(blockType, trialType)
  , x = blockType
  , split = trialType
  , x_lab = 'BlockType'
  , split_lab = 'TrialType'
  , dv_labs = c('stroop RT(ms)', 'stroop ACC(%)')
  , y_free = TRUE
)
print(stroop)
## Memory Recognition Task : Repeated measure ANOVA 2 blockType x 2 trialType
ezPrecis(gpM3)
# blockType, trialType includes 'new'... need to clean up here:
# drop the 'new' items (memCond == 5) and refactor so unused levels vanish.
memData <- gpM3[gpM3$memCond<=4,] # new =5
memData$blockType <- factor(memData$blockType)
memData$trialType <- factor(memData$trialType)
# Same level-order assumption as above — TODO confirm.
levels(memData$trialType) <- c("Congruent", "Incongruent")
levels(memData$blockType) <- c("Rarely InCongruent", "Freq. InCongruent")
# memory: ACC
memACC_anova = ezANOVA(data = memData
                       , dv = sbjACC
                       , wid = .(sbjId)
                       , within = .(blockType, trialType)
)
print(memACC_anova)
# descriptive stats M, SD and FLSD (this is not within-subject SE...)
mem_CondMeans = ezStats(data = memData
                        , dv = sbjACC
                        , wid = .(sbjId)
                        , within = .(blockType, trialType)
)
print(mem_CondMeans)
# Plotting: ACC panel uses all kept trials, RT panel drops missing RTs.
memory = ezPlot(
  data = list(memData, memData[!is.na(memData$sbjRT),])
  , dv = .(sbjACC, sbjRT)
  , wid = sbjId
  , within = .(blockType, trialType)
  , x = blockType
  , split = trialType
  , x_lab = 'BlockType'
  , split_lab = 'TrialType'
  , dv_labs = c('memory ACC(%)', 'memory RT(ms)')
  , y_free = TRUE
)
print(memory)
|
72f7620c13f66d9593e14dae56fd49be58766f14
|
9efa134c757f6f8938cb17d565be9f5e87e8c8e9
|
/man/getEveryBrewerySocialaccount.Rd
|
7aba9ccc86d0ddacc5282fae31a8b5779db07c51
|
[] |
no_license
|
bpb824/brewerydb
|
04f07279e18c63c054c62244669aeeaccacbf921
|
1fed6d68ac6a9543b8fa04c0efb11631fdc78d65
|
refs/heads/master
| 2022-01-20T05:11:49.833630
| 2019-06-23T02:37:58
| 2019-06-23T02:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 438
|
rd
|
getEveryBrewerySocialaccount.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BrewerySocialaccount.R
\name{getEveryBrewerySocialaccount}
\alias{getEveryBrewerySocialaccount}
\title{Get All Social Accounts For Brewery}
\usage{
getEveryBrewerySocialaccount(breweryId)
}
\arguments{
\item{breweryId}{The breweryId}
}
\value{
none
}
\description{
Gets a listing of all social accounts for a specific brewery.
}
\concept{BrewerySocialaccount}
|
cac2d05e9c3ad343d18255cd17e5872f12d7249f
|
7655f699565c789c940eabc0b7a2688551839b63
|
/src/publication/fig-celltype_mb_bmi_geneset_enrichment.R
|
7eafa0dfe1c3ff46a8eee972ec840bf0c06f0125
|
[] |
no_license
|
Tobi1kenobi/msc-thesis-2019
|
072b00e5e7051271dc35fe94d31ac48bc128523d
|
769be4cd36eb6b5e27121d9c572e805242347034
|
refs/heads/master
| 2020-07-16T15:30:30.837512
| 2019-09-11T11:30:14
| 2019-09-11T11:30:14
| 205,814,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,403
|
r
|
fig-celltype_mb_bmi_geneset_enrichment.R
|
############### SYNOPSIS ###################
### AIM: Mousebrain BMI geneset cell-type enrichment
### OUTPUT: figs/fig_celltype_geneset_enrichment.mb.bmi_celltypes.pdf
### REMARKS:
# ....
### REFERENCE:

# ======================================================================= #
# ================================ SETUP ================================ #
# ======================================================================= #

library(tidyverse)
library(here)
library(patchwork)

source(here("src/lib/load_functions.R")) # load sc-genetics library
source(here("src/publication/lib-load_pub_lib_functions.R"))

# Working directory matters: the output path below ("figs/...") is relative.
setwd(here("src/publication"))

# ======================================================================= #
# ========================== CELL-TYPE BMI ENRICHMENT =================== #
# ======================================================================= #

### READ: all MB + Campbell cell-types
file.enrich <- here("results/es_enrichment--bmi_gene_lists.pvals.csv")
df.enrich <- read_csv(file.enrich)

# ======================================================================= #
# ============================ MOUSEBRAIN LDSC ========================== #
# ======================================================================= #

dataset_prefix <- "mousebrain_all"
filter.gwas <- "BMI_UKBB_Loh2018"

# ================== LOAD LDSC CTS RESULTS (multi GWAS) ================ #
### Read LDSC results
file.results <- here("results/prioritization_celltypes--mousebrain_all.multi_gwas.csv.gz")
df.ldsc_cts <- read_csv(file.results)

# =========================== FILTER GWAS =========================== #
# FIX: this filter appeared twice in the original; the second application
# was a harmless no-op and has been removed.
df.ldsc_cts <- df.ldsc_cts %>% filter(gwas == filter.gwas)

# =========================== ADD METADATA =========================== #
df.metadata <- get_metadata(dataset_prefix)
df.ldsc_cts <- df.ldsc_cts %>% left_join(df.metadata, by="annotation")

# ======================================================================= #
# ================================= PLOT ================================ #
# ======================================================================= #

df.plot <- df.ldsc_cts

### Keep only the BMI-prioritized mousebrain annotations.
filter.annotations <- get_prioritized_annotations_bmi(dataset="mousebrain")
df.plot <- df.plot %>% filter(annotation %in% filter.annotations)
# df.plot <- df.plot %>% mutate(flag_highlight = if_else(annotation %in% filter.annotations, TRUE, FALSE))

### Add enrichment data: -log10 p of the combined rare/Mendelian obesity gene set.
df.plot <- df.plot %>% left_join(df.enrich %>% select(annotation, combined_rare_mendelian_obesity), by="annotation")
df.plot <- df.plot %>% mutate(enrichment = -log10(combined_rare_mendelian_obesity))

### Get annotation colors
colormap.annotations <- get_color_mapping.prioritized_annotations_bmi(dataset="mousebrain")

### Lollipop plot: a faint stem plus a colored point per annotation.
p <- ggplot(df.plot, aes(x=annotation, y=enrichment, label=annotation))
p <- p + geom_segment(aes(x=annotation, xend=annotation, y=0, yend=enrichment), color="grey", alpha=0.3)
p <- p + geom_point(aes(color=annotation), size=3)
p <- p + labs(x=" ", y=expression(-log[10](P[enrichment])))
p <- p + scale_color_manual(values=colormap.annotations)
p <- p + guides(color=FALSE) # hide legend (FIX: FALSE instead of reassignable F)
p <- p + coord_flip()
### Theme
p <- p + theme_classic()
p

# FIX: sprintf() had no format specifiers, so a plain string suffices.
file.out <- "figs/fig_celltype_geneset_enrichment.mb.bmi_celltypes.pdf"
ggsave(p, filename=file.out, width=4, height=4)
|
d63cf1e10efac3e36c59ba7e487cb18c703a5fdc
|
4f8636a29a581a9637d069fe81a9bc3d8d46a56b
|
/Project3-WebScraping/Hayes Cozart/visualizations.R
|
c783831369006f89983dd12ca0b8b7000c438538
|
[] |
no_license
|
jeperez/bootcamp005_project
|
55ecb745469947ded31883703f5f5e6f7abe73e5
|
936fee3e4f7a2b6b1872c20718fe42ac976e3f8a
|
refs/heads/master
| 2021-01-11T07:03:27.124107
| 2016-08-04T21:49:29
| 2016-08-04T21:49:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,054
|
r
|
visualizations.R
|
# Exploratory visualizations of the scraped boardgame data.
# NOTE(review): machine-specific working directory; adjust before running.
setwd("C:/Users/Hayes/Desktop/BDS 005/Projects/Project 3")
library(ggplot2)
library(dplyr)

boardgames <- read.csv('boardgames.csv')
str(boardgames)
View(boardgames)

# Order the suggested-age factor from unspecified ("[]") through oldest so
# the x axis sorts by age instead of alphabetically.
boardgames$age <- factor(boardgames$age, levels = c("[]", "[3+]", "[4+]", "[5+]", "[6+]",
                                                    "[7+]", "[8+]", "[9+]", "[10+]",
                                                    "[11+]", "[12+]", "[13+]", "[14+]",
                                                    "[15+]", "[16+]", "[17+]", "[18+]",
                                                    "[25+]"))

# Age graphs: game-rank composition within each suggested-age bucket.
ggplot(data = boardgames, aes(x = age)) +
  geom_bar(aes(fill = Game.Rank), position = "fill") +
  ggtitle("Proportion of Boardgames by Suggested age") +
  ylab(label = "Proportion") + scale_fill_brewer(palette = "Set1")

ggplot(data = boardgames, aes(x = age)) +
  geom_bar(aes(fill = Game.Rank), position = "stack") +
  ggtitle("Number of Boardgames by Suggested age") +
  ylab(label = "Count") + scale_fill_brewer(palette = "Set1")

unique(boardgames$age)  # exploratory check of the age levels present

# Mechanics graphs. (FIX: corrected "NUmber" typo in the stacked-plot title.)
ggplot(data = boardgames, aes(x = factor(Number.of.mechanisms))) +
  geom_bar(aes(fill = Game.Rank), position = "stack") +
  ggtitle("Number of Boardgames by number of mechanisms") +
  ylab(label = "Count") + xlab(label = "Number of Game Mechanics") +
  scale_fill_brewer(palette = "Set1")

ggplot(data = boardgames, aes(x = factor(Number.of.mechanisms))) +
  geom_bar(aes(fill = Game.Rank), position = "fill") +
  ggtitle("Proportion of Boardgames by number of mechanisms") +
  ylab(label = "Proportion") + xlab(label = "Number of Game Mechanics") +
  scale_fill_brewer(palette = "Set1")

# Category (theme) graph. FIX: the original title said "Number" although the
# plot shows proportions (position = "fill", y label "Proportion").
ggplot(data = boardgames, aes(x = factor(Number.of.categories))) +
  geom_bar(aes(fill = Game.Rank), position = "fill") +
  ggtitle("Proportion of Boardgames by number of Themes") +
  ylab(label = "Proportion") + xlab(label = "Number of Themes") +
  scale_fill_brewer(palette = "Set1")

# Play-time graphs; games over 480 min excluded from the density plot.
# NOTE(review): scale_fill_brewer has no effect on the density plots below,
# which map colour (not fill); scale_color_brewer would be needed instead.
filter(boardgames, timemin < 480) %>%
  ggplot(data = ., aes(x = timemin)) +
  geom_density(aes(color = Game.Rank)) +
  ggtitle("Density of Boardgames by Time (Min)") +
  ylab(label = "Density") + xlab(label = "Time (Min)") +
  scale_fill_brewer(palette = "Set1")

ggplot(data = boardgames, aes(x = factor(timemin))) +
  geom_bar(aes(fill = Game.Rank), position = "fill") +
  ggtitle("Proportion of Boardgames by Minimum Time") +
  ylab(label = "Proportion") + xlab(label = "Minimum time to play") +
  scale_fill_brewer(palette = "Set1") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))

ggplot(data = boardgames, aes(x = factor(timemin))) +
  geom_bar(aes(fill = Game.Rank), position = "stack") +
  ggtitle("Number of Boardgames by Minimum Time") +
  ylab(label = "Count") + xlab(label = "Minimum time to play") +
  scale_fill_brewer(palette = "Set1") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))

boardgames$timemin  # exploratory peek at the raw values

# Year graph (pre-1950 games excluded).
# FIX: the x label was a copy-paste leftover ("Minimum time to play").
filter(boardgames, year > 1950) %>%
  ggplot(data = ., aes(x = year)) +
  geom_density(aes(color = Game.Rank)) +
  ggtitle("Density of Boardgames by Year") +
  ylab(label = "Density") + xlab(label = "Year") +
  scale_fill_brewer(palette = "Set1")

boardgames$year  # exploratory peek at the raw values

# Price graph (games priced $100 or more excluded).
filter(boardgames, price < 100) %>%
  ggplot(data = ., aes(x = price)) +
  geom_density(aes(color = Game.Rank)) +
  ggtitle("Density of Boardgames by Price ($)") +
  ylab(label = "Density") + xlab(label = "Price ($)") +
  scale_fill_brewer(palette = "Set1")

# Difficulty graph ("dificulty" is the column's spelling in the data set).
ggplot(data = boardgames, aes(x = dificulty)) +
  geom_density(aes(color = Game.Rank)) +
  ggtitle("Density of Boardgames by Dificulty") +
  ylab(label = "Density") + xlab(label = "Dificulty to Understand") +
  scale_fill_brewer(palette = "Set1")

# Language graph; seems not much to tell.
ggplot(data = boardgames, aes(x = language)) +
  geom_bar(aes(fill = Game.Rank), position = "fill") +
  ggtitle("Proportion of Boardgames by Language Requirement") +
  ylab(label = "Proportion") + scale_fill_brewer(palette = "Set1") +
  # FIX: the original chain was broken here (missing '+'), so the axis-text
  # rotation below was evaluated and silently discarded.
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.