blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5906e647089301b82df6703a1d268d815124f76b
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query08_query27_1344n/query08_query27_1344n.R
|
4d099d97b6ce36ae5f61f25d119664cb8235db65
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
query08_query27_1344n.R
|
3f03bb120ae7cd22fc779c4d4c25f881 query08_query27_1344n.qdimacs 3609 13554
|
f28335a23c71f1061830ebd69a5f2f48de1dfd54
|
48130e58d3dad3e0a9362898f1d3bb21a5f64c64
|
/R/Search_Anywhere.R
|
3edae38cfd7be0d7598e3f73d65b759f7f9a5616
|
[] |
no_license
|
ss-lab-cancerunit/SpiderSeqR
|
e8f571c5432b1a14372cac4baa5845aad6c0f4b1
|
a4c00b4aa9b34b00b7761130a887e203c743e2a3
|
refs/heads/master
| 2023-01-07T00:27:39.465475
| 2020-11-01T19:13:26
| 2020-11-01T19:13:26
| 232,421,625
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,593
|
r
|
Search_Anywhere.R
|
#' Search Anywhere within SRA and GEO databases
#'
#' @param query_all Search term for both SRA and GEO (gse and gsm tables)
#'
#' @param acc_levels Accession levels at which the search is conducted.
#' Possible options include run, sample, experiment, study, gsm, gse.
#' Defaults to c("run", "experiment", "sample", "gsm")
#' @param category_both A character with category
#' for SRA library strategy and GEO type
#' @param SRA_library_strategy A character with SRA library strategy
#' @param SRA_other_library_strategy A logical whether to include
#' unclassified entries
#' @param GEO_type A character with GEO type
#' @param SRA_query Search term for SRA only
#' @param GEO_query Search term for GEO only
#' @param GSM_query Search term for gsm table only (GEO)
#' @param GSE_query Search term for gse table only (GEO)
#' @param call_output A logical indicating whether to produce a call record
#' @return A data frame with results of the search
#'
#'
#'
#' @examples
#' startSpiderSeqRDemo()
#' searchAnywhere("*sir3*") # The broadest search
#' searchAnywhere("sir3") # omits entries with characters before/after sir3
#' searchAnywhere("sir3 OR sir3p") # Can list synonyms
#'
#' ## Only search for matches in SRA
#' searchAnywhere ("sir3", acc_levels = c("run",
#' "sample", "experiment", "study"))
#'
#' ## Only search for matches in GEO
#' searchAnywhere ("sir3", acc_levels = c("gsm", "gse"))
#'
#'
#'
#'
#' @section Argument requirements:
#' Either query_all or \strong{both} SRA_query and GEO_query
#' need to be provided (this is to facilitate column-specific search
#' in the databases; if you wish to search within specific columns,
#' provide SRA_query and GEO_query with appropriate column names)
#'
#'
#' @section Query arguments:
#'
#' Query arguments include query_all, SRA_query, GEO_query,
#' GSM_query and GSE_query.
#'
#' In the simplest case, it is recommended to just use query_all,
#' which will apply to all the searches across databases.
#' However, for users in need of more fine-tuning, other query arguments
#' can be used (e.g. when you wish to search within specific columns
#' of each database table; this is mostly appropriate for use in fts search).
#' Only the highest level query arguments will be considered.
#' Hence the following combinations of arguments are accepted
#' (any extra query arguments will be ignored):
#' \itemize{
#' \item query_all
#' \item SRA_query and GEO_query
#' \item SRA_query and GSM_query and GSE_query
#'
#' }
#'
#' @section Accession levels:
#' Each accession level is associated with its own set of information.
#' Sometimes the information is replicated across levels,
#' sometimes it is unique to the level.
#' Only information associated with the specified accession levels
#' will be subject of the search.
#' For example, it is common for study abstracts to mention a lot
#' of gene names or proteins that were not a direct object of the study;
#' by searching everywhere studies with a mere mention
#' of a gene will be included.
#'
#' Restricting accession levels, e.g.
#'
#' \code{searchAnywhere(query_all = "p53",
#' acc_levels = c("run", "experiment", "sample", "gsm"))}
#'
#' will help avoid including these cases.
#' However, always consider using a broader search
#' and comparing the results to the more refined one.
#'
#' Another use of accession levels is to restrict search to only one database.
#' To do so, only list accession levels specific to one database:
#' SRA (run, experiment, sample, study) or GEO (gsm, gse).
#'
#'
#' @section Category_both, SRA_library_strategy and GEO_type:
#'
#' SRA and GEO have distinct ways of specifying the type of their data
#' (such as e.g. RNA-Seq, ChIP-Seq or microarray expression experiments).
#' SRA stores that information as *library_strategy*, GEO records *types*.
#' For users' convenience, a data frame with the conversion
#' between the commonest *library_strategies* and *types* is provided
#' in \code{SRA_GEO_Category_Conversion} (for more details, please examine
#' \code{SRA_GEO_Category_Conversion} or its documentation,
#' \code{?SRA_GEO_Category_Conversion}).
#'
#' Hence, it is possible to specify *category*, which refers to either
#' one or both SRA and GEO (some categories exist within both SRA and GEO,
#' some only in one of the databases; e.g. only GEO stores microarray data).
#'
#' Similarly to query arguments, the highest level argument
#' will be taken into account and if lower-level arguments exist,
#' they will be ignored.
#'
#' Hence, the user can provide the following combinations of arguments:
#' \itemize{
#' \item NONE of category_both, SRA_library_strategy and GEO_type
#' \item category_both ONLY
#' \item SRA_library_strategy AND GEO_type
#' \item SRA_library_strategy ONLY*
#' \item GEO_type ONLY*
#' }
#' * If only one of the SRA_library_strategy and GEO_type is provided,
#' no search will be undertaken in the database corresponding
#' to the missing argument.
#' The same is the case if the supplied category_both refers only
#' to one of the databases
#' (e.g. search in SRA only if category_both = "DNA NGS" (DNA sequencing))
#'
#'
#'
#'
#' @export
#'
searchAnywhere <- function(query_all,
    acc_levels = c("run", "experiment", "sample", "gsm"),
    category_both = NULL,
    SRA_library_strategy = NULL,
    SRA_other_library_strategy = c("OTHER", "NA", "NULL"),
    GEO_type = NULL,
    SRA_query,
    GEO_query,
    GSM_query,
    GSE_query,
    call_output = FALSE){

    ## Query arguments ####
    ## Accepted combinations: query_all alone; SRA_query + GEO_query;
    ## or SRA_query + GSM_query + GSE_query.
    ## Higher-level query arguments override lower-level ones.
    if (!missing(query_all)){ # QUERY_ALL PRESENT
        if (!missing(SRA_query) ||
            !missing(GEO_query) ||
            !missing(GSM_query) ||
            !missing(GSE_query)){
            warning(paste0("query_all already provided; ",
                            "sra/geo/gsm/GSE_query will be ignored"))
        }
        SRA_query <- query_all
        GSM_query <- query_all
        GSE_query <- query_all
    } else { # QUERY_ALL ABSENT
        ## SRA_query is always required when query_all is missing
        if (missing(SRA_query)){
            stop("SRA_query is required")
        }
        if (!missing(GEO_query)){ ## QUERY_ALL ABSENT; GEO_query PRESENT
            if (!missing(GSM_query) || !missing(GSE_query)){
                warning(paste0("GEO_query already provided; ",
                                "gsm/GSE_query will be ignored"))
            }
            GSM_query <- GEO_query
            GSE_query <- GEO_query
        } else { ## QUERY_ALL ABSENT; GEO_query ABSENT
            ## Both table-level GEO queries must then be supplied explicitly
            if (missing(GSM_query) || missing(GSE_query)){
                stop(paste0("GSM_query and GSE_query are both required ",
                            "in the absence of query_all or GEO_query"))
            }
        }
    }

    # category_both, SRA_library_strategy and GEO_type ####
    # Canonicalise SRA_library_strategy synonyms
    # (will be disregarded if category_both is provided)
    if (!is.null(SRA_library_strategy)){
        SRA_library_strategy <- vapply(
            SRA_library_strategy,
            function(s) .manageLibraryStrategy(s,
                                                input = "syn",
                                                output = "can"),
            character(1),
            USE.NAMES = FALSE)
    }

    # category_both PRESENT ####
    # Populate SRA_library_strategy and GEO_type with converted categories
    if (!is.null(category_both)){
        if (!is.null(SRA_library_strategy) || !is.null(GEO_type)){
            message(paste0("category_both already provided; ",
                            "SRA_library_strategy/GEO_type will be ignored"))
        }
        # Single conversion call (previously computed twice)
        conv <- convertCategoriesToLibraryStrategyType(category_both)
        SRA_library_strategy <- conv$SRA_library_strategy
        GEO_type <- conv$GEO_type

        # If category_both has no corresponding library_strategy in SRA,
        # don't search there
        if (is.null(SRA_library_strategy)){
            length_pre <- length(acc_levels)
            acc_levels <- acc_levels[!acc_levels %in% c("run",
                                                        "experiment",
                                                        "sample",
                                                        "study")]
            if (length(acc_levels) < length_pre){
                message(paste0("Category_both does not exist in SRA ",
                                "- will not search there"))
            }
        }
        # If category_both has no corresponding type in GEO,
        # don't search there
        if (is.null(GEO_type)){
            length_pre <- length(acc_levels)
            acc_levels <- acc_levels[!acc_levels %in% c("gsm", "gse")]
            if (length(acc_levels) < length_pre){
                message(paste0("Category_both does not exist in GEO ",
                                "- will not search there"))
            }
        }
    } else {
        # category_both ABSENT ####
        # Omit search within SRA if GEO_type exists,
        # but SRA_library_strategy is null
        if (!is.null(GEO_type) && is.null(SRA_library_strategy)){
            length_pre <- length(acc_levels)
            acc_levels <- acc_levels[!acc_levels %in% c("run",
                                                        "experiment",
                                                        "sample",
                                                        "study")]
            if (length(acc_levels) < length_pre){
                message(paste0("SRA library strategy not provided. ",
                                "Will only search in GEO"))
            }
        }
        # Omit search within GEO if SRA_library_strategy exists,
        # but GEO_type is null
        if (is.null(GEO_type) && !is.null(SRA_library_strategy)){
            length_pre <- length(acc_levels)
            acc_levels <- acc_levels[!acc_levels %in% c("gsm", "gse")]
            if (length(acc_levels) < length_pre){
                # Typo fix: message previously read "onlly"
                message("GEO type not provided. Will only search in SRA")
            }
        }
    }

    # Display search details ####
    .mm(cli::rule(), "search")
    .mm(cli::rule(left = "SEARCH DETAILS:"), "search")
    .mm(cli::rule(left = "QUERY"), "search")
    .mm(paste0("SRA_query: ", SRA_query), "search")
    .mm(paste0("GSM_query: ", GSM_query), "search")
    .mm(paste0("GSE_query: ", GSE_query), "search")
    .mm(cli::rule(left = "LIBRARY_STRATEGY/TYPE"), "search")
    .mm(paste0("SRA_library_strategy: ", SRA_library_strategy), "search")
    .mm(paste0("GEO_type: ", GEO_type), "search")
    .mm(cli::rule(left = "ACCESSION LEVELS FOR SEARCHING"),
        "search")
    .mm(paste0("acc_levels: ", paste0(acc_levels, collapse = ", ")), "search")
    .mm(cli::rule(), "search")

    # Produce a call record file if requested ####
    if (call_output){
        sa_argument_list <- list(SRA_query=SRA_query,
                                GSM_query=GSM_query,
                                GSE_query=GSE_query,
                                SRA_library_strategy=SRA_library_strategy,
                                GEO_type=GEO_type,
                                acc_levels=acc_levels)
        sa_file <- .generateFileName_CALL_SA(sa_argument_list)
        .generateCallRecord(file = sa_file)
    }

    # Search within SRA ####
    # Only if at least one SRA accession level remains after filtering above
    if (any(acc_levels %in% c("run", "experiment", "sample", "study"))){
        .mm("Searching for matches in SRA...", "prog")
        sra_df <- .searchAnywhereSRA(SRA_query = SRA_query,
                    acc_levels = acc_levels,
                    SRA_library_strategy = SRA_library_strategy,
                    SRA_other_library_strategy = SRA_other_library_strategy)
        .vex("temp_anywhere_sra_df", sra_df)
        if (nrow(sra_df) != 0){
            sra_out <- .searchForAccessionAcrossDBsDF(sra_df$run_accession,
                                                        "*", "*", "*", sra_df)
        } else {
            sra_out <- .generateEmptyDF() # Warning already generated by SRA
        }
        .vex("temp_anywhere_sra_out_sfa", sra_out)
        sra_out <- .unifyDFFormat(sra_out)
        .vex("temp_anywhere_sra_out_udf", sra_out)
    } else {
        sra_out <- data.frame() # Create an empty data frame for rbind
    }

    # Search within GEO ####
    if (any(acc_levels %in% c("gse", "gsm"))){
        .mm("Searching for matches in GEO...", "prog")
        geo_df <- .searchAnywhereGEO(GSM_query = GSM_query,
                                        GSE_query = GSE_query,
                                        acc_levels = acc_levels,
                                        GEO_type = GEO_type)
        .vex("temp_anywhere_geo_df", geo_df)
        if (nrow(geo_df) != 0){
            geo_out <- .searchForAccessionAcrossDBsDF(geo_df$gsm,
                                                        "*", "*", "*", geo_df)
        } else {
            # Warning already generated by GSM, GSE
            geo_out <- .generateEmptyDF()
        }
        .vex("temp_anywhere_geo_out_sfa", geo_out)
        geo_out <- .unifyDFFormat(geo_out)
        .vex("temp_anywhere_geo_out_udf", geo_out)
    } else {
        geo_out <- data.frame() # Create an empty data frame for rbind
    }

    .vex("temp_anywhere_sra_out", sra_out)
    .vex("temp_anywhere_geo_out", geo_out)

    # Combine results from GEO and SRA ####
    df_out <- rbind(sra_out, geo_out)

    # Process results and add unifying columns ####
    # Don't create sample column
    df_out <- .extractGSM(df_out, sampleColumn = FALSE)
    df_out <- .saExtractor(df_out)
    .vex("temp_TEN_df_out", df_out)
    df_out <- .chExtractor(df_out)
    # No .detectInputs/Controls used
    df_out <- createEmptyColumns(df_out, c("input", "control"))
    df_out <- .detectMerges(df_out, do_nothing = TRUE)
    # No .verifyMissingRuns used
    df_out <- .convertPairedEnds(df_out)
    if (nrow(df_out) != 0){
        df_out <- .unifyNAs(df_out)
    }
    df_out <- .renameOTHColumns(df_out)
    .vex("temp_df_out", df_out)
    df_out <- .unifyDFFormat(df_out)

    # Report result count (singular/plural message)
    df_dim <- nrow(df_out)
    if (df_dim == 1){
        .mm(paste0("Found ", df_dim, " entry matching search terms"),
            "res")
    } else {
        .mm(paste0("Found ", df_dim, " entries matching search terms"),
            "res")
    }
    return(df_out)
}
#' Search anywhere within gse and gsm tables of GEO database
#'
#' @param GSM_query String to search for within gsm table
#' @param GSE_query String to search for within gse table
#' @param acc_levels Character vector indicating where to conduct the search
#' (only "gse" and "gsm" are considered)
#' @param GEO_type Study type for filtering results (optional)
#' @return Data frame with result data from whole GEO (gsm and gse tables)
#'
#' @keywords internal
#'
.searchAnywhereGEO <- function(GSM_query,
    GSE_query,
    acc_levels = c("gse", "gsm"),
    GEO_type=NULL){
    # Dispatch to the gsm- and/or gse-level searches depending on which
    # accession levels were requested; deduplicate when both are combined.
    want_gsm <- "gsm" %in% acc_levels
    want_gse <- "gse" %in% acc_levels
    if (want_gsm){
        df_out <- .searchAnywhereGSM(GSM_query, GEO_type)
    }
    if (want_gse){
        gse_res <- .searchAnywhereGSE(GSE_query, GEO_type)
        if (want_gsm){
            # Both levels searched: stack and drop duplicate rows
            df_out <- unique(rbind(df_out, gse_res))
        } else {
            df_out <- gse_res
        }
    }
    return(df_out)
}
#' Search anywhere within gsm table of GEO database
#'
#' @param GSM_query String to search for
#' @param GEO_type Study type for filtering results (optional)
#' @return Data frame with result data from whole GEO (gsm and gse tables)
#'
#' @keywords internal
#'
.searchAnywhereGSM <- function(GSM_query, GEO_type){
    # Search every column of the gsm table for GSM_query via LIKE '%...%',
    # append gse-level columns, then optionally filter by GEO_type.
    # No acc_levels needed in this case.
    database_name <- "geo_con"
    database_env <- ".GlobalEnv"
    geo_con <- get(database_name, envir = get(database_env))
    # List all tables within geo_con
    geo_tables <- DBI::dbListTables(geo_con)

    # Construct a query within gsm ####
    if (!("gsm_ft" %in% geo_tables)){
        # Standard search: one LIKE clause per column, OR-ed together.
        # NOTE(review): GSM_query is interpolated directly into the SQL;
        # quote characters in the query can break or alter the statement.
        # Consider parameterised queries if input is untrusted.
        gsm_columns <- DBI::dbListFields(geo_con, "gsm")
        clauses <- paste0("( ", gsm_columns, " LIKE '%", GSM_query, "%')")
        full_query <- paste0("SELECT * FROM gsm WHERE ( ",
                                paste(clauses, collapse = " OR "), " )")
        .mm(full_query, "query")
        df <- DBI::dbGetQuery(geo_con, full_query)
    } else {
        # Fts search ####
        stop("No fts support yet")
    }

    if (nrow(df) == 0){
        warning(paste0("No results found in GSM. ",
            "Try refining your search terms or acc_levels"), call. = FALSE)
    }
    df <- .renameGSMColumns(df)
    .vex("temp_df_diag", df)

    # Append gse columns ####
    df <- .appendGSEColumns(df, "*")

    # Filter by GEO_type if provided ####
    # Generalised to accept a vector of types (matching any of them),
    # consistent with .searchAnywhereGSE; previously only the first
    # element of GEO_type was used.
    if (!is.null(GEO_type)){
        filt_ind <- rep(FALSE, nrow(df))
        for (t in GEO_type){
            filt_ind <- filt_ind | grepl(t, df$GSE_type)
        }
        df <- df[filt_ind, ]
    }
    return(df)
}
#' Search anywhere within gse table of GEO database
#'
#' @param GSE_query String to search for
#' @param GEO_type Study type for filtering results (optional)
#' @return Data frame with result data from whole GEO (gsm and gse tables)
#'
#'
#' @keywords internal
#'
.searchAnywhereGSE <- function(GSE_query, GEO_type){
    # Search every column of the gse table for GSE_query via LIKE '%...%',
    # optionally filter by GEO_type, then expand matching GSEs across GEO.
    database_name <- "geo_con"
    database_env <- ".GlobalEnv"
    geo_con <- get(database_name, envir = get(database_env))
    # List all tables within geo_con
    geo_tables <- DBI::dbListTables(geo_con)

    # Construct a query within gse ####
    if (!("gse_ft" %in% geo_tables)){
        # Standard search: one LIKE clause per column, OR-ed together.
        # NOTE(review): GSE_query is interpolated directly into the SQL;
        # consider parameterised queries if input is untrusted.
        gse_columns <- DBI::dbListFields(geo_con, "gse")
        clauses <- paste0("( ", gse_columns, " LIKE '%", GSE_query, "%')")
        full_query <- paste0("SELECT * FROM gse WHERE ( ",
                                paste(clauses, collapse = " OR "), " )")
        .mm(full_query, "query")
        df <- DBI::dbGetQuery(geo_con, full_query)
    } else {
        # Fts search ####
        stop("No fts support yet")
    }

    # Filter by GEO_type if provided ####
    if (!is.null(GEO_type)){
        filt_ind <- rep(FALSE, nrow(df))
        for (t in GEO_type){
            # Note that column name here is type, not GSE_type.
            # BUG FIX: previously grepl(GEO_type, ...) ignored the loop
            # variable, so only the first element of a multi-element
            # GEO_type was ever matched (with a grepl warning).
            filt_ind <- filt_ind | grepl(t, df$type)
        }
        df <- df[filt_ind, ]
    }

    # Search across GEO for GSEs ####
    if (nrow(df) != 0){
        df <- .searchGEOForGSE(df$gse, "*", "*")
    } else {
        warning(paste0("No results found in GSE. ",
            "Try refining your search terms or acc_levels"), call. = FALSE)
        df <- .generateEmptyDF(c("gsm", "gse"))
    }
    return(df)
}
#------------------------------------------
# ------------------DONE-------------------
#------------------------------------------
#'
#' Fulltext search in SRA
#'
#' @param SRA_query Query passed to fts MATCH operator (cannot be a vector)
#' @param SRA_library_strategy Character vector denoting
#' library_strategy/ies of choice (OPTIONAL)
#' @param SRA_other_library_strategy A character vector indicating whether
#' (and which) uncategorised library strategies are accepted
#' (choose between one and all elements of c("OTHER", "NA", "NULL"));
#' if not desired, set equal to FALSE.
#' NOTE: only evaluated if library strategy is provided
#' @param acc_levels Character vector denoting which accession levels
#' will be searched.
#' Choose from some/all of c("run", "experiment", "sample", "study")
#' @param ... Other options
#' @return Data frame with results
#'
#' @examples
#' ## stat3
#' # .searchAnywhereSRA("stat3")
#'
#' ## stat3 in human samples
#' # .searchAnywhereSRA("stat3 taxon_id: 9606")
#'
#' ## stat3 chip-seq (including unclassified library strategies)
#' # .searchAnywhereSRA("stat3", library_strategy = "ChIP-Seq")
#' ## stat3 chip-seq not including unclassified library strategies
#' # .searchAnywhereSRA("stat3",
#' # library_strategy = "ChIP-Seq",
#' # SRA_other_library_strategy = FALSE)
#'
#' ## stat3 ignoring matches at study level
#' #.searchAnywhereSRA("stat3", acc_levels = c("run", "experiment", "sample"))
#'
#'
#' @keywords internal
.searchAnywhereSRA <- function(SRA_query,
    acc_levels = c("run", "experiment", "sample", "study"),
    SRA_library_strategy=NULL,
    SRA_other_library_strategy = c("OTHER", "NA", "NULL"),
    ...){
    # Full-text search of the sra_ft table (MATCH operator), optionally
    # restricted by library_strategy, then filtered by accession level.

    # Developer hook: return the call/argument info instead of searching.
    # (isTRUE() covers the NULL case, so no separate null check is needed;
    # unreachable diagnostic code that followed the return was removed.)
    sra_arg_check <- list(...)$sra_arg_check
    if (isTRUE(sra_arg_check)){
        print(as.list(match.call(definition = sys.function(-1),
                                    call = sys.call(-1))))
        l <- list()
        l$SRA_query <- SRA_query
        l$SRA_library_strategy <- SRA_library_strategy
        l$SRA_other_library_strategy <- SRA_other_library_strategy
        l$acc_levels <- acc_levels
        l$dots <- list(...)
        return(l)
    }

    database_name <- "sra_con"
    database_env <- ".GlobalEnv"
    query_full <- paste0("SELECT * FROM sra_ft WHERE sra_ft MATCH '",
                            SRA_query, "'")

    if (!is.null(SRA_library_strategy)){
        # Build one equality clause per requested library strategy.
        # (Previously assembled by abusing a `sep` argument to paste0 and
        # truncating the trailing " OR "; this joins clauses explicitly.)
        clauses <- paste0("library_strategy = '", SRA_library_strategy, "'")
        if (any(c("OTHER", "NA", "NULL") %in% SRA_other_library_strategy)){
            # Clauses admitting unclassified entries, gated individually
            ols_clause <- c("library_strategy = 'OTHER'",
                            "library_strategy = 'NA'",
                            "library_strategy IS NULL")
            ols_check <- c("OTHER" %in% SRA_other_library_strategy,
                            "NA" %in% SRA_other_library_strategy,
                            "NULL" %in% SRA_other_library_strategy)
            clauses <- c(clauses, ols_clause[ols_check])
        }
        ls_query <- paste(clauses, collapse = " OR ")
        .mm(ls_query, "query")
        query_full <- paste0(query_full, " AND ( ", ls_query, ")")
    }
    .mm(query_full, "query")

    # Developer hook: return the assembled SQL without executing it
    query_check <- list(...)$query_check
    if (isTRUE(query_check)){
        return(query_full)
    }

    df <- DBI::dbGetQuery(get(database_name,
                                envir = get(database_env)), query_full)
    .vex("temp_searchAnywhereSRA", df)
    if (nrow(df) != 0){
        .mm("Filtering results according to accession levels...", "prog")
        df <- .filterSRAByTermByAccessionLevel(SRA_query, df, acc_levels)
    }
    # Must come after filtering, otherwise it would not work
    df <- .renameSRAColumns(df)
    if (nrow(df) == 0){
        warning(paste0("No results found in SRA. ",
            "Try refining your search terms or acc_levels"), call. = FALSE)
    }
    return(df)
}
#' Filter df according to query matches
#' only within accession levels of interest
#'
#' Performs fts search on the data frame according to the query, only searching
#' in the columns corresponding to specified accession levels of interest
#'
#'
#' @param query Query to be passed to MATCH operator (for fts)
#' @param df Data frame to be filtered
#' @param acc_levels Accession levels to search within
#' (choose from: run, experiment, sample, study;
#' defaults to c("run", "experiment", "sample"))
#' @return Filtered df (containing only rows matching query
#' within specified accession levels)
#'
#'
#' @keywords internal
#'
.filterSRAByTermByAccessionLevel <- function(query,
    df,
    acc_levels = c("run", "experiment", "sample")){
    # Restrict the fts filtering to columns belonging to the requested
    # SRA accession levels; a no-op when every level is requested.
    valid_levels <- c("run", "experiment", "sample", "study")
    acc_levels <- match.arg(acc_levels, valid_levels, several.ok = TRUE)
    .mm(paste0("Filtering for the following acc_levels: ",
                paste0(acc_levels, collapse = ", ")), "search")
    # match.arg guarantees validity, so all four present == nothing to do
    if (all(valid_levels %in% unique(acc_levels))){
        .mm("Nothing to filter - returning original df", "adverse")
        return(df)
    }
    level_columns <- .findSRAAccessionLevelColumnNames(acc_levels = acc_levels)
    filterByTerm(df = df,
                    query = query,
                    filter_columns = level_columns)
}
#' Filter df according to query matches
#' only within accession levels of interest
#'
#' Performs fts search on the data frame according to the query, only searching
#' in the columns corresponding to specified accession levels of interest
#'
#'
#' @param query Query to be passed to MATCH operator (for fts)
#' @param df Data frame to be filtered
#' @param acc_levels Accession levels to search within
#' (choose from: run, experiment, sample, study, gsm, gse);
#' defaults to c("run", "experiment", "sample", "gsm")
#' @return Filtered df (containing only rows matching query
#' within specified accession levels)
#'
#' @family Workflow functions
#' @family Manipulation functions
#'
#'
#' @export
#'
filterByTermByAccessionLevel <- function(
    query,
    df,
    acc_levels = c("run", "experiment", "sample", "gsm")){
    # Restrict fts filtering to columns of the requested accession levels
    # (SRA and/or GEO); a no-op when every level is requested.
    valid_levels <- c("run", "experiment", "sample", "study", "gsm", "gse")
    acc_levels <- match.arg(acc_levels, valid_levels, several.ok = TRUE)
    .mm(paste0("Filtering for the following acc_levels: ",
                paste0(acc_levels, collapse = ", ")), "search")
    # match.arg guarantees validity, so all six present == nothing to do
    if (all(valid_levels %in% unique(acc_levels))){
        .mm("Nothing to filter - returning original df", "adverse")
        return(df)
    }
    level_columns <- .findAccessionLevelColumnNames(acc_levels = acc_levels)
    filterByTerm(df = df, query = query, filter_columns = level_columns)
}
#' Convert from SRA-GEO Categories
#'
#' Converts from SRA-GEO Categories to corresponding SRA library_strategy
#' and GEO (study) type.
#' For further details regarding available categories (and their
#' corresponding elements), inspect the \code{SRA_GEO_Category_Conversion}
#' object or see its documentation page: \code{?SRA_GEO_Category_Conversion}.
#'
#' @param x Character with a category (can be a vector).
#' NOTE: must match exactly (but matching is case insensitive)
#' @return A list with a vector each for SRA_library_strategy and GEO_type
#'
#'
#' @examples
#' convertCategoriesToLibraryStrategyType("Transcriptome MA")
#' convertCategoriesToLibraryStrategyType("ChIP-Seq")
#'
#'
#' @export
convertCategoriesToLibraryStrategyType <- function(x){
    # Map user-supplied categories (case-insensitively) onto the
    # corresponding SRA library strategies and GEO types, using the
    # packaged SRA_GEO_Category_Conversion lookup table.
    DB <- NULL # silence R CMD check NOTE for NSE in dplyr::filter

    # Retrieve category conversion data frame
    conv <- SpiderSeqR::SRA_GEO_Category_Conversion

    # Case-insensitive match of requested categories against the table
    wanted <- tolower(x)
    hits <- tolower(conv$Category) %in% wanted
    if (sum(hits) == 0) stop("Provide a valid category")
    conv <- conv[hits, ]

    out <- list()
    out$SRA_library_strategy <- dplyr::filter(conv, DB == "SRA")$Name
    out$GEO_type <- dplyr::filter(conv, DB == "GEO")$Name

    # Replace zero-length results with NULL while keeping both list
    # elements present (assigning list(NULL) avoids element removal)
    if (length(out$SRA_library_strategy) == 0){
        out[1] <- list(NULL)
    }
    if (length(out$GEO_type) == 0){
        out[2] <- list(NULL)
    }
    return(out)
}
#' Find SRA column names corresponding to accession levels (ORI)
#'
#' @param acc_levels Accession levels
#' @param add_run_accession Logical indicating whether to add
#' run_accession column name
#' @param table_name A character with table name
#' @return Vector with column names
#'
#' NOTE: works on the original column names (from the database)
#'
#' @examples
#' # .findSRAAccessionLevelColumnNames("run")
#'
#' @keywords internal
#'
.findSRAAccessionLevelColumnNames <-
function(acc_levels = c("run", "experiment", "sample", "study"),
add_run_accession = TRUE, table_name = "sra_ft"){
    # Look up the table's column names from the SRA connection, which is
    # expected to exist as `sra_con` in the global environment.
    database_name <- "sra_con"
    database_env <- ".GlobalEnv"
    col_list <- DBI::dbListFields(get(database_name,
                                      envir = get(database_env)), table_name)
    # The *_ID columns delimit the per-level column groups: each level's
    # columns run from its ID column up to (but excluding) the next
    # level's ID column; study columns run to the end of the table.
    run_beg <- grep("^run_ID$", col_list)
    exp_beg <- grep("^experiment_ID$", col_list)
    sample_beg <- grep("^sample_ID$", col_list)
    study_beg <- grep("^study_ID$", col_list)
    run_cols <- col_list[run_beg:(exp_beg-1)]
    exp_cols <- col_list[exp_beg:(sample_beg-1)]
    sample_cols <- col_list[sample_beg:(study_beg-1)]
    study_cols <- col_list[study_beg:length(col_list)]
    all_levels <- c("run", "experiment", "sample", "study", "gsm", "gse")
    sra_levels <- c("run", "experiment", "sample", "study")
    # Check that there is at least one valid SRA level provided
    if (!any(acc_levels %in% sra_levels)){
        stop("Provide at least one valid SRA accession level")
    }
    # Levels outside the SRA/GEO vocabulary are tolerated but flagged
    if (!all(acc_levels %in% all_levels)){
        warning("Some accession levels do not belong to SRA/GEO type")
    }
    # Create a vector with column names of interest
    sel_cols <- NULL
    # Add run_accession if run is not one of the levels.
    # `&&` (scalar short-circuit) replaces the original vectorized `&`,
    # which only worked because both operands happened to be length one.
    if (add_run_accession && !("run" %in% acc_levels)){
        sel_cols <- c(sel_cols, "run_accession")
    }
    if ("run" %in% acc_levels){
        sel_cols <- c(sel_cols, run_cols)
    }
    if ("experiment" %in% acc_levels){
        sel_cols <- c(sel_cols, exp_cols)
    }
    if ("sample" %in% acc_levels){
        sel_cols <- c(sel_cols, sample_cols)
    }
    if ("study" %in% acc_levels){
        sel_cols <- c(sel_cols, study_cols)
    }
    # Defensive guard: unreachable given the SRA-level check above, but
    # kept to protect against future changes to that check.
    if (is.null(sel_cols)){
        stop("Provide at least one accession level to search within")
    }
    return(sel_cols)
}
#' Find column names corresponding to accession levels in SRA and GEO (SP)
#'
#' @param acc_levels Accession levels
#' @return Vector with column names
#'
#' NOTE: works on the SpiderSeqR column names (i.e. with prefixes)
#'
#' @examples
#' # .findAccessionLevelColumnNames("run")
#'
#' @keywords internal
#'
.findAccessionLevelColumnNames <-
function(acc_levels = c("run", "experiment", "sample", "gsm")){
    # Valid levels: the four SRA levels plus the two GEO levels
    sra_accessions <- c("run", "experiment", "sample", "study")
    acc_levels <- match.arg(acc_levels,
                            c(sra_accessions, "gsm", "gse"),
                            several.ok = TRUE)
    out_cols <- character()
    # SRA levels: fetch the raw database column names, then run them
    # through the renaming helper (via an empty data frame) so the
    # SpiderSeqR prefixes are applied.
    sra_requested <- acc_levels[acc_levels %in% sra_accessions]
    if (length(sra_requested) > 0){
        raw_cols <- .findSRAAccessionLevelColumnNames(acc_levels = sra_requested)
        empty_df <- stats::setNames(data.frame(matrix(ncol = length(raw_cols),
                                                      nrow = 0)), raw_cols)
        empty_df <- .renameSRAColumns(empty_df)
        out_cols <- c(out_cols, colnames(empty_df))
    }
    # GEO levels come straight from the valid-column listing
    if ("gsm" %in% acc_levels){
        out_cols <- c(out_cols, listValidColumns()$GSM)
    }
    if ("gse" %in% acc_levels){
        out_cols <- c(out_cols, listValidColumns()$GSE)
    }
    return(out_cols)
}
|
3462aa9f81cc985fea0097903dcb6d34b31c90f3
|
537387f00bca75524b026cf8c55e5ade18b1ed6f
|
/man/colgrp.Rd
|
c64c788d97ee12ab6f228f27b7fa9399a6ee9282
|
[
"MIT"
] |
permissive
|
cran/groupr
|
3dfb212389213322eabf7a0f5229520da2334bf8
|
df5392db79144789a613aaf49e95afcaf43fba6a
|
refs/heads/master
| 2023-03-30T15:24:33.731772
| 2023-03-23T02:50:02
| 2023-03-23T02:50:02
| 304,045,652
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 607
|
rd
|
colgrp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/columns.R
\name{colgrp}
\alias{colgrp}
\title{Make a Single Column Grouping}
\usage{
colgrp(x, data_name, index_name = "group")
}
\arguments{
\item{x}{A tibble}
\item{data_name}{A string, the name of the new column}
\item{index_name}{A string, the name of the new column index}
}
\value{
A grouped tibble
}
\description{
Takes a tibble and groups columns together into a single data column. All
columns that are not row indices will be grouped, and the resulting column
will be named \code{data_name}.
}
|
45074f650d76b7105026fda023bc6f0a95659bdc
|
c1b7138a9fd9e415276ef6428e1faab83b45d895
|
/calculate-error.R
|
8b43d16e4356af52d2218f4bca323a88b59e57d0
|
[] |
no_license
|
hmendeslx/cpsc540-project
|
7e504eea0c24cfa95d47b34226db713f7d98728e
|
7467372676ff7b17eac5a5cd8f1a8d255ea09858
|
refs/heads/master
| 2020-12-25T13:08:08.245515
| 2013-04-16T23:43:20
| 2013-04-16T23:43:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 686
|
r
|
calculate-error.R
|
#!/usr/local/bin/Rscript
# Calculate the mean absolute error of the predictions.

# Read an actual-values table (columns: id, y) and a predictions table
# (single column: yhat), combine them row-wise, and add the per-row
# absolute error. Returns the combined data frame.
read_split <- function(y_path, yhat_path) {
  actual <- read.table(y_path, header = TRUE)
  colnames(actual) <- c('id', 'y')
  predicted <- read.table(yhat_path)
  colnames(predicted) <- 'yhat'
  combined <- cbind(actual, predicted)
  combined$abs.error <- abs(combined$yhat - combined$y)
  combined
}

train.data <- read_split('data/train-y.tab', 'data/train-yhat.tab')
cross.data <- read_split('data/cross-y.tab', 'data/cross-yhat.tab')

# Report training and cross-validation MAE on one line
cat(mean(train.data$abs.error), mean(cross.data$abs.error))
cat('\n')
|
6b33c40aaa451f11a19b6670a90c5c53b2094fdd
|
e3bc9fddd975ad0c0e885055315e8541e36d1c66
|
/FireRisk_Dashboard/server.R
|
8ef6d690e8d2d544440e2c360fb49b888b5be4ee
|
[
"MIT"
] |
permissive
|
mlane3/fire_risk_analysis
|
3676b58e5a2618b204ac544a2fe6886fa59fb62e
|
0318990ca5400deb3fccdb0f40ff263885e281e0
|
refs/heads/master
| 2020-07-12T06:11:59.037892
| 2019-08-27T20:54:30
| 2019-08-27T20:54:30
| 204,315,358
| 0
| 0
|
MIT
| 2019-08-25T15:45:15
| 2019-08-25T15:45:15
| null |
UTF-8
|
R
| false
| false
| 4,137
|
r
|
server.R
|
# Data dashboard for Metro21 Fire Risk Analysis Project
# Created for: Pittsburgh Bureau of Fire
# Authors: Qianyi Hu, Michael Madaio, Geoffrey Arnold
# Latest update: February 20, 2018
# The server side of the dashboard
# NOTE(review): global.R is expected to define loadModel, addUpdateDoc,
# bookmark_id, conn and inputs -- none are defined in this file; verify.
source("global.R", local = FALSE)
shinyServer(function(input, output, session) {
# Bookmark Event
# Persist the user's current input values when the bookmark button is
# pressed, merging them over any previously saved values in `inputs`.
observeEvent(input$bookmark, {
bookmarks <- isolate(reactiveValuesToList(input))
if (is.null(inputs)) {
data <- bookmarks
} else {
data <- bookmarks
# Fill missing current values from the saved ones, skipping
# internal inputs whose names start with "_"
for (i in names(inputs[!grepl("^_", names(inputs))])) {
data[i] <- ifelse(is.null(bookmarks[i]), inputs[i], bookmarks[i])
names(data[i]) <- i
}
}
# print(bookmarks)
# Write the merged bookmark document to the backing store
addUpdateDoc(bookmark_id, data, conn)
showNotification("Your bookmarks have been saved successfully", type = "message")
})
# Load the risk model once per session.
# NOTE(review): `loadModel` is used as an object, not called as a
# function -- presumably a data frame (or lazy promise) from global.R.
model <- loadModel
# Convert the continuous 0-1 risk score into an integer 1-10 score
model$Score <- ceiling(model$RiskScore*10)
# get data subset
# Reactive subset of the model driven by the sidebar filters; each
# "All ..." choice disables the corresponding filter.
data <- reactive({
# default option: select all
print("Filtering Fire Risk Scores")
d <- subset(model, subset = (Score <= input$range[2] & Score >= input$range[1]))
# filter by property type (STATEDESC)
if (!("All Classification Types" %in% input$property)){
d <- subset(d, subset=(state_desc %in% input$property))
}
# filter by usage type (USEDESC)
if (!("All Usage Types" %in% input$use)) {
d <- subset(d, subset=(use_desc %in% input$use))
}
# filter by neighborhood (NEIGHDESC)
if (!("All Neighborhoods" %in% input$nbhood)) {
d <- subset(d, subset=(geo_name_nhood %in% input$nbhood))
}
# filter by fire district
if (!("All Fire Districts" %in% input$fire_dst)){
d <- subset(d, subset=(Pgh_FireDistrict %in% input$fire_dst))
}
d
})
# visualization plot
# Bar chart of mean risk score grouped by the user-selected x variable,
# rendered with plotly for interactivity.
output$distPlot <- renderPlotly({
# Map the human-readable axis choice to the underlying column name
if (input$xvar == "Property Classification") {
x_axis <- "state_desc"
} else if (input$xvar == "Property Usage Type") {
x_axis <- "use_desc"
} else if (input$xvar == "Neighborhood") {
x_axis <- "geo_name_nhood"
}
if (input$yvar == "Fire Risk Scores") {
if (input$xvar == "Fire District") {
x_axis <- "Pgh_FireDistrict"
}
}
y_axis <- input$yvar
## Create visualization ##
if (y_axis == "Fire Risk Scores"){
print("displaying Fire risk")
# consider average risk score by x axis
# Few categories (<= 15): plain vertical bar chart
if (nlevels(data()[[x_axis]]) <= 15){
plot <- ggplot(data = data()[!is.na(data()[[x_axis]]),],aes(x=data()[!is.na(data()[[x_axis]]),][[x_axis]],y=Score)) +
stat_summary(fun.y = "mean",geom = "bar",width=0.8,fill="steelblue") +
theme(plot.title = element_text(size = 18, face = "bold"),text = element_text(size=12)) +
ggtitle("Average Risk Score") + ylim(0,10) +
xlab(x_axis) +
ylab("Risk Score")
}else{
# Many categories: flip coordinates and order bars by mean score
data_selected <- data()[!is.na(data()[[x_axis]]),]
ag_score <- aggregate(data_selected[["Score"]] ~ data_selected[[x_axis]], data_selected, mean)
ag_label <- as.vector(unlist(ag_score[order(ag_score[2]),][1]))
print(length(ag_label))
# h = 550 + 10 * length(ag_label)
plot <- ggplot(data = data_selected, aes(x=data_selected[[x_axis]],y=Score)) +
stat_summary(fun.y = "mean",geom = "bar",width=0.8,fill="steelblue") +
coord_flip() + scale_x_discrete(limits=ag_label,labels=ag_label) +
ggtitle("Average Risk Score") + ylim(0,10) +
theme(plot.title = element_text(size = 18, face = "bold"),text = element_text(size=12)) +
xlab(x_axis) +
ylab("Risk Score")
}
}
plot
})
# Filtered data table with horizontal scrolling
output$table <- DT::renderDataTable(data(), options = list(scrollX = TRUE))
# download table
# Serve the currently filtered data as a CSV download
output$downloadTable <- downloadHandler(
filename = "table.csv",
content = function(file) {
write.csv(as.data.frame(data()), file)
}
)
# print total number of records selected (for reference)
output$n_records <- renderText({
nrow(data())
})
})
|
65f86daa07d532f8711392faec2c08515eaa0b3e
|
c3b7dcd1e0f5603b652204dbdc9403a3bed4634a
|
/Shiny/4_Shiny_Database_SQLite_Immunogenicity_3Tier/ui.R
|
ba42a535167fff07a7eec998fa68dfe52e1fe18c
|
[] |
no_license
|
philbowsher/shiny-rmd-APIs-DBs-pharma-session-2019-11-12
|
36afb2c2b27e2393f99c63b2b8b74aabe3c07b4f
|
d3e72c0295c37aee4d9259be257611146e11d16f
|
refs/heads/master
| 2023-03-11T17:17:12.921600
| 2023-02-08T14:30:10
| 2023-02-08T14:30:10
| 221,027,744
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,248
|
r
|
ui.R
|
#Required library
library(plotly)
library(shinydashboard)
# Get current table list
# Open the SQLite database just long enough to enumerate its tables for
# the three table-selector dropdowns, then disconnect.
con <- DBI::dbConnect(RSQLite::SQLite(), "database.sqlite")
table_names <- c("{Select Table}", DBI::dbListTables(con))
DBI::dbDisconnect(con)
# Sidebar: one (table, cutpoint) selector pair per assay tier, plus
# download buttons for the computed result sets.
sidebar <- dashboardSidebar(
sidebarMenu(
selectInput(
inputId = "table1",
label = "Screening Table",
choices = table_names),
numericInput("screening_cutpoint", "Enter Screening Cutpoint",0),
tags$hr(),
selectInput(
inputId = "table2",
label = "Confirmatory Table",
choices = table_names),
numericInput("confirmatory_cutpoint", "Enter Confirmatory Cutpoint",0),
tags$hr(),
selectInput(
inputId = "table3",
label = "Titer Table",
choices = table_names),
# fileInput('file3', 'Input File for Titer', accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
tags$hr(),
downloadButton('downloadData1', 'True Positives'),
tags$hr(),
downloadButton('downloadData2', 'All Results'),
tags$hr(),
downloadButton('downloadData3', 'Top 1000'),
tags$hr()
))
# Body: inline CSS for sidebar/download-button styling, followed by one
# tab per input tier and result view.
# NOTE(review): the three #downloadData* CSS rules are identical, and
# "#fffff" is a 5-digit hex colour; the stylesheet is a runtime string
# so it is left verbatim here -- confirm intended colour before editing.
body <- dashboardBody(tags$head
(tags$style(HTML #CSS Rules: Hash (#) is CLASS & period (.) is ID
('
.sidebar-menu>li>a
{padding: 0px 5px 0px 5px; #top, right, bottom, left
}
#Table1
{text-align:center;
}
#downloadData1
{width: 130px;
height: 35px;
font-size: 16px;
margin-left:50px;
margin-top: 20px;
margin-bottom: 20px;
color: #fffff;
background-color: #367fa9;
border: #000000;
padding-right: 12px;
padding-top: 8px;
}
#downloadData2
{width: 130px;
height: 35px;
font-size: 16px;
margin-left:50px;
margin-top: 20px;
margin-bottom: 20px;
color: #fffff;
background-color: #367fa9;
border: #000000;
padding-right: 12px;
padding-top: 8px;
}
#downloadData3
{width: 130px;
height: 35px;
font-size: 16px;
margin-left:50px;
margin-top: 20px;
margin-bottom: 20px;
color: #fffff;
background-color: #367fa9;
border: #000000;
padding-right: 12px;
padding-top: 8px;
}
.form-group.shiny-input-container
{width: 250px;
margin-bottom: 10px;
}
'))),
# One tab per tier of raw inputs, the derived result tables, and plots
tabsetPanel(type = "tabs",
tabPanel(h4("Tier1: Screening Inputs"),
DT::dataTableOutput("screening")),
tabPanel(h4("Tier2: Confirmatory Inputs"),
DT::dataTableOutput("confirmatory")),
tabPanel(h4("Tier3: Titer Inputs"),
DT::dataTableOutput("titer")),
tabPanel(h4("True Positives"),
DT::dataTableOutput("truepositives")),
tabPanel(h4("Plots"),
box(title = "Histogram", status = "primary", plotlyOutput("plot", height = 250)),
box(title = "Histogram2", status = "success", plotlyOutput("plot2", height = 250)),
box(title = "Clicked", status = "info", verbatimTextOutput("event"))
),
tabPanel(h4("Aggregated Data Histogram"),
sliderInput("stDev", "Specify the percentile value to select positive samples?", min = 0, max = .99, value = .95, step = .05),
box(title = "Histogram", status = "primary", plotlyOutput("plot3", height = 250)),
verbatimTextOutput("results"),
verbatimTextOutput("table")),
tabPanel(h4("All Results"),
DT::dataTableOutput("allresults")),
tabPanel(h4("Top 1000"),
DT::dataTableOutput("top1000"))
))
# Assemble the dashboard page from the header, sidebar and body
ui <- dashboardPage(
dashboardHeader(title = strong("IMMUNOGENICITY")),
sidebar,
body
)
|
88f06c2a4aa83eaf2fa3ad7dc2cb0144cc869537
|
8c56fe0b4119962be654c966be19a444c3300e9d
|
/man/dataTestExperiment.Rd
|
8cd1f0d9780a4fca46daf5c97247f78a7a0a296c
|
[] |
no_license
|
jonathantemplin/EPSY905R
|
0b7a776f74be21a26f2baff3c543f1e39c3fd826
|
1d131ea38f87d01a96e78c30527bda1cbf813747
|
refs/heads/master
| 2021-09-05T07:09:59.540470
| 2018-01-24T18:53:45
| 2018-01-24T18:53:45
| 115,202,382
| 0
| 3
| null | 2018-01-25T03:27:57
| 2017-12-23T14:56:32
|
R
|
UTF-8
|
R
| false
| true
| 750
|
rd
|
dataTestExperiment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dataTestExperiment}
\alias{dataTestExperiment}
\title{Data file for examples used in EPSY 905}
\format{A data frame with 100 observations and 5 variables
\describe{
\item{PersonID}{Id number}
\item{Senior}{Dummy-coded indicator of a person's status as a Senior (vs. Freshman)}
\item{New}{Dummy-coded indicator of a person's status as being in the New type of instruction group (vs. Old)}
\item{Group}{Numeric group number produced from permutation of Senior and New (four groups total)}
\item{Test}{Test score}
}}
\usage{
dataTestExperiment
}
\description{
A dataset containing the data for interactions examples
}
\keyword{datasets}
|
9b26a3af80b01cf6f7dfbda652e036c77db4e040
|
91602005bf63ec56c158e4d72beb254e3481e581
|
/code/v3_final_figure_pdfs/fig1.R
|
739864e128b29aabed8f1c67fe55f319d482bcd4
|
[] |
no_license
|
gregbritten/wham-sim
|
d706d18f48768ca89c25db6f47a01056599e2747
|
d2ecaba6ad8f43ef15df3b9570097ecb6e0480d9
|
refs/heads/master
| 2023-03-30T14:32:57.267608
| 2021-04-08T17:42:07
| 2021-04-08T17:42:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 930
|
r
|
fig1.R
|
# Figure 1
# dAIC by process and stock
# Reads the precomputed dAIC table and draws one free-scale panel per
# random-effect process, with stocks distinguished by point shape.
library(wham)
library(here)
library(tidyverse)
df <- readRDS(here("plots","old","bias_correct_oepe","daic.rds"))
# Short labels for the four random-effect processes, and a "<process>-<m>"
# model label for the x axis
df$re2 <- factor(df$re, levels=c("NAA","M","sel","Ecov2"), labels=c("NAA","M","Sel","Ecov"))
df$Model <- paste0(df$re2,"-",df$m)
# Long, human-readable labels for the facet strips
df$relab <- factor(df$re, levels=c("NAA","M","sel","Ecov2"), labels=c("Numbers at age","Natural mortality","Selectivity","Ecov (CPI-Recruitment)"))
# cairo_pdf renders the plotmath Delta in the y-axis title reliably
grDevices::cairo_pdf(filename=here("plots","v3","final_pdfs","fig1_daic.pdf"), width=9, height=3)
print(ggplot(df, aes(x=Model, y=daic, shape=Stock)) +
geom_point(size=3) +
ylab(expression(Delta*phantom()*AIC)) +
facet_wrap(vars(relab), nrow=1, scales='free') +
theme_bw() +
theme(strip.text.x = element_text(size = 10), axis.text.x = element_text(size = 8,angle = 45, vjust = 1, hjust=1),
axis.title.x = element_text(margin=margin(-5,0,0,0))))
dev.off()
|
5e26ca7911f67b2cd9cb187c46242a37f557e271
|
af716fe719978bda13825c90190b5fd40a644edc
|
/man/predict.plm.Rd
|
9b07c48c483d2ca5f1aaa8c2fc45ba9d549f8505
|
[] |
no_license
|
cran/plm
|
492fed724b1b4e917829990d1295117055fcdb50
|
b1eb02da282264741609692ac73c61b8722fc7e8
|
refs/heads/master
| 2023-04-15T03:38:07.442011
| 2023-04-09T10:40:02
| 2023-04-09T10:40:02
| 17,698,568
| 19
| 23
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,154
|
rd
|
predict.plm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tool_methods.R
\name{predict.plm}
\alias{predict.plm}
\title{Model Prediction for plm Objects}
\usage{
\method{predict}{plm}(
object,
newdata = NULL,
na.fill = !inherits(newdata, "pdata.frame"),
...
)
}
\arguments{
\item{object}{An object of class \code{"plm"},}
\item{newdata}{An optional pdata.frame in which to look for variables to be
used for prediction. If \code{NULL}, the fitted values are returned.
For fixed effects models, supplying a pdata.frame is recommended.}
\item{na.fill}{A logical, only relevant if \code{object} is a pdata.frame, indicating
whether for any supplied out-of-sample indexes (individual, time,
combination of both), the missing fixed effect estimate is filled
with the weighted mean of the model's present fixed effect estimates
or not.}
\item{\dots}{further arguments.}
}
\value{
A numeric (or a pseries if \code{newdata} is a pdata.frame) carrying the
predicted values with length equal to the number of rows as the data
supplied in \code{newdata} and with names the row names of \code{newdata} or, if
\code{newdata = NULL}, the fitted values the original model given in \code{object}.
}
\description{
Predicted values of response based on plm models.
}
\details{
\code{predict}calculates predicted values by evaluating the regression function of
a plm model for \code{newdata} or, if \code{newdata = NULL}, it returns the fitted values
the plm model.
The fixed effects (within) model is somewhat special in prediction as it has
fixed effects estimated per individual, time period (one-way) or both (two-ways
model) which should be respected when predicting values relating to these
fixed effects in the model: To do so, it is recommended to supply a pdata.frame
(and not a plain data.frame) in \code{newdata} as it describes the relationship
between the supplied data and the individuals and/or time periods. In case
the \code{newdata}'s pdata.frame has out-of-sample data (data contains individuals
and/or time periods not contained in the original model), it is not clear
how values are to be predicted and the result will contain \code{NA}
values for these out-of-sample data. Argument \code{na.fill} can be set to \code{TRUE}
to apply the original model's weighted mean of fixed effects for the
out-of-sample data to derive a prediction.
If a plain data.frame is given in \code{newdata} for a fixed effects model, the
weighted mean is used for all fixed effects as \code{newdata} for prediction as a
plain data.frame cannot describe any relation to individuals/time periods
(\code{na.fill} is automatically set to \code{TRUE} and the function warns).
See also \strong{Examples}.
}
\examples{
library(plm)
data("Grunfeld", package = "plm")
# fit a fixed effect model
fit.fe <- plm(inv ~ value + capital, data = Grunfeld, model = "within")
# generate 55 new observations of three firms used for prediction:
# * firm 1 with years 1935:1964 (has out-of-sample years 1955:1964),
# * firm 2 with years 1935:1949 (all in sample),
# * firm 11 with years 1935:1944 (firm 11 is out-of-sample)
set.seed(42L)
new.value2 <- runif(55, min = min(Grunfeld$value), max = max(Grunfeld$value))
new.capital2 <- runif(55, min = min(Grunfeld$capital), max = max(Grunfeld$capital))
newdata <- data.frame(firm = c(rep(1, 30), rep(2, 15), rep(11, 10)),
year = c(1935:(1935+29), 1935:(1935+14), 1935:(1935+9)),
value = new.value2, capital = new.capital2)
# make pdata.frame
newdata.p <- pdata.frame(newdata, index = c("firm", "year"))
## predict from fixed effect model with new data as pdata.frame
predict(fit.fe, newdata = newdata.p)
## set na.fill = TRUE to have the weighted mean used to for fixed effects -> no NA values
predict(fit.fe, newdata = newdata.p, na.fill = TRUE)
## predict with plain data.frame from fixed effect model: uses mean fixed effects
## for prediction and, thus, yields different result with a warning
predict(fit.fe, newdata = newdata)
}
\keyword{regression}
|
c2ba4cf74a19e4eb3af88a5e4d3caac686af6ee1
|
5127927036020b809569d9b57581518f13e4c548
|
/Figures/Figure_2/Figure2.R
|
169bb1a35b1f640cb36cecbdede4e3244bd423ed
|
[
"MIT"
] |
permissive
|
zamanianlab/Bmsinglecell-ms
|
f2abacefbe74feb3041df06076b71fc740ea8779
|
ebcf3846212be1a97583a7777663910033a76484
|
refs/heads/main
| 2023-06-30T09:25:14.631569
| 2023-05-01T23:25:16
| 2023-05-01T23:25:16
| 528,939,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55,603
|
r
|
Figure2.R
|
############
## FIG 2 B. malayi mf single-cell atlas via 10X Genomics and cluster annotation
############
#data wrangling/analysis
library(tidyverse)
library(Seurat)
library(dplyr)
# plotting
library(magick)
library(pdftools)
library(cowplot)
library(ggplot2)
library(ggdendro)
library(pals)
library(ggtext)
library(ZamanianLabThemes)
library(viridis)
#other
# NOTE(review): placeholder path -- must be edited to the repository root
# before running (errors otherwise); here::here() would avoid setwd().
setwd("path/to/directory")
library(here)
# read in integrated seurat object
new_combined <- readRDS(here("Figures/Figure_2/10XGenomics/scmulti_integrated.RDS"))
# Work from the RNA assay (normalized counts) rather than the integrated assay
DefaultAssay(new_combined) <- "RNA"
# pull out the normalized counts, metadata, and UMAP coordinates into dataframes for plotting in ggplot2
data <- as_tibble(new_combined@reductions$umap@cell.embeddings, rownames = 'index') # UMAP coordinates for each cell
md <- as_tibble(new_combined@assays@meta.data, rownames = 'index') # metadata detailing ut/t identity and cluster information
counts <- as_tibble(new_combined@assays[["RNA"]]@data, rownames = "gene_id") %>% # gene expression matrix of normalized counts
pivot_longer(!gene_id, names_to = "index", values_to = "counts") # long form: one row per (gene, cell)
# color palette (30 total)
dakota <- c("#cd4c42", "#5c8492", "#b25757", "#fe906a", "#6f636b", "#6a9491", "#82ac92", "#a26f6a", "#184459", "#596c7f", "#d97f64", "#263946", "#bebab6", "#7a7f84", "#cab6b2", "#fae2af", "#f3933b","#65838d", "#82aca7", "#a0b4ac", "#b5b9b0", "#fbc1c1", "#e89690", "#d76660", "#cac6b9", "#878787", "#cb8034", "#7f93a2", "#ac8287", "#c1d6d3" )
####################
### Fig. 2a - Total utBM UMAP with bulk vs sc RNA-seq inset
####################
# subset data to only the untreated
# Join UMAP coordinates, expression, and metadata; keep untreated (utBM)
# cells with normalized counts >= 2.2, reduced to one row per cell.
data2 <- data %>%
left_join(counts) %>%
left_join(md) %>%
subset(counts >= 2.2) %>%
subset(orig.ident == "utBM") %>%
select("index", "UMAP_1", "UMAP_2","integrated_snn_res.0.5", "orig.ident") %>%
distinct()
#Assign identified cluster names
# Renumber Seurat clusters 0-26 to 1-27 and attach the annotation labels
data2$integrated_snn_res.0.5 <- factor(data2$integrated_snn_res.0.5, levels = c("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26"), labels = c("1", "Muscle (2)", "3", "4", "5", "Coelomocyte (6)", "7", "8", "Mesoderm (9)", "10", "Neuron (11)", "Neuron (12)", "Neuron (13)", "Canal-assoc. (14)", "Secretory (15)", "16", "Mesoderm (17)", "Neuron (18)", "Muscle (19)", "20", "21", "Inner body (22)", "Interneuron (23)", "Neuron (24)", "Neuron (25)", "Neuron (26)", "Neuron (27)"))
#table for cluster numbers on umap
# Pre-made table of label text/coordinates plus leader-line segments
clusters <- read.csv(here("Figures/Figure_2/fig2a_labels.csv"))
# plot
# UMAP scatter coloured by cluster, with manual labels and leader lines
global_plot <- data2 %>%
ggplot(aes(x = UMAP_1, y = UMAP_2))+
geom_point(data = data2, aes(color = integrated_snn_res.0.5), size = 0.01, show.legend = FALSE)+
geom_text(data = clusters, aes(x = x, y = y, label = str_wrap(text, width = 8)), size = 3, fontface = "plain")+
geom_segment(data = clusters, aes(x = xline, y = yline, xend = xend, yend = yend), color = "black", size = 0.5)+
scale_size_area(max_size = 15)+
#scale_color_manual(values = dakota)+
scale_color_manual(values = c("#c1d6d3", "#5c8492", "#b25757", "#6a9491", "#7a7f84", "#cab6b2", "#fae2af", "#f3933b","#ac8287", "#65838d", "#82aca7", "#fe906a", "#e3e2e1", "#e89690","#cd4c42", "#6f636b", "#82ac92", "#a26f6a", "#184459", "#596c7f","#263946", "#d97f64", "#a0b4ac", "#e3e2e1", "#fbc1c1", "#7f93a2", "#d76660", "#cac6b9", "#e3e2e1", "#cb8034"), labels = function(color) str_wrap(color, width = 8))+
labs( color = "Cell Type")+
theme(#text=element_text(family="Helvetica"),
axis.text.x = ggplot2::element_text(size = 8),
axis.text.y = ggplot2::element_text(size = 8, hjust = 1),
axis.title.x = ggplot2::element_text(size = 10),
axis.title.y = ggplot2::element_text(size = 10),
legend.text = element_markdown(size = 8, face = "plain"),
legend.key.size = unit(0.2, "cm"),
panel.background = element_blank(),
legend.margin = margin(0, 0, 0, 0.5, "cm"),
axis.line = element_line (colour = "black"),
legend.background=element_blank(),
legend.key = element_blank(),
plot.margin = margin(0.5, 0.5, 0.5, 0.25, unit = "cm"))+
guides(color = guide_legend(override.aes = list(size=3), ncol = 1))+
NULL
##################
### Figure 2a- inset comparing the sc raw counts to the bulk RNA raw counts for mf (READ RDS AT TOP)
##################
# Rfast is only needed (for rowsums) if the provenance code below is
# uncommented to regenerate the comparison table.
library(Rfast)
# Load the precomputed bulk-vs-single-cell comparison table instead of
# rerunning the generation code below.
rna <- readRDS(here("Figures/Figure_2/bulk_sc_rnaseq_comp.RDS"))
###########################################################
# Provenance: how bulk_sc_rnaseq_comp.RDS was generated. Kept for
# reference only and now fully commented out -- in the original script
# several of these lines had lost their leading "#" and would have
# errored when sourced (e.g. the left_join() against the commented-out
# `sc_rna` object, and assignments to `matrix`, a base function name).
#
# pull out the raw counts for each gene in each cell
# raw <- as_tibble(new_combined@assays[["RNA"]]@data, rownames = "gene_id") %>%
#     pivot_longer(!gene_id, names_to = "index", values_to = "counts")
# create a counts matrix from the raw count dataframe
# matrix <- raw %>%
#     pivot_wider(names_from = index, values_from = counts)
# sum raw counts for each gene across all cells
# matrix$total <- rowsums(as.matrix(matrix[,2:46621]))
# extract count data for each gene
# sums <- matrix %>% select("gene_id", "total")
# saveRDS(sums, "sc_count_summary.RDS")
# sc_rna <- readRDS("sc_count_summary.RDS")
# For the bulk RNA seq for bma mf, pull in the object with the tpm table
# bulk_rna <- readRDS(here("Figures/Figure_2/bulk_rna_tpm.RDS"))
# combine the two rna-seq datasets
# rna <- left_join(bulk_rna, sc_rna, by = "gene_id")
# saveRDS(rna, "bulk_sc_rnaseq_comp.RDS")
##############################################################
# trim the dataset of low count/tpm values and NAs in either dataset
rna <- rna %>%
    filter(tpm >= 1) %>%
    filter(total > 1) %>%
    na.omit()
# calculate correlation coefficient (r)
cor.test(log10(rna$total), log10(rna$tpm), method = "pearson") # 0.85
# calculate R^2 (coefficient of determination)
summary(lm(log10(rna$tpm) ~ log10(rna$total))) # R^2 = 0.72, p < 2.2e-16
# plot the comparison; named inset_plot (rather than `plot`) so that
# base::plot is not masked for the rest of the script
inset_plot <- ggplot(rna, aes(log10(total), log10(tpm))) +
    geom_point(size = 0.05) +
    geom_smooth(method = "lm", se = FALSE, color = "red", size = 0.5) +
    scale_x_continuous(expand = c(0, 0)) +
    scale_y_continuous(expand = c(0, 0)) +
    labs(x = expression("SC:" ~Log[10]*"(Total)"), y = expression("Bulk:" ~Log[10]*"(tpm)")) +
    theme(panel.background = element_blank(),
          panel.grid = element_line(),
          axis.text = element_text(size = 6),
          axis.title = element_text(size = 6),
          panel.border = element_rect(color = "black", fill = NA, size = 0.5),
          plot.margin = margin(0, 0, 0, 0, "cm"))
# embed the RNA comparison inset into the global UMAP plot
global_plot <- global_plot + annotation_custom(ggplotGrob(inset_plot), xmin = -15, xmax = -6, ymin = 5, ymax = 15)
###################################################################################
## Figure 2b - Mapping neuron classes (cholinergic, amingergic, GABAergic, etc.) ##
###################################################################################
# read in csv with neuron info
# Marker-gene table: gene_id, gene_name, and neurotransmitter class
csv <- read.csv(here("Auxillary/neuron_types.csv"))
genes <- csv$gene_id
genes <- genes[!duplicated(genes)]
#calculate average gene expression per cluster using seurat's DotPlot function
# Only DotPlot's underlying data (avg.exp.scaled, pct.exp per cluster)
# is used; the Seurat plot itself is discarded.
dot <- DotPlot(new_combined, features = genes, assay = "RNA", scale = FALSE)
dot <- dot$data
dot <- rownames_to_column(dot, "genes")
# Recover the gene_id from the row name
# NOTE(review): assumes gene IDs are exactly 14 characters -- confirm
# against the WBGene-style IDs used in this atlas.
dot <- dot %>%
mutate(gene_id = substr(genes, 1, 14)) %>%
select(-"genes")
# Attach gene names and neurotransmitter classes from the marker table
dot <- dot %>%
left_join(csv)
#rename clusters
# Renumber clusters 0-26 to 1-27 to match Fig 2a labelling
dot$id<- factor(dot$id, levels = c("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26"), labels = c("1", "2", "3", "4", "5","6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27"))
# Map each renumbered cluster to its annotation class (facet columns)
dot <- dot %>%
mutate(ID = case_when(
id == "1" ~ "Unannotated",
id == "2" ~ "MS",
id == "3" ~ "Unannotated",
id == "4" ~ "Unannotated",
id == "5" ~ "Unannotated",
id == "6" ~ "C",
id == "7" ~ "Unannotated",
id == "8" ~ "Unannotated",
id == "9" ~ "MD",
id == "10" ~ "Unannotated",
id == "11" ~ "Neuron",
id == "12" ~ "Neuron",
id == "13" ~ "Neuron",
id == "15" ~ "S",
id == "14" ~ "CA",
id == "16" ~ "Unannotated",
id == "17" ~ "MD",
id == "18" ~ "Neuron",
id == "19" ~ "MS",
id == "20" ~ "Unannotated",
id == "21" ~ "Unannotated",
id == "22" ~ "IB",
id == "23" ~ "Neuron",
id == "24" ~ "Neuron",
id == "25" ~ "Neuron",
id == "26" ~ "Neuron",
id == "27" ~ "Neuron"))
# Fix the display order of the annotation classes
dot$ID <- factor(dot$ID, levels = c("MS","MD", "C", "S", "CA", "IB", "Neuron", "Unannotated"))
# plot
# Dot plot: genes (rows, via coord_flip) x clusters, faceted by class
# and neurotransmitter; dot size = % of cells expressing, colour = mean
# expression.
(row4 <- dot %>%
ggplot(aes(y = id, x = gene_name))+
geom_point(aes(size = pct.exp, color = avg.exp.scaled))+
scale_size("Proportion (%)", range = c(-1, 3))+
#scale_size_continuous(range = c(-1, 3), nice.breaks = TRUE)+
scale_color_viridis()+
labs(x = "Genes", y = "Cluster", size = "Proportion (%)", color = "Avg. Exp.")+
facet_grid(cols = vars(ID), rows = vars(neurotransmitter), space = "free", scales = "free", drop = TRUE)+
theme(text=element_text(family="Helvetica"),
panel.background = element_blank(),
axis.line = element_line (colour = "black"),
legend.background=element_blank(),
legend.text = element_text(size = 8),
legend.title = element_text(size = 8, vjust = 1),
legend.key = element_blank(),
axis.text.x = ggplot2::element_text(size = 8, angle = 90, vjust = 0.5),
axis.text.y = ggplot2::element_text(size = 8, hjust = 1, face = "italic"),
axis.title.x = ggplot2::element_text(size = 10, vjust = -1),
axis.title.y = ggplot2::element_text(size = 10),
strip.text.x = element_text(size = 8),
strip.text.y = element_text(size = 8, angle = 0),
strip.background = element_blank(),
panel.spacing.x = unit(0.5, "lines"),
legend.key.width = unit(0.35, "cm"),
legend.key.height = unit(0.25, "cm"),
#legend.key.size = unit(0.25, "cm"),
legend.position = "bottom",
panel.grid = element_line(color = "#ededed", size = 0.05))+
coord_flip()+
#guides(size=guide_bins(title= str_wrap("Proportion (%)", width = 13)))+
NULL)
#### Combine Fig2A and Fig2B to create Figure 2
(Figure2 <- plot_grid(new_plot, row4, nrow = 2, rel_widths = c(1, 1), rel_heights = c(2.2, 1.7), labels = c("A", "B"), label_fontface = "plain")+theme(plot.margin = margin(0.1, 0, 0, 0, "cm")))
# save plot
ggsave(Figure2, filename = "~/Desktop/Figure2.pdf", device = cairo_pdf, width = 6.5, height = 8, units = "in")
##############################################
########### Supplemental Plots################
##############################################
##################
### Fig2 - figure supplement 1 - histogram of gene and read counts per cell for annotated clusters
##################
# FIX(review): these two histograms were previously assigned to `genes` and
# `counts`, clobbering the `genes` marker-id vector (later used in
# `filter(gene_id %in% genes)`) and the long-format `counts` expression table
# (later used by `daf19 <- counts %>% ...` and `left_join(counts)`).
# The plot objects are renamed to `genes_hist` / `counts_hist`; the saved
# figure is unchanged.
# genes expressed per cell, median marked in red vertical line @ 230 genes/cell
genes_hist <- md %>%
  ggplot()+
  geom_histogram(aes(x = nFeature_RNA), bins = 60)+
  geom_vline(aes(xintercept = median(nFeature_RNA), col = "red"), show.legend = FALSE)+
  scale_x_continuous(breaks = c(0, 50, 250, 500,750, 1000, 1250, 1500, 1750), expand = c(0, 50))+
  scale_y_continuous(expand = c(0,0))+
  labs(x = "Genes per cell", y = "Count")+
  theme(axis.text = element_text(size = 9),
        axis.title = element_text(size = 10),
        plot.background = element_blank(),
        panel.grid = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(color = "black"))
# reads captured per cell, median marked in red vertical line @ 267 reads/cell
counts_hist <- md %>%
  ggplot()+
  geom_histogram(aes(x = nCount_RNA), bins = 60)+
  geom_vline(aes(xintercept = median(nCount_RNA), col = "red"), show.legend = FALSE)+
  labs(x = "Reads per cell", y = "Count")+
  scale_x_continuous(breaks = c(0, 50, 250, 500,750, 1000, 1250, 1500, 1750), expand = c(0, 50))+
  scale_y_continuous(expand = c(0,0))+
  theme(axis.text = element_text(size = 9),
        axis.title = element_text(size = 10),
        plot.background = element_blank(),
        panel.grid = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(color = "black"))
# Side-by-side panel, saved as figure supplement 1.
annotated <- plot_grid(genes_hist, counts_hist, ncol = 2)
ggsave(annotated, filename = "figure2-figuresupplement1.pdf", device = cairo_pdf, width = 6, height = 3, units = "in")
##########################
### Fig2 - figure supplement 2 - Orthologous transcription factors and UMAPs of ciliated sensory neuron markers
##########################
## dataframe of Bma-daf-19 expression
# NOTE(review): `counts` here must be the long-format per-cell expression
# table (gene_id / index / counts), not a plot object — beware other sections
# of this script reusing the name `counts` for a ggplot; verify run order.
daf19 <- counts %>%
  subset(gene_id == "WBGene00224065") %>%
  subset(counts > 0) %>%
  left_join(md) %>%
  left_join(data)
daf19$cat <- "Bma-daf-19"
daf19$gene_name <- "daf-19"
#### Distribution of genes involved in cilia assembly
cilia <- read.csv(here("Auxillary/cilia_assembly_genes.csv")) %>%
  select("bma_ortho", "bma_genename", "component") %>%
  unique()
# NOTE(review): drops the 4th row of the deduplicated table by position —
# fragile if the CSV changes; confirm which entry this removes.
cilia <- cilia[-4,]
colnames(cilia) <- c("gene_id", "gene_name", "component")
# NOTE(review): `list` shadows the base function list(); works, but renaming
# would be safer.
list <- cilia$gene_id
# Background: all untreated (utBM) cells with at least 1 normalized count.
data2<- data %>%
  left_join(md) %>%
  subset(orig.ident == "utBM") %>%
  left_join(counts) %>%
  subset(counts >=1) %>%
  select("index", "UMAP_1", "UMAP_2","integrated_snn_res.0.5", "orig.ident", "gene_id", "counts") %>%
  distinct()
# create dataframe and rename the ciliogenesis functions to make shorter
umaps <- data2 %>%
  filter(gene_id %in% list) %>%
  subset(orig.ident == "utBM") %>%
  left_join(cilia) %>%
  mutate(cat = case_when(
    component == "Kinesin-II" ~ "Kinesin-II",
    component == "IFT-dynein" ~ "IFT",
    str_detect(component, "IFT") ~ "IFT",
    component == "BBS proteins" ~ "BBS",
    component == "Motor activators" ~ "Motor activator",
    component == "Various" ~ "IFT",
    TRUE ~ component)) %>%
  select(-"component")
# Align daf19 columns with `umaps` so the two can be row-bound.
daf19 <- daf19 %>% select("index", "UMAP_1", "UMAP_2", "integrated_snn_res.0.5", "orig.ident", "gene_id", "gene_name", "cat", "counts")
# add daf19 data to the ciliogenesis genes
umaps <- rbind(umaps, daf19)
# factor the ciliogenesis genes by function
umaps$cat <- factor(umaps$cat, levels = c("Bma-daf-19", "IFT", "Kinesin-II", "BBS", "Motor activator"), labels = c("DAF-19", "IFT", "Kinesin-II", "BBS", "Motor activator"))
# UMAPs for each gene
# Expression is layered in three count bins so brighter cells draw on top.
figb <- umaps %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2[data2$counts > 4,], size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(umaps, counts > 1 & counts <= 1.5), aes(color = counts), size = 0.5, show.legend = TRUE)+
  geom_point(data = subset(umaps, counts > 1.5 & counts <= 3), aes(color = counts), size = 0.5, show.legend = TRUE)+
  geom_point(data = subset(umaps, counts > 3), aes(color = counts), size = 0.5, show.legend = TRUE)+
  scale_color_viridis(limits = c(1,4))+
  facet_grid(rows = vars(cat))+
  labs(color = "Avg. Exp.")+
  theme(axis.title = element_text(size = 11),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        #strip.placement = "outside",
        axis.line = element_line(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.position = "bottom",
        legend.key.width = unit(0.4, "cm"),
        legend.key.height = unit(0.25, "cm"),
        legend.text = element_text(size = 10),
        legend.title = element_text(vjust =1.1))+
  NULL
### put it all together
# NOTE(review): `tf_dot` (transcription-factor dot plot) is defined earlier
# in the file — verify it exists before this line runs.
sup_fig<- plot_grid(tf_dot, figb, ncol = 2, rel_widths = c(1.25, 0.75), scale = c(1, 0.95), labels = c("A", "B"), label_fontface = "plain")
# save plot
ggsave(sup_fig, filename = "~/Desktop/figure2_figuresupplement2.pdf", device = cairo_pdf, width = 8.5, height = 10, units = "in")
##########################
### Fig2 - figure supplement 3 - Individual expression UMAPs of markers defining cell tyes
##########################
# Master long table for all marker-UMAP panels below: one row per
# (cell, expressed gene) with UMAP coordinates and metadata attached.
# NOTE(review): `counts` must still be the long-format expression data frame
# at this point (the name is also reused for a ggplot elsewhere — verify).
table <- data %>%
  left_join(md) %>%
  left_join(counts) %>%
  subset(counts > 0)
# Muscle: keep cells that co-express most of the three muscle marker genes.
muscles <- c("WBGene00231447","WBGene00222011", "WBGene00224604")
muscle <- table %>%
  filter(gene_id %in% muscles, counts >= 2.5) %>%
  mutate(count = 1)
# One row per cell with an indicator column per marker; `fraction` is the
# share of the 3 markers detected in that cell.
muscle_frac <- muscle %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
muscle_frac[is.na(muscle_frac)] <- 0
muscle_frac$total <- rowSums(muscle_frac[, -1])
muscle_frac <- muscle_frac %>%
  mutate(fraction = total/3, alpha = ifelse(fraction > 0.6, 1, 0.1)) %>%
  select("index", "fraction", "alpha")
# Join the per-cell fraction back on and keep only high-confidence cells
# (more than 60% of the markers expressed).
muscle <- muscle %>%
  left_join(muscle_frac) %>%
  subset(alpha == 1) %>%
  mutate(type = "Muscle")
# plot using the fraction of markers expressed as the color gradient
# UMAP of muscle-marker expression over the grey background of all cells
# (`data2`); expression is layered in three count bins so brighter cells
# draw on top. Color scale shared across all marker panels (2 - 7.5).
muscle_plot <- muscle %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(muscle, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(muscle, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(muscle, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
# coelomocytes (cluster 5 = 1664 cells)
######## Coelomocytes
coelomocytes <- c("WBGene00268467", "WBGene00227182", "WBGene00223567","WBGene00223869")
coel <- table %>%
  filter(gene_id %in% coelomocytes) %>%
  subset(counts >= 2.5)
# create a count column and pivot wider to sum across all columns to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
coel$count <- 1
tmp <- coel %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
tmp <- tmp %>% mutate(fraction = total/4) %>%
  select("index", "fraction") %>%
  mutate(alpha = ifelse(fraction >= 0.5, 1, 0.1))
# left join the fraction of markers per cell back onto the coelomocyte dataframe for plotting
# and keep only high-confidence cells (>= half the markers expressed).
# FIX(review): the original called subset(alphs = 1); the misspelled named
# argument was absorbed by `...` in subset() and silently ignored, so no
# filtering happened. Filter on alpha == 1 as in the muscle / inner-body blocks.
coel <- coel %>%
  left_join(tmp) %>%
  subset(alpha == 1)
coel$type <- "Coelomocytes"
# plot using the fraction of markers expressed as the color gradient
# UMAP of coelomocyte-marker expression; same layered-bin / shared color
# scale layout as muscle_plot above.
coel_plot <- coel %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(coel, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(coel, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(coel, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  #scale_alpha_manual(guide = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        #axis.line.y = element_line(color = "black"),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
# Inner Body related (cluster 21 = 503 cells)
# Same recipe as the muscle block: flag cells co-expressing >= half of the
# four inner-body markers, then plot.
IB_marker <- c("WBGene00229597", "WBGene00223435", "WBGene00228562", "WBGene00224494")
IB <- table %>%
  filter(gene_id %in% IB_marker) %>%
  subset(counts >= 2.5)
# create a count column and pivot wider to sum across all columsn to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
IB$count <- 1
tmp <- IB %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
tmp <- tmp %>% mutate(fraction = total/4) %>%
  select("index", "fraction")%>%
  mutate(alpha = ifelse(fraction >= 0.5, 1, 0.1))
#left join the fraction of markers per cel back onto the muscle dataframe for plotting
IB <- IB %>%
  left_join(tmp) %>%
  subset(alpha == 1)
IB$type <- "Inner body"
# plot using the fraction of markers expressed as the color gradient
IB_plot <- IB %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(IB, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(IB, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(IB, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
# canal-associated (cluster 13 = 1006 cells)
canal_markers <- c("WBGene00222948", "WBGene00226559", "WBGene00226410")
canal<- table %>%
  filter(gene_id %in% canal_markers) %>%
  subset(counts >= 2.2)
# create a count column and pivot wider to sum across all columsn to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
canal$count <- 1
tmp <- canal %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
tmp <- tmp %>% mutate(fraction = total/3) %>%
  select("index", "fraction")%>%
  mutate(alpha = ifelse(fraction >= 0.6, 1, 0.1))
#left join the fraction of markers per cel back onto the muscle dataframe for plotting
# NOTE(review): unlike the muscle/inner-body blocks, the alpha flag is
# computed but never used to subset — all marker-positive cells are plotted.
# Confirm this is intentional.
canal <- canal %>%
  left_join(tmp)
canal$type <- "Canal-associated"
# plot using the fraction of markers expressed as the color gradient
canal_plot <- canal %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(canal, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(canal, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(canal, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
# Mesoderm (clusters 8 and 16, 1455 + 942 = 2397 cells)
mesodermal <- c("WBGene00225364", "WBGene00222162")
mes <- table %>%
  filter(gene_id %in% mesodermal) %>%
  subset(counts >= 3)
# create a count column and pivot wider to sum across all columsn to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
mes$count <- 1
tmp <- mes %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
tmp <- tmp %>% mutate(fraction = total/2) %>%
  select("index", "fraction")%>%
  mutate(alpha = ifelse(fraction ==1, 1, 0.1))
#left join the fraction of markers per cel back onto the muscle dataframe for plotting
# NOTE(review): as with the canal block, the alpha flag is computed but no
# subset(alpha == 1) is applied — confirm intentional.
mes <- mes %>%
  left_join(tmp)
mes$type <- "Mesoderm"
# plot using the fraction of markers expressed as the color gradient
mes_plot <- mes %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(mes, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(mes, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(mes, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  #scale_alpha_manual(guide = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        #axis.line.y = element_line(color = "black"),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
### Combine non-neuronal markers into single dataframe
# (rbind works because every block above appends the same extra columns:
# count, fraction, alpha, type.)
nonneuronal <- rbind(muscle, IB, coel, canal, mes)
nonneuronal$type <- factor(nonneuronal$type, levels = c("Muscle", "Coelomocytes", "Inner body", "Canal-associated", "Mesoderm"))
# pan-neuronal
pan_markers <- c("WBGene00223147", "WBGene00223381", "WBGene00221982", "WBGene00225764", "WBGene00226594")
pan<- table %>%
  filter(gene_id %in% pan_markers) %>%
  subset(counts >=2)
# create a count column and pivot wider to sum across all columsn to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
pan$count <- 1
tmp <- pan %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
# NOTE(review): `pan_markers` lists 5 genes but the fraction is divided by 4,
# so a cell expressing all 5 gets fraction 1.25 — confirm whether the
# denominator or the marker list is intended.
tmp <- tmp %>% mutate(fraction = total/4) %>%
  select("index", "fraction")%>%
  mutate(alpha = ifelse(fraction >= 0.4, 1, 0.1))
#left join the fraction of markers per cel back onto the muscle dataframe for plotting
pan <- pan %>%
  left_join(tmp)
pan$type <- "Pan-neuronal"
pan_plot <- pan %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(pan, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(pan, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(pan, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
# Motor neurons: a single marker gene (WBGene00223870), so the per-cell
# marker "fraction" is trivially 1 for every retained cell; the alpha flag is
# kept only so the columns match the other neuron data frames for rbind.
motor <- table %>%
  filter(gene_id == "WBGene00223870", counts >= 2) %>%
  mutate(count = 1)
motor_frac <- motor %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
motor_frac[is.na(motor_frac)] <- 0
motor_frac$total <- rowSums(motor_frac[, -1])
motor_frac <- motor_frac %>%
  mutate(fraction = total/1, alpha = ifelse(fraction >= 1, 1, 0.1)) %>%
  select("index", "fraction", "alpha")
# Attach the per-cell fraction for plotting (no alpha filtering here).
motor <- motor %>%
  left_join(motor_frac) %>%
  mutate(type = "Motor")
# UMAP of the motor-neuron marker; same layered-bin layout as above.
motor_plot <- motor %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(motor, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(motor, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(motor, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
### DVA (cluster 22 = 503 cells)
# tail
# Single interneuron marker with a higher count threshold (>= 4.5).
dva<- table %>%
  subset(gene_id == "WBGene00225297") %>%
  subset(counts >=4.5)
# create a count column and pivot wider to sum across all columsn to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
dva$count <- 1
tmp <- dva %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
tmp <- tmp %>% mutate(fraction = total/1) %>%
  select("index", "fraction")%>%
  mutate(alpha = ifelse(fraction == 1, 1, 0.1))
#left join the fraction of markers per cel back onto the muscle dataframe for plotting
dva <- dva %>%
  left_join(tmp)
dva$type <- "Interneuron"
dva_plot <- dva %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(dva, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(dva, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(dva, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
# Neuropeptidergic neurons
pep_markers <- c("WBGene00223554","WBGene00223147", "WBGene00233246", "WBGene00225067" )
pep<- table %>%
  filter(gene_id %in% pep_markers) %>%
  subset(counts >=2.5)
# create a count column and pivot wider to sum across all columsn to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
pep$count <- 1
tmp <- pep %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
tmp <- tmp %>% mutate(fraction = total/4) %>%
  select("index", "fraction")%>%
  mutate(alpha = ifelse(fraction >= 0.5, 1, 0.1))
#left join the fraction of markers per cel back onto the muscle dataframe for plotting
pep <- pep %>%
  left_join(tmp)
pep$type <- "Neuropeptidergic"
pep_plot <- pep %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(pep, counts <= 3), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(pep, counts <= 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  geom_point(data = subset(pep, counts > 5), aes(color = counts), size = 0.5, show.legend = FALSE)+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_blank(),
        panel.spacing.y = unit(6, "cm"))+
  NULL
# Aminergic neurons
amine<- table %>%
  subset(gene_id == "WBGene00225236") %>%
  subset(counts >=2)
# create a count column and pivot wider to sum across all columsn to show how many genes are expressed per cell and then calculate the fraction of markers expressed in each cell
amine$count <- 1
tmp <- amine %>%
  select(gene_id, index, count) %>%
  pivot_wider(names_from = "gene_id", values_from = "count")
tmp[is.na(tmp)] <- 0
tmp$total <- rowSums(tmp[,-1])
tmp <- tmp %>% mutate(fraction = total/1) %>%
  select("index", "fraction")%>%
  mutate(alpha = ifelse(fraction ==1, 1, 0.1))
#left join the fraction of markers per cel back onto the muscle dataframe for plotting
amine <- amine %>%
  left_join(tmp)
amine$type <- "Aminergic"
# This panel keeps its legend visible (show.legend = TRUE) so it can be
# extracted with get_legend() below and shared across the combined figure.
amine_plot <- amine %>%
  ggplot(aes(x = UMAP_1, y = UMAP_2))+
  geom_point(data= data2, size = 0.1, alpha = 0.1, color = "grey")+
  geom_point(data = subset(amine, counts <= 3), aes(color = counts), size = 0.5, show.legend = TRUE)+
  geom_point(data = subset(amine, counts <= 5), aes(color = counts), size = 0.5, show.legend = TRUE)+
  geom_point(data = subset(amine, counts > 5), aes(color = counts), size = 0.5, show.legend = TRUE)+
  labs(color = "Norm. Counts")+
  scale_color_viridis(limits = c(2, 7.5))+
  facet_grid(cols = vars(gene_id), rows = vars(type), switch = "y")+
  theme(axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10, color = "white", vjust = 5),
        axis.ticks = element_blank(),
        plot.title = element_text(hjust = 0.5, vjust = 3),
        panel.background = element_blank(),
        strip.background = element_blank(),
        strip.text.x = element_text(size = 10),
        strip.text.y = element_text(size = 11),
        strip.placement = "outside",
        axis.line.x = element_blank(),
        legend.background=element_blank(),
        legend.key = element_blank(),
        legend.title = element_text(size = 9, vjust = 0.9),
        legend.position = "bottom",
        panel.spacing.y = unit(6, "cm"),
        legend.key.height = unit(0.4, "cm"),
        legend.key.width = unit(0.5, "cm"))+
  NULL
legend <- get_legend(amine_plot) # then go back and remove legend from amine plot (show.legend = FALSE)
### Combining neuron markers into a single UMAP
neuronal <- rbind(motor, dva, pan, pep, amine)
neuronal$type <- factor(neuronal$type, levels = c("Pan-neuronal", "Motor", "Interneuron", "Neuropeptidergic", "Aminergic"))
# combine the plots to make complete supplemental figure
# NOTE(review): row2/row3/row4 overwrite variables used earlier in the
# script (row4 was the Fig2B dot plot) — the Figure2 composition below
# therefore depends on execution order; verify before re-running sections.
row1<- plot_grid(muscle_plot, NULL,legend, ncol = 3, rel_widths = c(0.6, 0.1, 0.3))
row2 <- plot_grid(coel_plot, NULL, ncol = 2, rel_widths = c(0.8, 0.2))
row3 <- plot_grid(IB_plot, NULL, ncol = 2, rel_widths = c(0.8, 0.2))
row4 <- plot_grid(canal_plot, mes_plot, ncol = 2, rel_widths = c(0.6, 0.4))
row5 <- plot_grid(pan_plot, NULL, ncol = 2, rel_widths = c(0.99, 0.01))
row6 <- plot_grid(motor_plot, dva_plot, NULL, ncol = 3, rel_widths = c(0.25, 0.25, 0.5))
row7 <- plot_grid(pep_plot, amine_plot, NULL, ncol = 3, rel_widths = c(0.6, 0.25, 0.15))
UMAPS <- plot_grid(row1, row2, row3, row4, row5, row6, row7, nrow = 7)
# save plot
ggsave(UMAPS, filename = "marker_umaps.pdf", device = cairo_pdf, width = 8, height = 12, units = "in")
###################
### Create Figure 2
###################
# NOTE(review): `fig2a` is defined earlier in the file; also
# label_fontfamily = "helvetica" (lowercase) may not resolve on all systems —
# other calls use "Helvetica". Verify.
Figure2 <- plot_grid(fig2a, row2, row3, row4, nrow = 4, rel_widths = c(1.5, 1, 1, 1), rel_heights = c(2.2, 0.9, 1.1, 1.9), labels = c("A", "B","", "C"), label_fontfamily = "helvetica", label_fontface = "plain")+theme(plot.margin = margin(0.1, 0, 0, 0, "cm"))
ggsave(Figure2, filename = "Figure2.pdf", device = cairo_pdf, width = 8, height = 12, units = "in")
###################
### Supplemenetal Figure - alternate dot plot of normalized transcription per cluster for neuron classes
##################
library(Rfast)
# Raw (unnormalized) counts pulled straight from the Seurat object, melted
# into long format: one row per (gene, cell).
raw <- as_tibble(new_combined@assays[["RNA"]]@counts, rownames = "gene_id") %>%
  pivot_longer(!gene_id, names_to = "index", values_to = "counts") # gene expression matrix of raw counts
# NOTE(review): `genes` here must be the marker gene-id vector defined
# earlier in the file — the figure-supplement-1 section reuses the name
# `genes` for a ggplot, so run order matters; verify.
trans <- data %>%
  left_join(md) %>%
  left_join(raw) %>%
  filter(counts > 0) %>%
  select("gene_id", "counts", "integrated_snn_res.0.5", "index") %>%
  filter(gene_id %in% genes)
tmp <- trans %>%
  group_by(integrated_snn_res.0.5) %>%
  pivot_wider(names_from = index, values_from = counts)
tmp[is.na(tmp)] <- 0
# calculate the total
# rowsums() is Rfast's fast matrix row-sum; [,c(-1,-2)] drops the gene_id
# and cluster columns so only per-cell counts are summed.
tmp$raw_summed <- rowsums(as.matrix(tmp[,c(-1, -2)]))
new <- tmp %>% select("gene_id", "integrated_snn_res.0.5", "raw_summed")
# Per gene: percentage of its total raw reads contributed by each cluster.
total <- new %>%
  group_by(gene_id) %>%
  summarise(total = sum(raw_summed)) %>%
  left_join(new) %>%
  mutate(fraction = ((raw_summed/total)*100)) %>%
  left_join(csv)
# Relabel clusters from 0-based Seurat ids to 1-based display ids.
total$integrated_snn_res.0.5<- factor(total$integrated_snn_res.0.5, levels = c("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26"), labels = c("1", "2", "3", "4", "5","6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27"))
# Map display cluster ids to tissue annotations (same mapping as Fig 2B).
total <- total %>%
  mutate(ID = case_when(
    integrated_snn_res.0.5 == "1" ~ "Unannotated",
    integrated_snn_res.0.5 == "2" ~ "MS",
    integrated_snn_res.0.5 == "3" ~ "Unannotated",
    integrated_snn_res.0.5 == "4" ~ "Unannotated",
    integrated_snn_res.0.5 == "5" ~ "Unannotated",
    integrated_snn_res.0.5 == "6" ~ "C",
    integrated_snn_res.0.5 == "7" ~ "Unannotated",
    integrated_snn_res.0.5 == "8" ~ "Unannotated",
    integrated_snn_res.0.5 == "9" ~ "MD",
    integrated_snn_res.0.5 == "10" ~ "Unannotated",
    integrated_snn_res.0.5 == "11" ~ "Neuron",
    integrated_snn_res.0.5 == "12" ~ "Neuron",
    integrated_snn_res.0.5 == "13" ~ "Neuron",
    integrated_snn_res.0.5 == "15" ~ "S",
    integrated_snn_res.0.5 == "14" ~ "CA",
    integrated_snn_res.0.5 == "16" ~ "Unannotated",
    integrated_snn_res.0.5 == "17" ~ "MD",
    integrated_snn_res.0.5 == "18" ~ "Neuron",
    integrated_snn_res.0.5 == "19" ~ "MS",
    integrated_snn_res.0.5 == "20" ~ "Unannotated",
    integrated_snn_res.0.5 == "21" ~ "Unannotated",
    integrated_snn_res.0.5 == "22" ~ "IB",
    integrated_snn_res.0.5 == "23" ~ "Neuron",
    integrated_snn_res.0.5 == "24" ~ "Neuron",
    integrated_snn_res.0.5 == "25" ~ "Neuron",
    integrated_snn_res.0.5 == "26" ~ "Neuron",
    integrated_snn_res.0.5 == "27" ~ "Neuron"))
total$ID <- factor(total$ID, levels = c("MS","MD", "C", "S", "CA", "IB", "Neuron", "Unannotated"))
# plot
# Dot plot: per gene, the share of its total raw reads coming from each
# cluster (only shares > 1% shown); colored by neurotransmitter class.
(supp_plot <- total %>%
    filter(fraction > 1) %>%
    ggplot(aes(y = integrated_snn_res.0.5, x = gene_name))+
    geom_point(aes(size = fraction, color = neurotransmitter))+
    scale_size("Total reads (%)", range = c(0, 4), breaks = c(1, 5, 10, 25, 50, 75, 100))+
    scale_color_manual(values = dakota[3:8])+
    labs(x = "Genes", y = "Cluster", size = "Total reads (%)")+
    facet_grid(cols = vars(ID), rows = vars(neurotransmitter), space = "free", scales = "free", drop = TRUE)+
    theme(#text=element_text(family="Helvetica"),
      panel.background = element_blank(),
      axis.line = element_line (colour = "black"),
      legend.background=element_blank(),
      legend.text = element_text(size = 8),
      legend.title = element_text(size = 8, vjust = 1),
      legend.key = element_blank(),
      axis.text.x = ggplot2::element_text(size = 8, angle = 90, vjust = 0.5),
      axis.text.y = ggplot2::element_text(size = 8, hjust = 1, face = "italic"),
      axis.title.x = ggplot2::element_text(size = 10, vjust = -1),
      axis.title.y = ggplot2::element_text(size = 10),
      strip.text.x = element_text(size = 8),
      strip.text.y = element_text(size = 8, angle = 0),
      strip.background = element_blank(),
      panel.spacing.x = unit(0.5, "lines"),
      legend.key.width = unit(0.35, "cm"),
      legend.key.height = unit(0.25, "cm"),
      #legend.key.size = unit(0.25, "cm"),
      legend.position = "bottom",
      panel.grid = element_line(color = "#ededed", size = 0.05))+
    coord_flip()+
    guides(color= "none"))
ggsave(supp_plot, filename = "neurons_readfraction_percluster.pdf", device = cairo_pdf, width = 6, height = 5, units = "in")
##################
### Figure 2 Supplemental marker UMAPs
##################
# Assemble the individual marker-gene UMAP panels (created earlier in the
# script; not visible here) into one 7-row figure. NULL entries act as
# horizontal spacers and the rel_widths ratios align panels of unequal width.
row1<- plot_grid(muscle_plot, NULL,legend, ncol = 3, rel_widths = c(0.6, 0.1, 0.3))
row2 <- plot_grid(coel_plot, NULL, ncol = 2, rel_widths = c(0.8, 0.2))
row3 <- plot_grid(IB_plot, NULL, ncol = 2, rel_widths = c(0.8, 0.2))
row4 <- plot_grid(canal_plot, mes_plot, ncol = 2, rel_widths = c(0.6, 0.4))
row5 <- plot_grid(pan_plot, NULL, ncol = 2, rel_widths = c(0.99, 0.01))
row6 <- plot_grid(motor_plot, dva_plot, NULL, ncol = 3, rel_widths = c(0.25, 0.25, 0.5))
row7 <- plot_grid(pep_plot, amine_plot, NULL, ncol = 3, rel_widths = c(0.6, 0.25, 0.15))
UMAPS <- plot_grid(row1, row2, row3, row4, row5, row6, row7, nrow = 7)
ggsave(UMAPS, filename = "marker_umaps.pdf", device = cairo_pdf, width = 8, height = 12, units = "in")
##################
### Figure 2 Supplementals - histogram of gene and read counts per cell for annotated clusters
##################
# genes expressed per cell, median marked in red vertical line @ 230 genes/cell
# NOTE(review): `md` (per-cell metadata containing nFeature_RNA / nCount_RNA)
# is created earlier in the script.
genes <- md %>%
ggplot()+
geom_histogram(aes(x = nFeature_RNA), bins = 60)+
# Red vertical line at the median genes-per-cell value.
geom_vline(aes(xintercept = median(nFeature_RNA), col = "red"), show.legend = FALSE)+
scale_x_continuous(breaks = c(0, 50, 250, 500,750, 1000, 1250, 1500, 1750), expand = c(0, 50))+
scale_y_continuous(expand = c(0,0))+
labs(x = "Genes per cell", y = "Count")+
theme(axis.text = element_text(size = 9),
axis.title = element_text(size = 10),
plot.background = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black"))
# reads captured per cell, median marked in red vertical line @ 267 reads/cell
counts <- md %>%
ggplot()+
geom_histogram(aes(x = nCount_RNA), bins = 60)+
geom_vline(aes(xintercept = median(nCount_RNA), col = "red"), show.legend = FALSE)+
labs(x = "Reads per cell", y = "Count")+
scale_x_continuous(breaks = c(0, 50, 250, 500,750, 1000, 1250, 1500, 1750), expand = c(0, 50))+
scale_y_continuous(expand = c(0,0))+
theme(axis.text = element_text(size = 9),
axis.title = element_text(size = 10),
plot.background = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black"))
# Side-by-side histograms saved as one figure.
annotated <- plot_grid(genes, counts, ncol = 2)
ggsave(annotated, filename = "annotated_summary.pdf", device = cairo_pdf, width = 6, height = 3, units = "in")
#########################
### Fig2 - figure supplement 4 - Pseudobulk analysis
#########################
### Pseudobulk analysis of B. malayi mf scRNAseq data
# read in processed RDS object for pheatmap plotting. RDS object represents all processing between the ## marks below
centered <- readRDS("~/Library/CloudStorage/Box-Box/ZamanianLab/LabMembers/Clair/project_singlecell/other/bma_mf_pseudobulk.RDS")
################################################################
# aggregate the raw read counts for utBM and tBM clusters
split <- SplitObject(new_combined, split.by = "orig.ident")
utBM_agg <- AggregateExpression(split$utBM, assays = "RNA", slot = "counts", verbose = TRUE)
utBM_agg <- as.data.frame(utBM_agg$RNA)
tBM_agg <- AggregateExpression(split$tBM, assays = "RNA", slot = "counts", verbose = TRUE)
tBM_agg <- as.data.frame(tBM_agg$RNA)
# rename columns so utBM and tBM clusters are distinguishable
colnames(utBM_agg) <- paste(colnames(utBM_agg), "utBM", sep= "_")
colnames(tBM_agg) <- paste(colnames(tBM_agg), "tBM", sep= "_")
# move rownames into a gene_id column so the two data frames can be joined
utBM_agg <- rownames_to_column(utBM_agg, var = "gene_id")
tBM_agg <- rownames_to_column(tBM_agg, var = "gene_id")
# left-join the two data frames on gene_id.
# BUG FIX: this result was previously assigned to `expression`, but every
# statement below operates on `joined`, which was never defined (and
# `expression` masks base::expression).
joined <- utBM_agg %>% left_join(tBM_agg)
### TMM normalization using edgeR
# turn the data frame back into a matrix-like object with gene_id rownames
rownames(joined) <- joined[,1]
joined <- joined %>% select(-"gene_id")
# create a data frame with the sample names
samples <- data.frame(samples = colnames(joined))
# BUG FIX: `samples <- factor(levels = colnames(joined))` replaced the data
# frame with a zero-length factor, which breaks the mutate() below. Keep the
# data frame and just make the sample column a factor with the column order.
samples$samples <- factor(samples$samples, levels = colnames(joined))
# condition: utBM samples are controls, tBM samples are treated
condition <- samples %>%
mutate(condition = ifelse(grepl('utBM', samples), "Control", "Treated")) %>%
column_to_rownames(var = "samples")
condition <- factor(condition$condition)
# build the DGEList object
dge <- DGEList(counts = joined, group = condition, samples = samples)
# normalize the libraries using the default trimmed mean of M values (TMM)
dge <- calcNormFactors(dge)
tmm <- cpm(dge)
# remove genes that have < 10 counts
# filter 1 (require > 10 reads total across all samples)
keep <- rowSums(tmm) > 10
tmm <- tmm[keep,]
# add 1 to each count so the log transform is defined for zeros
tmm <- tmm + 1
# log2 transform (renamed from `log`, which masked base::log)
log_tmm <- log2(tmm)
# median-center the transformed data: subtract each gene's (row's) median
centered <- t(apply(log_tmm, 1, function(x) {
  x - median(x)
}))
# label the columns with the renamed (1-based) clusters: 27 utBM then 27 tBM
colnames(centered) <- c(paste0(1:27, "_utBM"), paste0(1:27, "_tBM"))
#saveRDS(centered, "~/Library/CloudStorage/Box-Box/ZamanianLab/LabMembers/Clair/project_singlecell/other/bma_mf_pseudobulk.RDS")
###################################################
### Hierarchically cluster the genes and "samples"
library(ClassDiscovery)
# use the uncentered pearson correlation using the classdiscovery package
# NOTE(review): distanceMatrix() appears to compute distances between matrix
# *columns*, hence the transpose for the gene dendrogram -- confirm against
# the ClassDiscovery documentation.
rows <- distanceMatrix(as.matrix(t(centered)), "uncentered correlation")
rowclus <- hclust(rows, method = "complete") # cluster the genes
cols <- distanceMatrix(as.matrix(centered), "uncentered correlation")
colclus <- hclust(cols, method = "complete") #cluster the samples
## generate heatmap of pseudobulk expression with axis labels
# Symmetric color scale bounded by the largest absolute centered value.
rg <- max(abs(centered))
# Temporarily shrink the plotting viewport so the axis labels added with
# grid.text() below fit outside the heatmap; the hook is removed right after.
setHook("grid.newpage", function() pushViewport(viewport(x=1,y=1,width=0.9, height=0.9, name="vp", just=c("right","top"))), action="prepend")
heatmap <- pheatmap(centered, cluster_rows = rowclus, cluster_cols = colclus, show_rownames = FALSE, color = colorRampPalette(c("deepskyblue", "black", "yellow"))(40), breaks = seq(-rg, rg, length.out = 40))
setHook("grid.newpage", NULL, "replace")
#add x and y axis labels
library(grid)
grid.text("Pseudobulk sample", y = -0.025, gp=gpar(fontsize=13))
grid.text("Genes", x=-0.025, rot=90, gp=gpar(fontsize=13))
# save plot
ggsave(filename = "~/Desktop/figure2_figuresupplement4.pdf", device = cairo_pdf, width = 7, height = 6.52, units = "in")
###################
### Figure 2 - figure supplement 5 - alternate dot plot of normalized transcription per cluster for neuron classes
##################
library(Rfast)
# Long-format table of raw counts: one row per gene x cell.
# NOTE(review): `new_combined`, `data`, `md`, `csv` and `genes` come from
# earlier in the script and are not visible here.
raw <- as_tibble(new_combined@assays[["RNA"]]@counts, rownames = "gene_id") %>%
pivot_longer(!gene_id, names_to = "index", values_to = "counts") # gene expression matrix of raw counts
# Keep non-zero counts for the genes of interest, with cluster assignments.
trans <- data %>%
left_join(md) %>%
left_join(raw) %>%
filter(counts > 0) %>%
select("gene_id", "counts", "integrated_snn_res.0.5", "index") %>%
filter(gene_id %in% genes)
# Wide gene x cell table within each cluster; cells with no reads become 0.
tmp <- trans %>%
group_by(integrated_snn_res.0.5) %>%
pivot_wider(names_from = index, values_from = counts)
tmp[is.na(tmp)] <- 0
# calculate the total
# Row sums over the cell columns only (drop gene_id and cluster columns).
tmp$raw_summed <- rowsums(as.matrix(tmp[,c(-1, -2)]))
new <- tmp %>% select("gene_id", "integrated_snn_res.0.5", "raw_summed")
# Per-gene totals across clusters -> percentage of each gene's reads that
# fall in each cluster.
total <- new %>%
group_by(gene_id) %>%
summarise(total = sum(raw_summed)) %>%
left_join(new) %>%
mutate(fraction = ((raw_summed/total)*100)) %>%
left_join(csv)
# Relabel Seurat's 0-based cluster ids as 1-based for display.
total$integrated_snn_res.0.5<- factor(total$integrated_snn_res.0.5, levels = c("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26"), labels = c("1", "2", "3", "4", "5","6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27"))
# Map each (1-based) cluster id to its annotation; clusters without a
# specific tissue call are labelled "Neuron" or "Unannotated".
# (Branches 15 and 14 are listed out of order; harmless, since each
# case_when condition matches a distinct cluster id.)
total <- total %>%
mutate(ID = case_when(
integrated_snn_res.0.5 == "1" ~ "Unannotated",
integrated_snn_res.0.5 == "2" ~ "MS",
integrated_snn_res.0.5 == "3" ~ "Unannotated",
integrated_snn_res.0.5 == "4" ~ "Unannotated",
integrated_snn_res.0.5 == "5" ~ "Unannotated",
integrated_snn_res.0.5 == "6" ~ "C",
integrated_snn_res.0.5 == "7" ~ "Unannotated",
integrated_snn_res.0.5 == "8" ~ "Unannotated",
integrated_snn_res.0.5 == "9" ~ "MD",
integrated_snn_res.0.5 == "10" ~ "Unannotated",
integrated_snn_res.0.5 == "11" ~ "Neuron",
integrated_snn_res.0.5 == "12" ~ "Neuron",
integrated_snn_res.0.5 == "13" ~ "Neuron",
integrated_snn_res.0.5 == "15" ~ "S",
integrated_snn_res.0.5 == "14" ~ "CA",
integrated_snn_res.0.5 == "16" ~ "Unannotated",
integrated_snn_res.0.5 == "17" ~ "MD",
integrated_snn_res.0.5 == "18" ~ "Neuron",
integrated_snn_res.0.5 == "19" ~ "MS",
integrated_snn_res.0.5 == "20" ~ "Unannotated",
integrated_snn_res.0.5 == "21" ~ "Unannotated",
integrated_snn_res.0.5 == "22" ~ "IB",
integrated_snn_res.0.5 == "23" ~ "Neuron",
integrated_snn_res.0.5 == "24" ~ "Neuron",
integrated_snn_res.0.5 == "25" ~ "Neuron",
integrated_snn_res.0.5 == "26" ~ "Neuron",
integrated_snn_res.0.5 == "27" ~ "Neuron"))
# Fix the display order of the annotation labels.
total$ID <- factor(total$ID, levels = c("MS","MD", "C", "S", "CA", "IB", "Neuron", "Unannotated"))
# plot
# Same dot plot as the main-figure version above, saved for figure
# supplement 5: percentage of each gene's total reads per cluster, faceted
# by annotation (columns) and neurotransmitter class (rows); pairs holding
# <= 1% of a gene's reads are dropped.
(supp_plot <- total %>%
filter(fraction > 1) %>%
ggplot(aes(y = integrated_snn_res.0.5, x = gene_name))+
geom_point(aes(size = fraction, color = neurotransmitter))+
scale_size("Total reads (%)", range = c(0, 4), breaks = c(1, 5, 10, 25, 50, 75, 100))+
scale_color_manual(values = dakota[3:8])+
labs(x = "Genes", y = "Cluster", size = "Total reads (%)")+
facet_grid(cols = vars(ID), rows = vars(neurotransmitter), space = "free", scales = "free", drop = TRUE)+
theme(#text=element_text(family="Helvetica"),
panel.background = element_blank(),
axis.line = element_line (colour = "black"),
legend.background=element_blank(),
legend.text = element_text(size = 8),
legend.title = element_text(size = 8, vjust = 1),
legend.key = element_blank(),
axis.text.x = ggplot2::element_text(size = 8, angle = 90, vjust = 0.5),
axis.text.y = ggplot2::element_text(size = 8, hjust = 1, face = "italic"),
axis.title.x = ggplot2::element_text(size = 10, vjust = -1),
axis.title.y = ggplot2::element_text(size = 10),
strip.text.x = element_text(size = 8),
strip.text.y = element_text(size = 8, angle = 0),
strip.background = element_blank(),
panel.spacing.x = unit(0.5, "lines"),
legend.key.width = unit(0.35, "cm"),
legend.key.height = unit(0.25, "cm"),
#legend.key.size = unit(0.25, "cm"),
legend.position = "bottom",
panel.grid = element_line(color = "#ededed", size = 0.05))+
coord_flip()+
guides(color= "none"))
ggsave(supp_plot, filename = "~/Desktop/Figure2-figuresupplement5.pdf", device = cairo_pdf, width = 6, height = 5, units = "in")
|
79fe8c8d9b537e9b4a0686df64c21df5bb901ab9
|
31a5abb076114964ccefca0242b679dcb3553228
|
/R/1-discorr.R
|
d536e4d5844ffb29ea471b39b594d4783c719ebf
|
[] |
no_license
|
aslez/femaR
|
6ef5783778b877a55ba7ffbab49c97e19e615060
|
58e31553f10439611dba40bb03e713ef43a4c28a
|
refs/heads/master
| 2021-01-10T13:57:06.864519
| 2016-03-04T23:30:35
| 2016-03-04T23:30:35
| 52,839,917
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,935
|
r
|
1-discorr.R
|
#'Construct Distance and Correlation Matrices
#'
#'\code{discorr} is used to construct the distance and correlation matrices
#'needed to estimate range, organization, and strength.
#'
#'@param df a fema-class data.frame.
#'
#'@param longlat logical indicating whether coordinates are projected. Default
#'is \code{FALSE}.
#'
#'@param ctype a string indicating the type of correlation to be calculated.
#'Valid options include single1 (cosine similarity for a single slope),
#'single2 (linear similarity for a single slope),
#'and multi (square root of the sum of squared differences across
#'multiple slopes).
#'
#'@param x1 a vector of slopes associated with observation i.
#'
#'@param x2 a vector of slopes associated with observation j.
#'
#'@details \code{discorr} is used to generate distance and correlation matrices
#'that can then be summarized in terms of range, organization, and strength.
#'\code{pwc} calculates pairwise correlations using the methods outlined by
#'Martin, Slez, and Borkenhagen (2016).
#'
#'@return The function \code{discorr} returns the following list of objects:
#' \describe{
#' \item{coords}{matrix of coordinates.}
#' \item{slopes}{vectors of slopes.}
#' \item{longlat}{logical indicating whether coordinates are projected.}
#' \item{dmat}{the distance matrix.}
#' \item{rmat}{the correlation matrix.}
#' }
#'
#'@references Martin, J.L., Slez, A., and Borkenhagen, C. 2016.
#'"Some Provisional Techniques for Quantifying the Degree of Field Effect
#'in Social Data."
#'
discorr <- function(df, longlat = FALSE, ctype = 'single1') {
  # Validate input class early (inherits() is the idiomatic S3 check).
  if (!inherits(df, 'fema.df')) {
    stop('df is not a fema-class data frame')
  }
  # generate distance matrix from the coordinate columns
  coords <- cbind(df$coord.x, df$coord.y)
  dmat <- spDists(coords, longlat = longlat)
  # generate correlations: every non-coordinate column is a slope
  cloc <- sapply(c('coord.x', 'coord.y'), function(x) grep(x, names(df)))
  slopes <- data.frame(df[, -cloc])
  # More than one slope column forces the multivariate measure.
  if (NCOL(slopes) > 1) ctype <- 'multi'
  names(slopes) <- names(df)[-cloc]
  rmat <- matrix(0, nrow(dmat), ncol(dmat))
  # Fill the lower triangle with pairwise correlations.
  # Guard: with fewer than 2 rows, 2:nrow(slopes) would iterate c(2, 1)
  # and index out of bounds.
  if (nrow(slopes) >= 2) {
    for (i in 2:nrow(slopes)) {
      for (j in seq_len(i - 1)) {
        rmat[i, j] <- pwc(slopes[i, ], slopes[j, ], ctype)
      }
    }
  }
  if (ctype == 'multi') {
    # Rescale euclidean slope distances into a correlation-like range.
    # NOTE(review): this transform also maps the untouched upper triangle
    # and diagonal from 0 to 1 before the mirroring step below -- verify
    # that is intended.
    rmat <- (max(rmat) - 2 * rmat) / max(rmat)
  }
  # Mirror the lower triangle to make the matrix symmetric.
  rmat <- rmat + t(rmat)
  # compile results
  result <- list(coords = coords, slopes = slopes,
                 longlat = longlat, ctype = ctype,
                 dmat = dmat, rmat = rmat)
  class(result) <- 'fema'
  result
}
#'@rdname discorr
pwc <- function(x1, x2, ctype) {
  # Pairwise similarity between two slope sets; the measure depends on ctype.
  vals <- data.frame(x1 = x1, x2 = x2)
  if (ctype == 'single1') {
    # Cosine similarity after prepending an intercept term of 1 to each slope.
    vals <- rbind(1, vals)
    num <- crossprod(vals$x1, vals$x2)
    denom <- sqrt(crossprod(vals$x1) * crossprod(vals$x2))
    r <- num / denom
  }
  if (ctype == 'single2') {
    # Linear similarity: 1 when the slopes coincide, decreasing with |x1 - x2|.
    r <- (pi - 2 * abs(vals$x1 - vals$x2)) / pi
  }
  if (ctype == 'multi') {
    # Euclidean distance across multiple slopes (rescaled later by discorr).
    r <- sqrt(sum((x1 - x2)^2))
  }
  r
}
|
732ad4be63e913e2fdca28ce4ad66db8c2da5901
|
54d0e0b1cfb9935174e0f9907f176e721d6d3bf3
|
/16. CH8 - Tree-Based Methods Bagging, Random Forests/Section2BaggingAndRandomForests/ProblemSetRegression.R
|
993b5b08ef2af2dbedbdbb22579ee3509e396b50
|
[] |
no_license
|
clairehu9/R_ML_ISLR
|
29f16ddcb02d654ae272f06510d85243ea30c68e
|
26bce2a45a1037cfbbc64eef4dca0d93ea56f461
|
refs/heads/master
| 2020-09-12T06:11:09.600859
| 2019-11-18T01:38:55
| 2019-11-18T01:38:55
| 222,336,639
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,543
|
r
|
ProblemSetRegression.R
|
# NOTE(review): clearing the global environment at the top of a script is
# discouraged (it does not give a fresh R session); consider removing.
rm(list=ls())
#' Install (if necessary) and attach every package in neededVector.
#'
#' A package already available is simply attached; otherwise it is installed
#' from the default repository and then attached. Returns NULL invisibly.
installIfAbsentAndLoad <- function(neededVector) {
  # A for loop over an empty vector is a no-op, so no length guard is needed.
  for (thispackage in neededVector) {
    # require() returns FALSE (instead of erroring) when the package is
    # missing, which is exactly what we want to trigger the install.
    if (!require(thispackage, character.only = TRUE)) {
      install.packages(thispackage)
      require(thispackage, character.only = TRUE)
    }
  }
}
#pRoc contains roc.plot
#verification contains roc.area
#rpart for building a single tree
needed <- c('randomForest', 'pROC', 'verification', 'rpart', 'ISLR', 'Metrics', 'ggplot2')
installIfAbsentAndLoad(needed)
###Get the data
# NOTE(review): the Boston housing data ships with MASS, not ISLR -- it is
# presumably attached as a dependency of one of the packages above; confirm.
data <- Boston
str(data)
###Create training and test sets
nobs <- nrow(data)
set.seed(5082)   # reproducible 70/30 train/test split
train <- sample(nobs, 0.7*nobs)
test <- setdiff(1:nobs, train)
###Grow 500 bagged trees
# mtry = 13 = all predictors considered at every split, which makes the
# random forest algorithm equivalent to bagging.
# NOTE(review): no set.seed() before this fit, so it is not reproducible.
rf.bag <- randomForest(formula=medv ~ .,data=data[train,],ntree=500, mtry=13,
importance=TRUE,localImp=TRUE,na.action=na.roughfix,replace=TRUE)
rf.bag
head(rf.bag$predicted,25)
###Display Variable Importance (sorted by %IncMSE)
importance(rf.bag)[order(importance(rf.bag)[,"%IncMSE"], decreasing=TRUE),]
###Display a chart of Variable Importance
varImpPlot(rf.bag, main="Variable Importance in the Bagging")
###Grow a 500-tree random forest
# mtry = 4 is approximately p/3 (13 predictors), the usual regression default.
set.seed(5082)
rf <- randomForest(formula=medv ~ .,data=data[train,],ntree=500, mtry=4,
importance=TRUE,localImp=TRUE,na.action=na.roughfix,replace=TRUE)
head(rf$predicted,25)
###Display Variable Importance
importance(rf)[order(importance(rf)[,"%IncMSE"], decreasing=TRUE),]
###Display a chart of Variable Importance
varImpPlot(rf, main="Variable Importance in the Random Forest")
#Bagging model considers all predictors at each split, while random forest model
# at each split randomly select only 4 predictors as split candidates. It can be seen
# from the variable importance plots as well. Comparing with the bagging model,
# there exist smaller gaps between non-important variables and important variables(rm, lstat)
# both in %IncMSE plot and IncNodePurity plot. This supports the idea that random
# forest algorithm will bring more opportunities for non-important predictors to make
# decisions at each split, which results further decrease in variance.
###Examine OOB MSEs as trees are added
head(rf$mse)
plot(rf, main="MSEs for Random Forest")
#plot(rf$rsq, main = "R-Squares for Random Forest")
# Find the tree count that minimizes the OOB MSE (exact float comparison is
# safe here because min() returns an element of the same vector).
min.mse <- min(rf$mse)
min.mse.idx <- which(rf$mse == min.mse)
min.mse.idx
rf$mse[min.mse.idx[1]]
###Rebuild the forest with the number of trees that minimizes the OOB error rate - use the first one if there are more than one
set.seed(5082)
rf <- randomForest(formula=medv ~ .,data=data[train,],ntree= min.mse.idx[1], mtry=4,
importance=TRUE,localImp=TRUE,na.action=na.roughfix,replace=TRUE)
###Evaluate by scoring the training set
prtrain <- predict(rf, newdata=data[train,])
trainMSE <- mse(actual = data[train, 'medv'], predicted = prtrain)
###Evaluate by scoring the test set
prtest <- predict(rf, newdata=data[test,])
testMSE <- mse(actual = data[test, 'medv'], predicted = prtest)
print(trainMSE)
print(rf$mse[length(rf$mse)])
print(testMSE)
# Train MSE 1.90 is very small, comparing to OOB MSE of 9.96 and test MSE of 20.16
# The reason why test MSE is so high is probalby because of randomness, if we do
# hundreads of iterations, it should be very close to OOB MSE.
# Here I do 100 iterations to justify my explanation above:
# repeat the split/fit/score cycle to show the one-off test MSE falls within
# the sampling variability around the OOB estimate.
# Preallocate the result vectors instead of growing them inside the loop.
testMSEs <- numeric(100)
trainMSEs <- numeric(100)
OOBMSEs <- numeric(100)
for (i in seq_len(100)) {
  # Fresh random 70/30 split each iteration (no seed: we want variability).
  nobs <- nrow(data)
  train <- sample(nobs, 0.7*nobs)
  test <- setdiff(seq_len(nobs), train)
  rf <- randomForest(formula=medv ~ ., data=data[train,], ntree= min.mse.idx[1], mtry=4, replace=TRUE)
  # Evaluate by scoring the training set
  prtrain <- predict(rf, newdata=data[train, ])
  trainMSEs[i] <- mse(actual = data[train, 'medv'], predicted = prtrain)
  # Evaluate by scoring the test set
  prtest <- predict(rf, newdata=(data[test,]))
  testMSEs[i] <- mse(actual = data[test, 'medv'], predicted = prtest)
  # Last element of rf$mse is the OOB MSE of the full forest.
  OOBMSEs[i] <- rf$mse[length(rf$mse)]
}
# Overlay train / test / OOB MSE across the 100 iterations: test MSE tracks
# the OOB estimate on average, while train MSE stays far lower (overfit gap).
ggplot(data.frame(trainMSEs, testMSEs), aes(1:100, trainMSEs, colour = 'Train MSE')) +
geom_line(aes(y = testMSEs, colour = 'Test MSE')) +
geom_line(aes(y = OOBMSEs, colour = 'OOB MSE')) +
geom_line() +
labs(title = 'MSE Comparison',
x = "Number of Iterations", y = "MSE") +
theme(legend.title = element_blank())
|
720429fd59d1e0898be78134f8b0a29717c15e20
|
7fe979af0f4987588a5d3aad18f61679b47f3ae0
|
/R/time_worked.R
|
7e5cf0c25c7417cc32c67b9c8e5d68854a3ae752
|
[] |
no_license
|
lwjohnst86/worktime
|
5226279d93007875c0e149edc11249037ae692df
|
340e897b33542f48a68a2a02b0fd58d0637de258
|
refs/heads/master
| 2022-05-22T06:41:59.686014
| 2020-04-22T09:20:16
| 2020-04-22T09:20:16
| 105,091,102
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
r
|
time_worked.R
|
#' Add minutes worked between clocking in and out.
#'
#' @param .data Clocking data from `.org` file. Must contain `ClockIn` and
#'   `ClockOut` date-time columns.
#'
#' @return The input data with an added `MinutesWorked` difftime column.
#' @export
#'
add_minutes_worked <- function(.data) {
  .data %>%
    # Spell out `units` in full: the previous `unit =` relied on partial
    # argument matching against difftime(units = ...), which is fragile.
    mutate(MinutesWorked = difftime(ClockOut, ClockIn, units = "mins"))
}
|
5746ceb31e0df811763e9bffd6817a362f55cd6d
|
15945ee3410e8313406faf7aaa7a01cc02c5d15e
|
/pull_followers.R
|
8660097d4dd31d0bc199ca9c793d049c18f3906a
|
[] |
no_license
|
SiddharthEngineer23/CC_Internship
|
87cf3685370ddb20297549527021f027ac01ed13
|
840bae78f4625edc4c45c6a9c8fee531ff559460
|
refs/heads/main
| 2023-07-08T03:46:56.185036
| 2021-08-10T19:30:52
| 2021-08-10T19:30:52
| 394,759,423
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 152
|
r
|
pull_followers.R
|
library(rvest)
# Star Wars demo page used in the rvest documentation.
# Use <- for assignment (R convention), not =.
url <- "https://rvest.tidyverse.org/articles/starwars.html"
# NOTE: performs a network request.
page <- read_html(url)
# All <section> elements on the page (one per film).
sections <- page %>% html_elements("section")
3c71ebf1cac37dbfc6b4dffb2946d22016f44ce9
|
573932f2fc40e94001a3659043b902db4d70a912
|
/11-R/tidytuesday-2021/2021-all-plots/combine-plots.R
|
364d2ea3d10247e5db995fe175625cb38bb60e3a
|
[] |
no_license
|
BioInformatica-Labs/My-Data-Science-Journey
|
69f441f88e9e106975ec27de873d82af2dd351c7
|
92e04aa2e3612a198dc213f2dbd13b5df404bfbe
|
refs/heads/main
| 2023-08-28T05:22:56.970894
| 2021-11-08T20:05:10
| 2021-11-08T20:05:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,929
|
r
|
combine-plots.R
|
# Author : Jenn Schilling
# Title: Combine #TidyTuesday Plots
# Date: Apr 8 2021
#### Libraries ####
library(magick)
library(here)
#### Get Plots for 2021 [Long] ####
row_1_21 <- image_append(
c(image_border(image_resize(image_read("2021-01-12\\turner_oil_colors_circle.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-01-19\\county_female_map.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-01-19\\county_female_bar.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-01-26\\sankey_plastic.png"), "500x400!"), color = "white", geometry = "10x10")))
row_2_21 <- image_append(
c(image_border(image_resize(image_read("2021-02-02\\bach_hs_attain_women.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-02\\bach_hs_attain.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-09\\wealth_distribution.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-16\\c1_final.png"), "500x400!"), color = "white", geometry = "10x10")))
row_3_21 <- image_append(
c(image_border(image_resize(image_read("2021-02-16\\c2_final.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-23\\earnings_age_2020.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-23\\occupations_2020.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-02\\superbowl_2.png"), "500x400!"), color = "white", geometry = "10x10")))
row_4_21 <- image_append(
c(image_border(image_resize(image_read("2021-03-09\\genre_summary.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-16\\steam_games.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-23\\un_votes.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-30\\foundation_shades.png"), "500x400!"), color = "white", geometry = "10x10")))
row_5_21 <- image_append(
c(image_border(image_resize(image_read("2021-04-06\\percent_forest.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-04-20\\netflix.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-04-27\\ceo.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-05-04\\water_art.png"), "500x400!"), color = "white", geometry = "10x10")))
row_6_21 <- image_append(
c(image_border(image_resize(image_read("2021-05-11\\az_broadband.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-05-18\\us_salaries.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-05-25\\mario_kart.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-06-01\\survivor.png"), "500x400!"), color = "white", geometry = "10x10")))
row_7_21 <- image_append(
c(image_border(image_resize(image_read("2021-06-08\\greatlakes.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-06-15\\duboischallenge.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-06-22\\tucsonparks.png"), "500x400!"), color = "white", geometry = "10x10")))
final_21 <- image_append(c(row_1_21,
row_2_21,
row_3_21,
row_4_21,
row_5_21,
row_6_21,
row_7_21),
stack = TRUE)
image_write(final_21,
"2021-all-plots\\2021-summary.png")
#### Get Plots for 2021 [Wide] ####
# Same panels as the [Long] layout above, arranged 8 per row instead of 4.
# Helper is (re)defined here so this section is self-contained: read a plot
# image and standardize it to a 500x400 panel with a thin white border.
std_panel <- function(path) {
  image_border(image_resize(image_read(path), "500x400!"),
               color = "white", geometry = "10x10")
}
row_1_21 <- image_append(c(std_panel("2021-01-12\\turner_oil_colors_circle.png"),
                           std_panel("2021-01-19\\county_female_map.png"),
                           std_panel("2021-01-19\\county_female_bar.png"),
                           std_panel("2021-01-26\\sankey_plastic.png"),
                           std_panel("2021-02-02\\bach_hs_attain_women.png"),
                           std_panel("2021-02-02\\bach_hs_attain.png"),
                           std_panel("2021-02-09\\wealth_distribution.png"),
                           std_panel("2021-02-16\\c1_final.png")))
row_2_21 <- image_append(c(std_panel("2021-02-16\\c2_final.png"),
                           std_panel("2021-02-23\\earnings_age_2020.png"),
                           std_panel("2021-02-23\\occupations_2020.png"),
                           std_panel("2021-03-02\\superbowl_2.png"),
                           std_panel("2021-03-09\\genre_summary.png"),
                           std_panel("2021-03-16\\steam_games.png"),
                           std_panel("2021-03-23\\un_votes.png"),
                           std_panel("2021-03-30\\foundation_shades.png")))
row_3_21 <- image_append(c(std_panel("2021-04-06\\percent_forest.png"),
                           std_panel("2021-04-20\\netflix.png"),
                           std_panel("2021-04-27\\ceo.png"),
                           std_panel("2021-05-04\\water_art.png"),
                           std_panel("2021-05-11\\az_broadband.png"),
                           std_panel("2021-05-18\\us_salaries.png"),
                           std_panel("2021-05-25\\mario_kart.png"),
                           std_panel("2021-06-01\\survivor.png")))
row_4_21 <- image_append(c(std_panel("2021-06-08\\greatlakes.png"),
                           std_panel("2021-06-15\\duboischallenge.png"),
                           std_panel("2021-06-22\\tucsonparks.png")))
final_21_w <- image_append(c(row_1_21,
                             row_2_21,
                             row_3_21,
                             row_4_21),
                           stack = TRUE)
# NOTE(review): this writes to the SAME path as the [Long] layout above and
# therefore overwrites it -- presumably intentional (keeping one layout at a
# time), but confirm.
image_write(final_21_w,
            "2021-all-plots\\2021-summary.png")
#### Get Plots for 2020 ####
# Helper for the 2020 panels: images are fetched from GitHub by URL; `frames`
# keeps only the given frame(s) of an animated gif (resize happens first, as
# in the original inline code).
gh_panel <- function(name, frames = NULL) {
  img <- image_resize(
    image_read(paste0("https://github.com/jennschilling/tidytusesday/raw/master/", name)),
    "500x400!")
  if (!is.null(frames)) img <- img[frames]
  image_border(img, color = "white", geometry = "10x10")
}
row_1_20 <- image_append(c(gh_panel("2020-07-28/tidytuesday-palmerpenguins.png"),
                           gh_panel("2020-08-04/top10_european_energy.png"),
                           gh_panel("2020-08-11/avatar.ratings.characters.png"),
                           gh_panel("2020-08-18/extinct.plants.png")))
row_2_20 <- image_append(c(gh_panel("2020-08-25/chopped.png"),
                           gh_panel("2020-09-01/banana.yield.png"),
                           gh_panel("2020-09-08/friends_seasons.gif", frames = 1),
                           gh_panel("2020-09-15/k12_spend_per_child.png")))
row_3_20 <- image_append(c(gh_panel("2020-09-22/first_ascents.png"),
                           gh_panel("2020-09-29/worldwide_sales.png"),
                           gh_panel("2020-11-03/ikea_category_prices.png"),
                           gh_panel("2020-11-24/washington_hikes.png")))
# Stack the three rows into the 2020 summary image.
final_20 <- image_append(c(row_1_20,
                           row_2_20,
                           row_3_20),
                         stack = TRUE)
image_write(final_20,
            "2021-all-plots\\2020-summary.png")
#### Put 2020 and 2021 Together ####
# Stack the 2020 summary above the 2021 summary into one tall image.
# Depends on `final_20` and `final_21` created in the sections above.
final <- image_append(c(final_20,
final_21),
stack = TRUE)
image_write(final,
"2021-all-plots\\summary_long.png")
#### Alternate Layout 4 rows by 7 columns ####
# Same panel standardization, parameterized over a local path or a URL;
# `frames` keeps only the given frame(s) of an animated gif.
mk_panel <- function(path, frames = NULL) {
  img <- image_resize(image_read(path), "500x400!")
  if (!is.null(frames)) img <- img[frames]
  image_border(img, color = "white", geometry = "10x10")
}
row_1 <- image_append(c(
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-07-28/tidytuesday-palmerpenguins.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-08-04/top10_european_energy.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-08-11/avatar.ratings.characters.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-08-18/extinct.plants.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-08-25/chopped.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-09-01/banana.yield.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-09-08/friends_seasons.gif", frames = 1)))
row_2 <- image_append(c(
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-09-15/k12_spend_per_child.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-09-22/first_ascents.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-09-29/worldwide_sales.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-11-03/ikea_category_prices.png"),
  mk_panel("https://github.com/jennschilling/tidytusesday/raw/master/2020-11-24/washington_hikes.png"),
  mk_panel("2021-01-12\\turner_oil_colors_circle.png"),
  mk_panel("2021-01-19\\county_female_map.png")))
row_3 <- image_append(
c(image_border(image_resize(image_read("2021-01-19\\county_female_bar.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-01-26\\sankey_plastic.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-02\\bach_hs_attain_women.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-02\\bach_hs_attain.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-09\\wealth_distribution.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-16\\c1_final.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-16\\c2_final.png"), "500x400!"), color = "white", geometry = "10x10")))
row_4 <- image_append(
c(image_border(image_resize(image_read("2021-02-23\\earnings_age_2020.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-02-23\\occupations_2020.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-02\\superbowl_2.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-09\\genre_summary.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-16\\steam_games.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-23\\un_votes.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-03-30\\foundation_shades.png"), "500x400!"), color = "white", geometry = "10x10")))
row_5 <- image_append(
c(image_border(image_resize(image_read("2021-04-06\\percent_forest.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-04-20\\netflix.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-04-27\\ceo.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-05-04\\water_art.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-05-11\\az_broadband.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-05-18\\us_salaries.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-05-25\\mario_kart.png"), "500x400!"), color = "white", geometry = "10x10")))
row_6 <- image_append(
c(image_border(image_resize(image_read("2021-06-01\\survivor.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-06-08\\greatlakes.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-06-15\\duboischallenge.png"), "500x400!"), color = "white", geometry = "10x10"),
image_border(image_resize(image_read("2021-06-22\\tucsonparks.png"), "500x400!"), color = "white", geometry = "10x10")))
# Stack all six rows vertically into the wide-layout summary and save it.
final_together <- image_append(c(row_1,
row_2,
row_3,
row_4,
row_5,
row_6),
stack = TRUE)
image_write(final_together,
"2021-all-plots\\summary_wide.png")
|
9076491dfa436f8c64f17a2af9e8b113a48d7db6
|
1ac0e1b3d0ec8203f60a5abc1d036a3bb5c113ff
|
/plot5.r
|
c48f3050a1fc1507856d3377bf22f35762e14a33
|
[] |
no_license
|
niambhs/ExData_Plotting2
|
c615ec794a46eeefd7a512a1c2c76c2944f7c34f
|
a8bd3947f043f2f48f720d769bd9ddb799a1e145
|
refs/heads/master
| 2016-09-16T01:14:09.333392
| 2015-06-21T08:36:39
| 2015-06-21T08:36:39
| 37,209,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,247
|
r
|
plot5.r
|
plot5 <- function() {
  # How have emissions from motor vehicle sources changed from 1999-2008 in
  # Baltimore City (fips == "24510")?
  #
  # Side effects: reads "summarySCC_PM25.rds" and
  # "Source_Classification_Code.rds" from the working directory and writes
  # "plot5.png".

  # Read each of the two files using the readRDS() function in R.
  NEI <- readRDS("summarySCC_PM25.rds")
  SCC <- readRDS("Source_Classification_Code.rds")

  # Subset the classification codes to vehicle-related sources.
  # FIX: ignore.case is an argument of grepl(), not subset(); in the
  # original it was passed to subset() and silently ignored.
  vehicleSubset <- subset(SCC,
                          grepl('Vehicle', EI.Sector, ignore.case = TRUE))
  NEIBalt <- subset(NEI, fips=='24510')

  # Merge the Baltimore emissions with the vehicle source codes.
  mergedData <- merge(NEIBalt, vehicleSubset, by=c("SCC"))

  # Establish factors so aggregation groups cleanly by year.
  factoredNEIBalt<-transform(mergedData,type=factor(type),year=factor(year))

  # Total emissions per year.
  aggregateEmmissions <- aggregate(Emissions ~ year,
                                   data=factoredNEIBalt,
                                   FUN=sum)

  if (!require("ggplot2")) {
    install.packages("ggplot2")
  }
  require("ggplot2")

  a <- ggplot(data = aggregateEmmissions, aes(x = year, y = Emissions,group = 1))
  a <- a + geom_line()
  a <- a + xlab("Year") + ylab("Emmisions") + ggtitle("Baltimore Vehicle Emmisions 1999-2008")
  print(a)

  # FIX: dev.copy() expects a device function (e.g. png) as its first
  # argument; passing the ggplot object `a` was an error.
  dev.copy(png, file="plot5.png", width=500, height=500)
  dev.off()
}
|
3359131515842b3ea89f8ef30f5a685eb8e1d0b5
|
c8e9e754e3a751ea785aaaf6a929d02fa106dbcc
|
/man/searchrules.Rd
|
901b97bdb3f33626cea5b6eac9f01f766f5c29cf
|
[] |
no_license
|
beerda/lfl
|
3a6849da19165990bcac9c57cf136b62d8ccc951
|
9b8028447ab53e0f91553cd827f7a15783593c6b
|
refs/heads/master
| 2023-02-24T10:09:01.496322
| 2023-02-15T06:57:08
| 2023-02-15T06:57:08
| 99,807,073
| 5
| 1
| null | 2020-02-26T07:49:58
| 2017-08-09T12:42:17
|
R
|
UTF-8
|
R
| false
| true
| 5,596
|
rd
|
searchrules.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/searchrules.R
\name{searchrules}
\alias{searchrules}
\title{Searching for fuzzy association rules}
\usage{
searchrules(
d,
lhs = 2:ncol(d),
rhs = 1,
tnorm = c("goedel", "goguen", "lukasiewicz"),
n = 100,
best = c("confidence"),
minSupport = 0.02,
minConfidence = 0.75,
maxConfidence = 1,
maxLength = 4,
numThreads = 1,
trie = (maxConfidence < 1)
)
}
\arguments{
\item{d}{An object of class \code{\link[=fsets]{fsets()}} - it is basically a matrix
where columns represent the fuzzy sets and values are the membership
degrees. For creation of such object, use \code{\link[=fcut]{fcut()}} or \code{\link[=lcut]{lcut()}} function.}
\item{lhs}{Indices of fuzzy attributes that may appear on the left-hand-side
(LHS) of association rules, i.e. in the antecedent.}
\item{rhs}{Indices of fuzzy attributes that may appear on the
right-hand-side (RHS) of association rules, i.e. in the consequent.}
\item{tnorm}{A t-norm to be used for computation of conjunction of fuzzy
attributes. (Allowed are even only starting letters of "lukasiewicz",
"goedel" and "goguen").}
\item{n}{The non-negative number of rules to be found. If zero, the function
returns all rules satisfying the given conditions. If positive, only
\code{n} best rules are returned. The criterion of what is ``best'' is
specified with the \code{best} argument.}
\item{best}{Specifies the measure according to which the rules are ordered
from best to worst. This argument is used mainly in combination with the
\code{n} argument. Currently, only a single value ("confidence") can be used.}
\item{minSupport}{The minimum support degree of a rule. Rules with support
below that number are filtered out. It must be a numeric value from interval
\eqn{[0, 1]}. See below for details on how the support degree is computed.}
\item{minConfidence}{The minimum confidence degree of a rule. Rules with
confidence below that number are filtered out. It must be a numeric value
from interval \eqn{[0, 1]}. See below for details on how the confidence degree is
computed.}
\item{maxConfidence}{Maximum confidence threshold. After finding a rule
whose confidence degree exceeds the \code{maxConfidence} threshold, no
further rule is generated by adding additional attributes to its antecedent
part. I.e. if "Sm.age & Me.age => Sm.height" has confidence above the
\code{maxConfidence} threshold, no other rule containing "Sm.age & Me.age"
will be produced regardless of its interest measures.
If you want to disable this feature, set \code{maxConfidence} to 1.}
\item{maxLength}{Maximum allowed length of the rule, i.e. maximum
number of predicates that are allowed on the left-hand + right-hand side of the rule. If
negative, the maximum length of rules is unlimited.}
\item{numThreads}{Number of threads used to perform the algorithm in
parallel. If greater than 1, the OpenMP library (not to be confused with
Open MPI) is used for parallelization. Please note that there are known
problems of using OpenMP together with another means of parallelization that
may be used within R. Therefore, if you plan to use the \code{searchrules}
function with some of the external parallelization mechanisms such as
library \code{doMC}, make sure that \code{numThreads} equals 1. This
feature is available only on systems that have installed the OpenMP library.}
\item{trie}{Whether or not to use the internal mechanism of tries. If FALSE,
the output may contain rules that are descendants of a rule whose
confidence is above the \code{maxConfidence} threshold.
Tries consume very much memory, so if you encounter problems with
insufficient memory, set this argument to FALSE. On the other hand, the size
of result (if \code{n} is set to 0) can be very high if trie is set to
FALSE.}
}
\value{
A list of the following elements: \code{rules} and \code{statistics}.
\code{rules} is a list of mined fuzzy association rules. Each element of
that list is a character vector with consequent attribute being on the first
position.
\code{statistics} is a data frame of statistical characteristics about mined
rules. Each row corresponds to a rule in the \code{rules} list. Let us
consider a rule "a & b => c", let \eqn{\otimes} be a t-norm specified with
the \code{tnorm} parameter and \eqn{i} goes over all rows of a data table
\code{d}. Then columns of the \code{statistics} data frame are as follows:
\itemize{
\item support: a rule's support degree: \eqn{1/nrow(d) * \sum_{\forall i} a(i) \otimes b(i) \otimes c(i)}
\item lhsSupport: a support of rule's antecedent (LHS): \eqn{1/nrow(d) * \sum_{\forall i} a(i) \otimes b(i)}
\item rhsSupport: a support of rule's consequent (RHS): \eqn{1/nrow(d) * \sum_{\forall i} c(i)}
\item confidence: a rule's confidence degree: \eqn{support / lhsSupport}
}
}
\description{
This function searches the given \code{\link[=fsets]{fsets()}} object \code{d} for all
fuzzy association rules that satisfy defined constraints. It returns a list
of fuzzy association rules together with some statistics characterizing them
(such as support, confidence etc.).
}
\details{
The function searches data frame \code{d} for fuzzy association rules that
satisfy conditions specified by the parameters.
}
\examples{
d <- lcut(CO2)
searchrules(d, lhs=1:ncol(d), rhs=1:ncol(d))
}
\seealso{
\code{\link[=fcut]{fcut()}}, \code{\link[=lcut]{lcut()}}, \code{\link[=farules]{farules()}}, \code{\link[=fsets]{fsets()}}, \code{\link[=pbld]{pbld()}}
}
\author{
Michal Burda
}
\keyword{models}
\keyword{multivariate}
\keyword{robust}
|
40d3b29ee6156d0e4ee864aa7f5561d2a032781e
|
5643509ed57314f610c16a02e7fc3fce6e8b6267
|
/Chapter 5/R/soft_prune.R
|
caf968053a1f7f6050bd9c067d3eb1cccd0be295
|
[] |
no_license
|
aida-ugent/PhD_Code_Robin_Vandaele
|
d942ea50defe5417b032dcf7d2f61c9eac19c143
|
229166059fd8fb366f23162a0b72e7547e80649b
|
refs/heads/main
| 2023-02-21T16:26:44.183346
| 2021-01-27T15:07:35
| 2021-01-27T15:07:35
| 318,493,807
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,418
|
r
|
soft_prune.R
|
#' Prune leaves in a graph either a given number of times or until there are
#' no more than a specified number of leaves.
#' At each iteration, the leaves representing the fewest amount of nodes are pruned.
#' Each node is set to represent only itself at the start of the iterations.
#' When leaves are pruned, they are consequently represented by their sole neighbor.
#'
#' @param G graph (igraph object) to be pruned
#' @param times the number of times all leaves must be pruned, standard set to 1
#' @param leaves (maximum) number of leaves to be kept, 'times' is ignored if provided
#'
#' @return the pruned graph
soft_prune <- function(G, times=1, leaves=NA){
  if(times < 0) stop("'times' must be at least 0")
  if(times == 0) return(G)
  # FIX: scalar condition in an if() should use the short-circuiting '&&';
  # the elementwise '&' only worked here by accident.
  if(times == 1 && is.na(leaves)) return(hard_prune(G))
  leaves <- max(as.integer(leaves), 0, na.rm = TRUE)
  # When a target leaf count is given, keep iterating until it is reached.
  if(leaves >= 2) times <- Inf
  to_prune <- which(degree(G) == 1)
  no_leaves <- length(to_prune)
  # NoR[v]: number of (pruned) nodes currently represented by vertex v;
  # every vertex starts out representing only itself.
  NoR <- rep(1, length(V(G)))
  names(NoR) <- V(G)$name
  it <- 0
  while(no_leaves > leaves){
    it <- it + 1
    # Fold each pruned leaf's representation count into its sole neighbor.
    for(u in to_prune){
      v <- neighbors(G, u)$name
      NoR[v] <- NoR[v] + NoR[V(G)[u]$name]
    }
    G <- delete_vertices(G, to_prune)
    if(it >= times) break
    to_prune <- which(degree(G) == 1)
    no_leaves <- length(to_prune)
    # Only prune the leaves that represent the fewest nodes this round.
    if(length(to_prune) > 0) to_prune <- to_prune[NoR[V(G)$name[to_prune]] == min(NoR[V(G)$name[to_prune]])]
  }
  return(G)
}
|
f64393cd5bf5b8670185081e4b013d864b82ef9c
|
024df6efa9b21bfa2dbcdb6b5560f85404097c5c
|
/Barren Land Analysis.R
|
99cd9f9f11d9e750448cfe800050814874e85be6
|
[] |
no_license
|
pedstrom/barren-land-analysis
|
e9fdac6cc7e6471d02b5b1d825cdaf8072bc826e
|
75ac69995df27960f8149469c58f63bea2ba0911
|
refs/heads/master
| 2021-05-10T13:20:00.922693
| 2018-01-23T15:16:54
| 2018-01-23T15:16:54
| 118,470,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,197
|
r
|
Barren Land Analysis.R
|
# Barren Land Analysis R file
#
# use: Rscript Barren\ Land\ Analysis.R "0 292 399 307"
# use: Rscript Barren\ Land\ Analysis.R "48 192 351 207" "48 392 351 407" "120 52 135 547" "260 52 275 547"
#
# Peter Edstrom
# peter@edstrom.net
# January 2018
#
library(ggplot2)
# Barren rectangles arrive as command-line strings of "x0 y0 x1 y1".
args <- commandArgs(trailingOnly = TRUE)
# setup the grid and a few vars
#dfFlood <- expand.grid(x = 0:399, y = 0:599)
# NOTE(review): the active grid is 10x10 while the commented-out line above
# (and the usage examples / the 399/599 bounds in floodFill) suggest the
# intended grid is 400x600 -- looks like a debugging leftover; confirm.
dfFlood <- expand.grid(x = 0:9, y = 0:9)
dfFlood$z <- 1
# Accumulates the area of each connected fertile region found.
results <- c()
# Flood-fill work queue of (x, y) cells still to visit.
Q <- data.frame(x=integer(), y=integer())
# Mark every grid cell lying inside the given rectangles with `val`
# (0 = barren). `df` is a grid with columns x, y, z; each element of
# `barren_lands` is a string "x0 y0 x1 y1" with inclusive bounds.
setBarren <- function(df, barren_lands, val=0){
  for (rect_spec in barren_lands){
    coords <- as.numeric(strsplit(rect_spec, "[ ]")[[1]])
    inside <- df$x >= coords[1] & df$x <= coords[3] &
              df$y >= coords[2] & df$y <= coords[4]
    df$z[inside] <- val
  }
  return(df)
}
# z=0 is barren land
# z=1 is fertile land
# z=.75 is marked fertile land that we are currently counting
# Visit one cell of the global grid dfFlood: if it is unvisited fertile
# land, mark it as currently counted (z = .75) and enqueue its four
# neighbours on the global queue Q. Operates entirely via globals (<<-).
floodFill <- function(xCord, yCord) {
  # NOTE(review): these two paste() calls discard their results -- they
  # look like debugging leftovers with no effect.
  paste("floodFill2")
  paste(xCord,yCord)
  zVal <- dfFlood[dfFlood$x==xCord & dfFlood$y==yCord,]$z
  # Stop on already-counted (.75) or barren (0) cells. For coordinates
  # outside the grid the lookup yields numeric(0) -- NOTE(review): a
  # zero-length if() condition errors in R; combined with the hard-coded
  # 399/599 bounds below vs the 10x10 grid built above, confirm the
  # intended grid size before running.
  if (is.na(zVal) | zVal == .75 | zVal == 0) {
    return()
  } else {
    dfFlood[dfFlood$x==xCord & dfFlood$y==yCord,]$z <<- .75
    if (xCord > 0) { Q[nrow(Q)+1,] <<- c(xCord-1, yCord) } #left
    if (xCord < 399) { Q[nrow(Q)+1,] <<- c(xCord+1, yCord) } #right
    if (yCord < 599) { Q[nrow(Q)+1,] <<- c(xCord, yCord+1) } #up
    if (yCord > 0) { Q[nrow(Q)+1,] <<- c(xCord, yCord-1) } #down
  }
}
# Seed the work queue with the first still-unvisited fertile cell (z == 1),
# if any remain. Mutates the globals through floodFill().
prepQ <- function() {
  nextFertile <- dfFlood[dfFlood$z==1,][1,] # find next area
  floodFill(nextFertile$x,nextFertile$y) # prime the Q
}
# prep the Q and areas
dfFlood <- setBarren(dfFlood, args)
prepQ()
# Outer loop: one iteration per connected fertile region.
while ( nrow(Q) > 0 ) {
  # process the queue and set all of the area to .75
  while ( nrow(Q) > 0 ) {
    floodFill(Q[1,]$x, Q[1,]$y) #set and add neighbors to queue
    Q <<- Q[-1,] #remove row 1
  }
  area <- nrow(dfFlood[dfFlood$z==.75,]) # count the area
  results <<- c(results, area) # save the area into the results
  dfFlood[dfFlood$z==.75,]$z <- 0 # mark the area as done
  prepQ() # find the next area if there is one
}
# Final output: region areas in ascending order.
results[order(results)]
|
fbfa793aa0c4328516001676961cba95c0725e31
|
938da43ec87a0cee7ed50b02627775c4a679b234
|
/Waterloo_For_R/gshell/man/G.asMap.Rd
|
d1a47035b6dd97e81d56bae066fa9e85b6cfaeee
|
[] |
no_license
|
irondukepublishing/waterloo
|
bcce869bf83d8987ceaa7382f0d9f129d14fb3bf
|
c764b4b13d85af2e58e4e493d867e8a1c861a2e1
|
refs/heads/master
| 2021-04-09T10:48:40.135877
| 2016-12-30T21:55:54
| 2016-12-30T21:55:54
| 62,012,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 793
|
rd
|
G.asMap.Rd
|
\name{G.asMap}
\alias{G.asMap}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
G.asMap
}
\description{
Returns java.util.LinkedHashMap<String, Object> given an R list.
}
\usage{
G.asMap(inList)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{inList}{
An R list
}
}
\value{
The returned java.util.LinkedHashMap<String, Object> instance will hold the key/value pairs from the R list as the key/value pairs of the Map.
}
\author{
Malcolm Lidierth
}
\seealso{
G.asList
}
\examples{
example <- function(){
G.asMap(list(a='10d', b=99, c=TRUE))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{programming}
\keyword{Interfaces to Other Languages}% __ONLY ONE__ keyword per line
|
c86dff0e832215585dc3beae31419d49690c2be0
|
33968755d496757f17da5815c0b0544708174bd4
|
/tests/testthat/test-strippedConsoleText.R
|
37d4f2e594754bba48fcb0f8f69d14a1d6639b86
|
[] |
no_license
|
ljrendell/ccopy
|
51a6e40e95e4d17528fdeaf13851b45046cbb9d5
|
d52fc0e8859509fa4303240633f9793cd6a2d57e
|
refs/heads/master
| 2021-01-11T19:18:10.981133
| 2017-04-02T14:09:11
| 2017-04-02T14:09:11
| 79,348,324
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
test-strippedConsoleText.R
|
# Regression test for the internal console-text stripper: lines beginning
# with the command prompt ("> ") or continuation prompt ("+ ") are kept with
# the prompt removed; all other lines are dropped.
# FIX: corrected the typo "commmand" in the context description string.
context("strippedConsoleText must delete strings not beginning with the
command or continuation prompt, and delete these prompts from the
remaining strings")
test_that("strippedConsoleText works correctly", {
  # Mix of prompt-prefixed lines (kept, prompt stripped) and other lines
  # (dropped). A leading space before the prompt disqualifies the line.
  testVec <- c("> a", "> b", "> c ",
  "+ d", "+ +e", "+ f> ",
  ">g", " > h",
  "+i", " + j",
  "k", "l> ", "m + ",
  "[1] n", "Warning: o",
  "> \"Error: p\"")
  expect_identical(ccopy:::strippedConsoleText(testVec),
  c("a", " b", "c ", "d", "+e", "f> ", "\"Error: p\""))
})
|
493eedf20285095ccc7c36b3c7d3e2151cc206d0
|
17e75933bd6fc146e60eddd4abcdda275ddb1128
|
/R/ds06_freshman_15.R
|
1876e59ccede3e638b092cb631254c9672ea2351
|
[] |
no_license
|
ngay/kustats
|
ca6d65094c4147d2aede6211c323c5703933e6e3
|
7e699753597d0cde0e2eab7a209cd032d40bb2de
|
refs/heads/master
| 2021-01-09T20:08:21.539585
| 2018-04-10T01:53:45
| 2018-04-10T01:53:45
| 63,873,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 820
|
r
|
ds06_freshman_15.R
|
#' Data Set 6: Freshman 15 Data
#'
#' Measurements were made in September of freshman year and then
#' later in April of freshman year. Results are published in "Changes
#' in Body Weight and Fat Mass of Men and Women in the First Year
#' of College: A Study of the 'Freshman 15' " by Hoffman, Policastro,
#' Quick, and Lee, Journal of American College Health, Vol. 55, No. 1.
#' \itemize{
#' \item SEX is the gender of the subject
#' \item WT_SEPT is the September weight (kg)
#' \item WT_APRIL is the April weight (kg)
#' \item BMI_SEPT denotes measured body mass index in September
#' \item BMI_APRIL denotes measured body mass index in April
#' }
#'
#' @format A data frame with 67 rows and 5 variables
#' @source Triola, M. 2018. Elementary Statistics, 13th ed. Boston, MA: Pearson.
#' @name ds06_freshman_15
# NULL is the roxygen documentation target: the data-set docs above are
# attached to the name given by @name, so no R object is defined here.
NULL
|
b46236b683bbf4d79a395465af31eda3c9483d7d
|
9cb9dfcc049c125c278c5402ecdfce2336f5c07a
|
/multiStockassessment/man/corplot.msam.Rd
|
9a6873d68ef5aeee1564018b77cf022b45265612
|
[] |
no_license
|
fishfollower/multi_SAM
|
4c521214b5733f07a4a516c909be411e7337c23d
|
dd449d0340dfbadb006ddebfef2384bd4ff337d9
|
refs/heads/master
| 2020-04-01T14:21:18.078337
| 2018-10-26T08:26:29
| 2018-10-26T08:26:29
| 153,290,836
| 1
| 0
| null | 2018-10-16T13:32:27
| 2018-10-16T13:32:27
| null |
UTF-8
|
R
| false
| true
| 566
|
rd
|
corplot.msam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corplot.R
\name{corplot.msam}
\alias{corplot.msam}
\title{Correlation and partial correlation plot between survival processes}
\usage{
\method{corplot}{msam}(object, ...)
}
\arguments{
\item{object}{msam object}
\item{...}{Other parameters not currently used}
}
\value{
A matrix of correlations (lower triangular) and partial correlations (upper triangular)
}
\description{
Correlation and partial correlation plot between survival processes
}
\author{
Christoffer Moesgaard Albertsen
}
|
da31fb251907e1e188b60ec6aed9a5e5c5777e9a
|
63efc0126780ff341fa76728b856da42d52565ab
|
/functions.R
|
7b07a49165e118998b929e11519f318c623412cd
|
[] |
no_license
|
Jean-Baptiste-Camps/Wauchier_stylo
|
80a34b6c542f6a2bf06ad22d4e9dd6a1440f63dc
|
bd3acc3290317df1d623313732181ea9eadacfc0
|
refs/heads/master
| 2021-06-25T10:16:23.290600
| 2021-03-08T09:13:11
| 2021-03-08T09:13:11
| 214,435,379
| 0
| 1
| null | 2021-03-08T09:13:12
| 2019-10-11T12:50:21
|
HTML
|
UTF-8
|
R
| false
| false
| 19,875
|
r
|
functions.R
|
# Functions for stylometric analysis
## Distance measures
### Cosine distance
# Cosine distance between the ROWS of a numeric matrix:
# d(i, j) = 1 - cosine similarity of rows i and j. Returns a 'dist' object.
cosineDist <- function(x){
  row_norms <- sqrt(rowSums(x^2))
  similarity <- tcrossprod(x) / (row_norms %*% t(row_norms))
  as.dist(1 - similarity)
}
### MinMax metric
# MinMax (Ruzicka) distance between the COLUMNS of x:
# d(i, j) = 1 - sum(pmin(x_i, x_j)) / sum(pmax(x_i, x_j)).
# Returns a symmetric matrix whose dimnames are the column names of x.
MinMax =
  function(x){
    myDist = matrix(nrow = ncol(x), ncol = ncol(x),
                    dimnames = list(colnames(x), colnames(x)))
    # seq_len() is safe when x has zero columns (1:ncol(x) is not).
    for(i in seq_len(ncol(x))){
      for(j in seq_len(ncol(x))){
        # pmin/pmax replace the original row-wise apply(cbind(...), 1, min/max)
        # calls: same values, vectorised and much faster.
        minSum = sum(pmin(x[,i], x[,j]))
        maxSum = sum(pmax(x[,i], x[,j]))
        myDist[i,j] = 1 - (minSum / maxSum)
      }
    }
    return(myDist)
  }
### Normalisations
# Convert every column of x (counts) to relative frequencies so that each
# column sums to 1. Works for both matrices and data frames.
relativeFreqs = function(x){
  # seq_len() avoids the 1:ncol(x) footgun on zero-column input.
  for(i in seq_len(ncol(x))){
    x[,i] = x[,i]/sum(x[,i])
  }
  return(x)
}
# Z-scores
# Row-wise z-transformation: rescale every row of x to mean 0 and sd 1.
ZTransf = function(x){
  # seq_len() avoids the 1:nrow(x) footgun on zero-row input.
  for(i in seq_len(nrow(x))){
    x[i,] = ( x[i,] - mean(x[i,]) ) / sd(x[i,])
  }
  return(x)
}
# Standard normalisation pipeline: z-transform the rows, then scale each
# column vector to unit Euclidean length.
normalisations = function(x){
  # Z-transformation (row-wise, via the sibling ZTransf helper)
  x = ZTransf(x)
  # Vector length normalisation: each column divided by its L2 norm.
  # seq_len() avoids the 1:ncol(x) footgun on zero-column input.
  for(i in seq_len(ncol(x))){
    x[,i] = x[,i] / sqrt(sum(x[,i]^2))
  }
  return(x)
}
### Feature selection (Moisl 2011)
# Moisl (2011)-style frequency-based feature selection: for every feature
# (row of the count matrix x), estimate the sample size needed to estimate
# its occurrence probability reliably, and flag the features for which the
# smallest available document is large enough.
#
# x: count matrix (rows = features, columns = documents)
# z: normal quantile controlling confidence (1.96 ~ 95% two-sided)
# Returns a data frame with, per feature: total frequency, mean probability,
# required sample size, and a logical 'passes' column.
selection = function(x, z = 1.96){
  # Conversion to probabilities
  probs = relativeFreqs(x)
  # Prepare output
  results = matrix(nrow = nrow(probs), ncol = 4, dimnames = list(rownames(probs), c('freq', 'mean prob', 'sample size necessary', 'passes')))
  results = as.data.frame(results)
  results[,1] = rowSums(x)
  results[,2] = rowMeans(probs)
  for (i in 1:nrow(probs)){
    var = probs[i,]
    # hist(probs[i,])
    # Calculates mirror-image to compensate for non normality
    mirror = ( max(var) + min(var) ) - var
    var = c(var,mirror)
    # e as function of sigma: the margin of error is taken as two standard
    # deviations of the mirrored distribution
    e = 2 * sd(var)
    # Required n for estimating a proportion: p(1-p) * (z/e)^2
    results[i,3] = mean(var) * (1 - mean(var)) * (z / e )^2
  }
  # Mark as FALSE all features whose required sample size exceeds the
  # smallest document (column) available.
  results[,4] = results[,3] <= min(colSums(x))
  return(results)
}
### SOM
library("kohonen")
# Fit a self-organising map on x (gridx x gridy hexagonal grid) and cluster
# its codebook vectors with Ward agglomerative clustering (manhattan metric).
# Returns an agnes object.
somCluster = function(x, gridx = 10, gridy = 10){
  fitted_som = som(x, grid = somgrid(gridx, gridy, "hexagonal"))
  #plot(fitted_som, type="mapping", labels=rownames(fitted_som$data))
  codebook = t(as.data.frame(fitted_som$codes))
  results = cluster::agnes(codebook, metric = "manhattan", method = "ward")
  return(results)
}
### Robustness checks
# Gives the cluster purity with reference to alledged authors, and an adjusted Rand index in comparison with the analysis showed in fig. 1
# For several most-frequent-feature cutoffs, re-run the clustering pipeline
# and report, per cutoff: the number of features kept (N), cluster purity
# vs. the alleged authors (CPAuteurs), and cluster purity vs. a reference
# clustering (CPReference). Assumes rownames of the data encode the author
# as the prefix before the first "_".
robustnessChecks = function(data, refCAH, k = "10"){
  # Get classes from the reference CAH
  refCAHClasses = cutree(refCAH, k = k)
  # Prepare results
  results = matrix(ncol = 3, nrow = 0, dimnames = list(NULL, c("N", "CPAuteurs", "CPReference")))
  # Each element of the list is a probability grid; only its second-to-last
  # quantile is used below, so the grids encode increasingly coarse cutoffs.
  for (i in list(
    seq(0, 1, 0.001),
    seq(0, 1, 0.01),
    seq(0, 1, 0.1),
    seq(0, 1, 0.25),
    seq(0, 1, 0.5),
    seq(0, 0.5, 0.25),
    seq(0, 1, 1)) ) {
    # First, get the cutoffs: first 1000-quantile, first 100-quantile, first decile, all
    selec = quantile(rowSums(data), probs = i)
    selec = selec[length(selec) - 1]
    myData = data[rowSums(data) >= selec, , drop = FALSE]
    myData = normalisations(myData)
    myCAH = cluster::agnes(t(myData), metric = "manhattan", method="ward")
    # Classes as per alledged author (prefix before first "_")
    expected = sub("_.*", "", rownames(myCAH$data))
    # Cluster purity
    classes = cutree(myCAH, k = k)
    N = nrow(myData)
    purity = NMF::purity(as.factor(classes), expected)
    #NMF::entropy(as.factor(classes), expected)
    purityRef = NMF::purity(as.factor(classes), as.factor(refCAHClasses))
    #Rand = mclust::adjustedRandIndex(classes, refCAHClasses)
    # Row label: percentage of most-frequent features kept at this cutoff.
    MF = paste(round(100 - as.numeric(sub("%", "", names(selec))), digits = 2), "%", sep = "")
    #localRes = matrix(data = c(N, purity, purityRef, Rand), nrow = 1, ncol = 4, dimnames = list(MF, NULL))
    localRes = matrix(data = c(N, purity, purityRef), nrow = 1, ncol = 3, dimnames = list(MF, NULL))
    results = rbind(results, localRes)
  }
  return(results)
}
# Gives the cluster purity with reference to alledged authors, and an adjusted Rand index in comparison with the analysis showed in fig. 1
# Pairwise comparison of a list of hierarchical clusterings: each tree is
# cut into k groups and the cluster purity between every pair is reported.
# Returns a square matrix (labels of cahList on both dimensions).
compareHC = function(cahList, k = "10"){
  cahList = lapply(cahList, as.hclust)
  n = length(cahList)
  # Prepare results
  results = matrix(ncol = n, nrow = n, dimnames = list(labels(cahList), labels(cahList)))
  # seq_len() is safe for an empty list (1:length was not).
  for (i in seq_len(n)){
    for (j in seq_len(n)){
      classes1 = cutree(cahList[[i]], k = k)
      classes2 = cutree(cahList[[j]], k = k)
      # FIX: removed the stray chained 'purityRef =' assignment that the
      # original carried over from copy-paste; it had no effect.
      results[i, j] = NMF::purity(as.factor(classes1), as.factor(classes2))
    }
  }
  return(results)
}
### Gives, for each HC, three indicators
# - Purity with regard to Meyer Leg. A/B/C and Wauchier
# - Quality of clustering with HC
# - Stability: ARI with regard to a reference HC
### Gives, for each HC, four indicators:
# - N: number of features used by the clustering
# - AC: agnes agglomerative coefficient (quality of clustering)
# - CPMeyer: purity with regard to Meyer Leg. A/B/C and Wauchier groupings
# - CPREF: stability, i.e. purity with regard to a reference HC
benchmarkHC = function(refCAH, cahList, k = 10){
  # Prepare results
  results = matrix(ncol = 4, nrow = length(cahList),
                   dimnames = list(labels(cahList), c("N", "AC", "CPMeyer", "CPREF")))
  for (i in 1:length(cahList)){
    N = ncol(cahList[[i]]$data)
    # 1. Purity with regard to Wauchier/non-Wauchier
    classes1 = cutree(as.hclust(cahList[[i]]), k = k)
    expected = classes1
    # Relabel texts by their group inferred from the rowname patterns;
    # _Wau_ overrides Leg.C because it is assigned last.
    expected[grep("Leg.A", rownames(cahList[[i]]$data))] = "Leg-A"
    expected[grep("Leg.B", rownames(cahList[[i]]$data))] = "Leg-B"
    expected[grep("Leg.C", rownames(cahList[[i]]$data))] = "Leg-C"
    expected[grep("_Wau_", rownames(cahList[[i]]$data))] = "Wauchier"
    CPMeyer = NMF::purity(as.factor(classes1), as.factor(expected))
    # 2. Quality of clustering with HC
    AC = cahList[[i]]$ac
    # 3. Stability: CP with regard to a reference HC
    classes2 = cutree(as.hclust(refCAH), k = k)
    CPRef = NMF::purity(as.factor(classes1), as.factor(classes2))
    results[i, ] = c(N, AC, CPMeyer, CPRef)
  }
  return(results)
}
# Volatility index per text: how consistently a text co-clusters with the
# same companions across a list of clusterings (each cut into k groups).
# V_i close to 1 means the text's cluster mates are stable across analyses.
volatility = function(cahList, k = 9){
  textsLabels = cahList[[1]]$order.lab
  results = matrix(nrow = length(textsLabels), ncol = 1,
                   dimnames = list(textsLabels, "V_i"))
  classes = lapply(cahList, as.hclust)
  classes = lapply(classes, cutree, k = k)
  nclasses = length(classes)
  for (i in 1:length(textsLabels)){
    # Find all X with their freqs. Start with as much row as labels, will remove
    # unused ones later
    X = matrix(ncol = 1, nrow = length(textsLabels), dimnames = list(textsLabels, 'Freq'), data = 0)
    # Count, per companion text, how often it shares a cluster with text i.
    for (j in 1:length(classes)){
      myMembers = labels(classes[[j]][ classes[[j]] == classes[[j]][textsLabels[i]]])
      X[myMembers, ] = X[myMembers, ] + 1
    }
    # Remove 0s (texts never clustered with text i)
    X = X[X > 0, ]
    # Compute index: average, over companions, of (together - apart)/total.
    V_i = sum( (X - (nclasses - X) )/ nclasses ) / length(X)
    results[i, ] = V_i
  }
  return(results)
}
###########################################
### Compare results with other datasets ###
###########################################
#### First, a function to replicate all other analyses on an alternate dataSet
## subfunction to perform analysis
## subfunction to perform analysis
# Read a feature-count CSV (rows = texts, columns = features), transpose it
# so that rows become features and columns become texts, keep only the
# texts listed in toKeep, and drop features that never occur.
#
# toKeep: character vector of text (column) names to keep for analysis
# path:   path to the CSV (header row, first column holds row names)
# Returns a numeric matrix (features x texts).
readData = function(toKeep, path){
  data = read.csv(path, header = TRUE, row.names = 1)
  data = t(data)
  # FIX: drop = FALSE keeps the matrix shape when toKeep selects a single
  # text; otherwise the result collapses to a vector and rowSums() fails.
  data = data[, toKeep, drop = FALSE]
  data = data[rowSums(data) > 0, , drop = FALSE]
  return(data)
}
# Read an annotated-feature CSV (semicolon-separated; rows = features after
# dropping the first metadata column), clean up read.csv's name mangling of
# the text columns, keep only the texts in toKeep, drop all-zero features,
# and return the result as a matrix.
#
# toKeep: character vector of (cleaned) text names to keep
# path:   path to the ";"-separated CSV (header, first column = row names)
readAnnotData = function(toKeep, path){
  data = read.csv(path, header = TRUE, row.names = 1, sep = ";")
  # The first data column is metadata, not a feature count.
  data = data[, -1]
  # Undo read.csv name mangling: leading "X", ".decolumnized" suffix, and
  # "Leg." back to "Leg-".
  colnames(data) = gsub("^X", "", colnames(data))
  colnames(data) = gsub(".decolumnized", "", colnames(data))
  colnames(data) = gsub("Leg.", "Leg-", colnames(data))
  # FIX: drop = FALSE keeps the data-frame shape when toKeep has a single
  # text; otherwise rowSums() below would fail on the resulting vector.
  data = data[, toKeep, drop = FALSE]
  data = data[rowSums(data) > 0, , drop = FALSE]
  data = as.matrix(data)
  return(data)
}
# Run the standard pipeline on a count matrix: Moisl (2011) feature
# selection at z = 1.645, relative frequencies, z-scores + vector-length
# normalisation, then Ward agglomerative clustering (manhattan metric).
# Returns an agnes object (HC of the texts).
performAnalysis = function(data){
  # Keep only features whose required sample size is available.
  keep = selection(data, z = 1.645)[, 4]
  # Normalisations
  scaled = relativeFreqs(data)
  scaled = scaled[keep, ]
  scaled = normalisations(scaled)
  cluster::agnes(t(scaled), metric = "manhattan", method = "ward")
}
## and the one to replicate
# Replicate the full battery of analyses on an alternate data set: one HC
# per feature type (character 3-grams, word forms, affixes, function words,
# POS 3-grams, lemmas, function lemmas, and two combined feature sets).
# Returns a named list of agnes objects.
replicateAnalysis = function(toKeep, path_raw_char3grams,
                             path_expanded_words, path_pos3_gr, path_lemmas,
                             functionWords, functionLemmas){
  # toKeep: list of texts to keep for analysis
  # path_raw_char3grams: path to raw_char3grams file;
  # path_expanded_words, path_pos3_gr, path_lemmas: you get the idea…
  # functionWords: list of functionWords
  # functionLemmas: well, in fact, a list of functionLemmas (really)
  data3grams = readData(toKeep, path_raw_char3grams)
  CAHRaw3gr = performAnalysis(data3grams)
  dataWords = readData(toKeep, path_expanded_words)
  CAHForms = performAnalysis(dataWords)
  dataAffs = countAffixes(dataWords)
  CAHAffs = performAnalysis(dataAffs)
  # Function words bypass performAnalysis(): no Moisl selection, the fixed
  # functionWords list is used instead.
  dataFW = relativeFreqs(dataWords)
  dataFW = dataFW[functionWords,]
  # Keep the unnormalised relative frequencies for the combined set below.
  dataFWsave = dataFW
  dataFW = normalisations(dataFW)
  CAHFW = cluster::agnes(t(dataFW), metric = "manhattan", method="ward")
  # Back to the standard pipeline for the annotated features.
  dataPOS = readAnnotData(toKeep, path_pos3_gr)
  CAHPOS3gr = performAnalysis(dataPOS)
  dataLemmas = readAnnotData(toKeep, path_lemmas)
  CAHLemmas = performAnalysis(dataLemmas)
  # And now back to functionLemmas (same fixed-list treatment as above).
  dataFL = relativeFreqs(dataLemmas)
  dataFL = dataFL[functionLemmas,]
  dataFL = normalisations(dataFL)
  CAHFL = cluster::agnes(t(dataFL), metric = "manhattan", method="ward")
  # Combined set 1: Affixes + POS 3-grams + function words (unnormalised).
  # Select relevant data per block, stack, then normalise jointly.
  dataList = list(dataAffs, dataPOS)
  results = matrix(ncol = ncol(dataAffs), nrow = 0, dimnames = list(NULL, colnames(dataAffs)))
  for (i in 1:length(dataList)){
    select = selection(dataList[[i]], z = 1.645)
    select = select[,4]
    # Normalisations
    dataList[[i]] = relativeFreqs(dataList[[i]])
    results = rbind(results, dataList[[i]][select,])
  }
  results = rbind(results, dataFWsave)
  rm(dataList)
  dataGlob = normalisations(results)
  CAHGlob2 = cluster::agnes(t(dataGlob), metric = "manhattan", method="ward")
  # Combined set 2: lemmas and word forms, same stacking procedure.
  # Select relevant data
  dataList = list(dataLemmas, dataWords)
  results = matrix(ncol = ncol(dataLemmas), nrow = 0, dimnames = list(NULL, colnames(dataLemmas)))
  for (i in 1:length(dataList)){
    select = selection(dataList[[i]], z = 1.645)
    select = select[,4]
    # Normalisations
    dataList[[i]] = relativeFreqs(dataList[[i]])
    results = rbind(results, dataList[[i]][select,])
  }
  rm(dataList)
  dataWordsLemmas = normalisations(results)
  CAHWordsLemmas = cluster::agnes(t(dataWordsLemmas), metric = "manhattan", method="ward")
  cahList = list(raw3grams = CAHRaw3gr, Affs = CAHAffs, FunctWords = CAHFW, FunctLemm = CAHFL, POS3gr = CAHPOS3gr, FWPOSandAffs = CAHGlob2, Forms = CAHForms, Lemmas = CAHLemmas, WordsLemmas = CAHWordsLemmas)
  return(cahList)
}
### And now for something entirely different
### Compare one to one the analyses
# One-to-one comparison of two parallel lists of clusterings (same names,
# same order): cut each pair of trees into k groups and report the cluster
# purity between them. Returns a one-column matrix, one row per analysis.
compareReplications = function(refCahList, replicatedCahList, k = 5){
  results = matrix(nrow = length(names(refCahList)), ncol = 1,
                   dimnames = list(names(refCahList), NULL))
  # seq_len() is safe when the lists are empty (1:nrow was not).
  for(i in seq_len(nrow(results))){
    classes1 = cutree(as.hclust(refCahList[[i]]), k = k)
    classes2 = cutree(as.hclust(replicatedCahList[[i]]), k = k)
    results[i, 1] = NMF::purity(as.factor(classes1), as.factor(classes2))
  }
  return(results)
}
### Plots and layout
#### ACP
library(ggfortify)
# PCA scatter plot of the texts (columns of x), coloured by the alleged
# author inferred from the column-name prefix. Returns a ggplot object.
pcaPlot = function(x, main ="Plot"){
  dat = x
  auts = vector(length=ncol(dat))
  # Tag each text with its author, detected from the "AUTHOR_" prefix.
  for (author in c('CORNEILLEP', 'CORNEILLET', 'MOLIERE',
                   'ROTROU', 'SCARRON', 'OUVILLE')) {
    auts[startsWith(colnames(dat), paste0(author, "_"))] = author
  }
  # Strip the author prefix and truncate the labels for readability.
  colnames(dat) = sub('^[^_]+_', '', colnames(dat))
  colnames(dat) = substring(colnames(dat), 1, 10)
  # Drop features that never occur (PCA scaling would fail on them).
  dat = dat[rowSums(dat) != 0,]
  ggplot2::autoplot(prcomp(t(dat), scale. = TRUE, center = TRUE),
                    label = TRUE,
                    data = cbind.data.frame(t(dat), auts),
                    colour='auts', main = main, label.show.legend=FALSE)
}
#### HC
# Plot the dendrogram of an agnes clustering with abbreviated author names
# in the labels and the tree cut into k rectangles.
cahPlot = function(x, main="Plot", xlab = paste(ncol(x$data), "features"), k = 6){
  # Author-name abbreviations, applied in the original (fixed) order; the
  # patterns do not overlap, so each sub() hits a distinct author prefix.
  abbreviations = c(CORNEILLEP = "CP", CORNEILLET = "CT", MOLIERE = "M",
                    OUVILLE = "O", ROTROU = "R", SCARRON = "S",
                    BOISSY = "B", DANCOURT = "DA", DUFRESNY = "DU",
                    NIVELLE = "N", REGNARD = "R", VOLTAIRE = "V",
                    BOURSAULT = "B", CHEVALIER = "C", DONNEAUDEVISE = "DDV",
                    DORIMOND = "DOR", GILLET = "G", LAFONTAINE = "LF",
                    QUINAULT = "Q")
  for (full_name in names(abbreviations)) {
    x$order.lab = sub(full_name, abbreviations[[full_name]], x$order.lab)
  }
  #TODO: modif temporaire pour lisibilité (temporary tweak for readability)
  x$order.lab = substring(x$order.lab, 1, 10)
  plot(x, main=main, xlab=xlab, which.plots = 2)
  myCAH2 = as.hclust(x)
  # Cut the tree into k groups and frame them on the plot.
  rect.hclust(myCAH2, k = k, border = 2:5)
}
# Dendrogram plot with per-label colours (via factoextra::fviz_dend).
# Labels are coloured according to markers found in the original labels
# ("_Leg.A_", "_Leg.B_", "_Leg.C_", "_Wau_Leg.C_"), then shortened.
#
# x     : clustering object accepted by fviz_dend (e.g. agnes), with
#         $order.lab labels and $ac (agglomerative coefficient)
# k     : number of clusters to frame with rectangles
# lth   : labels_track_height passed to fviz_dend
# lrect : lower_rect passed to fviz_dend
# NOTE(review): the grepl patterns use an unescaped '.', which matches any
# character — presumably a literal dot was intended; confirm label format.
cahPlotCol = function(x, main="Plot", xlab = paste(ncol(x$data), "features"), k = 3, lth = 7, lrect = -12, cex = 0.6, ylab = "height"){
  # Redefining labels and Coloring them
  labels = rep("black", length(x$order.lab))
  labels[grepl('_Leg.C_', x$order.lab)] = "deeppink4"
  labels[grepl('_Leg.A_', x$order.lab)] = "blue"
  labels[grepl('_Leg.B_', x$order.lab)] = "darkgreen"
  # Applied last, so it overrides the plain _Leg.C_ colour when both match
  labels[grepl('_Wau_Leg.C_', x$order.lab)] = "red"
  # Keep only the leading number and a trailing word marker in each label
  x$order.lab = gsub("^X?(\\d+).*(_\\w+?)(\\.|$)", "\\1\\2", x$order.lab, perl=TRUE)
  xlab = paste(xlab, "|| Agglomerative coefficient = ", round(x$ac, digits = 2))
  # All k cluster outlines stay black; colours come from label_cols only
  factoextra::fviz_dend(x, k = k,
            k_colors = rep("black", k),
            color_labels_by_k = FALSE,
            rect = TRUE,
            labels_track_height = lth,
            label_cols = labels,
            cex = cex,
            lower_rect = lrect,
            main = main, xlab = xlab, ylab = ylab) + theme(plot.margin = margin(5,15,5,5))
}
#### Boxplots and descriptive statistics
# Descriptive plots (boxplot / violinplot / barplot) for one feature row x,
# grouped either by author or by cluster classes.
#
# WARNING(review): the 'counts' and 'boxplot' branches reference variables
# that are not defined anywhere in this function or file (CORNEILLEP,
# CORNEILLET, MOLIERE, ROTROU, SCARRON, OUVILLE, withOuville) and pass the
# base function `names` as the `names=` argument — these branches will fail
# at runtime. The author's own TODO below acknowledges this.
myDescPlot = function(x, type = "boxplot", classes, main = "",
                      ylab = "freq", xlab = "", classlabels = NULL){
  # classes: list of classes (as per the output of cutree)
  # classlabels : a vector of labels for classes, in the order
  # of the classes number: i.e., label for class 1, label for class 2, etc.
  if(!is.null(classlabels)){
    classes = as.factor(classes)
    levels(classes) = classlabels
  }
  # TODO: fix all that follows
  if('counts' %in% type){
    # BROKEN: these six objects are undefined in this scope
    return(list(CORNEILLEP, CORNEILLET, MOLIERE, ROTROU, SCARRON, OUVILLE))
  }
  if('boxplot' %in% type){
    #boxplot
    # BROKEN: withOuville and the per-author lists are undefined here
    if(withOuville){
      boxplot(list(CORNEILLEP, CORNEILLET, MOLIERE, ROTROU, SCARRON, OUVILLE), names=names, main=main,ylab=ylab)
    }
    else{
      boxplot(list(CORNEILLEP, CORNEILLET, MOLIERE, ROTROU, SCARRON), names=names, main=main,ylab=ylab)
    }
  }
  # TODO: adapt violinplot
  if('violinplot' %in% type){
    #violinplot
    # One observation per column of x; second column holds the class label
    data = cbind(as.data.frame(t(x)), as.character(classes))
    colnames(data)[2] = "classes"
    violinplot <- ggplot(data, aes_(x = quote(classes), y = as.name(colnames(data)[1]))) +
      ggtitle(main) +
      ylab(ylab) +
      xlab(xlab) +
      geom_violin() +
      geom_boxplot(width=0.1) +
      theme(axis.text.x = element_text(size = rel(0.7)))
    return(violinplot)
  }
  if('barplot' %in% type){
    # Author recovered from the column-name prefix (text before first "_")
    data = cbind(as.data.frame(t(x)), sub("_.*$", "", colnames(x)))
    colnames(data)[2] = "author"
    barplot = ggplot(data, aes_(x = quote(author), y = as.name(colnames(data)[1]))) +
      ggtitle(main) +
      ylab(ylab) +
      xlab("") +
      geom_col() # equivalent to geom_bar(stat=identity)
    return(barplot)
  }
}
# Describe the clusters obtained by cutting the tree `x` into k classes.
#
# x : a clustering object accepted by cutree() (hclust/agnes)
# y : feature matrix with observations in columns (transposed here so
#     that rows become observations)
# k : number of classes (numeric; the old default was the string "10",
#     which only worked through cutree()'s silent as.integer coercion)
#
# Returns the FactoMineR::catdes() description of the classes.
classesDesc = function(x, y, k = 10){
  # Classes description
  classes = cutree(x, k = k)
  # Add classes to the data frame: one row per observation
  dataClassif = as.data.frame(t(y))
  # BUG FIX: the previous code used
  #   colnames(dataClassif[length(dataClassif)])[] = "Classes"
  # which renames only a temporary copy and leaves the real column name
  # untouched; assign the class column (and its name) directly instead.
  dataClassif$Classes = as.factor(classes)
  # The class factor is the last column; describe classes by all features
  classDesc = FactoMineR::catdes(dataClassif, num.var = length(dataClassif))
  return(classDesc)
}
# Bar chart of (specificity) scores, bars ordered by value and filled
# according to the sign of the score (positive vs non-positive).
classBarplot = function(values, title = "", ylab=""){
  positive = vector(mode = "logical", length = length(values))
  positive[values > 0] = TRUE
  positive[values <= 0] = FALSE
  plot_data = cbind(labels(values), as.data.frame(values), as.data.frame(positive))
  colnames(plot_data)[1] = "labels"
  ggplot(plot_data, aes(x = reorder(labels, values), y = values, fill = positive)) +
    geom_col(position = "identity", colour = "black", size = 0.25) +
    scale_fill_manual(values = c("#CCEEFF", "#FFDDDD"), guide = FALSE) +
    ggtitle(title) +
    ylab(ylab) +
    xlab("feats")
}
#TODO: check this function
# Plot the most characteristic features (positive and negative
# specificities) of each class obtained by cutting myCAH into k classes.
# Aggregates the absolute frequencies per class, computes textometric
# specificities, and arranges one classBarplot per class in a grid.
specifPlot = function(freq_abs, myCAH, k = 5, nfeats = 5, classlabels = NULL){
  # freq_abs: absolute frequencies original data
  # myCAH : the CAH to cut
  # k : the number of classes
  # nfeats: number of positive and negative feats to
  # TODO: better, use a banality threshold instead (original note in French)
  classes = cutree(myCAH, k = k)
  classes = as.factor(classes)
  if(!is.null(classlabels)){
    levels(classes) = classlabels
  }
  # One column of aggregated frequencies per class
  freq_abs_class = matrix(nrow = nrow(freq_abs), ncol = length(levels(classes)), dimnames = list(rownames(freq_abs), levels(classes)))
  for(i in 1:length(levels(classes))){
    # sum the values for each member of the class
    # NOTE(review): if a class has exactly one member, this subsetting
    # drops to a vector and rowSums() will fail; consider drop = FALSE.
    freq_abs_class[, i] = rowSums(freq_abs[, classes == levels(classes)[i]])
  }
  specs = textometry::specificities(freq_abs_class)
  plots = list()
  for(i in 1:ncol(specs)){
    # Now we can look at the specificities of each class,
    # positive or negative (translated from the original French comment):
    # take the nfeats highest and nfeats lowest scores
    values = c(head(sort(specs[, i], decreasing = TRUE), n = nfeats), head(sort(specs[, i]), n = nfeats))
    plots[[i]] = classBarplot(values, title = paste("Specificities for class ", levels(classes)[i]),
                 ylab = "Specif.")
  }
  # Look up grid.arrange without importing gridExtra at file level;
  # do.call("myfun", ...) resolves the name in this function's environment
  myfun <- get("grid.arrange", asNamespace("gridExtra"))
  do.call("myfun", c(plots, ncol=2))
}
### Data manipulation
# Collapse a matrix by summing all rows that share the same row name.
# Returns a matrix whose row names are the unique (sorted) groups and
# whose columns are preserved from x.
aggrAndClean = function(x){
  grouped = aggregate(x ~ rownames(x), FUN = sum)
  # First column of the aggregate result holds the group names;
  # the remaining columns are the summed features.
  result = as.matrix(grouped[, -1])
  rownames(result) = grouped[, 1]
  result
}
# Build affix-frequency features from a word-frequency matrix x
# (rows = words, columns = texts). Four families are produced:
#   "$abc"  word-initial trigrams (words longer than 3 characters only)
#   "_ab"   word-initial bigrams ("_" presumably marks the word boundary)
#   "abc^"  word-final trigrams (words longer than 3 characters only)
#   "ab_"   word-final bigrams
# Each family is collapsed with aggrAndClean() and the results stacked.
countAffixes = function(x){
  # Word-initial trigrams, marked with a leading "$"
  prefixes = x[stringr::str_length(rownames(x)) > 3,]
  rownames(prefixes) = paste("$", substr(rownames(prefixes), 1, 3), sep = "")
  prefixes = aggrAndClean(prefixes)
  # Word-initial bigrams, marked with a leading "_"
  spacePrefixes = x
  rownames(spacePrefixes) = paste("_", substr(rownames(spacePrefixes), 1, 2), sep = "")
  spacePrefixes = aggrAndClean(spacePrefixes)
  # Word-final trigrams, marked with a trailing "^"
  suffixes = x[stringr::str_length(rownames(x)) > 3,]
  rownames(suffixes) = paste(stringr::str_sub(rownames(suffixes), -3), "^", sep = "")
  suffixes = aggrAndClean(suffixes)
  # Word-final bigrams, marked with a trailing "_"
  spaceSuffixes = x
  rownames(spaceSuffixes) = paste(stringr::str_sub(rownames(spaceSuffixes), -2), "_", sep = "")
  spaceSuffixes = aggrAndClean(spaceSuffixes)
  rbind(prefixes, spacePrefixes, suffixes, spaceSuffixes)
}
|
3e18a3803fe52ecd517f5f7a661f97c7734c0703
|
6d156008f87d86a05396c1b34497105cb399e821
|
/biok/man/relationships.Rd
|
2aefe4d6b37cdd6261a55fd1a5e9c0077e3eaacc
|
[] |
no_license
|
karldes/biok
|
9b60e1217801a1625d518ec02a48e4300f7c51a4
|
0ebf176b17c6c59eee52e1d3b9db87bacffd1ecb
|
refs/heads/master
| 2020-03-25T00:55:10.491823
| 2018-08-01T21:56:41
| 2018-08-01T21:56:41
| 143,212,272
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
relationships.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relationships.R
\name{relationships}
\alias{relationships}
\title{relationships Function}
\usage{
relationships(type, S, mean = 0, sd = 0.1, C = 0.1, precision = 0.001)
}
\arguments{
\item{type}{the type of the network}
\item{S}{size of the population}
\item{mean}{used in the gaussian law}
\item{sd}{standard deviation used in the gaussian law}
\item{C}{connectance of the network}
\item{precision}{the threshold size}
}
\description{
This function returns the number of edges in the network and
two lists showing every single relationship of the network.
}
|
951e17827b80079e295081e1aa96ed20e49dfcdc
|
bcb329b2dfaa867fbf7a39b7366cfbb80552ad97
|
/CollectDealerInfor/HondaDealersLinks.R
|
cfb9ef79ef0a451165cc9ef929338118f43d2fb6
|
[] |
no_license
|
jpzhangvincent/Dealership-Scraping
|
3c8ecfa72e7692f0f709afcbac840899781d27e2
|
13634892a8098cca260ddf1c4017946b76f0deca
|
refs/heads/master
| 2021-01-12T14:25:19.400164
| 2015-10-06T21:55:17
| 2015-10-06T21:55:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,917
|
r
|
HondaDealersLinks.R
|
#HONDA DEALER
install.packages("XML", repos = "http://cran.cnr.Berkeley.edu/")
library(XML)
install.packages("RCurl", repos = "http://cran.cnr.Berkeley.edu/")
library(RCurl)
####get zipcode that may have honda dealer from dealer rater website
url = "http://www.dealerrater.com/directory/Massachusetts/Honda/"
doc = htmlParse(url)
statelist = gsub(" ", "-", xpathSApply(doc, "//select[@id='StateCode']/option",xmlValue)[-1])
statelinks = sapply(1:length(statelist), function(i)gsub("Massachusetts", statelist[i], url))
# For one dealerrater state page, return that page's URL together with
# the URLs of any additional result pages linked from it (pagination).
getHondastate = function(url){
  page_links = getHTMLLinks(url)
  pagination = unique(grep("page=", page_links, value = TRUE))
  if (length(pagination) > 0) {
    extra_pages = paste0("http://www.dealerrater.com", pagination)
  } else {
    extra_pages = NULL
  }
  c(url, extra_pages)
}
state.all = sapply(statelinks, getHondastate)
stateurl = as.vector(unlist(unname(state.all)))
# Extract the unique links on a page that point at a dealer detail page.
getall = function(url){
  page_links = unique(getHTMLLinks(url))
  grep("/dealer/", page_links, value = TRUE)
}
temp = lapply(stateurl, getall)
temp2 = unlist(temp)
temp3 = temp2[-grep("#", temp2)]
hondadealerlink = paste0("http://www.dealerrater.com", temp3)
# Read a single attribute (default: href) from an XML node.
getcontent= function(node, content = "href"){
  attrs = xmlAttrs(node)
  attrs[content]
}
# Scrape one dealerrater.com dealer page into a one-row data frame with
# DealerName, Address, Zipcode and the source URL. Returns NULL when the
# URL cannot be reached. Performs network I/O (url.exists / htmlParse).
getDealer.honda = function(url){
  # Progress trace for long scraping runs
  print(url)
  if(url.exists(url)){
    doc = htmlParse(url)
    DealerName = xpathSApply(doc, "//h1[@itemprop='name']", xmlValue)
    # The full address is stored in a hidden input's "value" attribute
    AddressNode = getNodeSet(doc, "//input[@id='end' and @type='hidden']")
    Address = unlist(unname(sapply(AddressNode, getcontent, content="value")))
    # Last whitespace-separated token of the address is taken as the ZIP
    Zipcode = tail(strsplit(Address, " ")[[1]], n = 1)
    df <- data.frame(DealerName, Address, Zipcode, index = url, stringsAsFactors = F)
    return(df)
  }
  else{return(NULL)}
}
tempdata = lapply(hondadealerlink, getDealer.honda)
hondatemp = Reduce(function(x, y) rbind(x, y), tempdata)
hondazip = hondatemp$Zipcode
save(hondazip, file = "hondazip.rdata")
##########grab data through zip offered by dealerrater though honda oem website to get
#the list
load("hondazip.rdata")
url = "http://automobiles.honda.com/tools/dealer-locator/results.aspx?address=&city=&state=&zip=10036&dealername=&filters=&fromsection=NEW#~pmZngNzcmx84O0"
zipcodelist = unique(hondazip)
tt = sapply(1:length(zipcodelist), function(i)gsub("zip=[0-9]{5}", paste0("zip=", zipcodelist[i]), url))
getcontent= function(node, content = "href"){
tt = xmlAttrs(node)[content]
return(tt)
}
# From a Honda dealer-locator results page, collect links to the dealer
# detail pages. Returns NULL when the URL is unreachable or the page has
# no dealer links. Performs network I/O.
getziplinks = function(url){
  # Progress trace for long scraping runs
  print(url)
  if(url.exists(url)){
    doc = htmlParse(url)
    nodes = getNodeSet(doc, "//a[@id='dealerinfolink']")
    if(length(nodes)==0){return(NULL)}
    else{
      # The href is a javascript call; the relative URL is the second
      # single-quote-delimited token of each href
      templink = strsplit(unname(sapply(nodes,getcontent,content='href')), "\\'")
      links = paste0("http://automobiles.honda.com/tools/dealer-locator/", sapply(templink, "[", 2))
      # Truncate to a fixed length to drop trailing javascript arguments
      # (assumes all detail URLs are 81 characters -- fragile; verify)
      nlinks = substr(links, 1, 81)
      return(nlinks)
    }
  }
  else{return(NULL)}
}
links = sapply(tt, getziplinks)
ll = unique(as.vector(unlist(unname(links))))
# Scrape one Honda dealer detail page into a one-row data frame with
# DealerName, Address, website Link and Zipcode. Returns NULL when the
# URL is unreachable. Performs network I/O.
getdealer = function(url){
  # Progress trace for long scraping runs
  print(url)
  if(url.exists(url)){
    doc = htmlParse(url)
    tempname = xpathSApply(doc,"//div[@id='detail_dealername']",xmlValue)[1]
    # The dealer name ends at the last "-" in the header text
    index = tail(gregexpr("-", tempname)[[1]], n = 1)
    # Trim surrounding whitespace from the extracted name
    DealerName = gsub("^\\s+|\\s+$", "", substr(tempname, 1, index-1))
    Address = xpathSApply(doc,"//div[@id='dealer_address']",xmlValue)[1]
    # Last whitespace-separated token of the address is taken as the ZIP
    Zipcode = tail(strsplit(Address, " ")[[1]], n = 1)
    Link = xpathSApply(doc, "//div[@id='detail_website1']/a", xmlValue)
    # Some dealers have no website; keep the column with a placeholder
    if(length(Link)==0){
      Link = "NA"
    }
    df <- data.frame(DealerName, Address, Link, Zipcode, stringsAsFactors = F)
    return(df)
  }
  else{return(NULL)}
}
tempdata = lapply(ll, getdealer)
HondaDealer = Reduce(function(x, y) rbind(x, y), tempdata)
names(HondaDealer)[4]="zipcode"
HondaDealers = merge(HondaDealer, zipdata)
HondaDealers$Latitude = "NA"
HondaDealers$Longitude = "NA"
HondaDealers$IV_link = "NA"
save(HondaDealers, file = "hondaDealers.rdata")
|
9100f75f24f39cbc909b9cb328afac74394554d3
|
982e0b8514cefd4e0ed8b8084a653c3e6c647ffb
|
/cachematrix.R
|
c0350f2fd96a2de3df01260cf09cc5c9b1bce199
|
[] |
no_license
|
danbennett360/ProgrammingAssignment2
|
716b14944a74ca9ee91319de9ac13e5658587613
|
931465a44d4fe10c395ce919d3374c4c394c46aa
|
refs/heads/master
| 2021-01-21T02:26:56.605970
| 2015-06-08T16:11:56
| 2015-06-08T16:11:56
| 36,941,244
| 0
| 0
| null | 2015-06-05T15:50:02
| 2015-06-05T15:50:02
| null |
UTF-8
|
R
| false
| false
| 1,735
|
r
|
cachematrix.R
|
# Assignment 2
#
# Write a function which will cache a matrix and a function that will use
# that cached matrix
# this is monstly a copy from the assignment page
# see https://class.coursera.org/rprog-015/human_grading/view/courses/973496/assessments/3/submissions
# this one does the dirty work.
# save the matrix, and possibly the inverse in local variables
# and create four routines to support operations on this matrix
# Create a special "matrix" object that can cache its inverse.
# The four accessor functions close over the matrix `x` and the cached
# inverse, so the state lives in this function's environment.
makeCacheMatrix <- function(x = matrix()) {
    # NULL means "inverse not computed yet"; cacheSolve() fills it in
    cachedInverse <- NULL

    # Replace the stored matrix; any previously cached inverse is stale
    set <- function(y) {
        x <<- y
        cachedInverse <<- NULL
    }

    # Return the stored matrix
    get <- function() x

    # Store a computed inverse
    setInverse <- function(i) cachedInverse <<- i

    # Return the cached inverse, or NULL if it has not been computed.
    # Callers should normally go through cacheSolve() instead of calling
    # this directly, so they never observe the NULL state by accident.
    getInverse <- function() cachedInverse

    list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
# Return a matrix that is the inverse of 'x'
# Return the inverse of the special "matrix" object `x` created by
# makeCacheMatrix(), computing it at most once: on a cache hit the stored
# inverse is returned directly, on a miss it is computed with solve(),
# cached via x$setInverse(), and returned.
cacheSolve <- function(x, ...) {
     inverse <- x$getInverse()
     if (is.null(inverse)) {
        # Cache miss: invert the stored matrix and remember the result
        inverse <- solve(x$get())
        x$setInverse(inverse)
     }
     inverse
}
|
6c71dfae276c2913460450735079e4badc443068
|
366ec1e0e81f9d8c40e2fde01efa44d640c67daa
|
/R/globals.R
|
5ca17cbc8f1678ab217239a2d747f5c86b4921c8
|
[] |
no_license
|
tjfarrar/skedastic
|
20194324833b8f2f20e5666b642cff617159588c
|
050e6a177a28fb0cc2054b506a53b09d6859e3c7
|
refs/heads/master
| 2022-11-17T22:41:43.930246
| 2022-11-06T06:39:13
| 2022-11-06T06:39:13
| 219,455,416
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 146
|
r
|
globals.R
|
# Declare variable names that are used via non-standard evaluation
# elsewhere in the package, so that `R CMD check` does not emit
# "no visible binding for global variable" notes for them.
utils::globalVariables(c("dpeakdat", "T_alpha", "X", "y", "e", "p", "yhat",
                         "xx", "exact", "deflator", "method", "par"))
|
093e121a5c7c25abacadac617e6240d3deaf9cf8
|
367e9a10503601ea19f3471e77a046ac8a848d3a
|
/DNAaa_list.R
|
4aa7fdb638267925aca511aa38103e20a332b6b3
|
[] |
no_license
|
hyhy8181994/R-porject-workflow-package
|
005534758da7f7bbd018c48acc6ec7fe1d64747a
|
adac08dd7464d7ddcbd541f8fc002dc650c1499b
|
refs/heads/master
| 2020-04-11T19:41:15.854612
| 2018-12-16T21:51:31
| 2018-12-16T21:51:31
| 162,043,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,843
|
r
|
DNAaa_list.R
|
#' DNA to amino acid converter
#'
#' This function allows you to convert DNA sequence of specific gene to amino acid sequence based on standard DNA codon table. The unrecognised DNA codon will be shown as "*" , and also the stop codon will not be shown in the sequence. Note: make sure the input DNA sequence is complete coding sequence (CDS).
#'
#'
#' @param bpseq a string input of DNA sequence (optimally starts with ATG and ends with stop codon in 5'- 3' direction) [blanks and numbers will be ignored]
#' @keywords DNA
#' @return a list of amino acid number, length of gene, amino acid sequence.
#' @references NCBI DNA codon tables
#'
#' https://www.ncbi.nlm.nih.gov/Taxonomy/taxonomyhome.html/index.cgi?chapter=tgencodes#SG1
#'
#' @export
#' @examples
#' DNAaa("ATGGTA")
#'
#' #the output
#' #$aa_number
#' [1] 2
#'
#' $length_of_gene
#' [1] 6
#'
#' $aa_seq
#' [1] "MV"
#'
#'
#'
#'
#'
DNAaa<-function(bpseq){
  # Translate a DNA coding sequence into a one-letter amino acid string.
  #
  # bpseq : character string of DNA bases (case-insensitive); blanks,
  #         newlines and digits are stripped before translation.
  #
  # Returns a list with:
  #   aa_number      - number of characters in the amino acid sequence
  #   length_of_gene - number of bases after cleaning
  #   aa_seq         - amino acid sequence; stop codons are removed,
  #                    unrecognised codons appear as "*"
  #
  # Standard genetic code lookup table (replaces the original ~190-line
  # if/else chain). Stop codons (TAA, TAG, TGA) map to " " and are
  # stripped from the output, exactly as the original chain did.
  codon_table <- c(
    TTT = "F", TTC = "F", TTA = "L", TTG = "L",
    CTT = "L", CTC = "L", CTA = "L", CTG = "L",
    ATT = "I", ATC = "I", ATA = "I", ATG = "M",
    GTT = "V", GTC = "V", GTA = "V", GTG = "V",
    TCT = "S", TCC = "S", TCA = "S", TCG = "S",
    CCT = "P", CCC = "P", CCA = "P", CCG = "P",
    ACT = "T", ACC = "T", ACA = "T", ACG = "T",
    GCT = "A", GCC = "A", GCA = "A", GCG = "A",
    TAT = "Y", TAC = "Y", TAA = " ", TAG = " ",
    CAT = "H", CAC = "H", CAA = "Q", CAG = "Q",
    AAT = "N", AAC = "N", AAA = "K", AAG = "K",
    GAT = "D", GAC = "D", GAA = "E", GAG = "E",
    TGT = "C", TGC = "C", TGA = " ", TGG = "W",
    CGT = "R", CGC = "R", CGA = "R", CGG = "R",
    AGT = "S", AGC = "S", AGA = "R", AGG = "R",
    GGT = "G", GGC = "G", GGA = "G", GGG = "G"
  )
  # Normalise the input: upper case, then strip newlines, blanks, digits
  cleaned <- toupper(bpseq)
  cleaned <- gsub("\n", "", cleaned)
  cleaned <- gsub(" ", "", cleaned)
  cleaned <- gsub('[[:digit:]]+', "", cleaned)
  gene_length <- nchar(cleaned)
  # Robustness fix: the original errored on empty input because
  # seq(1, 0, by = 3) is invalid; return an empty result instead.
  if (gene_length == 0) {
    return(list("aa_number" = 0, "length_of_gene" = 0, "aa_seq" = ""))
  }
  # Split into consecutive triplets; a trailing partial codon (1 or 2
  # bases) is kept and translates to "*", matching the original.
  starts <- seq(1, gene_length, by = 3)
  codons <- substring(cleaned, starts, starts + 2)
  # Unknown codons (including any containing non-ACGT characters) are
  # absent from the table, so the lookup yields NA; show them as "*".
  amino_acids <- codon_table[codons]
  amino_acids[is.na(amino_acids)] <- "*"
  # Drop the stop-codon markers (" ") from the final sequence
  aa_seq <- gsub(" ", "", paste(amino_acids, collapse = ""))
  list("aa_number" = nchar(aa_seq),
       "length_of_gene" = gene_length,
       "aa_seq" = aa_seq)
}
|
e0e63fe688a3cc93c3c56bca1361cbc030a836f1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/multivator/examples/ipd.Rd.R
|
28edc23e04381574f1463b2cbdb05de2c609a667
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
ipd.Rd.R
|
library(multivator)
### Name: ipd
### Title: Positive definite matrices
### Aliases: ipd
### ** Examples
data(mtoys)
stopifnot(ipd(crossprod(matrix(rnorm(30),10))))
stopifnot(ipd(M(toy_mhp)))
|
80b51df9e4034e83f25f6cbeb1181e526d4a3ed3
|
f6684764416984a795998921bab4277e7b101c42
|
/vignettes/scripts/summarise_restrictions.R
|
fd29d948e7071bd182edb0f8e517be44fc276d78
|
[] |
no_license
|
ITSLeeds/roadworksUK
|
75a493591395391a2864d8842a1c816ff711bb7d
|
1dd63bf2e32889398ba7519d35249b9cabe2e2f0
|
refs/heads/master
| 2020-03-29T23:04:01.668818
| 2018-11-01T14:50:46
| 2018-11-01T14:50:46
| 150,454,682
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,505
|
r
|
summarise_restrictions.R
|
# Restrictions
library(sf)
library(dplyr)
library(lubridate)
restrictions = readRDS("data/restrictions_v3.Rds")
bounds = st_read("data/boundaries/ha_mod.gpkg", stringsAsFactors = F)
#Fix HA Names
restrictions$publisher = gsub(" City Council", "", restrictions$publisher)
restrictions$publisher = gsub(" County Council", "", restrictions$publisher)
restrictions$publisher = gsub(" Metropolitan District", "", restrictions$publisher)
restrictions$publisher = gsub(" Metropolitan Borough Council", "", restrictions$publisher)
restrictions$publisher = gsub(" London Borough Council", "", restrictions$publisher)
restrictions$publisher = gsub(" Council", "", restrictions$publisher)
restrictions$publisher = gsub(" Borough", "", restrictions$publisher)
restrictions$publisher = gsub(" Royal", "", restrictions$publisher)
restrictions$publisher[restrictions$publisher == "St Helens"] = "St. Helens"
restrictions$publisher[restrictions$publisher == "Cheshire West And Chester"] = "Cheshire West and Chester"
restrictions$publisher[restrictions$publisher == "Island Roads on behalf of the Isle of Wight"] = "Isle of Wight"
# find first for each HA
res_first = as.data.frame(restrictions)
res_first = res_first %>%
group_by(publisher) %>%
summarise(date = min(date_created))
rn = unique(res_first$publisher)
bn = unique(bounds$name)
rn[!rn %in% bn]
bn[!bn %in% rn]
map = left_join(bounds, res_first, by = c("name" = "publisher"))
map$date = floor_date(map$date,"6 months")
tmap_mode("plot")
png(filename = "plots/Restrictions_First.png" , height = 6, width = 4, units = 'in', res = 600, pointsize=12)
par(mar = c(0.01,0.01,0.01,0.01)) +
tm_shape(map) +
tm_fill(col = "date", title = "Restrictions Data Coverage", style = "fixed",
breaks = c(2013,2014,2015,2016,2017,2018),
palette = c("-Blues")
) +
tm_layout(legend.position = c(0,0.2),
#legend.title.size = 1.5,
#legend.text.size = 1.3,
frame = FALSE)
dev.off()
# Simplify works to a single record per restriction
restrictions = as.data.frame(restrictions[,c("ienid","date_created","publisher","notification_type_name","works_ref",
"notification_sequence_number","usrn","restriction_end_date","restriction_duration" )])
restrictions_group = restrictions %>%
group_by(works_ref) %>%
summarise(date_start = min(date_created),
publisher = unique(publisher)[1],
#ursn = unique(usrn)[!is.na(unique(usrn))],
ursn = paste(unique(usrn)[!is.na(unique(usrn))], collapse = " "),
duration = unique(restriction_duration)[!is.na(unique(restriction_duration))][1],
records = n())
restrictions_HA = restrictions_group %>%
filter(date_start > as.Date("2017-01-01")) %>%
group_by(publisher) %>%
summarise(count = n())
map = left_join(bounds, restrictions_HA, by = c("name" = "publisher") )
tmap_mode("plot")
png(filename = "plots/Restrictions_Count.png" , height = 6, width = 4, units = 'in', res = 600, pointsize=12)
par(mar = c(0.01,0.01,0.01,0.01)) +
tm_shape(map) +
tm_fill(col = "count", title = "Restrictions Jan 2017 - Jul 2018", style = "fixed",
palette = c("#d73027","#f46d43","#fdae61","#fee090","#ffffbf","#e0f3f8","#abd9e9","#74add1","#4575b4"),
breaks = c(0,25,50,100,150,200,300,500,1000,1600)) +
tm_layout(legend.position = c(0,0.2),
#legend.title.size = 1.5,
#legend.text.size = 1.3,
frame = FALSE)
dev.off()
|
fbf32b9d551d9ae9ab95b24c8d3e8d0449d310eb
|
7c19b5ee0f99615e8fbf0816b7256919061254a3
|
/test/identify_cluster.R
|
cee4e87bccce7f0685fdd8fc60d0e4d35e9a8636
|
[
"MIT"
] |
permissive
|
chiragjp/voe
|
c5a2a9b8196e0812aa9bdcda2be0b39404c613b7
|
4a9d6d304c23727f4a02ebbc0713454320d58e46
|
refs/heads/master
| 2021-01-18T21:47:52.290594
| 2015-02-25T15:35:55
| 2015-02-25T15:35:55
| 18,693,480
| 15
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,531
|
r
|
identify_cluster.R
|
source('post_process.R')
##
load('../vibration_data/all_v4/LBXTR_vibration_gather.Rdata')
vibFrame <- init_vibration_frame(vibFrame)
vibFrame <- recomputePvalue(vibFrame, 'z', 'pvalue')
nFactor <- length(unique(vibFrame$factor_level))
###
### how to identify clusters? kmeans/hclust/dbscan doesn't work well.
library(playwith)
playwith(plot(vibFrame$HR, -log10(vibFrame$pvalue), col=rgb(0,100,0,50,maxColorValue=255)))
save(upperPts, lowerPts, midPts, file='../cluster_data/LBXTR_clusters.Rdata')
###### now plot the lower, mid, and upper points
plot(vibFrame$HR, -log10(vibFrame$pvalue), col=rgb(0,100,0,50,maxColorValue=255))
points(vibFrame$HR[lowerPts], -log10(vibFrame$pvalue)[lowerPts], cex=1, col='green')
points(vibFrame$HR[midPts], -log10(vibFrame$pvalue)[midPts], cex=1, col='red')
points(vibFrame$HR[upperPts], -log10(vibFrame$pvalue)[upperPts], cex=1, col='black')
ks <- unique(vibFrame$k)
lower <- vibFrame[lowerPts,]
upper <- vibFrame[upperPts,]
mid <- vibFrame[midPts, ]
# Count how often each variable index appears in the selected combinations.
#
# dataFrame    : rows with columns `k` (combination size) and
#                `combination_index` (which column of the matching
#                combination matrix was used)
# combinations : list of matrices, one per value of kIndex; each column is
#                one combination of variable indices
# kIndex       : vector giving the `k` value corresponding to each element
#                of `combinations`
# n_vars       : number of distinct variables. Previously hard-coded to 13;
#                now a parameter with the same default, so existing callers
#                are unaffected.
#
# Returns a numeric vector of length n_vars with the per-variable counts.
getVariableFrequency <- function(dataFrame, combinations, kIndex, n_vars = 13) {
  varCount <- rep(0, n_vars)
  # seq_len() is safe for zero-row input (1:nrow() would yield c(1, 0))
  for(ii in seq_len(nrow(dataFrame))) {
    k <- dataFrame[ii, 'k']
    ind <- which(kIndex == k)
    combinationIndex <- dataFrame[ii, 'combination_index']
    combos <- combinations[[ind]]
    vars <- combos[, combinationIndex]
    varCount[vars] <- varCount[vars] + 1
  }
  return(varCount)
}
freqLower <- getVariableFrequency(lower, combinations, ks)
freqUpper <- getVariableFrequency(upper, combinations, ks)
freqMid <- getVariableFrequency(mid, combinations, ks)
|
1f8b550308a869541aa3c3696fbaaf6ef2b64696
|
8e0e5f97ee34a7b09038b79e3d8e097b98ce72ae
|
/forecasting.R
|
d1934f742a20073d18bb2d9ce7951e36550458ef
|
[] |
no_license
|
KimMonks/ML_coursera
|
3ead956e79c9c3492a0d5a1682220a82490e312b
|
b1f041cfd079b9170130d9926ea9bf0ba6f17cc3
|
refs/heads/master
| 2021-01-16T19:57:50.889971
| 2014-07-26T12:46:44
| 2014-07-26T12:46:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 607
|
r
|
forecasting.R
|
library(quantmod)
library(forecast)
from.dat <- as.Date("01/01/08",format="%m/%d/%y")
to.dat <- as.Date("12/31/13",format="%m/%d/%y")
getSymbols("VNM",src="google",from=from.dat,to=to.dat)
head(VNM)
mVNM <- to.monthly(VNM)
vnmOpen <- Op(mVNM)
ts1 <- ts(vnmOpen,frequency=12)
plot(ts1,xlab="Years+1",ylab="VNM")
plot(decompose(ts1),xlabs="Years+1")
ts1Train <- window(ts1,start=1,end=5)
ts1Test <- window(ts1,start=5,end=c(7-.01))
plot(ts1Train)
lines(ma(ts1Train,order=3),col="red")
ets1 <- ets(ts1Train,model="MMM")
fcast <- forecast(ets1)
plot(fcast); lines(ts1Test,col="red")
accuracy(fcast,ts1Test)
|
fd2bd3b398056f9dc562ca93015f40cc271df26e
|
c19286206580c4d239c413782091ff07fb8bf5e5
|
/R/rowCollapse.R
|
1fe6e6ac7d18f313679c8e382bacf474e5e04e30
|
[] |
no_license
|
Shians/DelayedMatrixStats
|
5170a2abd926e645eb87d5248736d80811877e15
|
fff04672317547695d13a4b06aa70c38d644345c
|
refs/heads/master
| 2020-04-17T02:25:53.789802
| 2018-10-30T15:54:38
| 2018-10-30T15:54:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,488
|
r
|
rowCollapse.R
|
### ============================================================================
### rowCollapse
###
### ----------------------------------------------------------------------------
### Non-exported methods
###
#' @importMethodsFrom DelayedArray t
.DelayedMatrix_block_rowCollapse <- function(x, idxs, rows = NULL,
                                             dim. = dim(x), ...) {
  # Block-processing fallback for rowCollapse() on a DelayedMatrix:
  # transposes the input and delegates to matrixStats::colCollapse() over
  # column blocks, then flattens the per-block results into one vector.
  # Check input
  stopifnot(is(x, "DelayedMatrix"))
  # Internal DelayedArray helper; validates/derives the answer type
  DelayedArray:::.get_ans_type(x, must.be.numeric = FALSE)
  # Subset
  # ..subset() is a package-local helper -- presumably restricts x to
  # `rows` (defined elsewhere in the package; assumption based on usage)
  x <- ..subset(x, rows = rows)
  if (!is.null(rows)) {
    # Keep idxs aligned with the retained rows
    idxs <- idxs[rows]
  }
  # Compute result
  # NOTE: Use colCollapse() on transposed input
  val <- DelayedArray:::colblock_APPLY(x = t(x),
                                       APPLY = matrixStats::colCollapse,
                                       idxs = idxs,
                                       ...)
  if (length(val) == 0L) {
    # No blocks were produced: return a zero-filled vector of row length
    return(numeric(nrow(x)))
  }
  # NOTE: Return value of matrixStats::rowCollapse() has no names
  unlist(val, recursive = TRUE, use.names = FALSE)
}
### ----------------------------------------------------------------------------
### Exported methods
###
# ------------------------------------------------------------------------------
# General method
#
#' @importMethodsFrom DelayedArray seed
#' @rdname colCollapse
#' @export
#' @examples
#'
#' # Extract the 2nd column as a vector
#' # NOTE: An ordinary vector is returned regardless of the backend of
#' # the DelayedMatrix object
#' rowCollapse(dm_matrix, 2)
#' rowCollapse(dm_HDF5, 2)
setMethod("rowCollapse", "DelayedMatrix",
function(x, idxs, rows = NULL, dim. = dim(x),
force_block_processing = FALSE, ...) {
if (!hasMethod("rowCollapse", seedClass(x)) ||
force_block_processing) {
message2("Block processing", get_verbose())
return(.DelayedMatrix_block_rowCollapse(x = x,
idxs = idxs,
rows = rows,
dim. = dim.,
...))
}
message2("Has seed-aware method", get_verbose())
if (DelayedArray:::is_pristine(x)) {
message2("Pristine", get_verbose())
simple_seed_x <- seed(x)
} else {
message2("Coercing to seed class", get_verbose())
# TODO: do_transpose trick
simple_seed_x <- try(from_DelayedArray_to_simple_seed_class(x),
silent = TRUE)
if (is(simple_seed_x, "try-error")) {
message2("Unable to coerce to seed class", get_verbose())
return(rowCollapse(x = x,
idxs = idxs,
rows = rows,
dim. = dim.,
force_block_processing = TRUE,
...))
}
}
rowCollapse(x = simple_seed_x,
idxs = idxs,
rows = rows,
dim. = dim.,
...)
}
)
# ------------------------------------------------------------------------------
# Seed-aware methods
#
#' @export
setMethod("rowCollapse", "matrix", matrixStats::rowCollapse)
|
5daa9370f2d98d4926d1881c34f850f3ef9d660f
|
005a08fa343f1c36fc2c44639d35d3fb2502e8d5
|
/cachematrix.R
|
97b6a30c2f90cf45e1dc14ec47bbc1cbf2f63a50
|
[] |
no_license
|
yisongtao/ProgrammingAssignment2
|
58247e1badbb04c462fd28b8bd5f86fb961fa8e0
|
d47c3b76b30201afca2400079f13a9b34ba1e123
|
refs/heads/master
| 2020-12-28T22:53:07.639358
| 2015-07-25T06:54:15
| 2015-07-25T06:54:15
| 39,547,308
| 0
| 0
| null | 2015-07-23T05:21:23
| 2015-07-23T05:21:22
| null |
UTF-8
|
R
| false
| false
| 1,283
|
r
|
cachematrix.R
|
## The following two functions will cache and compute the inverse of a matrix.
## Example:
## > c=rbind(c(1, -1/4), c(-1/4, 1))
## > invC=cacheSolve(makeCacheMatrix(c))
## > invC
## [,1] [,2]
## [1,] 1.0666667 0.2666667
## [2,] 0.2666667 1.0666667
## This function creates an object that can be used to cache
## the inverse of a matrix.
# Build a special "matrix" wrapper able to cache its inverse. The state
# (matrix and cached inverse) lives in this function's environment and is
# manipulated through the four returned accessor functions.
makeCacheMatrix <- function(x = matrix()) {
	# Cached inverse; NULL until cacheSolve() stores a value
	inv_cache <- NULL
	# Replace the matrix; the old inverse becomes stale and is discarded
	set <- function(y) {
		x <<- y
		inv_cache <<- NULL
	}
	# Return the stored matrix
	get <- function() x
	# Store / retrieve the cached inverse
	set_inverse <- function(inverse) inv_cache <<- inverse
	get_inverse <- function() inv_cache
	list(set = set, get = get,
		 set_inverse = set_inverse, get_inverse = get_inverse)
}
## This function computes the inverse of the matrix returned by
## "makeCacheMatrix" function. If the inverse is already calculated, this
## function will retrieve the inverse stored in the cache.
## Compute (or retrieve from cache) the inverse of the special "matrix"
## object created by makeCacheMatrix().
##
## x   : object returned by makeCacheMatrix()
## ... : further arguments passed on to solve()
##
## Returns the inverse of the stored matrix. On a cache hit the stored
## inverse is returned directly (with a message); on a miss it is
## computed, cached via x$set_inverse(), and returned.
cacheSolve <- function(x, ...) {
	inverse_matrix <- x$get_inverse()
	if (!is.null(inverse_matrix)) {
		message(" getting cached data")
		return(inverse_matrix)
	}
	data <- x$get()
	inverse <- solve(data, ...)
	x$set_inverse(inverse)
	## BUG FIX: the function previously ended on x$set_inverse(inverse),
	## so its result was returned invisibly and only by accident (the
	## value of the `<<-` assignment inside set_inverse). Return the
	## inverse of 'x' explicitly and visibly.
	inverse
}
|
740993b7293412d4d43416a6c79c9b859971d9b1
|
60844443e772f9ec32e5c54c015a351f0ad7b40f
|
/app-venta-diaria/global.R
|
af011ad94f26f32b19f8e89202946e2fbb8ddfc3
|
[] |
no_license
|
danromuald/shiny-apps
|
e7176bc1eaabb331e1325e2ebde25d42dd4ca656
|
cd7c339a47924b3ac0d3a7d2c656416f9c8a87b6
|
refs/heads/master
| 2021-01-19T21:33:52.256358
| 2017-04-06T03:29:30
| 2017-04-06T03:29:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,764
|
r
|
global.R
|
# global.R — loads packages, opens the ODBC connection, pulls the daily-sales
# risk data from SQL Server, and caches it as .RData for the shiny app.
# rm(list = ls())
library("shiny")
library("shinydashboard")
library("DT")
library("riskr")
library("ggplot2")
library("scales")
library("lubridate")
library("ggthemes")
library("dplyr")
library("riskr")
library("lubridate")
library("tidyr")
library("plotly")
library("RODBC")
# NOTE(review): riskr and lubridate are loaded twice above.
options(stringsAsFactors = FALSE)
# ODBC DSN "riesgo" must be configured on the host.
chn <- odbcConnect("riesgo")
options(DT.options = list(searching = FALSE, paging = FALSE, ordering = FALSE))
# Global ggplot theme and geom defaults for the whole app.
theme_set(theme_minimal(base_size = 11) +
            theme(legend.position = "bottom"))
update_geom_defaults("line", list(colour = "#dd4b39", size = 1.05))
update_geom_defaults("point", list(colour = "#434348", size = 1.2))
update_geom_defaults("bar", list(fill = "#7cb5ec"))
update_geom_defaults("text", list(size = 4, colour = "gray30"))
# Latest data period (camada, YYYYMM); queries below exclude it.
maxper <- sqlQuery(chn, "select max(camada) from rt_scoring.dbo.sw_sols_res")[[1]]
# Counts of applications by period/segment/path/risk-indicator/result.
query <-
  "select camada, segmento_label, desc_path_sw, ri, resultado_sw, count(*) as count
from rt_scoring.dbo.sw_sols_res
where
camada >= 201301 and camada < %s
and desc_path_sw in ('Antiguo', 'Antiguo Campana', 'Nuevo', 'Nuevo Campana')
and ri in ('A','B','C','D','E')
group by camada, segmento_label, desc_path_sw, ri, resultado_sw" %>%
  sprintf(maxper)
dfressol <- sqlQuery(chn, query) %>%
  tbl_df() %>%
  mutate(periodo = paste0(camada, "01") %>% ymd() + days(1)) %>%
  rename(risk_indicator = ri)
save(dfressol, file = "data/dfressol.RData")
# Period 12 months before maxper (performance window).
maxper2 <- paste0(maxper, "01") %>% ymd() %>% {. - months(12)} %>% format("%Y%m")
# Accepted applications with scores; bg = 1 when there was no write-off.
query <-
  "
select
camada, segmento_label,
desc_path_sw, ri, scoresinacoficliente as score_sinacofi,
score_cf_ori_calculado, score_pb_ori_calculado, score_bhv,
score_interno = case
when segmento_label = 'Consumer Finance' and (desc_path_sw in ('Nuevo', 'Nuevo Campana')) then score_cf_ori_calculado
when segmento_label = 'Personal Banking' and (desc_path_sw in ('Nuevo', 'Nuevo Campana')) then score_pb_ori_calculado
when desc_path_sw in ('Antiguo', 'Antiguo Campana') then score_bhv
end,
bg = case when castigos_2 is null then 1 else 0 end
from rt_scoring.dbo.sw_sols_res
where
camada >= 201301 and camada < %s
and resultado_sw = 'Aceptado'
and desc_path_sw in ('Antiguo', 'Antiguo Campana', 'Nuevo', 'Nuevo Campana')
and ri in ('A','B','C','D','E')
" %>%
  sprintf(maxper)
dfperf <- sqlQuery(chn, query) %>%
  tbl_df() %>%
  mutate(periodo = paste0(camada, "01") %>% ymd() + days(2)) %>%
  filter(!is.na(score_interno))
save(dfperf, file = "data/dfperf.RData")
# UI input choices for the app.
segmento_choices <- c("Personal Banking" ,"Consumer Finance")
path_choices <- c("Antiguo", "Antiguo Campana", "Nuevo", "Nuevo Campana")
slider_choices <- sort(unique(dfressol$camada))
# source("scripts/queries.R")
|
2940d22c2171ac90e4d2ed082083ee7af62a1631
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/13783_0/rinput.R
|
b5e22265168c7719b6b13f23cc3eb7298bb9746b
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read a Newick tree, remove its root with
# ape::unroot(), and write the unrooted tree back out.
library(ape)
testtree <- read.tree("13783_0.txt")  # parse Newick file into a "phylo" object
unrooted_tr <- unroot(testtree)  # drop the root node
write.tree(unrooted_tr, file="13783_0_unrooted.txt")  # write Newick output
|
f7b894eddfc5008154ee2bf534e2efceb8ad3ab0
|
798fbef5e4e621078d01b7eb3e75f5b3f0901cc8
|
/logic-operators.R
|
c17a91549d6a466f57be86730d9c9cb13174ac21
|
[
"MIT"
] |
permissive
|
alikahwaji/project-R
|
dcc891c0d43f414c0393d84d67dad6df283173ab
|
94f7ca3b25d91e1f650f23ba7e51f51877aea398
|
refs/heads/master
| 2021-06-22T19:29:06.019214
| 2017-08-16T17:16:00
| 2017-08-16T17:16:00
| 91,201,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
logic-operators.R
|
# Demonstration script: R's logical type and comparison/logical operators.
#logical:
#TRUE
#FALSE
# Comparisons return logical values (printed when run interactively).
4 < 5
10 > 100
4 == 5
4 == 4
# Available operators:
# ==   equality
#!=   inequality
# >   greater than
#>=   greater than or equal
#<   less than
#<=   less than or equal
#!   negation
#|   elementwise OR
#&   elementwise AND
#isTRUE(x)  TRUE only for a single non-NA TRUE value
result <- 4 < 5        # TRUE
typeof(result)         # "logical"
result2 <- !TRUE       # FALSE
result3 <- !(5>1)      # FALSE
result | result2       # TRUE
result2 | result3      # FALSE
result & result2       # FALSE
isTRUE(result)         # TRUE
isTRUE(result2)        # FALSE
|
bddf0e60a03ab8476c13aefad9a36f049c8dee19
|
1fd34acc44f63e5110b5d4058fcc99ce4e6a83c4
|
/man/set_me_up.Rd
|
d58fbaedf5c9a6d2bb008f5d19ba59dc12f4d9d7
|
[
"MIT"
] |
permissive
|
Mert-Cihangiroglu/DS-Project-Template
|
5cae627296f51b98b8b3edb83a1944fde7998fff
|
8013d3980036722f739bf67e4cc9a47a0a53e14b
|
refs/heads/main
| 2023-07-12T22:43:16.302763
| 2021-08-12T23:16:21
| 2021-08-12T23:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 482
|
rd
|
set_me_up.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_me_up.R
\name{set_me_up}
\alias{set_me_up}
\title{Create folder structure}
\usage{
set_me_up(projectname = "Template Project")
}
\arguments{
\item{projectname}{A character string of the project title}
}
\value{
A set of folders created in current directory
}
\description{
This function will create a folder structure that is needed for a data
science project, starting from the current directory.
}
|
886b08620f85294307d051808867153bc5432380
|
665f3842c37fc5730c9cefee95ff480cabf0ae36
|
/man/determineIDs.Rd
|
a208c72cfdcacc40977d1b8cb6c10513d3f60865
|
[] |
no_license
|
BarkleyBG/multilevelMatching
|
c0bc128c81d1513c52dda7eafa3b6bc7b6ee1aa6
|
58e7a0b2a612671d4d0214f0d545ca90bd599d70
|
refs/heads/develop
| 2021-01-23T04:13:49.838974
| 2018-03-03T16:54:11
| 2018-03-03T16:54:11
| 86,175,312
| 0
| 0
| null | 2018-01-26T17:04:46
| 2017-03-25T17:25:14
|
R
|
UTF-8
|
R
| false
| true
| 921
|
rd
|
determineIDs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepareData.R
\name{determineIDs}
\alias{determineIDs}
\title{Determines Unique Unit Identifiers}
\usage{
determineIDs(Y, W, X)
}
\arguments{
\item{Y}{a continuous response vector (1 x n)}
\item{W}{a treatment vector (1 x n) with numerical values indicating
treatment groups}
\item{X}{A covariate matrix (p x n) with no intercept. When
match_on="existing", then X must be a vector (1 x n) of user-specified
propensity scores.}
}
\value{
\code{unit_ids}
}
\description{
This function attempts to determine unique identifying information,
\code{unit_ids}, for each unit in the dataset. Users can apply this function
on their raw data ahead of using \code{\link{multiMatch}} to ensure that the
matching procedure will work. \code{unit_ids} will be used to identify study
units in some of the information output from \code{\link{multiMatch}}.
}
|
5ce65a51f11ea1fe65601c1fcd22afb8bec3d0e0
|
c651c828ed7d003fdda81ed64c41b72e3af4be1b
|
/tests/testthat.R
|
e8f70ecf6fd4e9babdfc0a0776965152cf82d54b
|
[] |
no_license
|
saurfang/graphcoloring
|
820d3ac3210396e2f6356b354b3e7eea9d057505
|
65a98f6953ada8e2fdc1b202569c0cf50f37ede6
|
refs/heads/master
| 2020-03-29T05:15:06.838679
| 2020-01-18T20:05:54
| 2020-01-18T20:05:54
| 149,574,814
| 16
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs every test file under
# tests/testthat/ for the graphcoloring package.
library(testthat)
library(graphcoloring)
test_check("graphcoloring")
|
f8068f66918af29a7751a60fe8d4b549dc2fc6c3
|
c55d7812cfc02b401397f3ef4acbb4269ef18284
|
/10x-pilot_region-specific_sACC_step04_comparison-publishedData_MNTAug2020.R
|
ced1ed518bb97a5751e5921cf335f39a1fd2602f
|
[] |
no_license
|
BertoLabMUSC/10xPilot_snRNAseq-human
|
af8836a0d1019260c691dafef692a24885c7f93b
|
4da6e15b7d9a4507c0997e375d6984bbaecb8995
|
refs/heads/master
| 2022-12-28T03:19:08.700352
| 2020-10-08T22:38:26
| 2020-10-08T22:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,583
|
r
|
10x-pilot_region-specific_sACC_step04_comparison-publishedData_MNTAug2020.R
|
### MNT 10x snRNA-seq workflow: step 04
### **Region-specific analyses**
### - (2x) sACC samples from: Br5161 & Br5212
### - Comparison to Velmeshev, et al (Science 2019)
### Correlates cluster-specific t-statistics between the LIBD sACC clusters
### and the published ASD cortex (ACC/PFC) clusters, then plots heatmaps.
#####################################################################
library(SingleCellExperiment)
library(EnsDb.Hsapiens.v86)
library(scater)
library(scran)
library(batchelor)
library(DropletUtils)
library(jaffelab)
library(limma)
library(pheatmap)
library(RColorBrewer)
### Palette taken from `scater`
tableau10medium = c("#729ECE", "#FF9E4A", "#67BF5C", "#ED665D",
"#AD8BC9", "#A8786E", "#ED97CA", "#A2A2A2",
"#CDCC5D", "#6DCCDA")
tableau20 = c("#1F77B4", "#AEC7E8", "#FF7F0E", "#FFBB78", "#2CA02C",
"#98DF8A", "#D62728", "#FF9896", "#9467BD", "#C5B0D5",
"#8C564B", "#C49C94", "#E377C2", "#F7B6D2", "#7F7F7F",
"#C7C7C7", "#BCBD22", "#DBDB8D", "#17BECF", "#9EDAE5")
# ===
### Comparison to Velmeshev, et al (PFC & ACC) ========
## Load within-ACC statistics (findMarkers output per cluster)
load("/dcl01/ajaffe/data/lab/singleCell/velmeshev2019/analysis_MNT/markers-stats_velmeshev-et-al_ASD-cortex-withinRegion_findMarkers-SN-LEVEL_MNTAug2020.rda",
verbose=T)
# markers.asdVelm.t.pfc, markers.asdVelm.t.acc
#rm(markers.asdVelm.t.pfc)
load("/dcl01/ajaffe/data/lab/singleCell/velmeshev2019/analysis_MNT/SCE_asd-velmeshev-etal_MNT.rda", verbose=T)
# sce.asd, sce.asd.pfc, sce.asd.acc
sce.asd.pfc <- sce.asd[ ,sce.asd$region=="PFC"]
sce.asd.acc <- sce.asd[ ,sce.asd$region=="ACC"]
# Need to convert Symbol in sce.dlpfc > EnsemblID, and also use n nuclei for t.stat
load("rdas/regionSpecific_sACC-n2_cleaned-combined_SCE_MNTFeb2020.rda", verbose=T)
# sce.sacc, clusterRefTab.sacc, pc.choice.sacc, chosen.hvgs.sacc, ref.sampleInfo
# Drop "Ambig.lowNtrxts" (43 nuclei)
sce.sacc <- sce.sacc[ ,sce.sacc$cellType != "Ambig.lowNtrxts"]
sce.sacc$cellType <- droplevels(sce.sacc$cellType)
## Load LIBD sACC stats (just need the "1vAll" result)
load("rdas/markers-stats_sACC-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.sacc.t.design, markers.sacc.wilcox.block, markers.sacc.binom.block, markers.sacc.t.1vAll
rm(markers.sacc.t.design, markers.sacc.wilcox.block, markers.sacc.binom.block)
# Re-key the sACC marker tables from gene symbols to Ensembl IDs so they can
# be intersected with the Velmeshev tables.
for(i in names(markers.sacc.t.1vAll)){
rownames(markers.sacc.t.1vAll[[i]]) <- rowData(sce.sacc)$ID[match(rownames(markers.sacc.t.1vAll[[i]]),
rownames(sce.sacc))]
}
## Calculate and add t-statistic (= std.logFC * sqrt(N)) from contrasts
# and fix row order to the first entry "Astro"
fixTo <- rownames(markers.sacc.t.1vAll[["Astro"]])
for(s in names(markers.sacc.t.1vAll)){
markers.sacc.t.1vAll[[s]]$t.stat <- markers.sacc.t.1vAll[[s]]$std.logFC * sqrt(ncol(sce.sacc))
markers.sacc.t.1vAll[[s]] <- markers.sacc.t.1vAll[[s]][fixTo, ]
}
# Pull out the t's (genes x clusters matrix)
ts.sacc <- sapply(markers.sacc.t.1vAll, function(x){x$t.stat})
rownames(ts.sacc) <- fixTo
## Then for Velmeshev et al. - fix row order to the first entry "AST-FB"
fixTo <- rownames(markers.asdVelm.t.acc[["AST-FB"]])
for(s in names(markers.asdVelm.t.acc)){
markers.asdVelm.t.acc[[s]]$t.stat <- markers.asdVelm.t.acc[[s]]$std.logFC * sqrt(ncol(sce.asd.acc))
markers.asdVelm.t.acc[[s]] <- markers.asdVelm.t.acc[[s]][fixTo, ]
}
# Pull out the t's
ts.velmeshev.acc <- sapply(markers.asdVelm.t.acc, function(x){x$t.stat})
rownames(ts.velmeshev.acc) <- fixTo
## Take intersecting between two and subset/reorder
sharedGenes <- intersect(rownames(ts.velmeshev.acc), rownames(ts.sacc))
length(sharedGenes) # 27,442
ts.velmeshev.acc <- ts.velmeshev.acc[sharedGenes, ]
ts.sacc <- ts.sacc[sharedGenes, ]
# Pearson correlation of cluster t-statistics across shared genes.
cor_t_sacc <- cor(ts.sacc, ts.velmeshev.acc)
rownames(cor_t_sacc) = paste0(rownames(cor_t_sacc),"_","libd")
colnames(cor_t_sacc) = paste0(colnames(cor_t_sacc),"_","asd.acc")
range(cor_t_sacc)
## Heatmap of the correlation matrix (LIBD sACC vs Velmeshev ACC)
theSeq.all = seq(-.95, .95, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
pdf("pdfs/exploration/Velmeshev-ASD_pfc-acc/overlap-velmeshev-ASD-acc_with_LIBD-10x-sACC_Aug2020.pdf")
pheatmap(cor_t_sacc,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=10, fontsize_row=11, fontsize_col=11,
display_numbers=T, number_format="%.2f", fontsize_number=6.0,
legend_breaks=c(seq(-0.95,0.95,by=0.475)),
main="Correlation of cluster-specific t's between LIBD sACC to \n ACC from (Velmeshev et al. Science 2019)")
dev.off()
### What if compared between both the .acc set of stats vs the .pfc?? =============
## Set up PFC t's (same transformation as ACC above)
fixTo <- rownames(markers.asdVelm.t.pfc[["AST-FB"]])
for(s in names(markers.asdVelm.t.pfc)){
markers.asdVelm.t.pfc[[s]]$t.stat <- markers.asdVelm.t.pfc[[s]]$std.logFC * sqrt(ncol(sce.asd.pfc))
markers.asdVelm.t.pfc[[s]] <- markers.asdVelm.t.pfc[[s]][fixTo, ]
}
# Pull out the t's
ts.velmeshev.pfc <- sapply(markers.asdVelm.t.pfc, function(x){x$t.stat})
rownames(ts.velmeshev.pfc) <- fixTo
sharedGenes.all <- intersect(rownames(ts.velmeshev.acc), rownames(ts.sacc))
sharedGenes.all <- intersect(sharedGenes.all, rownames(ts.velmeshev.pfc))
# of length 27,422
# Subset/order
ts.sacc <- ts.sacc[sharedGenes.all, ]
ts.velmeshev.pfc <- ts.velmeshev.pfc[sharedGenes.all, ]
ts.velmeshev.acc <- ts.velmeshev.acc[sharedGenes.all, ]
colnames(ts.velmeshev.pfc) <- paste0(colnames(ts.velmeshev.pfc),"_pfc")
colnames(ts.velmeshev.acc) <- paste0(colnames(ts.velmeshev.acc),"_acc")
ts.velmeshev.full <- cbind(ts.velmeshev.pfc, ts.velmeshev.acc)
cor_t_sacc.asd <- cor(ts.sacc, ts.velmeshev.full)
range(cor_t_sacc.asd)
## Heatmap (both regions, with a region annotation bar)
# Add some cluster info for add'l heatmap annotations
regionInfo <- data.frame(region=ss(colnames(ts.velmeshev.full), "_",2))
rownames(regionInfo) <- colnames(ts.velmeshev.full)
theSeq.all = seq(-.95, .95, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
pdf("pdfs/exploration/Velmeshev-ASD_pfc-acc/overlap-velmeshev-ASD-bothRegions_with_LIBD-10x-sACC_Aug2020.pdf", width=10)
pheatmap(cor_t_sacc.asd,
color=my.col.all,
annotation_col=regionInfo,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=10.5, fontsize_row=11, fontsize_col=10,
display_numbers=T, number_format="%.2f", fontsize_number=4.5,
legend_breaks=c(seq(-0.95,0.95,by=0.475)),
main="Correlation of cluster-specific t's between LIBD sACC to \n ACC & PFC from (Velmeshev et al. Science 2019)")
dev.off()
|
c17fdd8c56ac2882155546364613f8e07b595ee5
|
5caafae5175714d59b34af7978c51b5b49cb6a52
|
/man/default.chunkFormatter.Rd
|
a320e6154edf3e722b8d3b0eb71886c693933bc6
|
[] |
no_license
|
linearregression/ioregression
|
bd6a7ff4fa87d08317f665dd5de5e4249f2ca89a
|
003cadaccb446863d1e1d52b02d5b79a8da078a1
|
refs/heads/master
| 2020-12-27T12:08:40.807345
| 2015-05-26T21:12:00
| 2015-05-26T21:12:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 559
|
rd
|
default.chunkFormatter.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/adf.r
\name{default.chunkFormatter}
\alias{default.chunkFormatter}
\title{Construct a default chunkFormatter}
\usage{
default.chunkFormatter(sep = sep, nsep = nsep, strict = strict)
}
\arguments{
\item{sep}{character separating the data columns.}
\item{nsep}{character separating the column of rownames. Set to
NA to generate automatic rownames.}
\item{strict}{logical. Whether the parser should run in strict mode.}
}
\description{
Construct a default chunkFormatter
}
|
a59e475498a43a867d5208c4bcdf79ec5a172034
|
fc2823709633c23bee9014ac442e0e7d00e1e618
|
/server.R
|
5df043c80aff944e628b10037c0009d4e60fa99c
|
[] |
no_license
|
NarayananAmudha/Predict_Word_NLP
|
601546dfb18c2fed325e3c8d6752e325e8162917
|
25a1a164a9cd786fc2dd273a1c2c3e6c3a7bdfd9
|
refs/heads/master
| 2022-11-12T20:51:52.349735
| 2020-07-11T12:48:21
| 2020-07-11T12:48:21
| 278,852,798
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,408
|
r
|
server.R
|
# server.R setup: load shiny, the prediction function, and the 4-gram table.
library(shiny)
source("word_prediction.R")
### Function `predict_w4(input_text, ngram4.dt)`
### predicts the next word from a trigram input.
### arguments: 1) input_text, char;
### 2) ngram4.dt,, data.table of 4grams and their frequencies.
### 1) processes the input text,
### 2) makes a data.table from the last 3 words of the input text,
### 3) selects from an existing data.table of 4grams
### those that match the input trigram, and
### 4) returns a list `tops` of the 4th words ordered by most frequent.
load("tot.freqs_ngram.RData")
### That loads the data.table of 4grams and frequencies
### `tot.freqs`
fix_apo <- function(word){
  ## Escape apostrophes in contractions (e.g. "don't" -> "don\'t") so the
  ## word can be embedded safely inside a single-quoted JavaScript string.
  ## Vectorised: elements without an apostrophe pass through unchanged.
  ## (Original used `fixed=T` and an ifelse() over a full-vector gsub;
  ## this version uses TRUE and only rewrites the matching elements.)
  has_apo <- grepl("'", word, fixed = TRUE)
  word[has_apo] <- gsub("'", "\\'", word[has_apo], fixed = TRUE)
  word
}
na2commons <- function(word){
  ## Replace "missing" predicted words in the character vector `word`.
  ## An entry is missing when it is NA or the literal string "na"
  ## (case-insensitive). Replacements come from `commons` — originally a
  ## list of common English words, currently empty strings.
  #commons <- c("the", "be", "to", "of", "and", "a")
  commons <- c("", "", "", "", "", "")
  ## grepl() returns FALSE for NA input, so is.na() must be checked as well.
  bad <- is.na(word) | grepl("^na$", word, ignore.case = TRUE)
  if (any(bad)) {
    ## Recycle positions through `commons` instead of indexing commons[i]
    ## directly: the original vector branch used commons[i], which returned
    ## NA for every element past position 6 (bug fix).
    idx <- ((which(bad) - 1L) %% length(commons)) + 1L
    word[bad] <- commons[idx]
  }
  word
}
insert_choice <- function(word, end_space){
  ## amends the input text with the chosen word.
  ## `text1` is the input text field (see file 'ui.R').
  ## `end_space` is boolean, and is defined in the shinyServer function.
  ## Returns a JavaScript snippet (run client-side via the button's onclick)
  ## that appends `word` to the #text1 input — with a leading space unless
  ## the input already ends in one — then moves the caret to the end.
  ## NOTE(review): `word` should already be escaped with fix_apo(), since it
  ## is spliced into a single-quoted JS string.
  paste("$('#text1').val($('#text1').val() + '",
        ifelse(end_space, ""," "),
        word, " ", "').trigger('change');
        var input = $('#text1');
        input[0].selectionStart =
        input[0].selectionEnd = input.val().length;",
        sep='')
}
babble <- function(intext, N = 1, top = TRUE, X = 1){
  ## Generate a phrase of N predicted words, starting from the text `intext`.
  ##   N:   number of words to generate.
  ##   top: if TRUE, always take the X-th ranked prediction; if FALSE, pick
  ##        randomly among the top 3 predictions.
  ##   X:   rank of the prediction to use when top = TRUE.
  ## Relies on the globals `predict_w4()` and `tot.freqs` loaded above.
  phrase <- ""
  for(i in 1:N){
    ## Bug fix: the original used ifelse(top, <assignment>, ) with a missing
    ## third argument, which raised an error whenever top = FALSE. A plain
    ## if/else restores the (previously commented-out) random branch.
    if (top) {
      wordnext <- na2commons(predict_w4(intext, tot.freqs)[X])
    } else {
      wordnext <- na2commons(predict_w4(intext, tot.freqs)[round(runif(1, 1, 3), 0)])
    }
    phrase <- if (phrase == "") wordnext else paste(phrase, wordnext)
    intext <- paste(intext, phrase)
  }
  phrase
}
## JavaScript snippet for the "Clear" button: empties the #text1 input and
## places the caret at the (now empty) end of the field.
clear <- "$('#text1').val('');
        var input = $('#text1');
        input[0].selectionStart = input[0].selectionEnd = input.val().length;"
# Shiny server: shows five next-word suggestion buttons under the text box;
# clicking a button runs a JS snippet (from insert_choice) that appends the
# word to the input.
shinyServer(
  function(input, output, session) {
    # Reactive accessors: current input text, and whether it ends in a space
    # (controls whether insert_choice prepends a space).
    intext <- reactive({input$text1})
    end_space <- reactive( grepl(" $", intext()) )
    phrase1 <- ""
    phrase2 <- ""
    phrase3 <- ""
    phrase4 <- ""
    phrase5 <- ""
    # "Clear" button: onclick runs the global `clear` JS snippet.
    output$midPanel <- renderUI({
      tags$div(
        tags$button(type="button", id="clearbut", rows=2,cols=50, "Clear",
                    class="btn action-button shiny-bound-input",
                    onclick=clear)
      )
    })
    # Re-rendered whenever the text (or babble length input) changes:
    # computes the top-5 predicted phrases and one button per phrase.
    output$bottomPanel <- renderUI({
      #tags$script(src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js")
      #if(input$num_bab >1){
      #if (str_sub(intext(), start=-1) == " ") {
      phrase1 <- babble(intext(),input$num_bab,TRUE,1)
      phrase2 <- babble(intext(),input$num_bab,TRUE,2)
      phrase3 <- babble(intext(),input$num_bab,TRUE,3)
      phrase4 <- babble(intext(),input$num_bab,TRUE,4)
      phrase5 <- babble(intext(),input$num_bab,TRUE,5)
      #}
      # Each button's onclick is a JS snippet appending the (apostrophe-
      # escaped) phrase to the text input.
      buttonR1Click <- insert_choice(fix_apo(phrase1),end_space())
      buttonR2Click <- insert_choice(fix_apo(phrase2),end_space())
      buttonR3Click <- insert_choice(fix_apo(phrase3),end_space())
      buttonR4Click <- insert_choice(fix_apo(phrase4),end_space())
      buttonR5Click <- insert_choice(fix_apo(phrase5),end_space())
      # ,tags$head(tags$style("#text1{color: blue;font-size: 22px;
      #                       font-style: bold-italic;
      #                       }" ) )
      tags$div(
        tags$button(type="button", id="wordR1but", phrase1,
                    class="btn action-button shiny-bound-input",
                    style="color:darkred",
                    onclick=buttonR1Click)
        ,tags$head(tags$style("#wordR1but{color: blue;font-size: 18px;
                              font-style: bold-italic;
                              }" ) )
        ,tags$button(type="button", id="wordR2but", phrase2,
                     class="btn action-button shiny-bound-input",
                     style="color:darkred",
                     onclick=buttonR2Click)
        ,tags$head(tags$style("#wordR2but{color: blue;font-size: 18px;
                              font-style: bold-italic;
                              }" ) )
        ,tags$button(type="button", id="wordR3but", phrase3,
                     class="btn action-button shiny-bound-input",
                     style="color:darkred",
                     onclick=buttonR3Click)
        ,tags$head(tags$style("#wordR3but{color: blue;font-size: 18px;
                              font-style: bold-italic;
                              }" ) )
        ,tags$button(type="button", id="wordR4but", phrase4,
                     class="btn action-button shiny-bound-input",
                     style="color:darkred",
                     onclick=buttonR4Click)
        ,tags$head(tags$style("#wordR4but{color: blue;font-size: 18px;
                              font-style: bold-italic;
                              }" ) )
        ,tags$button(type="button", id="wordR5but", phrase5,
                     class="btn action-button shiny-bound-input",
                     style="color:darkred",
                     onclick=buttonR5Click)
        ,tags$head(tags$style("#wordR5but{color: blue;font-size: 18px;
                              font-style: bold-italic;
                              }" ) )
      )
    })
  }
)
|
fa5e8ceabe15c91b5d15e500b59e14e3bad91538
|
ab4e5c960c48a751d066adfad93d6bfda23271b8
|
/code/analysis/old_api/markov/markov_final.R
|
5212a18c508d514046b708a245e782f045ede17f
|
[] |
no_license
|
iburunat/doc_suomi
|
23cef5f4ed5000e362e0d66904b6aef9b1aea541
|
e9debe662df7e6be591a29943e11ba83db248c01
|
refs/heads/main
| 2023-04-17T21:22:12.268736
| 2021-05-13T22:45:25
| 2021-05-13T22:45:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,712
|
r
|
markov_final.R
|
setwd("/home/pa/Documents/github/doc_suomi/code")
source("utils.R")
source("data_cook.R")
cat(howto_data)
# final = c()
# for(run in 1:100){
# #Base conversion
dt = base() %>%
group_by(album_id) %>%
mutate(valence = greater(valence),
energy = greater(energy),
loudness = greater(loudness),
tempo = greater(tempo)) %>%
album_splitter()
#Random conversion
dtr = base() %>%
group_by(album_id) %>%
sample_n(length(valence)) %>%
mutate(valence = greater(valence),
energy = greater(energy),
loudness = greater(loudness),
tempo = greater(tempo)) %>%
album_splitter()
#crossval seeds
dt = sample(dt, length(dt))
dtr = sample(dtr, length(dtr))
# split into traning and test data
dt_train = dt[1:(length(dt)*0.8)];
dt_test = dt[(length(dt)*0.8):length(dt)]; dr_test = dtr[(length(dtr)*0.8):length(dtr)]
valence = c(); energy = c() ;loudness = c(); tempo = c()
valence_t = c(); energy_t = c() ;loudness_t = c(); tempo_t = c()
rv = c(); re = c() ; rl = c(); rt = c()
for(i in 1:length(dt_train)){
valence[[i]] <- c(dt_train[[i]]$valence)
energy[[i]] <- c(dt_train[[i]]$energy)
loudness[[i]] <- c(dt_train[[i]]$loudness)
tempo[[i]] <- c(dt_train[[i]]$tempo)
}
for(i in 1:length(dt_test)){
valence_t[[i]] <- c(dt_test[[i]]$valence)
energy_t[[i]] <- c(dt_test[[i]]$energy)
loudness_t[[i]] <- c(dt_test[[i]]$loudness)
tempo_t[[i]] <- c(dt_test[[i]]$tempo)
}
for(i in 1:length(dr_test)){
rv[[i]] <- c(dr_test[[i]]$valence)
re[[i]] <- c(dr_test[[i]]$energy)
rl[[i]] <- c(dr_test[[i]]$loudness)
rt[[i]] <- c(dr_test[[i]]$tempo)
}
states = as.character(c("greater", "smaller", "start"))
# #Getting the transition
v = data.frame(markovchainFit(data = valence)$estimate@transitionMatrix)
e = data.frame(markovchainFit(data = energy)$estimate@transitionMatrix)
l = data.frame(markovchainFit(data = loudness)$estimate@transitionMatrix)
t = data.frame(markovchainFit(data = tempo)$estimate@transitionMatrix)
#creating the object
v <- new("markovchain", states = states, transitionMatrix = matrix(data = as.vector(t(v)), byrow = TRUE, nrow = nrow(v)), name = "valence")
e <- new("markovchain", states = states, transitionMatrix = matrix(data = as.vector(t(e)), byrow = TRUE, nrow = nrow(e)), name = "energy")
l <- new("markovchain", states = states, transitionMatrix = matrix(data = as.vector(t(l)), byrow = TRUE, nrow = nrow(l)), name = "loudness")
t <- new("markovchain", states = states, transitionMatrix = matrix(data = as.vector(t(t)), byrow = TRUE, nrow = nrow(t)), name = "tempo")
result = tibble(empirical_valence = unlist(lapply(valence_t, function(x){return(mll(v, x))})),
empirical_energy = unlist(lapply(energy_t, function(x){return(mll(e, x))})),
empirical_loudness = unlist(lapply(loudness_t, function(x){return(mll(l, x))})),
empirical_tempo = unlist(lapply(tempo_t, function(x){return(mll(t, x))})),
random_valence = unlist(lapply(rv, function(x){return(mll(v, x))})),
random_energy = unlist(lapply(re, function(x){return(mll(e, x))})),
random_loudness = unlist(lapply(rl, function(x){return(mll(l, x))})),
random_tempo = unlist(lapply(rt, function(x){return(mll(t, x))}))
)
result %>%
melt() %>%
tidyr::separate(variable, c("condition", "feature"), "_") %>%
group_by(condition, feature) %>%
summarise(likelihood = mean(value),
stder = sd(value)/sqrt(length(value))) -> result
# final[[run]] = result
# }
oi = bind_rows(final)
colnames(oi)
oi %>%
# group_by(condition, feature) %>%
# summarise(lh= mean(value),
# stder = sd(value)/sqrt(length(value))) %>%
ggplot(aes(x=likelihood, fill = condition)) +
facet_wrap(~feature)+
geom_density(alpha = 0.8)
library(rstatix)
oi %>% filter(condition == 'empirical') %>% filter(feature == "energy") %>% select(likelihood) %>% c() -> emp
t.test(emp$likelihood, rand$likelihood)
library(effsize)
oi %>% dplyr::filter(feature == "energy") ->ha
cohen.d(emp$likelihood, rand$likelihood, paired = TRUE)
# print("Valence"); v@transitionMatrix;
# print("Energy"); e@transitionMatrix;
# print("Loudness"); l@transitionMatrix;
# print("Tempo"); t@transitionMatrix
|
d2293571c9bb1babdb08b86fc2f880847b4bff5b
|
eb74dde34a3b6b9f337e33a033ca27a119034245
|
/R/Survival.R
|
9b0543d34819b0b46549e8d98b7d8b55104aef8a
|
[] |
no_license
|
cran/DetLifeInsurance
|
3c9a1632fb0ddd78ae9c7dcc4a633ef429e281d8
|
d0b5d3a696c5bc72ce0692d6cf7d4e9921336cfc
|
refs/heads/master
| 2022-12-17T07:44:43.324629
| 2020-09-12T08:20:07
| 2020-09-12T08:20:07
| 278,226,350
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
Survival.R
|
#' @title Survival Probability
#' @description Calculates the probability of survival given a mortality table for an individual or a group.
#' @param x An integer or a vector including only integers representing the age of each individual.
#' @param n An integer. The term.
#' @param data A data.frame of the mortality table, with the first column being the age and the second one, the probability of death.
#' @param prop A numeric value. The proportion of the mortality table used, between 0 and 1.
#' @export
#' @keywords Survival Probability
#' @return NULL
#' @examples
#' Survival(20,2,CSO58MANB,1)
#' Survival(31,33,CSO80MANB,0.8)
#'
Survival <- function(x, n, data, prop = 1) {
  ## Probability that a life aged `x` survives the next `n` years, computed
  ## from the mortality table `data` (column 1: age, column 2: probability
  ## of death qx), with mortality scaled by `prop` (0 < prop <= 1).
  ## Temporarily raise printing precision; restored on exit.
  dig <- getOption("digits")
  on.exit(options(digits = dig))
  options(digits = 15)
  ## is_integer() is a package helper returning 1 for whole numbers.
  ## NOTE(review): despite the docs, `x` is treated as a scalar here.
  if (x >= 0 & is_integer(x) == 1 & n >= 0 & is_integer(n) == 1 & prop > 0) {
    Prob <- 1
    if (n == 0) {
      ## Surviving zero years is certain.
      Prob <- 1
    } else {
      for (l in x:(x + n - 1)) {
        ## At the last age of the table the full mortality applies,
        ## regardless of `prop`.
        if (l == (nrow(data) - 1)) {
          prop <- 1
        }
        Prob <- Prob * (1 - data[l + 1, 2] * prop)
        ## Ages beyond the table produce NA; treat as certain death.
        ## (Idiom fix: was `is.na(Prob)==1`.)
        if (is.na(Prob)) {
          Prob <- 0
        }
      }
      Prob <- as.numeric(Prob)
    }
    return(Prob)
  } else {
    stop("Check values")
  }
}
|
6ad0b1245c9d443683e4a1b54e84867d3a07ed7b
|
376b1c2913cd60ba9c2dbf7e8c87ac24a398aedf
|
/Documents/data/plot4.R
|
9fc6446e2f63657dd41381d5955c0156b586d918
|
[] |
no_license
|
mohitsh/Exploratory-Data-Analysis_Course-Project-1
|
14d89427a50ba3a4d81cdb9d1ab750bfa0958475
|
202fcda1c1fe48b6cb3a556e5e57a613d9487430
|
refs/heads/master
| 2021-01-23T03:42:33.335446
| 2015-08-15T08:18:15
| 2015-08-15T08:18:15
| 35,385,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,604
|
r
|
plot4.R
|
# plot4.R — draws a 2x2 panel of household power-consumption time series
# for 2007-02-01/02 and saves it to plot4.png.
# Read the data; header = TRUE keeps the first row as column names.
data <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
# Merge the Date and Time columns into a single POSIX timestamp.
data$Timestamp <- strptime(paste(data$Date,data$Time), format="%d/%m/%Y %H:%M:%S")
# Convert the Date column to Date class so it can be range-filtered.
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Keep only the two days of interest.
data1 <- data[data$Date >= as.Date("2007-02-01") & data$Date <= as.Date("2007-02-02"),]
# plot4: four panels, row-wise.
par(mfrow=c(2,2), oma=c(0,0,2,0))
# Panel 1: global active power. (Bug fix: the original had an extra
# unmatched ")" after this call, which made the whole script a parse error.)
with(data1,
     plot(Timestamp, Global_active_power, type="l",
          ylab="Global Active Power (kilowatts)",
          xlab="Timestamp", main="Global Active Power and Timestamp")
)
# Panel 2: voltage.
with(data1,
     plot(Timestamp, Voltage, type="l", ylab="Voltage",
          xlab="datetime", main="Voltage and Timestamp")
)
# Panel 3: the three sub-metering series overlaid, with a legend.
with(data1, plot(Timestamp, Sub_metering_1, col="green", type="l"))
with(data1, points(Timestamp, Sub_metering_2, col="orange", type="l"))
with(data1, points(Timestamp, Sub_metering_3, col="blue", type="l"))
legend("topright", lty=c(1,1,1), col=c("green","orange", "blue"),
       legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
# Panel 4: global reactive power.
with(data1,
     plot(Timestamp, Global_reactive_power, type="l", xlab="datetime",
          ylab="Global Reactive Power",
          main="Global Reactive Power and Timestamp")
)
# Copy the screen device to a PNG file and close the PNG device.
dev.copy(png, "plot4.png",width=480,height=480,units="px")
dev.off()
|
918bc67f6b3011c97cf1286f337295db58c14fb3
|
a7bc39f0ce5053cfde1616acd7b1ef27a87ca67a
|
/scripts/motivation.R
|
43efcb4d24df7bd7a152841c7d401fa039dbefea
|
[
"MIT"
] |
permissive
|
BillHuang01/CoMinED
|
cdd7ee778da21c9f9d09db94c7fafa2273ffabef
|
bc4ca183e9bf01a9928fa645b9e03df4d90cae97
|
refs/heads/main
| 2023-04-01T23:53:46.230843
| 2021-04-02T00:47:16
| 2021-04-02T00:47:16
| 352,440,023
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,812
|
r
|
motivation.R
|
#############################################################################
# motivation — motivating example for the CoMinED paper (constraint
# visualisation, LHS/SCMC comparison, ALGR vs maximin LHD augmentation).
# NOTE(review): rm(list = ls()) in a script is discouraged; run in a fresh
# session instead.
rm(list = ls())
constraint <- function(x){
  ## Evaluate the three inequality constraints at the point x = (x1, x2);
  ## the point is feasible when every returned value is <= 0.
  x1 <- x[1]
  x2 <- x[2]
  c(x1 - sqrt(50 * (x2 - 0.52)^2 + 2) + 1,
    sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75 - x1,
    0.65^2 - x1^2 - x2^2)
}
# constraint contour: draw the three constraint boundaries and shade the
# (small) feasible region in red.
plot(NULL, type = 'n', xlim = c(0,1), ylim = c(0,1), ylab = "", xlab = "")
x2 <- seq(0,1,length.out = 1001)
c1.x1 <- sqrt(50 * (x2 - 0.52)^2 + 2) - 1
lines(c1.x1, x2, lty = 2)
c2.x1 <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75
lines(c2.x1, x2, lty = 2)
c3.x1 <- sqrt(0.65^2 - x2[x2<=0.65]^2)
lines(c3.x1, x2[x2<=0.65], lty = 2)
# Index ranges below were found manually to trace the feasible polygon.
polygon(x = c(c1.x1[383:536], c2.x1[536:529], c3.x1[529:412], c2.x1[412:383]),
        y = c(x2[383:536], x2[536:529], x2[529:412], x2[412:383]),
        col = "red")
# latin hypercube samples: only ~22 of 4500 LHS points are feasible.
library(lhs)
set.seed(20210320)
lhs <- randomLHS(4500, 2)
lhs.gval <- t(apply(lhs, 1, constraint))
lhs.out.idx <- apply(lhs.gval, 1, function(x) return(any(x>0)))
lhs.out <- lhs[lhs.out.idx,]
lhs.in <- lhs[!lhs.out.idx,]
nrow(lhs.in) # 22
# visualization
plot(lhs, col = "green", pch = 18, cex = 0.75,
     xlim = c(0,1), ylim = c(0,1),xlab = "", ylab = "")
x2 <- seq(0,1,length.out = 1001)
c1.x1 <- sqrt(50 * (x2 - 0.52)^2 + 2) - 1
lines(c1.x1, x2, lty = 2)
c2.x1 <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75
lines(c2.x1, x2, lty = 2)
c3.x1 <- sqrt(0.65^2 - x2[x2<=0.65]^2)
lines(c3.x1, x2[x2<=0.65], lty = 2)
plot(lhs.in, col = "red", pch = 16, cex = 0.75,
     xlim = c(0.3,0.8), ylim = c(0.35,0.55), xlab = "", ylab = "")
x2 <- seq(0,1,length.out = 1001)
c1.x1 <- sqrt(50 * (x2 - 0.52)^2 + 2) - 1
lines(c1.x1, x2, lty = 2)
c2.x1 <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75
lines(c2.x1, x2, lty = 2)
c3.x1 <- sqrt(0.65^2 - x2[x2<=0.65]^2)
lines(c3.x1, x2[x2<=0.65], lty = 2)
# doubling the LHS budget roughly doubles the feasible count.
lhs <- randomLHS(9000, 2)
lhs.gval <- t(apply(lhs, 1, constraint))
lhs.out.idx <- apply(lhs.gval, 1, function(x) return(any(x>0)))
lhs.out <- lhs[lhs.out.idx,]
lhs.in <- lhs[!lhs.out.idx,]
nrow(lhs.in) # 31
# scmc: sequentially-constrained Monte Carlo with a temperature ladder tau.
setwd("~/gatech/research/sampling/CoMinED/scripts/")
source("lib.R")
set.seed(20210320)
tau <- c(0,exp(c(1:7)),1e6)
samp <- scmc(500, 2, tau, constraint, auto.scale = F, return.all = T)
# visualization
samp.all <- samp$samp.all
nrow(samp.all) # 4500
plot(samp.all[1:500,], col = "red", pch = 16, cex = 1,
     xlim = c(0,1), ylim = c(0,1),xlab = "", ylab = "")
points(samp.all[501:nrow(samp.all),], col = "green", pch = 18, cex = 0.75)
x2 <- seq(0,1,length.out = 1001)
c1.x1 <- sqrt(50 * (x2 - 0.52)^2 + 2) - 1
lines(c1.x1, x2, lty = 2)
c2.x1 <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75
lines(c2.x1, x2, lty = 2)
c3.x1 <- sqrt(0.65^2 - x2[x2<=0.65]^2)
lines(c3.x1, x2[x2<=0.65], lty = 2)
# feasible sample only: SCMC finds far more feasible points than LHS.
candF <- samp$samp.feasible
nrow(candF) # 2246
plot(candF, col = "red", pch = 16, cex = 0.75,
     xlim = c(0.3,0.8), ylim = c(0.35,0.55), xlab = "", ylab = "")
x2 <- seq(0,1,length.out = 1001)
c1.x1 <- sqrt(50 * (x2 - 0.52)^2 + 2) - 1
lines(c1.x1, x2, lty = 2)
c2.x1 <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75
lines(c2.x1, x2, lty = 2)
c3.x1 <- sqrt(0.65^2 - x2[x2<=0.65]^2)
lines(c3.x1, x2[x2<=0.65], lty = 2)
#############################################################################
# Adaptive Lattice Grid Refinement
# One Step ALGR Versus Maximin LHDs Augmentation
library(mined)
n <- 53
p <- 2
n.aug <- 11
ini <- Lattice(n,p)  # initial lattice design (mined::Lattice)
# ALGR: augment by midpoints/reflections of each point's n.aug nearest neighbors.
set.seed(20210320)
min.dist <- min(dist(ini))/2
# Number of decimal places used to round when detecting duplicate points.
no.decimal <- attr(regexpr("(?<=\\.)0+", min.dist, perl = TRUE), "match.length") + 2
ini.dist <- as.matrix(dist(ini))
aug <- NULL
for (i in 1:n){
  nn.idx <- order(ini.dist[i,])[2:(n.aug+1)]
  # midpoint between point i and each neighbour, plus the reflection beyond.
  aug <- rbind(aug, 0.5*(ini[nn.idx,]+rep(1,n.aug)%*%t(ini[i,])))
  aug <- rbind(aug, 0.5*(3*ini[nn.idx,]-rep(1,n.aug)%*%t(ini[i,])))
}
# remove repeated samples
aug.rep <- round(aug, digits = no.decimal)
aug.dp <- duplicated(aug.rep)
aug <- aug[!aug.dp,]
# remove samples outside of boundary
aug.out <- apply(aug, 1, function(x) (any(x<0)|any(x>1)))
aug <- aug[!aug.out,]
# remove samples in candidate set already
no.aug <- nrow(aug)
all.rep <- rbind(aug, ini)
all.rep <- round(all.rep, digits = no.decimal)
all.rep.dp <- duplicated(all.rep, fromLast = TRUE)
aug <- aug[!(all.rep.dp[1:no.aug]),]
# visualization
plot(ini, col = "red", pch = 16, cex = 1, xlim = c(0,1), ylim = c(0,1),
     xlab = "", ylab = "")
points(aug, col = "green", pch = 18, cex = 1)
nrow(aug) # 163
min(dist(rbind(ini,aug))) # 0.06868028
# maximin LHDs: augment each design point with a scaled maximin LHD cloud.
set.seed(20210320)
library(lhs)
D.aug <- maximinLHS(n.aug, p)
D.aug <- 2 * (D.aug - 0.5)  # rescale to [-1, 1]
ini.dist <- as.matrix(dist(ini))
diag(ini.dist) <- NA
rads <- apply(ini.dist, 1, min, na.rm=T) / sqrt(p)  # per-point radius
aug <- NULL
for (i in 1:n){
  # random column permutation of D.aug gives a different cloud orientation.
  aug <- rbind(aug, (rep(1,n.aug)%*%t(ini[i,])+rads[i]*D.aug[,sample(1:p,p)]))
}
# visualization — note the much smaller minimum distance vs ALGR.
plot(ini, col = "red", pch = 16, cex = 1, xlim = c(0,1), ylim = c(0,1),
     xlab = "", ylab = "")
points(aug, col = "green", pch = 18, cex = 1)
nrow(aug) # 583
min(dist(rbind(ini,aug))) # 0.00734764
# FULL Approach — reset the workspace before the CoMinED demo below.
rm(list = ls())
constraint <- function(x){
  ## Inequality constraints of the motivating example, re-declared after the
  ## workspace reset; a point x = (x1, x2) is feasible iff all values <= 0.
  x1 <- x[1]; x2 <- x[2]
  out <- numeric(3)
  out[1] <- x1 - sqrt(50 * (x2 - 0.52)^2 + 2) + 1
  out[2] <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75 - x1
  out[3] <- 0.65^2 - x1^2 - x2^2
  out
}
# Precompute the three constraint boundary curves (one row per constraint)
# so comined() can overlay them during visualisation.
x1 <- x2 <- matrix(NA, nrow = 3, ncol = 1001)
x2.seq <- seq(0,1,length.out = 1001)
x2[1,] <- x2.seq
x1[1,] <- sqrt(50 * (x2.seq - 0.52)^2 + 2) - 1
x2[2,] <- x2.seq
x1[2,] <- sqrt(120 * (x2.seq - 0.48)^2 + 1) - 0.75
x2[3,] <- x2.seq
x1[3,] <- sqrt(0.65^2 - x2.seq^2)
contour <- list(x1 = x1, x2 = x2)
setwd("~/gatech/research/sampling/CoMinED/scripts/")
source("lib.R")
set.seed(20210320)
# Temperature ladder for the constrained sampling.
tau <- c(0,exp(c(1:7)),1e6)
output <- comined(n = 53, p = 2, tau = tau, constraint = constraint,
                  n.aug = 5, auto.scale = F, s = 2, visualization = T,
                  visualization.params = list(unit.scale=TRUE,contour=contour))
min(dist(output$cand)) # 0.0004126795
nrow(output$cand) # 2155
# visualization: initial candidates (red) vs augmented points (green).
plot(output$cand[1:263,], col = "red", pch = 16, cex = 1,
     xlim = c(0,1), ylim = c(0,1),xlab = "", ylab = "")
points(output$cand[264:nrow(output$cand),], col = "green", pch = 18, cex = 0.75)
x2 <- seq(0,1,length.out = 1001)
c1.x1 <- sqrt(50 * (x2 - 0.52)^2 + 2) - 1
lines(c1.x1, x2, lty = 2)
c2.x1 <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75
lines(c2.x1, x2, lty = 2)
c3.x1 <- sqrt(0.65^2 - x2[x2<=0.65]^2)
lines(c3.x1, x2[x2<=0.65], lty = 2)
# feasible sample only
candF <- output$cand[output$feasible.idx,]
nrow(candF) # 915
plot(candF, col = "red", pch = 16, cex = 0.75,
     xlim = c(0.3,0.8), ylim = c(0.35,0.55), xlab = "", ylab = "")
x2 <- seq(0,1,length.out = 1001)
c1.x1 <- sqrt(50 * (x2 - 0.52)^2 + 2) - 1
lines(c1.x1, x2, lty = 2)
c2.x1 <- sqrt(120 * (x2 - 0.48)^2 + 1) - 0.75
lines(c2.x1, x2, lty = 2)
c3.x1 <- sqrt(0.65^2 - x2[x2<=0.65]^2)
lines(c3.x1, x2[x2<=0.65], lty = 2)
|
48fbe55e47d57df55b82c27b54b0989132467d3d
|
f1721111e077d9e5d14b4fe8f40f6baa33308fcb
|
/tests/testthat/test_waterfallplot.R
|
bc63d20b38b3c5f3f34b4c7f43eb7e870e00594c
|
[] |
no_license
|
LabNeuroCogDevel/LNCDR
|
6d71d98c36a42ebef3479b9acc183680d0d0bb8d
|
f9944ce2ca03c38476975b59b0edb458d65ee227
|
refs/heads/master
| 2023-04-27T05:01:26.259112
| 2023-04-18T19:12:33
| 2023-04-18T19:12:33
| 41,372,116
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 464
|
r
|
test_waterfallplot.R
|
context("waterfall_plot")
test_that("group data", {
d <- data.frame(age=c(10, 20, 25, 13, 14, 10),
id=c(100, 100, 100, 200, 200, 300))
dg <- waterfall_group(d)
expect_equal(dg$age_id, c(1, 1, 1, 3, 3, 2))
expect_equal(dg$minage, c(10, 10, 10, 13, 13, 10))
})
test_that("plot", {
d <- data.frame(age=c(10, 20, 25, 13, 14, 10),
id=c(100, 100, 100, 200, 200, 300))
p <- waterfall_plot(d)
expect_true(!is.null(p))
})
|
d77b0f1e8f5f0e526defa9f276cc64c4c167eb2c
|
793a67eabb0af53f20a88e6dfc24227ba65673f8
|
/townMapGenerator.R
|
c727bbaf93e076bee8164669583fd4505fcc422a
|
[] |
no_license
|
kikichang/TWmap
|
2478a4af15e35248b7c2b83907de39b4793ed21a
|
9d5c420ffa282a0c9b15c890cc34536236c86215
|
refs/heads/master
| 2020-07-22T07:24:16.002853
| 2016-11-15T16:23:06
| 2016-11-15T16:23:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,750
|
r
|
townMapGenerator.R
|
#townMapGenerator.R
#160830
#Kiki Chang(ychang64@jhu.edu)
#This program generates a chorepleth map of the selected city/county
#using functions & vars from map.R and townMapVars.RData.
#
#Note: town_test.xlsx are provided as sample files for creating township map.
#The map is provided by Ministry of Transportation (src: https://gist-map.motc.gov.tw/Complex/MapTopic )
#TODO: might need to manually run gpclibPermit() on the console to start the program
#e.g.
#townMapGenerator(filePath, "台北市", "3 4", 6, 30, 60, FALSE, "指數")
#townMapGenerator(filePath, "全台", "3 4", 6, 180, 150, FALSE,"指數")
#----------------------------------------------------------------------------#
# Script setup: switch the working directory to this file's location, then
# load the plotting helpers and pre-built map objects.
# NOTE(review): parent.frame(2)$ofile is only set when the file is run via
# source() (e.g. RStudio's Source button); it is NULL under Rscript — confirm
# the intended invocation. setwd() inside a script is generally discouraged.
dir <- dirname(parent.frame(2)$ofile)
setwd(dir)
source('map.R')
load("townMapVars.RData", .GlobalEnv)
# townMapGenerator: draw a choropleth map of township-level data for one
# city/county (or all of Taiwan) and save it to a file.
#
# Arguments:
#   filePath  - full path name of the data file
#   cityName  - city/county name, or "全台" for all of Taiwan
#   color     - string of one or two color numbers (e.g. "5", "3 6");
#               (1)red (2)orange (3)green (4)blue (5)purple (6)grey
#   nI        - number of intervals (integer between 1-8)
#   w         - interval width (positive number, > 0)
#   origin    - starting value of the first interval (default 0)
#   percent   - label data with a percent sign "%" (default TRUE)
#   legend    - legend title (default "得票率")
#   text      - text size (default 3)
#   extension - output file extension (default ".svg")
#
# Relies on helpers/globals from map.R and townMapVars.RData: myMap, allTown,
# chooseFile, storeTownData, setSgColRange, setDbColRange, setLabel, graphArea.
townMapGenerator <- function(filePath, cityName, color, nI, w, origin = 0, percent = TRUE, legend = "得票率", text = 3, extension = ".svg") {
  # Check that every required argument was supplied.
  # BUG FIX: the old check did `x <- as.name(arg); missing(x)`, but missing()
  # only inspects formal arguments, so it was always FALSE for the local `x`
  # and the check could never fire. Evaluating missing(<formal>) by name in
  # this frame works as intended.
  required <- c("filePath", "cityName", "color", "nI", "w")
  for (arg in required) {
    if (eval(bquote(missing(.(as.name(arg)))))) {
      stop(paste(arg, "not specified."))
    }
    print(paste(arg, "is specified."))
  }
  # Split the color string (e.g. "3 6") into numeric color codes.
  cNum <- as.numeric(strsplit(color, split = " ")[[1]])
  # Subset the map for the selected county and flag whether we plot all towns.
  # NOTE(review): these use <<- and mutate globals consumed by map.R helpers;
  # kept as-is to preserve that contract.
  if (cityName != "全台") {
    # Normalize the "Tai" character to the form used in official county names.
    cityName <- sub("台", "臺", cityName)
    myMap <<- subset(myMap, countyname == cityName) # update myMap (global var)
    allTown <<- FALSE
  } else {
    allTown <<- TRUE
  }
  # Choose data from file, then store it for plotting.
  chooseFile(filePath)
  storeTownData()
  # Set range and label (setLabel defaults: numDigit = 4, isPercent = TRUE).
  # Single color -> single-color range; two colors -> double-color range.
  # (Replaced side-effect ifelse() with a plain if/else.)
  if (length(cNum) == 1) {
    setSgColRange(nI, w, origin)
  } else {
    setDbColRange(nI, w, origin)
  }
  setLabel(cNum, nI, w, origin, isPercent = percent)
  # Graph (defaults: legendName = "得票率", isVillage = FALSE, textSize = 3,
  # ext = ".svg", showName = TRUE, isTown = FALSE).
  if (cityName != "全台") {
    graphArea(cNum, nI, isTown = TRUE, legendName = legend) # towns in the selected city/county
  } else {
    graphArea(cNum, nI, showName = FALSE, isTown = TRUE, legendName = legend) # all ~300 towns in Taiwan
  }
}
|
7981a1aa586b432cda3cd2f4f8080b5f8fc93641
|
09703e02e04104a1b4e5940dfc6bacea6102bbad
|
/R/cachem.R
|
740ddd33cae170844e9fe75dac0f082abca72efa
|
[
"MIT"
] |
permissive
|
isabella232/cachem
|
52bd23ccc6165c865b433c4f8d876d17b2adf1d8
|
cfb26725bcd997864f8cdb091e6c273fe7f5a44b
|
refs/heads/master
| 2023-07-09T06:03:10.673449
| 2021-07-06T16:04:04
| 2021-07-06T16:04:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
cachem.R
|
#' Format a cachem object for printing
#'
#' Builds a multi-line summary: the object's class vector in angle brackets
#' on the first line, then an indented listing of its elements produced by
#' \code{format_methods()}.
#'
#' @param x A cachem object (a named list whose elements may be functions).
#' @param ... Ignored; accepted for compatibility with the \code{format()} generic.
#' @return A single character string (not printed; see \code{print.cachem}).
#' @export
format.cachem <- function(x, ...) {
  paste0(
    paste0("<", class(x), ">", collapse= " "), "\n",
    "  Methods:\n",
    paste0(
      " ", format_methods(x),
      collapse ="\n"
    )
  )
}
# Describe every element of `x`: function elements render as
# "name(arg1, arg2 = default)" via format_args(); anything else renders
# as just its name. Returns a character vector, one entry per element.
format_methods <- function(x) {
  describe_one <- function(idx) {
    nm <- names(x)[idx]
    member <- x[[idx]]
    if (!is.function(member)) {
      return(nm)
    }
    paste0(nm, "(", format_args(member), ")")
  }
  vapply(seq_along(x), describe_one, character(1))
}
# Render a function's formal arguments as a single string such as
# "a, b = 2, ...". Arguments without a default appear as bare names.
# A zero-argument function yields "".
format_args <- function(x) {
  nms <- names(formals(x))
  vals <- as.character(formals(x))
  # Elementwise: bare name when there is no default, "name = default" otherwise.
  shown <- ifelse(vals == "", nms, paste0(nms, " = ", vals))
  paste(shown, collapse = ", ")
}
#' Print a cachem object
#'
#' Formats the object via the format() generic (dispatching to
#' format.cachem) and writes the result to the console.
#'
#' @param x A cachem object.
#' @param ... Passed through to format().
#' @export
print.cachem <- function(x, ...) {
  rendered <- format(x, ...)
  cat(rendered)
}
|
50b189deac061511e9af50ea5ca31bd5ca170af1
|
54589dbe24c676c9bb573a0486fb2fdded99c77b
|
/3-real-data-cleaning.R
|
dcbc9e26b53ef42c4e6c7e3037a3fd6f28933984
|
[] |
no_license
|
yliu433/r-spsp
|
15769bb6caff7c644eeb922b5bc1c49880fa16f3
|
763d0b61bfb658808d9962ca08c63ba8c0123922
|
refs/heads/master
| 2021-01-12T00:13:47.265870
| 2017-07-07T00:29:49
| 2017-07-07T00:29:49
| 78,691,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,850
|
r
|
3-real-data-cleaning.R
|
# load GBM gene expression data ———
# Two Agilent expression matrices for TCGA GBM, downloaded from Xena.
# Rows are genes ("sample" column = gene symbol), columns are samples.
ge2 <- read.table("https://tcga.xenahubs.net/download/TCGA.GBM.sampleMap/AgilentG4502A_07_1",
header = T,sep="\t",fill = T,quote = "",stringsAsFactors = F) # 17814 by 103
ge3 <- read.table("https://tcga.xenahubs.net/download/TCGA.GBM.sampleMap/AgilentG4502A_07_2",
header = T,sep="\t",fill = T,quote = "",stringsAsFactors = F) # 17814 by 484
# Sanity checks: both files cover the same genes; exactly one sample overlaps.
identical(sort(ge2[,1]),sort(ge3[,1])) ## TRUE
intersect(colnames(ge2),colnames(ge3)) ## "TCGA.06.0216.01"
# Align ge3's gene order to ge2's.
ge3 <- ge3[match(ge2[,1],ge3[,1]),]
# Expression of the shared sample in each file (kept for reference).
ge21 <- ge2[,"TCGA.06.0216.01"]
ge31 <- ge3[,"TCGA.06.0216.01"]
# Transpose to samples-by-genes matrices with gene symbols as colnames.
ge22 <- ge2[,-1];rownames(ge22) <- ge2$sample;ge22 <- t(ge22)
ge32 <- ge3[,-1];rownames(ge32) <- ge3$sample;ge32 <- t(ge32)
# Sample lists; drop the duplicated sample from the test set.
sam2 <- (rownames(ge22)) #
sam2 <- sam2[sam2!=("TCGA.06.0216.01")]
sam3 <- (rownames(ge32)) #
# Training predictors: all genes except the response gene TP53.
Xall <- ge32[,colnames(ge32)!="TP53"]
## use the second data for testing
expY_test <- ge22[,"TP53"]
Xall_test <- ge22[match(sam2,rownames(ge22)),]
# NOTE(review): `pheno2` (below) and `expY` (used for Yall) are never defined
# in this script — they presumably come from an earlier session/script; this
# file will not run standalone. Confirm upstream definitions.
identical(as.character(pheno2$sampleID[match(sam3,pheno2$sampleID)]),
as.character(rownames(Xall)))
Yall <- log(expY)
## center the reponse and standardize the predictors
n <- length(Yall) ### 370
Yc <- Yall-mean(Yall)
# scale() uses sd with denominator (n-1); rescale so each column has
# mean square 1 (checked just below).
Xs <- apply(Xall,2,scale) * sqrt(n/(n-1))
sum(Xs[,1]^2)/n
# Marginal correlation of every gene with the centered response.
corxy <- apply(Xs,2,function(x){cor(x,Yc)})
# Keep the 1000 genes most correlated (in absolute value) with the response.
Xall1000 <- Xall[,order(abs(corxy),decreasing = TRUE)[1:1000]]
Xs1000 <- Xs[,order(abs(corxy),decreasing = TRUE)[1:1000]]
Y0_test <- log(expY_test)
X0_test <- Xall_test[,order(abs(corxy),decreasing = TRUE)[1:1000]]
identical(colnames(X0_test),colnames(Xs1000))
# Standardize the test set using the TRAINING means/sds (no leakage).
meanx <- apply(Xall1000,2,mean)
sdx <- apply(Xall1000,2,sd)/sqrt(n/(n-1))
Y_test <- Y0_test-mean(Yall)
X_test_tmp <- sweep(X0_test,2,meanx,"-")
X_test <- sweep(X_test_tmp,2,sdx,"/")
save(Yc,Xs1000,X_test,Y_test,file="./gbm.Rdata")
|
78484fa466bdc53a1be48e8704be837071945652
|
e0a398f6441a1102c16551473eec367d7c2ff960
|
/man/global.impact.Rd
|
68c2644b7565cb04d3a48afac5599d8f0689426d
|
[] |
no_license
|
ciaranodriscoll/networktools
|
254b8bc6c06878bcb48828305b715ab25ec1c030
|
74555e904a0e35d87f6dcd69ea544ec3a83f8ea1
|
refs/heads/master
| 2020-03-27T05:03:21.010259
| 2018-06-06T19:53:32
| 2018-06-06T19:53:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,704
|
rd
|
global.impact.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/global_impact.R
\name{global.impact}
\alias{global.impact}
\title{Global Strength Impact}
\usage{
global.impact(input, gamma, nodes = c("all"), binary.data = FALSE,
weighted = TRUE, split = c("median", "mean", "forceEqual", "cutEqual",
"quartiles"))
}
\arguments{
\item{input}{a matrix or data frame of observations (not a network/edgelist).
See included example datasets \code{\link{depression}} and \code{\link{social}}.}
\item{gamma}{the sparsity parameter used in generating networks. Defaults to 0.5 for interval data
and 0.25 for binary data}
\item{nodes}{indicates which nodes should be tested. Can be given
as a character string of desired nodes (e.g., c("node1","node2")) or as a numeric vector of
column numbers (e.g., c(1,2)).}
\item{binary.data}{logical. Indicates whether the input data is binary}
\item{weighted}{logical. Indicates whether resultant networks preserve edge weights or binarize edges.}
\item{split}{method by which to split network given non-binary data. "median": median split (excluding the median),
"mean": mean split, "forceEqual": creates equally sized groups by partitioning random median observations
to the smaller group, "cutEqual": creates equally sized groups by deleting random values
from the bigger group,"quartiles": uses the top and bottom quartile as groups}
}
\value{
\code{global.impact()} returns a list of class "\code{global.impact}" which contains:
\item{impact}{a named vector containing the global strength impact for each node tested}
\item{lo}{a named vector containing the global strength estimate for the lower half}
\item{hi}{a named vector containing the global strength estimate for the upper half}
}
\description{
Generates the global strength impact of each specified node. Global strength impact can be interpreted
as the degree to which the level of a node impacts the overall connectivity of the network
}
\details{
For an explanation of impact functions in general, see \code{\link{impact}}.
Global strength is defined as the sum of the absolute value
of all edges in the network, and is closely related to the concept of density (density is the
sum of edges not accounting for absolute values). Global strength impact measures to what degree
the global strength varies as a function of each node.
}
\examples{
out <- global.impact(depression[,1:3])
\donttest{
out1 <- global.impact(depression)
out2 <- global.impact(depression, gamma=0.65,
nodes=c("sleep_disturbance", "psychomotor_retardation"))
out3 <- global.impact(social, binary.data=TRUE)
out4 <- global.impact(social, nodes=c(1:6, 9), binary.data=TRUE)
summary(out1)
plot(out1)
}
}
|
c3613c8ad2162cec43c999030e1de0a2af2e2b24
|
579745aedfcaf14cef6f612dc508f90c9c54ef66
|
/SVM.R
|
8ee81e69628cd96893b0b497c49a2dffc60d6b3b
|
[] |
no_license
|
sahilbhange/Machine-Learning-Assignments-1
|
ede694fd0a91f84ff45ef23eb590a1d5a96f1e9f
|
aeee3542f3a9824e5f2f210e033f758302e33e57
|
refs/heads/master
| 2021-04-15T03:46:37.434189
| 2018-03-26T19:10:51
| 2018-03-26T19:10:51
| 126,873,819
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,493
|
r
|
SVM.R
|
# Compare linear regression vs. SVM regression (e1071) on the pollution
# dataset, then reproduce the e1071 Ozone demo (SVM vs. rpart).

# Load the pollution data via an interactive file picker.
pollution_dt <- read.csv(file.choose(), header = TRUE, sep = ",",fileEncoding="UTF-8-BOM")
View(pollution_dt)

# Run the linear regression to get the model.
# (Note: "x13++x14" parses as x13 + (+x14); the duplicated "+" is harmless.)
model1 = lm(y~x1+x2+x3+x4+x5+x6+x7+x8+x9+x10+x11+x12+x13++x14+x15, data=pollution_dt)

# Plot only Y values from the pollution dataset.
plot(pollution_dt$y,pch=16)
# Predict the Y values using the regression model built in the above step.
predY = predict(model1,pollution_dt)
# BUG FIX: this plot previously appeared BEFORE predY was created, which
# errors on a clean top-to-bottom run; it now follows the predict() call.
plot(predY,pch=16)
# Predicted Y values from the model (predY)
predY
# Original Y values from the Pollution dataset
View(pollution_dt$y)
# Overlay predicted Y values (blue crosses) on the active plot.
points(predY, col="Blue", pch=4)

# Root-mean-square error of a residual vector.
rmse <- function(error)
{
  sqrt(mean(error^2))
}
error1 = model1$residuals
# NOTE(review): despite the "MSE" in the name, this is an RMSE.
lrPredMSE = rmse(error1)
lrPredMSE

# SVM regression with the same predictors.
library(e1071)
model2 = svm(y~x1+x2+x3+x4+x5+x6+x7+x8+x9+x10+x11+x12+x13++x14+x15, pollution_dt)
PredY2 = predict(model2,pollution_dt)
# Plot only original Y values, then overlay the SVM predictions (red crosses).
plot(pollution_dt$y,pch=16)
points(PredY2, col="Red", pch=4)
error2 = pollution_dt$y - PredY2
svmPredRMSE = rmse(error2)
svmPredRMSE
lrPredMSE

# Grid-search over epsilon and cost to tune the SVM (see ?tune).
?tune
model3 = tune(svm,y~x1+x2+x3+x4+x5+x6+x7+x8+x9+x10+x11+x12+x13++x14+x15, data=pollution_dt, ranges=list(epsilon=seq(0,1,0.1),cost=seq(1,10,1)))
bestmodel = model3$best.model
# NOTE(review): predict.svm takes `newdata=`, not `data=`; `data=` is silently
# ignored here, so this returns fitted values on the training data — which is
# also what the plot below compares against, so behavior is kept as-is.
bestPred = predict(bestmodel,data=pollution_dt)
# Plot original Y values and overlay the tuned SVM predictions (maroon).
plot(pollution_dt$y,pch=16)
points(bestPred, col="maroon", pch=4)
best_error = pollution_dt$y - bestPred
best_RMSE = rmse(best_error)
best_RMSE
best_RMSE
svmPredRMSE
lrPredMSE

# e1071 Ozone demo: SVM vs. rpart on the mlbench Ozone data.
install.packages("mlbench")
library(e1071)
library(rpart)
library(mlbench)
data(Ozone, package="mlbench")
## split data into a train and test set
index <- seq_len(nrow(Ozone))
testindex <- sample(index, trunc(length(index)/3))
# Column 3 (the response position used below shifts after dropping it);
# rows with missing values are removed from both splits.
testset <- na.omit(Ozone[testindex,-3])
trainset <- na.omit(Ozone[-testindex,-3])
## svm
svm.model <- svm(V4 ~ ., data = trainset, cost = 1000, gamma = 0.0001)
svm.pred <- predict(svm.model, testset[,-3])
crossprod(svm.pred - testset[,3]) / length(testindex)
## rpart
rpart.model <- rpart(V4 ~ ., data = trainset)
rpart.pred <- predict(rpart.model, testset[,-3])
crossprod(rpart.pred - testset[,3]) / length(testindex)
|
0214351d2821d6277e64d7deb6264cff968d6545
|
aa5711d5be4e3dde1740aab48aee48a108adb5bc
|
/DataProducts/server.R
|
4adf9402f56ecb086d7285dfdacc7a5516e03bbe
|
[] |
no_license
|
jplooster/datasciencecoursera
|
b4cebecca2bab8f397ed72cc6ecfe8a2f5d37f48
|
522ecb5c17872f9edea15b405410ca8ab3696038
|
refs/heads/master
| 2021-01-23T13:29:50.086119
| 2014-08-19T07:08:48
| 2014-08-19T07:08:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,885
|
r
|
server.R
|
library(shiny)
# Black-Scholes prices for a European call and put.
#
# Arguments:
#   S     - spot price of the underlying
#   X     - strike price
#   rf    - continuously-compounded risk-free rate (per year)
#   T     - time to maturity in years
#   sigma - annualized volatility
#
# Returns a named numeric vector c(call = ..., put = ...), rounded to
# 4 decimal places.
blackscholes <- function(S, X, rf, T, sigma) {
  vol_sqrt_t <- sigma * sqrt(T)
  d1 <- (log(S / X) + (rf + sigma^2 / 2) * T) / vol_sqrt_t
  d2 <- d1 - vol_sqrt_t
  discounted_strike <- X * exp(-rf * T)
  call_price <- S * pnorm(d1) - discounted_strike * pnorm(d2)
  put_price <- discounted_strike * pnorm(-d2) - S * pnorm(-d1)
  round(c(call = call_price, put = put_price), 4)
}
# Shiny server: displays the current option inputs as a table and the
# Black-Scholes call/put prices computed from them.
shinyServer(
  function(input, output) {
    # Reactive snapshot of the user's inputs as a two-column data frame.
    # Spot and strike are each split across two widgets (S1 + S2, X1 + X2).
    input_summary <- reactive({
      labels <- c("Spot Price",
                  "Strike Price",
                  "Volatility",
                  "Risk Free Rate",
                  "Time to maturity")
      # c() coerces the date difference (days to maturity) to numeric.
      amounts <- c(input$S1 + input$S2,
                   input$X1 + input$X2,
                   input$vol,
                   input$rate,
                   input$date - Sys.Date())
      data.frame(Name = labels,
                 Value = as.character(amounts),
                 stringsAsFactors = FALSE)
    })

    output$values <- renderTable({
      input_summary()
    })

    # Price the option: rate and volatility arrive as percentages,
    # maturity as days (converted to years).
    output$prices <- renderPrint({
      blackscholes(S = input$S1 + input$S2,
                   X = input$X1 + input$X2,
                   rf = input$rate / 100,
                   T = as.numeric((input$date - Sys.Date()) / 365),
                   sigma = input$vol / 100)
    })
  }
)
|
e116dd8fa30030aaec5db38590ebe56494a41f72
|
70b5810e0a9e8193baf3dc83f183c07cdc67cd5f
|
/Mineria_datos_clasificacion_no_supervisada/trabajo_asociativas/Script1.R
|
dd15492d23343e772e66525d356e2ea4ccd0c34c
|
[] |
no_license
|
Nico-Cubero/master-datcom-2019-2020
|
11924c895ff52f91cbdb254d932ee6cecc20800d
|
e956799382b147aff385d62749bf915116893449
|
refs/heads/main
| 2023-06-08T09:17:40.930193
| 2021-06-13T11:54:11
| 2021-06-13T11:54:11
| 376,529,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,076
|
r
|
Script1.R
|
# Imported libraries (arules for discretize(); ggplot2 for the histograms).
library('arules')
library('ggplot2')
# Función para lectura del dataset
read.statlog.heart.dataset <- function(filename) {
dat <- read.csv(filename, sep=' ')
colnames(dat) <- c('age', 'sex', 'chest pain type',
'resting blood pressure', 'serum cholestoral',
'fasting blood sugar',
'resting electrocardiographic results',
'maximum heart rate achieved',
'exercise induced angina', 'oldpeak',
'slope of the peak exercise ST segment',
'number of major vessels', 'thal', 'heart dissease')
# Preprocesar el dataset para asignar los tipos de datos correctos
# Convertir age en integer
dat$age <- as.integer(dat$age)
# Convertir sex en factor
dat$sex <- factor(x=dat$sex, levels=c(1,0), labels=c('male','female'))
# Convertir chest pain type en factor
dat[,3] <- factor(dat[,3], levels=1:4, labels=c('typical angina',
'atypical angina',
'non-anginal pain',
'asymptomatic'))
# Convertir fasting blood sugar en factor binario
dat[,6] <- as.logical(dat[,6])
# Convertir resting electrocardiographic results en factores
dat[,7] <- factor(dat[,7], levels=0:2, labels=c('Normal',
'ST-T wave anormality',
'left ventricular hypertrophy'))
# Convertir exercise induced angina en factores binarios
dat[,9] <- as.logical(dat[,9])
# Convertir the slope of the peak exercise ST segment en factores
dat[,11] <- factor(dat[,11], levels=1:3, labels=c('Upsloping',
'Flat',
'Downsloping'))
# Convertir number of major vessels
dat[,12] <- as.integer(dat[,12])
# Convertir thal en factores
dat[,13] <- factor(dat[,13], levels=c(3,6,7), labels=c('Normal',
'Fixed defect',
'Reversable defect'))
# Convertir la clase en factor binaria
dat[,14] <- dat[,14] == 2.0
return(dat)
}
# Load the dataset.
heart <- read.statlog.heart.dataset('./heart.dat')
# Brief overview of the dataset.
head(heart)
summary(heart)
# Examine the location statistics of age.
summary(heart$age)
# Discretize the age attribute (ordered: Adult < Elderly, cut at 60).
heart[['age']] <- ordered(cut(heart[['age']],
c(29,60,+Inf),
labels=c('Adult', 'Elderly'),
right = F))
# Examine the location statistics of resting blood pressure.
summary(heart[['resting blood pressure']])
# Discretize resting blood pressure into standard clinical categories.
heart[['resting blood pressure']] <- ordered(
cut(heart[['resting blood pressure']],
c(94,120,130,140,+Inf),
labels=c('Normal', 'Elevated',
'Hypertension-stage1',
'Hypertension-stage2'),
right = F))
# Examine the location statistics of serum cholesterol.
summary(heart[['serum cholestoral']])
# Discretize serum cholesterol into three clinically-motivated levels.
heart[['serum cholestoral']] <- ordered(
cut(heart[['serum cholestoral']],
c(126,200,240,+Inf),
labels=c('Normal level',
'High level',
'Dangerous level'),
right = F))
# Examine the location statistics of maximum heart rate achieved.
summary(heart[['maximum heart rate achieved']])
# Inspect "maximum heart rate achieved" graphically (histogram saved to PDF).
pdf('maximum_heart_rate_achieved.pdf')
graf <- ggplot2::ggplot(data=heart, aes(x=`maximum heart rate achieved`)) +
ggplot2::geom_histogram(binwidth = 2, colour='white',
fill='coral3') +
ggplot2::ylab('Frecuencia absoluta')
graf
dev.off()
# Discretize into 4 equal-frequency intervals (arules::discretize).
heart[['maximum heart rate achieved']] <- discretize(
heart[['maximum heart rate achieved']],
method = 'frequency', breaks=4)
# Examine the location statistics of oldpeak.
summary(heart[['oldpeak']])
# Inspect the "oldpeak" attribute graphically (histogram saved to PDF).
pdf('oldpeak.pdf')
graf <- ggplot2::ggplot(data=heart, aes(x=oldpeak)) +
ggplot2::geom_histogram(binwidth = 0.1, colour='white',
fill='coral3') +
ggplot2::ylab('Frecuencia absoluta')
graf
dev.off()
# Discretize oldpeak into 3 equal-frequency intervals.
heart[['oldpeak']] <- discretize(
heart[['oldpeak']], method = 'frequency',
breaks=3)
# Convert exercise induced angina and heart disease to ordered factors
# (as required for association-rule mining downstream).
heart[['exercise induced angina']] <- factor(
heart[['exercise induced angina']], ordered = TRUE)
heart[['heart dissease']] <- factor(
heart[['heart dissease']], ordered = TRUE)
|
418e595db41d4fc283c78ab05b7bdc07cebf75af
|
f1606c44f5d1cdb4bdb26dcfb8fd4619cd49764c
|
/plot2.R
|
719e20642bd05fbe08775caaea9ed67b1736f3e0
|
[] |
no_license
|
flanclan4860/ExData_Plotting1
|
641c288f928080926aba880156958114fadda5f6
|
f18cc7e1f1fff3d708f5d208c1fc59aed9523287
|
refs/heads/master
| 2021-01-22T17:17:48.423895
| 2015-01-09T23:46:07
| 2015-01-09T23:46:07
| 28,944,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
r
|
plot2.R
|
# Exploratory Data Analysis Plot Assignment 1
# PLOT NUMBER 2
# Open the file
# NOTE(review): the png device is opened before the data are read; if a read
# fails, the device is left open (no dev.off() runs). Consider opening it
# just before plotting.
png(filename = "plot2.png", width=480, height=480)
# Read first 5 rows of file to get colClasses
# (speeds up the full read; assumes the first rows are representative).
first5rows <- read.table("household_power_consumption.txt", header = TRUE, sep=";",
nrows = 5)
classes <- sapply(first5rows, class)
# Read the file
DT <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
na.strings = "?", colClasses=classes)
# Convert date, time to POSIXlt
DT$DateTime <- paste(DT$Date, DT$Time)
DT$DateTime <- strptime(DT$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Subset for 2 day period 2/1 -2/2/2007 (date2 is exclusive upper bound)
date1 <- strptime("2007-02-01", "%Y-%m-%d")
date2 <- strptime("2007-02-03", "%Y-%m-%d")
plotdata <- na.omit(DT[(DT$DateTime >= date1) & (DT$DateTime < date2), ])
# Global Active Power Line Plot
plot(x=plotdata$DateTime, y=plotdata$Global_active_power, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
# Close the device
dev.off()
|
890856ccd52703a80130690ad6238c26d106d459
|
af06be512fbe0f94cbc89749d6c1da27e10724b9
|
/source/00_load-raw-data-and-clean.R
|
11105f00dae301e93ab6ba7e200e601eb8ff5632
|
[] |
no_license
|
keanarichards/stats-masters
|
f0fd80e2f7eec2fa57722c68aa0972de83360a7c
|
ffd79a42baff8ca46b40dd445a4a05bb2233fb39
|
refs/heads/master
| 2022-12-31T14:46:45.556745
| 2020-10-13T12:36:35
| 2020-10-13T12:36:35
| 262,153,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,154
|
r
|
00_load-raw-data-and-clean.R
|
# load packages -----------------------------------------------------------
## Package names
packages <- c("tidyverse", "reshape", "hablar", "here", "snakecase", "data.table", "conflicted")
## Install packages not yet installed
installed_packages <- packages %in% rownames(installed.packages())
if (any(installed_packages == FALSE)) {
install.packages(packages[!installed_packages])
}
## Packages loading
invisible(lapply(packages, library, character.only = TRUE))
# Pin the conflicted verbs used below: dplyr's select, reshape's rename
# (this script deliberately uses reshape::rename's named-vector interface).
select <- dplyr::select
rename <- reshape::rename
# load data ---------------------------------------------------------------
## note: downloaded from Qualtrics, made sure to export viewing order for randomized questions
raw <- read_csv(here("data", "raw.csv"))
# removing extra columns and row 2 --------------------------------------------------
raw <- raw %>%
slice(-2) %>%
select(-c(StartDate:UserLanguage, SC0, Condition))
# renaming columns --------------------------------------------------------
## first have to create sequences of numbers to use for identifying repeated measures
## creating empty data frame for entering sequences
# Each of the 16 measures repeats across 32 blocks of columns; y[, m] holds
# the column positions of measure m in `raw`.
# NOTE(review): this indexing assumes the Qualtrics export's column layout is
# exactly 16-wide blocks starting at column 3 — verify if the survey changes.
y <- data.frame(
matrix(
ncol = 16, nrow = 32,
dimnames = list(NULL, c(
"idvoice", "trust", "dom", "threat",
"intell", "comm", "prob", "conf", "trait_do", "team", "frnd",
"neighbr", "employee", "boss", "role_do", "thoughts"
))
)
)
## changing "to" argument for cols 15 and 16 for code to work
y[15] <- seq(from = 17, to = 513, by = 16)
y[16] <- seq(from = 18, to = 514, by = 16)
seq_length <- seq(from = 1, to = 14)
for (i in seq_length) {
y[i] <- seq(2 + i, to = 16 * 32, by = 16)
}
## renaming repeated measures
for (i in names(y)) {
names(raw)[y[, i]] <- i
}
## renaming other columns (reshape::rename takes a named vector old = "new")
raw <- raw %>% rename(
c(
Q286 = "age", Q288 = "educ", Q290 = "race",
Q290_7_TEXT = "race_other", Q292 = "sex",
Q294_1 = "residence", Q294_4_TEXT = "residence_other",
Q296_1 = "pol_ideology", Q298 = "pol_party",
Q298_4_TEXT = "pol_party_other", Q32 = "suspicion",
Q32_4_TEXT = "suspicion_other", Q93 = "mc_name",
Q82 = "mc_race_w", Q84 = "mc_race_b", Q236 = "feedback",
Q93_DO = "mc_name_do", Q54_DO = "b_race_name_do",
Q55_DO = "w_race_name_do", FL_3_DO = "cond_do",
FL_153_DO = "name1", FL_154_DO = "voice1",
FL_191_DO = "name2", FL_201_DO = "voice2",
FL_219_DO = "name3", FL_232_DO = "voice3",
FL_316_DO = "name4", FL_257_DO = "voice4",
MCrace_DO = "mc_race_q_do", MCrace2_DO = "race_name_q_do",
headphones_speakers_3_TEXT = "headphones_speakers_other",
Score = "mc_name_score"
)
)
# Prefix-rename the remaining Q54*/Q55* items by race of name.
names(raw) <- gsub("Q54", "b_race_name", names(raw))
names(raw) <- gsub("Q55", "w_race_name", names(raw))
# merging repeated columns to remove NAs ---------------------------------------------------------
# Duplicate measure names become idvoice, idvoice.1, ... so unite() can find
# them all by grep; each respondent saw only one repetition, so uniting with
# na.rm collapses the block to its single non-NA value per measure.
names(raw) <- make.unique(names(raw))
for (i in names(y)) {
raw[, i] <- raw %>%
unite(i, names(raw[grep(i, names(raw))]),
remove = T, na.rm = T
) %>%
select(i)
}
## removing extra columns
raw <- raw %>% select(-c(idvoice.1:thoughts.31))
## separating repeated measures
# Each united cell holds up to 4 "_"-separated condition values; split them
# back out into <measure>1 .. <measure>4 columns.
for (i in names(y)) {
raw[, paste0(i, 1:4)] <- raw %>%
separate(i, paste0(i, 1:4),
sep = "_", remove = T, extra = "drop"
) %>%
select(paste0(i, 1:4))
}
## removing extra columns
raw <- raw %>% select(-names(y))
# export names and descriptions -----------------------------------------------------------
## storing in DF called col_names_des
# Row 1 of `raw` still holds Qualtrics' question-text descriptions; pair it
# with the cleaned column names and save as a codebook, then append labels
# for the composite columns created later in this script.
col_names <- names(x = raw)
des <- unlist(raw[1, ], use.names = F)
col_names_des <- data.frame(cbind(col_names, des))
col_names_des <- col_names_des %>% add_row(
col_names = c(
"leadership_1", "leadership_2", "leadership_3", "leadership_4", "trust_rev1",
"trust_rev2", "trust_rev3", "trust_rev4", "threatpotential_1",
"threatpotential_2", "threatpotential_3", "threatpotential_4"
),
des = c(
"leadership composite for condition with Black name and high pitched voice",
"leadership composite for condition with Black name and low pitched voice",
"leadership composite for condition with White name and high pitched voice",
"leadership composite for condition with White name and low pitched voice",
"reverse-scored trustworthiness ratings for condition with Black name and high pitched voice",
"reverse-scored trustworthiness ratings for condition with Black name and low pitched voice",
"reverse-scored trustworthiness ratings for condition with White name and high pitched voice",
"reverse-scored trustworthiness ratings for condition with White name and low pitched voice",
"threat potential for condition with Black name and high pitched voice",
"threat potential for condition with Black name and low pitched voice",
"threat potential for condition with White name and high pitched voice",
"threat potential for condition with White name and low pitched voice"
)
)
write.csv(col_names_des, here("data", "vars-and-labels.csv"), row.names = F)
## removing extra row (description from qualtrics)
raw <- raw[-1, ]
# converting vars to numeric ----------------------------------------------
# hablar::retype() guesses and applies appropriate column types.
raw <- raw %>% retype()
# composite leadership --------------------------------------------------------------
## selecting relevant columns
s <- grep("^intell|^comm|^prob|^conf", names(raw))
## have to find the mean for each group within condition & repeat for each condition
# NOTE(review): stepping by 4 from s[i] assumes the 16 matched columns sit in
# four contiguous groups of 4 (intell1..4, comm1..4, ...) — confirm ordering.
# (seq(1:4) is equivalent to 1:4 here.)
for (i in seq(1:4)) {
raw <- raw %>% mutate(!!paste0("leadership", i) := rowMeans(raw[seq(from = s[i], to = s[length(s)], by = 4)], na.rm = T))
}
# composite threat potential ----------------------------------------------
## threat potential measure will be calculated by reverse-scoring the trustworthiness measure (100-trust scores)
## first reverse-scoring all four trust items
raw <- raw %>% mutate(
trust_rev1 = 100 - trust1,
trust_rev2 = 100 - trust2,
trust_rev3 = 100 - trust3,
trust_rev4 = 100 - trust4
)
## selecting relevant columns
# s[1:4] are the dom columns, s[5:8] the trust_rev columns just created;
# each composite averages dom_i with trust_rev_i.
s <- grep("^dom|^trust_rev", names(raw))
for (i in seq(1:4)) {
raw <- raw %>% mutate(!!paste0("threatpotential", i) := rowMeans(raw[c(s[i], s[i + 4])], na.rm = T))
}
# recoding display order vars --------------------------------------------
# Map Qualtrics flow-element IDs back to the stimulus shown: conditions 1-2
# used Black-associated names, 3-4 White-associated names; voices a-h are the
# randomized voice clips within each condition.
raw$name1 <- dplyr::recode(raw$name1, FL_171 = "Deshawn", FL_172 = "Tyrone", FL_182 = "Terrell", FL_183 = "Keyshawn")
raw$voice1 <- dplyr::recode(raw$voice1,
FL_155 = "a", FL_156 = "b", FL_157 = "c", FL_158 = "d",
FL_159 = "e", FL_160 = "f", FL_161 = "g", FL_162 = "h"
)
raw$name2 <- dplyr::recode(raw$name2, FL_193 = "Deshawn", FL_195 = "Tyrone", FL_197 = "Terrell", FL_199 = "Keyshawn")
raw$voice2 <- dplyr::recode(raw$voice2,
FL_202 = "a", FL_204 = "b", FL_205 = "c", FL_206 = "d",
FL_207 = "e", FL_208 = "f", FL_209 = "g", FL_210 = "h"
)
raw$name3 <- dplyr::recode(raw$name3, FL_220 = "Scott", FL_221 = "Brad", FL_222 = "Logan", FL_223 = "Brett")
raw$voice3 <- dplyr::recode(raw$voice3,
FL_233 = "a", FL_236 = "b", FL_238 = "c", FL_240 = "d",
FL_242 = "e", FL_244 = "f", FL_246 = "g", FL_248 = "h"
)
raw$name4 <- dplyr::recode(raw$name4, FL_317 = "Scott", FL_319 = "Brad", FL_321 = "Logan", FL_323 = "Brett")
raw$voice4 <- dplyr::recode(raw$voice4,
FL_258 = "a", FL_259 = "b", FL_260 = "c", FL_261 = "d",
FL_262 = "e", FL_263 = "f", FL_264 = "g", FL_265 = "h"
)
# long format -------------------------------------------------------------
raw[, "id"] <- seq(1, nrow(raw))
# Snake-case the repeated-measure names (idvoice1 -> idvoice_1) so the
# measure and condition index can be split on "_" below.
setnames(raw, old = raw %>% select(idvoice1:conf4, team1:boss4, leadership1:leadership4, name1:voice4, threatpotential1:threatpotential4) %>% names(), new = snakecase::to_any_case(raw %>% select(idvoice1:conf4, team1:boss4, leadership1:leadership4, name1:voice4, threatpotential1:threatpotential4) %>% names()))
# Wide -> long: one row per participant x condition.
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider(); kept as-is to preserve exact column ordering/behavior.
long <- raw %>%
gather(Column, Value, idvoice_1:conf_4, team_1:boss_4, leadership_1:leadership_4, name_1:voice_4, threatpotential_1:threatpotential_4) %>%
separate(Column, into = c("Column", "condition"), sep = "_") %>%
spread(Column, Value)
# Condition labels: B/W = Black/White name, H/L = high/low pitched voice.
long$condition <- dplyr::recode(long$condition, "1" = "BH", "2" = "BL", "3" = "WH", "4" = "WL")
## separating race and voice pitch variables to test & plot interaction effects
x <- do.call(rbind, strsplit(long$condition, ""))
long <- cbind(long, x)
long <- long %>% rename(c("1" = "cond_race", "2" = "cond_pitch"))
long$cond_pitch <- factor(long$cond_pitch)
levels(long$cond_pitch) <- c("High", "Low")
long$cond_race <- factor(long$cond_race)
levels(long$cond_race) <- c("Black", "White")
##changing var type
long <- long %>% retype()
## recoding condition & race vars
# Effect-coded (+/- 0.5) versions: Low pitch / Black name = -0.5.
long$cond_pitchC <- as.numeric(ifelse(long$cond_pitch == "Low",1,2)) - 1.5
long$cond_raceC <- as.numeric(ifelse(long$cond_race == "Black",1,2)) - 1.5
# export clean wide and long data -------------------------------------------------------
write.csv(raw, here("data", "wide.csv"), row.names = F)
write.csv(long, here("data", "long.csv"), row.names = F)
|
e09b0d24de233db4f95d38105b7b75242f7535cf
|
9c68af77a33dbbb2dace315bc0617d0ede1eaefe
|
/R/convert_pinyin.R
|
2166124ec784826750838af30a4d2048d1142691
|
[] |
no_license
|
qu-cheng/ChinSimi
|
fd59e62aae9324c7a683af824c1ec15b85a0569d
|
5bdcbe74d1480253774975a22491f9b4329ca6b5
|
refs/heads/master
| 2021-09-23T19:45:16.646394
| 2018-09-26T22:00:26
| 2018-09-26T22:00:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,929
|
r
|
convert_pinyin.R
|
#' Convert Chinese strings to pinyin.
#'
#' @param Chin.strs A character vector of Chinese strings to be converted.
#' @param method Whether the output should be toned or toneless.
#' @param multi Whether the output should contain more than one pinyin
#' @param sep Character used to separate different characters
#' @param parallel Whether or not use parallel calculation
#' @return pinyin of \code{Chin.strs}.
#' @note NOTE(review): this function reads a lookup object \code{pylib}
#'   (character -> comma-separated pinyin strings) as a free variable; it must
#'   exist in the package/global environment — confirm where it is defined.
#'   It also changes the process locale via Sys.setlocale() as a side effect.
#' @examples
#' ChStr2py(c("海上生明月","天涯共此时"))
ChStr2py <- function(Chin.strs = "", method = c("toneless", "tone"), multi = TRUE, sep = "_", parallel = FALSE)
{
method <- match.arg(method)
# Convert a string to pinyin.
# (NOTE(review): this inner helper shadows the outer function's name; all
# later references to ChStr2py inside this body resolve to the helper.)
ChStr2py <- function(Chin.str, pylib){
# Locale must be set so strsplit() splits multibyte Chinese characters correctly.
Sys.setlocale(category = 'LC_ALL', locale = 'chs')
if(is.na(Chin.str)) return(NA)
Chin.char <- unlist(strsplit(Chin.str, split = "")) # divide the string to characters
# convert a single character to pinyin
ChChar2Py <- function(Chin.char){
ChCharpy <- pylib[[Chin.char]]
# Characters not found in the lookup are passed through unchanged.
if(length(ChCharpy)==0){
ChCharpy <- Chin.char
}else{
# Tone digits 1-4 are stripped for the toneless output.
ChCharpy <- switch(method, tone = ChCharpy, toneless = gsub("[1-4]","", ChCharpy))
if(multi){
# Keep every candidate pronunciation, wrapped in brackets.
ChCharpy <- ifelse(grepl(",", ChCharpy), paste0("[", ChCharpy, "]"), ChCharpy)
} else {
# Keep only the first pronunciation (text before the first comma).
ChCharpy <- ifelse(grepl(",",ChCharpy), substr(ChCharpy, 1, gregexpr(pattern =',', ChCharpy)[[1]][1]-1), ChCharpy)
}
}
return(ChCharpy)
}
paste(sapply(Chin.char, ChChar2Py), collapse = sep)
}
# Use parallel computing to convert strings if parallel is TRUE
if(parallel)
{
no_cores <- parallel::detectCores() - 1 # use all but one of the available cores
cl <- parallel::makeCluster(no_cores) # Initiate cluster
pinyin <- parallel::parSapply(cl, X = Chin.strs, FUN = ChStr2py, pylib)
parallel::stopCluster(cl)
return(pinyin)
} else {
sapply(Chin.strs, ChStr2py, pylib)
}
}
|
b8787ccf6315a0dc1d9f20ffaa27c28f029d9815
|
3c45d4244992bf0f8df12d35c1eb305208e1097a
|
/main.R
|
18ccc6186f8eefbe6b849e71fc0217e636b0db4b
|
[] |
no_license
|
TristanMngr/mood_tweets
|
0e5ff45fe31b877b051c881df6033055f2475840
|
e28b8587f4c6f0c0d8b6d444012fc1df4713073e
|
refs/heads/master
| 2020-03-30T02:04:38.302726
| 2018-12-28T16:25:39
| 2018-12-28T16:25:39
| 150,611,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,162
|
r
|
main.R
|
# Setup: package installation, library loading, and analysis constants.
# NOTE(review): unconditional install.packages() re-downloads on every run;
# consider guarding each with a requireNamespace() check.
install.packages("twitteR")
install.packages("tidyverse")
install.packages("reshape")
# graph package
# BUG FIX: the line above was the bare text `graph package` (no leading #),
# which is a parse error in R; it was clearly intended as a comment.
install.packages("ggplot2")
# stringr and dplyr
library(reshape)
library(dplyr)
library(stringr)
library(twitteR)
library(lubridate)
library(ggplot2)

# Tweets are bucketed into fixed windows of this many days.
GROUP_DAYS_TWEETS <- 10
# Keyword lists used to classify a tweet's mood.
# NOTE(review): str_detect() later matches substrings, so "die" also matches
# "dies", "audience", etc. — word boundaries may be intended.
BAD_WORDS <- c("bad", "attack", "terrorist", "military", "disaster", "die", "problem", "failing",
               "fake", "nuclear", "deaths", "dramatic", "hostile", "deadly", "corruption")
GOOD_WORDS <- c("good","happy", "thanks", "victory", "great", "honor", "win", "love", "fantastic",
                "proud", "amazing", "excitement")
# Twitter's keys and tokens (placeholders — fill in with real credentials)
consumer_key <- "your_consumer_key"
consumer_secret <- "your_consumer_secret"
access_token <- "your_access_token"
access_secret <- "your_access_secret"
# oauth twitter (with plugin twitteR)
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# donald_trump user
donald_trump = getUser('realDonaldTrump')
# Fetch up to 3200 timeline tweets (the Twitter API maximum per user).
user_tweets = userTimeline(donald_trump, n=3200, maxID=NULL, sinceID=NULL, includeRts=TRUE,
excludeReplies=TRUE)
# transform object user_tweet into dataframe
tweets_df = twListToDF(user_tweets)
# Dataframe with column:
# - date
# - count (tweet with bad words)
# Tweets containing any BAD_WORDS substring, counted per GROUP_DAYS_TWEETS window.
bad_mood_df = tweets_df %>%
select(text, created) %>%
filter(str_detect(text, paste(BAD_WORDS, collapse = "|"))) %>%
group_by(date = floor_date(as.Date(created), paste(GROUP_DAYS_TWEETS, "days", sep = " "))) %>%
summarise(count_bad = n())
# Dataframe with column:
# - date
# - count (tweet with good words)
good_mood_df = tweets_df %>%
select(text, created) %>%
filter(str_detect(text, paste(GOOD_WORDS, collapse = "|"))) %>%
group_by(date = floor_date(as.Date(created), paste(GROUP_DAYS_TWEETS, "days", sep = " "))) %>%
summarise(count_good = n())
# get max and min date of all Donald Trump's tweets
min_date = min(tweets_df[, "created"])
max_date = max(tweets_df[, "created"])
# Create new DataFrame with column:
# - date
# - count (tweet with good word)
# - count (tweet with bad word)
# Full calendar of window start dates, joined with both counts.
all_mood_df = data.frame(date = seq(as.Date(min_date),as.Date(max_date), GROUP_DAYS_TWEETS)) %>%
right_join(bad_mood_df, by = 'date') %>%
right_join(good_mood_df, by = 'date')
# Replace all NA row by 0 and melt to get all values on one column
all_mood_df[is.na(all_mood_df)] = 0
all_mood_df = melt(all_mood_df, id=c("date"))
# PLOT !
ggplot(all_mood_df, aes(date, y = value, colour = variable)) +
geom_step(aes(y = value)) +
labs(title = "Mood Donald", y = "count tweets")
ggplot(all_mood_df, aes(date, y = value, colour = variable)) +
geom_point(aes(y = value)) +
labs(title = "Mood Donald", y = "count tweets")
ggplot(all_mood_df, aes(date, y = value, colour = variable)) +
geom_line(aes(y = value)) +
labs(title = "Mood Donald", y = "count tweets")
ggplot(all_mood_df, aes(date, weight = displ, fill = variable)) +
geom_bar(aes(weight = value)) +
labs(title = "Mood Donald", y = "count tweets")
# improvements:
# - more words
# - case sensitive
# - create method
# - separate files and sequencial => procedural (api call file, method file, main file)
# - tests
|
73c3239585b462a69b9020f3c10d5d42a6689da5
|
509cd023e3ec861db9e7f031e4fea77e58b148e0
|
/man/extract_moves.Rd
|
7512210b08c240976bdcb2ea6569cec57cfc5d43
|
[] |
no_license
|
JaseZiv/chessR
|
02f22289daae8037bd1b25676fc88a9eccea4493
|
7d6633c06d2f37f8a6875445f534c162b10d3d81
|
refs/heads/master
| 2023-08-16T21:25:32.385108
| 2022-12-14T10:15:23
| 2022-12-14T10:15:23
| 208,428,241
| 32
| 7
| null | 2023-08-20T23:01:51
| 2019-09-14T11:05:36
|
R
|
UTF-8
|
R
| false
| true
| 463
|
rd
|
extract_moves.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/moves.R
\name{extract_moves}
\alias{extract_moves}
\title{Extract moves from a game as a data.frame}
\usage{
extract_moves(moves_string)
}
\arguments{
\item{moves_string}{string containing moves built by `chessR` (e.g. from \url{https://www.chess.com/})
or the filename of a PGN file}
}
\value{
cleaned moves as a data.frame
}
\description{
Extract moves from a game as a data.frame
}
|
20fdd633b0c0133c7370b8b1878046f1979be9b9
|
0d3eaa66183310fad4dafa4611d4ab25162f46ab
|
/day2/test.R
|
1adc763d7c2035193588e1f46c08d3d8174d1409
|
[] |
no_license
|
Paul9615/linkplus_textmining
|
a7618edf740d972805499c7090777b7f391b00af
|
2a1f710362aee383beab4618209d7ead89cd14da
|
refs/heads/master
| 2022-07-17T18:32:56.590766
| 2020-02-15T14:22:47
| 2020-02-15T14:22:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 677
|
r
|
test.R
|
# Practice exercises: dplyr grouped summaries over the ggplot2 `mpg` data.
library(dplyr)
library(tidyverse)
library(ggplot2)
# Exercise ("문제" = "problem")
mpg <- as.data.frame(ggplot2::mpg)
str(mpg)
# Mean displacement per model, engines of 4 litres or less.
mpg_small <- mpg
mpg_small %>%
  select(model, displ) %>%
  group_by(model) %>%
  filter(displ <= 4) %>%
  summarise(m_dipl = mean(displ))
# Mean displacement per model, engines of 5 litres or more.
mpg_large <- mpg
mpg_large %>%
  select(model, displ) %>%
  group_by(model) %>%
  filter(displ >= 5) %>%
  summarise(m_dipl_5 = mean(displ))
# Mean city mileage for the audi and ford manufacturers only.
mpg_makers <- mpg
mpg_makers %>%
  select(manufacturer, cty) %>%
  group_by(manufacturer) %>%
  filter(manufacturer %in% c("audi", "ford")) %>%
  summarise(m_cty = mean(cty))
"
https://kaggle-kr.tistory.com/32?category=868318&fbclid=IwAR161mOOZql3jFHggS5-IxsVXFUBCfHSZmmeiewUguPAn2u9gPtQPsRLP6o
"
|
d2c189aad413f13cf3769935aa5264b35ef60a48
|
2728ef3e951c20fe5f21f3ef3041f36d9b5a2281
|
/Dropbox/rexamples/complete.R
|
20a42fc138b04d1a1045e47bdf054827fb8c691e
|
[] |
no_license
|
hypatiad/datasciencecoursera
|
784d0e3a510402684015ce448a40d1bdc6fb8680
|
74235e0d454535c1c101b4fd750c47b1bdd3813a
|
refs/heads/master
| 2021-01-01T18:37:30.698172
| 2014-09-11T17:58:02
| 2014-09-11T17:58:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 913
|
r
|
complete.R
|
complete <- function(directory, id = 1:332) {
  ## Count complete (NA-free) observations in a set of monitor CSV files.
  ##
  ## 'directory': character vector of length 1 giving the location of the
  ##   CSV files, which are named 001.csv, 002.csv, ...
  ## 'id': integer vector of monitor ID numbers to read.
  ##
  ## Returns a data frame of the form:
  ##   id nobs
  ##   1  117
  ##   2  1041
  ##   ...
  ## where 'id' is the monitor ID number and 'nobs' is the number of rows
  ## of that monitor's file with no missing value in any column.
  ##
  ## (Rewritten to preallocate via vapply() instead of growing `nobs` with
  ## c() inside a loop, and to build paths with file.path().)
  nobs <- vapply(id, function(i) {
    # IDs are zero-padded to three digits to match the file naming scheme.
    monitor_file <- file.path(directory, sprintf("%03d.csv", i))
    # complete.cases() flags rows with no NA; summing the logicals counts them.
    sum(complete.cases(read.csv(monitor_file)))
  }, numeric(1))
  data.frame(id, nobs)
}
##Alternative
#complete <- function(directory, id = 1:332) {
#f <- function(i) {
# data = read.csv(paste(directory, "/", sprintf('%03d',i), '.csv', sep = ""))
#sum(complete.cases(data))
# }
# nobs = sapply(id, f)
#return(data.frame(id, nobs))
|
9aa1122be9def07f8d01a799510453a0935c3c31
|
d2061a237532f631fde4eb2651093bed8593c403
|
/man/Impeach.Rd
|
955d886bae94dddd48b9374ba2d3f8abe74601f3
|
[] |
no_license
|
cran/sur
|
23fb12f03ea816e80e0b2fd0a1491f2ca81218dd
|
612cc5e2391a019ed157f90958d31f40e9466560
|
refs/heads/master
| 2020-12-22T19:13:21.454433
| 2020-08-25T21:30:02
| 2020-08-25T21:30:02
| 236,903,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,591
|
rd
|
Impeach.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-Impeach.R
\docType{data}
\name{Impeach}
\alias{Impeach}
\title{Clinton Impeachment Votes}
\format{A data frame with 100 rows and 11 variables:
\describe{
\item{name}{senator’s name}
\item{state}{state the senator represents}
\item{region}{geographic region of the U.S.}
\item{vote1}{vote on perjury}
\item{vote2}{vote on obstruction of justice}
\item{guilty}{total number of guilty votes}
\item{party}{political party of senator}
\item{conserva}{conservatism score, defined as the senator’s degree of ideological conservatism, based on 1997 voting records as judged by the American Conservative Union, where the scores ranged from 0 to 100 and 100 is most conservative}
\item{supportc}{state voter support for Clinton, defined as the percent of the vote Clinton received in the 1996 presidential election in the senator’s state}
\item{reelect}{year the senator’s seat is up for reelection}
\item{newbie}{indicator for whether the senator is in their first-term}
}}
\usage{
Impeach
}
\description{
On February 12, 1999, for only the second time in the nation’s history, the U.S. Senate voted on whether to remove a president, based on impeachment articles passed by the U.S. House. Professor Alan Reifman of Texas Tech University created the dataset consisting of descriptions of each senator that can be used to understand some of the reasons that the senators voted the way they did. The data are taken from the Journal of Statistics Education [online].
}
\keyword{datasets}
|
af35110e8a0898daebc4f71fb4eadf45de792b39
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BayesMallows/examples/generate_transitive_closure.Rd.R
|
0d14ed24233b293bf70ab5b14027c644ed9c9170
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,878
|
r
|
generate_transitive_closure.Rd.R
|
# Auto-extracted example script for BayesMallows::generate_transitive_closure
# (the "** Examples" section of its Rd help page). It demonstrates computing
# the transitive closure of pairwise preference data; the literal call forms
# are the point of the example, so the code is kept verbatim.
library(BayesMallows)
### Name: generate_transitive_closure
### Title: Generate Transitive Closure
### Aliases: generate_transitive_closure
### ** Examples
# Let us first consider a simple case with two assessors, where assessor 1
# prefers item 1 to item 2, and item 1 to item 5, while assessor 2 prefers
# item 3 to item 5. We then have the following dataframe of pairwise
# comparisons:
library(dplyr)
# Each row reads: `assessor` prefers `top_item` over `bottom_item`.
pair_comp <- tribble(
~assessor, ~bottom_item, ~top_item,
1, 2, 1,
1, 5, 1,
2, 5, 3
)
# We then generate the transitive closure of these preferences:
# (outer parentheses force the assigned result to print)
(pair_comp_tc <- generate_transitive_closure(pair_comp))
# In this case, no additional relations we implied by the ones
# stated in pair_comp, so pair_comp_tc has exactly the same rows
# as pair_comp.
# Now assume that assessor 1 also preferred item 5 to item 3, and
# that assessor 2 preferred item 4 to item 3.
pair_comp <- tribble(
~assessor, ~bottom_item, ~top_item,
1, 2, 1,
1, 5, 1,
1, 3, 5,
2, 5, 3,
2, 3, 4
)
# We generate the transitive closure again:
(pair_comp_tc <- generate_transitive_closure(pair_comp))
# We now have one implied relation for each assessor.
# For assessor 1, it is implied that 1 is preferred to 3.
# For assessor 2, it is implied that 4 is preferred to 5.
## Not run:
##D # If assessor 1 in addition preferred item 3 to item 1,
##D # the preferences would not be consistent. This is not yet supported by compute_mallows,
##D # so it causes an error message. It will be supported in a future release of the package.
##D # First, we add the inconsistent row to pair_comp
##D pair_comp <- bind_rows(pair_comp,
##D tibble(assessor = 1, bottom_item = 1, top_item = 3))
##D
##D # This causes an error message and prints out the problematic rankings:
##D (pair_comp_tc <- generate_transitive_closure(pair_comp))
## End(Not run)
|
1bd742736eb96a23d445d875a496f65c781aa517
|
82814f3de96f63ad0f94aa64b2123a4be13d918e
|
/man/fit.icc.regression.model.Rd
|
3b4616f0f6b31a75e5c107c116a1789a66a057f1
|
[] |
no_license
|
mattmoo/SteppedWedgeAnalysis
|
07bbee9c9d3d5affdeebce468a5269ead3f3230c
|
731812f367af8469766d022da58ecffcf3640d80
|
refs/heads/master
| 2020-12-05T04:04:51.041312
| 2020-01-13T18:52:43
| 2020-01-13T18:52:43
| 232,005,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 786
|
rd
|
fit.icc.regression.model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.helpers.R
\name{fit.icc.regression.model}
\alias{fit.icc.regression.model}
\title{Fits a regression model predicting ICC from provided variables.}
\usage{
fit.icc.regression.model(
icc.dt,
predictor.vars = NULL,
include.interactions = F,
model.type = c("lm", "loess")[1]
)
}
\arguments{
\item{icc.dt}{A data.table with the parameters of the various simulations and
ICC.}
\item{predictor.vars}{Calculate interaction effects, or just simple effects
(Default: F)}
\item{model.type}{Type of model to fit, currently supports linear ('lm') and
quadratic ('loess') (Default: 'lm')}
}
\value{
A linear model object.
}
\description{
Fits a regression model predicting ICC from provided variables.
}
|
ecf9c0c2c8300c38e6083f07fcd8e57611fefe5b
|
7d02198a16df914713b8e883562a2e4b419f1bcb
|
/hw08_script.R
|
e5326607216620f75b750521798cb895ac71494e
|
[] |
no_license
|
panders225/Categorical-Data-Analysis
|
0f7d308ace6158f6a2eaa81c22554434f355f1d6
|
776c499953626465dda920b686f047fb7a56cd0d
|
refs/heads/master
| 2021-01-24T07:12:37.295113
| 2017-07-22T12:31:33
| 2017-07-22T12:31:33
| 93,336,313
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,839
|
r
|
hw08_script.R
|
library("tidyverse")
# 8.2 -- hand calculations for a McNemar-style z statistic.
# Two-sided, then one-sided, upper-tail probability for z = 10.91.
2 * (1 - pnorm(10.91))
1 - pnorm(10.91)
# Standard-error expressions for the difference of dependent proportions.
# NOTE(review): the first expression's parenthesization puts a negative
# number under sqrt() (yields NaN with a warning) -- preserved as in the
# original worksheet scratch work.
sqrt(((132 + 107) - (132 - 107)^2) / 1144)
sqrt((132 + 107) - ((132 - 107)^2) / 1144) / 1144
sqrt((125 + 2) - ((125 - 2)^2) / 1120) / 1120
# Upper 5% critical value of the standard normal.
qnorm(p = 0.95)
# 8.6 -- McNemar's test for paired binary data.
# The off-diagonal counts (181 and 11) drive the continuity-corrected
# statistic; the diagonal cells are ignored.  (`byrow=T` changed to the
# non-reassignable `TRUE` constant.)
help("mcnemar.test")
mcnemar.test(matrix(c(114, 181, 11, 48), nrow = 2, byrow = TRUE))
help("mcnemar.test")
# 8.11
# read in some matrix functions
# (defines table2case(), makediags(), makeoffdiags() used throughout this
# script; note the absolute Windows path -- runs only on the author's machine)
source("C:/Users/Philip/Schools/TAMU/STAT_659/homework/git_hw/Categorical-Data-Analysis/square_matrix_functions.R")
# 4x4 square contingency table, entered row-wise
freq <- c(1228, 39, 2, 158, 100, 649, 1, 107, 1, 0, 54, 9, 73, 12, 4, 137)
relig <- matrix(freq
,nrow=4
,byrow=TRUE
,dimnames=list(first=c("1","2","3","4"),second=c("1","2","3","4")))
# convert the table to case form (one row per cell: Rows, Cols, Count)
relig_dat <- table2case(relig)
# independence model: Poisson loglinear fit with row and column main effects
relig_mod <- glm(Count ~ factor(Rows) + factor(Cols)
,family=poisson
,data=relig_dat
)
summary(relig_mod)
relig_dat
# chi-square upper-tail p-values for the reported test statistics (df = 3)
1 - pchisq(2.3, df=3)
1 - pchisq(148.3, df=3)
# 8.13D
# fit the marginal model: inspect the table and its marginal totals
relig
rowSums(relig)
colSums(relig)
relig_dat
# 8.14 -- interregional migration table: symmetry vs. quasi-symmetry.
freq <- c(425,17,80,36,10,555,74,47,7,34,771,33,5,14,29,452)
migration <- matrix(freq,nrow=4,byrow=TRUE,dimnames=list(first=c("ne","mw","so","we"),second=c("ne","mw","so","we")))
# table2case()/makediags()/makeoffdiags() come from the
# square_matrix_functions.R file source()'d near the top of this script.
mig_dat <- table2case(migration)
# get ready for the symmetry model
diags <- makediags(mig_dat)
symm <- makeoffdiags(mig_dat)
# fit the symmetry model
mig_mod <- glm(Count ~ diags + symm, family=poisson, data=mig_dat)
summary(mig_mod)
1 - pchisq(134.45, df=6)
# fit the quasi-symmetry model
# BUG FIX: the original passed `data=mig.data`, an object that is never
# defined (the case-form data frame is named `mig_dat`), so this glm()
# call errored at runtime.
mig_mod2 <- glm(Count ~ factor(Rows) + factor(Cols) + symm, family=poisson, data=mig_dat)
summary(mig_mod2)
1 - pchisq(6.984, df=3)
# compare the two fits via their deviances
deviance(mig_mod)
deviance(mig_mod2)
anova(mig_mod, mig_mod2)
1 - pchisq(127.47, df=3)
# 8.16 -- recycling data: symmetry, quasi-symmetry, ordinal quasi-symmetry.
# input the data
freq <- c(66,39,3,227,359,48,150,216,108)
cycle <- matrix(data = freq, nrow=3
, byrow=TRUE
, dimnames = list(chem=c("1", "2", "3"), recycle=c("1", "2", "3"))
)
# case form plus diagonal/symmetric-pair indicator factors (helpers come
# from square_matrix_functions.R, source()'d earlier in this script)
cycle_dat <- table2case(cycle)
diags=makediags(cycle_dat)
symm=makeoffdiags(cycle_dat)
#A - fit the symmetry model
cycle_mod1 <- glm(Count ~ diags + symm
,family=poisson
,data=cycle_dat)
summary(cycle_mod1)
1 - pchisq(445.23, df=3)
# Fit quasi-symmetry model
cycle_mod2 <- glm(Count ~ factor(Rows) + factor(Cols)+symm
,family=poisson
,data=cycle_dat
)
summary(cycle_mod2)
1 - pchisq(1.2266, df=1)
# fit the model of ordinal quasi-symmetry
# (Rows enters as a numeric linear term rather than a factor)
cycle_mod3 <- glm(Count ~ Rows + diags + symm
,family=poisson
,data=cycle_dat
)
summary(cycle_mod3)
1 - pchisq(2.4688, df=2)
# likelihood-ratio comparison of ordinal vs. full quasi-symmetry
anova(cycle_mod2, cycle_mod3)
1 - pchisq(2.4688 - 1.2266, df = 2 -1 )
# 8.17 -- pollution/car-size opinions, same trio of models
freq <- c(95,72,32,8,66,129,116,13,31,101,233,82,5,4,24,26)
pollute <- matrix(freq, nrow=4, byrow=TRUE
, dimnames=list(car=c("1", "2", "3", "4"), c("1", "2", "3", "4") ))
pollute_dat <- table2case(pollute)
# first fit the symmetry model to make sure that isnt appropriate
# (note: diags/symm are rebuilt here, overwriting the 8.16 versions)
diags=makediags(pollute_dat)
symm=makeoffdiags(pollute_dat)
pollute_mod1 <- glm(Count ~ diags + symm
,family=poisson
,data=pollute_dat)
summary(pollute_mod1)
1 - pchisq(40.577, df=6)
# ordinal quasi-symmetry
pollute_mod2 <- glm(Count ~ Rows + diags + symm
,family=poisson
,data=pollute_dat
)
summary(pollute_mod2)
1 - pchisq(27.489, df=5)
#quasi-symmetry
pollute_mod3 <- glm(Count ~ factor(Rows) + factor(Cols)+symm
,family=poisson
,data=pollute_dat
)
summary(pollute_mod3)
1 - pchisq(2.2273, df=3)
# 8.19 -- migration data again, now independence vs. quasi-independence
# re-bring in the migration data
freq=c(425,17,80,36,10,555,74,47,7,34,771,33,5,14,29,452)
migration=matrix(freq,nrow=4,byrow=TRUE,dimnames=list(first=c("ne","mw","so","we"),second=c("ne","mw","so","we")))
mig_dat <- table2case(migration)
# get ready for the symmetry model
diags <- makediags(mig_dat)
symm <- makeoffdiags(mig_dat)
# fit the independence model
mig_mod1 <- glm(Count ~ factor(Rows) + factor(Cols)
,family=poisson
,data=mig_dat
)
summary(mig_mod1)
# now fit the quasi-independence model
# (adds diagonal indicators so the diagonal cells are fit perfectly)
mig_mod2 <- glm(Count ~ factor(Rows) + factor(Cols)+diags
,family=poisson
,data=mig_dat
)
summary(mig_mod2)
1 - pchisq(9.7032, df=5)
# inspect residuals of both fits
reshape2::melt(mig_mod1$residuals)
plot(density(mig_mod1$residuals))
reshape2::melt(mig_mod2$residuals)
plot(density(mig_mod2$residuals))
# 8.20 -- neurologist-rating agreement data, four models compared
#input the data
freq <- c(38,5,0,1,33,11,3,0,10,14,5,6,3,7,3,10)
neuro <- matrix(freq, nrow=4, byrow=TRUE,
dimnames=list(A=c("1", "2", "3","4"), B=c("1", "2", "3", "4")))
neuro_dat <- table2case(neuro)
diags <- makediags(neuro_dat)
symm <- makeoffdiags(neuro_dat)
# first fit the independence model
neuro_mod1 <- glm(Count ~ factor(Rows) + factor(Cols)
,family=poisson
,data=neuro_dat
)
summary(neuro_mod1)
plot(density(neuro_mod1$residuals))
1 - pchisq(69, df=9)
# now fit the quasi-independence model
neuro_mod2 <- glm(Count ~ factor(Rows) + factor(Cols)+diags
,family=poisson
,data=neuro_dat
)
summary(neuro_mod2)
# quasi-symmetry model
neuro_mod3 <- glm(Count ~ factor(Rows) + factor(Cols)+symm
,family=poisson
,data=neuro_dat
)
summary(neuro_mod3)
1 - pchisq(6.184, df=3)
# ordinal quasi-symmetry model
neuro_mod4 <- glm(Count ~ Rows + diags + symm
,family=poisson
,data=neuro_dat
)
summary(neuro_mod4)
# Fit the Bradley-Terry model to the journal citation data.
# first arrange the data
library("BradleyTerry2")
data("citations", package = "BradleyTerry2")
# countsToBinomial() turns the square citation-count matrix into one row per
# journal pair, with win counts for each member of the pair.
citations.sf <- countsToBinomial(citations)
class(citations.sf)
names(citations.sf)[1:2] <- c("journal1", "journal2")
# BUG FIX: the original passed `data = drinks` here, but `drinks` is not
# defined until further below, so this call failed at runtime; the citation
# data prepared just above is what this first model fits.
cite_model <- BTm(cbind(win1, win2), journal1, journal2, ~ journal,
id = "journal", data = citations.sf)
cite_model
summary(cite_model)
# Small worked example: paired-comparison data for three soft drinks
# (column names reuse journal1/journal2 to match the BTm call above).
drinks <- data.frame(
journal1=as.factor(c("coke", "coke_classic", "coke", "pepsi"))
, journal2=as.factor(c("pepsi", "pepsi", "coke_classic", "coke"))
, win1=c(29,19,31, 0)
, win2=c(20,28,19, 0)
)
# drop the all-zero fourth comparison
drinks <- drinks[1:3 , ]
str(drinks)
cite_model <- BTm(cbind(win1, win2), journal1, journal2, ~ journal,
id = "journal", data = drinks)
cite_model
# 8.23 -- refit the citation model from scratch
data("citations", package = "BradleyTerry2")
#(citations <- t(citations))
(citations.sf <- countsToBinomial(citations))
names(citations.sf)[1:2] <- c("journal1", "journal2")
citations.sf
cite_model <- BTm(cbind(win1, win2), journal1, journal2, ~ journal,
id = "journal", data = citations.sf)
cite_model
# goodness-of-fit tail probability for the reported deviance
1 - pchisq(4.293, df=3)
|
c3f31761b8a7026a3f6ad71b5ab7f0776d5d4768
|
1f24d9b3358198d779a689dc19a960debd20ebb2
|
/R/packages.R
|
31eb7ce3b68276b5fe6d35f7adf9e62da5b294ba
|
[] |
no_license
|
ChanceNoff/Continuous-character-models
|
a5385786f317db08639bc88f2cd67d7ddc5c4e79
|
797820df77765f3b584950d3dbd85e9028b975f5
|
refs/heads/main
| 2023-04-15T12:01:09.106310
| 2021-05-07T03:16:10
| 2021-05-07T03:16:10
| 344,346,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 194
|
r
|
packages.R
|
# Package dependencies for the continuous-character-model workflow.
library(drake)
library(ape) #utility fns
library(geiger) #utility fns
library(OUwie)
library(devtools)
# rMyCoPortal is installed from GitHub (not CRAN); uncomment on first run:
#devtools::install_github('FranzKrah/rMyCoPortal')
library('rMyCoPortal')
library(tidyverse)
5d9dcbcd594170937ff47ae198741a1d3744afaf
|
d42bef58873857d5766955bb8af2dfec6d02d177
|
/practice_1_markdown.R
|
6d814f42bf98c0f23004aaf5e41bd391aaf7b8b5
|
[] |
no_license
|
veesta/practice_1
|
3fd483104e299704adfaddab281e12efa833a619
|
c0baec7c532a33d5e83a67ff91e282f29ef51664
|
refs/heads/master
| 2021-01-12T13:23:23.083285
| 2016-09-23T01:23:04
| 2016-09-23T01:23:04
| 68,973,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,173
|
r
|
practice_1_markdown.R
|
# 1_raw_to_analytic_data_practice_one_script.R
# Builds the analytic data set from the raw practice-1 CSV: codes the
# categorical variables, screens out-of-range item responses, reverse-keys
# negatively worded items, and computes per-scale mean scores.
library(tidyverse)
# load raw data, treating "", "NA" and -999 as missing
# (FIX: the original read the same file twice; the first read_csv() without
# the `na` argument was immediately overwritten and has been removed)
raw_data <- read_csv(file="practice1_analytic_data.csv", na=c("","NA","-999"))
#View (raw_data)
# tell R about categorical variables sex and major
categorical_variables <- select(raw_data, sex, major)
categorical_variables$sex <- as.factor(categorical_variables$sex)
levels(categorical_variables$sex) <- list("Male"=1, "Female"=2)
categorical_variables$major <- as.factor(categorical_variables$major)
levels(categorical_variables$major) <- list("Psychology"=1, "Sociology"=2, "Math"=3, "Engineering"=4, "Science"=5)
#View(raw_data)
# create your scaled item sets
self_esteem_items <- select(raw_data, SE1, SE2, SE3, SE4, SE5)
dep_items <- select(raw_data, D1, D2, D3, D4, D5)
job_sat_items <- select(raw_data, JS1, JS2, JS3, JS4, JS5)
#View (dep_items)
# check for impossible values: each scale has a fixed valid response range
# (self-esteem 1-7, depression 1-4, job satisfaction 1-6); anything outside
# the range is recoded to NA
psych::describe(self_esteem_items)
is_bad_value <- self_esteem_items<1 | self_esteem_items>7
self_esteem_items[is_bad_value] <- NA
psych::describe(self_esteem_items)
psych::describe(dep_items)
is_bad_value <- dep_items<1 | dep_items>4
dep_items[is_bad_value] <- NA
psych::describe(dep_items)
psych::describe(job_sat_items)
is_bad_value <- job_sat_items<1 | job_sat_items>6
job_sat_items[is_bad_value] <- NA
psych::describe(job_sat_items)
# deal with reverse keyed items: flip each reversed item about its scale
# midpoint, i.e. new = (max + 1) - old
#View(self_esteem_items)
self_esteem_items <- mutate(self_esteem_items,SE1=8-SE1)
#View(self_esteem_items)
dep_items <- mutate(dep_items, D4=5-D4)
dep_items <- mutate(dep_items, D5=5-D5)
#View(dep_items)
job_sat_items <- mutate(job_sat_items, JS1=7-JS1)
job_sat_items <- mutate(job_sat_items, JS2=7-JS2)
#View(job_sat_items)
# obtain scale mean scores (psych::alpha also reports reliability;
# check.keys=FALSE because items were reverse-keyed manually above)
self_esteem <- psych::alpha(as.data.frame(self_esteem_items), check.keys=FALSE)$scores
dep <- psych::alpha(as.data.frame(dep_items), check.keys=FALSE)$scores
job_sat <- psych::alpha(as.data.frame(job_sat_items), check.keys=FALSE)$scores
# combine into analytic_data
analytic_data <- cbind(categorical_variables,self_esteem,dep,job_sat)
analytic_data
|
41fbf41897f9ccb65399fe8310c748e71a82841b
|
d58b6bcfc847a1f8fafa5de52f308d6546a422ac
|
/2018_HAWA/1-1_Data_Management.R
|
0543779a9198b92242c6374409b7cc660d37209d
|
[
"MIT"
] |
permissive
|
baruuum/Replication_Code
|
814d25f873c6198971afcff62b6734d3847f38f6
|
23e4ec0e6df4cf3666784a0d18447452e3e79f97
|
refs/heads/master
| 2022-03-01T19:42:13.790846
| 2022-02-15T07:32:47
| 2022-02-15T07:32:47
| 138,830,546
| 9
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,064
|
r
|
1-1_Data_Management.R
|
################################################
################################################
#### HOW ARE WE APART ####
#### ####
#### DATA MANAGEMENT 1 ####
#### Preparing Data for DIF Model ####
#### ####
#### Last Update: 08/09/2016 ####
#### ####
################################################
################################################
# ---- Setup: load the ANES cumulative file and recode the 7-point items ----
# NOTE(review): rm(list=ls()) and setwd() are script-level conventions kept
# from the original replication code; `wd` must be edited before running.
rm(list=ls())
library(gdata)
library(foreign)
#set wd
wd <- "Working Directory Here"
if (getwd() != wd) setwd(wd)
wd.raw <- paste0(wd, '/raw_data')
setwd(wd.raw)
# get nes raw data
nes.raw <- read.table("anes_timeseries_cdf_rawdata.txt", header=T, sep="|")
setwd(wd)
# presidential years
years.measured <- c(1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012)
no.years <- length(years.measured)
# only years of presidential elections
nes.cum <- nes.raw[nes.raw$VCF0004 %in% years.measured,]
# exclude web-based studies
nes.cum <- nes.cum[nes.cum$VCF0017!=4,]
# variables to be scaled, in order: R-placement, Demparty, Repparty, Pres, Demcand, Repcand, pid
vars <- c('VCF0803', 'VCF0503', 'VCF0504', 'VCF9080', 'VCF9088', 'VCF9096', 'VCF0301')
no.vars <- length(vars)
# missing values: print the raw distributions, then recode 0 and >7
# (the non-substantive codes on these 7-point scales) to NA
for (v in vars) {
print(table(nes.cum[[v]]))
}
for (v in vars) {
nes.cum[[v]][nes.cum[[v]]==0] <- NA
nes.cum[[v]][nes.cum[[v]]> 7] <- NA
}
#rename vars
nes.cum <- rename.vars(nes.cum, c('VCF0006','VCF0004','VCF0803', 'VCF0503', 'VCF0504', 'VCF9080', 'VCF9088', 'VCF9096', 'VCF0301'),
c('id','year','self', 'dem.p', 'rep.p', 'pres', 'dem.cand', 'rep.cand','pid'))
cum.data <- nes.cum[,c('id','year','self', 'dem.p', 'rep.p', 'pres', 'dem.cand', 'rep.cand','pid')]
# summary df for years & candidates: one row per election year, giving the
# sitting president and the two major-party candidates (all stored as
# character since matrix() coerces the years to strings too)
sum.cands <- matrix(c(1972, 'Nixon', 'McGovern', 'Nixon', 1976,"Ford","Carter","Ford",1980,"Carter","Carter","Reagan",1984,"Reagan","Mondale","Reagan",1988,"Reagan","Dukakis","H.W.Bush",1992,"H.W.Bush","Clinton","H.W.Bush",1996,"Clinton","Clinton","Dole",2000,"Clinton","Gore","W.Bush",2004,"W.Bush","Kerry","W.Bush", 2008, 'W.Bush', 'Obama','McCain', 2012, 'Obama', 'Obama','Romney'),ncol=4, byrow=T)
colnames(sum.cands) <- c('year', 'pres', 'dem.cand', 'rep.cand')
# generating separate vars for parties in each year
# (dem.YYYY / rep.YYYY hold the party placements only for that year's rows)
for (y in years.measured) {
temp.str.dem <- paste('dem.',y, sep='')
temp.str.rep <- paste('rep.',y, sep='')
cum.data[[temp.str.dem]] <- cum.data$dem.p
cum.data[[temp.str.dem]][cum.data$year != y] <- NA
cum.data[[temp.str.rep]] <- cum.data$rep.p
cum.data[[temp.str.rep]][cum.data$year != y] <- NA
}
# generating separate vars for each figure: for every political figure,
# collect his placement from whichever role column (pres/dem.cand/rep.cand)
# he occupies in each year he appears in sum.cands
figs <- c('Nixon','McGovern','Ford', 'Carter', 'Reagan', 'Mondale', 'Dukakis', 'H.W.Bush', 'Clinton', 'Dole', 'Gore', 'W.Bush', 'Kerry', 'Obama', 'McCain','Romney')
for (f in figs) {
cum.data[[f]] <- rep(NA,dim(cum.data)[1])
ind <- which(sum.cands==f, arr.ind=T)
for (r in 1:dim(ind)[1]){
temp.year <- sum.cands[ind[r,][1],1]
temp.col <- colnames(sum.cands)[ind[r,][2]]
cum.data[[f]][cum.data$year==temp.year] <- cum.data[[temp.col]][cum.data$year==temp.year]
}
print(f)
print(rbind(table(cum.data[[f]], cum.data$year),colSums(table(cum.data[[f]], cum.data$year))))
}
# generate unique id & sort data set (unique = year*10000 + respondent id)
cum.data$unique <- cum.data$year*10000 + cum.data$id
cum.data <- cum.data[order(cum.data$unique), ]
############# Adding new variables of lib-cons placement ######
# Each year-specific block below follows the same recipe: read that year's
# ANES file, rename the relevant placement variables, build the same
# year*10000+id key, recode non-substantive values to NA, sanity-check
# against the cumulative file, and fold the new columns into cum.data.
# NOTE(review): the "assign into existing column" branches rely on the rows
# of cum.data and nes.m.YYYY lining up after sorting by `unique` -- the
# printed all.equal()/identical() checks are the guard for that assumption.
## 1972 ##
setwd(wd.raw)
# read 1972 data
nes.1972 <- read.dta('NES1972.dta', convert.factors=F)
# rename
nes.1972 <- rename.vars(nes.1972, c('V720002','V720655', 'V720652'), c('id', 'Wallace', 'self'))
# add year & unique id
nes.1972$year <- 1972
nes.1972$unique <- nes.1972$year*10000 + nes.1972$id
# missings
nes.1972$Wallace[nes.1972$Wallace == 0 | nes.1972$Wallace > 7] <- NA
nes.1972$self[nes.1972$self == 0 | nes.1972$self > 7] <- NA
# test: self-placement cross-tab must match the cumulative file's 1972 rows
identical(table(nes.1972$unique, nes.1972$self), table(cum.data$unique[cum.data$year==1972], cum.data$self[cum.data$year==1972]))
# selecting variables to merge
nes.m.1972 <- nes.1972[, c('unique','Wallace')]
# merge data
cum.data <- merge(cum.data, nes.m.1972, by='unique', all.x=T)
## 1976 ##
# read 1976 data
nes.1976 <- read.dta('NES1976.dta', convert.factors=F)
# rename
nes.1976 <- rename.vars(nes.1976, c('V763002', 'V763286', 'V763292', 'V763291', 'V763293', 'V763294', 'V763295', 'V763296'),
c('id', 'self', 'Wallace', 'Humphrey', 'McGovern', 'Reagan', 'Mondale', 'Dole'))
# add year & unique id
nes.1976$year <- 1976
nes.1976$unique <- nes.1976$year*10000 + nes.1976$id
nes.1976 <- nes.1976[order(nes.1976$unique),]
# missings: recode 0 and >7 to NA, then assert the 1-7 range is intact
for (f in c('self', 'Wallace', 'Humphrey', 'McGovern', 'Reagan', 'Mondale', 'Dole')) {
nes.1976[[f]][nes.1976[[f]] == 0 | nes.1976[[f]] > 7] <- NA
if (max(nes.1976[[f]], na.rm=T) != 7 | min(nes.1976[[f]], na.rm=T) != 1) stop('check range')
}
# test
identical(table(nes.1976$unique, nes.1976$self), table(cum.data$unique[cum.data$year==1976], cum.data$self[cum.data$year==1976]))
# selecting variables to merge
nes.m.1976 <- nes.1976[, c('unique','Wallace', 'Humphrey', 'McGovern', 'Reagan', 'Mondale', 'Dole')]
cands <- c('Wallace', 'Humphrey', 'McGovern', 'Reagan', 'Mondale', 'Dole')
# columns that already exist (e.g. Wallace from 1972) get their 1976 rows
# filled in; new figures are merged in as fresh columns
for (v in cands) {
if (v %in% names(cum.data)) {
print(v)
cum.data[cum.data$year==1976, v] <- nes.m.1976[[v]]
} else cum.data <- merge(cum.data, nes.m.1976[, c('unique',v)], by='unique', all.x=T)
}
# test
for (f in cands) {
print(all.equal(cbind(cum.data$unique[cum.data$year==1976], cum.data[cum.data$year==1976, f ]),
cbind(nes.m.1976$unique, nes.m.1976[[f]] )))
}
## 1980 ##
# read 1976 data
nes.1980 <- read.dta('NES1980.dta', convert.factors=F)
# rename
nes.1980 <- rename.vars(nes.1980, c('V800004', 'V800267', 'V800270', 'V800271', 'V800272', 'V800273', 'V800274', 'V800275', 'V800276', 'V800277'),
c('id', 'self', 'T.Kennedy', 'Connally', 'Ford', 'Brown', 'Anderson', 'H.W.Bush', 'Mondale', 'Lucey'))
# add year & unique id
nes.1980$year <- 1980
nes.1980$unique <- nes.1980$year*10000 + nes.1980$id
nes.1980 <- nes.1980[order(nes.1980$unique),]
# missings
for (f in c('self', 'T.Kennedy', 'Connally', 'Ford', 'Brown', 'Anderson', 'H.W.Bush', 'Mondale', 'Lucey')) {
nes.1980[[f]][nes.1980[[f]] == 0 | nes.1980[[f]] > 7] <- NA
if (max(nes.1980[[f]], na.rm=T) != 7 | min(nes.1980[[f]], na.rm=T) != 1) stop('check range')
}
# test
identical(table(nes.1980$unique, nes.1980$self), table(cum.data$unique[cum.data$year==1980], cum.data$self[cum.data$year==1980]))
# selecting variables to merge
nes.m.1980 <- nes.1980[, c('unique','T.Kennedy', 'Connally', 'Ford', 'Brown', 'Anderson', 'H.W.Bush', 'Mondale', 'Lucey')]
cands <- c('T.Kennedy', 'Connally', 'Ford', 'Brown', 'Anderson', 'H.W.Bush', 'Mondale', 'Lucey')
# merge data (have to run this twice, don't know why)
# NOTE(review): the need to run twice is a red flag -- likely the merge()
# calls reorder cum.data mid-loop so later in-place assignments misalign;
# worth verifying with the all.equal checks below.
for (v in cands) {
if (v %in% names(cum.data)) {
print(v)
cum.data[cum.data$year==1980, v] <- nes.m.1980[[v]]
} else cum.data <- merge(cum.data, nes.m.1980[, c('unique',v)], by='unique', all.x=T)
}
# test
for (f in cands) {
print(all.equal(cbind(cum.data$unique[cum.data$year==1980], cum.data[cum.data$year==1980, f ]),
cbind(nes.m.1980$unique, nes.m.1980[[f]] )))
}
## 1984 : No new candidates
## 1988
nes.1988 <- read.dta('NES1988.dta', convert.factors=F)
# rename
nes.1988 <- rename.vars(nes.1988, c('V880004','V880228', 'V880233'), c('id', 'self', 'Jackson'))
# add year & unique id
nes.1988$year <- 1988
nes.1988$unique <- nes.1988$year*10000 + nes.1988$id
nes.1988 <- nes.1988[order(nes.1988$unique),]
# missings
for (f in c('self', 'Jackson')) {
nes.1988[[f]][nes.1988[[f]] == 0 | nes.1988[[f]] > 7] <- NA
if (max(nes.1988[[f]], na.rm=T) != 7 | min(nes.1988[[f]], na.rm=T) != 1) stop('check range')
}
# test
identical(table(nes.1988$unique, nes.1988$self), table(cum.data$unique[cum.data$year==1988], cum.data$self[cum.data$year==1988]))
# selecting variables to merge
nes.m.1988 <- nes.1988[, c('unique','Jackson')]
cum.data <- merge(cum.data, nes.m.1988 , by='unique', all.x=T)
# test
print(all.equal(cbind(cum.data$unique[cum.data$year==1988], cum.data[cum.data$year==1988, 'Jackson' ]),
cbind(nes.m.1988$unique, nes.m.1988[['Jackson']] )))
## 1992
nes.1992 <- read.dta('NES1992.dta', convert.factors=F)
# rename
nes.1992 <- rename.vars(nes.1992, c('V923004', 'V923509', 'V923516'),
c('id', 'self', 'Perot'))
# add year & unique id
nes.1992$year <- 1992
nes.1992$unique <- nes.1992$year*10000 + nes.1992$id
nes.1992 <- nes.1992[order(nes.1992$unique),]
# missings
for (f in c('self', 'Perot')) {
nes.1992[[f]][nes.1992[[f]] == 0 | nes.1992[[f]] > 7] <- NA
if (max(nes.1992[[f]], na.rm=T) != 7 | min(nes.1992[[f]], na.rm=T) != 1) stop('check range')
}
# test
identical(table(nes.1992$unique, nes.1992$self), table(cum.data$unique[cum.data$year==1992], cum.data$self[cum.data$year==1992]))
# selecting variables to merge
nes.m.1992 <- nes.1992[, c('unique','Perot')]
cum.data <- merge(cum.data, nes.m.1992 , by='unique', all.x=T)
# test
print(identical(cbind(cum.data$unique[cum.data$year==1992], cum.data[cum.data$year==1992, 'Perot' ]),
cbind(nes.m.1992$unique, nes.m.1992[['Perot']] )))
## 1996 ##
nes.1996 <- read.dta('NES1996.dta', convert.factors=F)
# rename
nes.1996 <- rename.vars(nes.1996, c('V960001', 'V960009', 'V960365', 'V960373'),
c('id', 'id.1992','self', 'Perot'))
# add year & unique id
nes.1996$year <- 1996
nes.1996$unique <- nes.1996$year*10000 + nes.1996$id
nes.1996 <- nes.1996[order(nes.1996$unique),]
# missings
for (f in c('self', 'Perot')) {
nes.1996[[f]][nes.1996[[f]] == 0 | nes.1996[[f]] > 7] <- NA
if (max(nes.1996[[f]], na.rm=T) != 7 | min(nes.1996[[f]], na.rm=T) != 1) stop('check range')
}
# test
identical(table(nes.1996$unique, nes.1996$self), table(cum.data$unique[cum.data$year==1996], cum.data$self[cum.data$year==1996]))
# selecting variables to merge
nes.m.1996 <- nes.1996[, c('unique','Perot')]
#merge (Perot column already exists from 1992, so fill its 1996 rows in place)
cum.data[cum.data$year==1996, 'Perot'] <- nes.m.1996[['Perot']]
# test
print(all.equal(cbind(cum.data$unique[cum.data$year==1996], cum.data[cum.data$year==1996, 'Perot']),
cbind(nes.m.1996$unique, nes.m.1996[['Perot']] )))
## 2000
# 2000 was a mixed-mode study: face-to-face and telephone versions of each
# item are combined after zeroing the non-substantive codes.
nes.2000 <- read.dta('NES2000.dta', convert.factors=F)
nes.2000$std <- ifelse(nes.2000$V000005d==1,T,F)
nes.2000 <- rename.vars(nes.2000, c('V000001', 'V000439', 'V000439a', 'V000475', 'V000475a'),
c('id', 'self.ftf','self.t', 'Buchanan.ftf', 'Buchanan.t'))
# add year & unique id
nes.2000$year <- 2000
nes.2000$unique <- nes.2000$year*10000 + nes.2000$id
nes.2000 <- nes.2000[order(nes.2000$unique),]
for (f in c('self.ftf', 'self.t', 'Buchanan.ftf', 'Buchanan.t')) {
nes.2000[[f]] [nes.2000[[f]] > 7] <- 0
}
nes.2000$self <- nes.2000$self.ftf + nes.2000$self.t
nes.2000$Buchanan <- nes.2000$Buchanan.ftf + nes.2000$Buchanan.t
# missings
for (f in c('self', 'Buchanan')) {
nes.2000[[f]][nes.2000[[f]] == 0 | nes.2000[[f]] > 7] <- NA
if (max(nes.2000[[f]], na.rm=T) != 7 | min(nes.2000[[f]], na.rm=T) != 1) stop('check range')
}
# test
identical(table(nes.2000$unique, nes.2000$self), table(cum.data$unique[cum.data$year==2000], cum.data$self[cum.data$year==2000]))
# selecting variables to merge
nes.m.2000 <- nes.2000[, c('unique','Buchanan')]
#merge
cum.data <- merge(cum.data, nes.m.2000, by = 'unique', all.x=T)
# test
print(all.equal(cbind(cum.data$unique[cum.data$year==2000], cum.data[cum.data$year==2000, 'Buchanan']),
cbind(nes.m.2000$unique, nes.m.2000[['Buchanan']] )))
## 2004 ##
nes.2004 <- read.dta('NES2004.dta', convert.factors=F)
nes.2004 <- rename.vars(nes.2004, c('V040001', 'V043085', 'V043089'),
c('id', 'self', 'Nader'))
# add year & unique id
nes.2004$year <- 2004
nes.2004$unique <- nes.2004$year*10000 + nes.2004$id
nes.2004 <- nes.2004[order(nes.2004$unique),]
# missings
for (f in c('self', 'Nader')) {
nes.2004[[f]][nes.2004[[f]] == 0 | nes.2004[[f]] > 7] <- NA
if (max(nes.2004[[f]], na.rm=T) != 7 | min(nes.2004[[f]], na.rm=T) != 1) stop('check range')
}
# test
identical(table(nes.2004$unique, nes.2004$self), table(cum.data$unique[cum.data$year==2004], cum.data$self[cum.data$year==2004]))
# selecting variables to merge
nes.m.2004 <- nes.2004[, c('unique','Nader')]
#merge
cum.data <- merge(cum.data, nes.m.2004, by = 'unique', all.x=T)
# test
print(all.equal(cbind(cum.data$unique[cum.data$year==2004], cum.data[cum.data$year==2004, 'Nader']),
cbind(nes.m.2004$unique, nes.m.2004[['Nader']] )))
# persist the assembled data set for the DIF-model stage
setwd(wd)
saveRDS(cum.data, 'pres.year.Rda')
### END OF CODE ###
|
f7e7739ef395cce06e0f79c8478192998be8e927
|
b68440d87536643ff275a852dbaa641f41c791f3
|
/R/iscam_binom.R
|
10b99a05dd212b4ac0264cbb84284dcd51dcc23f
|
[] |
no_license
|
apjacobson/iscam
|
3f07a077ff32409802676d41a2785f4e4bfa8bd5
|
148c30cadadf1b65705b0934f6fe3fa584f07ca2
|
refs/heads/master
| 2020-04-22T07:06:58.178789
| 2019-03-01T18:28:11
| 2019-03-01T18:28:11
| 170,210,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,102
|
r
|
iscam_binom.R
|
#' iscam_binom Function
#'
#' This function calculates either a tail probability or the inverse cumulative
#' probability depending on whether k or prob is passed a question mark.
#' @param k number of successes, or "?" to solve for it
#' @param prob probability of interest, or "?" to solve for it
#' @param n number of trials (zero or more)
#' @param pi probability of success on each trial
#' @param lower.tail logical; if TRUE, probabilities are P[X <= k], otherwise, P[X > k]
#' @return The value returned by \code{iscam_binomprob} (when \code{prob} is
#'   "?") or \code{iscam_invbinom} (when \code{k} is "?").
#' @keywords binomial
#' @import graphics ggplot2
#' @export
#' @examples
#' iscam_binom("?", 0.05, 20, 0.3, lower.tail = TRUE)
#' iscam_binom(10, "?", 20, 0.3, TRUE)
iscam_binom <- function(k, prob, n, pi, lower.tail) {
  # Usage string kept for consistency with the other iscam_* helpers.
  Description = "iscambinomprob(k, prob, n, pi, lower.tail) \n This function calculates either a tail probability or the inverse cumulative probability depending on whether k or prob is passed a question mark."
  # BUG FIX: the original used two independent `if` statements, so the value
  # computed by iscam_binomprob() was discarded and the function returned
  # NULL invisibly. Use if/else if so the computed value is returned.
  if (as.character(prob) == "?") {
    # k is known: compute the (tail) probability of k successes.
    iscam_binomprob(k, n, pi, lower.tail)
  } else if (as.character(k) == "?") {
    # prob is known: invert the cumulative distribution to find k.
    iscam_invbinom(prob, n, pi, lower.tail)
  } else {
    # Fail loudly instead of silently doing nothing.
    stop('exactly one of `k` or `prob` must be "?"', call. = FALSE)
  }
}
|
1be7ae4bce303d6d6fd2dc4bb43efb23ae9500e4
|
443a9486aefd820615f8f964620c60e5dbdc53c6
|
/302-Main-Paper-Figure-3-a-b-c.R
|
7d9308ea73490f874add7356d4397c0dc1410ff7
|
[] |
no_license
|
eriqande/Pearse_etal_NEE_NMFS_Data_Analysis
|
7ce19f76b09a1dfb252e0c1d7b8cba690fbb8bde
|
a84ccbf7fd4ea74f32d871aaa9aad840412960fa
|
refs/heads/master
| 2020-07-25T00:26:20.481162
| 2019-09-13T22:42:08
| 2019-09-13T22:42:08
| 208,098,192
| 0
| 0
| null | 2019-09-13T14:03:46
| 2019-09-12T16:41:51
|
R
|
UTF-8
|
R
| false
| false
| 5,393
|
r
|
302-Main-Paper-Figure-3-a-b-c.R
|
## Figure 3 (a,b,c): data preparation.
## Builds per-size-class genotype and sex frequencies (with binomial SEs)
## from the long-format genotype table saved in data/Geno_long.rds.
library(tidyverse)
library(gridExtra)
library(cowplot)
Geno_long <- readRDS("data/Geno_long.rds")
# now, we compute the proportion of each genotype in each interval, grouped by sex
# and we compute the SE around that estimate.
# se is the usual binomial standard error sqrt(p(1-p)/n).
geno_summary <- Geno_long %>%
  filter(!is.na(LENGTH), !is.na(geno), !is.na(GenSex)) %>%
  group_by(GenSex, len_interval, locus) %>%
  mutate(ave_length = mean(LENGTH)) %>%
  group_by(GenSex, len_interval, ave_length, locus, geno) %>%  # include ave_length so it is retained in the output
  tally() %>%
  rename(counts = n) %>%
  mutate(total = sum(counts),
         ppn = counts / total,
         se = sqrt(ppn * (1 - ppn) / total)) %>%
  ungroup
# OK, now we can whittle that down to just the omy5locus:
# recode 0/1/2 allele counts into RR/AR/AA genotype labels.
omy5locus <- geno_summary %>%
  filter(locus == "SH114448.87") %>%
  mutate(`MAR genotype` = factor(plyr::revalue(geno, c("0" = "RR", "1" = "AR", "2" = "AA")), levels = c("RR", "AR", "AA")),
         Sex = GenSex)
# and we can also summarize the different length classes and look at the proportion
# of the different sexes of each
sex_ppns <- Geno_long %>%
  filter(!is.na(GenSex), !is.na(LENGTH), locus == "SexID") %>% # we just filter it down to the SexID locus...
  mutate(len_interval = cut_number(LENGTH, 16)) %>% # cut this into 16 equal sized classes, so we have roughly 170 fish per size class still
  group_by(len_interval) %>%
  mutate(ave_length = mean(LENGTH)) %>%
  group_by(len_interval, ave_length, GenSex) %>%
  tally() %>%
  rename(counts = n) %>%
  mutate(total = sum(counts),
         ppn = counts / total,
         se = sqrt(ppn * (1 - ppn) / total)) %>%
  ungroup %>%
  rename(Sex = GenSex)
#### Make the Plots ####
# set colors to use (one fixed colour per MAR genotype):
AA_col <- "royalblue4"
RR_col <- "red"
AR_col <- "#FF7F00"
our_cols <- c(AA = AA_col, AR = AR_col, RR = RR_col)
#our_cols <- dichromat::dichromat(c(AA = AA_col, AR = AR_col, RR = RR_col), type = "deutan")
## Panel B: genotype frequency vs. average length, by sex.
## NOTE(review): ymin/ymax are written as ppn + se / ppn - se (swapped order);
## geom_linerange draws the same segment either way, so the plot is correct.
geno_by_sex <- ggplot(omy5locus, aes(x = ave_length, y = ppn, colour = `MAR genotype`, linetype = Sex, shape = `MAR genotype`)) +
  geom_line() +
  geom_linerange(aes(ymin = ppn + 1 * se, ymax = ppn - 1 * se), position = position_dodge(width = 1.8)) +
  geom_point(size = 3) +
  ylim(0, NA) +
  xlim(50, 200) +
  theme_bw() +
  theme(legend.key.width = grid::unit(2.0, "cm"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank()) +
  ylab("Genotype frequency") +
  xlab("Average length (mm) of fish in size class") +
  guides(colour = guide_legend(reverse=T, title = "Omy05\nrearrangement"),
         shape = guide_legend(reverse=T, title = "Omy05\nrearrangement")) +
  scale_colour_manual(values = our_cols)
ggsave(geno_by_sex, filename = "length_vs_mar_geno_by_sex.pdf", width = 6.5, height = 5)
## Panel C: relative frequency of each sex vs. average length.
length_vs_sex <- ggplot(sex_ppns, aes(x = ave_length, y = ppn, linetype = Sex)) +
  geom_line() +
  geom_linerange(aes(ymin = ppn + 1 * se, ymax = ppn - 1 * se), position = position_dodge(width = 1.8)) +
 # geom_point(size = 3) +
  ylim(0, NA) +
  xlim(50, 200) +
  theme_bw() +
  theme(legend.key.width = grid::unit(2.0, "cm"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank()) +
  ylab("Relative frequency of sex") +
  xlab("Average length (mm) of fish in size class")
ggsave(length_vs_sex, filename = "length_vs_sex.pdf", width = 6.5, height = 5)
# if we want to put those on the same page we can do:
combo <- grid.arrange(geno_by_sex, length_vs_sex, nrow = 2)
ggsave(combo, width = 6.5, height = 8, filename = "two-stacked-figs.pdf")
#### Now, get Steve's data and use it:
## Panel A: detection probability by sex and genotype, parsed out of the
## `fishtype` string (e.g. "FemaleAR") with regexes.  `xpos` spreads the
## points horizontally: females on the left, males on the right.
lindley <- readRDS("data/lindley-results.rds") %>%
  mutate(geno = str_match(fishtype, "ale([AR][AR])")[,2]) %>%
  mutate(sex = str_match(fishtype, "([MF].*ale)")[,2]) %>%
  mutate(geno = ifelse(geno == "RA", "AR", geno)) %>%  # normalize RA -> AR
  mutate(geno = factor(geno, levels = c("AA", "AR", "RR"))) %>%
  mutate(sex = factor(sex)) %>%
  mutate(xpos = (as.integer(sex) - 1) + (0.1 * as.integer(geno)) - 0.4 * (sex == "Male"))
# so now we can make something that looks like this plot
# using ggplot
lindley_plot <- ggplot(lindley, aes(colour = geno, linetype = sex)) +
  geom_segment(aes(x = xpos, xend = xpos, y = plusSE, yend = minusSE)) +
  geom_point(aes(x = xpos, y = emig.frac, shape = geno), size = 3) +
  scale_colour_manual(values = our_cols) +
  ylab("Detection probability") +
  ylim(0, 1) +
  xlim(0, 1) +
  annotate("segment", x = 0.5, xend = 0.5, y = 0, yend = 1) +  # divider between sexes
 # annotate("") +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title.x = element_blank(),
        axis.ticks.x = element_blank(), axis.text.x = element_blank()) +
  theme(legend.position="none") +
  annotate("text", x = 0.23, y = 0.9, label = "Females", hjust = 0.5) +
  annotate("text", x = 0.76, y = 0.9, label = "Males", hjust = 0.5) +
  scale_shape_manual(values = c(AA = "square", AR = "triangle", RR = "circle"))
# or we can use cowplot::plot_grid and then get labels on there
# too...
left_col <- cowplot::plot_grid(lindley_plot, labels = "A")
right_col <- cowplot::plot_grid(geno_by_sex, length_vs_sex, labels = c("B", "C"), align = "v", ncol = 1)
full_panel <- cowplot::plot_grid(left_col, right_col, rel_widths = c(0.45, 1))
# OK, that is pretty slick...
ggsave(full_panel, filename = "fig3-abc-sex-geno-length-detection.pdf", width = 7, height = 6)
|
cd38e940c6a01f2f9693fc71cac04b4a958e5b6e
|
f2ca5431d921b1189a6ebaacd88aef3a9a1a1820
|
/R/LearnerClustAgnes.R
|
4d5f6a67be2256f6daaede71c1f9d5b47b50d0c1
|
[] |
no_license
|
mlr-org/mlr3cluster
|
44747d2b4fae9170b5ea20704cccfdad777f198f
|
161aee5e75aa299bea29617020339768a8d9a75c
|
refs/heads/main
| 2023-06-22T09:58:51.455583
| 2023-06-15T22:32:15
| 2023-06-15T22:32:15
| 157,852,274
| 15
| 7
| null | 2023-03-10T01:08:56
| 2018-11-16T10:32:38
|
R
|
UTF-8
|
R
| false
| false
| 2,769
|
r
|
LearnerClustAgnes.R
|
#' @title Agglomerative Hierarchical Clustering Learner
#'
#' @name mlr_learners_clust.agnes
#' @include LearnerClust.R
#' @include aaa.R
#'
#' @description
#' A [LearnerClust] for agglomerative hierarchical clustering implemented in [cluster::agnes()].
#' The predict method uses [stats::cutree()] which cuts the tree resulting from
#' hierarchical clustering into specified number of groups (see parameter `k`).
#' The default number for `k` is 2.
#'
#' @templateVar id clust.agnes
#' @template learner
#' @template example
#'
#' @export
LearnerClustAgnes = R6Class("LearnerClustAgnes",
  inherit = LearnerClust,
  public = list(
    #' @description
    #' Creates a new instance of this [R6][R6::R6Class] class.
    initialize = function() {
      # Hyperparameter set; "train" tags apply when fitting cluster::agnes,
      # the "predict" tag (k) applies when cutting the dendrogram.
      ps = ps(
        metric = p_fct(default = "euclidean", levels = c("euclidean", "manhattan"), tags = "train"),
        stand = p_lgl(default = FALSE, tags = "train"),
        method = p_fct(default = "average", levels = c("average", "single", "complete", "ward", "weighted", "flexible", "gaverage"), tags = "train"),
        trace.lev = p_int(lower = 0L, default = 0L, tags = "train"),
        k = p_int(lower = 1L, default = 2L, tags = "predict"),
        # par.method is forwarded to cluster::agnes; agnes accepts a vector
        # of length 1, 3, or 4 depending on the linkage method.
        par.method = p_uty(tags = "train", custom_check = function(x) {
          if (test_numeric(x) || test_list(x)) {
            if (length(x) %in% c(1L, 3L, 4L)) {
              return(TRUE)
            }
            stop("`par.method` needs be of length 1, 3, or 4")
          } else {
            stop("`par.method` needs to be a numeric vector")
          }
        })
      )
      # param deps: par.method is only meaningful for these two linkages
      ps$add_dep("par.method", "method", CondAnyOf$new(c("flexible", "gaverage")))
      ps$values = list(k = 2L)
      super$initialize(
        id = "clust.agnes",
        feature_types = c("logical", "integer", "numeric"),
        predict_types = "partition",
        param_set = ps,
        properties = c("hierarchical", "exclusive", "complete"),
        packages = "cluster",
        man = "mlr3cluster::mlr_learners_clust.agnes",
        label = "Agglomerative Hierarchical Clustering"
      )
    }
  ),
  private = list(
    # Fit agnes on the task data; only "train"-tagged params are forwarded.
    .train = function(task) {
      pv = self$param_set$get_values(tags = "train")
      m = invoke(cluster::agnes, x = task$data(), diss = FALSE, .args = pv)
      if (self$save_assignments) {
        # Materialize training partitions by cutting the tree at k groups
        # (k is a predict-time param, read here so assignments match predict).
        self$assignments = stats::cutree(m, self$param_set$values$k)
      }
      return(m)
    },
    # Hierarchical clusterers cannot score new rows; prediction returns the
    # stored training assignments (hence the "useless prediction" warning).
    .predict = function(task) {
      if (self$param_set$values$k > task$nrow) {
        stopf("`k` needs to be between 1 and %i", task$nrow)
      }
      warn_prediction_useless(self$id)
      PredictionClust$new(task = task, partition = self$assignments)
    }
  )
)
# Register the learner in the package-level dictionary.
learners[["clust.agnes"]] = LearnerClustAgnes
|
a0a12cf6617c29c7de002aef2287dd2e8288835b
|
f635bcd87e9c9e663e0daf59685daf72d2c7fc74
|
/cachematrix.R
|
be0ae0e28737ba84d925cfd828bdd93a1148b85c
|
[] |
no_license
|
RichCoursera/ProgrammingAssignment2
|
af66315c8d45ec1f9687f7673ef23520a1ee14d7
|
3702f5f6d1607c1fe44060562835b0aab1342a94
|
refs/heads/master
| 2020-04-08T04:41:51.511398
| 2015-02-18T05:35:50
| 2015-02-18T05:35:50
| 30,890,365
| 0
| 0
| null | 2015-02-16T21:55:56
| 2015-02-16T21:55:53
|
R
|
UTF-8
|
R
| false
| false
| 1,485
|
r
|
cachematrix.R
|
## The two functions below will take a matrix and return
## the inverse of that matrix.
## There is also a function for an error message for invalid data.
## This function creates a special "matrix" object
## that caches its inverse.
## Create a caching wrapper around a matrix.
##
## Returns a list of four closures sharing one enclosing environment:
##   set(y)        -- store a new matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(s) -- cache a computed inverse
##   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new data invalidates the cache
    },
    get = function() x,
    setinverse = function(solve) cached_inverse <<- solve,
    getinverse = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed), then the
## cacheSolve will retrieve the inverse from the cache.
## Compute (or fetch from cache) the inverse of the special "matrix" object
## returned by makeCacheMatrix.
##
## x   : list produced by makeCacheMatrix (get/set/getinverse/setinverse)
## ... : further arguments passed on to solve()
## Returns the inverse matrix, or invisible NULL if the stored matrix is
## singular or not square.
##
## BUG FIX: the original tryCatch handler "returned" only from the handler,
## after which execution fell through to x$setinverse(m) and cached NULL
## (the author's own comment notes the function could not be stopped).
## Here the handler yields NULL and we return early without caching it.
cacheSolve <- function(x, ...)
{
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- tryCatch(
    solve(data, ...),
    error = function(e) {
      message("The data entered was not valid. It was either")
      message("a singular matrix or not a square matrix")
      NULL
    }
  )
  if (is.null(m)) {
    return(invisible(NULL))  # do not cache a failed inversion
  }
  x$setinverse(m)
  m
}
## Error message handling
## Print a user-facing explanation of why matrix inversion failed
## (singular or non-square input) and return NULL invisibly.
## FIXES: "squre" typo in the message; removed the leftover debugging
## "PS. ..." messages that described an unresolved control-flow problem.
errmsg <- function()
{
  message("The data entered was not valid. It was either")
  message("a singular matrix or not a square matrix")
  invisible(NULL)
}
|
fa1d1525bb2b71319b3839919a8621a17dc28ca6
|
27d25a6d65a1106c399f55e3222d7ecaa2187f2c
|
/R/basic/04_basicStochasticModels.R
|
12e10701fe288989210b3501d5f3441297bd928b
|
[] |
no_license
|
damiansp/TS
|
d57925a7093a250a968a8517feaba5849282118e
|
668e86f3073721784e4bb992edb30de6fc1a04f6
|
refs/heads/master
| 2022-08-09T00:22:19.634731
| 2022-07-27T01:57:47
| 2022-07-27T01:57:47
| 150,122,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,593
|
r
|
04_basicStochasticModels.R
|
#=========#=========#=========#=========#=========#=========#=========#=========
## Session setup: clear the workspace and detach all previously attached
## packages so the script starts from a clean state.
## NOTE(review): rm(list=ls())/detach-all is an interactive-script idiom and
## the detach loop errors when no extra packages are attached -- run as-is
## only at the top of a fresh session.
rm(list=ls())
lapply(paste('package:', names(sessionInfo()$otherPkgs), sep=''),
detach,
character.only=T,
unload=T)
setwd('~/Learning/TS/R/basic')
options(digits=5)
library(MASS)
# Base URL of the "Introductory Time Series with R" companion datasets.
DATA <- paste0("https://raw.githubusercontent.com/dallascard/",
"Introductory_Time_Series_with_R_datasets/master/")
# 2 White Noise-----------------------------------------------------------------
# 2.3 Simulation in R: plot Gaussian white noise and compare its histogram
# to the theoretical normal density.
N <- 100
w <- rnorm(N)
plot(w, type='l')
N <- 1000
x <- seq(-4, 4, length=N)
hist(rnorm(N), prob=T, col=4)
lines(x, dnorm(x), col=5, lw=2)
# 2.4 Second-Order Properties and the Correlogram
# (white noise should show no significant autocorrelation at any lag)
acf(rnorm(N))
# 3 Random Walks----------------------------------------------------------------
# 3.7 Simulation: x[t] = x[t-1] + w[t]
x <- w <- rnorm(1000)
for(t in 2:1000) { x[t] <- x[t-1] + w[t] }
par(mfrow = c(3, 1))
plot(x, type = 'l')
acf(x)
# 4 Fitted Models and Diagnostic Plots------------------------------------------
# 4.1 Simulated random walk series: first differences of a random walk are
# white noise, so the differenced ACF should be flat.
acf(diff(x))
# 4.2 Exchange rate series (UK pounds / NZ dollar, quarterly from 1991)
Z <- read.table(paste(DATA, 'pounds_nz.dat', sep='/'))
Z.ts <- ts(Z, start=1991, frequency=4)
plot(Z.ts)
plot(diff(Z.ts))
acf(diff(Z.ts))
# Holt-Winters with alpha=1, gamma=0 reduces to a random-walk forecast
Z.hw <- HoltWinters(Z.ts, alpha=1, gamma=0)
par(mfrow=c(2, 1))
plot(Z.hw)
acf(resid(Z.hw))
# ...and with all parameters estimated from the data
Z.hw2 <- HoltWinters(Z.ts)
plot(Z.hw2)
acf(resid(Z.hw2))
# 4.3 Random walk with drift: HP stock price series.
# NOTE: sep='' here (not '/') because DATA already ends with a slash.
HP.dat <- read.table(paste(DATA, 'HP.txt', sep=''), header = T)
plot(as.ts(HP.dat$Price))
DP <- diff(HP.dat$Price)
par(mfrow=c(2, 1))
plot(as.ts(DP))
abline(h=0, col='grey')
# Approximate 95% CI for the mean daily change (the drift term)
mean(DP) + c(-2, 2)*sd(DP) / sqrt(length(DP))
abline(h=mean(DP), col=2)
abline(h=mean(DP) + c(-2, 2)*sd(DP) / sqrt(length(DP)), col=4, lty=2)
acf(DP)
# Simulate data from model w HP parameters to see how well it applies:
# 1000 random-walk-with-drift paths seeded at the first observed price,
# overlaid on the observed series, with pointwise quantile bands.
par(mfrow=c(1, 1))
n <- dim(HP.dat)[1]
plot(as.ts(HP.dat$Price), ylim=c(0, 80), xlim=c(0, n + 100))
mu <- mean(DP)
sigma <- sd(DP)
x <- numeric(n + 100)
iters <- 1000
outM <- matrix(0, (n + 100), iters)
x[1] <- HP.dat[1, ]
for (i in 1:iters) {
for (j in 2:(n + 100)) {
x[j] <- x[j - 1] + mu + rnorm(1, 0, sigma)
}
outM[, i] <- x
lines(x, col=rgb(0, 0.8, 1, alpha=0.03))
x[1] <- HP.dat[1, ]
}
# Pointwise 50% (dashed) and 95% (solid) simulation bands plus the median
ci95 <- apply(outM, 1, quantile, probs=c(0.025, 0.25, 0.5, 0.75, 0.975))
lines(ci95[1,], lty = 1, col=rgb(0, 0.8, 1))
lines(ci95[3,], lty = 1, col=rgb(0, 0.8, 1))
lines(ci95[5,], lty = 1, col=rgb(0, 0.8, 1))
lines(ci95[2,], lty = 2, col=rgb(0, 0.8, 1))
lines(ci95[4,], lty = 2, col=rgb(0, 0.8, 1))
# forecast: same simulation, but started from the last observed price
x <- numeric(100)
outM <- matrix(0, 100, iters)
x[1] <- HP.dat[n, ]
for (i in 1:iters) {
for (j in 2:100) {
x[j] <- x[j - 1] + mu + rnorm(1, 0, sigma)
}
outM[, i] <- x
lines(y=x, x=n:(n + 99), col=rgb(1, 0, 0.8, alpha = 0.03))
x[1] <- HP.dat[n, ]
}
ci95 <- apply(outM, 1, quantile, probs=c(0.025, 0.25, 0.5, 0.75, 0.975))
lines(x=n:(n + 99), y=ci95[1,], lty=1, col=rgb(1, 0, 0.8))
lines(x=n:(n + 99), y=ci95[3,], lty=1, col=rgb(1, 0, 0.8))
lines(x=n:(n + 99), y=ci95[5,], lty=1, col=rgb(1, 0, 0.8))
lines(x=n:(n + 99), y=ci95[2,], lty=2, col=rgb(1, 0, 0.8))
lines(x=n:(n + 99), y=ci95[4,], lty=2, col=rgb(1, 0, 0.8))
# 5. Autoregressive Models-----------------------------------------------------
# 5.1 Definition:
# A series {x[t]} is autoregressive of order p: AR(p) ->
# x[t] = alpha[1]x[t - 1] + alpha[2]x[t - 2] + ... + alpha[p]x[t - p] + w[t]
# with {w[t]} as white noise, and alphas as parameters;
# equivlently:
# theta[p](B)x[t] = w[t] =
# (1 - alpha[1]B - alpha[2]B^2 - ... - alpha[p]B^p)x[t]
# NOTES:
# AR(1) is a special case = the random walk
# Exponential smoothing is the special case where
# alpha[i] = alpha(1 - alpha)^i for i = 1, 2, ..., and p -> Inf
# 5.3 Second-Order Properties of an AR(1) Model
# AR(1): x[t] = alpha*x[t - 1] + w[t]; w[t] ~N(0, sigma^2);
# mean[x] = 0
# cov[k] = alpha^k * sigma^2 / (1 - alpha^2)
# 5.5 Correlogram of an AR(1) process
# Theoretical autocorrelation of an AR(1) process: rho_k = alpha^k.
rho <- function(k, alpha) {
  alpha^k
}
# Plot the theoretical AR(1) correlogram for alpha = 0.7 (geometric decay)
# and alpha = -0.7 (decaying oscillation).
layout(1:2)
plot(0:10,
rho(0:10, 0.7),
type='l',
xlab='k',
ylab=expression(rho[k]),
main=expression(alpha == 0.7) )
plot(0:10,
rho(0:10, -0.7),
type='l',
xlab='k',
ylab=expression(rho[k]),
main=expression(alpha == -0.7) )
# 5.7 Simulation: AR(1) with alpha = 0.7; PACF should cut off after lag 1.
x <- w <- rnorm(1000)
for(t in 2:1000) { x[t] <- 0.7*x[t - 1] + w[t] }
plot(x, type='l')
plot(x[1:100], type='l')
acf(x)
pacf(x)
# 6 Fitted Models--------------------------------------------------------------
# 6.1 Model fitted to simulated series: recover the AR(1) coefficient by MLE.
plot(x, type='l')
x.ar <- ar(x, method = 'mle')
x.ar$order # 1
x.ar$ar # 0.68 (cf. with 0.7 in the specified model above)
x.ar$ar + c(-2, 2)*c(sqrt(x.ar$asy.var)) # appx 95% CI [0.622, 0.716]
# (includes 0.7)
# 4.6.2 Exchange rate series: fitted AR model
Z.ar <- ar(Z.ts)
mean(Z.ts) # 20.5
Z.ar$order # 1 (means Z.ar is AR(1))
Z.ar$ar # coefficient for AR term = 0.85
Z.ar$ar + c(-2, 2) * c(sqrt(Z.ar$asy.var)) # appx 95%CI = [0.74, 1.04]
acf(Z.ar$res[-1])
#The model can now be reconstructed as:
# z.hat[t] = mean + coef(z[t-1] - mean) or
# z.hat[t] = 20.5 + 0.85(z[t-1] - 20.5)
Z.ar
# Simulate two realizations from the fitted AR(1) and compare with the data.
par(mfrow = c(3,1))
plot(Z.ts)
abline(h=mean(Z.ts), col='grey')
MU <- mean(Z.ts)
LEN <- length(Z.ts)
PHI <- Z.ar$ar
t <- numeric(LEN)
t[1] <- MU
for(i in 2:LEN){
t[i] <- MU + PHI * (t[i-1] - MU) + rnorm(1, 0, sd(Z.ts))
}
plot(t, type='l')
abline(h=mean(t), col='grey')
for(i in 2:LEN){
t[i] <- MU +PHI * (t[i-1] - MU) + rnorm(1, 0, sd(Z.ts))
}
plot(t, type='l')
abline(h=mean(t), col='grey')
# 6.3 Global temperature series: fitted AR model
Global <- scan(paste(DATA, 'global.dat', sep='/'))
Global.ts <- ts(Global, st=c(1856, 1), end=c(2005, 12), fr=12)
# Annual mean series, hoisted into a variable: the original recomputed
# aggregate(Global.ts, FUN=mean) seven separate times below.
Global.annual <- aggregate(Global.ts, FUN=mean)
Global.ar <- ar(Global.annual, method='mle')
mean(Global.annual) # -0.138
Global.ar$order # 4 = AR(4); regressive over prev 4 time steps
Global.ar$ar # coefs: appx 0.588, 0.126, 0.111, 0.268
acf(Global.ar$res[-(1:Global.ar$order)], lag=50)
plot(Global.ts)
plot(Global.annual, ylim=c(-1.5, 1))
# Simulate 1000 paths from the fitted AR(4), seeded with the first four
# observed annual means, and overlay them on the observed series.
# BUG FIX: the original hard-coded 0.013 for the lag-2 coefficient -- a
# transcription error for the fitted value 0.126 printed above.  Using the
# fitted coefficient vector Global.ar$ar avoids hard-coding entirely.
t <- numeric(length(Global.annual))
t[1:4] <- Global.annual[1:4]
mu <- mean(Global.annual)
stdev <- sd(Global.annual)
phi <- Global.ar$ar
iters <- 1000
for (iter in 1:iters) {
  for (i in 5:length(t)) {
    # phi aligns with lags 1..4, so pair it with t[i-1], ..., t[i-4]
    t[i] <- mu + sum(phi * (t[(i - 1):(i - 4)] - mu)) + rnorm(1, 0, stdev)
  }
  lines(seq(1856, 2005, length=length(t)), t, col=rgb(1, 0, 0, 0.05))
}
lines(Global.annual)
|
f48b73c5070ab0329f1bdf4a3345565b03772169
|
41d619d06279d576cc6cca9f21cacff69f934917
|
/Exploratory-Data-Analysis/plot1.R
|
a94abb87dd3eb2abefa176d03753d82d3701e9f4
|
[] |
no_license
|
phashemi/ExData_Plotting1
|
10f863c790aa6c2af1595ea0e9a002b9845d7e86
|
342b7d1c8cded27f1aa805623ab064ccd6c7a4af
|
refs/heads/master
| 2021-01-18T05:53:00.268954
| 2015-08-08T13:05:32
| 2015-08-08T13:05:32
| 40,359,099
| 0
| 0
| null | 2015-08-07T12:21:00
| 2015-08-07T12:21:00
| null |
UTF-8
|
R
| false
| false
| 646
|
r
|
plot1.R
|
## plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## Read the full dataset ('?' marks missing values; ';' separated).
data_file <- "C:/Users/PAYMON/data/exdata-data-household_power_consumption/household_power_consumption.txt"
full_data <- read.csv(data_file,
                      header = TRUE, sep = ";", na.strings = "?",
                      nrows = 2075259, stringsAsFactors = FALSE, quote = "")
## Keep only the observations from February 1-2, 2007.
data <- full_data[full_data$Date %in% c("1/2/2007", "2/2/2007"), ]
## Draw the histogram on the screen device.
hist(data$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
## Copy the plot to a 480 x 480 PNG named "plot1.png", then close the device.
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
c3bccc30a258ce1f8500d8ee93ea23a2b6d1062f
|
6d33a4b4474bf05ec47a94cf03911cf00f493c17
|
/Classification/Code/R/runScripts/6cancer_RBM_Script.R
|
0e632c8ddee8b69925db4c2a362e1a37568155c9
|
[] |
no_license
|
Genuity-Science/unconventionalML
|
a6d394e1f88b0a9174fb593ba9a4ddec96159e2f
|
7e9d1eb5c13524b66da787fc88f2938831d7308a
|
refs/heads/master
| 2023-08-05T22:02:55.025946
| 2021-09-22T16:28:07
| 2021-09-22T16:28:07
| 310,676,584
| 1
| 1
| null | 2020-11-06T20:19:57
| 2020-11-06T18:31:50
|
R
|
UTF-8
|
R
| false
| false
| 4,373
|
r
|
6cancer_RBM_Script.R
|
library(deepnet)
library(feather)
library(HandTill2001)
library(caret)
# Logistic (sigmoid) function: maps any real input into (0, 1); vectorized.
sig <- function(x) {
  1 / (1 + exp(-x))
}
# Numerically stable softmax: subtract max(x) before exponentiating so the
# largest term is exp(0), avoiding overflow for large inputs.
softmax <- function(x) {
  scaled <- exp(x - max(x))
  scaled / sum(scaled)
}
## Class-probability prediction from a trained deepnet RBM.
##
## r1        : model from deepnet::rbm.train; fields W (weights), B (visible
##             bias), C (hidden bias) are read directly below.
## test_data : data frame that contains one indicator column per class name
##             (the caller appends these before training).
## classes   : character vector of class labels / indicator column names.
##
## For each candidate class, clamp that class's indicator column to 1 (all
## others to 0), push the visible layer up, and score the configuration by
## its (negative free) energy; higher energy here means a better fit.
## Returns a data frame of per-row class probabilities (softmax over the
## scaled energies), one column per class.
## NOTE(review): the 1/(2*sqrt(nrow)) energy scaling looks like an empirical
## temperature choice -- confirm against the accompanying paper/code.
predict_rbm <- function(r1,test_data,classes) {
  # One energy column per candidate class.
  energy_mat = matrix(nrow=nrow(test_data),ncol=length(classes))
  for (m in 1 : length(classes)) {
    # Clamp the one-hot class indicators: only class m is "on".
    test_data[classes] = 0
    test_data[classes[m]] = 1
    test_data_tmp = as.matrix(test_data)
    h = rbm.up(r1,as.matrix(test_data_tmp))  # hidden-unit activations
    # visible-hidden interaction term, plus visible/hidden bias terms
    e_cross = diag(h %*% r1$W %*% t(test_data_tmp))
    vb_e = test_data_tmp %*% r1$B
    hb_e = h %*% r1$C
    energy_mat[,m] = hb_e + vb_e + e_cross
  }
  # prob_df = data.frame(sig(energy_mat[,1] - energy_mat[,2]),sig(energy_mat[,2]-energy_mat[,1])) # only works for two classes
  # Multi-class generalization: row-wise softmax over scaled energies.
  prob_df = as.data.frame(t(apply(energy_mat/sqrt(nrow(test_data))/2,1,softmax)))
  names(prob_df) = classes
  return(prob_df)
}
## Evaluate an RBM classifier over 100 bootstrap resamples of the 6-cancer
## dataset: train, predict via predict_rbm, and record accuracy / balanced
## accuracy / precision / recall / F1 / multi-class AUROC per resample.
probs_list = list()
ii=1
files='6cancer'
save_dir = '/boston_ailab/users/rli/quantum/Results/'
print(files[[ii]])
fdir = sprintf('/boston_ailab/users/rli/quantum/Data/%s_bootstrap_resamples/',files[[ii]])
pos_class = NULL
# Per-resample metrics table (one row appended per bootstrap iteration).
info=data.frame(dataset=character(),method=character(),tr_acc=double(),tst_acc=double(),
tr_bacc=double(),tst_bacc=double(),tr_auroc=double(), tst_auroc=double(),
tr_prec=double(), tst_prec=double(), tr_recall=double(), tst_recall=double(),
tr_F1=double(),tst_F1=double(),stringsAsFactors=FALSE)
# initialize list of confusion matrices
cm_train_list = list()
cm_test_list = list()
# initialize list of response probabilities
response_train_list = list()
response_test_list = list()
for (n in 1 : 100) {
  # Load the n-th bootstrap resample (features as feather, labels as text).
  train_data = read_feather(sprintf('%sresample_%d_train.feather',fdir,n))
  test_data = read_feather(sprintf('%sresample_%d_test.feather',fdir,n))
  train_labels = read.csv(sprintf('%sresample_%d_train_labels.txt',fdir,n),header=F)[[1]]
  test_labels = read.csv(sprintf('%sresample_%d_test_labels.txt',fdir,n),header=F)[[1]]
  # NOTE(review): levels() assumes read.csv returned a factor; under R >= 4.0
  # (stringsAsFactors = FALSE by default) this would be NULL -- confirm the
  # R version / options this was run with.
  classes = levels(train_labels)
  # Append one 0/1 indicator column per class to the visible layer.
  for (m in 1 : length(classes)) {
    train_data[classes[m]] = train_labels == classes[m]
  }
  # Train a 30-hidden-unit RBM with CD-1.
  r1<-rbm.train(as.matrix(train_data),30,numepochs=100,batchsize=8,cd=1)
  # calculate response probabilities
  response_train = predict_rbm(r1,train_data,classes)
  response_test = predict_rbm(r1,test_data,classes)
  response_train_list[[n]] = response_train
  response_test_list[[n]] = response_test
  # Hard predictions = argmax over class-probability columns.
  pred_train = as.factor(apply(response_train,1,which.max))
  levels(pred_train) = colnames(response_train)
  pred_test = as.factor(apply(response_test,1,which.max))
  levels(pred_test) = colnames(response_test)
  cm_train = confusionMatrix(pred_train, train_labels, positive = pos_class)
  cm_test = confusionMatrix(pred_test, test_labels, positive = pos_class)
  # ROC train and test (HandTill2001 multi-class AUC)
  auc.train = auc(multcap(response = train_labels, predicted = data.matrix(response_train)))
  auc.test = auc(multcap(response = test_labels, predicted = data.matrix(response_test)))
  info[n,'dataset']=files[[ii]]
  info[n,'method']='RBM'
  info[n,'tr_acc']=cm_train$overall["Accuracy"]
  info[n,'tst_acc']=cm_test$overall["Accuracy"]
  cm_train_list[[n]] = cm_train
  cm_test_list[[n]] = cm_test
  # Macro-averaged per-class metrics.
  info[n,'tr_bacc']=mean(cm_train$byClass[,'Balanced Accuracy'])
  info[n,'tst_bacc']=mean(cm_test$byClass[,'Balanced Accuracy'])
  info[n,'tr_prec']=mean(cm_train$byClass[,'Precision'])
  info[n,'tst_prec']=mean(cm_test$byClass[,'Precision'])
  info[n,'tr_recall']=mean(cm_train$byClass[,'Recall'])
  info[n,'tst_recall']=mean(cm_test$byClass[,'Recall'])
  info[n,'tr_F1']=mean(cm_train$byClass[,'F1'])
  info[n,'tst_F1']=mean(cm_test$byClass[,'F1'])
  info[n,'tr_auroc']=auc.train
  info[n,'tst_auroc']=auc.test
}
# Bundle all results and persist them for downstream analysis.
save_vars = list(cm_train_list,cm_test_list,info,response_train_list,response_test_list)
names(save_vars) = c("cm_train_list","cm_test_list","info","response_train_list","response_test_list")
saveRDS(save_vars,paste(save_dir,files[[ii]],"_rbm_save_all.RDS",sep=""))
|
4e150dacbb0326be8d5db8630255817961096df9
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/frbs/R/frbs-package.R
|
cdea3628478d3bcbe342483fbfaee671b8573a00
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,173
|
r
|
frbs-package.R
|
#############################################################################
#
# This file is a part of the R package "frbs".
#
# Author: Lala Septem Riza
# Co-author: Christoph Bergmeir
# Supervisors: Francisco Herrera Triguero and Jose Manuel Benitez
# Copyright (c) DiCITS Lab, Sci2s group, DECSAI, University of Granada.
#
# This package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later version.
#
# This package is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#############################################################################
#' Fuzzy rule-based systems (FRBSs) are based on the fuzzy concept
#' proposed by Zadeh in 1965, which represents the reasoning of human experts in production
#' rules (a set of IF-THEN rules) to handle real-life problems from domains
#' such as control, prediction and inference, data mining, bioinformatics data processing,
#' robotics, and speech recognition. FRBSs are also known as fuzzy inference systems and
#' fuzzy models. When applied to specific tasks, they may also be known under specific names
#' such as fuzzy associative memories or fuzzy controllers.
#' In this package, we consider systems with multi-inputs and single-output (MISO),
#' with real-valued data.
#'
#' FRBSs are a competitive alternative to other classic models and algorithms in order to
#' solve classification and regression problems. Generally,
#' an FRBS consists of four functional parts:
#' \itemize{
#' \item a fuzzification interface which transforms the crisp inputs into degrees
#' of membership functions of the linguistic term of each variable.
#' See \code{\link{fuzzifier}}.
#' \item a knowledge base consisting of a database (DB) and a rulebase (RB). While the database includes
#' the fuzzy set definitions, the rulebase contains fuzzy IF-THEN rules.
#' We will represent the knowledge as a set of rules. Each one has the following structure.
#'
#' \code{IF premise (antecedent) THEN conclusion (consequent)}
#'
#' See \code{\link{rulebase}}.
#' \item an inference engine which performs the inference operations on the fuzzy IF-THEN rules.
#' There are two kinds of inference for fuzzy systems based on linguistic rules:
#' The Mamdani and the Takagi Sugeno Kang model. See \code{\link{inference}}.
#' \item a defuzzification process to obtain the crisp values from linguistic values. There are several methods for
#' defuzzification such as the weighted average, centroid, etc.
#' See \code{\link{defuzzifier}}.
#' }
#'
#' Since it may be difficult to obtain information from human experts in the form required,
#' an alternative and effective way to acquire the knowledge is to generate
#' the fuzzy IF-THEN rules automatically from the numerical training data.
#' In general, when modeling an FRBS, there are two important processes which should be conducted,
#' namely structure identification and parameter estimation.
#' Structure identification is a process to find appropriate fuzzy IF-THEN rules
#' and to determine the overall number of rules.
#' Parameter estimation is applied to tune parameters of membership functions.
#' Many approaches have been proposed
#' in order to perform this modeling such as a table-lookup scheme, heuristic procedures,
#' neuro-fuzzy techniques, clustering methods, genetic algorithms, least squares methods,
#' gradient descent, etc. In this package, the following approaches to generate
#' fuzzy IF-THEN rules have been implemented:
#' \enumerate{
#' \item FRBS based on space partition
#' \itemize{
#' \item Wang and Mendel's technique (\code{WM}): It is used to solve regression tasks. See \code{\link{WM}}.
#' \item Chi's technique (\code{FRBCS.CHI}): It is used to solve classification tasks. See \code{\link{FRBCS.CHI}}.
#' \item Ishibuchi's technique using weight factor (\code{FRBCS.W}): It is used to solve classification tasks. See \code{\link{FRBCS.W}}.
#' }
#' \item FRBS based on neural networks
#' \itemize{
#' \item The adaptive-network-based fuzzy inference system (\code{ANFIS}):
#' It is used to solve regression tasks. See \code{\link{ANFIS}}.
#' \item The hybrid neural fuzzy inference system (\code{HYFIS}): It is used to solve regression tasks. See \code{\link{HyFIS}}.
#' }
#' \item FRBS based on clustering approach
#' \itemize{
#' \item The subtractive clustering and fuzzy c-means (\code{SBC}): It is used to solve regression tasks. See \code{\link{SBC}}.
#' \item The dynamic evolving neural-fuzzy inference system (\code{DENFIS}):
#' It is used to solve regression tasks. See \code{\link{DENFIS}}.
#' }
#' \item FRBS based on genetic algorithms
#' \itemize{
#' \item The Thrift's method (\code{GFS.THRIFT}): It is used to solve regression tasks. See \code{\link{GFS.Thrift}}.
#' \item The Genetic fuzzy systems for fuzzy rule learning based on the MOGUL methodology (\code{GFS.FR.MOGUL}):
#' It is used to solve regression tasks. See \code{\link{GFS.FR.MOGUL}}.
#' \item The Ishibuchi's method based on genetic cooperative-competitive learning (\code{GFS.GCCL}):
#' It is used to solve classification tasks. See \code{\link{GFS.GCCL}}.
#' \item The Ishibuchi's method based on hybridization of genetic cooperative-competitive learning (GCCL) and Pittsburgh (\code{FH.GBML}):
#' It is used to solve classification tasks. See \code{\link{FH.GBML}}.
#' \item The structural learning algorithm on vague environment (\code{SLAVE}):
#' It is used to solve classification tasks. See \code{\link{SLAVE}}.
#' \item The genetic for lateral tuning and rule selection of linguistic fuzzy system (\code{GFS.LT.RS}):
#' It is used to solve regression tasks. See \code{\link{GFS.LT.RS}}.
#' }
#' \item FRBS based on the gradient descent method
#' \itemize{
#' \item The FRBS using heuristics and gradient descent method (\code{FS.HGD}):
#' It is used to solve regression tasks. See \code{\link{FS.HGD}}
#' \item The fuzzy inference rules by descent method (\code{FIR.DM}):
#' It is used to solve regression tasks. See \code{\link{FIR.DM}}
#' }
#' }
#' The functions documented in the manual for the single methods are all called internally
#' by \code{\link{frbs.learn}}, which is the central function of the package.
#' However, in the documentation of each of the internal learning functions,
#' we give some theoretical background and references to the original literature.
#'
#'
#' \bold{Usage of the package:}
#'
#' First of all, if you have problems using the package, find a bug, or have suggestions,
#' please contact the package maintainer by email, instead of writing to the general R lists
#' or to other internet forums and mailing lists.
#'
#' The main functions of the package are the following:
#' \itemize{
#' \item The function \code{\link{frbs.learn}} allows to generate the model by
#' creating fuzzy IF-THEN rules or cluster centers from training data.
#' In other words, users just need to call this function to generate an FRBS model from
#' training data. The different algorithms mentioned above are all accessible through this function.
#' The outcome of the function is an \code{\link{frbs-object}}.
#' \item Even though the main purpose of this package is to generate
#' the FRBS models from training data automatically, we provide the function \code{\link{frbs.gen}},
#' which can be used to build a model manually without using a learning method.
#' Moreover, we provide the following features: linguistic hedges, the "and" and "or" operators,
#' and the "dont_care" value for representing a degree of 1. A higher degree of interpretability
#' can also be achieved by using the "dont_care" value. If we want to define rules of varying lengths
#' in the rulebase, we can also use the "dont_care" value. See \code{\link{rulebase}}.
#' \item The purpose of the function \code{\link{predict}} is to obtain predicted values
#' according to the testing data and the model (analogous to the \code{predict} function
#' that is implemented in many other R packages).
#' \item There exist functions \code{\link{summary.frbs}} and \code{\link{plotMF}} to
#' show a summary about an \code{\link{frbs-object}}, and to plot the shapes of
#' the membership functions.
#' \item Exporting an FRBS model to the frbsPMML format can be done by executing \code{\link{frbsPMML}} and \code{\link{write.frbsPMML}}.
#' The frbsPMML format is a universal framework adopted from the Predictive Model Markup Language (PMML) format. Then,
#' in order to consume/import the frbsPMML format to an FRBS model, we call \code{\link{read.frbsPMML}}.
#' }
#'
#' To get started with the package, the user can have a look at some examples included in
#' the documentation of the functions \code{\link{frbs.learn}} and \code{\link{frbs.gen}} for generating models and
#' \code{\link{predict}} for the prediction phase.
#'
#' Also, there are many demos that ship with the package. To get a list of them, type:
#'
#' \code{demo()}
#'
#' Then, to start a demo, type \code{demo(<demo_name_here>)}. All the demos are present as
#' R scripts in the package sources in the \code{"demo"} subdirectory. Note that
#' some of them may take quite a long time, depending on the hardware specification.
#'
#' Currently, there are the following demos available:
#'
#' Regression using the Gas Furnace dataset:
#'
#' \code{demo(WM.GasFur)},
#' \code{demo(SBC.GasFur)},
#' \code{demo(ANFIS.GasFur)},
#'
#' \code{demo(FS.HGD.GasFur)},
#' \code{demo(DENFIS.GasFur)},
#' \code{demo(HyFIS.GasFur)},
#'
#' \code{demo(FIR.DM.GasFur)},
#' \code{demo(GFS.FR.MOGUL.GasFur)},
#'
#' \code{demo(GFS.THRIFT.GasFur)},
#' \code{demo(GFS.LT.RS.GasFur)}.
#'
#' Regression using the Mackey-Glass dataset:
#'
#' \code{demo(WM.MG1000)},
#' \code{demo(SBC.MG1000)},
#' \code{demo(ANFIS.MG1000)},
#'
#' \code{demo(FS.HGD.MG1000)},
#' \code{demo(DENFIS.MG1000)},
#' \code{demo(HyFIS.MG1000)},
#'
#' \code{demo(GFS.THRIFT.MG1000)},
#' \code{demo(FIR.DM.MG1000)},
#'
#' \code{demo(GFS.FR.MOGUL.MG1000)},
#' \code{demo(GFS.LT.RS.MG1000)}.
#'
#' Classification using the Iris dataset:
#'
#' \code{demo(FRBCS.W.Iris)},
#' \code{demo(FRBCS.CHI.Iris)},
#' \code{demo(GFS.GCCL.Iris)},
#'
#' \code{demo(FH.GBML.Iris)},
#' \code{demo(SLAVE.Iris)}.
#'
#' Generating FRBS model without learning process:
#'
#' \code{demo(FRBS.Mamdani.Manual)},
#' \code{demo(FRBS.TSK.Manual)},
#' \code{demo(FRBS.Manual)}.
#'
#' Exporting/importing to/from frbsPMML:
#'
#' \code{demo(WM.GasFur.PMML)},
#' \code{demo(ANFIS.GasFur.PMML)},
#' \code{demo(GFS.GCCL.Iris.PMML)}.
#'
#' The Gas Furnace data and Mackey-Glass data are included in the package,
#' please see \code{\link{frbsData}}. The Iris data is the standard Iris dataset that
#' ships with R.
#'
#' Also have a look at the package webpage \url{http://sci2s.ugr.es/dicits/software/FRBS},
#' where we provide a more extensive introduction as well as additional explanations of
#' the procedures.
#'
#' @name frbs-package
#' @aliases frbs
#' @docType package
#' @title Getting started with the frbs package
#' @seealso \code{\link{frbs.learn}}, \code{\link{frbs.gen}}, \code{\link{frbsPMML}}, and \code{\link{predict}}.
#' @references
#' A. Guazzelli, M. Zeller, W.C. Lin, and G. Williams.,
#' "pmml: An open standard for sharing models", The R Journal, Vol. 1, No. 1, pp. 60-65 (2009).
#'
#' C.C. Lee, "Fuzzy Logic in control systems: Fuzzy logic controller part I",
#' IEEE Trans. Syst., Man, Cybern.,
#' vol. 20, no. 2, pp. 404 - 418 (1990).
#'
#' C.C. Lee, "Fuzzy Logic in control systems: Fuzzy logic controller part II",
#' IEEE Trans. Syst., Man, Cybern.,
#' vol. 20, no. 2, pp. 419 - 435 (1990).
#'
#' E.H. Mamdani and S. Assilian, "An experiment in linguistic synthesis with
#' a fuzzy logic controller," International Journal of Man Machine Studies, vol. 7, no. 1,
#' pp. 1 - 13 (1975).
#'
#' W. Pedrycz, "Fuzzy Control and Fuzzy Systems," New York: Wiley (1989).
#'
#' L.S. Riza, C. Bergmeir, F. Herrera, and J.M. Benitez,
#' "frbs: Fuzzy Rule-Based Systems for Classification and Regression in R,"
#' Journal of Statistical Software, vol. 65, no. 6, pp. 1 - 30 (2015).
#'
#' M. Sugeno and G.T. Kang, "Structure identification of fuzzy model,"
#' Fuzzy Sets Syst., vol. 28, pp. 15 - 33 (1988).
#'
#' T. Takagi and M. Sugeno, "Fuzzy identification of systems and its application to
#' modelling and control", IEEE Transactions on Systems, Man and Cybernetics, vol. 15, no. 1,
#' pp. 116 - 132 (1985).
#'
#' L.A. Zadeh, "Fuzzy sets", Information and Control, vol. 8, pp. 338 - 353 (1965).
#'
# @keywords package fuzzy rule based systems inference frbs regression classification
# @encoding UTF-8
# @encoding Latin-1
#' @author Lala Septem Riza \email{lala.s.riza@@decsai.ugr.es},
#'
#' Christoph Bergmeir \email{c.bergmeir@@decsai.ugr.es},
#'
#' Francisco Herrera \email{herrera@@decsai.ugr.es},
#'
#' and Jose Manuel Benitez \email{j.m.benitez@@decsai.ugr.es}
#'
#' DiCITS Lab, SCI2S group, DECSAI, University of Granada.
#'
#' \url{http://sci2s.ugr.es/dicits/}, \url{http://sci2s.ugr.es}
#'
#' @examples
#' ##################################
#' ## I. Regression Problem
#' ## In this example, we are using the gas furnace dataset that
#' ## contains two input and one output variables.
#' ##################################
#'
#' ## Input data: Using the Gas Furnace dataset
#' ## then split the data to be training and testing datasets
#' data(frbsData)
#' data.train <- frbsData$GasFurnance.dt[1 : 204, ]
#' data.tst <- frbsData$GasFurnance.dt[205 : 292, 1 : 2]
#' real.val <- matrix(frbsData$GasFurnance.dt[205 : 292, 3], ncol = 1)
#'
#' ## Define interval of data
#' range.data <-apply(data.train, 2, range)
#'
#' ## Set the method and its parameters,
#' ## for example, we use Wang and Mendel's algorithm
#' method.type <- "WM"
#' control <- list(num.labels = 15, type.mf = "GAUSSIAN", type.defuz = "WAM",
#' type.tnorm = "MIN", type.snorm = "MAX", type.implication.func = "ZADEH",
#' name = "sim-0")
#'
#' ## Learning step: Generate an FRBS model
#' object.reg <- frbs.learn(data.train, range.data, method.type, control)
#'
#' ## Predicting step: Predict for newdata
#' res.test <- predict(object.reg, data.tst)
#'
#' ## Display the FRBS model
#' summary(object.reg)
#'
#' ## Plot the membership functions
#' plotMF(object.reg)
#'
#' ##################################
#' ## II. Classification Problem
#' ## In this example, we are using the iris dataset that
#' ## contains four input and one output variables.
#' ##################################
#'
#' ## Input data: Using the Iris dataset
#' data(iris)
#' set.seed(2)
#'
#' ## Shuffle the data
#' ## then split the data to be training and testing datasets
#' irisShuffled <- iris[sample(nrow(iris)), ]
#' irisShuffled[, 5] <- unclass(irisShuffled[, 5])
#' tra.iris <- irisShuffled[1 : 105, ]
#' tst.iris <- irisShuffled[106 : nrow(irisShuffled), 1 : 4]
#' real.iris <- matrix(irisShuffled[106 : nrow(irisShuffled), 5], ncol = 1)
#'
#' ## Define range of input data. Note that it is only for the input variables.
#' range.data.input <- apply(iris[, -ncol(iris)], 2, range)
#'
#' ## Set the method and its parameters. In this case we use FRBCS.W algorithm
#' method.type <- "FRBCS.W"
#' control <- list(num.labels = 7, type.mf = "GAUSSIAN", type.tnorm = "MIN",
#' type.snorm = "MAX", type.implication.func = "ZADEH")
#'
#' ## Learning step: Generate fuzzy model
#' object.cls <- frbs.learn(tra.iris, range.data.input, method.type, control)
#'
#' ## Predicting step: Predict newdata
#' res.test <- predict(object.cls, tst.iris)
#'
#' ## Display the FRBS model
#' summary(object.cls)
#'
#' ## Plot the membership functions
#' plotMF(object.cls)
#'
#' #################################################
#' ## III. Constructing an FRBS model from human expert.
#' ## In this example, we only consider the Mamdani model for regression. However,
#' ## other models can be done in the same way.
#' ## Note:
#' ## In the examples, let us consider four input and one output variables.
#' #################################################
#'
#' ## Define a matrix representing shape and parameters of membership functions of input variables.
#' ## The matrix has 5 rows where the first row represent the type of the membership function whereas
#' ## others are values of its parameters.
#' ## Detailed explanation can be seen in the fuzzifier function to construct the matrix.
#' varinp.mf <- matrix(c(2, 0, 20, 40, NA, 4, 20, 40, 60, 80, 3, 60, 80, 100, NA,
#' 2, 0, 35, 75, NA, 3, 35, 75, 100, NA,
#' 2, 0, 20, 40, NA, 1, 20, 50, 80, NA, 3, 60, 80, 100, NA,
#' 2, 0, 20, 40, NA, 4, 20, 40, 60, 80, 3, 60, 80, 100, NA),
#' nrow = 5, byrow = FALSE)
#'
#' ## Define number of linguistic terms of input variables.
#' ## Suppose, we have 3, 2, 3, and 3 numbers of linguistic terms
#' ## for the first, second, third and fourth variables, respectively.
#' num.fvalinput <- matrix(c(3, 2, 3, 3), nrow=1)
#'
#' ## Give the names of the linguistic terms of each input variables.
#' varinput.1 <- c("low", "medium", "high")
#' varinput.2 <- c("yes", "no")
#' varinput.3 <- c("bad", "neutral", "good")
#' varinput.4 <- c("low", "medium", "high")
#' names.varinput <- c(varinput.1, varinput.2, varinput.3, varinput.4)
#'
#' ## Set interval of data.
#' range.data <- matrix(c(0, 100, 0, 100, 0, 100, 0, 100, 0, 100), nrow = 2)
#'
#' ## Define inference parameters.
#' ## Detailed information about values can be seen in the inference function.
#' type.defuz <- "WAM"
#' type.tnorm <- "MIN"
#' type.snorm <- "MAX"
#' type.implication.func <- "ZADEH"
#'
#' ## Give the name of simulation.
#' name <- "Sim-0"
#'
#' ## Provide new data for testing.
#' newdata<- matrix(c(25, 40, 35, 15, 45, 75, 78, 70), nrow = 2, byrow = TRUE)
#' ## the names of variables
#' colnames.var <- c("input1", "input2", "input3", "input4", "output1")
#'
#' ## Define number of linguistic terms of output variable.
#' ## In this case, we set the number of linguistic terms to 3.
#' num.fvaloutput <- matrix(c(3), nrow = 1)
#'
#' ## Give the names of the linguistic terms of the output variable.
#' varoutput.1 <- c("bad", "neutral", "good")
#' names.varoutput <- c(varoutput.1)
#'
#' ## Define the shapes and parameters of the membership functions of the output variables.
#' varout.mf <- matrix(c(2, 0, 20, 40, NA, 4, 20, 40, 60, 80, 3, 60, 80, 100, NA),
#' nrow = 5, byrow = FALSE)
#'
#' ## Set type of model which is "MAMDANI".
#' type.model <- "MAMDANI"
#'
#' ## Define the fuzzy IF-THEN rules;
#' ## In this example we are using the Mamdani model
#' ## Note: e.g.,
#' ## "a1", "and", "b1, "->", "e1" means that
#' ## "IF inputvar.1 is a1 and inputvar.2 is b1 THEN outputvar.1 is e1"
#' ## Make sure that each rule has a "->" sign.
#' rule <- matrix(
#' c("low", "and", "yes", "and", "bad", "and", "low", "->", "bad",
#' "medium", "and", "no", "and", "neutral", "and", "medium", "->", "neutral",
#' "high", "and", "no", "and", "neutral", "and", "low", "->", "good"),
#' nrow = 3, byrow = TRUE)
#'
#' ## Generate a fuzzy model with frbs.gen.
#' object <- frbs.gen(range.data, num.fvalinput, names.varinput,
#' num.fvaloutput, varout.mf, names.varoutput, rule,
#' varinp.mf, type.model, type.defuz, type.tnorm,
#' type.snorm, func.tsk = NULL, colnames.var, type.implication.func, name)
#'
#' ## Plot the membership function.
#' plotMF(object)
#'
#' ## Predicting using new data.
#' res <- predict(object, newdata)$predicted.val
#'
#' #################################################
#' ## IV. Specifying an FRBS model in the frbsPMML format.
#' ## other examples can be seen in the frbsPMML function.
#' #################################################
#' ## Input data
#' data(frbsData)
#' data.train <- frbsData$GasFurnance.dt[1 : 204, ]
#' data.fit <- data.train[, 1 : 2]
#' data.tst <- frbsData$GasFurnance.dt[205 : 292, 1 : 2]
#' real.val <- matrix(frbsData$GasFurnance.dt[205 : 292, 3], ncol = 1)
#' range.data<-matrix(c(-2.716, 2.834, 45.6, 60.5, 45.6, 60.5), nrow = 2)
#'
#' ## Set the method and its parameters
#' method.type <- "WM"
#' control <- list(num.labels = 3, type.mf = "GAUSSIAN", type.defuz = "WAM",
#' type.tnorm = "MIN", type.snorm = "MAX",
#' type.implication.func = "ZADEH", name="sim-0")
#'
#' ## Generate fuzzy model
#' object <- frbs.learn(data.train, range.data, method.type, control)
#'
#' ## 2. Constructing the frbsPMML format
#' frbsPMML(object)
NULL
|
8a9fbfe02c9f6c624e70c82e2f4f3bcda805d991
|
9f8e0df3419d674cc62a1177e5c5b9fc8685d282
|
/WM2018/R/play_game.R
|
3aea48231434cd3891fffc0896063ee82c895d53
|
[
"MIT"
] |
permissive
|
STATWORX/blog
|
89c8b7fbe1f9cd212a9982d13041252ea967fbd4
|
a7fcc793ce9ed7f78edf9d6e86066889236ad9a5
|
refs/heads/master
| 2022-11-28T12:29:15.667372
| 2022-11-16T10:41:33
| 2022-11-16T10:41:33
| 122,983,287
| 119
| 601
|
MIT
| 2022-06-21T21:40:04
| 2018-02-26T14:44:03
|
R
|
UTF-8
|
R
| false
| false
| 3,038
|
r
|
play_game.R
|
# Simulate the outcome of a set of matches.
#
# Input: First three arguments are fixed. Default values are given
#   team_data:      data per team; must provide $rating (play_fun = "skellam")
#                   or $elo (play_fun = "elo"); ignored by "simple".
#   team1, team2:   equal-length vectors of team indices; a team may not play itself.
#   musthavewinner: if TRUE, ties are broken by awarding one random extra goal.
#   play_fun:       one of "simple", "skellam", "elo", "history", "fussballmathe".
#   i.sim:          simulation run index; required for play_fun = "fussballmathe".
#   settings:       list of model parameters (e.g. normalgoals, eta).
# Returns: a matrix with length(team1) rows
# and 2 columns with goals
play_game <- function(team_data,
                      team1,
                      team2,
                      musthavewinner = FALSE,
                      play_fun = "simple",  # BUGFIX: was "simplest", which matched
                                            # no branch, so the default always errored
                      i.sim = numeric(),
                      settings = list()) {
  # Sanity checks
  if (length(team1) != length(team2))
    stop("Lengths of team should be the same")
  if (any(team1 == team2))
    stop("A team cannot play against itself")
  ## Simplest version.
  ## All teams are equal: both scores are Poisson draws with half the
  ## expected total number of goals each.
  if (play_fun == "simple") {
    result <- cbind(rpois(length(team1), lambda = settings$normalgoals / 2),
                    rpois(length(team1), lambda = settings$normalgoals / 2))
  } else if (play_fun == "skellam") {
    ## Skellam distribution: win probability is derived from the teams'
    ## bookmaker ratings, then converted to a Poisson rate via FindParameter.
    p1 <- .91 / team_data$rating[team1]
    p2 <- .91 / team_data$rating[team2]
    prob <- p1 / (p1 + p2)
    lambdaA <- FindParameter(prob, settings$eta)
    Agoals <- rpois(length(prob), lambdaA)
    Bgoals <- rpois(length(prob), settings$normalgoals - lambdaA)
    result <- cbind(Agoals, Bgoals)
  } else if (play_fun == "elo") {
    ## ELO version (no update here). Using sapply here instead of
    ## vectorization in case the elo ranking should be updated after each match.
    result <- t(sapply(seq_len(length(team1)),
                       function(i) {
                         AWinProb <- 1 / (1 + 10^((team_data$elo[team2[i]] - team_data$elo[team1[i]]) / 400))
                         myres <- rbinom(1, size = 1, prob = AWinProb)
                         # Encode a bare win/loss as a fake 1-0 / 0-1 scoreline
                         if (myres == 0) c(0, 1) else c(1, 0)
                       }
                       )
                )
  } else if (play_fun == "history") {
    ## Draw historical scorelines (data.table syntax; scores_agg is a global);
    ## knockout rounds ("Finale") sample from matches that had a winner.
    result <- scores_agg[Round == ifelse(musthavewinner, "Finale", "Prefinale"),
                         sample(result, length(team1), replace = TRUE, prob = percent)]
    result <- matrix(as.integer(unlist(strsplit(result, "-"))),
                     ncol = 2, byrow = TRUE)
  } else if (play_fun == "fussballmathe") {
    if (length(i.sim) == 0) {
      stop("i.sim needed for fussballmathe 'run'")
    } else if (fussballmathe[, max(run)] < i.sim) {
      stop("there are not so many different simulations")
    }
    result <- fussballmathe[run == i.sim, matrix(c(Goal1, Goal2), ncol = 2)]
  } else {
    stop("play_fun not defined")
  }
  # If we MUST have a winner then one simple trick is to add a random goal
  # to one of the two teams that have the same score. Penalty goals seem rather
  # random anyway.
  # BUGFIX: the original computed the tie-break expression but never assigned
  # it back, so ties survived; it could also produce a negative score (a 0-0
  # tie minus one goal). Instead, add one goal to a randomly chosen tied team.
  if (musthavewinner) {
    ties <- result[, 1] == result[, 2]
    if (any(ties)) {
      team1.scores <- rbinom(sum(ties), size = 1, prob = .5)
      result[ties, 1] <- result[ties, 1] + team1.scores
      result[ties, 2] <- result[ties, 2] + (1 - team1.scores)
    }
  }
  # set to integer
  result <- matrix(as.integer(result), ncol = 2, byrow = FALSE)
  return(result)
}
|
1c9b82a79e26fe1f7a6537e85e2b244b529b1c6d
|
fd0622e97276bba2c04d3c2fcba902cdfb65e214
|
/packages/nimble/R/cppDefs_RCfunction.R
|
b43f0f77877034cc2d9f39979dbcf94739e81604
|
[
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-2.0-only",
"GPL-1.0-or-later",
"MPL-2.0",
"GPL-2.0-or-later"
] |
permissive
|
nimble-dev/nimble
|
7942cccd73815611e348d4c674a73b2bc113967d
|
29f46eb3e7c7091f49b104277502d5c40ce98bf1
|
refs/heads/devel
| 2023-09-01T06:54:39.252714
| 2023-08-21T00:51:40
| 2023-08-21T00:51:40
| 20,771,527
| 147
| 31
|
BSD-3-Clause
| 2023-08-12T13:04:54
| 2014-06-12T14:58:42
|
C++
|
UTF-8
|
R
| false
| false
| 29,974
|
r
|
cppDefs_RCfunction.R
|
RCfunctionDef <- setRefClass('RCfunctionDef',
contains = 'cppFunctionDef',
fields = list(
SEXPinterfaceFun = 'ANY',
SEXPinterfaceCname = 'ANY', ## character
ADtemplateFun = 'ANY',
RCfunProc = 'ANY' ## RCfunProcessing object
),
methods = list(
                                 initialize = function(...) {
                                     ## Constructor: register the C++ headers and source includes
                                     ## that every compiled RC function needs (NimArr containers,
                                     ## R internals, accessor classes, distributions, optimization
                                     ## and CppAD derivative support), then defer to the parent
                                     ## cppFunctionDef initializer.
                                     ## No .Call interface has been generated yet, hence empty name.
                                     SEXPinterfaceCname <<- character()
                                     Hincludes <<- c(Hincludes,
                                                     nimbleIncludeFile("NimArr.h"),
                                                     "<Rinternals.h>",
                                                     nimbleIncludeFile("accessorClasses.h"),
                                                     nimbleIncludeFile("nimDists.h"),
                                                     nimbleIncludeFile("nimOptim.h"),
                                                     nimbleIncludeFile("nimbleCppAD.h"),
                                                     nimbleIncludeFile("nimDerivs_dists.h"))
                                     CPPincludes <<- c(CPPincludes,
                                                       '<Rmath.h>',
                                                       '<math.h>',
                                                       nimbleIncludeFile("EigenTypedefs.h"),
                                                       nimbleIncludeFile("Utils.h"),
                                                       nimbleIncludeFile("accessorClasses.h"))
                                     CPPusings <<- c(CPPusings)
                                     callSuper(...)
                                 },
getDefs = function() {
c(list(.self),
if(!inherits(SEXPinterfaceFun, 'uninitializedField')) list(SEXPinterfaceFun) else list(),
if(!inherits(ADtemplateFun, 'uninitializedField')) list(ADtemplateFun) else list()
)
},
getHincludes = function() {
Hinc <- c(Hincludes,
if(!inherits(SEXPinterfaceFun, 'uninitializedField')) SEXPinterfaceFun$getHincludes())
Hinc
},
getCPPincludes = function() {
CPPinc <- c(CPPincludes,
unlist(lapply(CPPincludes, function(x) if(is.character(x)) NULL else x$getCPPincludes()), recursive = FALSE),
if(!inherits(SEXPinterfaceFun, 'uninitializedField')) SEXPinterfaceFun$getCPPincludes())
CPPinc
},
getCPPusings = function() {
CPPuse <- unique(c(CPPusings,
if(!inherits(SEXPinterfaceFun, 'uninitializedField')) SEXPinterfaceFun$getCPPusings()))
CPPuse
},
                                 genNeededTypes = function() { ## this could be extracted and combined with the one for cppDefs_nimbleFunction
                                     ## Ensure a cppDef exists (or is scheduled) for every type this
                                     ## RC function depends on, as recorded by RCfunProcessing:
                                     ##  - nfMethodRC: another RC function called from this one.
                                     ##  - symbolNimbleList: a nimbleList class used by this one.
                                     ## Anything else indicates an internal inconsistency.
                                     for(i in seq_along(RCfunProc$neededRCfuns)) {
                                         neededType<- RCfunProc$neededRCfuns[[i]]
                                         if(inherits(neededType, 'nfMethodRC')) {
                                             thisCppDef <- nimbleProject$getRCfunCppDef(neededType, NULLok = TRUE)
                                             if(is.null(thisCppDef)) {
                                                 ## Not compiled yet: request it from the project
                                                 ## (recursively generating its own needed types)
                                                 ## and keep it as a nested definition of this one.
                                                 thisCppDef <- nimbleProject$needRCfunCppClass(neededType, genNeededTypes = TRUE)
                                                 neededTypeDefs[[neededType$uniqueName]] <<- thisCppDef
                                             } else {
                                                 ## Already compiled elsewhere in the project:
                                                 ## only include its headers/sources.
                                                 Hincludes <<- c(Hincludes, thisCppDef)
                                                 CPPincludes <<- c(CPPincludes, thisCppDef)
                                             }
                                             next
                                         }
                                         if(inherits(neededType, 'symbolNimbleList')) {
                                             ## nimbleLists are passed via smart pointers.
                                             CPPincludes <<- c(CPPincludes, nimbleIncludeFile("smartPtrs.h"))
                                             generatorName <- neededType$nlProc$name
                                             thisCppDef <- nimbleProject$getNimbleListCppDef(generatorName = generatorName)
                                             if(is.null(thisCppDef)){
                                                 ## List class not built yet: build its compilation
                                                 ## info and keep it as a nested definition.
                                                 className <- names(RCfunProc$neededRCfuns)[i]
                                                 thisCppDef <- nimbleProject$buildNimbleListCompilationInfo(className = generatorName, fromModel = fromModel)
                                                 neededTypeDefs[[ className ]] <<- thisCppDef
                                                 Hincludes <<- c(Hincludes, thisCppDef)
                                                 CPPincludes <<- c(CPPincludes, thisCppDef)
                                             }
                                             next
                                         }
                                         stop("There is a neededType for an RCfun that is not valid.", call. = FALSE)
                                     }
                                 },
                                 buildFunction = function(RCfun, parentST = NULL) {
                                     ## Populate this C++ function definition from a processed RC
                                     ## function (an RCfunProcessing object): name, constness,
                                     ## argument and local-variable symbol tables converted to
                                     ## cppVars, the body code block, the return type, and any
                                     ## user-supplied external includes. Optionally also builds
                                     ## the CppAD type-template (derivative) version.
                                     RCfunProc <<- RCfun
                                     name <<- RCfunProc$name
                                     const <<- RCfunProc$const
                                     argNames <- RCfunProc$compileInfo$origLocalSymTab$getSymbolNames() ## this has only the original arguments
                                     args <<- symbolTable2cppVars(RCfunProc$compileInfo$newLocalSymTab, argNames, include = argNames, parentST = parentST)
                                     allNames <- RCfunProc$compileInfo$newLocalSymTab$getSymbolNames() ## this has had local variables added
                                     ## Locals = everything in the new symbol table that is not an
                                     ## argument; they live inside the code block, scoped under args.
                                     localArgs <- symbolTable2cppVars(RCfunProc$compileInfo$newLocalSymTab, argNames, include = allNames[!(allNames %in% argNames)], parentST = args)
                                     code <<- cppCodeBlock(code = RCfunProc$compileInfo$nimExpr,
                                                           objectDefs = localArgs)
                                     if(is.null(RCfunProc$compileInfo$returnSymbol)) stop("returnType not valid. If a nimbleList is being returned, returnType must be the name of the nimbleList definition.")
                                     returnType <<- RCfunProc$compileInfo$returnSymbol$genCppVar()
                                     ## For external calls:
                                     CPPincludes <<- c(CPPincludes, RCfunProc$RCfun$externalCPPincludes)
                                     Hincludes <<- c(Hincludes, RCfunProc$RCfun$externalHincludes)
                                     ## Build the AD template version only when derivatives are
                                     ## enabled globally AND requested for this function
                                     ## (buildDerivs is a list when requested).
                                     buildADtemplateFun <- FALSE
                                     if(isTRUE(nimbleOptions("enableDerivs")))
                                         if(is.list(RCfunProc$RCfun$buildDerivs))
                                             buildADtemplateFun <- TRUE
                                     if(buildADtemplateFun)
                                         ADtemplateFun <<- makeTypeTemplateFunction(name, .self, derivControl = RCfunProc$RCfun$buildDerivs)$fun
                                     invisible(NULL)
                                 },
                                 buildRwrapperFunCode = function(className = NULL, eval = FALSE, includeLHS = TRUE, returnArgsAsList = TRUE, includeDotSelf = '.self', env = globalenv(), dll = NULL, includeDotSelfAsArg = FALSE) {
                                     ## Construct (and optionally evaluate) the R-side wrapper
                                     ## function that invokes the generated SEXP interface via
                                     ## .Call(). The wrapper is built by code generation:
                                     ## a .Call expression is assembled, wrapped in a body that
                                     ## guards against a cleared DLL/object, and spliced into a
                                     ## parsed function skeleton with the RC function's arguments.
                                     ##  - className non-NULL: wrapper is for a class member and
                                     ##    passes an external pointer to the object.
                                     ##  - returnArgsAsList: return all arguments plus the return
                                     ##    value as a named list; otherwise just the return value.
                                     ##  - eval: return an evaluated closure (with the .Call symbol
                                     ##    resolved against dll when given); otherwise return code.
                                     returnVoid <- returnType$baseType == 'void'
                                     asMember <- !is.null(className)
                                     argsCode = RCfunProc$RCfun$arguments
                                     argNames <- names(argsCode)
                                     if(is.character(SEXPinterfaceCname) && is.null(dll) && eval) {
                                         warning("creating a .Call() expression with no DLL information")
                                     }
                                     # avoid R CMD check problem with registration
                                     # ok not to use getNativeSymbolInfo with a dll argument because SEXPinterfaceCname can't possible be in nimble.so, so it is unique to the project dll.
                                     ## Assemble .Call(<Cname>, arg1, arg2, ...) as a language object.
                                     txt <- ".Call(SEXPname)"
                                     dotCall <- eval(substitute(substitute(txt1, list(SEXPname = SEXPinterfaceCname)), list(txt1 = parse(text = txt)[[1]])))
                                     for(i in seq_along(argNames)) dotCall[[i+2]] <- as.name(argNames[i])
                                     ## Member functions additionally pass the external pointer.
                                     if(asMember & is.character(includeDotSelf)) dotCall[[length(argNames) + 3]] <- as.name(includeDotSelf)
                                     returnInvisible <- FALSE
                                     if(returnArgsAsList) {
                                         ## Return value is a named list of all args (+ "return").
                                         ansReturnName <- substitute(ans$return, list())
                                         argNamesAssign <- if(length(argNames) > 0) paste0('\"',argNames, '\"') else character(0)
                                         if(!returnVoid) argNamesAssign <- c(argNamesAssign, '\"return\"')
                                         if(length(argNamesAssign) > 0)
                                             namesAssign <- parse(text = paste0('names(ans) <- c(', paste(argNamesAssign, collapse = ', '), ')'), keep.source = FALSE)[[1]]
                                         else
                                             namesAssign <- quote(ans <- NULL)
                                     } else {
                                         ## Only the return value (last element of the .Call result).
                                         ansReturnName <- substitute(ans, list())
                                         if(length(argNames)+!returnVoid > 0 & !returnVoid)
                                             namesAssign <- parse(text = paste0('ans <- ans[[',length(argNames)+!returnVoid,']]'), keep.source = FALSE)[[1]]
                                         else {
                                             namesAssign <- quote(ans <- NULL)
                                             returnInvisible <- TRUE
                                         }
                                     }
                                     ## Rebuild the formal argument list, keeping default values.
                                     argNamesCall = argNames
                                     for(i in seq_along(argNamesCall) ){
                                         if(argsCode[i] != '')
                                             argNamesCall[i] = paste(argNames[i], " = ", deparse(argsCode[[i]]) )
                                         ## deparse instead of as.character is needed in the above line if there is a negative default
                                         ## because it will be parsed as a unary - operator with the number as an argument
                                     }
                                     if(includeDotSelfAsArg) argNamesCall <- c(argNamesCall, includeDotSelf)
                                     ## A nimbleList return type requires its generator (and any
                                     ## nested list generators) to be registered in the user
                                     ## namespace so the result can be rebuilt on the R side.
                                     if(inherits(RCfunProc$compileInfo$returnSymbol, 'symbolNimbleList')){
                                         returnNimListGen <- RCfunProc$compileInfo$returnSymbol$nlProc$nlGenerator
                                         addGenListToUserNamespace <- function(nimListGen, genList = list()){
                                             nimList <- nimListGen$new()
                                             if(is.null(nimbleUserNamespace$nimListGens[[nimList$nimbleListDef$className]]))
                                                 nimbleUserNamespace$nimListGens[[nimList$nimbleListDef$className]] <- nimListGen
                                             nestedNimLists <- nimList$nestedListGenList
                                             for(i in seq_along(nestedNimLists)){
                                                 tempGenList <- nestedNimLists[[i]]
                                                 addGenListToUserNamespace(tempGenList, genList)
                                             }
                                         }
                                         addGenListToUserNamespace(returnNimListGen)
                                     }
                                     ## Skeleton 'function(args) A'; 'A' is replaced by the body below.
                                     funCode <- parse(text = paste0('function(', paste0(argNamesCall, collapse = ','),') A'), keep.source = FALSE)[[1]]
                                     ## the first warning may be removed later if there is no CnativeSymbolInfo_ to be created or if eval is FALSE (as for a nimbleFunction member)
                                     if(asMember & is.character(includeDotSelf))
                                         bodyCode <- substitute({
                                             if(is.null(CnativeSymbolInfo_)) {warning("Trying to call compiled nimbleFunction that does not exist (may have been cleared)."); return(NULL)};
                                             if(is.null(DOTSELFNAME)) stop('Object for calling this function is NULL (may have been cleared)');
                                             ans <- DOTCALL; NAMESASSIGN; RETURN}, list(DOTCALL = dotCall, NAMESASSIGN = namesAssign,
                                                                                        DOTSELFNAME = includeDotSelf,
                                                                                        RETURN = if(returnInvisible) quote(invisible(ans)) else quote(ans)
                                                                                        ))
                                     else
                                         bodyCode <- substitute({
                                             if(is.null(CnativeSymbolInfo_)) {warning("Trying to call compiled nimbleFunction that does not exist (may have been cleared)."); return(NULL)};
                                             ans <- DOTCALL; NAMESASSIGN; RETURN},
                                             list(DOTCALL = dotCall, NAMESASSIGN = namesAssign,
                                                  RETURN = if(returnInvisible) quote(invisible(ans)) else quote(ans)
                                                  ))
                                     funCode[[3]] <- bodyCode
                                     funCode[[4]] <- NULL
                                     if(includeLHS) funCode <- substitute(FUNNAME <- FUNCODE, list(FUNNAME = as.name(paste0('R',name)), FUNCODE = funCode))
                                     if(eval) {
                                         fun = eval(funCode)
                                         newenv <- eval(quote(new.env()), envir = env)
                                         environment(fun) = newenv
                                         if(!is.null(dll)) {
                                             # replace the name of the symbol in the .Call() with the resolved symbol.
                                             body(fun)[[3]][[3]][[2]] = quote(CnativeSymbolInfo_)
                                             assign('CnativeSymbolInfo_', getNativeSymbolInfo(SEXPinterfaceCname, dll), envir = newenv)
                                         } else {
                                             body(fun)[[2]] <- NULL ## remove the check for valid CnativeSymbolInfo_
                                         }
                                         fun
                                     } else {
                                         funCode[[3]][[2]] <- NULL
                                         funCode
                                     }
                                 },
## Build the C-level SEXP interface function ("CALL_<name>") for this
## compiled RC function. The generated C++ wrapper:
##   1. copies each incoming SEXP argument into the matching C++ local,
##   2. invokes the compiled function between GetRNGstate()/PutRNGstate(),
##   3. packs every argument (and the return value, if any) into a VECSXP
##      list that is handed back to R.
## className: name of the enclosing C++ class for member functions, or
##            NULL for a free function.
## Side effects: sets the fields SEXPinterfaceCname and SEXPinterfaceFun
## on this object and may append to CPPincludes.
buildSEXPinterfaceFun = function(className = NULL) {
    asMember <- !is.null(className)
    ## C++ variable declarations corresponding to the function's original locals
    objects <- symbolTable2cppVars(RCfunProc$compileInfo$origLocalSymTab)
    argNames <- RCfunProc$compileInfo$origLocalSymTab$getSymbolNames()
    Snames <- character(length(argNames))  # SEXP argument names ("S_<arg>")
    copyLines <- list()                    # SEXP -> C++ copy statements
    conditionalLineList <- list()          # C++ -> SEXP copy-back statements
    interfaceArgs <- symbolTable()         # SEXP formal arguments of the wrapper
    objects$setParentST(interfaceArgs)
    returnVoid <- returnType$baseType == 'void'
    copyLineCounter <- 1                   # NOTE(review): appears unused below
    for(i in seq_along(argNames)) {
        if(exists('const', RCfunProc$compileInfo$origLocalSymTab$getSymbolObject(argNames[i]), inherits=FALSE)){
            objects$symbols[[i]] <- symbolDouble(objects$symbols[[i]]$name, NA, 1)$genCppVar() ## remove 'const' local vars from sexpInterfaceFun
        }
        Snames[i] <- Rname2CppName(paste0('S_', argNames[i]))
        ## For each argument to the RCfunction we need a corresponding SEXP argument to the interface function
        interfaceArgs$addSymbol(cppSEXP(name = Snames[i]))
        ## and we need a line to copy from the SEXP to the local variable
        ## The to argument uses the origLocalSymbolObject rather than the objects (which has cppVars) because that has the nDim
        ## The name of that and the new one in objects must match
        tempLines <- buildCopyLineFromSEXP(interfaceArgs$getSymbolObject(Snames[i]),
                                           RCfunProc$compileInfo$origLocalSymTab$getSymbolObject(argNames[i]))
        ## nimbleList arguments need the smart-pointer support header
        if(inherits(RCfunProc$compileInfo$origLocalSymTab$getSymbolObject(argNames[i]), 'symbolNimbleList')){
            CPPincludes <<- c(CPPincludes, nimbleIncludeFile("smartPtrs.h"))
        }
        copyLines <- c(copyLines, tempLines)
    }
    ## Call to the compiled function with all (now-copied) arguments
    RHScall <- as.call(c(list(as.name(name)),
                         lapply(argNames, as.name)))
    if(asMember) {
        ## Add a final argument for the extptr
        interfaceArgs$addSymbol(cppSEXP(name = 'SextPtrToObject'))
        ## And make the RHScall: cast the external pointer back to the class
        ## and call the member function through it
        RHScall <- substitute(cppMemberDereference(
            template(static_cast, cppPtrType(CN))(R_ExternalPtrAddr(SextPtrToObject)), RHS),
            list(CN = as.name(className), RHS = RHScall))
    }
    if(returnVoid) {
        fullCall <- RHScall
    } else {
        objects$addSymbol(cppSEXP(name = 'S_returnValue_1234')) ## Object for the return statement: "return(S_returnValue_1234)"
        LHSvar <- RCfunProc$compileInfo$returnSymbol$genCppVar()
        LHSvar$name <- "LHSvar_1234"
        objects$addSymbol(LHSvar)
        fullCall <- substitute(LHS <- RHS, list(LHS = as.name(LHSvar$name), RHS = RHScall))
    }
    ## Put GetRNGstate() and PutRNGstate() around the call.
    fullCall <- substitute({GetRNGstate(); FULLCALL; PutRNGstate()}, list(FULLCALL = fullCall))
    returnAllArgs <- TRUE
    ## Pack up all inputs and the return value in a list.
    if(returnAllArgs) {
        numArgs <- length(argNames)
        if(numArgs + !returnVoid > 0) {
            objects$addSymbol(cppSEXP(name = 'S_returnValue_LIST_1234'))
            ## Allocate the result list: one slot per argument plus one more
            ## when the return type is non-void.
            allocVectorLine <- substitute(PROTECT(S_returnValue_LIST_1234 <- Rf_allocVector(VECSXP, nAp1)), list(nAp1 = numArgs + !returnVoid))
            conditionalLineList <- list()
            returnListLines <- list()
            if(numArgs > 0) {
                for(i in 1:numArgs) {
                    argSymTab <- RCfunProc$compileInfo$origLocalSymTab$getSymbolObject(argNames[i])
                    ## generateConditionalLines() is the same as a call to buildCopyLineToSEXP() with one exception:
                    ## if the from (c++) argument is a nimbleList, we first check whether that nimbleList contains a pointer to a sexp object.
                    ## If so, we copy the nl to that object. If not, we create a new nl sexp object, and copy the c++ nl into that .
                    conditionalLineList <- c(conditionalLineList,
                                             generateConditionalLines(argSymTab, interfaceArgs$getSymbolObject(Snames[i])))
                    returnListLines <- c(returnListLines,
                                         substitute(SET_VECTOR_ELT(S_returnValue_LIST_1234, Im1, THISSEXP),
                                                    list(Im1 = i-1, THISSEXP = as.name(Snames[i]))))
                    isList <- inherits(argSymTab, 'symbolNimbleList')
                    if(isList){
                        ## reset the nimbleList's copy flags so a later call re-copies
                        resetRCopiedFlag <- paste0(argSymTab$name,"->resetFlags();")
                        resetRCopiedFlagLine <- substitute(cppLiteral(resetText), list(resetText = resetRCopiedFlag))
                        returnListLines <- c(returnListLines,
                                             resetRCopiedFlagLine)
                    }
                }
            }
            if(!returnVoid) {
                RCfunProc$compileInfo$returnSymbol$name <<- LHSvar$name
                returnSymTab <- RCfunProc$compileInfo$returnSymbol
                conditionalLineList <- c(generateConditionalLines(returnSymTab,
                                                                  objects$getSymbolObject('S_returnValue_1234')), conditionalLineList)
                ## the return value occupies the last slot of the result list
                returnListLines <- c(returnListLines,
                                     substitute(SET_VECTOR_ELT(S_returnValue_LIST_1234, I, THISSEXP),
                                                list(I = numArgs, THISSEXP = as.name('S_returnValue_1234'))))
                isList <- inherits(returnSymTab, 'symbolNimbleList')
                if(isList){
                    resetRCopiedFlag <- paste0(returnSymTab$name,"->resetFlags();")
                    resetRCopiedFlagLine <- substitute(cppLiteral(resetText), list(resetText = resetRCopiedFlag))
                    returnListLines <- c(returnListLines,
                                         resetRCopiedFlagLine)
                }
            }
            returnLine <- quote(return(S_returnValue_LIST_1234))
            ## balance every PROTECT emitted above: one per copied-back SEXP,
            ## one for the result list, one for the return-value SEXP (if any)
            unprotectLine <- substitute(UNPROTECT(N), list(N = numArgs + 1 + !returnVoid))
            allCode <- embedListInRbracket(c(copyLines, list(fullCall),
                                             list(allocVectorLine),
                                             conditionalLineList,
                                             returnListLines,
                                             list(unprotectLine),
                                             list(returnLine)))
        } else { ## No input or return objects
            returnLine <- quote(return(R_NilValue))
            allCode <- embedListInRbracket(c(copyLines, list(fullCall),
                                             list(returnLine)))
        }
    } else {
        writeLines("Haven't written the single return case yet")
    }
    ## Register the generated wrapper on this object
    SEXPinterfaceCname <<- paste0('CALL_',Rname2CppName(paste0(if(!is.null(className)) paste0(className,'_') else NULL, name))) ##Rname2CppName needed for operator()
    SEXPinterfaceFun <<- cppFunctionDef(name = SEXPinterfaceCname,
                                        args = interfaceArgs,
                                        code = cppCodeBlock(code = RparseTree2ExprClasses(allCode), objectDefs = objects),
                                        returnType = cppSEXP(),
                                        externC = TRUE,
                                        CPPincludes = list(nimbleIncludeFile("RcppUtils.h")))
    invisible(NULL)
}
))
## Map from R scalar type name to the C++ helper that converts an incoming
## SEXP into the corresponding C++ scalar value.
SEXPscalarConvertFunctions <- list(
    double    = 'SEXP_2_double',
    integer   = 'SEXP_2_int',
    logical   = 'SEXP_2_bool',
    character = 'STRSEXP_2_string')

## Inverse map: C++ scalar value back to a freshly allocated SEXP.
toSEXPscalarConvertFunctions <- list(
    double    = 'double_2_SEXP',
    integer   = 'int_2_SEXP',
    logical   = 'bool_2_SEXP',
    character = 'string_2_STRSEXP')
## Generate the parse-tree statement(s) that copy an input SEXP into the
## corresponding C++ local variable.
## fromSym: cppSEXP symbol holding the incoming SEXP argument.
## toSym:   symbol describing the C++ local that receives the value.
## Returns a single substitute()d call, or (for nimbleLists) a list of
## two cppLiteral lines: allocate a fresh C++ list, then copy into it.
buildCopyLineFromSEXP <- function(fromSym, toSym) {
    ## nimbleList arguments: "x = new <proc>;" then "x->copyFromSEXP(S_x);"
    if(inherits(toSym, c('symbolNimbleList', 'symbolNimbleListGenerator'))) {
        newText  <- paste0(toSym$name, " = new ", toSym$nlProc$name, ";")
        copyText <- paste0(toSym$name, "->copyFromSEXP(", fromSym$name, ");")
        return(list(substitute(cppLiteral(answerText), list(answerText = newText)),
                    substitute(cppLiteral(answerText), list(answerText = copyText))))
    }
    if(inherits(toSym, 'symbolBasic')) {
        ## scalar: dispatch to the type-specific SEXP_2_* converter
        if(toSym$nDim == 0)
            return(substitute(TO <- CONVERT(FROM),
                              list(TO = as.name(toSym$name),
                                   FROM = as.name(fromSym$name),
                                   CONVERT = as.name(SEXPscalarConvertFunctions[[toSym$type]]))))
        ## string vectors have a dedicated copier
        if(toSym$type == 'character')
            return(substitute(STRSEXP_2_vectorString(FROM, TO),
                              list(TO = as.name(toSym$name),
                                   FROM = as.name(fromSym$name))))
        ## numeric/logical arrays: templated copy on the number of dimensions
        return(substitute(template(SEXP_2_NimArr, NDIM)(FROM, TO),
                          list(TO = as.name(toSym$name),
                               FROM = as.name(fromSym$name),
                               NDIM = toSym$nDim)))
    }
    if(inherits(toSym, 'symbolInternalType')) {
        internalCase <- as.character(toSym[['argList']][[1]])
        if(internalCase == 'indexedNodeInfoClass')
            return(substitute(TO <- indexedNodeInfo(SEXP_2_vectorDouble(FROM)),
                              list(TO = as.name(toSym$name),
                                   FROM = as.name(fromSym$name))))
        stop(paste("Error, don't know how to make a SEXP copy line for something of class internal type, case", internalCase))
    }
    stop(paste("Error, don't know how to make a SEXP copy line for something of class", class(toSym)))
}
## Generate the parse-tree statement that copies a C++ value back into a
## SEXP for return to R.
## fromSym:         symbol for the C++ variable being copied out.
## toSym:           cppSEXP symbol receiving the value.
## writeCall:       for nimbleLists, emit createNewSEXP() instead of a copy.
## conditionalText: literal C++ text (e.g. an "if (...) " guard) prefixed
##                  to nimbleList lines.
buildCopyLineToSEXP <- function(fromSym, toSym, writeCall = FALSE, conditionalText = "") {
    ## nimbleLists are handled as raw C++ literals
    if(inherits(fromSym, c('symbolNimbleList', 'symbolNimbleListGenerator'))) {
        literalText <-
            if(writeCall == TRUE)
                paste0(conditionalText, fromSym$name, "->createNewSEXP();")
            else
                paste0(conditionalText, 'PROTECT(', toSym$name, ' = ', fromSym$name, "->copyToSEXP());")
        return(substitute(cppLiteral(answerText), list(answerText = literalText)))
    }
    if(inherits(fromSym, 'symbolBasic')) {
        ## scalar: type-specific *_2_SEXP converter
        if(fromSym$nDim == 0)
            return(substitute(PROTECT(TO <- CONVERT(FROM)),
                              list(TO = as.name(toSym$name),
                                   FROM = as.name(fromSym$name),
                                   CONVERT = as.name(toSEXPscalarConvertFunctions[[fromSym$type]]))))
        ## string vectors
        if(fromSym$type == 'character')
            return(substitute(PROTECT(TO <- vectorString_2_STRSEXP(FROM)),
                              list(TO = as.name(toSym$name),
                                   FROM = as.name(fromSym$name))))
        ## numeric/logical arrays: templated on number of dimensions
        return(substitute(PROTECT(TO <- template(NimArr_2_SEXP, NDIM)(FROM)),
                          list(TO = as.name(toSym$name),
                               FROM = as.name(fromSym$name),
                               NDIM = fromSym$nDim)))
    }
    if(inherits(fromSym, 'symbolInternalType')) {
        internalCase <- as.character(fromSym[['argList']][[1]])
        if(internalCase == 'indexedNodeInfoClass')
            return(substitute(PROTECT(TO <- (vectorDouble_2_SEXP(FROM))),
                              list(TO = as.name(toSym$name),
                                   FROM = as.name(fromSym$name))))
        stop(paste("Error, don't know how to make a SEXP copy line for something of class internal type, case", internalCase))
    }
    stop(paste("Error, don't know how to make a copy line to SEXP for something of class", class(fromSym)))
}
## Build the copy-to-SEXP line(s) for one value, guarding nimbleLists:
## when the C++ nimbleList does not yet hold an R object pointer, an extra
## guarded line creates a fresh SEXP first; the unconditional copy line
## always follows.
generateConditionalLines <- function(LHSSymTab, RHSSymTab) {
    lines <- list()
    if(inherits(LHSSymTab, 'symbolNimbleList')) {
        guard <- paste0('if (!(*', LHSSymTab$name, ').RObjectPointer) ')
        lines <- c(lines,
                   buildCopyLineToSEXP(LHSSymTab, RHSSymTab,
                                       writeCall = TRUE, conditionalText = guard))
    }
    c(lines, buildCopyLineToSEXP(LHSSymTab, RHSSymTab, writeCall = FALSE))
}
|
14e08923d8855d79e7dbec616e24cbd1e8025848
|
eb8cd6719d2f9e4855f05e39e10d23b26bfa2d7c
|
/R/SRM_PRINT_SUMMARY_LAYOUT2.R
|
cd431dada506c4ea03b138d4bb02e26a73283585
|
[] |
no_license
|
alexanderrobitzsch/srm
|
060bb5ed866d2dd8ee5a2cbd587be2336d8952bd
|
5e403a8e8ebd8387203a104930eaaec56bf3e188
|
refs/heads/master
| 2022-11-04T00:32:02.408800
| 2022-11-03T10:14:46
| 2022-11-03T10:14:46
| 163,948,083
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,228
|
r
|
SRM_PRINT_SUMMARY_LAYOUT2.R
|
## File Name: SRM_PRINT_SUMMARY_LAYOUT2.R
## File Version: 0.09
## Print the layout-2 summary of a fitted SRM model object: call, package
## versions, timing, estimation diagnostics, input-data dimensions, the
## estimated parameter table, and the model-implied covariance matrices
## for every group.
## object: fitted SRM object; digits: number of digits for rounding.
SRM_PRINT_SUMMARY_LAYOUT2 <- function(object, digits) {
    #- model call and package/session information
    SRM_SUMMARY_PRINT_CALL(object=object)
    SRM_SUMMARY_PACKAGE_VERSIONS(object=object)

    #- computation times
    param_table <- object$parm.table
    cat("\nDate of analysis:", paste(object$time_start),"\n")
    cat("Time pre-processing: "); print(object$time$time_pre)
    cat("Time estimation: "); print(object$time$time_opt)
    cat("Time post-processing: "); print(object$time$time_post)

    #- estimation diagnostics
    cat("\nINFORMATIONS ABOUT ESTIMATION\n\n")
    cat("Log-likelihood","=", round(object$loglike, digits),"\n")
    cat("Deviance","=", round(object$dev, digits),"\n")
    cat("Number of estimated parameters","=", attr(param_table, "npar"), "\n")
    opt_info <- object$res_opt
    cat("Converged","=", opt_info$converged, "\n")
    cat("Number of iterations","=", opt_info$iter, "\n")
    cat("optimizer","=", opt_info$optimizer, "\n")
    cat("Optimization function","=", opt_info$opt_label, "\n")
    cat("Maximum absolute value of relative gradient", "=", object$grad_maxabs, "\n")

    #- input data dimensions
    cat("\nINPUT DATA\n\n")
    cat("Number of groups","=", object$ngroups, "\n")
    cat("Number of Round-Robin groups","=", object$nrr, "\n")
    cat("Number of persons","=", object$npersons, "\n")
    cat("Number of dyads","=", object$ndyads, "\n")

    #- parameter table: selected columns with rounded estimates
    cat("\nESTIMATED PARAMETERS\n\n")
    keep_cols <- c("index", "group", "lhs", "op", "rhs", "mat", "row", "col",
                   "fixed", "est", "se", "lower")
    shown <- param_table[, keep_cols]
    numeric_cols <- c("est","se")
    shown[, numeric_cols] <- round(shown[, numeric_cols], digits)
    print(shown)

    #- model-implied covariance matrices, person and dyad level per group
    cat("\nMODEL IMPLIED COVARIANCE MATRICES\n")
    for (gg in 1:object$ngroups){
        cat(paste0("\nGroup ", gg, ", Person Level\n\n"))
        print(round(object$sigma[["U"]][[gg]], digits))
        cat(paste0("\nGroup ", gg, ", Dyad Level\n\n"))
        print(round(object$sigma[["D"]][[gg]], digits))
    }
}
|
7a5d1dc6e32e23275a5df78f96948757b24c1d44
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fs/examples/file_info.Rd.R
|
c0c535988d2c2cfe94b954b9a748a60ddbe37640
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
file_info.Rd.R
|
library(fs)
### Name: file_info
### Title: Query file metadata
### Aliases: file_info

### ** Examples

## Don't show:
.old_wd <- setwd(tempdir())
## End(Don't show)

## Write a sample file, then query its metadata.
write.csv(mtcars, "mtcars.csv")
file_info("mtcars.csv")

# Files in the working directory modified more than 20 days ago
info <- file_info(dir_ls())
age_days <- difftime(Sys.time(), info$modification_time, units = "days")
info$path[age_days > 20]

# Cleanup
file_delete("mtcars.csv")
## Don't show:
setwd(.old_wd)
## End(Don't show)
|
33fb3943ccdca3eeb6997e740d2dc28860c073f5
|
b00e1393dea3bab1fd54772445600641b5364b5d
|
/graphicsforstatistics_2e_figures_scripts_r/Chapter 2/fig02x002.R
|
29a6c6eafa6f5b377738c5cb92df4e4286901651
|
[] |
no_license
|
saqibarfeen/coding_time
|
549afc9e46d85e62de5d722aec0d6d1101fde428
|
a963cb993d03b368c6e91d5b360624530fa7d7e9
|
refs/heads/master
| 2020-07-06T19:18:44.859104
| 2019-09-11T03:42:02
| 2019-09-11T03:42:02
| 203,114,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,197
|
r
|
fig02x002.R
|
## Figure 2.2: dot chart of UN budget appropriations by budget item, in
## millions of US dollars. Opens a 4.5"x4.5" Windows graphics device and
## writes the figure to fig02x002.eps and fig02x002.pdf.
fig02x002<-function(){
    budget_item <- c("Overall coordination",
                     "Political affairs",
                     "International law",
                     "International cooperation",
                     "Regional cooperation",
                     "Human rights",
                     "Public information",
                     "Management",
                     "Internal oversight",
                     "Administrative",
                     "Capital",
                     "Safety & security",
                     "Development",
                     "Staff assessment")
    usd_millions <- c(718555600, 626069600, 87269400, 398449400,
                      477145600, 259227500, 184000500, 540204300, 35997700,
                      108470900, 58782600, 197169300, 18651300, 461366000) / 1000000
    ## reverse so the first item ends up at the top of the chart
    budget_item <- rev(budget_item)
    usd_millions <- rev(usd_millions)
    ## fresh device with a wide left margin for the item labels
    graphics.off()
    windows(width = 4.5, height = 4.5, pointsize = 12)
    par(fin = c(4.45, 4.45), pin = c(4.45, 4.45),
        mai = c(0.85, 2.0, 0.0, 0.25))
    ## empty plot frame; guides, points, and axes are drawn manually below
    plot(usd_millions, seq_along(budget_item), type = "n",
         xaxt = "n", yaxt = "n", xlim = c(0, 800/1.04),
         ylim = c(0, length(budget_item) + 1),
         xlab = 'Millions of US Dollars',
         ylab = '', xaxs = "r", yaxs = "i")
    ## dotted guide line from the axis to each value, then the dot itself
    for (k in seq_along(budget_item)) {
        lines(x = c(0, usd_millions[k]), y = c(k, k), lty = 3)
    }
    points(x = usd_millions, y = seq_along(budget_item), pch = 19, cex = 1.0)
    axis(1, at = 200*(0:4), labels = TRUE, tick = TRUE, outer = FALSE)
    axis(2, at = 1:14 + 0.25, labels = budget_item, tick = FALSE,
         outer = FALSE, las = 2, hadj = 1, padj = 1)
    ## export the finished figure
    dev.copy2eps(file = "fig02x002.eps")
    dev.copy2pdf(file = "fig02x002.pdf")
}
|
4464345ece7100f065955435acb0ee0fc0c2b32a
|
3a3f54faa30b28fdd03a1407a0d06357a83d9404
|
/Classes/Generics.R
|
146f38b3c43968b45dc9b5666f85ece976efc038
|
[] |
no_license
|
marc-fred/SpatialCoalescent
|
865c7d55fb75a87e48acc0eb8709fd0b208ddbb8
|
a8e032c234cf0c044fd17cdcf4ed6b8d355cc005
|
refs/heads/master
| 2021-01-18T20:22:51.821149
| 2015-06-24T17:47:08
| 2015-06-24T17:47:08
| 39,379,883
| 0
| 0
| null | 2015-07-20T11:23:54
| 2015-07-20T11:23:54
| null |
UTF-8
|
R
| false
| false
| 886
|
r
|
Generics.R
|
## S4 generic declarations for the package. Each setGeneric() call only
## declares the generic; concrete methods are registered elsewhere via
## setMethod().

## Serialize an object to a connection, with a textual description.
setGeneric(
    name = "ToStream",
    def = function(object, con, description) { return(standardGeneric("ToStream"))})
## Write an object to a file.
setGeneric(
    name = "myWrite",
    def = function(object, file) { return(standardGeneric("myWrite"))})
## Extract the stored values of an object.
setGeneric(
    name = "getValues",
    def = function(object) { return(standardGeneric("getValues"))})
## Plot an object (package-specific plotting, distinct from base plot()).
setGeneric(
    name = "myPlot",
    def = function(object) { return(standardGeneric("myPlot"))})
## Extract the parameter set of an object.
setGeneric(
    name = "getParameters",
    def = function(object) { return(standardGeneric("getParameters"))})
## Apply the function represented by `object` to the value(s) `xval`.
setGeneric(
    name = "applyFunction",
    def = function(object, xval) { return(standardGeneric("applyFunction"))})
## Run the model represented by `object`.
setGeneric(
    name = "applyModel",
    def = function(object) { return(standardGeneric("applyModel"))})
## Compute a distance matrix.
## NOTE(review): the signature (object, con, description) is identical to
## ToStream and looks copy-pasted — confirm the intended arguments for a
## distance-matrix computation before adding methods.
setGeneric(
    name = "computeDistanceMatrix",
    def = function(object, con, description) { return(standardGeneric("computeDistanceMatrix"))})
|
07ff68acd1ecbbc8824744751ff9da01ead2ff47
|
c78f4092081a7e68b62073d46f385e25255f566b
|
/R/forecast_loops.R
|
95fc1cf53f2f590c792df69d267728ece6b3136b
|
[] |
no_license
|
DrRoad/shiny.forecast
|
37d7ecb763754bd7a12f97c4a57b7623f66df5e4
|
8337e9f49562ec23a77fd1163b8ce7e8b4733957
|
refs/heads/master
| 2020-04-26T21:09:13.508135
| 2016-03-14T19:01:43
| 2016-03-14T19:01:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,115
|
r
|
forecast_loops.R
|
#require(data.table)
#source('forecast_functions.R')
## Combine the per-method forecast files written to output/ into one wide
## data.table, average across methods, join listing information, and
## optionally join a BI (inventory) table.
## BI: optional data.table keyed by TT_CUSTOMER/GIN/WK_END_DT to merge in.
## NOTE(review): relies on the globals `validate` (long actuals table) and
## `tblListing` (listing flags) existing in the calling environment —
## confirm they are defined before this runs.
append.dt <- function(BI=NULL) {
  ## all forecast output files ("fc*") produced by previous runs
  files <- list.files(path="output/",pattern="fc*")
  x <- NULL
  for(i in 1:length(files)){
    x <- rbind(x,fread(paste0("output/",files[i]),colClasses = c('Date','integer','character','character','character')))
  }
  #Ensure date type for join with listing
  x[,WK_END_DT:=as.Date(x$WK_END_DT,'%Y-%m-%d')]
  ## zero forecasts are treated as missing so they are excluded from the mean
  x[value==0,value:=NA]
  ## forecast-method names become the columns averaged after casting wide
  AvgCols <- unique(x$method)
  x <- rbind(x,validate)
  ## long -> wide: one column per forecast method (and the validate rows)
  x <- dcast.data.table(x,WK_END_DT+TT_CUSTOMER+GIN ~ method,fun.aggregate = sum)
  #Straight Average All Forecast methods
  x[,mean:=round(rowMeans(.SD,na.rm=T),0),keyby=.(WK_END_DT,TT_CUSTOMER,GIN),.SDcols=AvgCols]
  x <- x[order(TT_CUSTOMER,GIN,WK_END_DT)]
  ## keyed join against the listing table
  setkey(x,TT_CUSTOMER,GIN,WK_END_DT)
  setkey(tblListing,TT_CUSTOMER,GIN,WK_END_DT)
  x <- tblListing[x]
  ## average forecast restricted to listed weeks
  x[!is.na(LISTED) & !is.na(mean),mean_listed:=mean*LISTED]
  ##IF provided with BI data.table, then calc Inventory Rundown
  if(!is.null(BI)){
    setkey(x,TT_CUSTOMER,GIN,WK_END_DT)
    setkey(BI,TT_CUSTOMER,GIN,WK_END_DT)
    x <- BI[x]
  }
  x[,YEAR:=year(WK_END_DT)]
  return(x)
}
## Stub: allocation-based forecasting. Not implemented yet; currently a
## no-op returning NULL. The signature mirrors group.forecast() so it can
## be slotted in as an alternative later.
allocation.forecast <- function(train,horizon,fname,end_date){
}
## Forecast every series (column) of `train` `horizon` weeks past
## `end_date` using the forecast method named by `fname`, returning a long
## data.frame with columns WK_END_DT, value, GIN, TT_CUSTOMER, method,
## END_DATE.
## train:    multivariate weekly series, one column per item, with column
##           names of the form "<GIN>_<TT_CUSTOMER>".
## horizon:  number of weekly periods to forecast.
## fname:    one of the supported forecast method names (see FNAMES).
## end_date: Date of the last training week; forecasts start the next week.
## lookback: forwarded to the forecast function.
## ...:      unused; reserved for forward compatibility.
group.forecast <- function(train, horizon, fname, end_date, lookback, ...){
    ## Pre-allocate the forecast output matrix (horizon x number of series)
    test <- matrix(NA,nrow=horizon,ncol=ncol(train))
    ## Supported forecast method names
    FNAMES <- c('seasonal.naive',
                'snaive.drift',
                'naive.trackto',
                'naive.profile',
                'simple.ets',
                'stlf.ets',
                'stlf.arima',
                'stlf.arima.xreg',
                'fourier.arima',
                'stlf.nn',
                'seasonal.arima.svd',
                'tslm.basic')
    ## Validate the selected method and resolve it to a function
    if(fname %in% FNAMES){
        f <- get(fname)
    }else{
        stop(fname,' not legal forecast option')
    }
    ## Forecast each column; series selling <= 20 units over the last 4
    ## weeks are treated as inactive and forecast as zero.
    ## seq_len() (not 1:ncol) so a zero-column matrix is a clean no-op.
    for(i in seq_len(ncol(train))) {
        if(sum(tail(train[,i],4)) > 20){
            ## BUG FIX: the customer id must come from the CURRENT column's
            ## name; the original always split colnames(train)[1], giving
            ## every series the first column's customer.
            cust <- strsplit(colnames(train)[i],'_', fixed = TRUE)[[1]][2]
            test[,i] <- round(f(train[,i],h=horizon,lookback=(lookback), end_date=end_date, CUST=cust)$mean,0)
        } else {
            test[,i] <- rep(0,horizon)
        }
    }
    ## Clamp negative forecasts to zero
    test[test<0] <- 0
    test <- data.table(test)
    ## Attach weekly forecast dates starting the week after end_date
    test_periods <- seq.Date(end_date+7,by=7,length.out = horizon)
    test <- (cbind(as.character.Date(test_periods),(test)))
    colnames(test) <- c("WK_END_DT",colnames(train))
    ## Reshape to long format and split "<GIN>_<TT_CUSTOMER>" back apart
    test <- melt.data.table(test,id.vars='WK_END_DT',variable.name='GIN_ACCT')
    test[, c("GIN", "TT_CUSTOMER") := tstrsplit((GIN_ACCT), "_", fixed=TRUE)]
    test[,GIN_ACCT:=NULL]
    test[,method:=(fname)][,END_DATE:=end_date]
    as.data.frame(test)
}
## Cross-validation wrapper around item.forecasts() for a single item.
## Splits `train_test` at `end_date` into a training UNITS series plus
## external-regressor matrices (xreg for the training rows, newxreg for
## the holdout rows) built from all non-UNITS columns, then delegates to
## item.forecasts().
## train_test: data.table with WK_END_DT, UNITS, INSTOCK and any further
##             regressor columns, in weekly order. NOTE: mutated by
##             reference (INSTOCK imputed, WK_END_DT dropped).
## NOTE(review): the print()/cat() calls are debug output left in place.
## NOTE(review): passes lookback=4 rather than the `lookback` argument —
## confirm which is intended.
item.forecasts.cv <- function(train_test, horizon, fname, end_date, lookback, ...){
  #ar <- list(...)
  print(str(train_test))
  ## impute fully-out-of-stock weeks with the average in-stock level
  avg_instock <- train_test[!INSTOCK == 0,mean(INSTOCK)]
  train_test[INSTOCK == 0,INSTOCK:=avg_instock]
  cat("subsetting train set by date")
  train <- train_test[WK_END_DT <= end_date, UNITS]
  cat("converting to TS")
  ## weekly frequency time series wrapped in a one-column data.frame
  train <- data.frame(UNITS=ts(train, frequency = 52))
  print("counting rows")
  n_train <- nrow(train)
  n_test <- nrow(train_test) - n_train
  cat("n_train:",n_train,"n_test",n_test,"n_train_test",nrow(train_test))
  print("Convert xreg to model.matrix")
  train_test[,WK_END_DT := NULL]
  #print(train_test)
  ## design matrix of the external regressors over the full period
  xreg <- model.matrix(UNITS ~ ., train_test)
  print("xreg = ")
  print(str(xreg))
  #print(xreg)
  print("Splitting out newxreg")
  ## regressor rows for the forecast (holdout) window
  newxreg <- xreg[(n_train+1):nrow(train_test),]
  print("Splitting out xreg")
  ## regressor rows for the training window
  xreg <- xreg[1:n_train,]
  print("sending to item.forecasts")
  item.forecasts(train, horizon, fname, end_date, lookback=4, xreg = xreg, newxreg = newxreg, ...)
}
## Run one or more forecast methods on a single item's training series.
## train:        numeric vector, single-column data.frame, or ts of UNITS.
## horizon:      integer number of weeks, or a Date that is converted to
##               weeks past end_date; non-positive horizons fall back to 52.
## fname:        character vector of forecast method names (see FNAMES);
##               unknown names are dropped.
## end_date:     Date of the last training observation.
## lookback:     forwarded to each forecast function.
## xreg/newxreg: regressor matrices forwarded to methods that accept them;
##               newxreg is trimmed to the forecast window.
## Returns a named list with one forecast object per method plus a
## "consolidated" data.frame of all point forecasts by week.
## NOTE(review): `CUST=cust` references a variable not defined in this
## function — it must exist in the calling/global environment; confirm.
item.forecasts <- function(train, horizon, fname, end_date, lookback, xreg, newxreg, ...){
  #Prep Forecast Output Matrix
  if(is.data.frame(train))
    train <- train[,1]
  ## Date horizons are converted to a count of weeks past end_date
  if('Date' %in% class(horizon))
    horizon <- as.integer(((horizon - end_date)/7))
  if(horizon < 1) {
    message("zero or negative horizon, defaulting to 52 (weeks)")
    horizon = 52
  }
  ## trim newxreg to at most `horizon` rows
  if(!is.null(newxreg)) {
    if(nrow(newxreg) > horizon)
      newxreg <- newxreg[1:horizon,]
    print(horizon)
    cat('newxreg Dims:',dim(newxreg))
    cat('xreg Dims:',dim(xreg))
  }
  if(is.numeric(horizon))
    horizon = as.integer(horizon)
  if(!('integer' %in% class(horizon)))
    stop('horizon must either be type Date or integer')
  message("horizon set to:",horizon)
  ## NOTE(review): `test` is never used after this point
  test <- matrix(NA,nrow=horizon,ncol=length(fname))
  #Forecast Method Options
  FNAMES <- c('seasonal.naive',
              'snaive.drift',
              'naive.trackto',
              'naive.profile',
              'simple.ets',
              'stlf.ets',
              'stlf.arima',
              'stlf.arima.xreg',
              'fourier.arima',
              'stlf.nn',
              'seasonal.arima.svd',
              'tslm.basic')
  #Validate selected fc_method
  fname <- fname[fname %in% FNAMES]
  if(length(fname) == 0)
    stop(fname,' not legal forecast option(s)')
  #convert to list of actual functions for lapply
  ffname <- lapply(fname,get)
  ## run every requested method over the same series
  Model_Store <- lapply(ffname, function(f) f(train,
                                              h=horizon,
                                              lookback=(lookback),
                                              end_date=end_date,
                                              CUST=cust,
                                              newxreg=newxreg,
                                              xreg=xreg, ...))
  names(Model_Store) <- fname
  ###create summary table
  ## weekly forecast dates starting the week after end_date
  test_periods <- seq.Date(end_date+7,by=7,length.out = horizon)
  ## point forecasts of every model, rounded to whole units
  consolidated <- round(sapply(Model_Store, "[[","mean"),0)
  ## NOTE(review): redundant — names were already assigned above
  names(Model_Store) <- fname
  consolidated <- data.frame(WK_END_DT = as.character(test_periods), consolidated)
  Model_Store[["consolidated"]] <- consolidated
  return(Model_Store)
}
|
4c874b8dca43180c6228d3545fcbef7db92db333
|
250e2044d4e81fedacd7e8f6edabb4162fc05566
|
/misc/set_up_database.R
|
23d79c3f07b3ea22b27c6ff715566cb2c461cdc0
|
[] |
no_license
|
joebrew/brewpalau
|
b601a6e88086756bbe4603ca29e41f81795617e4
|
2565916a701bbab206c9027a4cf7f6f49d3c5827
|
refs/heads/master
| 2020-05-03T13:00:00.909348
| 2019-04-15T04:33:07
| 2019-04-15T04:33:07
| 178,641,769
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
set_up_database.R
|
library(dplyr)
library(readr)
library(RPostgreSQL)
# # From within psql
# CREATE DATABASE twitter;
# # Now from command line:
# psql twitter
## Read tweet .csv files, combine them (tagging each row with its source
## path), and write the result to the `twitter` table of the local
## PostgreSQL database `twitter`.
## files: character vector of .csv paths,
##        e.g. c('~/Desktop/file1.csv', '~/Desktop/file2.csv')
set_up_database <- function(files){
  # Read each file into a list element, tagging rows with their source path.
  # seq_along() (not 1:length) so an empty `files` vector is a clean no-op.
  data_list <- list()
  for(i in seq_along(files)){
    this_file <- files[i]
    this_data <- read_csv(this_file)
    this_data$file_path <- this_file
    data_list[[i]] <- this_data
  }
  # Combine all the data
  tl <- bind_rows(data_list)
  # If you want, consider merging here with your dictionary of
  # file-paths to queries, so that you can have your query
  # saved in the database too
  # .........
  # Write to the `twitter` table of the `twitter` database. The connection
  # is released via on.exit() so it is closed even if dbWriteTable() fails
  # (the original leaked the connection on error).
  pg <- dbDriver("PostgreSQL")
  con <- dbConnect(pg, dbname="twitter")
  on.exit(dbDisconnect(con), add = TRUE)
  dbWriteTable(con,'twitter',tl, row.names=FALSE)
  # Read back in R
  # dtab = dbGetQuery(con, "select * from twitter")
  invisible(NULL)
}
# To run the above
# set_up_database(files = c('~/Desktop/file1.csv', '~/Desktop/file2.csv'))
|
764c07e36f6466b21a8e2e5b8404c7a89eb859d0
|
5874ae0a22213c3a692763f285a64433bd512f94
|
/R/IMM_forecast_v1.R
|
c2f8288106bfc73f9c1fe864eea73c85380a0467
|
[] |
no_license
|
d8aninja/code
|
8356486291a2db9f419549edaa525d9bbe39abfc
|
80a836db49a31ecd6d0e62aaf8b6e4417c49df68
|
refs/heads/master
| 2021-09-06T11:35:13.956195
| 2018-02-06T04:11:33
| 2018-02-06T04:11:33
| 80,898,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,568
|
r
|
IMM_forecast_v1.R
|
# /////////////////////////////////////
# ////                             ///
# ////     FORECASTING AREA        ///
# ////     PLOT SPARINGLY          ///
# ////      (USE GFX!)             ///
# ////                             ///
# ////     KEEP IT CLEAN!          ///
# ////         -mgmt               ///
# ////                             ///
# /////////////////////////////////////

# Load packages. data.table added: data.table() is used below but was
# never loaded, so the script could not run as written.
libraries <- function(x) lapply(x, require, character.only=T)
libraries(c("dplyr", "ggplot2", "ggthemes", "scales", "fpp", "data.table"))
options(scipen=999)

# Load the sales data
setwd("Z:\\DA&DS\\Jeff\\Data")
ds <- read.csv("ds_fc_trial.csv")

# convert the date column and build the time series
ds$Day <- as.Date(ds$Day, format = "%m/%d/%Y")
sales <- as.ts(data.frame(Date = ds$Day, Sales = ds$Total.Sales))

# Exploratory STL / ACF code retained for reference:
# xxxxx <- stl(ds$Total.Sales,
#              s.window = 12, s.degree = 1,
#              t.window = 12, t.degree = 1,
#              l.window = 12, l.degree = 1,
#              robust = T)
# plot(xxxxx)
# plot(forecast(xxxxx))
#
# acf(xxxxx,
#     lag.max = 36,
#     plot = TRUE,
#     na.action = na.pass,
#     demean = TRUE, main = "DEMO ONLY")

# # Adjust for seasonality
# d_adj <- seasadj(sales)
# acf(d_adj)
# plot(d_adj, main = "Annual Expenditures, Seasonally Adj.") # eg

# SUBSAMPLE DATES
# e_ts <- e_ts %>% filter(Date > as.Date("2010-01-01"))

# ETS IS USED TO FIND THE s,t,l PARAMS USED
# ABOVE TO BRING THE MODEL TO STAIONARITY
fc1 <- ets(sales)
summary(fc1)   # BUG FIX: was summary(fit9) — `fit9` was never defined

# BUG FIX: the original called forecast(fit7, h = 24) (`fit7` undefined)
# and left "simulate = T, bootstrap = T)" stranded on the next lines,
# which is a syntax error. Fold everything into one forecast() call on
# the fitted ETS model.
x <- forecast(fc1, h = 24,
              simulate = T,
              bootstrap = T)
accuracy(x)
summary(x)
plot(x)

# Slice and serve: monthly forecast table with 80%/95% interval bounds.
C.fcast <- data.table(Date = seq(as.Date("2017-01-01"), as.Date("2018-12-31"), "months"),
                      point_est = x$mean,
                      U80 = x$upper[,1],  # BUG FIX: was x$fitted[,1]; the 80% upper bound is x$upper[,1], mirroring L80 = x$lower[,1]
                      U95 = x$upper[,2],
                      L80 = x$lower[,1],
                      L95 = x$lower[,2])
View(x)   # interactive inspection
plot(x)

# Fiscal-year sums of the point forecast and interval bounds.
C.summary <- C.fcast %>%
  dplyr::summarise(FY15_H95 = sum(U95[1:12]),
                   FY15_H80 = sum(U80[1:12]),
                   FY15_point = sum(point_est[1:12]),
                   FY15_L80 = sum(L80[1:12]),
                   FY15_L95 = sum(L95[1:12]),
                   FY16_H95 = sum(U95[13:24]),
                   FY16_H80 = sum(U80[13:24]),
                   FY16_point = sum(point_est[13:24]),
                   FY16_L80 = sum(L80[13:24]),
                   FY16_L95 = sum(L95[13:24])
  )
|
2ae5867342fb77004a8a6efa20700b3d1a917734
|
bbb10152f02ca9c7b841f89c1e10df0c1f47746c
|
/plot2.R
|
ed0066941b3ecec73a882ed0d0a46d97ff1f6a88
|
[] |
no_license
|
jakobdor/ExData_Plotting1
|
53d91228352df63588fc475cdae6c9df31f0568c
|
0b1bf0f61eba7745c680fdec093ace1932a8874a
|
refs/heads/master
| 2021-05-08T09:55:38.822990
| 2018-02-08T15:38:47
| 2018-02-08T15:38:47
| 119,814,274
| 0
| 0
| null | 2018-02-01T09:34:50
| 2018-02-01T09:34:50
| null |
UTF-8
|
R
| false
| false
| 387
|
r
|
plot2.R
|
## Plot 2: Global active power (kW) over 1-2 Feb 2007, saved to plot2.png.
power <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")
## keep only the two target days
two_days <- power[power$Date %in% c("1/2/2007","2/2/2007") ,]
## combine Date + Time into POSIX timestamps for the x axis
timestamps <- strptime(paste(two_days$Date, two_days$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("plot2.png", width=480, height=480)
plot(timestamps, two_days$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
92763c36c4c8592d4983d90a8fb7571fcd9fd967
|
72f2cc395c440b81cbaed3fd7a49dd2da25854e1
|
/BR US DS Model mghosh/2.Code/BR US Scoring with GBM.R
|
c89e5b1afde0e043e027b6525b8f17406f20bedf
|
[] |
no_license
|
ghoshmithun/ImpProjectDoc
|
496f3b0ea4713908c6f3a0f2a5dfdba1d8daa54e
|
c66eb2f03e65c8f44e847fcd453c5eeb6e376e93
|
refs/heads/master
| 2021-05-16T00:42:34.793458
| 2017-12-11T11:01:12
| 2017-12-11T11:01:12
| 106,398,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,910
|
r
|
BR US Scoring with GBM.R
|
## Score the full BR_US customer file in chunks with an ensemble of 20
## pre-trained GBM models, appending per-customer mean/median scores to a
## cumulative output file on the network share.
library(readr)
## output header row, written once (append=F truncates any previous file)
header <- cbind("Customer_key","Response","MeanScore","MedianScore")
write.table(x = header,file="//10.8.8.51/lv0/Move to Box/Mithun/projects/9.BR_US_DS_Project/1.data/BR_US_Train_Data_Score.txt",sep=",",col.names=F,row.names=F,append=F)
# No need for in time validation
BR_data_min_max_value <- read.table(file="//10.8.8.51/lv0/Move to Box/Mithun/projects/9.BR_US_DS_Project/1.data/full_data_min_max_value.txt",sep="|",header=T)
require(R.utils)
## total rows of the scoring file, used to size the chunked reads
total_row_scoredata <- countLines("//10.8.8.51/lv0/Move to Box/Mithun/projects/9.BR_US_DS_Project/1.data/BR_data_full.txt")
#5206884
chunk_size <- 6000000
num_loop <- total_row_scoredata %/% chunk_size
reminder_size <- total_row_scoredata %% chunk_size
start_row <- 1
end_row <- 0
## chunk 0 covers the remainder rows; chunks 1..num_loop cover chunk_size each
for (loop in 0:num_loop){
  if(loop==0){
    start_row <- 1
    end_row <- reminder_size
  }
  else {
    start_row <- end_row + 1
    end_row <- end_row + chunk_size
  }
  print("#################################################################################")
  print(paste("scoring start for data chunk", loop , " , start_row:", start_row , " , end_row :" , end_row))
  print("#################################################################################")
  ## NOTE(review): nrows = (end_row - start_row) reads one row fewer than the
  ## inclusive span, and header = T consumes the first line of every chunk
  ## (only correct for chunk 0). Confirm no rows are skipped at chunk
  ## boundaries before relying on these scores.
  BR_data <- read.table(file = "//10.8.8.51/lv0/Move to Box/Mithun/projects/9.BR_US_DS_Project/1.data/BR_data_full.txt",
                        stringsAsFactors = F ,nrows = (end_row - start_row), skip = start_row -1, sep = "|",header = T )
  ## predictors expected by every GBM model in the ensemble
  model_predictor <- c('percent_disc_last_12_mth', 'num_days_on_books','per_elec_comm','num_em_campaign','num_units_12mth','disc_ats','time_since_last_retail_purchase',
                       'avg_order_amt_last_6_mth','br_gp_net_sales_ratio','non_disc_ats','ratio_rev_wo_rewd_12mth','on_sales_item_rev_12mth','br_hit_ind_tot',
                       'br_on_sales_ratio','on_sales_rev_ratio_12mth','ratio_rev_rewd_12mth','num_order_num_last_6_mth','card_status','ratio_order_6_12_mth',
                       'ratio_order_units_6_12_mth','ratio_disc_non_disc_ats','sale_hit_ind_tot','num_disc_comm_responded','mobile_ind_tot','gp_hit_ind_tot',
                       'br_bf_net_sales_ratio','num_dist_catg_purchased','total_plcc_cards','on_hit_ind_tot','pct_off_hit_ind_tot','br_go_net_sales_ratio','at_hit_ind_tot',
                       'purchased','searchdex_ind_tot','factory_hit_ind_tot','clearance_hit_ind_tot','markdown_hit_ind_tot')
  #model_predictor[!model_predictor %in% colnames(BR_data)]
  #colnames(BR_data)[!colnames(BR_data) %in% model_predictor]
  BR_data <- BR_data[,colnames(BR_data) %in% c('customer_key',"Response",model_predictor)]
  gc()
  ## Hyperparameters identifying the 20 saved model files to load:
  ## gbm_<samplesize>_<run>_<LossFunc>_<shrinkage>.RData
  samplesize <- c('5000' ,'5000' ,'7000' ,'7000' ,'7000' ,'7000' ,'8000' ,'8000' ,'9000' ,'9000' ,'9000' ,'9000' ,'11000' ,
                  '13000' ,'15000' ,'16000' ,'16000' ,'16000' ,'18000' ,'18000')
  run <- c('3' ,'3' ,'2' ,'5' ,'5' ,'5' ,'1' ,'3' ,'4' ,'4' ,'4' ,'4' ,'5' ,'2' ,'5' ,'1' ,'2' ,'2' ,'4' ,'4')
  shrinkage <- c('0.04' ,'0.09' ,'0.07' ,'0.04' ,'0.05' ,'0.09' ,'0.01' ,'0.05' ,'0.001' ,'0.03' ,'0.06' ,'0.07' ,'0.1' ,'0.09' ,
                 '0.09' ,'0.1' ,'0.08' ,'0.1' ,'0.07' ,'0.1')
  LossFunc <- c('bernoulli' ,'bernoulli' ,'adaboost' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,
                'adaboost' ,'bernoulli' ,'adaboost' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,'bernoulli' ,
                'bernoulli' ,'bernoulli')
  print(paste("start_row:",start_row,"end_row:",end_row))
  ## score matrix layout: col 1 = customer_key, col 2 = Response,
  ## cols 3..22 = the 20 model scores, col 23 = mean, col 24 = median
  score_file <- matrix(0,nrow=nrow(BR_data),ncol=24)
  for (j in 1:20){
    require(gbm)
    load(paste0("//10.8.8.51/lv0/Move to Box/Mithun/projects/9.BR_US_DS_Project/Model_objects/gbm/gbm_" ,samplesize[j] , "_" , run[j] , "_" , LossFunc[j] , "_" , shrinkage[j] , ".RData"))
    opt.num.trees <- gbm.perf(gbm.model,plot.it =FALSE)
    score_file[,1]<- BR_data$customer_key
    score_file[,2]<- BR_data$Response
    ## BUG FIX: was score_file[,j+1], which wrote model 1 into column 2
    ## (clobbering Response) and left column 22 as zero while the
    ## mean/median below are taken over columns 3:22. Models belong in
    ## columns 3..22, i.e. j+2.
    score_file[,j+2] <- predict.gbm(object=gbm.model, newdata=data.frame(BR_data), type='response', n.trees=opt.num.trees)
    rm(gbm.model)
    gc()
    print(paste("scoring for model", j))
  }
  print(paste("start_row:",start_row ,"end_row:",end_row))
  ## ensemble summaries across the 20 model scores
  score_file[,23]<- rowMeans(score_file[,3:22])
  score_file[,24] <- apply(score_file[,3:22],1,median)
  write.table(x = score_file[,c(1,2,23,24)],file="//10.8.8.51/lv0/Move to Box/Mithun/projects/9.BR_US_DS_Project/1.data/BR_US_Train_Data_Score.txt",sep=",",col.names=F,row.names=F,append=T)
  print("##################################################################")
  print(paste("scoring for data chunk", loop , "complete"))
  print("##################################################################")
}
## re-read the accumulated scores for downstream use
Score_Data <- read.table(file = "//10.8.8.51/lv0/Move to Box/Mithun/projects/9.BR_US_DS_Project/1.data/BR_US_Train_Data_Score.txt",header=T,sep=",",stringsAsFactors=FALSE)
|
ac490a2a86e219282cebd3c073dc3756938f82af
|
5e748c6e82b6e5a99c2b02e2493ca2d782b952be
|
/R/scrape_sofifa.R
|
89014a6728c1606472c21c110220742313d9e7da
|
[] |
no_license
|
NlIceD/PlusMinusData
|
c94e01bd0d12a21488d78e00e5ff48c92d29eb18
|
8388bbc11fe5853cf6d7a0e59b94c4510b5dde29
|
refs/heads/master
| 2020-06-19T17:39:26.358685
| 2018-10-19T13:39:13
| 2018-10-19T13:39:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,095
|
r
|
scrape_sofifa.R
|
#' Scrape a sofifa table
#'
#' Downloads a sofifa page, parses its first HTML table into a data frame,
#' and collects the href/title attributes of every anchor found inside the
#' page's table cells.
#'
#' @param url url for the sofifa table
#' @return list with elements \code{sofifa_table} (the parsed first table)
#'   and \code{link_title_df} (data frame of anchor links and titles)
scrape_sofifa_table <- function(url) {
  page <- xml2::read_html(url)
  table_nodes <- rvest::html_nodes(page, "table")
  parsed_table <- rvest::html_table(table_nodes[1][[1]])
  # Anchors inside <td> cells carry the player/country links and the
  # human-readable titles used downstream by clean_sofifa_table().
  cell_anchors <- rvest::html_nodes(rvest::html_nodes(x = table_nodes, css = "td"), "a")
  hrefs <- rvest::html_attr(cell_anchors, "href")
  display_titles <- rvest::html_attr(cell_anchors, "title")
  link_title_df <- data.frame(link = as.character(hrefs),
                              title = as.character(display_titles))
  list(sofifa_table = parsed_table, link_title_df = link_title_df)
}
#' Clean raw sofifa table
#'
#' Selects and renames the relevant columns of the scraped table, attaches
#' full player names and countries recovered from the anchor metadata, and
#' splits the combined name/position and team cells on embedded tabs.
#'
#' @param tab list of sofifa table and link metadata as returned by
#'   \code{scrape_sofifa_table}
#' @return data frame with columns name, age, overall, potential, team,
#'   value, wage, full_name, country, position
clean_sofifa_table <- function(tab) {
  raw <- tab$sofifa_table
  # Drop the header-repeat first row; keep the seven columns of interest.
  out <- raw[2:nrow(raw), c(2, 3, 4, 5, 6, 8, 9)]
  names(out) <- c("name", "age", "overall", "potential", "team", "value", "wage")
  # Full player names come from the titles of /player/ links.
  player_rows <- grep(pattern = "/player/", tab$link_title_df$link)
  full_names <- as.character(tab$link_title_df$title[player_rows])
  stopifnot(length(full_names) == nrow(out))
  out$full_name <- full_names
  # Country anchor immediately precedes each player anchor in the cell order.
  # ERROR IN THIS LINE IN RUSSIA: grep(pattern = "/players\\?na=", x = tab$link_title_df$link)
  country_rows <- player_rows - 1
  countries <- as.character(tab$link_title_df$title[country_rows])
  stopifnot(length(countries) == nrow(out))
  out$country <- countries
  # The first column holds "name\tposition"; split it and strip stray tabs.
  split_name <- stringr::str_split_fixed(string = out$name, pattern = "\t", n = 2)
  out$name <- split_name[, 1]
  out$position <- stringr::str_replace_all(string = split_name[, 2],
                                           pattern = "\t", replacement = "")
  # The team cell also carries tab-separated extras; keep the team name only.
  out$team <- stringr::str_split_fixed(out$team, pattern = "\t", n = 2)[, 1]
  out
}
|
d3bb857a9ed0854b1398faebf4748e4b489ef214
|
68e1ac98bf1c17a77f1074bf190e1acb763df791
|
/R/plotCamera.R
|
ea5e7ec9dbb91bf397986f2eb5e34e635431dd8e
|
[
"MIT"
] |
permissive
|
mukundvarma/pledger
|
db4656ce07140354f2bc65c65e04962fe57f3013
|
c41a865a541dc5038d0ebe7b7f2766abf4ad1b7b
|
refs/heads/master
| 2021-05-15T22:28:26.458772
| 2017-10-12T21:31:14
| 2017-10-12T21:31:14
| 106,713,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,001
|
r
|
plotCamera.R
|
#' Bubble plot to display results of CAMERA test
#'
#' Plots -log10 significance against gene-set size for the output of
#' edgeR::camera or edgeR::fry, colouring points by direction of change and
#' labelling the most significant sets with ggrepel.
#'
#' @param camera.obj Output of edgeR::camera or edgeR::fry test (data frame
#'   with columns NGenes, Direction, PValue and, when use.fdr, FDR)
#' @param max.pvalue P-value cutoff for displaying category labels
#' @param use.fdr (boolean) Whether to use FDR (default) or nominal p-value
#' @param n.label Maximum number of category labels to display (capped at 40
#'   and at the number of sets passing max.pvalue)
#' @param plot.title Title for plot
#' @param plot.subtitle Subtitle for plot
#' @param do.return Whether to return ggplot object (default: FALSE)
#'
#' @examples
#'
#' plotCamera(camera.obj = camera.trrustdb, max.pvalue = 0.05, use.fdr=T, n.label=20,
#'            plot.title="Camera test for transcription factors",
#'            plot.subtitle="LPS-treatment vs nostim", do.return=F)
plotCamera <- function(camera.obj, max.pvalue, use.fdr = TRUE, n.label,
                       plot.title, plot.subtitle, do.return = FALSE) {
  plotdf <- camera.obj
  plotdf$names <- rownames(plotdf)
  plotdf$Sig <- plotdf$PValue
  pval.type <- "nominal"
  if (use.fdr) {
    # BUGFIX: the original if() had no braces, so pval.type was set to
    # "adjusted" unconditionally; both statements belong to this branch.
    plotdf$Sig <- plotdf$FDR
    pval.type <- "adjusted"
  }
  # Respect the caller's n.label, capped by the number of significant sets
  # and an absolute maximum of 40 (the original silently ignored the
  # argument).  sum() counts rows passing the cutoff.
  n.label <- min(n.label, sum(plotdf$Sig < max.pvalue), 40)
  # NOTE(review): labels are taken from the top rows, which assumes
  # camera.obj is sorted by significance — confirm with the caller.
  # head() handles n.label == 0 safely (the original 1:n.label indexing
  # selected row 1 when n.label was 0).
  p <- ggplot(plotdf) +
    geom_point(aes(x = NGenes, y = -log10(Sig), color = Direction),
               alpha = 0.7, size = 5) +
    geom_text_repel(data = head(plotdf, n.label),
                    aes(x = NGenes, y = -log10(Sig), label = names),
                    size = 4, segment.size = 0.1) +
    scale_x_log10() + theme(legend.position = "top") +
    scale_colour_manual(values = c("#1b9e77", "#d95f02", "#7570b3")) +
    theme_bw() +
    labs(title = plot.title, subtitle = plot.subtitle,
         x = "Term Size",
         # paste() on expression() deparsed to a garbled label; plain text here
         y = paste("-log10 p-value,", pval.type))
  if (do.return) {
    return(p)
  } else {
    plot(p)
  }
}
|
472cc494d226ea8b1ad313e49db53d5b7e2f625f
|
cf606e7a3f06c0666e0ca38e32247fef9f090778
|
/test/integration/example-models/Bayesian_Cognitive_Modeling/ParameterEstimation/LatentMixtures/Exams_1_Stan.R
|
99bacb8d219cd0a38d4ee238afcc59ae8024c575
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
nhuurre/stanc3
|
32599a71d5f82c759fd6768b8b699fb5f2b2d072
|
5612b357c1cd5a08cf2a57db97ce0e789bb87018
|
refs/heads/master
| 2023-07-05T02:27:08.083259
| 2020-11-12T15:37:42
| 2020-11-12T15:37:42
| 222,684,189
| 0
| 0
|
BSD-3-Clause
| 2019-11-19T11:50:39
| 2019-11-19T11:50:38
| null |
UTF-8
|
R
| false
| false
| 1,750
|
r
|
Exams_1_Stan.R
|
# Latent-mixture model for exam scores (Bayesian Cognitive Modeling,
# ParameterEstimation/LatentMixtures): each of p people answers n binary
# questions; person i either guesses (success rate psi = .5) or belongs to a
# knowledgeable group with a shared unknown rate phi in (.5, 1).  Group
# membership z[i] is recovered in generated quantities.
# NOTE(review): the embedded Stan program uses pre-2.x syntax ('<-',
# binomial_log, increment_log_prob); it will need porting for modern rstan.

# clears workspace:
# NOTE(review): rm(list=ls()) wipes the user's workspace; acceptable only in
# a standalone teaching script such as this one.
rm(list=ls())
library(rstan)
model <- "
// Exam Scores
data {
  int<lower=1> p;
  int<lower=0> k[p];
  int<lower=1> n;
}
transformed data {
  real psi;
  // First Group Guesses
  psi <- .5;
}
parameters {
  // Second Group Has Some Unknown Greater Rate Of Success
  real<lower=.5,upper=1> phi;
}
transformed parameters {
  vector[2] lp_parts[p];
  // Data Follow Binomial With Rate Given By Each Person's Group Assignment
  for (i in 1:p) {
    lp_parts[i,1] <- log(.5) + binomial_log(k[i], n, phi);
    lp_parts[i,2] <- log(.5) + binomial_log(k[i], n, psi);
  }
}
model {
  for (i in 1:p)
    increment_log_prob(log_sum_exp(lp_parts[i]));
}
generated quantities {
  int<lower=0,upper=1> z[p];
  for (i in 1:p) {
    vector[2] prob;
    prob <- softmax(lp_parts[i]);
    z[i] <- bernoulli_rng(prob[1]);
  }
}"

# Observed correct-answer counts per person.
k <- c(21, 17, 21, 18, 22, 31, 31, 34, 34, 35, 35, 36, 39, 36, 35)
p <- length(k) # number of people
n <- 40 # number of questions

data <- list(p=p, k=k, n=n) # to be passed on to Stan

# Initial value for the single free parameter phi (one chain).
myinits <- list(
  list(phi=.75))

parameters <- c("phi", "z") # parameters to be monitored

# The following command calls Stan with specific options.
# For a detailed description type "?rstan".
samples <- stan(model_code=model,
                data=data,
                init=myinits, # If not specified, gives random inits
                pars=parameters,
                iter=20000,
                chains=1,
                thin=1,
                # warmup = 100, # Stands for burn-in; Default = iter/2
                # seed = 123 # Setting seed; Default is random seed
)
# Now the values for the monitored parameters are in the "samples" object,
# ready for inspection.
print(samples, digits=3)
|
4064d49ec98a3d4d953fb4f39e3ce24e42549147
|
94eac0978570c5c33e8bbc1f588b81fae98bcca7
|
/.Rprofile
|
bcf0d3a17436e89e54b2b1141c75a904cd547fc2
|
[
"MIT"
] |
permissive
|
fdrennan/initr
|
ca8c4ba7ecdfdcd831e3528d47ca1a80758a1f78
|
1d4b73076e33ff2edf5096d5ecd21f596adbd28f
|
refs/heads/master
| 2022-12-22T10:08:24.990650
| 2020-09-28T16:25:43
| 2020-09-28T16:25:43
| 298,953,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 166
|
rprofile
|
.Rprofile
|
# Project .Rprofile: activate this project's renv library on R startup, then
# announce that the profile ran.
source("renv/activate.R")
IN_RENV_LOCK <- TRUE
paste("IN_RENV_LOCK", IN_RENV_LOCK)
message('beep boop .init initialized - You can delete this message in .Rprofile')
|
40422e3e05841230a6c4ca1b7efe9611157a844c
|
a48e198d0def40f8a70fbba29fa88eadde4e2735
|
/R_script/P02_reviews.R
|
7a5d7759f822fbb1e0e07eabf85ee95d4e2d46ba
|
[
"Apache-2.0"
] |
permissive
|
intensive-carlito/project-ds
|
d17da1d28bdcb041f6bd121acbf7b27200fd607d
|
625b0970734730b6529d22c0a5818507d1ae5fb6
|
refs/heads/master
| 2020-04-11T11:43:27.269433
| 2019-03-24T22:05:32
| 2019-03-24T22:05:32
| 161,757,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 123
|
r
|
P02_reviews.R
|
# Load the reviews extract (UTF-8, data.table::fread auto-detects the delimiter).
reviews=fread("C:/temp/projet/reviews.csv",stringsAsFactors=FALSE, encoding = 'UTF-8')
# Keep only the reviews for listing 36490 (data.table i-expression filter).
reviews2=reviews[listing_id==36490]
|
15acacd1fa5534aeea6b1daa2299d9a7e2a9ed99
|
0d74c6026340636cb7a73da2b53fe9a80cd4d5a5
|
/simsem/R/analyze.R
|
10bc293114d8b23b29db4232d00a52b198fbf139
|
[] |
no_license
|
simsem/simsem
|
941875bec2bbb898f7e90914dc04b3da146954b9
|
f2038cca482158ec854a248fa2c54043b1320dc7
|
refs/heads/master
| 2023-05-27T07:13:55.754257
| 2023-05-12T11:56:45
| 2023-05-12T11:56:45
| 4,298,998
| 42
| 23
| null | 2015-06-02T03:50:52
| 2012-05-11T16:11:35
|
R
|
UTF-8
|
R
| false
| false
| 5,136
|
r
|
analyze.R
|
### Sunthud Pornprasertmanit & Terrence D. Jorgensen (anyone else?)
### Last updated: 3 June 2018
### functions to fit a model (lavaan, OpenMx, SimSem, or custom function) to data

# Dispatch on the class of 'model': a SimSem template is fitted through
# analyzeSimSem(); an OpenMx model through analyzeMx(); anything else errors.
# 'group' is only forwarded to analyzeSimSem() when the caller actually
# supplied it (detected via match.call()) and it is not already in '...'.
analyze <- function(model, data, package = "lavaan", miss = NULL,
                    aux = NULL, group = NULL, mxMixture = FALSE, ...) {
  mc <- match.call()  # records which arguments the caller named explicitly
  args <- list(...)
  if (is(model, "SimSem")) {
    # Forward 'group' only when named in the call and absent from '...'.
    if(!("group" %in% names(args)) & "group" %in% names(mc)) args$group <- group
    args <- c(list(model = model, data = data, package = package, miss = miss, aux = aux), args)
    out <- do.call("analyzeSimSem", args)
  } else if (is(model, "MxModel")) {
    # OpenMx path: 'group' becomes the grouping-variable label.
    out <- analyzeMx(object = model, data = data, groupLab = group, mxMixture = mxMixture, ...)
  } else {
    stop("Please specify an appropriate object for the 'model' argument: ",
         "simsem model template or OpenMx object. If users wish to analyze ",
         "the lavaan script, please use the functions in the lavaan package ",
         "directly (e.g., sem, cfa, growth, or lavaan).")
  }
  return(out)
}
# Fit a SimSem template to data via lavaan — directly, with auxiliary
# variables through semTools::auxiliary, or under multiple imputation through
# semTools::runMI.
#
# model   : SimSem template (S4; slots @pt, @groupLab, @modelType used here).
# data    : data frame/matrix, or a list carrying a 'data' element
#           (simulation output).
# package : kept for interface compatibility; fitting here always goes
#           through lavaan/semTools.
# miss    : optional missing-data template (slots @package, @m, @args,
#           @cov, @covAsAux used).
# aux     : auxiliary variable names or column indices into 'data'.
# ...     : forwarded to the underlying fitting function.
analyzeSimSem <- function(model, data, package = "lavaan",
                          miss = NULL, aux = NULL, ...) {
  Output <- NULL
  groupLab <- model@groupLab
  args <- list(...)
  # Accept simulation-output lists that wrap the data in a 'data' slot.
  if (is(data, "list")) {
    if ("data" %in% names(data)) {
      data <- data$data
    } else {
      stop("The list does not contain any 'data' slot.")
    }
  }
  # Unnamed columns get default names x1, x2, ...
  if (is.null(colnames(data)))
    colnames(data) <- paste0("x", 1:ncol(data))
  # Default the auxiliary variables to the missing-data covariates when the
  # template requests covAsAux (and covariates are actually specified).
  if (is.null(aux)) {
    if (!is.null(miss) && !(length(miss@cov) == 1 && miss@cov == 0) && miss@covAsAux)
      aux <- miss@cov
  }
  # Single-group parameter table: drop any group argument and label.
  if (length(unique(model@pt$group[model@pt$op %in% c("=~", "~~", "~", "~1", "|")])) == 1) {
    args$group <- NULL
    groupLab <- NULL
  }
  ## TDJ 2 June 2016: lavaan >= 0.6-1 requires a ParTable to have "block"
  if (is.null(model@pt$block)) model@pt$block <- model@pt$group
  if (!is.null(miss) && length(miss@package) != 0 && miss@package %in% c("Amelia", "mice")) {
    # Multiple-imputation path: impute with Amelia/mice inside runMI.
    miArgs <- miss@args
    if (miss@package == "Amelia") {
      # Amelia must not impute the grouping variable; register it as an ID.
      if (model@groupLab %in% colnames(data)) {
        if (!is.null(miArgs$idvars)) {
          miArgs$idvars <- c(miArgs$idvars, model@groupLab)
        } else {
          miArgs <- c(miArgs, list(idvars = model@groupLab))
        }
      }
    }
    Output <- semTools::runMI(model@pt, data, fun = "lavaan", ..., m = miss@m,
                              miArgs = miArgs, miPackage = miss@package)
  } else {
    ## If the missing argument is not specified and data have NAs, the default is fiml.
    if (is.null(args$missing)) {
      missing <- "default"
      if (any(is.na(data))) missing <- "fiml"
    } else {
      # Caller-specified 'missing' is pulled out of '...' and passed explicitly.
      missing <- args$missing
      args$missing <- NULL
    }
    model.type <- if (tolower(model@modelType) == "sem") "sem" else "cfa"
    if (!is.null(aux)) {
      # Saturated-correlates approach for auxiliary variables.
      if (is.numeric(aux)) aux <- colnames(data)[aux]
      attribute <- list(model=model@pt, aux = aux, data = data, group = groupLab,
                        model.type = model.type, missing = missing, fun = "lavaan")
      attribute <- c(attribute, args)
      Output <- do.call(semTools::auxiliary, attribute)
    } else {
      attribute <- list(model = model@pt, data = data, group = groupLab,
                        model.type = model.type, missing = missing)
      attribute <- c(attribute, args)
      Output <- do.call(lavaan::lavaan, attribute)
    }
  }
  return(Output)
}
# To be used internally: thin wrapper that fits a SimSem template's parameter
# table with lavaan, dropping the group label when the table is single-group.
anal <- function(model, data, package = "lavaan", ...) {
  param_ops <- c("=~", "~~", "~", "~1", "|")
  group_ids <- model@pt$group[model@pt$op %in% param_ops]
  # A single distinct group id means no multi-group structure: pass NULL.
  grp_label <- if (length(unique(group_ids)) == 1L) NULL else model@groupLab
  lavaan::lavaan(model@pt, data = data, group = grp_label,
                 model.type = model@modelType, ...)
}
# Fit a lavaan-family model ('fun' names the fitting function) with optional
# auxiliary variables and multiple-imputation handling.
#
# args : named list of arguments forwarded to 'fun'; must contain 'data' and
#        may contain 'group' and 'missing'.
# fun  : name of the lavaan fitting function ("lavaan", "sem", "cfa", ...).
# miss : optional simsem missing-data template (slots @package, @m, @args,
#        @cov, @covAsAux used).
# aux  : auxiliary variable names, or column indices into args$data.
analyzeLavaan <- function(args, fun = "lavaan", miss = NULL, aux = NULL) {
  Output <- NULL
  # Default the auxiliary variables to the missing-data covariates when the
  # template requests covAsAux (and covariates are actually specified).
  if (is.null(aux)) {
    if (!is.null(miss) && !(length(miss@cov) == 1 && miss@cov == 0) && miss@covAsAux)
      aux <- miss@cov
  }
  if (!is.null(miss) && length(miss@package) != 0 && miss@package %in% c("Amelia", "mice")) {
    # Multiple-imputation path via semTools::runMI.
    miArgs <- miss@args
    if (miss@package == "Amelia") {
      if (!is.null(args$group)) {
        # BUGFIX: the original tested colnames(data), but 'data' is not an
        # argument of this function; the dataset lives in args$data.
        if (args$group %in% colnames(args$data)) {
          # Amelia must not impute the grouping variable; register it as an ID.
          if (!is.null(miArgs$idvars)) {
            miArgs$idvars <- c(miArgs$idvars, args$group)
          } else {
            miArgs <- c(miArgs, list(idvars = args$group))
          }
        }
      }
    }
    args$fun <- fun
    args$m <- miss@m
    args$miArgs <- miArgs
    args$miPackage <- miss@package
    Output <- do.call(semTools::runMI, args)
  } else {
    ## If the missing argument is not specified and data have NAs, the default is fiml.
    if (is.null(args$missing)) {
      args$missing <- "default"
      # An MI template with m == 0 also implies FIML handling.
      if ((!is.null(miss) && (miss@m == 0)) || any(is.na(args$data))) {
        args$missing <- "fiml"
      }
    }
    if (!is.null(aux)) {
      # BUGFIX: the original resolved numeric 'aux' against model$data, but
      # 'model' is not in scope here; the dataset lives in args$data.
      if (is.numeric(aux)) aux <- colnames(args$data)[aux]
      args$aux <- aux
      args$fun <- fun
      Output <- do.call(semTools::auxiliary, args)
    } else {
      Output <- do.call(fun, args)
    }
  }
  return(Output)
}
|
1f9615692ec4eb93ddb446eac83c68f8b518932a
|
f0e4d7881d5c9006aaa1f04df5180e79b4b09583
|
/scripts/02_betategarch_estimation.R
|
41d1ea59097c9418e2a880d678fe410c601573c1
|
[] |
no_license
|
daianemarcolino/gascoreinflation
|
744ab8c847289322239c40511975984a0bf537d2
|
4f04f3b232eec0ea38faf7b1a8faac6aa7490bf7
|
refs/heads/master
| 2021-01-19T17:34:04.700943
| 2018-03-09T11:58:56
| 2018-03-09T11:58:56
| 101,070,075
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,633
|
r
|
02_betategarch_estimation.R
|
# Beta-t-EGARCH estimation on NASDAQ returns: compares the fit from the
# betategarch package against a custom routine (betategarch_estimation)
# under three model specifications, and overlays the results in dygraphs.
library(betategarch)
library(dygraphs)
source("functions/betategarch_estimation.R")

# NASDAQ example ---------------------------------
data(nasdaq)
y <- ts(nasdaq[,2])

# using the betategarch package - model y[t] = exp(f2[t])*epsilon[t], epsilon[t] ~ t(v)
m <- tegarch(y, skew = F, asym = F)
summary(m)
yhat1 <- ts(as.vector(fitted(m)), end = end(y), freq = 1)

data <- cbind(y,yhat1)
graph_betategarch <- dygraph(data) %>%
  dySeries("y", label = "NASDAQ", color = "darkgrey", strokeWidth = 1) %>%
  dySeries("yhat1", label = "Sigma2", color = "orangered", strokeWidth = 2) %>%
  dyRangeSelector()
graph_betategarch

# using the custom routine - model y[t] = exp(f2[t])*epsilon[t], epsilon[t] ~ t(v)
k <- betategarch_estimation(y, initial = c(0.02, 0.05, 0.95,10), type = "var")
yhat2 <- k$out[,"sigma"]
k
graph_rotina <- dygraph(cbind(y,yhat2)) %>%
  dySeries("y", label = "NASDAQ", color = "darkgrey", strokeWidth = 1) %>%
  dySeries("yhat2", label = "Sigma2", color = "steelblue", strokeWidth = 2) %>%
  dyRangeSelector()
graph_rotina

# export for the shiny app
# graphs <- list(graph_betategarch = graph_betategarch, graph_rotina = graph_rotina)
# saveRDS(graphs, "shiny_simulacao/data/dygraphs_nasdaq.rds")

# using the custom routine - model y[t] = f1 + exp(f2[t])*epsilon[t], epsilon[t] ~ t(v)
k <- betategarch_estimation(y, initial = c(0.02, 0.05, 0.95,10,5), type = "mean-var")
yhat3 <- k$out[,"sigma"]
k
graph_rotina2 <- dygraph(cbind(y,yhat3)) %>%
  dySeries("y", label = "NASDAQ", color = "darkgrey", strokeWidth = 1) %>%
  dySeries("yhat3", label = "Sigma2", color = "steelblue", strokeWidth = 2) %>%
  dyRangeSelector()
graph_rotina2

# using the custom routine - model y[t] = f1[t] + exp(f2[t])*epsilon[t], epsilon[t] ~ t(v)
k <- betategarch_estimation(y, initial = c(0.02,0.02,0.05, 0.05, 0.95,0.95,10), type = "mean-var2")
yhat4 <- cbind(y, k$out[,c("f1","sigma")])
colnames(yhat4) <- c("y","f1","f2")
k
graph_rotina3 <- dygraph(yhat4[,1:3]) %>%
  dySeries("y", label = "NASDAQ", color = "darkgrey", strokeWidth = 1) %>%
  dySeries("f1", label = "M\u00e9dia", color = "orangered", strokeWidth = 2) %>%
  dySeries("f2", label = "Sigma2", color = "steelblue", strokeWidth = 2) %>%
  dyRangeSelector()
graph_rotina3

# Overlay the three conditional-volatility estimates for comparison.
todos_sigmas <- cbind(yhat1, yhat2, yhat3)
graph_sigmas <- dygraph(cbind(yhat1,yhat2,yhat3)) %>%
  dySeries("yhat1", label = "betategarch", color = "darkgrey", strokeWidth = 1) %>%
  dySeries("yhat2", label = "rotina_var", color = "steelblue", strokeWidth = 2) %>%
  dySeries("yhat3", label = "rotina_varmean", color = "orangered", strokeWidth = 2) %>%
  dyRangeSelector()
graph_sigmas
|
cd46b6076b8329f5ed80f7263767d4abc047ea18
|
1b658e814505a97dedabd6be1611e17220e7ef28
|
/plot2.R
|
61652c621364646aefaccedb89dd592c12940f6b
|
[] |
no_license
|
pan0va/ExData_Plotting1
|
7fc6c7e1681d9688ad659ccaaedf067561bcc4e6
|
5c4a4ba119e1ef3e82f183ea79ffb3deee1708ba
|
refs/heads/master
| 2020-12-24T16:42:04.133108
| 2015-02-08T23:23:47
| 2015-02-08T23:23:47
| 30,505,260
| 0
| 0
| null | 2015-02-08T20:49:18
| 2015-02-08T20:49:18
| null |
UTF-8
|
R
| false
| false
| 529
|
r
|
plot2.R
|
# Plot 2: Global Active Power over time for 2007-02-01 and 2007-02-02,
# written to plot2.png (480x480) from the UCI household power dataset.
data <- read.table("household_power_consumption.txt", header = TRUE,
                   nrows = 2075260, sep = ";", na.strings = "?")
data <- subset(data, data$Date %in% c("1/2/2007","2/2/2007"))
data$DateTime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
data <- na.omit(data)
# Force English weekday labels on the x-axis regardless of OS locale.
Sys.setlocale("LC_TIME", "English")
data$DateTime <- as.POSIXct(data$DateTime)
# BUGFIX: open the png device directly instead of dev.copy() — dev.copy()
# errors when no screen device is open (e.g. under Rscript) and copies
# whatever was on screen rather than the plot drawn below.
png("plot2.png", width = 480, height = 480)
plot(data$DateTime, data$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = NA)
dev.off()
|
b161152494929652a9015f391513944d44d82029
|
904961d334ac204bc3bdb60eadb6e6a185b8790b
|
/R/it_IT.R
|
5a66747a6df816a1a4ea4ece015ab76781b73acd
|
[] |
no_license
|
LuYang19/faker
|
38de6b5a81e64e9afe1771565aa246646a37233b
|
c52c94971790c337813af32c01ede4ee1d46c088
|
refs/heads/master
| 2020-12-03T16:20:59.262361
| 2020-01-02T13:34:59
| 2020-01-02T13:34:59
| 231,081,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,922
|
r
|
it_IT.R
|
# bank --------------------------------------
# Italian (it_IT) locale provider for fake bank-account data; generation
# logic is inherited from bank_init (defined elsewhere).
bank_it_IT = R6Class(
  "bank_it_IT",
  inherit = bank_init,
  cloneable = FALSE,
  private = list(
    # BBAN template consumed by the inherited generator: '?' = random
    # letter, '#' = random digit (one letter followed by 22 digits here).
    bban_format = '?######################',
    # ISO 3166-1 alpha-2 code; presumably prefixed by bank_init when
    # assembling IBANs — generator not visible in this file.
    country_code = 'IT'
  )
)
# person ---------------------------------------
# Italian (it_IT) locale provider for fake person names; sampling logic is
# inherited from person_init (defined elsewhere).
person_it_IT = R6Class(
  "person_it_IT",
  inherit = person_init,
  cloneable = FALSE,
  private = list(
    # Full-name templates; '{...}' placeholders are filled by the inherited
    # generator.  Repeating the plain '{first_name} {last_name}' entry
    # weights sampling toward names without a prefix.
    formats = c(
      '{first_name} {last_name}',
      '{first_name} {last_name}',
      '{first_name} {last_name}',
      '{first_name} {last_name}',
      '{first_name} {last_name}',
      '{prefix} {first_name} {last_name}',
      '{first_name} {last_name}',
      '{prefix} {first_name} {last_name}'),
    # Given-name pool (male names first, then female names; sampled
    # uniformly by the inherited generator as far as this file shows).
    first_names = c(
      'Aaron', 'Akira', 'Alberto', 'Alessandro', 'Alighieri',
      'Amedeo', 'Amos', 'Anselmo', 'Antonino', 'Arcibaldo',
      'Armando', 'Artes',
      'Audenico', 'Ausonio', 'Bacchisio', 'Battista', 'Bernardo',
      'Boris', 'Caio', 'Carlo', 'Cecco', 'Cirino',
      'Cleros', 'Costantino',
      'Damiano', 'Danny', 'Davide', 'Demian', 'Dimitri', 'Domingo',
      'Dylan', 'Edilio', 'Egidio', 'Elio', 'Emanuel',
      'Enrico', 'Ercole',
      'Ermes', 'Ethan', 'Eusebio', 'Evangelista', 'Fabiano',
      'Ferdinando', 'Fiorentino', 'Flavio', 'Fulvio',
      'Gabriele', 'Gastone', 'Germano',
      'Giacinto', 'Gianantonio', 'Gianleonardo', 'Gianmarco',
      'Gianriccardo', 'Gioacchino', 'Giordano', 'Giuliano',
      'Graziano', 'Guido', 'Harry',
      'Iacopo', 'Ilario', 'Ione', 'Italo', 'Jack', 'Jari',
      'Joey', 'Joseph', 'Kai', 'Kociss', 'Laerte', 'Lauro',
      'Leonardo', 'Liborio', 'Lorenzo',
      'Ludovico', 'Maggiore', 'Manuele', 'Mariano', 'Marvin',
      'Matteo', 'Mauro', 'Michael', 'Mirco', 'Modesto',
      'Muzio', 'Nabil', 'Nathan',
      'Nick', 'Noah', 'Odino', 'Olo', 'Oreste', 'Osea', 'Pablo',
      'Patrizio', 'Piererminio', 'Pierfrancesco',
      'Piersilvio', 'Priamo', 'Quarto',
      'Quirino', 'Radames', 'Raniero', 'Renato', 'Rocco',
      'Romeo', 'Rosalino', 'Rudy', 'Sabatino', 'Samuel', 'Santo',
      'Sebastian', 'Serse',
      'Silvano', 'Sirio', 'Tancredi', 'Terzo', 'Timoteo',
      'Tolomeo', 'Trevis', 'Ubaldo', 'Ulrico', 'Valdo', 'Neri',
      'Vinicio', 'Walter', 'Xavier', 'Yago', 'Zaccaria', 'Abramo',
      'Adriano', 'Alan', 'Albino', 'Alessio',
      'Alighiero',
      'Amerigo', 'Anastasio', 'Antimo', 'Antonio', 'Arduino',
      'Aroldo', 'Arturo', 'Augusto', 'Avide', 'Baldassarre',
      'Bettino',
      'Bortolo', 'Caligola', 'Carmelo', 'Celeste', 'Ciro',
      'Costanzo', 'Dante', 'Danthon', 'Davis', 'Demis', 'Dindo',
      'Domiziano', 'Edipo', 'Egisto', 'Eliziario', 'Emidio',
      'Enzo', 'Eriberto', 'Erminio', 'Ettore', 'Eustachio',
      'Fabio', 'Fernando',
      'Fiorenzo', 'Folco', 'Furio', 'Gaetano', 'Gavino',
      'Gerlando', 'Giacobbe', 'Giancarlo', 'Gianmaria', 'Giobbe',
      'Giorgio', 'Giulio',
      'Gregorio', 'Hector', 'Ian', 'Ippolito', 'Ivano', 'Jacopo',
      'Jarno', 'Joannes', 'Joshua', 'Karim', 'Kris',
      'Lamberto',
      'Lazzaro', 'Leone', 'Lino', 'Loris', 'Luigi', 'Manfredi',
      'Marco', 'Marino', 'Marzio', 'Mattia', 'Max',
      'Michele', 'Mirko', 'Moreno',
      'Nadir', 'Nazzareno', 'Nestore', 'Nico', 'Noel', 'Odone',
      'Omar', 'Orfeo', 'Osvaldo', 'Pacifico', 'Pericle',
      'Pietro', 'Primo',
      'Quasimodo', 'Radio', 'Raoul', 'Renzo', 'Rodolfo', 'Romolo',
      'Rosolino', 'Rufo', 'Sabino', 'Sandro', 'Sasha',
      'Secondo', 'Sesto',
      'Silverio', 'Siro', 'Tazio', 'Teseo', 'Timothy', 'Tommaso',
      'Tristano', 'Umberto', 'Ariel', 'Artemide', 'Assia',
      'Azue', 'Benedetta',
      'Bibiana', 'Brigitta', 'Carmela', 'Cassiopea', 'Cesidia',
      'Cira', 'Clea', 'Cleopatra', 'Clodovea', 'Concetta',
      'Cosetta', 'Cristyn',
      'Damiana', 'Danuta', 'Deborah', 'Demi', 'Diamante', 'Diana',
      'Donatella', 'Doriana', 'Edvige', 'Elda', 'Elga',
      'Elsa', 'Emilia', 'Enrica',
      'Erminia', 'Eufemia', 'Evita', 'Fatima', 'Felicia',
      'Filomena', 'Flaviana', 'Fortunata', 'Gelsomina',
      'Genziana', 'Giacinta', 'Gilda',
      'Giovanna', 'Giulietta', 'Grazia', 'Guendalina', 'Helga',
      'Ileana', 'Ingrid', 'Irene', 'Isabel', 'Isira',
      'Ivonne', 'Jelena', 'Jole',
      'Claudia', 'Kayla', 'Kristel', 'Laura', 'Lucia', 'Lia',
      'Lidia', 'Lisa', 'Loredana', 'Loretta', 'Luce',
      'Lucrezia', 'Luna', 'Maika',
      'Marcella', 'Maria', 'Mariagiulia', 'Marianita', 'Mariapia',
      'Marieva', 'Marina', 'Maristella', 'Maruska',
      'Matilde', 'Mecren',
      'Mercedes', 'Mietta', 'Miriana', 'Miriam', 'Monia',
      'Morgana', 'Naomi', 'Nayade', 'Nicoletta', 'Ninfa', 'Noemi',
      'Nunzia', 'Olimpia',
      'Oretta', 'Ortensia', 'Penelope', 'Piccarda', 'Prisca',
      'Rebecca', 'Rita', 'Rosalba', 'Rosaria', 'Rosita',
      'Ruth', 'Samira',
      'Sarita', 'Selvaggia', 'Shaira', 'Sibilla', 'Soriana',
      'Sue ellen', 'Thea', 'Tosca', 'Ursula', 'Vania', 'Vera',
      'Vienna', 'Violante', 'Vitalba', 'Zelida'),
    # Surname pool (common Italian surnames).
    last_names = c(
      'Rossi', 'Russo', 'Ferrari', 'Esposito', 'Bianchi',
      'Romano', 'Colombo', 'Ricci', 'Marino', 'Greco',
      'Bruno', 'Gallo', 'Conti', 'De luca', 'Mancini',
      'Costa', 'Giordano', 'Rizzo', 'Lombardi', 'Moretti',
      'Barbieri', 'Fontana', 'Santoro', 'Mariani', 'Rinaldi',
      'Caruso', 'Ferrara', 'Galli', 'Martini', 'Leone',
      'Longo', 'Gentile', 'Martinelli', 'Vitale', 'Lombardo',
      'Serra', 'Coppola', 'De Santis', 'D\'angelo', 'Marchetti',
      'Parisi', 'Villa', 'Conte', 'Ferraro', 'Ferri',
      'Fabbri', 'Bianco', 'Marini', 'Grasso', 'Valentini',
      'Messina', 'Sala', 'De Angelis', 'Gatti', 'Pellegrini',
      'Palumbo', 'Sanna', 'Farina', 'Rizzi', 'Monti',
      'Cattaneo', 'Morelli', 'Amato', 'Silvestri', 'Mazza',
      'Testa', 'Grassi', 'Pellegrino', 'Carbone', 'Giuliani',
      'Benedetti', 'Barone', 'Rossetti', 'Caputo', 'Montanari',
      'Guerra', 'Palmieri', 'Bernardi', 'Martino', 'Fiore',
      'De rosa', 'Ferretti', 'Bellini', 'Basile', 'Riva',
      'Donati', 'Piras', 'Vitali', 'Battaglia', 'Sartori',
      'Neri', 'Costantini', 'Milani', 'Pagano', 'Ruggiero',
      'Sorrentino', 'D\'amico', 'Orlando', 'Damico', 'Negri'),
    # Honorific pools used by the '{prefix}' placeholder.
    prefixes_female = c('Dott.', 'Sig.ra'),
    prefixes_male = c('Dott.', 'Sig.'),
    prefixes = c('Dott.', 'Sig.', 'Sig.ra')
  )
)
# company --------------------------------------------
# Italian (it_IT) locale provider for fake company names and buzz phrases;
# generation logic is inherited from company_init (defined elsewhere).
company_it_IT = R6Class(
  "company_it_IT",
  inherit = company_init,
  cloneable = FALSE,
  private = list(
    # Name material borrowed from the person provider.
    # NOTE(review): reaching into another R6 class's private environment via
    # .__enclos_env__ at class-definition time is fragile — a dedicated
    # accessor on person_it_IT would be safer; confirm before refactoring.
    first_names = (person_it_IT$new())$.__enclos_env__$private$first_names,
    first_name = (person_it_IT$new())$first_name,
    last_names = (person_it_IT$new())$.__enclos_env__$private$last_names,
    last_name = (person_it_IT$new())$last_name,
    # Company-name templates; placeholders filled by the inherited generator.
    formats = c(
      '{last_name} {company_suffix}',
      '{last_name}-{last_name} {company_suffix}',
      '{last_name}, {last_name} e {last_name} {company_suffix}'),
    # Catch-phrase vocabulary: one word is drawn from each of the three
    # vectors (noun, adjective, qualifier) to build a slogan.
    catch_phrase_words = list(
      c('Abilit\u00e0',
        'Access',
        'Adattatore',
        'Algoritmo',
        'Alleanza',
        'Analizzatore',
        'Applicazione',
        'Approccio',
        'Architettura',
        'Archivio',
        'Intelligenza artificiale',
        'Array',
        'Attitudine',
        'Benchmark',
        'Capacit\u00e0',
        'Sfida',
        'Circuito',
        'Collaborazione',
        'Complessit\u00e0',
        'Concetto',
        'Conglomerato',
        'Contingenza',
        'Core',
        'Database',
        'Data-warehouse',
        'Definizione',
        'Emulazione',
        'Codifica',
        'Criptazione',
        'Firmware',
        'Flessibilit\u00e0',
        'Previsione',
        'Frame',
        'framework',
        'Funzione',
        'Funzionalit\u00e0',
        'Interfaccia grafica',
        'Hardware',
        'Help-desk',
        'Gerarchia',
        'Hub',
        'Implementazione',
        'Infrastruttura',
        'Iniziativa',
        'Installazione',
        'Set di istruzioni',
        'Interfaccia',
        'Soluzione internet',
        'Intranet',
        'Conoscenza base',
        'Matrici',
        'Matrice',
        'Metodologia',
        'Middleware',
        'Migrazione',
        'Modello',
        'Moderazione',
        'Monitoraggio',
        'Moratoria',
        'Rete',
        'Architettura aperta',
        'Sistema aperto',
        'Orchestrazione',
        'Paradigma',
        'Parallelismo',
        'Policy',
        'Portale',
        'Struttura di prezzo',
        'Prodotto',
        'Produttivit\u00e0',
        'Progetto',
        'Proiezione',
        'Protocollo',
        'Servizio clienti',
        'Software',
        'Soluzione',
        'Standardizzazione',
        'Strategia',
        'Struttura',
        'Successo',
        'Sovrastruttura',
        'Supporto',
        'Sinergia',
        'Task-force',
        'Finestra temporale',
        'Strumenti',
        'Utilizzazione',
        'Sito web',
        'Forza lavoro'),
      c('adattiva',
        'avanzata',
        'migliorata',
        'assimilata',
        'automatizzata',
        'bilanciata',
        'centralizzata',
        'compatibile',
        'configurabile',
        'cross-platform',
        'decentralizzata',
        'digitalizzata',
        'distribuita',
        'piccola',
        'ergonomica',
        'esclusiva',
        'espansa',
        'estesa',
        'configurabile',
        'fondamentale',
        'orizzontale',
        'implementata',
        'innovativa',
        'integrata',
        'intuitiva',
        'inversa',
        'gestita',
        'obbligatoria',
        'monitorata',
        'multi-canale',
        'multi-laterale',
        'open-source',
        'operativa',
        'ottimizzata',
        'organica',
        'persistente',
        'polarizzata',
        'proattiva',
        'programmabile',
        'progressiva',
        'reattiva',
        'riallineata',
        'ricontestualizzata',
        'ridotta',
        'robusta',
        'sicura',
        'condivisibile',
        'stand-alone',
        'switchabile',
        'sincronizzata',
        'sinergica',
        'totale',
        'universale',
        'user-friendly',
        'versatile',
        'virtuale',
        'visionaria'),
      c('24 ore',
        '24/7',
        'terza generazione',
        'quarta generazione',
        'quinta generazione',
        'sesta generazione',
        'asimmetrica',
        'asincrona',
        'background',
        'bi-direzionale',
        'biforcata',
        'bottom-line',
        'coerente',
        'coesiva',
        'composita',
        'sensibile al contesto',
        'basta sul contesto',
        'basata sul contenuto',
        'dedicata',
        'didattica',
        'direzionale',
        'discreta',
        'dinamica',
        'eco-centrica',
        'esecutiva',
        'esplicita',
        'full-range',
        'globale',
        'euristica',
        'alto livello',
        'olistica',
        'omogenea',
        'ibrida',
        'impattante',
        'incrementale',
        'intangibile',
        'interattiva',
        'intermediaria',
        'locale',
        'logistica',
        'massimizzata',
        'metodica',
        'mission-critical',
        'mobile',
        'modulare',
        'motivazionale',
        'multimedia',
        'multi-tasking',
        'nazionale',
        'neutrale',
        'nextgeneration',
        'non-volatile',
        'object-oriented',
        'ottima',
        'ottimizzante',
        'radicale',
        'real-time',
        'reciproca',
        'regionale',
        'responsiva',
        'scalabile',
        'secondaria',
        'stabile',
        'statica',
        'sistematica',
        'sistemica',
        'tangibile',
        'terziaria',
        'uniforme',
        'valore aggiunto')),
    # Business-speak vocabulary: one word per vector builds a "BS" phrase.
    bs_words = list(
      c('partnerships',
        'comunit\u00e0',
        'ROI',
        'soluzioni',
        'e-services',
        'nicchie',
        'tecnologie',
        'contenuti',
        'supply-chains',
        'convergenze',
        'relazioni',
        'architetture',
        'interfacce',
        'mercati',
        'e-commerce',
        'sistemi',
        'modelli',
        'schemi',
        'reti',
        'applicazioni',
        'metriche',
        'e-business',
        'funzionalit\u00e0',
        'esperienze',
        'webservices',
        'metodologie'),
      c('implementate',
        'utilizzo',
        'integrate',
        'ottimali',
        'evolutive',
        'abilitate',
        'reinventate',
        'aggregate',
        'migliorate',
        'incentivate',
        'monetizzate',
        'sinergizzate',
        'strategiche',
        'deploy',
        'marchi',
        'accrescitive',
        'target',
        'sintetizzate',
        'spedizioni',
        'massimizzate',
        'innovazione',
        'guida',
        'estensioni',
        'generate',
        'exploit',
        'transizionali',
        'matrici',
        'ricontestualizzate'),
      c('valore aggiunto',
        'verticalizzate',
        'proattive',
        'forti',
        'rivoluzionari',
        'scalabili',
        'innovativi',
        'intuitivi',
        'strategici',
        'e-business',
        'mission-critical',
        '24/7',
        'globali',
        'B2B',
        'B2C',
        'granulari',
        'virtuali',
        'virali',
        'dinamiche',
        'magnetiche',
        'web',
        'interattive',
        'sexy',
        'back-end',
        'real-time',
        'efficienti',
        'front-end',
        'distributivi',
        'estensibili',
        'mondiali',
        'open-source',
        'cross-platform',
        'sinergiche',
        'out-of-the-box',
        'enterprise',
        'integrate',
        'di impatto',
        'wireless',
        'trasparenti',
        'next-generation',
        'cutting-edge',
        'visionari',
        'plug-and-play',
        'collaborative',
        'olistiche',
        'ricche')),
    # Legal-form suffixes appended by the '{company_suffix}' placeholder.
    company_suffixes = c('SPA', 'e figli', 'Group', 's.r.l.')
  )
)
# address -----------------------------------
# Italian (it_IT) address provider: localized city/street/state/country
# data plus format templates consumed by the generator methods inherited
# from address_init.
address_it_IT = R6Class(
  "address_it_IT",
  inherit = address_init,
  cloneable = FALSE,
  private = list(
    # Reuse the Italian person provider so city/street names built from
    # personal names are locale-appropriate.
    first_names = (person_it_IT$new())$.__enclos_env__$private$first_names,
    first_name = (person_it_IT$new())$first_name,
    last_names = (person_it_IT$new())$.__enclos_env__$private$last_names,
    last_name = (person_it_IT$new())$last_name,
    # Common Italian toponym prefixes/suffixes combined with person names
    # by city_formats below.
    city_prefixes = c(
      'San', 'Borgo', 'Sesto', 'Quarto', 'Settimo'),
    city_suffixes = c(
      'a mare', 'lido', 'ligure', 'del friuli', 'salentino',
      'calabro', 'veneto', 'nell\'emilia', 'umbro', 'laziale',
      'terme', 'sardo'),
    # '#' placeholders are numerified into random digits.
    building_number_formats = c('###', '##', '#'),
    street_suffixes = c(
      'Piazza', 'Strada', 'Via', 'Borgo', 'Contrada',
      'Rotonda', 'Incrocio', 'Viale', 'Stretto', 'Vicolo',
      'Canale'
    ),
    # Italian CAP (postal code): five digits.
    postcode_formats = '#####',
    # Italian provinces; states_abbr below lists the matching two-letter
    # province codes in the same order.
    states = c(
      'Agrigento', 'Alessandria', 'Ancona', 'Aosta', 'Arezzo',
      'Ascoli Piceno', 'Asti', 'Avellino', 'Bari',
      'Barletta-Andria-Trani', 'Belluno', 'Benevento', 'Bergamo',
      'Biella', 'Bologna', 'Bolzano', 'Brescia', 'Brindisi',
      'Cagliari', 'Caltanissetta', 'Campobasso', 'Carbonia-Iglesias',
      'Caserta', 'Catania', 'Catanzaro', 'Chieti', 'Como', 'Cosenza',
      'Cremona', 'Crotone', 'Cuneo', 'Enna', 'Fermo', 'Ferrara',
      'Firenze', 'Foggia', 'Forl\u00ec-Cesena', 'Frosinone', 'Genova',
      'Gorizia', 'Grosseto', 'Imperia', 'Isernia', 'La Spezia',
      'L\'Aquila', 'Latina', 'Lecce', 'Lecco', 'Livorno', 'Lodi',
      'Lucca', 'Macerata', 'Mantova', 'Massa-Carrara', 'Matera',
      'Messina', 'Milano', 'Modena', 'Monza e della Brianza', 'Napoli',
      'Novara', 'Nuoro', 'Olbia-Tempio', 'Oristano', 'Padova',
      'Palermo', 'Parma', 'Pavia', 'Perugia', 'Pesaro e Urbino',
      'Pescara', 'Piacenza', 'Pisa', 'Pistoia', 'Pordenone', 'Potenza',
      'Prato', 'Ragusa', 'Ravenna', 'Reggio Calabria', 'Reggio Emilia',
      'Rieti', 'Rimini', 'Roma', 'Rovigo', 'Salerno',
      'Medio Campidano', 'Sassari', 'Savona', 'Siena', 'Siracusa',
      'Sondrio', 'Taranto', 'Teramo', 'Terni', 'Torino', 'Ogliastra',
      'Trapani', 'Trento', 'Treviso', 'Trieste', 'Udine', 'Varese',
      'Venezia', 'Verbano-Cusio-Ossola', 'Vercelli', 'Verona',
      'Vibo Valentia', 'Vicenza', 'Viterbo'),
    states_abbr = c(
      'AG', 'AL', 'AN', 'AO', 'AR', 'AP', 'AT', 'AV', 'BA', 'BT',
      'BL', 'BN', 'BG', 'BI', 'BO', 'BZ', 'BS', 'BR', 'CA', 'CL',
      'CB', 'CI', 'CE', 'CT', 'CZ', 'CH', 'CO', 'CS', 'CR', 'KR',
      'CN', 'EN', 'FM', 'FE', 'FI', 'FG', 'FC', 'FR', 'GE', 'GO',
      'GR', 'IM', 'IS', 'SP', 'AQ', 'LT', 'LE', 'LC', 'LI', 'LO',
      'LU', 'MC', 'MN', 'MS', 'MT', 'ME', 'MI', 'MO', 'MB', 'NA',
      'NO', 'NU', 'OT', 'OR', 'PD', 'PA', 'PR', 'PV', 'PG', 'PU',
      'PE', 'PC', 'PI', 'PT', 'PN', 'PZ', 'PO', 'RG', 'RA', 'RC',
      'RE', 'RI', 'RN', 'RM', 'RO', 'SA', 'VS', 'SS', 'SV', 'SI',
      'SR', 'SO', 'TA', 'TE', 'TR', 'TO', 'OG', 'TP', 'TN', 'TV',
      'TS', 'UD', 'VA', 'VE', 'VB', 'VC', 'VR', 'VV', 'VI', 'VT'),
    # Country names in Italian (with occasional untranslated entries,
    # kept as-is from the upstream data).
    countries = c(
      'Afghanistan', 'Albania', 'Algeria', 'American Samoa', 'Andorra',
      'Angola', 'Anguilla', 'Antartide (territori a sud del 60\u00b0 parallelo)',
      'Antigua e Barbuda', 'Argentina', 'Armenia', 'Aruba', 'Australia',
      'Austria', 'Azerbaijan', 'Bahamas', 'Bahrain', 'Bangladesh',
      'Barbados', 'Bielorussia', 'Belgio', 'Belize', 'Benin', 'Bermuda',
      'Bhutan', 'Bolivia', 'Bosnia e Herzegovina', 'Botswana',
      'Bouvet Island (Bouvetoya)', 'Brasile',
      'Territorio dell\'arcipelago indiano', 'Isole Vergini Britanniche',
      'Brunei Darussalam', 'Bulgaria', 'Burkina Faso', 'Burundi', 'Cambogia',
      'Cameroon', 'Canada', 'Capo Verde', 'Isole Cayman',
      'Repubblica Centrale Africana', 'Chad', 'Cile', 'Cina',
      'Isola di Pasqua', 'Isola di Cocos (Keeling)', 'Colombia', 'Comoros',
      'Congo', 'Isole Cook', 'Costa Rica', 'Costa d\'Avorio', 'Croazia',
      'Cuba', 'Cipro', 'Repubblica Ceca', 'Danimarca', 'Gibuti',
      'Repubblica Dominicana', 'Equador', 'Egitto', 'El Salvador',
      'Guinea Equatoriale', 'Eritrea', 'Estonia', 'Etiopia', 'Isole Faroe',
      'Isole Falkland (Malvinas)', 'Fiji', 'Finlandia', 'Francia',
      'Guyana Francese', 'Polinesia Francese', 'Territori Francesi del sud',
      'Gabon', 'Gambia', 'Georgia', 'Germania', 'Ghana', 'Gibilterra',
      'Grecia', 'Groenlandia', 'Grenada', 'Guadalupa', 'Guam', 'Guatemala',
      'Guernsey', 'Guinea', 'Guinea-Bissau', 'Guyana', 'Haiti',
      'Heard Island and McDonald Islands', 'Citt\u00e0 del Vaticano', 'Honduras',
      'Hong Kong', 'Ungheria', 'Islanda', 'India', 'Indonesia', 'Iran',
      'Iraq', 'Irlanda', 'Isola di Man', 'Israele', 'Italia', 'Giamaica',
      'Giappone', 'Jersey', 'Giordania', 'Kazakhstan', 'Kenya', 'Kiribati',
      'Korea', 'Kuwait', 'Republicca Kirgiza', 'Repubblica del Laos',
      'Latvia', 'Libano', 'Lesotho', 'Liberia', 'Libyan Arab Jamahiriya',
      'Liechtenstein', 'Lituania', 'Lussemburgo', 'Macao', 'Macedonia',
      'Madagascar', 'Malawi', 'Malesia', 'Maldive', 'Mali', 'Malta',
      'Isole Marshall', 'Martinica', 'Mauritania', 'Mauritius', 'Mayotte',
      'Messico', 'Micronesia', 'Moldova', 'Principato di Monaco', 'Mongolia',
      'Montenegro', 'Montserrat', 'Marocco', 'Mozambico', 'Myanmar',
      'Namibia', 'Nauru', 'Nepal', 'Antille Olandesi', 'Olanda',
      'Nuova Caledonia', 'Nuova Zelanda', 'Nicaragua', 'Niger', 'Nigeria',
      'Niue', 'Isole Norfolk', 'Northern Mariana Islands', 'Norvegia',
      'Oman', 'Pakistan', 'Palau', 'Palestina', 'Panama',
      'Papua Nuova Guinea', 'Paraguay', 'Peru', 'Filippine',
      'Pitcairn Islands', 'Polonia', 'Portogallo', 'Porto Rico', 'Qatar',
      'Reunion', 'Romania', 'Russia', 'Rwanda', 'San Bartolomeo',
      'Sant\'Elena', 'Saint Kitts and Nevis', 'Saint Lucia', 'Saint Martin',
      'Saint Pierre and Miquelon', 'Saint Vincent and the Grenadines',
      'Samoa', 'San Marino', 'Sao Tome and Principe', 'Arabia Saudita',
      'Senegal', 'Serbia', 'Seychelles', 'Sierra Leone', 'Singapore',
      'Slovenia', 'Isole Solomon', 'Somalia', 'Sud Africa',
      'Georgia del sud e South Sandwich Islands', 'Spagna', 'Sri Lanka',
      'Sudan', 'Suriname', 'Svalbard & Jan Mayen Islands', 'Swaziland',
      'Svezia', 'Svizzera', 'Siria', 'Taiwan', 'Tajikistan', 'Tanzania',
      'Tailandia', 'Timor-Leste', 'Togo', 'Tokelau', 'Tonga',
      'Trinidad e Tobago', 'Tunisia', 'Turchia', 'Turkmenistan',
      'Isole di Turks and Caicos', 'Tuvalu', 'Uganda', 'Ucraina',
      'Emirati Arabi Uniti', 'Regno Unito', 'Stati Uniti d\'America',
      'United States Minor Outlying Islands', 'Isole Vergini Statunitensi',
      'Uruguay', 'Uzbekistan', 'Vanuatu', 'Venezuela', 'Vietnam',
      'Wallis and Futuna', 'Western Sahara', 'Yemen', 'Zambia', 'Zimbabwe'),
    # Templates expanded by the inherited format machinery; each
    # {placeholder} is filled by the method/field of the same name.
    city_formats = c(
      '{city_prefix} {first_name} {city_suffix}',
      '{city_prefix} {first_name}',
      '{first_name} {city_suffix}',
      '{last_name} {city_suffix}'
    ),
    street_name_formats = c(
      '{street_suffix} {first_name}',
      '{street_suffix} {last_name}'
    ),
    street_address_formats = c(
      '{street_name} {building_number}',
      '{street_name} {building_number} {secondary_address}'),
    address_formats = c(
      "{street_address}\n{city}, {postcode} {state} ({state_abbr})"),
    secondary_address_formats = c('Appartamento ##', 'Piano #')
  ),
  public = list(
    # Random city prefix (e.g. "San").
    city_prefix = function(){
      return(private$random_element(private$city_prefixes))
    },
    # Random secondary address line with digits filled in
    # (e.g. "Appartamento 42").
    secondary_address = function(){
      return(private$numerify(private$random_element(
        private$secondary_address_formats)))
    },
    # Random Italian province name.
    state = function(){
      return(private$random_element(private$states))
    },
    # Random two-letter province code; drawn independently of state(),
    # so the two need not correspond.
    state_abbr = function(){
      return(private$random_element(private$states_abbr))
    }
  )
)
# internet --------------------------------------------
# Italian (it_IT) internet provider: locale-specific email/TLD data plus
# a domain_word() built from generated Italian company names.
internet_it_IT = R6Class(
  "internet_it_IT",
  inherit = internet_init,
  cloneable = FALSE,
  private = list(
    # Reuse the Italian person provider for user-name generation.
    first_names = (person_it_IT$new())$.__enclos_env__$private$first_names,
    first_name = (person_it_IT$new())$first_name,
    last_names = (person_it_IT$new())$.__enclos_env__$private$last_names,
    last_name = (person_it_IT$new())$last_name,
    # Repeated entries weight the sampling toward the common choices.
    safe_email_tlds = c('com', 'net', 'eu', 'it', 'it', 'it'),
    free_email_domains = c(
      'libero.it', 'libero.it', 'libero.it',
      'tim.it',
      'tin.it',
      'alice.it',
      'virgilio.it',
      'tiscali.it',
      'fastwebnet.it',
      'vodafone.it',
      'poste.it',
      'gmail.com', 'gmail.com', 'gmail.com',
      'outlook.com',
      'live.com',
      'hotmail.com',
      'hotmail.it',
      'yahoo.com',
      'tele2.it'
    ),
    tlds = c('com', 'com', 'com', 'net', 'org', 'eu', 'it', 'it', 'it', 'it'),
    # Accented-character -> ASCII substitutions used when slugifying.
    replacements = list(
      c('\u00e0', 'a'), c('\u00e9', 'e'), c('\u00e8', 'e'),
      c('\u00ec', 'i'), c('\u00f2', 'o'), c('\u00f9', 'u')
    )
  ),
  public = list(
    domain_word = function() {
      # First word of a generated Italian company name, ASCII-folded and
      # slugified so it can serve as a domain label.
      full_name = (company_it_IT$new())$company()
      first_word = str_split(full_name, " ")[[1]][1]
      return(private$slugify(private$to_ascii(first_word),
                             allow_unicode = TRUE))
    }
  )
)
# phone_number -------------------------------------
# Italian (it_IT) phone-number provider.
phone_number_it_IT = R6Class(
  "phone_number_it_IT",
  inherit = phone_number_init,
  cloneable = FALSE,
  private = list(
    # Number templates with the +39 country code; each '#' is replaced by
    # a random digit by the generator inherited from phone_number_init.
    formats = c(
      '+39 ### ## ## ####',
      '+39 ## #######',
      '+39 ## ########',
      '+39 ### #######',
      '+39 ### ########',
      '+39 #### #######',
      '+39 #### ########'
    )
  )
)
# ssn -----------------------------------------
# Italian (it_IT) SSN provider: random codice fiscale and VAT IDs.
ssn_it_IT = R6Class(
  "ssn_it_IT",
  inherit = ssn_init,
  cloneable = FALSE,
  private = list(
    locale = "it_IT",
    # bothify template: '?' -> random letter, '#' -> random digit.
    fiscal_code_format = '??????##?##?###',
    vat_id_formats = 'IT###########',
    # Characters that can appear in a fiscal code: digits 0-9 then A-Z
    # (seq(0, 9) is coerced to character on concatenation with LETTERS).
    ALPHANUMERICS = c(seq(0, 9), LETTERS),
    # Conversion tables for the fiscal-code check character:
    # [[1]] is applied to characters in odd (1-based) positions,
    # [[2]] to characters in even positions.
    CHECKSUM_TABLE = list(
      c(1, 0, 5, 7, 9, 13, 15, 17, 19, 21, 1, 0, 5, 7, 9, 13, 15, 17, 19,
        21, 2, 4, 18, 20, 11, 3, 6, 8, 12, 14, 16, 10, 22, 25, 24, 23),
      c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
        11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)),
    checksum = function(value) {
      # Calculate the check character appended as the 16th char of an
      # Italian fiscal code.
      # Fixes vs. the previous version:
      #  * split the input into single characters (the old loop iterated
      #    over the whole string as one element, so `which()` never
      #    matched ALPHANUMERICS and the sum stayed 0);
      #  * odd 1-based positions use table [[1]] and even positions use
      #    table [[2]] (the old `(i %% 2) + 1` had the tables swapped);
      #  * convert the final value to a letter with intToUtf8()
      #    (str_count() counts pattern matches, it never yields a letter).
      chars = str_split(value, "")[[1]]
      temp = 0
      for (i in seq_along(chars)) {
        index = which(private$ALPHANUMERICS == chars[i])
        temp = temp + private$CHECKSUM_TABLE[[2 - (i %% 2)]][index]
      }
      return(intToUtf8(65 + (temp %% 26)))
    }
  ),
  public = list(
    ssn = function(){
      # A random, format-valid Italian fiscal code: 15 bothified
      # characters plus the computed check character.
      code = str_to_upper(private$bothify(
        private$fiscal_code_format), locale = private$locale)
      return(str_c(code, private$checksum(code)))
    },
    vat_id = function(){
      # http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
      # :return: A random Italian VAT ID
      return(private$bothify(private$random_element(private$vat_id_formats)))
    }
  )
)
# credit_card -----------------
# Italian (it_IT) credit-card provider.
credit_card_it_IT = R6Class(
  "credit_card_it_IT",
  inherit = credit_card_init,
  cloneable = FALSE,
  private = list(
    # Reuse the Italian person provider so generated card-holder names
    # are locale-appropriate; all other behavior comes from
    # credit_card_init unchanged.
    first_names = (person_it_IT$new())$.__enclos_env__$private$first_names,
    first_name = (person_it_IT$new())$first_name,
    last_names = (person_it_IT$new())$.__enclos_env__$private$last_names,
    last_name = (person_it_IT$new())$last_name
  )
)
# profile ------------------
# Italian (it_IT) profile provider: composes the other it_IT providers
# into simple and full fake-person profiles.
profile_it_IT = R6Class(
  "profile_it_IT",
  inherit = profile_init,
  cloneable = FALSE,
  public = list(
    simple_profile = function(sex) {
      # Generate a basic profile with personal information.
      # @param sex "F" or "M"; sampled at random when missing or invalid.
      # @return named list with username, name, sex, address, mail,
      #   and birthdate.
      SEX = c("F", "M")
      if (missing(sex) || !(sex %in% SEX)) {
        sex = sample(SEX, 1)
      }
      # if/else instead of ifelse(): ifelse() evaluates both branches,
      # needlessly generating (and discarding) a name of the other sex.
      name = if (sex == "F") {
        (person_it_IT$new())$name_female()
      } else {
        (person_it_IT$new())$name_male()
      }
      temp = list(
        "username" = (internet_it_IT$new())$user_name(),
        "name" = name,
        "sex" = sex,
        "address" = (address_it_IT$new())$address(),
        "mail" = (internet_it_IT$new())$free_email(),
        "birthdate" = (date_time_init$new())$date_of_birth()
      )
      return(temp)
    },
    profile = function(fields, sex) {
      # Generate a complete profile.
      # @param fields optional character vector; when supplied, only the
      #   named fields are returned.
      # @param sex forwarded to simple_profile().
      if (missing(fields)) fields = c()
      field = list(
        "job" = (job_init$new())$job(),
        "company" = (company_it_IT$new())$company(),
        # Use the Italian SSN provider for locale consistency
        # (previously this called ssn_en_US, producing a US-format SSN
        # in an otherwise all-Italian profile).
        "ssn" = (ssn_it_IT$new())$ssn(),
        "residence" = (address_it_IT$new())$address(),
        "current_location" = c((geo_init$new())$latitude(),
                               (geo_init$new())$longitude()),
        "blood_group" = sample(c(
          "A+", "A-", "B+", "B-", "AB+", "AB-", "O+", "O-"), 1),
        "website" = replicate(sample(seq(4), 1), (internet_it_IT$new())$url())
      )
      field = append(field, self$simple_profile(sex))
      # field selection
      if (length(fields) > 0) {
        nms = intersect(names(field), fields)
        field = field[nms]
      }
      return(field)
    }
  )
)
|
65908bdfdccbb708c78f2c7f9fd983efd998331c
|
8b0dee9d51374e8bced0f0fd8efa8b6f0c14c9d7
|
/R/rkay.R
|
03513bf8bdcf98f95a69664abd3094740609e6c9
|
[] |
no_license
|
rwoldford/qqtest
|
4b9c595ea4c8f7e9ee6f1947e5f94e20c72be0a0
|
f3737db73bfd00e36067d394d749a7232c3f3bb9
|
refs/heads/master
| 2021-02-11T16:26:56.146877
| 2020-03-16T15:49:28
| 2020-03-16T15:49:28
| 244,509,892
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,990
|
r
|
rkay.R
|
#' @title \code{rkay} The K distribution - generating pseudo-random values
#'
#' @description Random generation for the K distribution on \code{df} degrees of freedom having non-centrality parameter \code{ncp}.
#'
#' A K distribution is the square root of a chi-square divided by its degrees of freedom. That is, if x is chi-squared on m degrees of freedom, then y = sqrt(x/m) is K on m degrees of freedom.
#' Under standard normal theory, K is the distribution of the pivotal quantity s/sigma where s is the sample standard deviation and sigma is the standard deviation parameter of the normal density. K is the natural distribution for tests and confidence intervals about sigma.
#' K densities are more nearly symmetric than are chi-squared and concentrate near 1. As the degrees of freedom increase, they become more symmetric, more concentrated, and more nearly normally distributed.
#'
#'
#' @export rkay
#'
#' @param n Number of observations. If \code{length(n) > 1}, the length is taken to be the number required.
#' @param df Degrees of freedom (non-negative, but can be non-integer).
#' @param ncp Non-centrality parameter (non-negative).
#'
#' @return \code{rkay} returns pseudo-randomly generated values.
#'
#' Invalid arguments will result in return value NaN, with a warning.
#'
#' @note Depends on call to analogous chi-squared functions. See \code{rchisq} for details on non-centrality parameter calculations.
#'
#'
#' @examples
#'
#' x <- rkay(100, 20)
#' hist(x, main="100 observations from a K(20)")
#' # Certainly looks like it comes from a K on 20
#' qqtest(x, dist="kay",df=20)
#' # for this many degrees of freedom it looks
#' # a lot like a gaussian (normal) distribution
#' qqtest(x, dist="gau",df=1)
#' # But not like it came from a K on 1 degree of freedom
#' qqtest(x, dist="kay",df=1)
#' #
#' # See the vignette for more on the "K-distribution"
#' #
rkay <- function(n, df, ncp = 0) {
  # A K variate is the square root of a (noncentral) chi-square on df
  # degrees of freedom divided by df.  If K has noncentrality ncp, the
  # underlying chi-square noncentrality is df * ncp^2.
  # Note: ncp is always passed to rchisq() (even when 0) to keep the
  # noncentral sampling path, matching the original implementation.
  sqrt(rchisq(n, df, ncp = df * ncp^2) / df)
}
|
1eb459e9d78bfb79ba4382082d39a78da8ab8070
|
76abe33b0dac505b1f7d771c799e18b57a8f4417
|
/shiny/View real-time data.R
|
01efe1bed0f26faf8e7e60f047b42ec349979d45
|
[] |
no_license
|
jyeazell/DataCamp_practice
|
4ddaf889b07a2ef3fcd0965bee7d71372e3eb2f3
|
de4443e01d5414913aa555a5771d5eadc9f83700
|
refs/heads/master
| 2022-12-19T23:27:19.410533
| 2020-10-09T20:31:07
| 2020-10-09T20:31:07
| 183,300,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,172
|
r
|
View real-time data.R
|
# View real-time data
# Now that you've read in the real-time data as reactive_starwars_data(), you
# can examine it as it updates using renderTable(). If you save this reactive
# table, you can then render it using the tableOutput() function. For example,
# if we had a reactive data frame called my_reactive_data() we could save this
# as output$my_table and render it using tableOutput() with the following code:
#
# server <- function(input, output, session) {
# output$my_table <- renderTable({
# my_reactive_data()
# })
# }
#
# body <- dashboardBody(
# tableOutput("my_table")
# )
library(shiny)
# dashboardBody()/dashboardPage()/dashboardHeader()/dashboardSidebar()
# below come from shinydashboard; without this the script fails at
# dashboardBody() with "could not find function".
library(shinydashboard)

# NOTE(review): `starwars_url` (a URL to a CSV file) must be defined
# before this script runs -- presumably supplied by the exercise
# environment; confirm.
server <- function(input, output, session) {
  # Re-read the remote CSV once per second so the rendered table tracks
  # the latest data.
  reactive_starwars_data <- reactiveFileReader(
    intervalMillis = 1000,
    session = session,
    filePath = starwars_url,
    readFunc = function(filePath) {
      read.csv(url(filePath))
    }
  )
  output$table <- renderTable({
    reactive_starwars_data()
  })
}

body <- dashboardBody(
  tableOutput("table")
)

ui <- dashboardPage(header = dashboardHeader(),
                    sidebar = dashboardSidebar(),
                    body = body
)

shinyApp(ui, server)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.