content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#The assignment is about cache the inverse of matix
#Rather than computing the inverse of the matrix its better to cache it avoid repeated computation
#The below function creates a special object matrix to cache the inverse.
# Construct a special "matrix" object that can cache its own inverse.
#
# Returns a list of accessor closures sharing one environment:
#   set(y)          - replace the stored matrix and invalidate the cache
#   get()           - return the stored matrix
#   setInverse(inv) - store a computed inverse in the cache
#   getInverse()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix, old inverse no longer valid
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
#The below function computes the matrix inverse of the special matrix from the above function.
#The inverse of the matrix is cached if its not the same matrix
# Return the inverse of the special "cache matrix" object 'x'.
#
# If an inverse is already cached it is returned immediately (with a
# message); otherwise the inverse is computed with solve(), stored in the
# cache, and returned. Extra arguments in '...' are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # cache miss: compute, store, and fall through to return
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
Vijay08/ProgrammingAssignment2
|
R
| false
| false
| 1,121
|
r
|
#The assignment is about cache the inverse of matix
#Rather than computing the inverse of the matrix its better to cache it avoid repeated computation
#The below function creates a special object matrix to cache the inverse.
# Construct a special "matrix" object that can cache its own inverse.
#
# Returns a list of accessor closures sharing one environment:
#   set(y)          - replace the stored matrix and invalidate the cache
#   get()           - return the stored matrix
#   setInverse(inv) - store a computed inverse in the cache
#   getInverse()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix, old inverse no longer valid
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
#The below function computes the matrix inverse of the special matrix from the above function.
#The inverse of the matrix is cached if its not the same matrix
# Return the inverse of the special "cache matrix" object 'x'.
#
# If an inverse is already cached it is returned immediately (with a
# message); otherwise the inverse is computed with solve(), stored in the
# cache, and returned. Extra arguments in '...' are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # cache miss: compute, store, and fall through to return
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
# documentation separate from implementation because roxygen can't handle adding methods to another package's R6 classes
#' Create Azure Cosmos DB account
#'
#' Method for the [AzureRMR::az_resource_group] class.
#'
#' @rdname create_cosmosdb_account
#' @name create_cosmosdb_account
#' @aliases create_cosmosdb_account
#' @section Usage:
#' ```
#' create_cosmosdb_account(
#' name,
#' location = self$location,
#' interface = c("sql", "cassandra", "mongo", "table", "graph"),
#' serverless = FALSE,
#' free_tier = FALSE,
#' properties = list(),
#' ...
#' )
#' ```
#' @section Arguments:
#' - `name`: The name of the Cosmos DB account.
#' - `location`: The location/region in which to create the account. Defaults to the resource group's location.
#' - `interface`: The default API by which to access data in the account.
#' - `serverless`: Whether this account should use provisioned throughput or a serverless mode. In the latter, you are charged solely on the basis of the traffic generated by your database operations. Serverless mode is best suited for small-to-medium workloads with light and intermittent traffic that is hard to forecast; it is currently (January 2021) in preview.
#' - `free_tier`: Whether this account should be in the free tier, in which a certain amount of database operations are provided free of charge. You can have one free tier account per subscription.
#' - `properties`: Additional properties to set for the account.
#' - `wait`: Whether to wait until the Cosmos DB account provisioning is complete.
#' - `...`: Optional arguments to pass to `az_cosmosdb$new()`.
#' @section Details:
#' This method creates a new Azure Cosmos DB account in the given resource group. Azure Cosmos DB is a globally distributed multi-model database that supports the document, graph, and key-value data models.
#'
#' The ARM resource object provides methods for working in the management plane. For working in the data plane, AzureCosmosR provides a client framework that interfaces with the core (SQL) API. Other packages provide functionality for other APIs, such as AzureTableStor for table storage and mongolite for MongoDB.
#' @section Value:
#' An object of class `az_cosmosdb` representing the Cosmos DB account.
#' @seealso
#' [get_cosmosdb_account], [delete_cosmosdb_account]
#'
#' For the SQL API client framework: [cosmos_endpoint], [cosmos_database], [cosmos_container], [query_documents]
#'
#' For the table storage API: [AzureTableStor::table_endpoint]
#'
#' For the MongoDB API: [cosmos_mongo_endpoint], [mongolite::mongo]
NULL
#' Get Azure Cosmos DB account
#'
#' Method for the [AzureRMR::az_resource_group] class.
#'
#' @rdname get_cosmosdb_account
#' @name get_cosmosdb_account
#' @aliases get_cosmosdb_account list_cosmosdb_accounts
#' @section Usage:
#' ```
#' get_cosmosdb_account(name)
#' list_cosmosdb_accounts()
#' ```
#' @section Arguments:
#' - `name`: The name of the Cosmos DB account.
#' @section Details:
#' `get_cosmosdb_account` retrieves the details for an existing Azure Cosmos DB account. `list_cosmosdb_accounts` retrieves all the Cosmos DB accounts within the resource group.
#' @section Value:
#' For `get_cosmosdb_account`, an object of class `az_cosmosdb` representing the Cosmos DB account. For `list_cosmosdb_accounts`, a list of such objects.
#' @seealso
#' [create_cosmosdb_account], [delete_cosmosdb_account]
#'
#' For the SQL API client framework: [cosmos_endpoint], [cosmos_database], [cosmos_container], [query_documents]
#'
#' For the table storage API: [AzureTableStor::table_endpoint]
#'
#' For the MongoDB API: [cosmos_mongo_endpoint], [mongolite::mongo]
NULL
#' Delete Azure Cosmos DB account
#'
#' Method for the [AzureRMR::az_resource_group] class.
#'
#' @rdname delete_cosmosdb_account
#' @name delete_cosmosdb_account
#' @aliases delete_cosmosdb_account
#' @section Usage:
#' ```
#' delete_cosmosdb_account(name, confirm = TRUE, wait = FALSE)
#' ```
#' @section Arguments:
#' - `name`: The name of the Cosmos DB account.
#' - `confirm`: Whether to ask for confirmation before deleting.
#' - `wait`: Whether to wait until the deletion has completed before returning.
#' @section Details:
#' This method deletes an existing Azure Cosmos DB account.
#' @seealso
#' [create_cosmosdb_account], [get_cosmosdb_account]
#'
#' For the SQL API client framework: [cosmos_endpoint], [cosmos_database], [cosmos_container], [query_documents]
#'
#' For the table storage API: [AzureTableStor::table_endpoint]
#'
#' For the MongoDB API: [cosmos_mongo_endpoint], [mongolite::mongo]
NULL
# Attach Cosmos DB management methods to AzureRMR's az_resource_group R6
# class. The roxygen documentation for these methods lives in the NULL-bound
# blocks above, since roxygen cannot document methods added to another
# package's class.
add_methods <- function()
{
    az_resource_group$set("public", "create_cosmosdb_account", overwrite=TRUE,
    function(name, location=self$location,
             interface=c("sql", "cassandra", "mongo", "table", "graph"),
             serverless=FALSE, free_tier=FALSE,
             properties=list(), wait=TRUE, ...)
    {
        interface <- match.arg(interface)
        # MongoDB accounts are a distinct ARM account "kind"; every other API
        # is a GlobalDocumentDB account selected via capability flags
        kind <- if(interface == "mongo") "MongoDB" else "GlobalDocumentDB"
        capabilities <- if(interface == "cassandra")
            list(list(name="EnableCassandra"))
        else if(interface == "mongo")
            list(
                list(name="EnableMongo"),
                list(name="DisableRateLimitingResponses")
            )
        else if(interface == "table")
            list(list(name="EnableTable"))
        else if(interface == "graph")
            list(list(name="EnableGremlin"))
        else list()  # "sql" is the default API and needs no capability flag
        if(serverless)
            capabilities <- c(capabilities, list(list(name="EnableServerless")))
        # note argument order: the fixed fields below override any clashing
        # entries the caller put in 'properties'
        properties <- utils::modifyList(properties, list(
            databaseAccountOfferType="standard",
            enableFreeTier=free_tier,
            capabilities=capabilities,
            locations=list(
                list(
                    id=paste0(name, "-", location),
                    failoverPriority=0,
                    locationName=location
                )
            )
        ))
        AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, self$name,
            type="Microsoft.documentDB/databaseAccounts", name=name, location=location,
            kind=kind, properties=properties, wait=wait, ...)
    })

    az_resource_group$set("public", "get_cosmosdb_account", overwrite=TRUE,
    function(name)
    {
        AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, self$name,
            type="Microsoft.documentDB/databaseAccounts", name=name)
    })

    az_resource_group$set("public", "list_cosmosdb_accounts", overwrite=TRUE,
    function()
    {
        provider <- "Microsoft.documentDB"
        path <- "databaseAccounts"
        api_version <- az_subscription$
            new(self$token, self$subscription)$
            get_provider_api_version(provider, path)
        op <- file.path("resourceGroups", self$name, "providers", provider, path)
        cont <- call_azure_rm(self$token, self$subscription, op, api_version=api_version)
        lst <- lapply(cont$value,
            function(parms) AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, deployed_properties=parms))
        # keep going until paging is complete
        # BUG FIX: append each page to the accumulated list; previously 'lst'
        # was overwritten every iteration, discarding all but the final page
        while(!is_empty(cont$nextLink))
        {
            cont <- call_azure_url(self$token, cont$nextLink)
            lst <- c(lst, lapply(cont$value,
                function(parms) AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, deployed_properties=parms)))
        }
        named_list(lst)
    })

    az_resource_group$set("public", "delete_cosmosdb_account", overwrite=TRUE,
    function(name, confirm=TRUE, wait=FALSE)
    {
        # delegate to the retrieved resource object's own delete method
        self$get_cosmosdb_account(name)$delete(confirm=confirm, wait=wait)
    })
}
|
/R/add_methods.R
|
no_license
|
cran/AzureCosmosR
|
R
| false
| false
| 7,893
|
r
|
# documentation separate from implementation because roxygen can't handle adding methods to another package's R6 classes
#' Create Azure Cosmos DB account
#'
#' Method for the [AzureRMR::az_resource_group] class.
#'
#' @rdname create_cosmosdb_account
#' @name create_cosmosdb_account
#' @aliases create_cosmosdb_account
#' @section Usage:
#' ```
#' create_cosmosdb_account(
#' name,
#' location = self$location,
#' interface = c("sql", "cassandra", "mongo", "table", "graph"),
#' serverless = FALSE,
#' free_tier = FALSE,
#' properties = list(),
#' ...
#' )
#' ```
#' @section Arguments:
#' - `name`: The name of the Cosmos DB account.
#' - `location`: The location/region in which to create the account. Defaults to the resource group's location.
#' - `interface`: The default API by which to access data in the account.
#' - `serverless`: Whether this account should use provisioned throughput or a serverless mode. In the latter, you are charged solely on the basis of the traffic generated by your database operations. Serverless mode is best suited for small-to-medium workloads with light and intermittent traffic that is hard to forecast; it is currently (January 2021) in preview.
#' - `free_tier`: Whether this account should be in the free tier, in which a certain amount of database operations are provided free of charge. You can have one free tier account per subscription.
#' - `properties`: Additional properties to set for the account.
#' - `wait`: Whether to wait until the Cosmos DB account provisioning is complete.
#' - `...`: Optional arguments to pass to `az_cosmosdb$new()`.
#' @section Details:
#' This method creates a new Azure Cosmos DB account in the given resource group. Azure Cosmos DB is a globally distributed multi-model database that supports the document, graph, and key-value data models.
#'
#' The ARM resource object provides methods for working in the management plane. For working in the data plane, AzureCosmosR provides a client framework that interfaces with the core (SQL) API. Other packages provide functionality for other APIs, such as AzureTableStor for table storage and mongolite for MongoDB.
#' @section Value:
#' An object of class `az_cosmosdb` representing the Cosmos DB account.
#' @seealso
#' [get_cosmosdb_account], [delete_cosmosdb_account]
#'
#' For the SQL API client framework: [cosmos_endpoint], [cosmos_database], [cosmos_container], [query_documents]
#'
#' For the table storage API: [AzureTableStor::table_endpoint]
#'
#' For the MongoDB API: [cosmos_mongo_endpoint], [mongolite::mongo]
NULL
#' Get Azure Cosmos DB account
#'
#' Method for the [AzureRMR::az_resource_group] class.
#'
#' @rdname get_cosmosdb_account
#' @name get_cosmosdb_account
#' @aliases get_cosmosdb_account list_cosmosdb_accounts
#' @section Usage:
#' ```
#' get_cosmosdb_account(name)
#' list_cosmosdb_accounts()
#' ```
#' @section Arguments:
#' - `name`: The name of the Cosmos DB account.
#' @section Details:
#' `get_cosmosdb_account` retrieves the details for an existing Azure Cosmos DB account. `list_cosmosdb_accounts` retrieves all the Cosmos DB accounts within the resource group.
#' @section Value:
#' For `get_cosmosdb_account`, an object of class `az_cosmosdb` representing the Cosmos DB account. For `list_cosmosdb_accounts`, a list of such objects.
#' @seealso
#' [create_cosmosdb_account], [delete_cosmosdb_account]
#'
#' For the SQL API client framework: [cosmos_endpoint], [cosmos_database], [cosmos_container], [query_documents]
#'
#' For the table storage API: [AzureTableStor::table_endpoint]
#'
#' For the MongoDB API: [cosmos_mongo_endpoint], [mongolite::mongo]
NULL
#' Delete Azure Cosmos DB account
#'
#' Method for the [AzureRMR::az_resource_group] class.
#'
#' @rdname delete_cosmosdb_account
#' @name delete_cosmosdb_account
#' @aliases delete_cosmosdb_account
#' @section Usage:
#' ```
#' delete_cosmosdb_account(name, confirm = TRUE, wait = FALSE)
#' ```
#' @section Arguments:
#' - `name`: The name of the Cosmos DB account.
#' - `confirm`: Whether to ask for confirmation before deleting.
#' - `wait`: Whether to wait until the deletion has completed before returning.
#' @section Details:
#' This method deletes an existing Azure Cosmos DB account.
#' @seealso
#' [create_cosmosdb_account], [get_cosmosdb_account]
#'
#' For the SQL API client framework: [cosmos_endpoint], [cosmos_database], [cosmos_container], [query_documents]
#'
#' For the table storage API: [AzureTableStor::table_endpoint]
#'
#' For the MongoDB API: [cosmos_mongo_endpoint], [mongolite::mongo]
NULL
# Attach Cosmos DB management methods to AzureRMR's az_resource_group R6
# class. The roxygen documentation for these methods lives in the NULL-bound
# blocks above, since roxygen cannot document methods added to another
# package's class.
add_methods <- function()
{
    az_resource_group$set("public", "create_cosmosdb_account", overwrite=TRUE,
    function(name, location=self$location,
             interface=c("sql", "cassandra", "mongo", "table", "graph"),
             serverless=FALSE, free_tier=FALSE,
             properties=list(), wait=TRUE, ...)
    {
        interface <- match.arg(interface)
        # MongoDB accounts are a distinct ARM account "kind"; every other API
        # is a GlobalDocumentDB account selected via capability flags
        kind <- if(interface == "mongo") "MongoDB" else "GlobalDocumentDB"
        capabilities <- if(interface == "cassandra")
            list(list(name="EnableCassandra"))
        else if(interface == "mongo")
            list(
                list(name="EnableMongo"),
                list(name="DisableRateLimitingResponses")
            )
        else if(interface == "table")
            list(list(name="EnableTable"))
        else if(interface == "graph")
            list(list(name="EnableGremlin"))
        else list()  # "sql" is the default API and needs no capability flag
        if(serverless)
            capabilities <- c(capabilities, list(list(name="EnableServerless")))
        # note argument order: the fixed fields below override any clashing
        # entries the caller put in 'properties'
        properties <- utils::modifyList(properties, list(
            databaseAccountOfferType="standard",
            enableFreeTier=free_tier,
            capabilities=capabilities,
            locations=list(
                list(
                    id=paste0(name, "-", location),
                    failoverPriority=0,
                    locationName=location
                )
            )
        ))
        AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, self$name,
            type="Microsoft.documentDB/databaseAccounts", name=name, location=location,
            kind=kind, properties=properties, wait=wait, ...)
    })

    az_resource_group$set("public", "get_cosmosdb_account", overwrite=TRUE,
    function(name)
    {
        AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, self$name,
            type="Microsoft.documentDB/databaseAccounts", name=name)
    })

    az_resource_group$set("public", "list_cosmosdb_accounts", overwrite=TRUE,
    function()
    {
        provider <- "Microsoft.documentDB"
        path <- "databaseAccounts"
        api_version <- az_subscription$
            new(self$token, self$subscription)$
            get_provider_api_version(provider, path)
        op <- file.path("resourceGroups", self$name, "providers", provider, path)
        cont <- call_azure_rm(self$token, self$subscription, op, api_version=api_version)
        lst <- lapply(cont$value,
            function(parms) AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, deployed_properties=parms))
        # keep going until paging is complete
        # BUG FIX: append each page to the accumulated list; previously 'lst'
        # was overwritten every iteration, discarding all but the final page
        while(!is_empty(cont$nextLink))
        {
            cont <- call_azure_url(self$token, cont$nextLink)
            lst <- c(lst, lapply(cont$value,
                function(parms) AzureCosmosR::az_cosmosdb$new(self$token, self$subscription, deployed_properties=parms)))
        }
        named_list(lst)
    })

    az_resource_group$set("public", "delete_cosmosdb_account", overwrite=TRUE,
    function(name, confirm=TRUE, wait=FALSE)
    {
        # delegate to the retrieved resource object's own delete method
        self$get_cosmosdb_account(name)$delete(confirm=confirm, wait=wait)
    })
}
|
/MyNotes/02 - R Programming/applyFunctions.R
|
no_license
|
vitorefigenio/datasciencecoursera
|
R
| false
| false
| 2,871
|
r
| ||
# Gene set enrichment analysis (GSEA) of KEGG pathway annotations with fgsea.
library(fgsea)
library(data.table)
library(ggplot2)
# load the pathway annotation data (GMT-style, tab-separated; one gene set
# per row, padded with empty fields to equal width)
paths_data<-read.delim("~/Escritorio/WGCNA/KEGG_annotation/GMT_Files/LbrM2903_parsed.gmt",
header = F,
sep = "\t")
# first column holds the pathway names
nams <- paths_data[, 1]
# drop the name column and the unused second column
paths_data <- paths_data[, -c(1:2)]
# convert the data frame to a list: one element (gene set) per row
pathways <- split(paths_data, seq_len(nrow(paths_data)))
# remove the blank padding entries from each gene set
pathways <- lapply(pathways, function(x) x[x != ""])
# attach the pathway names to the list
names(pathways) <- nams
str(head(pathways))
# gene-level ranking statistic: median expression per gene
# (assumes two columns, gene_id then median -- TODO confirm file layout)
ranks <- read.table('~/Escritorio/WGCNA/LbraM2903_median.htseq',
header=TRUE,
sep = '\t',
colClasses = c("character", "numeric"))
ranks <- setNames(ranks$median, ranks$gene_id)
str(ranks)
# run fgsea with 10,000 permutations for the empirical p-values
fgseaRes <- fgsea(pathways = pathways,
stats = ranks,
nperm=10000)
head(fgseaRes[order(pval), ])
# number of pathways significant after BH adjustment
sum(fgseaRes[, padj < 0.05])
# top 10 enriched pathways in each direction, by raw p-value
topPathwaysUp <- fgseaRes[ES > 0][head(order(pval), n=10), pathway]
topPathwaysDown <- fgseaRes[ES < 0][head(order(pval), n=10), pathway]
topPathways <- c(topPathwaysUp, rev(topPathwaysDown))
plotGseaTable(pathways[topPathways], ranks, fgseaRes,
gseaParam = 0.05)
barplot(sort(ranks, decreasing = T))
# NOTE(review): plotEnrichment() expects a single pathway (a character vector
# of gene ids), not the full pathway list -- this call likely needs
# pathways[[<pathway name>]]; verify against the fgsea documentation
plotEnrichment(pathways, ranks)
|
/WGCNA/GSEA.R
|
no_license
|
lalomartinez/ncRNA_leish
|
R
| false
| false
| 1,404
|
r
|
# Gene set enrichment analysis (GSEA) of KEGG pathway annotations with fgsea.
library(fgsea)
library(data.table)
library(ggplot2)
# load the pathway annotation data (GMT-style, tab-separated; one gene set
# per row, padded with empty fields to equal width)
paths_data<-read.delim("~/Escritorio/WGCNA/KEGG_annotation/GMT_Files/LbrM2903_parsed.gmt",
header = F,
sep = "\t")
# first column holds the pathway names
nams <- paths_data[, 1]
# drop the name column and the unused second column
paths_data <- paths_data[, -c(1:2)]
# convert the data frame to a list: one element (gene set) per row
pathways <- split(paths_data, seq_len(nrow(paths_data)))
# remove the blank padding entries from each gene set
pathways <- lapply(pathways, function(x) x[x != ""])
# attach the pathway names to the list
names(pathways) <- nams
str(head(pathways))
# gene-level ranking statistic: median expression per gene
# (assumes two columns, gene_id then median -- TODO confirm file layout)
ranks <- read.table('~/Escritorio/WGCNA/LbraM2903_median.htseq',
header=TRUE,
sep = '\t',
colClasses = c("character", "numeric"))
ranks <- setNames(ranks$median, ranks$gene_id)
str(ranks)
# run fgsea with 10,000 permutations for the empirical p-values
fgseaRes <- fgsea(pathways = pathways,
stats = ranks,
nperm=10000)
head(fgseaRes[order(pval), ])
# number of pathways significant after BH adjustment
sum(fgseaRes[, padj < 0.05])
# top 10 enriched pathways in each direction, by raw p-value
topPathwaysUp <- fgseaRes[ES > 0][head(order(pval), n=10), pathway]
topPathwaysDown <- fgseaRes[ES < 0][head(order(pval), n=10), pathway]
topPathways <- c(topPathwaysUp, rev(topPathwaysDown))
plotGseaTable(pathways[topPathways], ranks, fgseaRes,
gseaParam = 0.05)
barplot(sort(ranks, decreasing = T))
# NOTE(review): plotEnrichment() expects a single pathway (a character vector
# of gene ids), not the full pathway list -- this call likely needs
# pathways[[<pathway name>]]; verify against the fgsea documentation
plotEnrichment(pathways, ranks)
|
##required libraries
library(osfr)
library(tidyverse)
library(here)
library(psych)
library(MOTE)
library(lmerTest)
library(lavaan)
library(semTools)
library(broom)
library(tidyLPA)
library(semPlot)
## reading in data
# fetch the cleaned survey export from OSF into the working directory
osf_retrieve_file("https://osf.io/86upq/") %>%
osf_download(overwrite = T)
# read the survey, declaring column types explicitly; any column not listed
# defaults to numeric
survey_data <- read_csv(here::here('/Documents/data-science/Sloan_grant/Survey/cleaned_data.csv'), col_types = cols(.default = col_number(),
StartDate = col_datetime(format = '%m/%d/%y %H:%M'),
EndDate = col_datetime(format = '%m/%d/%y %H:%M'),
ResponseId = col_character(),
position_7_TEXT = col_character(),
familiar = col_factor(),
preprints_submitted = col_factor(),
preprints_used = col_factor(),
position = col_factor(),
acad_career_stage = col_factor(),
country = col_factor(),
continent = col_factor(),
discipline = col_character(),
discipline_specific = col_character(),
discipline_other = col_character(),
bepress_tier1 = col_character(),
bepress_tier2 = col_character(),
bepress_tier3 = col_character(),
discipline_collapsed = col_factor(),
how_heard = col_character(),
hdi_level = col_factor(),
age = col_character())) %>%
# order the ordinal factors and recode 'Not sure' responses as missing
mutate(hdi_level = fct_relevel(hdi_level, c('low', 'medium', 'high', 'very high')),
preprints_used = recode_factor(preprints_used, `Not sure` = NA_character_),
preprints_used = fct_relevel(preprints_used, c('No', 'Yes, once', 'Yes, a few times', 'Yes, many times')),
preprints_submitted = recode_factor(preprints_submitted, `Not sure` = NA_character_),
preprints_submitted = fct_relevel(preprints_submitted, c('No', 'Yes, once', 'Yes, a few times', 'Yes, many times')),
familiar = fct_relevel(familiar, c('Not familiar at all', 'Slightly familiar', 'Moderately familiar', 'Very familiar', 'Extremely familiar')),
acad_career_stage = fct_relevel(acad_career_stage, c('Grad Student', 'Post doc', 'Assist Prof', 'Assoc Prof', 'Full Prof'))) %>%
# make missingness an explicit factor level for grouping/tabulation
mutate(hdi_level = fct_explicit_na(hdi_level, '(Missing)'),
familiar = fct_explicit_na(familiar, '(Missing)'),
discipline_collapsed = fct_explicit_na(discipline_collapsed, '(Missing)')) %>%
# count unanswered questions per respondent
# BUG FIX: the original wrote rowSums(is.na(survey_data)) inside the very
# pipeline that defines survey_data, which errors in a fresh session (or
# silently uses a stale object from a previous run); compute on the rows
# flowing through the pipe instead
mutate(missing_qs = rowSums(across(everything(), is.na)))
#### basic sample characteristics ####
# total sample who consented (one row per respondent)
nrow(survey_data)
# percentage of respondents who only consented (answered none of the 54 items)
round(100*sum(survey_data$missing_qs == 54)/nrow(survey_data), 2)
# attrition rate among those who answered at least 1 question
# (answered something but did not reach 100% progress)
round(100 * sum(survey_data$missing_qs < 54 & survey_data$Progress != 100)/sum(survey_data$missing_qs < 54), 2)
# number who answered at least 1 question after consent
sum(survey_data$missing_qs < 54)
# familiarity level of sample
survey_data %>%
group_by(familiar) %>%
tally()
100*sum(survey_data$familiar == 'Extremely familiar' | survey_data$familiar == 'Very familiar', na.rm = T)/nrow(survey_data) #percentage familiar
100*sum(survey_data$familiar == 'Not familiar at all', na.rm = T)/nrow(survey_data) #percentage unfamiliar
# favorability level of sample (favor_use is centered at 0 = neutral)
survey_data %>%
group_by(favor_use) %>%
tally()
100*sum(survey_data$favor_use < 0, na.rm = T)/nrow(survey_data) #percentage unfavorable
100*sum(survey_data$favor_use == 0, na.rm = T)/nrow(survey_data) #percentage neutral
100*sum(survey_data$favor_use > 0, na.rm = T)/nrow(survey_data) #percentage favorable
# preprint usage: percentage who used OR submitted preprints more than once
100* sum(survey_data$preprints_used == 'Yes, many times' | survey_data$preprints_used == 'Yes, a few times' | survey_data$preprints_submitted == 'Yes, many times' | survey_data$preprints_submitted == 'Yes, a few times', na.rm = T)/nrow(survey_data)
survey_data %>%
group_by(preprints_submitted) %>%
tally()
survey_data %>%
group_by(preprints_used) %>%
tally()
100*sum(survey_data$preprints_used == 'Yes, many times' | survey_data$preprints_used == 'Yes, a few times' , na.rm = T)/nrow(survey_data) #percentage who used preprints repeatedly
100*sum(survey_data$preprints_submitted == 'Yes, many times' | survey_data$preprints_submitted == 'Yes, a few times' , na.rm = T)/nrow(survey_data) #percentage who submitted preprints repeatedly
# demographics #
survey_data %>%
group_by(acad_career_stage) %>%
tally()
100*sum(survey_data$acad_career_stage == 'Grad Student' | survey_data$acad_career_stage == 'Post doc' , na.rm = T)/nrow(survey_data) #percentage early career (grad students + post docs)
100*sum(grepl('Prof', survey_data$acad_career_stage))/nrow(survey_data) #percentage professors (assist/assoc/full)
100*sum(is.na(survey_data$acad_career_stage))/nrow(survey_data) #percentage with missing career stage
survey_data %>%
group_by(bepress_tier1) %>%
summarize(n = n(), percentage = 100*n/nrow(survey_data))
# share of Social and Behavioral Sciences respondents who are psychologists
100*sum(survey_data$discipline_collapsed == 'Psychology', na.rm = T)/sum(survey_data$bepress_tier1 == 'Social and Behavioral Sciences', na.rm = T)
# country related variables
survey_data %>%
group_by(hdi_level) %>%
summarize(n = n(), percentage = 100*n/nrow(survey_data))
survey_data %>%
group_by(continent) %>%
summarize(n = n(), percentage = 100*n/nrow(survey_data)) %>%
arrange(desc(n))
survey_data %>%
filter(continent == 'North America') %>%
group_by(country) %>%
summarize(n = n(), percentage = 100*n/nrow(survey_data)) %>%
arrange(desc(n))
# does favoring use correlate with use and/or submission?
# Spearman correlations; ordinal factors converted to 0-based numeric codes
rcis_favor_use <- survey_data %>%
mutate(preprints_used = as.numeric(preprints_used)-1,
preprints_submitted = as.numeric(preprints_submitted)-1) %>%
select(preprints_used, preprints_submitted, favor_use) %>%
corr.test(adjust = 'none', method = 'spearman')
### initial career/discipline analyses ###
# reshape the credibility items to long form: one row per respondent x item
credibility_data_long <- survey_data %>%
dplyr::select(ResponseId, starts_with('preprint_cred'), discipline_collapsed, acad_career_stage) %>%
drop_na() %>%
pivot_longer(cols = starts_with('preprint_cred'), names_to = 'question', values_to = 'response') %>%
mutate(question = as.factor(question))
# by discipline analysis #
# mixed model: discipline x question fixed effects, random intercept per
# respondent; 'Other' and 'Engineering' disciplines excluded
discipline_model <- lmer(response ~ discipline_collapsed + question + discipline_collapsed:question + (1|ResponseId), credibility_data_long %>% filter(discipline_collapsed != 'Other' & discipline_collapsed != 'Engineering'))
anova_output <- anova(discipline_model)
# partial generalized eta-squared (MOTE); sums of squares are reconstructed
# from the ANOVA table's SS, df, and F columns -- verify the index positions
# against the lmerTest anova() layout if the model changes
discipline_gespartial <- ges.partial.SS.mix(dfm = anova_output[1, 3], dfe = anova_output[1, 4], ssm = anova_output[1, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[1, 5], a = .05)
question_gespartial <- ges.partial.SS.mix(dfm = anova_output[2, 3], dfe = anova_output[2, 4], ssm = anova_output[2, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[2, 5], a = .05)
# effect size and 95% CI for the discipline effect
discipline_gespartial$ges
discipline_gespartial$geslow
discipline_gespartial$geshigh
# effect size and 95% CI for the question effect
question_gespartial$ges
question_gespartial$geslow
question_gespartial$geshigh
# by academic position analysis #
# same model structure with career stage in place of discipline
position_model <- lmer(response ~ acad_career_stage + question + acad_career_stage:question + (1|ResponseId), credibility_data_long)
anova_output <- anova(position_model)
academic_gespartial <- ges.partial.SS.mix(dfm = anova_output[1, 3], dfe = anova_output[1, 4], ssm = anova_output[1, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[1, 5], a = .05)
question_gespartial <- ges.partial.SS.mix(dfm = anova_output[2, 3], dfe = anova_output[2, 4], ssm = anova_output[2, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[2, 5], a = .05)
# effect size and 95% CI for the career-stage effect
academic_gespartial$ges
academic_gespartial$geslow
academic_gespartial$geshigh
# effect size and 95% CI for the question effect
question_gespartial$ges
question_gespartial$geslow
question_gespartial$geshigh
#### exploratory factor analysis ####
# credibility items only, with ResponseId as row names
credibilty_qs <- survey_data %>%
dplyr::select(ResponseId,starts_with('preprint_cred')) %>%
column_to_rownames('ResponseId')
# parallel analysis to suggest the number of factors
fa.parallel(credibilty_qs)
# compare 6-, 5-, and 4-factor oblique (oblimin) solutions
fa6 <- fa(credibilty_qs, nfactors = 6, rotate = 'oblimin')
fa6
fa.diagram(fa6)
fa5 <- fa(credibilty_qs, nfactors = 5, rotate = 'oblimin')
fa5
fa.diagram(fa5)
fa4 <- fa(credibilty_qs, nfactors = 4, rotate = 'oblimin')
fa4
fa.diagram(fa4)
#### SEM model of favorability, preprint use, & preprint submission on 6 factors ####
# ordinal factors converted to 0-based numeric codes for use as predictors
sem_data <- survey_data %>%
mutate(preprints_used = as.numeric(preprints_used) - 1,
preprints_submitted = as.numeric(preprints_submitted) - 1)
# lavaan syntax: six credibility factors regressed on favorability and
# preprint use/submission
favor_use_model <- 'traditional =~ preprint_cred1_1 + preprint_cred1_2 + preprint_cred1_3
open_icons =~ preprint_cred4_1 + preprint_cred4_2 + preprint_cred4_3 + preprint_cred4_4
verifications =~ preprint_cred5_1 + preprint_cred5_2 + preprint_cred5_3
opinions =~ preprint_cred3_1 + preprint_cred3_2 + preprint_cred3_3
other =~ preprint_cred1_4 + preprint_cred2_1
usage =~ preprint_cred2_3 + preprint_cred2_4
traditional ~ favor_use + preprints_used + preprints_submitted
open_icons ~ favor_use + preprints_used + preprints_submitted
verifications ~ favor_use + preprints_used + preprints_submitted
opinions ~ favor_use + preprints_used + preprints_submitted
other ~ favor_use + preprints_used + preprints_submitted
usage ~ favor_use + preprints_used + preprints_submitted'
favoruse_fit <- cfa(favor_use_model, sem_data)
summary(favoruse_fit, fit.measures=TRUE)
# structural (regression) paths only, with CIs and standardized estimates
parameterEstimates(favoruse_fit, ci = T, level = .95, standardized = T) %>%
filter(op == '~')
semPaths(favoruse_fit)
# measurement invariance of factor model across positions
# measurement model only (no structural paths), reused as the base below
base_model <- 'traditional =~ preprint_cred1_1 + preprint_cred1_2 + preprint_cred1_3
open_icons =~ preprint_cred4_1 + preprint_cred4_2 + preprint_cred4_3 + preprint_cred4_4
verifications =~ preprint_cred5_1 + preprint_cred5_2 + preprint_cred5_3
opinions =~ preprint_cred3_1 + preprint_cred3_2 + preprint_cred3_3
other =~ preprint_cred1_4 + preprint_cred2_1
usage =~ preprint_cred2_3 + preprint_cred2_4'
fit <- cfa(base_model, data = survey_data)
summary(fit, fit.measures = T)
# sem model
# dummy-code career stage with 'Grad Student' as the reference category
sem_data <- sem_data %>%
mutate(career_code1 = case_when(acad_career_stage == 'Post doc' ~ 1,
acad_career_stage != 'Post doc' ~ 0),
career_code2 = case_when(acad_career_stage == 'Assist Prof' ~ 1,
acad_career_stage != 'Assist Prof' ~ 0),
career_code3 = case_when(acad_career_stage == 'Assoc Prof' ~ 1,
acad_career_stage != 'Assoc Prof' ~ 0),
career_code4 = case_when(acad_career_stage == 'Full Prof' ~ 1,
acad_career_stage != 'Full Prof' ~ 0))
# same six factors, now regressed on the career-stage dummies
career_model <- 'traditional =~ preprint_cred1_1 + preprint_cred1_2 + preprint_cred1_3
open_icons =~ preprint_cred4_1 + preprint_cred4_2 + preprint_cred4_3 + preprint_cred4_4
verifications =~ preprint_cred5_1 + preprint_cred5_2 + preprint_cred5_3
opinions =~ preprint_cred3_1 + preprint_cred3_2 + preprint_cred3_3
other =~ preprint_cred1_4 + preprint_cred2_1
usage =~ preprint_cred2_3 + preprint_cred2_4
traditional ~ career_code1 + career_code2 + career_code3 + career_code4
open_icons ~ career_code1 + career_code2 + career_code3 + career_code4
verifications ~ career_code1 + career_code2 + career_code3 + career_code4
opinions ~ career_code1 + career_code2 + career_code3 + career_code4
other ~ career_code1 + career_code2 + career_code3 + career_code4
usage ~ career_code1 + career_code2 + career_code3 + career_code4'
career_fit <- cfa(career_model, sem_data)
summary(career_fit, fit.measures=TRUE)
parameterEstimates(career_fit, ci = T, level = .95, standardized = T) %>%
filter(op == '~')
# by group measurement invariance (across career stages)
# NOTE(review): semTools::measurementInvariance() is deprecated in recent
# semTools releases in favor of measEq.syntax(); confirm the installed version
position_models <- cfa(model = base_model, data = survey_data, group = 'acad_career_stage')
summary(position_models, fit.measures = T)
measurementInvariance(model = base_model, data = survey_data, group = 'acad_career_stage')
# by group measurement invariance (across disciplines, excluding Other/Engineering)
discipline_models <- cfa(model = base_model, data = survey_data %>% filter(discipline_collapsed != 'Other' & discipline_collapsed != 'Engineering'), group = 'discipline_collapsed')
summary(discipline_models , fit.measures = T)
measurementInvariance(model = base_model, data = survey_data %>% filter(discipline_collapsed != 'Other' & discipline_collapsed != 'Engineering'), group = 'discipline_collapsed')
|
/Sloan_grant/Survey/Survey_analyses.R
|
permissive
|
bgonzalezbustamante/data-science
|
R
| false
| false
| 14,618
|
r
|
##required libraries
library(osfr)
library(tidyverse)
library(here)
library(psych)
library(MOTE)
library(lmerTest)
library(lavaan)
library(semTools)
library(broom)
library(tidyLPA)
library(semPlot)
## reading in data
# fetch the cleaned survey export from OSF into the working directory
osf_retrieve_file("https://osf.io/86upq/") %>%
osf_download(overwrite = T)
# read the survey, declaring column types explicitly; any column not listed
# defaults to numeric
survey_data <- read_csv(here::here('/Documents/data-science/Sloan_grant/Survey/cleaned_data.csv'), col_types = cols(.default = col_number(),
StartDate = col_datetime(format = '%m/%d/%y %H:%M'),
EndDate = col_datetime(format = '%m/%d/%y %H:%M'),
ResponseId = col_character(),
position_7_TEXT = col_character(),
familiar = col_factor(),
preprints_submitted = col_factor(),
preprints_used = col_factor(),
position = col_factor(),
acad_career_stage = col_factor(),
country = col_factor(),
continent = col_factor(),
discipline = col_character(),
discipline_specific = col_character(),
discipline_other = col_character(),
bepress_tier1 = col_character(),
bepress_tier2 = col_character(),
bepress_tier3 = col_character(),
discipline_collapsed = col_factor(),
how_heard = col_character(),
hdi_level = col_factor(),
age = col_character())) %>%
# order the ordinal factors and recode 'Not sure' responses as missing
mutate(hdi_level = fct_relevel(hdi_level, c('low', 'medium', 'high', 'very high')),
preprints_used = recode_factor(preprints_used, `Not sure` = NA_character_),
preprints_used = fct_relevel(preprints_used, c('No', 'Yes, once', 'Yes, a few times', 'Yes, many times')),
preprints_submitted = recode_factor(preprints_submitted, `Not sure` = NA_character_),
preprints_submitted = fct_relevel(preprints_submitted, c('No', 'Yes, once', 'Yes, a few times', 'Yes, many times')),
familiar = fct_relevel(familiar, c('Not familiar at all', 'Slightly familiar', 'Moderately familiar', 'Very familiar', 'Extremely familiar')),
acad_career_stage = fct_relevel(acad_career_stage, c('Grad Student', 'Post doc', 'Assist Prof', 'Assoc Prof', 'Full Prof'))) %>%
# make missingness an explicit factor level for grouping/tabulation
mutate(hdi_level = fct_explicit_na(hdi_level, '(Missing)'),
familiar = fct_explicit_na(familiar, '(Missing)'),
discipline_collapsed = fct_explicit_na(discipline_collapsed, '(Missing)')) %>%
# count unanswered questions per respondent
# BUG FIX: the original wrote rowSums(is.na(survey_data)) inside the very
# pipeline that defines survey_data, which errors in a fresh session (or
# silently uses a stale object from a previous run); compute on the rows
# flowing through the pipe instead
mutate(missing_qs = rowSums(across(everything(), is.na)))
#### basic sample characteristics ####
# total sample who consented
nrow(survey_data)
# percentage of respondents who only consented (answered none of the 54 items)
round(100*sum(survey_data$missing_qs == 54)/nrow(survey_data), 2)
# attrition rate among those who answered at least 1 question (did not reach 100% progress)
round(100 * sum(survey_data$missing_qs < 54 & survey_data$Progress != 100)/sum(survey_data$missing_qs < 54), 2)
# number who answered at least 1 question after consent
sum(survey_data$missing_qs < 54)
# familiarity level of sample (counts per level, incl. '(Missing)')
survey_data %>%
  group_by(familiar) %>%
  tally()
100*sum(survey_data$familiar == 'Extremely familiar' | survey_data$familiar == 'Very familiar', na.rm = T)/nrow(survey_data) #percentage familiar
100*sum(survey_data$familiar == 'Not familiar at all', na.rm = T)/nrow(survey_data) #percentage unfamiliar
# favorability level of sample (favor_use is centered: <0 unfavorable, 0 neutral, >0 favorable)
survey_data %>%
  group_by(favor_use) %>%
  tally()
100*sum(survey_data$favor_use < 0, na.rm = T)/nrow(survey_data) #percentage unfavorable
100*sum(survey_data$favor_use == 0, na.rm = T)/nrow(survey_data) #percentage neutral
100*sum(survey_data$favor_use > 0, na.rm = T)/nrow(survey_data) #percentage favorable
# preprint usage: percentage who used OR submitted preprints more than once
100* sum(survey_data$preprints_used == 'Yes, many times' | survey_data$preprints_used == 'Yes, a few times' | survey_data$preprints_submitted == 'Yes, many times' | survey_data$preprints_submitted == 'Yes, a few times', na.rm = T)/nrow(survey_data)
survey_data %>%
  group_by(preprints_submitted) %>%
  tally()
survey_data %>%
  group_by(preprints_used) %>%
  tally()
100*sum(survey_data$preprints_used == 'Yes, many times' | survey_data$preprints_used == 'Yes, a few times' , na.rm = T)/nrow(survey_data) # percentage who used preprints more than once
100*sum(survey_data$preprints_submitted == 'Yes, many times' | survey_data$preprints_submitted == 'Yes, a few times' , na.rm = T)/nrow(survey_data) # percentage who submitted preprints more than once
# demographics #
# counts per academic career stage
survey_data %>%
  group_by(acad_career_stage) %>%
  tally()
100*sum(survey_data$acad_career_stage == 'Grad Student' | survey_data$acad_career_stage == 'Post doc' , na.rm = T)/nrow(survey_data) # percentage trainees (grad students + post docs)
100*sum(grepl('Prof', survey_data$acad_career_stage))/nrow(survey_data) # percentage professors (assistant/associate/full)
100*sum(is.na(survey_data$acad_career_stage))/nrow(survey_data) # percentage with missing career stage
# discipline counts/percentages by top-level bepress taxonomy tier
survey_data %>%
  group_by(bepress_tier1) %>%
  summarize(n = n(), percentage = 100*n/nrow(survey_data))
# share of social/behavioral respondents who are psychologists
100*sum(survey_data$discipline_collapsed == 'Psychology', na.rm = T)/sum(survey_data$bepress_tier1 == 'Social and Behavioral Sciences', na.rm = T)
# country related variables
survey_data %>%
  group_by(hdi_level) %>%
  summarize(n = n(), percentage = 100*n/nrow(survey_data))
survey_data %>%
  group_by(continent) %>%
  summarize(n = n(), percentage = 100*n/nrow(survey_data)) %>%
  arrange(desc(n))
# North American respondents broken down by country
survey_data %>%
  filter(continent == 'North America') %>%
  group_by(country) %>%
  summarize(n = n(), percentage = 100*n/nrow(survey_data)) %>%
  arrange(desc(n))
# Does favoring preprint use correlate with actual use and/or submission?
# Spearman correlations (no p-value adjustment); the ordinal factors are
# converted to 0-based numeric scores before correlating.
rcis_favor_use <- survey_data %>%
  transmute(
    preprints_used = as.numeric(preprints_used) - 1,
    preprints_submitted = as.numeric(preprints_submitted) - 1,
    favor_use = favor_use
  ) %>%
  corr.test(method = 'spearman', adjust = 'none')
### initial career/discipline analyses ###
# Reshape the credibility items to long format (one row per respondent x item).
# drop_na() keeps only complete cases across the selected columns.
credibility_data_long <- survey_data %>%
  dplyr::select(ResponseId, starts_with('preprint_cred'), discipline_collapsed, acad_career_stage) %>%
  drop_na() %>%
  pivot_longer(cols = starts_with('preprint_cred'), names_to = 'question', values_to = 'response') %>%
  mutate(question = as.factor(question))
# by discipline analysis #
# Mixed model: credibility rating ~ discipline x question with a random
# intercept per respondent; 'Other' and 'Engineering' excluded.
discipline_model <- lmer(response ~ discipline_collapsed + question + discipline_collapsed:question + (1|ResponseId), credibility_data_long %>% filter(discipline_collapsed != 'Other' & discipline_collapsed != 'Engineering'))
anova_output <- anova(discipline_model)
# Generalized eta-squared (partial) with CIs for the mixed design.
# sss/sse are reconstructed from the ANOVA table as SS * DenDF / (NumDF * F),
# i.e. MS_error * DenDF. NOTE(review): assumes anova() returns a lmerTest-style
# table with columns [SS, MS, NumDF, DenDF, F] and that ges.partial.SS.mix
# comes from the MOTE package loaded earlier -- confirm both.
discipline_gespartial <- ges.partial.SS.mix(dfm = anova_output[1, 3], dfe = anova_output[1, 4], ssm = anova_output[1, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[1, 5], a = .05)
question_gespartial <- ges.partial.SS.mix(dfm = anova_output[2, 3], dfe = anova_output[2, 4], ssm = anova_output[2, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[2, 5], a = .05)
# effect size point estimates and CI bounds
discipline_gespartial$ges
discipline_gespartial$geslow
discipline_gespartial$geshigh
question_gespartial$ges
question_gespartial$geslow
question_gespartial$geshigh
# by academic position analysis #
# Same mixed design as above, substituting career stage for discipline.
position_model <- lmer(response ~ acad_career_stage + question + acad_career_stage:question + (1|ResponseId), credibility_data_long)
anova_output <- anova(position_model)
# Generalized eta-squared (partial); sums of squares reconstructed from the
# ANOVA table as SS * DenDF / (NumDF * F) -- see the discipline analysis above.
academic_gespartial <- ges.partial.SS.mix(dfm = anova_output[1, 3], dfe = anova_output[1, 4], ssm = anova_output[1, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[1, 5], a = .05)
question_gespartial <- ges.partial.SS.mix(dfm = anova_output[2, 3], dfe = anova_output[2, 4], ssm = anova_output[2, 1], sss = (anova_output[1, 1] * anova_output[1, 4])/(anova_output[1, 3] * anova_output[1, 5]), sse = (anova_output[2, 1] * anova_output[2, 4])/(anova_output[2, 3] * anova_output[2, 5]), Fvalue = anova_output[2, 5], a = .05)
# effect size point estimates and CI bounds
academic_gespartial$ges
academic_gespartial$geslow
academic_gespartial$geshigh
question_gespartial$ges
question_gespartial$geslow
question_gespartial$geshigh
#### exploratory factor analysis ####
# Credibility items only, respondents as rownames (psych::fa expects a
# numeric matrix/data frame of items).
credibilty_qs <- survey_data %>%
  dplyr::select(ResponseId,starts_with('preprint_cred')) %>%
  column_to_rownames('ResponseId')
# Parallel analysis to suggest the number of factors to retain.
fa.parallel(credibilty_qs)
# Compare 6-, 5-, and 4-factor solutions with oblique (oblimin) rotation.
fa6 <- fa(credibilty_qs, nfactors = 6, rotate = 'oblimin')
fa6
fa.diagram(fa6)
fa5 <- fa(credibilty_qs, nfactors = 5, rotate = 'oblimin')
fa5
fa.diagram(fa5)
fa4 <- fa(credibilty_qs, nfactors = 4, rotate = 'oblimin')
fa4
fa.diagram(fa4)
#### SEM model of favorability, preprint use, & preprint submission on 6 factors ####
# Convert the ordinal use/submission factors to 0-based numeric predictors.
sem_data <- survey_data %>%
  mutate(preprints_used = as.numeric(preprints_used) - 1,
         preprints_submitted = as.numeric(preprints_submitted) - 1)
# lavaan syntax: six latent credibility factors (measurement part) each
# regressed on favorability, use, and submission (structural part).
favor_use_model <- 'traditional =~ preprint_cred1_1 + preprint_cred1_2 + preprint_cred1_3
open_icons =~ preprint_cred4_1 + preprint_cred4_2 + preprint_cred4_3 + preprint_cred4_4
verifications =~ preprint_cred5_1 + preprint_cred5_2 + preprint_cred5_3
opinions =~ preprint_cred3_1 + preprint_cred3_2 + preprint_cred3_3
other =~ preprint_cred1_4 + preprint_cred2_1
usage =~ preprint_cred2_3 + preprint_cred2_4
traditional ~ favor_use + preprints_used + preprints_submitted
open_icons ~ favor_use + preprints_used + preprints_submitted
verifications ~ favor_use + preprints_used + preprints_submitted
opinions ~ favor_use + preprints_used + preprints_submitted
other ~ favor_use + preprints_used + preprints_submitted
usage ~ favor_use + preprints_used + preprints_submitted'
favoruse_fit <- cfa(favor_use_model, sem_data)
summary(favoruse_fit, fit.measures=TRUE)
# standardized regression paths with 95% CIs (op == '~' keeps regressions only)
parameterEstimates(favoruse_fit, ci = T, level = .95, standardized = T) %>%
  filter(op == '~')
# path diagram of the fitted model
semPaths(favoruse_fit)
# measurement invariance of factor model across positions
# Measurement-only model (the six latent factors with no structural paths);
# reused below for the multi-group invariance tests.
base_model <- 'traditional =~ preprint_cred1_1 + preprint_cred1_2 + preprint_cred1_3
open_icons =~ preprint_cred4_1 + preprint_cred4_2 + preprint_cred4_3 + preprint_cred4_4
verifications =~ preprint_cred5_1 + preprint_cred5_2 + preprint_cred5_3
opinions =~ preprint_cred3_1 + preprint_cred3_2 + preprint_cred3_3
other =~ preprint_cred1_4 + preprint_cred2_1
usage =~ preprint_cred2_3 + preprint_cred2_4'
# whole-sample CFA fit of the measurement model
fit <- cfa(base_model, data = survey_data)
summary(fit, fit.measures = T)
# sem model
# Dummy-code academic career stage for the SEM (reference level: Grad Student).
# if_else() leaves NA career stages as NA, matching the original case_when()
# pairs, which returned NA when neither condition could be evaluated.
sem_data <- sem_data %>%
  mutate(
    career_code1 = if_else(acad_career_stage == 'Post doc', 1, 0),
    career_code2 = if_else(acad_career_stage == 'Assist Prof', 1, 0),
    career_code3 = if_else(acad_career_stage == 'Assoc Prof', 1, 0),
    career_code4 = if_else(acad_career_stage == 'Full Prof', 1, 0)
  )
# Same six-factor measurement model, with each factor regressed on the
# career-stage dummy codes created above (Grad Student = reference).
career_model <- 'traditional =~ preprint_cred1_1 + preprint_cred1_2 + preprint_cred1_3
open_icons =~ preprint_cred4_1 + preprint_cred4_2 + preprint_cred4_3 + preprint_cred4_4
verifications =~ preprint_cred5_1 + preprint_cred5_2 + preprint_cred5_3
opinions =~ preprint_cred3_1 + preprint_cred3_2 + preprint_cred3_3
other =~ preprint_cred1_4 + preprint_cred2_1
usage =~ preprint_cred2_3 + preprint_cred2_4
traditional ~ career_code1 + career_code2 + career_code3 + career_code4
open_icons ~ career_code1 + career_code2 + career_code3 + career_code4
verifications ~ career_code1 + career_code2 + career_code3 + career_code4
opinions ~ career_code1 + career_code2 + career_code3 + career_code4
other ~ career_code1 + career_code2 + career_code3 + career_code4
usage ~ career_code1 + career_code2 + career_code3 + career_code4'
career_fit <- cfa(career_model, sem_data)
summary(career_fit, fit.measures=TRUE)
# standardized regression paths with 95% CIs
parameterEstimates(career_fit, ci = T, level = .95, standardized = T) %>%
  filter(op == '~')
# measurement invariance across academic positions
position_models <- cfa(model = base_model, data = survey_data, group = 'acad_career_stage')
summary(position_models, fit.measures = T)
# NOTE(review): measurementInvariance() is deprecated in recent semTools
# releases in favor of measEq.syntax() -- consider migrating.
measurementInvariance(model = base_model, data = survey_data, group = 'acad_career_stage')
# measurement invariance across disciplines ('Other'/'Engineering' excluded)
discipline_models <- cfa(model = base_model, data = survey_data %>% filter(discipline_collapsed != 'Other' & discipline_collapsed != 'Engineering'), group = 'discipline_collapsed')
summary(discipline_models , fit.measures = T)
measurementInvariance(model = base_model, data = survey_data %>% filter(discipline_collapsed != 'Other' & discipline_collapsed != 'Engineering'), group = 'discipline_collapsed')
|
# script for open jobs data profiling
# DataExplorer::introduce() gives a one-row overview (rows, columns, missing
# values, memory) of the parsed Open Jobs extract.
library(DataExplorer)
library(data.table)
# Relative path -- assumes the working directory is the repository root.
ojobs <- fread('./data/stem_edu/working/allOpenjobsParsed.csv')
introduce(ojobs)
|
/src/burn_glass_validation/openjobs_profile/openjobs_profiling.R
|
no_license
|
uva-bi-sdad/stem_edu
|
R
| false
| false
| 161
|
r
|
# script for open jobs data profiling
# DataExplorer::introduce() gives a one-row overview (rows, columns, missing
# values, memory) of the parsed Open Jobs extract.
library(DataExplorer)
library(data.table)
# Relative path -- assumes the working directory is the repository root.
ojobs <- fread('./data/stem_edu/working/allOpenjobsParsed.csv')
introduce(ojobs)
|
\name{BLUP}
\alias{BLUP}
\alias{ENV}
\alias{gwnam}
\alias{mrr}
\title{
Best Linear Unbiased Predictor
}
\description{
Genetic values for a given trait computed by REML.
}
\usage{
BLUP(trait="yield",family="all",env="all",dereg=FALSE,
MAF=0.05,use.check=TRUE,impute="FM",rm.rep=TRUE)
}
\arguments{
\item{trait}{
Character. Trait of interest. The options are: "yield" (grain yield in Kg/ha), "maturity" (days to maturity), "height" (plant height in cm), "lodging" (lodging score from 1 to 5), "protein" (protein percentage in the grain), "oil" (oil percentage in the grain) and "size" (seed size, mass of 100 seeds in grams).
}
\item{family}{
Numeric vector or "all". Which SoyNAM families to use.
}
\item{env}{
Numeric vector or "all". Which environments to use. The environments are coded as follows: 1 (IA_2012), 2 (IA_2013), 3 (IL_2011), 4 (IL_2012), 5 (IL_2013), 6 (IN_2012), 7 (IN_2013), 8 (KS_2012), 9 (KS_2013), 10 (MI_2012), 11 (MO_2012), 12 (MO_2013), 13 (NE_2011), 14 (NE_2012), 15 (OHmc_2012), 16 (OHmc_2013), 17 (OHmi_2012) and 18 (OHmi_2013).
}
\item{dereg}{
Logical. If TRUE, deregress BLUPs (remove shrinkage).
}
\item{MAF}{
Numeric. Minor allele frequency threshold for the markers.
}
\item{use.check}{
Logical. If TRUE, it includes a control term as fixed effect in the model.
}
\item{impute}{
NULL, 'EXP' or 'FM'. If 'EXP', it imputes missing genotypes using expectation (allele frequency). If 'FM', it imputes missing genotypes using a forward Markov chain algorithm, filling missing loci with the most likely genotype based on the previous marker.
}
\item{rm.rep}{
Logical. If TRUE, it removes replicated genotypes. Genotypes are treated as identical when the genotypes are more than 95 percent identical. This argument requires imputed genotypes.
}
}
\details{
This function uses the raw dataset (\eqn{data(soynam)}), allowing user-defined data quality control for genotypes and BLUPs of genetic values.
The algorithm starts by selecting the chosen families and environments that will be used for the best linear unbiased prediction (BLUP). The BLUP values are calculated based on the following model:
\eqn{Trait = Control + Environment + Genotype}
Where Control is a covariate set as a fixed effect based on the checks of each set (microenvironment); Environment is a random effect that represents the combination of location and year; and Genotype is the random effect associated with the lines. The BLUP values are the regression coefficients corresponding to the Genotype effect. The BLUP is calculated by REML using the R package lme4 (Bates 2010).
If checks are used as a covariate (use.check=TRUE), then the best linear unbiased estimator (BLUE) of the check effects is assigned to each set as a micro-environmental control. Each set had between one and five controls, including the SoyNAM parents and five other cultivars. These genotypes are normalized by environment and the BLUE of each set is calculated. All genotypes in the same set will have the same check effect.
}
\value{
This function returns a list with four objects. A numeric vector with the BLUP solution of the phenotypes ("Phen"); the corresponding genotypes ("Gen"); a vector with the respective family ("Fam"); and a numeric vector with the number of SNPs per chromosome ("Chrom"). The output of this function has the exact input format for the NAM package (Xavier et al. 2015) to perform genome-wide association analysis.
}
\references{
Bates, D. M. (2010). lme4: Mixed-effects modeling with R. URL http://lme4.r-forge.r-project.org/book.
Xavier, A., Xu, S., Muir, W. M., & Rainey, K. M. (2015). NAM: association studies in multiple populations. Bioinformatics, 31(23), 3862-3864.
}
\author{
Alencar Xavier
}
\examples{
Test=BLUP(trait="yield",family=2:3,env=1:2)
}
|
/man/BLUP.Rd
|
no_license
|
alenxav/SoyNAM
|
R
| false
| false
| 3,848
|
rd
|
\name{BLUP}
\alias{BLUP}
\alias{ENV}
\alias{gwnam}
\alias{mrr}
\title{
Best Linear Unbiased Predictor
}
\description{
Genetic values for a given trait computed by REML.
}
\usage{
BLUP(trait="yield",family="all",env="all",dereg=FALSE,
MAF=0.05,use.check=TRUE,impute="FM",rm.rep=TRUE)
}
\arguments{
\item{trait}{
Character. Trait of interest. The options are: "yield" (grain yield in Kg/ha), "maturity" (days to maturity), "height" (plant height in cm), "lodging" (lodging score from 1 to 5), "protein" (protein percentage in the grain), "oil" (oil percentage in the grain) and "size" (seed size, mass of 100 seeds in grams).
}
\item{family}{
Numeric vector or "all". Which SoyNAM families to use.
}
\item{env}{
Numeric vector or "all". Which environments to use. The environments are coded as follows: 1 (IA_2012), 2 (IA_2013), 3 (IL_2011), 4 (IL_2012), 5 (IL_2013), 6 (IN_2012), 7 (IN_2013), 8 (KS_2012), 9 (KS_2013), 10 (MI_2012), 11 (MO_2012), 12 (MO_2013), 13 (NE_2011), 14 (NE_2012), 15 (OHmc_2012), 16 (OHmc_2013), 17 (OHmi_2012) and 18 (OHmi_2013).
}
\item{dereg}{
Logical. If TRUE, deregress BLUPs (remove shrinkage).
}
\item{MAF}{
Numeric. Minor allele frequency threshold for the markers.
}
\item{use.check}{
Logical. If TRUE, it includes a control term as fixed effect in the model.
}
\item{impute}{
NULL, 'EXP' or 'FM'. If 'EXP', it imputes missing genotypes using expectation (allele frequency). If 'FM', it imputes missing genotypes using a forward Markov chain algorithm, filling missing loci with the most likely genotype based on the previous marker.
}
\item{rm.rep}{
Logical. If TRUE, it removes replicated genotypes. Genotypes are treated as identical when the genotypes are more than 95 percent identical. This argument requires imputed genotypes.
}
}
\details{
This function uses the raw dataset (\eqn{data(soynam)}), allowing user-defined data quality control for genotypes and BLUPs of genetic values.
The algorithm starts by selecting the chosen families and environments that will be used for the best linear unbiased prediction (BLUP). The BLUP values are calculated based on the following model:
\eqn{Trait = Control + Environment + Genotype}
Where Control is a covariate set as a fixed effect based on the checks of each set (microenvironment); Environment is a random effect that represents the combination of location and year; and Genotype is the random effect associated with the lines. The BLUP values are the regression coefficients corresponding to the Genotype effect. The BLUP is calculated by REML using the R package lme4 (Bates 2010).
If checks are used as a covariate (use.check=TRUE), then the best linear unbiased estimator (BLUE) of the check effects is assigned to each set as a micro-environmental control. Each set had between one and five controls, including the SoyNAM parents and five other cultivars. These genotypes are normalized by environment and the BLUE of each set is calculated. All genotypes in the same set will have the same check effect.
}
\value{
This function returns a list with four objects. A numeric vector with the BLUP solution of the phenotypes ("Phen"); the corresponding genotypes ("Gen"); a vector with the respective family ("Fam"); and a numeric vector with the number of SNPs per chromosome ("Chrom"). The output of this function has the exact input format for the NAM package (Xavier et al. 2015) to perform genome-wide association analysis.
}
\references{
Bates, D. M. (2010). lme4: Mixed-effects modeling with R. URL http://lme4.r-forge.r-project.org/book.
Xavier, A., Xu, S., Muir, W. M., & Rainey, K. M. (2015). NAM: association studies in multiple populations. Bioinformatics, 31(23), 3862-3864.
}
\author{
Alencar Xavier
}
\examples{
Test=BLUP(trait="yield",family=2:3,env=1:2)
}
|
# Auto-generated fuzz/regression harness (valgrind test file): calls CNull's
# internal entry point with extreme inputs. in_m is a 10x3 numeric matrix
# whose first four cells hold near-overflow/underflow doubles; m is NULL and
# repetitions is 0 to probe edge-case handling.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.20688722640421e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
# print the structure of whatever the C++ routine returned
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615778013-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 348
|
r
|
# Auto-generated fuzz/regression harness (valgrind test file): calls CNull's
# internal entry point with extreme inputs. in_m is a 10x3 numeric matrix
# whose first four cells hold near-overflow/underflow doubles; m is NULL and
# repetitions is 0 to probe edge-case handling.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.20688722640421e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
# print the structure of whatever the C++ routine returned
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/revalue.levels.R
\name{revalue.levels}
\alias{revalue.levels}
\alias{revalue.levels_}
\title{Revalue data frame factor levels.}
\usage{
revalue.levels(df, ...)
revalue.levels_(df, dots)
}
\arguments{
\item{df}{A data frame (or quitte object).}
\item{...}{Name-value pairs assigning a named vector with new names to a
column from the dataframe.}
\item{dots}{A named list of columns containing the named vector with the old
and new names for each column}
}
\value{
A data frame (or quitte object, same as \code{data}).
}
\description{
Revalue the names of a level or character column in a dataframe, according to
a named vector given as an input
}
\examples{
data <- inline.data.frame(c(
"model; scenario; region; variable; unit; period; value",
"REMIND; Baseline; USA; GDP per Capita|MER; US$2005/yr; 2010; 40000",
"REMIND; Baseline; USA; Population; million; 2010; 300",
"REMIND; Baseline; CHN; GDP per Capita|MER; US$2005/yr; 2010; 7000"))
reg_vec = c(USA = "United States")
var_vec = c("GDP per Capita|MER" = "gdp",
Population = "pop")
revalue.levels(data,region = reg_vec)
revalue.levels_(data,list(region = reg_vec, variable = var_vec))
}
\author{
Antoine Levesque
}
|
/man/revalue.levels.Rd
|
no_license
|
pik-piam/quitte
|
R
| false
| true
| 1,358
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/revalue.levels.R
\name{revalue.levels}
\alias{revalue.levels}
\alias{revalue.levels_}
\title{Revalue data frame factor levels.}
\usage{
revalue.levels(df, ...)
revalue.levels_(df, dots)
}
\arguments{
\item{df}{A data frame (or quitte object).}
\item{...}{Name-value pairs assigning a named vector with new names to a
column from the dataframe.}
\item{dots}{A named list of columns containing the named vector with the old
and new names for each column}
}
\value{
A data frame (or quitte object, same as \code{data}).
}
\description{
Revalue the names of a level or character column in a dataframe, according to
a named vector given as an input
}
\examples{
data <- inline.data.frame(c(
"model; scenario; region; variable; unit; period; value",
"REMIND; Baseline; USA; GDP per Capita|MER; US$2005/yr; 2010; 40000",
"REMIND; Baseline; USA; Population; million; 2010; 300",
"REMIND; Baseline; CHN; GDP per Capita|MER; US$2005/yr; 2010; 7000"))
reg_vec = c(USA = "United States")
var_vec = c("GDP per Capita|MER" = "gdp",
Population = "pop")
revalue.levels(data,region = reg_vec)
revalue.levels_(data,list(region = reg_vec, variable = var_vec))
}
\author{
Antoine Levesque
}
|
\name{ecospat.cv.glm}
\alias{ecospat.cv.glm}
\title{GLM Cross Validation}
\description{K-fold and leave-one-out cross validation for GLM.}
\usage{ecospat.cv.glm (glm.obj, K=10, cv.lim=10, jack.knife=FALSE)}
\arguments{
\item{glm.obj}{Any calibrated GLM object with a binomial error distribution.}
\item{K}{Number of folds. 10 is recommended; 5 for small data sets.}
\item{cv.lim}{Minimum number of presences required to perform the K-fold cross-validation.}
\item{jack.knife}{If TRUE, then the leave-one-out / jackknife cross-validation is performed instead of the 10-fold cross-validation.}
}
\details{This function takes a calibrated GLM object with a binomial error distribution and returns predictions from a stratified 10-fold cross-validation or a leave-one-out / jack-knifed cross-validation. Stratified means that the original prevalence of the presences and absences in the full dataset is conserved in each fold.}
\value{Returns a dataframe with the observations (obs) and the corresponding predictions by cross-validation or jackknife.}
\references{Randin, C.F., T. Dirnbock, S. Dullinger, N.E. Zimmermann, M. Zappa and A. Guisan. 2006. Are niche-based species distribution models transferable in space? \emph{Journal of Biogeography}, \bold{33}, 1689-1703.
Pearman, P.B., C.F. Randin, O. Broennimann, P. Vittoz, W.O. van der Knaap, R. Engler, G. Le Lay, N.E. Zimmermann and A. Guisan. 2008. Prediction of plant species distributions across six millennia. \emph{Ecology Letters}, \bold{11}, 357-369.}
\author{Christophe Randin \email{christophe.randin@unibas.ch} and Antoine Guisan \email{antoine.guisan@unil.ch}}
\examples{
\dontrun{
ecospat.cv.example() #generates data
glm <- ecospat.cv.glm (glm.obj = get ("glm.Achillea_atrata", envir=ecospat.env),
K=10, cv.lim=10, jack.knife=FALSE)
glm
}
}
|
/ecospat/man/ecospat.cv.glm.Rd
|
no_license
|
lzhangss/ecospat
|
R
| false
| false
| 1,862
|
rd
|
\name{ecospat.cv.glm}
\alias{ecospat.cv.glm}
\title{GLM Cross Validation}
\description{K-fold and leave-one-out cross validation for GLM.}
\usage{ecospat.cv.glm (glm.obj, K=10, cv.lim=10, jack.knife=FALSE)}
\arguments{
\item{glm.obj}{Any calibrated GLM object with a binomial error distribution.}
\item{K}{Number of folds. 10 is recommended; 5 for small data sets.}
\item{cv.lim}{Minimum number of presences required to perform the K-fold cross-validation.}
\item{jack.knife}{If TRUE, then the leave-one-out / jackknife cross-validation is performed instead of the 10-fold cross-validation.}
}
\details{This function takes a calibrated GLM object with a binomial error distribution and returns predictions from a stratified 10-fold cross-validation or a leave-one-out / jack-knifed cross-validation. Stratified means that the original prevalence of the presences and absences in the full dataset is conserved in each fold.}
\value{Returns a dataframe with the observations (obs) and the corresponding predictions by cross-validation or jackknife.}
\references{Randin, C.F., T. Dirnbock, S. Dullinger, N.E. Zimmermann, M. Zappa and A. Guisan. 2006. Are niche-based species distribution models transferable in space? \emph{Journal of Biogeography}, \bold{33}, 1689-1703.
Pearman, P.B., C.F. Randin, O. Broennimann, P. Vittoz, W.O. van der Knaap, R. Engler, G. Le Lay, N.E. Zimmermann and A. Guisan. 2008. Prediction of plant species distributions across six millennia. \emph{Ecology Letters}, \bold{11}, 357-369.}
\author{Christophe Randin \email{christophe.randin@unibas.ch} and Antoine Guisan \email{antoine.guisan@unil.ch}}
\examples{
\dontrun{
ecospat.cv.example() #generates data
glm <- ecospat.cv.glm (glm.obj = get ("glm.Achillea_atrata", envir=ecospat.env),
K=10, cv.lim=10, jack.knife=FALSE)
glm
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{surveys_put_samples}
\alias{surveys_put_samples}
\title{Update a sample}
\usage{
surveys_put_samples(id, sample_table_id = NULL, server_name = NULL,
schema = NULL, table_name = NULL, unique_id = NULL, metadata = NULL)
}
\arguments{
\item{id}{integer required.}
\item{sample_table_id}{integer optional.}
\item{server_name}{string optional.}
\item{schema}{string optional.}
\item{table_name}{string optional.}
\item{unique_id}{string optional.}
\item{metadata}{object optional.}
}
\value{
A list containing the following elements:
\item{id}{integer, }
\item{sampleTable}{object, A list containing the following elements:
\itemize{
\item schema string,
\item name string,
}}
\item{vendorSampleTable}{object, A list containing the following elements:
\itemize{
\item schema string,
\item name string,
}}
\item{strataTabsTable}{object, A list containing the following elements:
\itemize{
\item schema string,
\item name string,
}}
\item{uniqueId}{string, }
\item{householdColumn}{integer, }
\item{basefile}{object, A list containing the following elements:
\itemize{
\item id integer,
\item name string,
\item sourceTable object,
\item remoteHost object,
\item uniqueId string,
\item bucketColumns array,
\item contactabilityColumns array,
\item audienceColumns array,
\item createdAt string,
\item updatedAt string,
}}
\item{desiredCompletes}{integer, }
\item{oversamplePercent}{integer, }
\item{buckets}{array, An array containing the following fields:
\itemize{
\item name string,
\item column string,
}}
\item{contactMethods}{array, }
\item{targetAudience}{string, }
\item{populationLimitingSQL}{string, }
\item{frameLimitingSQL}{string, }
\item{parentId}{integer, }
\item{metadata}{object, }
}
\description{
Update a sample
}
|
/man/surveys_put_samples.Rd
|
permissive
|
wlattner/civis-r
|
R
| false
| true
| 1,860
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{surveys_put_samples}
\alias{surveys_put_samples}
\title{Update a sample}
\usage{
surveys_put_samples(id, sample_table_id = NULL, server_name = NULL,
schema = NULL, table_name = NULL, unique_id = NULL, metadata = NULL)
}
\arguments{
\item{id}{integer required.}
\item{sample_table_id}{integer optional.}
\item{server_name}{string optional.}
\item{schema}{string optional.}
\item{table_name}{string optional.}
\item{unique_id}{string optional.}
\item{metadata}{object optional.}
}
\value{
A list containing the following elements:
\item{id}{integer, }
\item{sampleTable}{object, A list containing the following elements:
\itemize{
\item schema string,
\item name string,
}}
\item{vendorSampleTable}{object, A list containing the following elements:
\itemize{
\item schema string,
\item name string,
}}
\item{strataTabsTable}{object, A list containing the following elements:
\itemize{
\item schema string,
\item name string,
}}
\item{uniqueId}{string, }
\item{householdColumn}{integer, }
\item{basefile}{object, A list containing the following elements:
\itemize{
\item id integer,
\item name string,
\item sourceTable object,
\item remoteHost object,
\item uniqueId string,
\item bucketColumns array,
\item contactabilityColumns array,
\item audienceColumns array,
\item createdAt string,
\item updatedAt string,
}}
\item{desiredCompletes}{integer, }
\item{oversamplePercent}{integer, }
\item{buckets}{array, An array containing the following fields:
\itemize{
\item name string,
\item column string,
}}
\item{contactMethods}{array, }
\item{targetAudience}{string, }
\item{populationLimitingSQL}{string, }
\item{frameLimitingSQL}{string, }
\item{parentId}{integer, }
\item{metadata}{object, }
}
\description{
Update a sample
}
|
# Initial exploration of WHO data
# 20/12/2016
# FIX: removed `rm(list = ls())` -- wiping the caller's global environment is
# an anti-pattern in shared scripts; restart R for a clean session instead.
# The data are all in tables contained in an Access .mdb file.
# To access this I will start by using ImportExport, which uses RODBC
# Load (installing if necessary) the packages used throughout this script.
pacman::p_load(
  tidyverse,
  stringr,
  forcats,
  ggplot2,
  lattice, latticeExtra
)
# Let's start by using the Shiny App to see how the package works
# ImportExportApp()
#
# # One of the packages did not load. Let's do this manually instead.
#
#
# tmp <- access_import(file = "who_bulk/dmdb.mdb")
#
# # This doesn't work on this machine either: obdc connectaccess only works with 32 bit windows!
#
# # Let's rethink and go for a different programme
#
#
# pacman::p_unload(
# ImportExport,
# shiny, shinythemes, compareGroups,
# shinyBS
# )
#
# # Let's try RODBC directly
#
# pacman::p_load(
# RODBC
# )
#
# myconn <- odbcConnectAccess2007("who_bulk/dmdb.mdb")
# After running into a series of issues importing directly, I've exported each of the
# tables into csv text files.
# The table names are
# Load each exported Access table (CSV text files under who_bulk/exported_text).
# Paths are relative to the project root.
AgeGroups <- read_csv("who_bulk/exported_text/AgeGroups.txt")
AgeGroupTypes <- read_csv("who_bulk/exported_text/AgeGroupTypes.txt")
AStan <- read_csv("who_bulk/exported_text/AStan.txt")
CodingChapters <- read_csv("who_bulk/exported_text/CodingChapters.txt")
CodingTypes <- read_csv("who_bulk/exported_text/CodingTypes.txt")
Countries <- read_csv("who_bulk/exported_text/Countries.txt")
DiagConversions <- read_csv("who_bulk/exported_text/DiagConversions.txt")
Diagnosis <- read_csv("who_bulk/exported_text/Diagnosis.txt")
Gender <- read_csv("who_bulk/exported_text/Gender.txt")
# MDD is the main mortality table; Population supplies denominators.
MDD <- read_csv("who_bulk/exported_text/MDD.txt")
Population <- read_csv("who_bulk/exported_text/Population.txt")
Years <- read_csv("who_bulk/exported_text/Years.txt")
# All loaded...
# First, link AgeGroups to AgeGroupTypes
# (printed for interactive inspection)
AgeGroups
AgeGroupTypes
# Natural join on shared columns; Dsc renamed to something self-describing.
AgeGroups_AgeGroupTypes <- left_join(AgeGroups, AgeGroupTypes) %>%
  rename(agegroup_desc = Dsc)
# Unsure what to do with AStan right now
# Join CodingTypes to CodingChapter
# Both tables have a Dsc column, so the join suffixes them .x/.y.
CodingChapters_CodingTypes <- left_join(CodingChapters, CodingTypes, by = c("Coding"="CodingId")) %>%
  rename(coding_desc = Dsc.x, icd_class_desc = Dsc.y)
Countries
# The main file to work on is mdd
# Reshape death counts from wide (d0..d21, one column per age band) to long.
# NOTE(review): gather() is superseded by pivot_longer(); kept for
# compatibility with the tidyr version this script was written against.
MDD %>%
  gather(key = "age_group", value = "death_count", d0:d21) %>%
  mutate(age_group = str_replace_all(age_group, "d", "") %>% as.integer) -> mdd_long
# Attach age-group descriptions, then relabel columns and decode sex.
mdd_long %>%
  left_join(AgeGroups_AgeGroupTypes, by = c("AgeGroup" = "agType", "age_group" = "agItem")) %>%
  select(
    country = Country, year = yId, icd_schema = Coding,
    mort_code = Code, sex = Gender, age = Title,
    age_range = agegroup_desc, death_count
  ) %>%
  left_join(Gender, by = c("sex" ="GenderId")) %>%
  mutate(sex = Title) %>% select(-Title) -> mdd_tidy
# Now to produce age groupings
unique(mdd_tidy$age) %>% as_tibble -> age_labels
age_fst <- c(
0,
0,
1,
seq(5, 85, by = 5),
85,
90,
NA,
95
)
age_lst <- c(
110,
0,
seq(4, 89, by = 5),
110,
94,
NA,
110
)
age_labels %>%
mutate(age_fst = age_fst, age_lst = age_lst) -> age_labels
rm(age_fst, age_lst)
mdd_tidy %>%
left_join(age_labels, by = c("age" = "value")) -> mdd_tidy
# age_group labelling in population df is neater, so recoding mdd to these categories
mdd_tidy %>%
mutate(age = fct_recode(
age,
"all" = "All ages",
"1" = "< 1 year",
"1_4" = "1 - 4",
"5_9" = "5 - 9",
"10_14" = "10 - 14",
"15_19" = "15 - 19",
"20_24" = "20 - 24",
"25_29" = "25 - 29",
"30_34" = "30 - 34",
"35_39" = "35 - 39",
"40_44" = "40 - 44",
"45_49" = "45 - 49",
"50_54" = "50 - 54",
"55_59" = "55 - 59",
"60_64" = "60 - 64",
"65_69" = "65 - 69",
"70_74" = "70 - 74",
"75_79" = "75 - 79",
"80_84" = "80 - 84",
"85_89" = "85 - 89",
"90_94" = "90 - 94",
"95" = "95 +",
NULL = "85 +"
)
) -> mdd_tidy
# Now to add population
Population %>%
gather(key = "age_group", value = "population_count", p_all:p_95) %>%
mutate(age_group = str_replace(age_group, "^p_", "")) %>%
left_join(Gender, by = c("Gender" ="GenderId")) %>%
select(-Gender) %>%
rename(sex = Title) %>%
select(
country = Country, year = yId,
sex, age = age_group,
population_count
) -> pop_tidy
mdd_tidy %>% inner_join(pop_tidy, by = c("country", "year", "sex", "age")) -> combined_tidy
#mdd long should be combined with AgeGroups_AgeGroupTypes
write_csv(combined_tidy, "tidy_data/tidied_data.csv")
rm(age_labels, AgeGroups, AgeGroups_AgeGroupTypes, MDD)
gc()
# Let's see if I can produce some kind of Lexis plot fairly quickly
|
/script.R
|
no_license
|
JonMinton/who_data_explore
|
R
| false
| false
| 4,770
|
r
|
# Initial exploration of WHO data
# 20/12/2016
# NOTE(review): rm(list = ls()) wipes the whole workspace. Acceptable for a
# standalone script run top-to-bottom, but dangerous if sourced interactively.
rm(list = ls())
# The data are all in tables contained in an Access .mdb file.
# To access this I will start by using ImportExport, which uses RODBC
# Load/install packages via pacman; tidyverse supplies read_csv, dplyr verbs,
# tidyr::gather and readr::write_csv used throughout this script.
pacman::p_load(
tidyverse,
stringr,
forcats,
ggplot2,
lattice, latticeExtra
)
# Let's start by using the Shiny App to see how the package works
# ImportExportApp()
#
# # One of the packages did not load. Let's do this manually instead.
#
#
# tmp <- access_import(file = "who_bulk/dmdb.mdb")
#
# # This doesn't work on this machine either: obdc connectaccess only works with 32 bit windows!
#
# # Let's rethink and go for a different programme
#
#
# pacman::p_unload(
# ImportExport,
# shiny, shinythemes, compareGroups,
# shinyBS
# )
#
# # Let's try RODBC directly
#
# pacman::p_load(
# RODBC
# )
#
# myconn <- odbcConnectAccess2007("who_bulk/dmdb.mdb")
# After running into a series of issues importing directly, I've exported each of the
# tables into csv text files.
# The table names are
# Each file below is one table manually exported from the Access database.
AgeGroups <- read_csv("who_bulk/exported_text/AgeGroups.txt")
AgeGroupTypes <- read_csv("who_bulk/exported_text/AgeGroupTypes.txt")
AStan <- read_csv("who_bulk/exported_text/AStan.txt")
CodingChapters <- read_csv("who_bulk/exported_text/CodingChapters.txt")
CodingTypes <- read_csv("who_bulk/exported_text/CodingTypes.txt")
Countries <- read_csv("who_bulk/exported_text/Countries.txt")
DiagConversions <- read_csv("who_bulk/exported_text/DiagConversions.txt")
Diagnosis <- read_csv("who_bulk/exported_text/Diagnosis.txt")
Gender <- read_csv("who_bulk/exported_text/Gender.txt")
MDD <- read_csv("who_bulk/exported_text/MDD.txt")
Population <- read_csv("who_bulk/exported_text/Population.txt")
Years <- read_csv("who_bulk/exported_text/Years.txt")
# All loaded...
# First, link AgeGroups to AgeGroupTypes
AgeGroups
AgeGroupTypes
# left_join() without `by` joins on all commonly-named columns (a message at
# runtime will say which); Dsc is the age-group description text.
AgeGroups_AgeGroupTypes <- left_join(AgeGroups, AgeGroupTypes) %>%
rename(agegroup_desc = Dsc)
# Unsure what to do with AStan right now
# Join CodingTypes to CodingChapter
# Both inputs have a Dsc column, so the join suffixes them .x/.y.
CodingChapters_CodingTypes <- left_join(CodingChapters, CodingTypes, by = c("Coding"="CodingId")) %>%
rename(coding_desc = Dsc.x, icd_class_desc = Dsc.y)
Countries
# The main file to work on is mdd
# Reshape wide death-count columns d0..d21 into long (age_group, death_count)
# form. NOTE(review): gather() is superseded by tidyr::pivot_longer().
MDD %>%
gather(key = "age_group", value = "death_count", d0:d21) %>%
mutate(age_group = str_replace_all(age_group, "d", "") %>% as.integer) -> mdd_long
#mdd long should be combined with AgeGroups_AgeGroupTypes
mdd_long %>%
left_join(AgeGroups_AgeGroupTypes, by = c("AgeGroup" = "agType", "age_group" = "agItem")) %>%
select(
country = Country, year = yId, icd_schema = Coding,
mort_code = Code, sex = Gender, age = Title,
age_range = agegroup_desc, death_count
) %>%
left_join(Gender, by = c("sex" ="GenderId")) %>%
mutate(sex = Title) %>% select(-Title) -> mdd_tidy
# Now to produce age groupings
unique(mdd_tidy$age) %>% as_tibble -> age_labels
# age_fst / age_lst are the lower and upper bounds of each age band, listed
# POSITIONALLY in the same order as unique(mdd_tidy$age) above -- this
# ordering depends on the data; TODO confirm it matches the labels.
# NA entries correspond to a label with no usable bounds.
age_fst <- c(
0,
0,
1,
seq(5, 85, by = 5),
85,
90,
NA,
95
)
age_lst <- c(
110,
0,
seq(4, 89, by = 5),
110,
94,
NA,
110
)
age_labels %>%
mutate(age_fst = age_fst, age_lst = age_lst) -> age_labels
rm(age_fst, age_lst)
mdd_tidy %>%
left_join(age_labels, by = c("age" = "value")) -> mdd_tidy
# age_group labelling in population df is neater, so recoding mdd to these categories
# fct_recode: new label on the left, old label on the right. The NULL entry
# drops the "85 +" level entirely (its rows get NA), since it overlaps the
# finer 85-89 / 90-94 / 95+ bands.
mdd_tidy %>%
mutate(age = fct_recode(
age,
"all" = "All ages",
"1" = "< 1 year",
"1_4" = "1 - 4",
"5_9" = "5 - 9",
"10_14" = "10 - 14",
"15_19" = "15 - 19",
"20_24" = "20 - 24",
"25_29" = "25 - 29",
"30_34" = "30 - 34",
"35_39" = "35 - 39",
"40_44" = "40 - 44",
"45_49" = "45 - 49",
"50_54" = "50 - 54",
"55_59" = "55 - 59",
"60_64" = "60 - 64",
"65_69" = "65 - 69",
"70_74" = "70 - 74",
"75_79" = "75 - 79",
"80_84" = "80 - 84",
"85_89" = "85 - 89",
"90_94" = "90 - 94",
"95" = "95 +",
NULL = "85 +"
)
) -> mdd_tidy
# Now to add population
# Same wide-to-long reshape for population counts; the p_ prefix of the
# column names becomes the age label, matching the recoded mdd age levels.
Population %>%
gather(key = "age_group", value = "population_count", p_all:p_95) %>%
mutate(age_group = str_replace(age_group, "^p_", "")) %>%
left_join(Gender, by = c("Gender" ="GenderId")) %>%
select(-Gender) %>%
rename(sex = Title) %>%
select(
country = Country, year = yId,
sex, age = age_group,
population_count
) -> pop_tidy
# inner_join: death rows without a matching population row are dropped.
mdd_tidy %>% inner_join(pop_tidy, by = c("country", "year", "sex", "age")) -> combined_tidy
#mdd long should be combined with AgeGroups_AgeGroupTypes
# Persist the tidy table; tidy_data/ must already exist -- TODO confirm.
write_csv(combined_tidy, "tidy_data/tidied_data.csv")
# Free the large intermediates before plotting.
rm(age_labels, AgeGroups, AgeGroups_AgeGroupTypes, MDD)
gc()
# Let's see if I can produce some kind of Lexis plot fairly quickly
|
#' Builds a small-world scale-free network and extracts VNs based on genes of interest
#'
#' This function calls createSWSFnetFromFile() and downstreamAnalysis(). It is a
#' one-step function to create a scale-free small-world network and determine the
#' 'best' network model, from which vicinity networks (VNs) are then extracted
#' based on vertex IDs the user can upload. For more specifics refer to the two
#' functions createSWSFnetFromFile() and downstreamAnalysis().
#'
#' @param dataFile data file name (.txt) in tab-separated format, first column must be vertex ID (genes), first row are condition identifiers
#' @param metric character defining the association between each pair of vertex IDs; correlation options: "SP" - Spearman, "PE" - Pearson, and "KE" - Kendall as defined in cor{stats}; distances: "EU" - Euclidean, "MA" - Manhattan, and "CA" - Canberra as defined in dist{stats}
#' @param thresholds numeric vector representing a series of candidate thresholds; may only be supplied when the metric is one of the three correlation metrics
#' @param GoIFile a .txt file including vertex IDs of interest, IDs must be tab-separated (required)
#' @param annoFile optional annotation file for the vertex IDs
#'
#' @return None, output files are generated
#'
#' @export
dataToVNs <- function(dataFile, GoIFile=NULL, annoFile=NULL, metric=NULL, thresholds=NULL){
# Genes of interest are mandatory for VN extraction; without them the
# network-construction step alone is the right entry point.
if(is.null(GoIFile)) stop("GoIFile must be specified, if no GoIFile please use createSWSFnetFromFile() instead.")
createSWSFnetFromFile(dataFile, metric, thresholds)
# forDA.RData is expected to be written by createSWSFnetFromFile() and to
# provide `winnerT` (winning threshold) and `association` -- TODO confirm.
load("forDA.RData")
if(is.na(winnerT)) stop("downstreamAnalysis() cannot be ran as no threshold was determined to construct a scale-free small-world network, please review NetworkStats.txt")
downstreamAnalysis(winnerT, association, GoIFile, "myAnalysis.txt", dataFile, annoFile)
}
|
/src/petal/R/dataToVNs.R
|
no_license
|
ameya225/petalNet
|
R
| false
| false
| 1,855
|
r
|
#' Builds a small-world scale-free network and extracts VNs based on genes of interest
#'
#' This function calls createSWSFnetFromFile() and downstreamAnalysis(). It is a
#' one-step function to create a scale-free small-world network and determine the
#' 'best' network model, from which vicinity networks (VNs) are then extracted
#' based on vertex IDs the user can upload. For more specifics refer to the two
#' functions createSWSFnetFromFile() and downstreamAnalysis().
#'
#' @param dataFile data file name (.txt) in tab-separated format, first column must be vertex ID (genes), first row are condition identifiers
#' @param metric character defining the association between each pair of vertex IDs; correlation options: "SP" - Spearman, "PE" - Pearson, and "KE" - Kendall as defined in cor{stats}; distances: "EU" - Euclidean, "MA" - Manhattan, and "CA" - Canberra as defined in dist{stats}
#' @param thresholds numeric vector representing a series of candidate thresholds; may only be supplied when the metric is one of the three correlation metrics
#' @param GoIFile a .txt file including vertex IDs of interest, IDs must be tab-separated (required)
#' @param annoFile optional annotation file for the vertex IDs
#'
#' @return None, output files are generated
#'
#' @export
dataToVNs <- function(dataFile, GoIFile=NULL, annoFile=NULL, metric=NULL, thresholds=NULL){
# Genes of interest are mandatory for VN extraction; without them the
# network-construction step alone is the right entry point.
if(is.null(GoIFile)) stop("GoIFile must be specified, if no GoIFile please use createSWSFnetFromFile() instead.")
createSWSFnetFromFile(dataFile, metric, thresholds)
# forDA.RData is expected to be written by createSWSFnetFromFile() and to
# provide `winnerT` (winning threshold) and `association` -- TODO confirm.
load("forDA.RData")
if(is.na(winnerT)) stop("downstreamAnalysis() cannot be ran as no threshold was determined to construct a scale-free small-world network, please review NetworkStats.txt")
downstreamAnalysis(winnerT, association, GoIFile, "myAnalysis.txt", dataFile, annoFile)
}
|
# Check whether a process with the given PID exists on this system.
# Thin wrapper around processx's compiled routine; returns whatever the
# .Call invocation yields (presumably a logical scalar -- confirm against
# the C source of c_processx__process_exists).
process__exists <- function(pid) {
.Call(c_processx__process_exists, pid)
}
|
/R/process-helpers.R
|
permissive
|
alxsrobert/processx
|
R
| false
| false
| 79
|
r
|
# Check whether a process with the given PID exists on this system.
# Thin wrapper around processx's compiled routine; returns whatever the
# .Call invocation yields (presumably a logical scalar -- confirm against
# the C source of c_processx__process_exists).
process__exists <- function(pid) {
.Call(c_processx__process_exists, pid)
}
|
# Example script for seroincidence::getAdditionalData(), extracted from the
# package's Rd examples. Every call sits inside a "Not run" block because it
# downloads data files; strip the ##D prefixes to actually execute them.
library(seroincidence)
### Name: getAdditionalData
### Title: Get Additional Data
### Aliases: getAdditionalData
### ** Examples
## Not run:
##D getAdditionalData(fileName = "coxiellaIFAParams4.zip")
##D getAdditionalData(fileName = "yersiniaSSIParams4.zip")
##D getAdditionalData(fileName = "coxiellaIFAParams4.zip", savePath = getwd())
##D getAdditionalData(fileName = "yersiniaSSIParams4.zip", savePath = getwd())
## End(Not run)
|
/data/genthat_extracted_code/seroincidence/examples/getAdditionalData.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 443
|
r
|
# Example script for seroincidence::getAdditionalData(), extracted from the
# package's Rd examples. Every call sits inside a "Not run" block because it
# downloads data files; strip the ##D prefixes to actually execute them.
library(seroincidence)
### Name: getAdditionalData
### Title: Get Additional Data
### Aliases: getAdditionalData
### ** Examples
## Not run:
##D getAdditionalData(fileName = "coxiellaIFAParams4.zip")
##D getAdditionalData(fileName = "yersiniaSSIParams4.zip")
##D getAdditionalData(fileName = "coxiellaIFAParams4.zip", savePath = getwd())
##D getAdditionalData(fileName = "yersiniaSSIParams4.zip", savePath = getwd())
## End(Not run)
|
trackpts_test <- function(x){
  # Diagnostic report for a GPX trackpoints data frame.
  # Expects columns: track_fid, track_seg_point_id, name, time.
  # Relies on lubridate (as_datetime, as_date) and stringr (str_sub),
  # which must be attached by the caller.
  print("DIAGNOSTICS OF FILE")
  print("unique tracks")
  print(length(unique(x$track_fid)))  # number of distinct tracks
  print("________________________________________")
  print("INSPECT INDIVIDUAL TRACKPOINTS")
  for(i in seq_len(max(x$track_fid))){
    trk <- x[x$track_fid == i, ]  # all trackpoints belonging to track i
    print(unique(trk$name))       # name(s) recorded for this track
    # Duration of the track: last minus first timestamp.
    print(max(as_datetime(trk$time)) - min(as_datetime(trk$time)))
    # The first 10 characters of the track name should encode the date of the
    # earliest trackpoint. Time-zone caveat: points collected after 00:00 GMT
    # may legitimately mismatch.
    # BUG FIX: the original used ifelse(cond, print(a), print(b)), which
    # evaluates BOTH print() calls every iteration; use if/else instead.
    if (isTRUE(all(as_date(str_sub(unique(trk$name), 1, 10)) == min(as_date(trk$time))))) {
      print("min date and name match")
    } else {
      print("INSPECT: min date and name of track don't match")
    }
    # track_seg_point_id should run 0..n-1 with no gaps.
    # BUG FIX: the original hard-coded track 1 here instead of track i, and
    # discarded the ifelse() result so the warning was never displayed.
    if ((max(trk$track_seg_point_id) + 1) != length(unique(trk$track_seg_point_id))) {
      print("WARNING: TRACK SEG POINT IDS ARE MISSING INTEGERS, RENAME STARTING AT ZERO")
    }
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
  }
}
###trackpts_test(x) #x is the name of the trackpoints files
###you can do something similar for waypoints (namely making sure the date matches name, adjusting for GMT)
|
/trackpts_test function.R
|
no_license
|
bjbarrett/lomas_gps_code
|
R
| false
| false
| 1,308
|
r
|
trackpts_test <- function(x){
  # Diagnostic report for a GPX trackpoints data frame.
  # Expects columns: track_fid, track_seg_point_id, name, time.
  # Relies on lubridate (as_datetime, as_date) and stringr (str_sub),
  # which must be attached by the caller.
  print("DIAGNOSTICS OF FILE")
  print("unique tracks")
  print(length(unique(x$track_fid)))  # number of distinct tracks
  print("________________________________________")
  print("INSPECT INDIVIDUAL TRACKPOINTS")
  for(i in seq_len(max(x$track_fid))){
    trk <- x[x$track_fid == i, ]  # all trackpoints belonging to track i
    print(unique(trk$name))       # name(s) recorded for this track
    # Duration of the track: last minus first timestamp.
    print(max(as_datetime(trk$time)) - min(as_datetime(trk$time)))
    # The first 10 characters of the track name should encode the date of the
    # earliest trackpoint. Time-zone caveat: points collected after 00:00 GMT
    # may legitimately mismatch.
    # BUG FIX: the original used ifelse(cond, print(a), print(b)), which
    # evaluates BOTH print() calls every iteration; use if/else instead.
    if (isTRUE(all(as_date(str_sub(unique(trk$name), 1, 10)) == min(as_date(trk$time))))) {
      print("min date and name match")
    } else {
      print("INSPECT: min date and name of track don't match")
    }
    # track_seg_point_id should run 0..n-1 with no gaps.
    # BUG FIX: the original hard-coded track 1 here instead of track i, and
    # discarded the ifelse() result so the warning was never displayed.
    if ((max(trk$track_seg_point_id) + 1) != length(unique(trk$track_seg_point_id))) {
      print("WARNING: TRACK SEG POINT IDS ARE MISSING INTEGERS, RENAME STARTING AT ZERO")
    }
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
  }
}
###trackpts_test(x) #x is the name of the trackpoints files
###you can do something similar for waypoints (namely making sure the date matches name, adjusting for GMT)
|
# Note:
# This script is showing how to conduct the mutation mapping analysis for the small SNP dataset
# Dependencies (not loaded here): the Yeastspot3D package provides
# annotateSNP(), clumpsAnalysis(), printSNPforGene(), hotSpotAnalysis() and
# the data() objects; filter() is presumably dplyr -- TODO confirm both are
# attached before sourcing this script.
data('gene_feature0')
data('snp_data')
# Map each SNP onto the gene features to get per-gene mutation records.
mutated_gene <- annotateSNP(snp_input = snp_data, gene_feature = gene_feature0)
#-------------------------------------------------
# Mutation enrichment analysis
#------------------------------------------------
# first example
data('ResidueDistance_YPR184W')
# Restrict to SNPs falling in the example gene.
mutated_gene1 <- filter(mutated_gene, Gene2 == 'YPR184W')
# CLUMPS-style enrichment test on the 3D residue-distance matrix;
# sstart0/send0 are the protein-sequence coordinates covered by the model.
result0 <- clumpsAnalysis(gene0 = 'YPR184W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdb = ResidueDistance_YPR184W,
sstart0 = 2,
send0 = 1534,
input_dir= FALSE)
# print the mutation information for the input SNP list contained in the protein 3D structure
pdbID <- '2_1534_5d06.1.A_5b2453487f4bf94bf75ead43'
SNP_list <- printSNPforGene(gene0 = 'YPR184W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdbID0 = pdbID,
sstart0 = 2,
send0 = 1534)
# second example
data('ResidueDistance_YMR246W')
mutated_gene1 <- filter(mutated_gene, Gene2 == 'YMR246W')
result0 <- clumpsAnalysis(gene0 = 'YMR246W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdb = ResidueDistance_YMR246W,
sstart0 = 39,
send0 = 691,
input_dir= FALSE)
pdbID <- '39_691_5mst.1.A_5b41c4d68fd6f9da68b53e00'
SNP_list <- printSNPforGene(gene0 = 'YMR246W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdbID0 = pdbID,
sstart0 = 39,
send0 = 691)
#-------------------------------------------------
# Mutation hot spot analysis
#------------------------------------------------
# run the function
data('snp_YBR046C')
data('ResidueDistance_YBR046C')
# Results are written under result/; dir.create() warns (but does not stop)
# if the directory already exists.
outfile0 <- 'result/hot_spot_analysis'
dir.create(outfile0)
hotSpotAnalysis(
gene0 = "YBR046C",
SNPlist0 = snp_YBR046C,
gene_annotation0 = gene_feature0,
pdb = ResidueDistance_YBR046C,
sstart0 = 5, # coordinate of original protein residues sequence
send0 = 333, # coordinate of original protein residues sequence
qstart0 =1 , # coordinate of protein residues sequence in pdb file
qend0 = 329, # coordinate of protein residues sequence in pdb file
result_dir = outfile0,
input_dir=FALSE
)
# here save the installed R packages and its version to make sure the above steps could be re-produced
# library(hongR)
# print_R_package(output = "data/R_packages_for_Yeastspot3D.txt")
|
/2 General steps to conduct the mutation mapping analysis using Yeastspot3D.R
|
permissive
|
hongzhonglu/Tutorial_for_Yeastspot3D
|
R
| false
| false
| 2,872
|
r
|
# Note:
# This script is showing how to conduct the mutation mapping analysis for the small SNP dataset
data('gene_feature0')
data('snp_data')
mutated_gene <- annotateSNP(snp_input = snp_data, gene_feature = gene_feature0)
#-------------------------------------------------
# Mutation enrichment analysis
#------------------------------------------------
# first example
data('ResidueDistance_YPR184W')
mutated_gene1 <- filter(mutated_gene, Gene2 == 'YPR184W')
result0 <- clumpsAnalysis(gene0 = 'YPR184W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdb = ResidueDistance_YPR184W,
sstart0 = 2,
send0 = 1534,
input_dir= FALSE)
# print the mutation information for the input SNP list contained in the protein 3D structure
pdbID <- '2_1534_5d06.1.A_5b2453487f4bf94bf75ead43'
SNP_list <- printSNPforGene(gene0 = 'YPR184W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdbID0 = pdbID,
sstart0 = 2,
send0 = 1534)
# second example
data('ResidueDistance_YMR246W')
mutated_gene1 <- filter(mutated_gene, Gene2 == 'YMR246W')
result0 <- clumpsAnalysis(gene0 = 'YMR246W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdb = ResidueDistance_YMR246W,
sstart0 = 39,
send0 = 691,
input_dir= FALSE)
pdbID <- '39_691_5mst.1.A_5b41c4d68fd6f9da68b53e00'
SNP_list <- printSNPforGene(gene0 = 'YMR246W',
SNPlist0 = mutated_gene1,
gene_annotation0 = gene_feature0,
pdbID0 = pdbID,
sstart0 = 39,
send0 = 691)
#-------------------------------------------------
# Mutation hot spot analysis
#------------------------------------------------
# run the function
data('snp_YBR046C')
data('ResidueDistance_YBR046C')
outfile0 <- 'result/hot_spot_analysis'
dir.create(outfile0)
hotSpotAnalysis(
gene0 = "YBR046C",
SNPlist0 = snp_YBR046C,
gene_annotation0 = gene_feature0,
pdb = ResidueDistance_YBR046C,
sstart0 = 5, # coordinate of orginal protein residues sequence
send0 = 333, # coordinate of orginal protein residues sequence
qstart0 =1 , # coordinate of protein residues sequence in pdb file
qend0 = 329, # coordinate of protein residues sequence in pdb file
result_dir = outfile0,
input_dir=FALSE
)
# here save the installed R packages and its version to make sure the above steps could be re-produced
# library(hongR)
# print_R_package(output = "data/R_packages_for_Yeastspot3D.txt")
|
## Plot 3: household energy sub-metering for a fixed two-day window.
## The file "household_power_consumption.txt" must be in the working directory.
# The skip/nrows pair selects the same 2880-row slice (two days of
# minute-level readings) as the original assignment script.
col_names <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
               "Voltage", "Global_intensity",
               "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
power <- read.table("household_power_consumption.txt", sep = ";",
                    stringsAsFactors = FALSE, skip = 66637, nrows = 2880,
                    col.names = col_names)
# Combine the date and time columns into POSIX timestamps for the x-axis.
timestamp <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
# Render the three sub-metering series to a 480x480 PNG.
png(file = "plot3.png", width = 480, height = 480)
plot(timestamp, power$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy Submetering")
lines(timestamp, power$Sub_metering_2, col = "red")
lines(timestamp, power$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off()
|
/plot3.R
|
no_license
|
WLGCCYCC/ExData_Plotting1
|
R
| false
| false
| 895
|
r
|
## Plot 3: household energy sub-metering for a fixed two-day window.
## The file "household_power_consumption.txt" must be in the working directory.
# The skip/nrows pair selects the same 2880-row slice (two days of
# minute-level readings) as the original assignment script.
col_names <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
               "Voltage", "Global_intensity",
               "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
power <- read.table("household_power_consumption.txt", sep = ";",
                    stringsAsFactors = FALSE, skip = 66637, nrows = 2880,
                    col.names = col_names)
# Combine the date and time columns into POSIX timestamps for the x-axis.
timestamp <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
# Render the three sub-metering series to a 480x480 PNG.
png(file = "plot3.png", width = 480, height = 480)
plot(timestamp, power$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy Submetering")
lines(timestamp, power$Sub_metering_2, col = "red")
lines(timestamp, power$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off()
|
r=359.80
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7mk5f/media/images/d7mk5f-005/svc:tesseract/full/full/359.80/default.jpg Accept:application/hocr+xml
|
/ark_87287/d7mk5f/d7mk5f-005/rotated.r
|
permissive
|
ucd-library/wine-price-extraction
|
R
| false
| false
| 199
|
r
|
r=359.80
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7mk5f/media/images/d7mk5f-005/svc:tesseract/full/full/359.80/default.jpg Accept:application/hocr+xml
|
## ======================================================================
## This file takes the simulated data sets, which are stored in separate
## files in folder /simParts, and runs all meta-analytic techniques on them.
## Then it saves the analyses in separate files in the /analysisParts folder.
## ======================================================================
# run this file:
# source("2-analysisFramework.R", echo=TRUE)
# load all functions and packages
# 0-start.R presumably defines the estimator functions used below
# (RMA.est, PETPEESE.est, pc_skew, pcurveEst, puniformEst, TPSM.est) and
# attaches dplyr for bind_rows() -- TODO confirm.
source("0-start.R")
library(doParallel)
# detectCores()
registerDoParallel(cores=20)
(ncores <- getDoParWorkers()) # number of parallel processes
# simDatFiles stores the names of all simulated data files in the folder "simParts"
simDatFiles <- list.files("simParts", pattern=".*\\.RData", full.names=TRUE)
library(gtools)
simDatFiles <- mixedsort(simDatFiles)
# loop through all simParts files
for (f in simDatFiles) {
load(f) # the simulation data frame always is called "sim"
n.MA <- length(unique(sim$id)) # overall number of MAs
print(paste0(Sys.time(), ": Analyzing ", n.MA, " unique MAs from file ", f))
## slice up the file into ncores pieces
if (length(unique(sim$id)) %% ncores != 0) {
warning(paste0("Number of MAs (", length(unique(sim$id)), ") not dividable by number of cores (", ncores, ")"))
}
flush.console()
# build translation table: which unique sim ID goes into which core?
# NOTE(review): when the MA count is not divisible by ncores, `each` below is
# non-integer and rep() truncates it, so `translation` comes out shorter than
# unique(sim$id) and the names() assignment fails -- the warning above does
# not guard against this. TODO confirm/handle.
translation <- rep(1:ncores, each=length(unique(sim$id))/ncores)
names(translation) <- unique(sim$id)
sim$core <- translation[as.character(sim$id)]
# Now, loop through all meta-analyses, each core gets its share of studies
res <- foreach(batch=1:ncores, .combine=rbind) %dopar% {
counter <- 1
reslist <- list() # each MA is stored as 1 list element, which is later combined to a single data frame
sim.piece <- sim[sim$core==batch, ]
n.MA.piece <- length(unique(sim.piece$id))
for (i in 1:n.MA.piece) {
print(paste0(Sys.time(), ", batch=", batch, ": Computing ", i, "/", n.MA.piece))
# select rows from one single MA
MAdat <- sim.piece[sim.piece$id == unique(sim.piece$id)[i], ]
rownames(MAdat) <- NULL
# analyze with all MA techniques
# Each *.est call returns rows in long format so they can be stacked.
res0 <- rbind(
RMA.est(d=MAdat$d, v=MAdat$v, long=TRUE),
PETPEESE.est(MAdat$d, MAdat$v, long=TRUE),
pc_skew(t=MAdat$t, df=MAdat$N-2, long=TRUE),
pcurveEst(t=MAdat$t, df=MAdat$N-2, progress=FALSE, long=TRUE, CI=FALSE),
puniformEst(t.value=MAdat$t, n1=MAdat$n1, n2=MAdat$n2),
TPSM.est(t=MAdat$t, n1=MAdat$n1, n2=MAdat$n2, long=TRUE)#,
#topN(MAdat$d, MAdat$v, MAdat$n1, MAdat$n2, est="fixed", fixed.effect=0.3),
#topN(MAdat$d, MAdat$v, MAdat$n1, MAdat$n2, est="rma"),
#topN(MAdat$d, MAdat$v, MAdat$n1, MAdat$n2, est="PEESE"),
#betaSM.est(d=MAdat$d, v=MAdat$v, long=TRUE)
)
# collect results
# Replicate the first row's condition columns so every estimator row
# carries the MA's simulation settings.
res1 <- cbind(
# save settings of condition to results:
MAdat[rep(1, nrow(res0)), c("id", "condition", "k", "delta", "qrpEnv", "selProp", "tau", "kFD", "sel", "qrp")],
# save analysis results:
res0
)
reslist[[counter]] <- res1
counter <- counter+1
}
res2 <- bind_rows(reslist)
return(res2)
} # of dopar
save(res, file=paste0("analysisParts/analysis_", basename(f)), compress="gzip")
} # of "f in simDatFiles"
|
/2-analysisFramework.R
|
permissive
|
kylehamilton/meta-showdown
|
R
| false
| false
| 3,270
|
r
|
## ======================================================================
## This file takes the simulated data sets, which are stored in separate
## files in folder /simParts, and runs all meta-analytic techniques on them.
## Then it saves the analyses in separate files in the /analysisParts folder.
## ======================================================================
# run this file:
# source("2-analysisFramework.R", echo=TRUE)
# load all functions and packages
source("0-start.R")
library(doParallel)
# detectCores()
registerDoParallel(cores=20)
(ncores <- getDoParWorkers()) # number of parallel processes
# simDatFiles stores the names of all simulated data files in the folder "simParts"
simDatFiles <- list.files("simParts", pattern=".*\\.RData", full.names=TRUE)
library(gtools)
simDatFiles <- mixedsort(simDatFiles)
# loop through all simParts files
for (f in simDatFiles) {
load(f) # the simulation data frame always is called "sim"
n.MA <- length(unique(sim$id)) # overall number of MAs
print(paste0(Sys.time(), ": Analyzing ", n.MA, " unique MAs from file ", f))
## slice up the file into ncores pieces
if (length(unique(sim$id)) %% ncores != 0) {
warning(paste0("Number of MAs (", length(unique(sim$id)), ") not dividable by number of cores (", ncores, ")"))
}
flush.console()
# build translation table: which unique sim ID goes into which core?
translation <- rep(1:ncores, each=length(unique(sim$id))/ncores)
names(translation) <- unique(sim$id)
sim$core <- translation[as.character(sim$id)]
# Now, loop through all meta-analyses, each core gets its share of studies
res <- foreach(batch=1:ncores, .combine=rbind) %dopar% {
counter <- 1
reslist <- list() # each MA is stored as 1 list element, which is later combined to a single data frame
sim.piece <- sim[sim$core==batch, ]
n.MA.piece <- length(unique(sim.piece$id))
for (i in 1:n.MA.piece) {
print(paste0(Sys.time(), ", batch=", batch, ": Computing ", i, "/", n.MA.piece))
# select rows from one single MA
MAdat <- sim.piece[sim.piece$id == unique(sim.piece$id)[i], ]
rownames(MAdat) <- NULL
# analyze with all MA techniques
res0 <- rbind(
RMA.est(d=MAdat$d, v=MAdat$v, long=TRUE),
PETPEESE.est(MAdat$d, MAdat$v, long=TRUE),
pc_skew(t=MAdat$t, df=MAdat$N-2, long=TRUE),
pcurveEst(t=MAdat$t, df=MAdat$N-2, progress=FALSE, long=TRUE, CI=FALSE),
puniformEst(t.value=MAdat$t, n1=MAdat$n1, n2=MAdat$n2),
TPSM.est(t=MAdat$t, n1=MAdat$n1, n2=MAdat$n2, long=TRUE)#,
#topN(MAdat$d, MAdat$v, MAdat$n1, MAdat$n2, est="fixed", fixed.effect=0.3),
#topN(MAdat$d, MAdat$v, MAdat$n1, MAdat$n2, est="rma"),
#topN(MAdat$d, MAdat$v, MAdat$n1, MAdat$n2, est="PEESE"),
#betaSM.est(d=MAdat$d, v=MAdat$v, long=TRUE)
)
# collect results
res1 <- cbind(
# save settings of condition to results:
MAdat[rep(1, nrow(res0)), c("id", "condition", "k", "delta", "qrpEnv", "selProp", "tau", "kFD", "sel", "qrp")],
# save analysis results:
res0
)
reslist[[counter]] <- res1
counter <- counter+1
}
res2 <- bind_rows(reslist)
return(res2)
} # of dopar
save(res, file=paste0("analysisParts/analysis_", basename(f)), compress="gzip")
} # of "f in simDatFiles"
|
# ======================================================================
# LASSO and Ridge regression on the "Communities and Crime" data set.
# NOTE(review): this script was adapted from a simulation exercise and still
# references Simul(), theta and sigma, which are NOT defined here; they must
# be supplied (e.g. by sourcing the companion exercise script) before the
# simulated-test-set sections are run.
# ======================================================================
library(glmnet)
library(ggplot2) # for nicer plots

df <- read.csv("communities_data_R.csv", sep = ",", header = TRUE)

# Drop the first column (identifier), then split predictors / response.
# BUG FIX: the original assigned the *last* column to x and left `y =`
# dangling on its own line, which parsed as y = (LASSO_ex1 = glmnet(x, y, ...))
# and used y before it was defined. glmnet() needs a numeric predictor matrix
# and a response vector. Assumes the response is the last column (standard for
# this data set) -- TODO confirm against the csv.
df <- df[-c(1)]
x <- as.matrix(df[, -ncol(df)]) # all columns except the last = predictors
y <- df[, ncol(df)]            # last column = response
n <- nrow(x)                   # observations (used in the MSEs below)
p <- ncol(x)                   # predictors (used for the simulated test set)

# LASSO fit: alpha = 1. glmnet builds its own lambda grid (nlambda = 100 by
# default), starting from the smallest lambda that zeroes every coefficient
# except the intercept. A manual grid could be given instead, e.g.
# lambdas = 10^seq(3, -2, by = -.1).
LASSO_ex1 <- glmnet(x, y, alpha = 1)

## question 3
# For each lambda: number of selected variables and % of explained deviance.
LASSO_ex1

## question 4
# Regularization path.
plot(LASSO_ex1) # xvar = c("norm", "lambda", "dev")
which(coef(LASSO_ex1, s = 0.32370) != 0) # positions of non-zero coefficients

## question 5
v <- coef(LASSO_ex1, s = 0.32370)
v[1:100]

## question 6
# In-sample prediction error (redo with s = "lambda.min" from question 7).
Ypred <- predict(LASSO_ex1, x, s = 0.3)
ErreurPred <- sqrt(sum((Ypred - y)^2) / n)
ErreurPred

## question 7
# Cross-validation to pick lambda; type.measure can be "mse", "mae", "auc".
cvLASSO_ex1 <- cv.glmnet(x, y, alpha = 1, type.measure = "mse")
plot(cvLASSO_ex1)
# Minimum cross-validated MSE:
min(cvLASSO_ex1$cvm)
# ... achieved at lambda:
meilleur_Lambda <- cvLASSO_ex1$lambda.min
meilleur_Lambda
# lambda.1se: the largest lambda whose CV error is within one standard error
# of the minimum.
lambda.1se <- cvLASSO_ex1$lambda.1se
lambda.1se

## question 8
# a) simulated test set (Simul/theta/sigma come from the companion script).
Xtest <- matrix(rnorm(n * p), n, p)
Ytest <- Simul(n, p, theta, sigma, Xtest)
# b) predictions at the CV-selected lambda
Ypred <- predict(LASSO_ex1, Xtest, s = meilleur_Lambda)
# c) mean squared prediction error and R^2
MSE_lasso <- sum((Ypred - Ytest)^2) / n
MSE_lasso
# BUG FIX: the original R^2 (and the LM errors below) squared the *sum* of
# residuals, sum(Ytest - Ypred)^2, instead of summing squared residuals.
R2_Lasso <- 1 - sum((Ytest - Ypred)^2) / sum(Ytest^2)
R2_Lasso
# d) OLS refit using only the variables selected by the LASSO.
teta <- coef(cvLASSO_ex1, s = meilleur_Lambda)
# BUG FIX: coef() returns the intercept in position 1, so the selected
# predictor columns are which(teta[-1] != 0), not which(teta != 0).
Var <- which(as.vector(teta[-1]) != 0)
LM_ex1 <- lm(y ~ x[, Var])
# BUG FIX: predict(LM_ex1, data = Xtest) silently ignored `data` (the
# argument is `newdata`, and it cannot be matched to the x[, Var] formula
# anyway), so it returned fitted values on the *training* set. Build the
# test-set predictions explicitly from the OLS coefficients instead.
Ypred_LM <- cbind(1, Xtest[, Var]) %*% coef(LM_ex1)
MSE_LM <- sum((Ytest - Ypred_LM)^2) / n
MSE_LM
R2_LM <- 1 - sum((Ytest - Ypred_LM)^2) / sum(Ytest^2)
R2_LM
sprintf("R2_Lasso: %s", round(R2_Lasso,3))
sprintf("R2_LM: %s", round(R2_LM,3))
sprintf("MSE_Lasso: %s", round(MSE_lasso,3))
sprintf("MSE_LM: %s", round(MSE_LM,3))

## question 8 -- Ridge regression (alpha = 0) with an explicit lambda grid
Ridge_ex1 <- glmnet(x, y, alpha = 0, lambda = 10 * seq(0, 10, 0.01))
plot(Ridge_ex1, xvar = "lambda", label = TRUE) # xvar = c("norm", "lambda", "dev")
cvRidge_ex1 <- cv.glmnet(x, y, alpha = 0, lambda = 20 * seq(0, 5, 0.005))
plot(cvRidge_ex1)
meilleur_Lambda <- cvRidge_ex1$lambda.min
meilleur_Lambda
Xtest <- matrix(rnorm(n * p), n, p)
Ytest <- Simul(n, p, theta, sigma, Xtest)
# b) BUG FIX: the original predicted with the LASSO fit here; use the ridge fit.
Ypred <- predict(Ridge_ex1, Xtest, s = meilleur_Lambda)
# c) mean squared prediction error and R^2 (same residual-sum fix as above)
MSE_Ridge <- sum((Ypred - Ytest)^2) / n
MSE_Ridge
R2_Ridge <- 1 - sum((Ytest - Ypred)^2) / sum(Ytest^2)
R2_Ridge
|
/Statistique_en_grande_dimension/Notebook/WADE_Malick_Exercice_2_Communities_and_Crime.R
|
no_license
|
Malick-W/Projets
|
R
| false
| false
| 2,924
|
r
|
## Exercise 2: Lasso / Ridge regression on the Communities and Crime data.
library(glmnet)
library(ggplot2) # for nicer graphs
df = read.csv("communities_data_R.csv", sep = ",", header = T)
# Drop the first column (identifier).
df = df[-c(1)]
# BUG FIX: the original assigned the last (response) column to x and left
# `y =` dangling across a line break. glmnet expects a numeric predictor
# matrix x and a response vector y; the response is the last column.
x = as.matrix(df[, -ncol(df)])
y = df[, ncol(df)]
# BUG FIX: n and p were used below but never defined in this script.
n = nrow(x)
p = ncol(x)
# Lasso: alpha = 1. glmnet builds a lambda grid automatically, from the
# smallest lambda that zeroes every coefficient (except the intercept)
# down through nlambda = 100 values by default.
LASSO_ex1 = glmnet(x, y, alpha = 1)
## Question 3
# Shows, per lambda, the number of selected variables and % deviance explained.
LASSO_ex1
## Question 4: regularization path
plot(LASSO_ex1) # xvar = c("norm", "lambda", "dev")
which(coef(LASSO_ex1, s = 0.32370) != 0) # non-zero coefficients at this lambda
## Question 5
v = coef(LASSO_ex1, s = 0.32370)
v[1:100]
## Question 6
Ypred = predict(LASSO_ex1, x, s = 0.3) # redo with s = "lambda.min" from Q7
ErreurPred = sqrt(sum((Ypred - y)^2) / n)
ErreurPred
## Question 7: cross-validation
cvLASSO_ex1 = cv.glmnet(x, y, alpha = 1, type.measure = "mse") # or "mae", "auc"
plot(cvLASSO_ex1)
# Minimum cross-validated MSE:
min(cvLASSO_ex1$cvm)
# ... attained at lambda:
meilleur_Lambda = cvLASSO_ex1$lambda.min
meilleur_Lambda
# lambda.1se: largest lambda whose error is within one SE of the minimum.
lambda.1se = cvLASSO_ex1$lambda.1se
lambda.1se
## Question 8
# a) NOTE(review): Simul(), theta and sigma come from Exercise 1 and are
# not defined in this script -- confirm they are in the workspace.
Xtest = matrix(rnorm(n * p), n, p)
Ytest = Simul(n, p, theta, sigma, Xtest)
# b)
Ypred = predict(LASSO_ex1, Xtest, s = meilleur_Lambda)
# c) Mean squared prediction error.
MSE_lasso = (sum((Ypred - Ytest)^2)) / n
MSE_lasso
# BUG FIX: residuals must be squared before summing (sum(e^2), not sum(e)^2).
R2_Lasso = 1 - sum((Ytest - Ypred)^2) / sum(Ytest^2)
R2_Lasso
# d) Plain lm on the variables selected by the Lasso.
teta = coef(cvLASSO_ex1, s = meilleur_Lambda)
# BUG FIX: coef() puts the intercept in row 1, so positions from which()
# were off by one relative to the columns of x. Drop the intercept first.
Var = which(teta[-1] != 0)
LM_ex1 = lm(y ~ x[, Var])
# BUG FIX: predict(LM_ex1, data = Xtest) silently ignored `data`
# (predict.lm takes `newdata`) and, with a matrix formula, would not use
# Xtest anyway. Predict explicitly from the fitted coefficients instead.
Ypred_LM = cbind(1, Xtest[, Var]) %*% coef(LM_ex1)
# BUG FIX: squared-residual parentheses, as above.
MSE_LM = sum((Ytest - Ypred_LM)^2) / n
MSE_LM
R2_LM = 1 - sum((Ytest - Ypred_LM)^2) / sum(Ytest^2)
R2_LM
sprintf("R2_Lasso: %s", round(R2_Lasso, 3))
sprintf("R2_LM: %s", round(R2_LM, 3))
sprintf("MSE_Lasso: %s", round(MSE_lasso, 3))
sprintf("MSE_LM: %s", round(MSE_LM, 3))
## Question 8: Ridge regression (alpha = 0)
Ridge_ex1 = glmnet(x, y, alpha = 0, lambda = 10 * seq(0, 10, 0.01))
plot(Ridge_ex1, xvar = "lambda", label = TRUE) # xvar = c("norm", "lambda", "dev")
cvRidge_ex1 = cv.glmnet(x, y, alpha = 0, lambda = 20 * seq(0, 5, 0.005))
plot(cvRidge_ex1)
meilleur_Lambda = cvRidge_ex1$lambda.min
meilleur_Lambda
Xtest = matrix(rnorm(n * p), n, p)
Ytest = Simul(n, p, theta, sigma, Xtest) # see NOTE above about Simul()
# b) BUG FIX: the Ridge section predicted with the Lasso fit (LASSO_ex1).
Ypred = predict(Ridge_ex1, Xtest, s = meilleur_Lambda)
# c) Mean squared prediction error.
MSE_Ridge = (sum((Ypred - Ytest)^2)) / n
MSE_Ridge
# BUG FIX: squared-residual parentheses.
R2_Ridge = 1 - sum((Ytest - Ypred)^2) / sum(Ytest^2)
R2_Ridge
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pkg.R
\docType{package}
\name{docopt-package}
\alias{docopt-package}
\title{Docopt command line specification}
\description{
docopt helps you to define an interface for your command-line app, and
automatically generate a parser for it.
}
\details{
For more information see http://docopt.org
}
|
/man/docopt-package.Rd
|
no_license
|
extemporaneousb/docopt.R
|
R
| false
| false
| 380
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pkg.R
\docType{package}
\name{docopt-package}
\alias{docopt-package}
\title{Docopt command line specification}
\description{
docopt helps you to define an interface for your command-line app, and
automatically generate a parser for it.
}
\details{
For more information see http://docopt.org
}
|
# Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Package-level options store: an environment (parented to emptyenv so it
# is detached from the search path) holding the current rmr settings.
# Read and written by rmr.options() below.
rmr.options.env = new.env(parent=emptyenv())
rmr.options.env$backend = "hadoop"        # "hadoop" or "local"
rmr.options.env$keyval.length = 10^4      # records per key-value chunk
rmr.options.env$profile.nodes = "off"     # "off", "calls", "memory" or "both"
rmr.options.env$dfs.tempdir = NULL
rmr.options.env$depend.check = FALSE
#rmr.options$managed.dir = "/var/rmr/managed"
# Get or set rmr options stored in rmr.options.env.
# Called with name = value pairs (e.g. rmr.options(backend = "local")) the
# values are evaluated and assigned into the options environment; called
# with bare option names as unnamed arguments (e.g. rmr.options("backend"))
# the current values are returned, a single value being unlisted.
rmr.options =
  function(backend = c("hadoop", "local"),
           profile.nodes = c("off", "calls", "memory", "both"),
           keyval.length = 10^4,
           dfs.tempdir = tempdir()#,
           #depend.check = FALSE,
           #managed.dir = FALSE
  ) {
    # Unevaluated arguments of this very call; [-1] drops the function name.
    args = as.list(sys.call())[-1]
    this.call = match.call()
    # Back-compat: a logical profile.nodes maps TRUE -> "calls", FALSE -> "off".
    if (is.logical(profile.nodes)) {
      this.call[["profile.nodes"]] = {
        if(profile.nodes)
          "calls"
        else
          "off"}}
    # Named arguments act as setters: evaluate and store in the options env.
    lapply(
      names(args),
      function(x) {
        if(x != "")
          assign(x, eval(this.call[[x]]), envir = rmr.options.env)})
    # Unnamed arguments act as getters (named.slice keeps the unnamed ones).
    read.args =
      if(is.null(names(args)))
        args
      else
        named.slice(args, "")
    if(length(read.args) > 0) {
      read.args = simplify2array(read.args)
      retval = as.list(rmr.options.env)[read.args]
      if (length(retval) == 1) retval[[1]] else retval}
    else NULL }
## map and reduce function generation
# Lift ordinary functions into the map signature function(k, v).
# With one function, it is applied to the whole keyval pair; with two,
# fun1 transforms the key and fun2 the value.
to.map =
  function(fun1, fun2 = identity) {
    if (!missing(fun2))
      return(function(k, v) keyval(fun1(k), fun2(v)))
    function(k, v) fun1(keyval(k, v))}
# Reducers share the same calling convention as mappers.
to.reduce = to.map
## mapred combinators
# Compose a mapreduce step with a map applied to its output.
# A NULL intermediate result propagates (empty step => empty result).
compose.mapred =
  function(mapred, map)
    function(k, v) {
      intermediate = mapred(k, v)
      if (is.null(intermediate)) NULL
      else map(keys(intermediate), values(intermediate))}
# Run two map/reduce functions on the same input and concatenate the
# resulting key-value collections.
union.mapred =
  function(mr1, mr2)
    function(k, v) {
      c.keyval(mr1(k, v), mr2(k, v))}
#output cmp
# Compare two key-value collections for equality up to record ordering.
# Records on each side are sorted by a digest of their keys and values
# (the rmr.input attribute is stripped from values first) and then the
# aligned keys and values are compared, ignoring attributes.
cmp =
  function(x, y) {
    strip.digest = function(z) {attr(z, "rmr.input") = NULL; digest(z)}
    record.order = function(kk, vv)
      order(sapply(kk, digest), sapply(vv, strip.digest))
    kx = keys(x)
    vx = values(x)
    ky = keys(y)
    vy = values(y)
    ox = record.order(kx, vx)
    oy = record.order(ky, vy)
    isTRUE(all.equal(kx[ox], ky[oy], check.attributes = FALSE)) &&
      isTRUE(all.equal(vx[ox], vy[oy], check.attributes = FALSE))}
# backend independent dfs section
# TRUE when the file's base name starts with '.' or '_' (hidden files and
# hadoop side files such as _SUCCESS or _logs). Vectorized over f.
is.hidden.file =
  function(f)
    grepl("^[._]", basename(f))
# List the data files making up a dfs dataset. Under the local backend the
# name is used as is; under hadoop, a directory is expanded via hdfs.du to
# its non-hidden part files, while a plain file is returned unchanged.
part.list =
  function(fname) {
    if (rmr.options('backend') == "local")
      return(fname)
    if (!dfs.is.dir(fname))
      return(fname)
    du = hdfs.du(fname)
    du[!is.hidden.file(du[, 2]), 2]}
# Backend-independent file-system helpers: each dispatches on
# rmr.options('backend') to either the hdfs.* wrappers (hadoop) or the
# corresponding local-file operation.
# Does the path exist?
dfs.exists =
  function(f) {
    if (rmr.options('backend') == 'hadoop')
      hdfs.test(e = f)
    else file.exists(f)}
# Remove a file or directory (recursively in the local case).
dfs.rmr =
  function(f) {
    if(rmr.options('backend') == 'hadoop')
      hdfs.rmr(f)
    else unlink(f, recursive = TRUE)}
# Is the path a directory?
dfs.is.dir =
  function(f) {
    if (rmr.options('backend') == 'hadoop')
      hdfs.test(d = f)
    else file.info(f)['isdir']}
# Zero-byte dataset?
dfs.empty =
  function(f)
    dfs.size(f) == 0
# Total size in bytes; for hadoop, sums the sizes of the non-hidden
# entries reported by hdfs.du (0 when hdfs.du returns NULL).
dfs.size =
  function(f) {
    f = to.dfs.path(f)
    if(rmr.options('backend') == 'hadoop') {
      du = hdfs.du(f)
      if(is.null(du)) 0
      else
        sum(as.numeric(du[!is.hidden.file(du[,2]), 1]))}
    else file.info(f)[1, 'size'] }
# dfs bridge
# Resolve an input designator to a dfs path: character paths pass through
# unchanged and dfs.tempfile-style closures are called to obtain the path.
# Anything else yields NULL.
to.dfs.path =
  function(input) {
    if (is.character(input))
      input
    else if (is.function(input))
      input()}
# Write a key-value object kv to the dfs at `output` (a fresh dfs tempfile
# by default) using the given output format. The data is first serialized
# to a local temp file, then copied into place: for the hadoop backend via
# "loadtb" (binary formats) or hdfs.put (text formats), for the local
# backend via file.copy. Returns `output` so the result can be piped into
# mapreduce(). Non-keyval input is converted with a warning (NULL key).
to.dfs =
  function(
    kv,
    output = dfs.tempfile(),
    format = "native") {
    if(!is.keyval(kv))
      warning("Converting to.dfs argument to keyval with a NULL key")
    kv = as.keyval(kv)
    tmp = tempfile()
    dfsOutput = to.dfs.path(output)
    # A character format name is resolved to a full format descriptor.
    if(is.character(format)) format = make.output.format(format)
    write.file =
      function(kv, f) {
        con = file(f, if(format$mode == "text") "w" else "wb")
        keyval.writer = make.keyval.writer(format$mode,
                                           format$format,
                                           con)
        keyval.writer(kv)
        close(con)}
    write.file(kv, tmp)
    if(rmr.options('backend') == 'hadoop') {
      if(format$mode == "binary")
        system(paste(hadoop.streaming(), "loadtb", dfsOutput, "<", tmp))
      else hdfs.put(tmp, dfsOutput)}
    else file.copy(tmp, dfsOutput)
    file.remove(tmp)
    output}
# Read a dfs dataset back into memory as a key-value object.
# For the hadoop backend the part files are first merged into a local temp
# file ("dumptb" for binary formats, hdfs.get + concatenation for text);
# for the local backend the file is read in place. The merged file is then
# deserialized chunk by chunk and the chunks concatenated with c.keyval.
from.dfs = function(input, format = "native") {
  # Deserialize one local file into a single keyval object.
  read.file = function(f) {
    con = file(f, if(format$mode == "text") "r" else "rb")
    keyval.reader = make.keyval.reader(format$mode, format$format, rmr.options('keyval.length'), con)
    retval = make.fast.list()
    kv = keyval.reader()
    while(!is.null(kv)) {
      retval(list(kv))
      kv = keyval.reader()}
    close(con)
    c.keyval(retval())}
  # Merge binary part files via hadoop streaming's dumptb utility.
  dumptb = function(src, dest){
    lapply(src, function(x) system(paste(hadoop.streaming(), "dumptb", x, ">>", dest)))}
  # Merge text part files: fetch each locally, then append to dest
  # (platform-specific concatenation command). Note: on.exit captures tmp
  # lazily, so the tempfile assigned on the next line is cleaned up.
  getmerge = function(src, dest) {
    on.exit(unlink(tmp))
    tmp = tempfile()
    lapply(src, function(x) {
      hdfs.get(as.character(x), tmp)
      if(.Platform$OS.type == "windows") {
        cmd = paste('type', tmp, '>>' , dest)
        system(paste(Sys.getenv("COMSPEC"),"/c",cmd))
      }
      else {
        system(paste('cat', tmp, '>>' , dest))
      }
      unlink(tmp)})
    dest}
  fname = to.dfs.path(input)
  if(is.character(format)) format = make.input.format(format)
  if(rmr.options("backend") == "hadoop") {
    tmp = tempfile()
    if(format$mode == "binary") dumptb(part.list(fname), tmp)
    else getmerge(part.list(fname), tmp)}
  else
    tmp = fname
  retval = read.file(tmp)
  if(rmr.options("backend") == "hadoop") unlink(tmp)
  retval}
# mapreduce
# Create a temporary dfs path, returned as a closure that yields the path
# when called (the representation to.dfs.path understands). A finalizer is
# registered on the closure's environment so the backing file is deleted
# from the dfs once the closure is garbage collected -- except inside a
# running hadoop task (mapred_task_id set), where workers must not delete
# shared files. The drive-letter split handles Windows-style paths.
dfs.tempfile = function(pattern = "file", tmpdir = NULL) {
  if (is.null(tmpdir)) tmpdir = tempdir()
  fname = tempfile(pattern, tmpdir)
  subfname = strsplit(fname, ":")
  if(length(subfname[[1]]) > 1) fname = subfname[[1]][2]
  namefun = function() {fname}
  reg.finalizer(environment(namefun),
                function(e) {
                  fname = eval(expression(fname), envir = e)
                  if(Sys.getenv("mapred_task_id") == "" && dfs.exists(fname)) dfs.rmr(fname)
                },
                onexit = TRUE)
  namefun}
# Deterministic dfs path for a mapreduce call: a digest of the call's
# evaluated arguments under managed.dir, so identical calls map to the
# same output location (used when depend.check is on).
# NOTE(review): 'managed.dir' is commented out of rmr.options' defaults
# above -- confirm this code path is still reachable/configured.
dfs.managed.file = function(call, managed.dir = rmr.options('managed.dir')) {
  file.path(managed.dir, digest(lapply(call, eval)))}
# Run a mapreduce job on the currently selected backend.
# input/output are dfs paths or dfs.tempfile closures; map/reduce/combine
# follow the function(k, v) convention (see to.map). Returns `output`
# (defaulting to a fresh dfs tempfile, or a digest-derived managed file
# when depend.check is on) so that jobs can be chained.
mapreduce = function(
  input,
  output = NULL,
  map = to.map(identity),
  reduce = NULL,
  vectorized.reduce = FALSE,
  combine = NULL,
  in.memory.combine = FALSE,
  input.format = "native",
  output.format = "native",
  backend.parameters = list(),
  verbose = TRUE) {
  on.exit(expr = gc(), add = TRUE) #this is here to trigger cleanup of tempfiles
  if (is.null(output))
    output = {
      if(rmr.options('depend.check'))
        dfs.managed.file(match.call())
      else
        dfs.tempfile()}
  # Character format names are resolved to full format descriptors.
  if(is.character(input.format)) input.format = make.input.format(input.format)
  if(is.character(output.format)) output.format = make.output.format(output.format)
  if(!missing(backend.parameters)) warning("backend.parameters is deprecated.")
  # Dispatch to the backend-specific job runner.
  backend = rmr.options('backend')
  mr = switch(backend,
              hadoop = rmr.stream,
              local = mr.local,
              stop("Unsupported backend: ", backend))
  # NOTE(review): 'install.args'/'update.args' are not among the option
  # defaults above; rmr.options() returns NULL for them unless set
  # elsewhere, so the rmr.install/rmr.update hooks default to NULL.
  mr(map = map,
     reduce = reduce,
     combine = combine,
     vectorized.reduce,
     in.folder = if(is.list(input)) {lapply(input, to.dfs.path)} else to.dfs.path(input),
     out.folder = to.dfs.path(output),
     profile.nodes = rmr.options('profile.nodes'),
     keyval.length = rmr.options('keyval.length'),
     rmr.install = {
       if(!is.null(rmr.options('install.args')))
         do.call(Curry, c(install.packages,rmr.options('install.args')))
       else NULL},
     rmr.update = {
       if(!is.null(rmr.options('update.args')))
         do.call(Curry, c(update.packages, rmr.options('update.args')))
       else NULL},
     input.format = input.format,
     output.format = output.format,
     in.memory.combine = in.memory.combine,
     backend.parameters = backend.parameters[[backend]],
     verbose = verbose)
  output
}
##special jobs
## a sort of relational join very useful in a variety of map reduce algorithms
## to.dfs(lapply(1:10, function(i) keyval(i, i^2)), "/tmp/reljoin.left")
## to.dfs(lapply(1:10, function(i) keyval(i, i^3)), "/tmp/reljoin.right")
## equijoin(left.input="/tmp/reljoin.left", right.input="/tmp/reljoin.right", output = "/tmp/reljoin.out")
## from.dfs("/tmp/reljoin.out")
# Default equijoin reducer. Plain (non-data.frame) lists from either side
# are returned untouched as list(left=, right=). Otherwise both sides are
# coerced to data frames, their columns suffixed with ".l"/".r", and
# cross-merged; an all-NA side (outer-join padding) yields the other side.
reduce.default =
  function(k, vl, vr) {
    plain.list = function(x) is.list(x) && !is.data.frame(x)
    if (plain.list(vl) || plain.list(vr)) {
      list(left = vl, right = vr)
    }
    else {
      left = as.data.frame(vl)
      right = as.data.frame(vr)
      names(left) = paste0(names(left), ".l")
      names(right) = paste0(names(right), ".r")
      if (all(is.na(left))) right
      else if (all(is.na(right))) left
      else merge(left, right, by = NULL)
    }
  }
# Relational equijoin of two key-value datasets as a mapreduce job.
# Either join `input` with itself or give left.input and right.input;
# `outer` selects inner ("") / left / right / full join semantics;
# map.left/map.right preprocess each side and `reduce` combines matching
# records (reduce.default cross-merges them as data frames).
equijoin =
  function(
    left.input = NULL,
    right.input = NULL,
    input = NULL,
    output = NULL,
    input.format = "native",
    output.format = "native",
    outer = c("", "left", "right", "full"),
    map.left = to.map(identity),
    map.right = to.map(identity),
    reduce = reduce.default) {
    # Either a self-join via `input`, or both sides explicitly -- never a mix.
    stopifnot(xor(!is.null(left.input), !is.null(input) &&
                    (is.null(left.input) == is.null(right.input))))
    outer = match.arg(outer)
    left.outer = outer == "left"
    right.outer = outer == "right"
    full.outer = outer == "full"
    if (is.null(left.input)) {
      left.input = input}
    # Tag each value with its side so the reducer can split them again.
    mark.side =
      function(kv, is.left) {
        kv = split.keyval(kv)
        keyval(keys(kv),
               lapply(values(kv),
                      function(v) {
                        list(val = v, is.left = is.left)}))}
    # Canonicalize a path/URL so input files can be attributed to a side:
    # strip part-file suffixes, normalize separators and collapse slashes.
    rmr.normalize.path =
      function(url.or.path) {
        if(.Platform$OS.type == "windows")
          url.or.path = gsub("\\\\","/", url.or.path)
        gsub(
          "/+",
          "/",
          paste(
            "/",
            gsub(
              "part-[0-9]+$",
              "",
              parse_url(url.or.path)$path),
            "/",
            sep = ""))}
    # Does the chunk being mapped come from the left input? (The hadoop
    # streaming env var map_input_file names the current input file.)
    is.left.side =
      function(left.input) {
        rmr.normalize.path(to.dfs.path(left.input)) ==
          rmr.normalize.path(Sys.getenv("map_input_file"))}
    # Partition tagged values back into left (TRUE) and right (FALSE) lists.
    reduce.split =
      function(vv) {
        tapply(
          vv,
          sapply(vv, function(v) v$is.left),
          function(v) lapply(v, function(x)x$val),
          simplify = FALSE)}
    # Outer joins pad a missing side with NA instead of dropping the key.
    pad.side =
      function(vv, outer)
        if (length(vv) == 0 && (outer)) c(NA) else c.or.rbind(vv)
    map =
      if (is.null(input)) {
        function(k, v) {
          ils = is.left.side(left.input)
          mark.side(if(ils) map.left(k, v) else map.right(k, v), ils)}}
      else {
        function(k, v) {
          c.keyval(mark.side(map.left(k, v), TRUE),
                   mark.side(map.right(k, v), FALSE))}}
    eqj.reduce =
      function(k, vv) {
        rs = reduce.split(vv)
        left.side = pad.side(rs$`TRUE`, right.outer || full.outer)
        right.side = pad.side(rs$`FALSE`, left.outer || full.outer)
        if(!is.null(left.side) && !is.null(right.side))
          reduce(k[[1]], left.side, right.side)}
    # BUG FIX: the original call ended with a trailing comma, passing a
    # spurious empty positional argument to mapreduce().
    mapreduce(
      map = map,
      reduce = eqj.reduce,
      input = c(left.input, right.input),
      output = output,
      input.format = input.format,
      output.format = output.format)}
# Hadoop streaming counter/status protocol: lines written to stderr in the
# "reporter:..." format are picked up by the task tracker.
# Report a free-form task status string.
status = function(value) {
  line = sprintf("reporter:status:%s\n", value)
  cat(line, file = stderr())
}
# Increment a named counter within a counter group (default step 1).
increment.counter =
  function(group, counter, increment = 1) {
    payload = paste(group, counter, increment, sep = ",")
    cat(sprintf("reporter:counter:%s\n", payload), file = stderr())
  }
|
/pkg/R/mapreduce.R
|
no_license
|
forschnix/rmr2
|
R
| false
| false
| 12,198
|
r
|
# Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#options
rmr.options.env = new.env(parent=emptyenv())
rmr.options.env$backend = "hadoop"
rmr.options.env$keyval.length = 10^4
rmr.options.env$profile.nodes = "off"
rmr.options.env$dfs.tempdir = NULL
rmr.options.env$depend.check = FALSE
#rmr.options$managed.dir = "/var/rmr/managed"
rmr.options =
function(backend = c("hadoop", "local"),
profile.nodes = c("off", "calls", "memory", "both"),
keyval.length = 10^4,
dfs.tempdir = tempdir()#,
#depend.check = FALSE,
#managed.dir = FALSE
) {
args = as.list(sys.call())[-1]
this.call = match.call()
if (is.logical(profile.nodes)) {
this.call[["profile.nodes"]] = {
if(profile.nodes)
"calls"
else
"off"}}
lapply(
names(args),
function(x) {
if(x != "")
assign(x, eval(this.call[[x]]), envir = rmr.options.env)})
read.args =
if(is.null(names(args)))
args
else
named.slice(args, "")
if(length(read.args) > 0) {
read.args = simplify2array(read.args)
retval = as.list(rmr.options.env)[read.args]
if (length(retval) == 1) retval[[1]] else retval}
else NULL }
## map and reduce function generation
to.map =
function(fun1, fun2 = identity) {
if (missing(fun2)) {
function(k, v) fun1(keyval(k, v))}
else {
function(k, v) keyval(fun1(k), fun2(v))}}
to.reduce = to.map
## mapred combinators
compose.mapred =
function(mapred, map)
function(k, v) {
out = mapred(k, v)
if (is.null(out)) NULL
else map(keys(out), values(out))}
union.mapred =
function(mr1, mr2) function(k, v) {
c.keyval(mr1(k, v), mr2(k, v))}
#output cmp
cmp =
function(x, y) {
kx = keys(x)
ky = keys(y)
vx = values(x)
vy = values(y)
ox = order(sapply(kx, digest), sapply(vx, function(z){attr(z, "rmr.input") = NULL; digest(z)}))
oy = order(sapply(ky, digest), sapply(vy, function(z){attr(z, "rmr.input") = NULL; digest(z)}))
isTRUE(all.equal(kx[ox], ky[oy], check.attributes = FALSE)) &&
isTRUE(all.equal(vx[ox], vy[oy], check.attributes = FALSE))}
# backend independent dfs section
is.hidden.file =
function(f)
regexpr("[\\._]", basename(f)) == 1
part.list =
function(fname) {
if(rmr.options('backend') == "local") fname
else {
if(dfs.is.dir(fname)) {
du = hdfs.du(fname)
du[!is.hidden.file(du[,2]),2]}
else fname}}
dfs.exists =
function(f) {
if (rmr.options('backend') == 'hadoop')
hdfs.test(e = f)
else file.exists(f)}
dfs.rmr =
function(f) {
if(rmr.options('backend') == 'hadoop')
hdfs.rmr(f)
else unlink(f, recursive = TRUE)}
dfs.is.dir =
function(f) {
if (rmr.options('backend') == 'hadoop')
hdfs.test(d = f)
else file.info(f)['isdir']}
dfs.empty =
function(f)
dfs.size(f) == 0
dfs.size =
function(f) {
f = to.dfs.path(f)
if(rmr.options('backend') == 'hadoop') {
du = hdfs.du(f)
if(is.null(du)) 0
else
sum(as.numeric(du[!is.hidden.file(du[,2]), 1]))}
else file.info(f)[1, 'size'] }
# dfs bridge
to.dfs.path =
function(input) {
if (is.character(input)) {
input}
else {
if(is.function(input)) {
input()}}}
to.dfs =
function(
kv,
output = dfs.tempfile(),
format = "native") {
if(!is.keyval(kv))
warning("Converting to.dfs argument to keyval with a NULL key")
kv = as.keyval(kv)
tmp = tempfile()
dfsOutput = to.dfs.path(output)
if(is.character(format)) format = make.output.format(format)
write.file =
function(kv, f) {
con = file(f, if(format$mode == "text") "w" else "wb")
keyval.writer = make.keyval.writer(format$mode,
format$format,
con)
keyval.writer(kv)
close(con)}
write.file(kv, tmp)
if(rmr.options('backend') == 'hadoop') {
if(format$mode == "binary")
system(paste(hadoop.streaming(), "loadtb", dfsOutput, "<", tmp))
else hdfs.put(tmp, dfsOutput)}
else file.copy(tmp, dfsOutput)
file.remove(tmp)
output}
from.dfs = function(input, format = "native") {
read.file = function(f) {
con = file(f, if(format$mode == "text") "r" else "rb")
keyval.reader = make.keyval.reader(format$mode, format$format, rmr.options('keyval.length'), con)
retval = make.fast.list()
kv = keyval.reader()
while(!is.null(kv)) {
retval(list(kv))
kv = keyval.reader()}
close(con)
c.keyval(retval())}
dumptb = function(src, dest){
lapply(src, function(x) system(paste(hadoop.streaming(), "dumptb", x, ">>", dest)))}
getmerge = function(src, dest) {
on.exit(unlink(tmp))
tmp = tempfile()
lapply(src, function(x) {
hdfs.get(as.character(x), tmp)
if(.Platform$OS.type == "windows") {
cmd = paste('type', tmp, '>>' , dest)
system(paste(Sys.getenv("COMSPEC"),"/c",cmd))
}
else {
system(paste('cat', tmp, '>>' , dest))
}
unlink(tmp)})
dest}
fname = to.dfs.path(input)
if(is.character(format)) format = make.input.format(format)
if(rmr.options("backend") == "hadoop") {
tmp = tempfile()
if(format$mode == "binary") dumptb(part.list(fname), tmp)
else getmerge(part.list(fname), tmp)}
else
tmp = fname
retval = read.file(tmp)
if(rmr.options("backend") == "hadoop") unlink(tmp)
retval}
# mapreduce
dfs.tempfile = function(pattern = "file", tmpdir = NULL) {
if (is.null(tmpdir)) tmpdir = tempdir()
fname = tempfile(pattern, tmpdir)
subfname = strsplit(fname, ":")
if(length(subfname[[1]]) > 1) fname = subfname[[1]][2]
namefun = function() {fname}
reg.finalizer(environment(namefun),
function(e) {
fname = eval(expression(fname), envir = e)
if(Sys.getenv("mapred_task_id") == "" && dfs.exists(fname)) dfs.rmr(fname)
},
onexit = TRUE)
namefun}
dfs.managed.file = function(call, managed.dir = rmr.options('managed.dir')) {
file.path(managed.dir, digest(lapply(call, eval)))}
mapreduce = function(
input,
output = NULL,
map = to.map(identity),
reduce = NULL,
vectorized.reduce = FALSE,
combine = NULL,
in.memory.combine = FALSE,
input.format = "native",
output.format = "native",
backend.parameters = list(),
verbose = TRUE) {
on.exit(expr = gc(), add = TRUE) #this is here to trigger cleanup of tempfiles
if (is.null(output))
output = {
if(rmr.options('depend.check'))
dfs.managed.file(match.call())
else
dfs.tempfile()}
if(is.character(input.format)) input.format = make.input.format(input.format)
if(is.character(output.format)) output.format = make.output.format(output.format)
if(!missing(backend.parameters)) warning("backend.parameters is deprecated.")
backend = rmr.options('backend')
mr = switch(backend,
hadoop = rmr.stream,
local = mr.local,
stop("Unsupported backend: ", backend))
mr(map = map,
reduce = reduce,
combine = combine,
vectorized.reduce,
in.folder = if(is.list(input)) {lapply(input, to.dfs.path)} else to.dfs.path(input),
out.folder = to.dfs.path(output),
profile.nodes = rmr.options('profile.nodes'),
keyval.length = rmr.options('keyval.length'),
rmr.install = {
if(!is.null(rmr.options('install.args')))
do.call(Curry, c(install.packages,rmr.options('install.args')))
else NULL},
rmr.update = {
if(!is.null(rmr.options('update.args')))
do.call(Curry, c(update.packages, rmr.options('update.args')))
else NULL},
input.format = input.format,
output.format = output.format,
in.memory.combine = in.memory.combine,
backend.parameters = backend.parameters[[backend]],
verbose = verbose)
output
}
##special jobs
## a sort of relational join very useful in a variety of map reduce algorithms
## to.dfs(lapply(1:10, function(i) keyval(i, i^2)), "/tmp/reljoin.left")
## to.dfs(lapply(1:10, function(i) keyval(i, i^3)), "/tmp/reljoin.right")
## equijoin(left.input="/tmp/reljoin.left", right.input="/tmp/reljoin.right", output = "/tmp/reljoin.out")
## from.dfs("/tmp/reljoin.out")
reduce.default =
function(k, vl, vr) {
if((is.list(vl) && !is.data.frame(vl)) ||
(is.list(vr) && !is.data.frame(vr)))
list(left = vl, right = vr)
else{
vl = as.data.frame(vl)
vr = as.data.frame(vr)
names(vl) = paste(names(vl), "l", sep = ".")
names(vr) = paste(names(vr), "r", sep = ".")
if(all(is.na(vl))) vr
else {
if(all(is.na(vr))) vl
else
merge(vl, vr, by = NULL)}}}
# Relational equijoin of two key-value datasets as a mapreduce job.
# Either join `input` with itself or give left.input and right.input;
# `outer` selects inner ("") / left / right / full join semantics;
# map.left/map.right preprocess each side and `reduce` combines matching
# records (reduce.default cross-merges them as data frames).
equijoin =
  function(
    left.input = NULL,
    right.input = NULL,
    input = NULL,
    output = NULL,
    input.format = "native",
    output.format = "native",
    outer = c("", "left", "right", "full"),
    map.left = to.map(identity),
    map.right = to.map(identity),
    reduce = reduce.default) {
    # Either a self-join via `input`, or both sides explicitly -- never a mix.
    stopifnot(xor(!is.null(left.input), !is.null(input) &&
                    (is.null(left.input) == is.null(right.input))))
    outer = match.arg(outer)
    left.outer = outer == "left"
    right.outer = outer == "right"
    full.outer = outer == "full"
    if (is.null(left.input)) {
      left.input = input}
    # Tag each value with its side so the reducer can split them again.
    mark.side =
      function(kv, is.left) {
        kv = split.keyval(kv)
        keyval(keys(kv),
               lapply(values(kv),
                      function(v) {
                        list(val = v, is.left = is.left)}))}
    # Canonicalize a path/URL so input files can be attributed to a side:
    # strip part-file suffixes, normalize separators and collapse slashes.
    rmr.normalize.path =
      function(url.or.path) {
        if(.Platform$OS.type == "windows")
          url.or.path = gsub("\\\\","/", url.or.path)
        gsub(
          "/+",
          "/",
          paste(
            "/",
            gsub(
              "part-[0-9]+$",
              "",
              parse_url(url.or.path)$path),
            "/",
            sep = ""))}
    # Does the chunk being mapped come from the left input? (The hadoop
    # streaming env var map_input_file names the current input file.)
    is.left.side =
      function(left.input) {
        rmr.normalize.path(to.dfs.path(left.input)) ==
          rmr.normalize.path(Sys.getenv("map_input_file"))}
    # Partition tagged values back into left (TRUE) and right (FALSE) lists.
    reduce.split =
      function(vv) {
        tapply(
          vv,
          sapply(vv, function(v) v$is.left),
          function(v) lapply(v, function(x)x$val),
          simplify = FALSE)}
    # Outer joins pad a missing side with NA instead of dropping the key.
    pad.side =
      function(vv, outer)
        if (length(vv) == 0 && (outer)) c(NA) else c.or.rbind(vv)
    map =
      if (is.null(input)) {
        function(k, v) {
          ils = is.left.side(left.input)
          mark.side(if(ils) map.left(k, v) else map.right(k, v), ils)}}
      else {
        function(k, v) {
          c.keyval(mark.side(map.left(k, v), TRUE),
                   mark.side(map.right(k, v), FALSE))}}
    eqj.reduce =
      function(k, vv) {
        rs = reduce.split(vv)
        left.side = pad.side(rs$`TRUE`, right.outer || full.outer)
        right.side = pad.side(rs$`FALSE`, left.outer || full.outer)
        if(!is.null(left.side) && !is.null(right.side))
          reduce(k[[1]], left.side, right.side)}
    # BUG FIX: the original call ended with a trailing comma, passing a
    # spurious empty positional argument to mapreduce().
    mapreduce(
      map = map,
      reduce = eqj.reduce,
      input = c(left.input, right.input),
      output = output,
      input.format = input.format,
      output.format = output.format)}
status = function(value)
cat(
sprintf(
"reporter:status:%s\n",
value),
file = stderr())
increment.counter =
function(group, counter, increment = 1)
cat(
sprintf(
"reporter:counter:%s\n",
paste(group, counter, increment, sep=",")),
file = stderr())
|
# Internal R6 class behind sumstat_four_gamete(): stores the target
# population index and computes the four-gamete statistic from the
# simulated segregating sites (req_segsites = TRUE requests them).
#' @importFrom R6 R6Class
stat_four_gamete_class <- R6Class("stat_four_gamete", inherit = sumstat_class,
  private = list(
    population = NULL,
    req_segsites = TRUE
  ),
  public = list(
    # population must be a single numeric population index.
    initialize = function(name, population, transformation) {
      assert_that(is.numeric(population))
      assert_that(length(population) == 1)
      private$population <- population
      super$initialize(name, transformation)
    },
    # Delegates to calc_four_gamete_stat() for the individuals of the
    # stored population; the trees and files arguments are unused here.
    calculate = function(seg_sites, trees, files, model) {
      calc_four_gamete_stat(seg_sites,
                            get_population_individuals(model,
                                                       private$population),
                            get_locus_length_matrix(model))
    }
  )
)
#' Summary Statistic: Four-Gamete-Condition
#'
#' This summary statistic calculates a number of values (see 'Value')
#' related to the Four-Gamete-Condition (see 'Details').
#' It is sensitive for recombination and particularly useful when estimating
#' recombination rates with \pkg{jaatha} or Approximate Bayesian Computation.
#'
#' The Four-Gamete-Condition for two SNPs is violated if all four combinations
#' of derived and ancestral alleles at the SNPs are observed in a gamete/a
#' haplotype. Under an Infinite-Sites mutation model, a violation indicates that
#' there must have been at least one recombination event between the SNPs.
#'
#' @param name The name of the summary statistic. When simulating a model,
#'   the value of the statistics are written to an entry of the returned list
#'   with this name. Summary statistic names must be unique in a model.
#' @param population The population for which the statistic is calculated.
#'   Can also be "all" to calculate it from all populations.
#' @param transformation An optional function for transforming the results
#'   of the statistic. If specified, the results of the transformation are
#'   returned instead of the original values.
#' @return
#' The statistic generates a matrix where each row represents one locus, and
#' the columns give the statistic for different classes of pairs of SNPs:
#'
#' \describe{
#'  \item{mid_near}{The value for all pairs of SNPs that are close together,
#'    that is within 10 percent of the locus' length. If locus trios are used,
#'    only pairs of SNPs where both SNPs are on the middle locus are considered.
#'  }
#'  \item{mid_far}{Same as \code{mid_near}, but for pairs of SNPs that are
#'    more than 10 percent of the locus' length apart. }
#'  \item{outer}{Only when using locus trios. The statistic for pairs
#'    where both SNPs are on the same outer locus.}
#'  \item{between}{Only when using locus trios. The statistic for pairs
#'    where one SNP is on the middle locus, and the other is on an outer one.}
#'  \item{mid}{The value for all pairs on the
#'    middle locus or all pairs when not using trios.}
#'  \item{perc_polym}{The percentage of positions that are polymorphic.}
#' }
#' @export
#' @template summary_statistics
#' @examples
#' model <- coal_model(5, 2) +
#'   feat_mutation(50) +
#'   feat_recombination(10) +
#'   sumstat_four_gamete()
#' stats <- simulate(model)
#' print(stats$four_gamete)
sumstat_four_gamete <- function(name = "four_gamete", population = 1,
                                transformation = identity) {
  stat_four_gamete_class$new(name, population, transformation)
}
|
/coala/R/sumstat_four_gamete.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 3,394
|
r
|
#' @importFrom R6 R6Class
stat_four_gamete_class <- R6Class("stat_four_gamete", inherit = sumstat_class,
private = list(
population = NULL,
req_segsites = TRUE
),
public = list(
initialize = function(name, population, transformation) {
assert_that(is.numeric(population))
assert_that(length(population) == 1)
private$population <- population
super$initialize(name, transformation)
},
calculate = function(seg_sites, trees, files, model) {
calc_four_gamete_stat(seg_sites,
get_population_individuals(model,
private$population),
get_locus_length_matrix(model))
}
)
)
#' Summary Statistic: Four-Gamete-Condition
#'
#' This summary statistic calculates a number of values (see 'Value')
#' related to the Four-Gamete-Condition (see 'Details').
#' It is sensitive for recombination and particularly useful when estimating
#' recombination rates with \pkg{jaatha} or Approximate Bayesian Computation.
#'
#' The Four-Gamete-Condition for two SNPs is violated if all four combinations
#' of derived and ancestral alleles at the SNPs are observed in a gamete/a
#' haplotype. Under an Infinite-Sites mutation model, a violation indicates that
#' there must have been at least one recombination event between the SNPs.
#'
#' @param name The name of the summary statistic. When simulating a model,
#' the value of the statistics are written to an entry of the returned list
#' with this name. Summary statistic names must be unique in a model.
#' @param population The population for which the statistic is calculated.
#' Can also be "all" to calculate it from all populations.
#' @param transformation An optional function for transforming the results
#' of the statistic. If specified, the results of the transformation are
#' returned instead of the original values.
#' @return
#' The statistic generates a matrix where each row represents one locus, and
#' the columns give the statistic for different classes of pairs of SNPs:
#'
#' \describe{
#' \item{mid_near}{The value for all pairs of SNPs that are close together,
#'    that is within 10 percent of the locus' length. If locus trios are used,
#'    only pairs of SNPs where both SNPs are on the middle locus are considered.
#' }
#' \item{mid_far}{Same as \code{mid_near}, but for pairs of SNPs that are
#'    more than 10 percent of the locus' length apart. }
#' \item{outer}{Only when using locus trios. The statistic for pairs
#' where both SNPs are on the same outer locus.}
#' \item{between}{Only when using locus trios. The statistic for pairs
#' where one SNPs is on the middle locus, and the other is on an outer one.}
#' \item{mid}{The value for all pairs on the
#' middle locus or all pairs when not using trios.}
#'  \item{perc_polym}{The percentage of positions that are polymorphic.}
#' }
#' @export
#' @template summary_statistics
#' @examples
#' model <- coal_model(5, 2) +
#' feat_mutation(50) +
#' feat_recombination(10) +
#' sumstat_four_gamete()
#' stats <- simulate(model)
#' print(stats$four_gamete)
sumstat_four_gamete <- function(name = "four_gamete", population = 1,
transformation = identity) {
stat_four_gamete_class$new(name, population, transformation)
}
|
#Obtaining Twitter Data using R
#1. Registering an API app with a Twitter account
#https://apps.twitter.com
#2. Insert credential values
# NOTE: the 'xx' strings below are placeholders -- real credentials must be
# supplied before setup_twitter_oauth()/searchTwitter() will work.
api_key <-'xx'
api_secret <- 'xx'
access_token <- 'xx'
access_token_secret <-'xx'
library(twitteR)
setup_twitter_oauth(api_key,
                    api_secret,
                    access_token,
                    access_token_secret)
#3. Extract tweets
#Take a look at TESLA tweets
#pull 1000 tweets
tweets <-searchTwitter("#tesla", n=1000, lang='en')
length(tweets)
#[1] 1000
tweets #Check your tweets
#convert the list of status objects into a data frame
tesla<-twListToDF(tweets)
#4. Create CSV file and save it
write.csv(tesla, file = '~/Desktop/tesla.csv', row.names = F)
# Data Cleaning and Preparation
#1. Reading Data File
tesla<-read.csv(file.choose(), header=T) #choose the tesla.csv file
str(tesla) #look at structure of the file (has 1000 obs and 16 var)
#first col is text, whether it is favorited, what is the count, when it was created,
#id of person who tweeted, whether it is a retweet etc.....)
#clean the text of special characters such as symbols and emoticons
tesla$text <- sapply(tesla$text,function(row) iconv(row, "latin1", "ASCII", sub=""))
#2. Building Corpus
library(tm)
library(NLP)
# NOTE(review): 'utf-8-mac' is a macOS-specific encoding name; on other
# platforms use to='UTF-8' -- confirm before running elsewhere.
corpus <-iconv(tesla$text, to='utf-8-mac') #need only the first col text from file
corpus <- Corpus(VectorSource(corpus)) #corpus is a collection of texts
inspect(corpus[1:5]) #inspect the first five tweets
#3. Cleaning Data
#convert data to lower case for analysis
corpus <-tm_map(corpus, tolower) #convert all alphabet to lower case
inspect(corpus[1:5]) #inspect the first five tweets
#remove punctuation
corpus <-tm_map(corpus, removePunctuation)
inspect(corpus[1:5]) #inspect the first five tweets
#remove numbers
corpus <-tm_map(corpus, removeNumbers)
inspect(corpus[1:5]) #inspect the first five tweets
#remove common stop words - they don't add any informational value
#use the stopwords function in english
#run stopwords('english') to see what words are removed
cleanset <-tm_map(corpus, removeWords, stopwords('english'))
inspect(cleanset[1:5])
#remove URLs (https://etc.) with a regex over http-prefixed tokens
removeURL <- function(x) gsub("http[[:alnum:]]*", '', x)
cleanset <-tm_map(cleanset, content_transformer(removeURL))
inspect(cleanset[1:5])
#tweets were pulled using tesla or tsla so we can clean it from the text
cleanset <-tm_map(cleanset, removeWords, c('tesla', 'tsla', 'teslas'))
inspect(cleanset[1:5])
#remove white spaces
cleanset <- tm_map(cleanset, stripWhitespace)
inspect(cleanset[1:5])
#now provide some structure to tweets by creating a matrix of rows/columns
#this is called a term document matrix (tdm)
#Create term document matrix
tdm <- TermDocumentMatrix(cleanset)
tdm
#<<TermDocumentMatrix (terms: 2243, documents: 1000)>>
#Non-/sparse entries: 9948/2233052
#Sparsity            : 100% (it is rounded)
#Maximal term length: 34
#Weighting          : term frequency (tf)
#to look at this matrix, convert it into a plain matrix first
tdm <- as.matrix(tdm)
tdm[1:10, 1:20] #look at first 10 rows/terms and 20 tweets
#VISUALIZE TEXT DATA
#in the tdm, summing a row gives how many times that term appears
#there are many terms, so keep only those with row sum >= 30
# Bar Plot
w <- rowSums(tdm)
w <- subset(w, w>=30) #run "w" to see which words appear how many times
barplot(w, las = 2, col=rainbow(40)) #words represented vertically using las=2, rainbow colors
#words such as didnt, car, and people's names may also appear, so go back and
#combine them into a clean data dictionary, clean the dataset of these words,
#and then redo the term document matrix
##################
#######after creating the bar plot, you can go back and combine words or clean up further if needed
#and recreate the term document matrix
# NOTE: c('add words', 'add words') below is a placeholder -- replace with the
# actual words to drop before re-running.
cleanset <-tm_map(cleanset, removeWords, c('add words', 'add words'))
inspect(cleanset[1:5])
##################
# Word Cloud
library(wordcloud)
library(RColorBrewer)
w <- sort(rowSums(tdm), decreasing=TRUE) #sort words in decreasing order
set.seed(9999)
wordcloud(words = names(w),
          freq=w, max.words = 300,
          random.order =FALSE) #words are specified in names(w), frequency is stored in w, random order=false
#specifying options in word cloud
#Specify that max words be no more than say, 200
#Freq for terms to be included in wordcloud (say they have to appear 5 times to be included)
#color words, specify scale (bigger words max size =3, smaller =0.2)
#rotate some words (rotation percentage = 30%)
wordcloud(words = names(w),
freq=w,
random.order =FALSE,
max.words = 200,
min.freq = 5,
colors = brewer.pal(8, 'Dark2'),
scale = c(3, 0.2),
rot.per = .3)
#SENTIMENT ANALYSIS USING R
#load packages
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(dplyr)
#Reading Files
#take the initial tesla tweet file (1000 obs and 16 vars for this)
#take the first column, text, and put it into the tweets vector
# Fix: the data frame created above is `tesla`; `tesla2` was never defined,
# so the original line errored with "object 'tesla2' not found".
tweets <- iconv(tesla$text, to = "utf-8-mac")
#obtain sentiment scores for each 1000 tweets
#nrc_sentiment dictionary is called to calculate presence of
#eight emotions & their corresponding valence in their text file
s <-get_nrc_sentiment(tweets)
head(s)
#runs through each tweet and finds words corresponding to each sentiment
#and a score is given (last 2 cols are positive and negative tweet categories)
tail(s)
tweets[996] #look at tweet number 996
#you could also look at phrases or words in these tweets to see if they
#lead to positive or negative')
get_nrc_sentiment('ridiculous')
get_nrc_sentiment('finally tested summon feature')
#plot the sentiment scores
#lets sum the column scores across tweets for the plot
#label y axis as total count, main title of plot label
barplot(colSums(s),
las = 2,
ylab = 'Total Count',
main ='Sentiment Scores for Tesla Tweets')
####SOCIAL NETWORK ANALYSIS###
tdm[1:20, 1:20] #lets look at our term document matrix, 10 rows, 10 cols
library(igraph)
tdm[tdm>1] <-1
#whenever our tdm value is more than 1 for a tweet we convert into 1 because we dont need the values 2, 3,
#we only need that the term appeared (freq of terms is not required in network analysis)
termM <-tdm %*% t(tdm) #transpose of tdm matrix; create tweet adjacency matrix using %*%
termM[1:10, 1:10] #term term matrix, alerts appeared in 8 tweets, alerts and nflx appeared in 3 tweets
g <- graph.adjacency(termM, weighted=T, mode ='undirected') #convert it into graph, no direction for edges
g
#remove terms that have loops (going to self)
g <- simplify(g)
#set labels and degrees of Vertices (V), each word is a vertices
V(g)$label <- V(g)$name #label is name
V(g)$label
V(g)$degree <- degree(g) #degree is the number of connections between terms
V(g)$degree
#Histogram of node degree, lets just use 100 bars (too many words), label of y and x axis
hist(V(g)$degree,
breaks=100,
col='green',
main ='histogram of node degree',
ylab ='frequency',
xlab='degree of vertices') #right skewed
#Network diagram
set.seed(9999)
plot(g) #interpretation is difficult so recreate more meaningful visuals
#Recreate this by looking at just the top terms/nodes by degree
tdm <- tdm[rowSums(tdm)>30,] #lets reduce the size and counts of total frequency (rowSum)
#include only terms having frequency more than 30
#it will take out all very infrequent terms
#Rerun all other code
tdm[tdm>1] <-1
termM <-tdm %*% t(tdm)
g <- graph.adjacency(termM, weighted=T, mode ='undirected')
g <- simplify(g)
V(g)$label <- V(g)$name
V(g)$degree <- degree(g)
#play with options such as size of vertex, distance of labels, etc. then have labels of vertex
set.seed(9999)
plot(g,
vertex.color='green',
vertex.size = 8,
vertex.label.dist =1.5)
#much more cleaner than earlier. You can further increase size of vertex by changing options
#there are some dense connections in the nodes (to near nodes)
#Community creation (edge betweenness)
comm <- cluster_edge_betweenness(g)
plot(comm, g)
#you can also do this by using propagating labels
prop <-cluster_label_prop(g)
plot(prop, g) #groupings for community detection are different - algorithms are different
greed <-cluster_fast_greedy(as.undirected(g)) #greedy algorithm for clustering
plot(greed, as.undirected(g))
#highlighting degrees for a different kind of plot (play around with the numbers below)
V(g)$label.cex <- 2.2*V(g)$degree / max(V(g)$degree) + 0.3
V(g)$label.color <- rgb(0, 0, .2, .8)
V(g)$frame.color <- NA
egam <- (log(E(g)$weight) + 0.4) / max(log(E(g)$weight) + .4)
E(g)$color <- rgb(0.5, 0.5, 0, egam)
E(g)$width <- egam
plot(g,
vertex.color ='green',
vertex.size = V(g)$degree*0.5) #vertex size vary by degree
##VISUALIZATION and INTERPRETATION
#Network of tweets
tweetM <- t(tdm)%*%tdm #transpose of tdm, create tweet adjacency matrix of tdm usign %*%
g <- graph.adjacency(tweetM, weighted =T, mode = 'undirected') #store graph adjacency in g
V(g)$degree <- degree(g)
g<- simplify(g) #remove loops
#Use 100 tweets to make histogram of degree
#Use 100 breaks to make a histogram of tweet-vertex degree
# Fix: the y-axis label argument was misspelled `ylabl`, so the intended
# label never appeared (it was passed through `...` as an unknown argument).
hist(V(g)$degree,
     breaks = 100,
     col = 'green',
     main = 'histogram of degree',
     ylab = 'frequencies',
     xlab = 'degree')
#Set labels of vertices to tweet IDs
V(g)$label <- V(g)$name #vertices g label is the name
V(g)$label.cex <-1 # label size
V(g)$label.color <- rgb(0.4, 0, 0, 0.7) #change the numbers and play around for color diff
V(g)$size <- 2 #size of g
V(g)$frame.color <- NA #no frame color or lines of frame
plot(g, vertex.label =NA, vertex.size=5) #indicate size of vertex, for now, dont put labels (too much crowding)
#delete some vertices
egam <- (log(E(g)$weight) + 0.2)/ max(log(E(g)$weight) + 0.2)
E(g)$color <- rgb(0.5, 0.5, 0, egam)
E(g)$width <- egam
g2 <- delete.vertices(g, V(g)[degree(g)<100]) #degree of g less than 100; get rid of no.of connections less than 100
#if you lose too many nodes, reduce the number
plot(g2,
vertex.label.cex =0.90,
vertex.label.color ='black')
# look at clustering of tweets (1000 tweets), look at increasing/decreasing the tweet vertices #
#Delete edges - delete some edges to make the network better
#(delete edges with weight less than 2) and (delete vertices with degree less than 400)
E(g)$color <- rgb(0.5, 0.5, 0, egam)
E(g)$width <- egam
# Fix: the original `E(g)$weight <- 2` was an *assignment*, which overwrote
# every edge weight with 2 and then deleted only edge number 2. A comparison
# is needed to select the low-weight edges for deletion.
g3 <- delete.edges(g, E(g)[E(g)$weight < 2])
g3 <- delete.vertices(g3, V(g3)[degree(g3) < 400])
plot(g3)
# Fix: the data frame is named `tesla`; `tesla2` was never created.
tesla$text[c(523, 326)] #check difference between two different tweets
#take some samples of two/three major groups and find what the differences among the tweets are
tesla$text[c(481, 962)]
tesla$text[c(657, 834)]
|
/RCode/Extracting Twitter Data and Analysis_for NLP.R
|
no_license
|
KIKI-C/NLP-Workshop
|
R
| false
| false
| 10,800
|
r
|
#Obtaining Twitter Data using R
#1. Registering APRI using Twitter account
#https://apps.twitter.com
#2. Insert Values
api_key <-'xx'
api_secret <- 'xx'
access_token <- 'xx'
access_token_secret <-'xx'
library(twitteR)
setup_twitter_oauth(api_key,
api_secret,
access_token,
access_token_secret)
#3. Extract tweets
#Take a look at TESLA tweets
#pull 1000 tweets
tweets <-searchTwitter("#tesla", n=1000, lang='en')
length(tweets)
#[1] 1000
tweets #Check your tweets
#convert into list to data frame
tesla<-twListToDF(tweets)
#4. Create CSV file and save it
write.csv(tesla, file = '~/Desktop/tesla.csv', row.names = F)
# Data Cleaning and Preparation
#1. Reading Data File
tesla<-read.csv(file.choose(), header=T) #choose the tesla.csv file
str(tesla) #look at structure of the file (has 1000 obs and 16 var)
#first col is text, whether it is favorited, what is the count, when it was created,
#id of person who tweeted, whether it is a retweet etc.....)
#clean the text of special characters such as symbols and emoticons
tesla$text <- sapply(tesla$text,function(row) iconv(row, "latin1", "ASCII", sub=""))
#2. Building Corpus
library(tm)
library(NLP)
corpus <-iconv(tesla$text, to='utf-8-mac') #need only the first col text from file
corpus <- Corpus(VectorSource(corpus)) #corpus is a collection of texts
inspect(corpus[1:5]) #inspect the first five tweets
#3. Cleaning Data
#convert data to lower case for analysis
corpus <-tm_map(corpus, tolower) #convert all alphabet to lower case
inspect(corpus[1:5]) #inspect the first five tweets
#remove punctuations
corpus <-tm_map(corpus, removePunctuation)
inspect(corpus[1:5]) #inspect the first five tweets
#remove numbers
corpus <-tm_map(corpus, removeNumbers)
inspect(corpus[1:5]) #inspect the first five tweets
#remove common words-they dont add any informational value
#use the stopwords function in english
#select stopwords(english) to see what words are removed
cleanset <-tm_map(corpus, removeWords, stopwords('english'))
inspect(cleanset[1:5])
#remove URLs (https://etc.)
#make use of function http
# Strip URL fragments: delete every occurrence of "http" followed by a run of
# alphanumerics. (Punctuation such as ':' or '/' ends the match, so this is
# meant to run after removePunctuation has collapsed links into one token.)
removeURL <- function(x) {
  gsub("http[[:alnum:]]*", "", x)
}
cleanset <-tm_map(cleanset, content_transformer(removeURL))
inspect(cleanset[1:5])
#tweets were pulled using tesla or tsla so we can clean it from the text
cleanset <-tm_map(cleanset, removeWords, c('tesla', 'tsla', 'teslas'))
inspect(cleanset[1:5])
#remove white spaces
cleanset <- tm_map(cleanset, stripWhitespace)
inspect(cleanset[1:5])
#lets now provide some structure to tweets by creating a matrix of rows/coloums
#this is called term document matrix (tdm)
#Create term document matrix
tdm <- TermDocumentMatrix(cleanset)
tdm
#<<TermDocumentMatrix (terms: 2243, documents: 1000)>>
#Non-/sparse entries: 9948/2233052
#Sparsity : 100% (it is rounded)
#Maximal term length: 34
#Weighting : term frequency (tf)
#if you would like to look at this matrix, you have to convert this into matrix first
tdm <- as.matrix(tdm)
tdm[1:10, 1:20] #look at first 10 rows/terms and 20 tweets
#VISUALIZE TEXT DATA
#in the tdm if you sum rows, it will tell you how many times a term appears
#also there are many words/terms so we create a subset of w where row sum is >30
# Bar Plot
w <- rowSums(tdm)
w <- subset(w, w>=30) #run "w" to see which words appear how many times
barplot(w, las = 2, col=rainbow(40)) #words represented vertically using las=2, rainbow colors
#find that words such as didnt, car, people's names also appears so go back and combine them into a clean data dictionary
#clean the dataset of these words using dictionary created and then redo term document matrix
##################
#######after creating bar plot, you can go back and combine words or clean up further if needed
#and recreate the term document matrix
cleanset <-tm_map(cleanset, removeWords, c('add words', 'add words'))
inspect(cleanset[1:5])
##################
# Word Cloud
library(wordcloud)
library(RColorBrewer)
w <- sort(rowSums(tdm), decreasing=TRUE) #sort words in decreasing order
set.seed(9999)
wordcloud(words = names(w),
freq=w, max.words = 300,
random.order =FALSE) #words are specified in names in w dataframe, frequency is stored in w, random order=false
#specifying options in word cloud
#Specify that max words be no more than say, 200
#Freq for terms to be included in wordcloud (say they have to appear 5 times to be included)
#color words, specify scale (bigger words max size =3, smaller =0.2)
#rotate some words (rotation percentage = 30%)
wordcloud(words = names(w),
freq=w,
random.order =FALSE,
max.words = 200,
min.freq = 5,
colors = brewer.pal(8, 'Dark2'),
scale = c(3, 0.2),
rot.per = .3)
#SENTIMENT ANALYSIS USING R
#load packages
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(dplyr)
#Reading Files
#take the initial tesla tweet file (1000 obs and 16 vars for this)
#take the first column, text, and put it into the tweets vector
# Fix: the data frame created above is `tesla`; `tesla2` was never defined,
# so the original line errored with "object 'tesla2' not found".
tweets <- iconv(tesla$text, to = "utf-8-mac")
#obtain sentiment scores for each 1000 tweets
#nrc_sentiment dictionary is called to calculate presence of
#eight emotions & their corresponding valence in their text file
s <-get_nrc_sentiment(tweets)
head(s)
#runs through each tweet and finds words corresponding to each sentiment
#and a score is given (last 2 cols are positive and negative tweet categories)
tail(s)
tweets[996] #look at tweet number 996
#you could also look at phrases or words in these tweets to see if they
#lead to positive or negative')
get_nrc_sentiment('ridiculous')
get_nrc_sentiment('finally tested summon feature')
#plot the sentiment scores
#lets sum the column scores across tweets for the plot
#label y axis as total count, main title of plot label
barplot(colSums(s),
las = 2,
ylab = 'Total Count',
main ='Sentiment Scores for Tesla Tweets')
####SOCIAL NETWORK ANALYSIS###
tdm[1:20, 1:20] #lets look at our term document matrix, 10 rows, 10 cols
library(igraph)
tdm[tdm>1] <-1
#whenever our tdm value is more than 1 for a tweet we convert into 1 because we dont need the values 2, 3,
#we only need that the term appeared (freq of terms is not required in network analysis)
termM <-tdm %*% t(tdm) #transpose of tdm matrix; create tweet adjacency matrix using %*%
termM[1:10, 1:10] #term term matrix, alerts appeared in 8 tweets, alerts and nflx appeared in 3 tweets
g <- graph.adjacency(termM, weighted=T, mode ='undirected') #convert it into graph, no direction for edges
g
#remove terms that have loops (going to self)
g <- simplify(g)
#set labels and degrees of Vertices (V), each word is a vertices
V(g)$label <- V(g)$name #label is name
V(g)$label
V(g)$degree <- degree(g) #degree is the number of connections between terms
V(g)$degree
#Histogram of node degree, lets just use 100 bars (too many words), label of y and x axis
hist(V(g)$degree,
breaks=100,
col='green',
main ='histogram of node degree',
ylab ='frequency',
xlab='degree of vertices') #right skewed
#Network diagram
set.seed(9999)
plot(g) #interpretation is difficult so recreate more meaningful visuals
#Recreate this by looking at just the top terms/nodes by degree
tdm <- tdm[rowSums(tdm)>30,] #lets reduce the size and counts of total frequency (rowSum)
#include only terms having frequency more than 30
#it will take out all very infrequent terms
#Rerun all other code
tdm[tdm>1] <-1
termM <-tdm %*% t(tdm)
g <- graph.adjacency(termM, weighted=T, mode ='undirected')
g <- simplify(g)
V(g)$label <- V(g)$name
V(g)$degree <- degree(g)
#play with options such as size of vertex, distance of labels, etc. then have labels of vertex
set.seed(9999)
plot(g,
vertex.color='green',
vertex.size = 8,
vertex.label.dist =1.5)
#much more cleaner than earlier. You can further increase size of vertex by changing options
#there are some dense connections in the nodes (to near nodes)
#Community creation (edge betweenness)
comm <- cluster_edge_betweenness(g)
plot(comm, g)
#you can also do this by using propagating labels
prop <-cluster_label_prop(g)
plot(prop, g) #groupings for community detection are different - algorithms are different
greed <-cluster_fast_greedy(as.undirected(g)) #greedy algorithm for clustering
plot(greed, as.undirected(g))
#highlighting degrees for a different kind of plot (play around with the numbers below)
V(g)$label.cex <- 2.2*V(g)$degree / max(V(g)$degree) + 0.3
V(g)$label.color <- rgb(0, 0, .2, .8)
V(g)$frame.color <- NA
egam <- (log(E(g)$weight) + 0.4) / max(log(E(g)$weight) + .4)
E(g)$color <- rgb(0.5, 0.5, 0, egam)
E(g)$width <- egam
plot(g,
vertex.color ='green',
vertex.size = V(g)$degree*0.5) #vertex size vary by degree
##VISUALIZATION and INTERPRETATION
#Network of tweets
tweetM <- t(tdm)%*%tdm #transpose of tdm, create tweet adjacency matrix of tdm usign %*%
g <- graph.adjacency(tweetM, weighted =T, mode = 'undirected') #store graph adjacency in g
V(g)$degree <- degree(g)
g<- simplify(g) #remove loops
#Use 100 tweets to make histogram of degree
#Use 100 breaks to make a histogram of tweet-vertex degree
# Fix: the y-axis label argument was misspelled `ylabl`, so the intended
# label never appeared (it was passed through `...` as an unknown argument).
hist(V(g)$degree,
     breaks = 100,
     col = 'green',
     main = 'histogram of degree',
     ylab = 'frequencies',
     xlab = 'degree')
#Set labels of vertices to tweet IDs
V(g)$label <- V(g)$name #vertices g label is the name
V(g)$label.cex <-1 # label size
V(g)$label.color <- rgb(0.4, 0, 0, 0.7) #change the numbers and play around for color diff
V(g)$size <- 2 #size of g
V(g)$frame.color <- NA #no frame color or lines of frame
plot(g, vertex.label =NA, vertex.size=5) #indicate size of vertex, for now, dont put labels (too much crowding)
#delete some vertices
egam <- (log(E(g)$weight) + 0.2)/ max(log(E(g)$weight) + 0.2)
E(g)$color <- rgb(0.5, 0.5, 0, egam)
E(g)$width <- egam
g2 <- delete.vertices(g, V(g)[degree(g)<100]) #degree of g less than 100; get rid of no.of connections less than 100
#if you lose too many nodes, reduce the number
plot(g2,
vertex.label.cex =0.90,
vertex.label.color ='black')
# look at clustering of tweets (1000 tweets), look at increasing/decreasing the tweet vertices #
#Delete edges - delete some edges to make the network better
#(delete edges with weight less than 2) and (delete vertices with degree less than 400)
E(g)$color <- rgb(0.5, 0.5, 0, egam)
E(g)$width <- egam
# Fix: the original `E(g)$weight <- 2` was an *assignment*, which overwrote
# every edge weight with 2 and then deleted only edge number 2. A comparison
# is needed to select the low-weight edges for deletion.
g3 <- delete.edges(g, E(g)[E(g)$weight < 2])
g3 <- delete.vertices(g3, V(g3)[degree(g3) < 400])
plot(g3)
# Fix: the data frame is named `tesla`; `tesla2` was never created.
tesla$text[c(523, 326)] #check difference between two different tweets
#take some samples of two/three major groups and find what the differences among the tweets are
tesla$text[c(481, 962)]
tesla$text[c(657, 834)]
|
#' EuPathDB: Access EuPathDB annotations using AnnotationHub
#'
#' EuPathDB provides an R interface for retrieving annotation resources from
#' the EuPathDB databases: AmoebaDB, CryptoDB, FungiDB, GiardiaDB,
#' MicrosporidiaDB, PiroplasmaDB, PlasmoDB, ToxoDB, TrichDB, and TriTrypDB
#' using the Bioconductor AnnotationHub framework.
#'
#' There are currently two types of Bioconductor resources which can be
#' retrieved for 194 supported organisms from the various EuPathDB databases:
#'
#' \itemize{
#' \item OrgDB resources
#' \item GRanges resources
#' }
#'
#' The OrgDB resources provides gene level information including chromosome,
#' location, name, description, orthologs, and associated GO terms.
#'
#' The GRanges resources provide transcript-level information such as known
#' exons and their corresponding locations.
#'
#' Each of these resources are generated using information obtained from the
#' EuPathDB GFF files along with queries made through the various EuPathDB web
#' APIs.
#'
#' For examples of how EuPathDB can be used to query and interact with
#' EuPathDB.org resources, take a look at the vignette:
#' \code{browseVignettes(package="EuPathDB")}
#'
#' Use \code{availableEuPathDB()} to get a vector of available organisms.
#'
#' @docType package
#' @name EuPathDB
#' @import jsonlite
#' @import dplyr
#' @import httr
#' @import GenomeInfoDbData
#' @import rvest
#' @import xml2
#' @import utils
#' @importFrom data.table data.table
#' @importFrom dplyr filter group_by n summarise
#' @importFrom foreach foreach
#' @importFrom glue glue glue_data
#' @seealso \code{\link{AnnotationHub}}
#' @seealso \code{\link{GRanges}}
#' @seealso \url{http://eupathdb.org/eupathdb/}
#' @author Keith Hughitt and Ashton Belew
NULL
#' Get started with EuPathDB
#'
#' This function has always been here. To be honest, I am not completely sure of its purpose.
#'
#' @param type Choose this type of metadatum to open.
#' @return Used for its side-effect of opening the package vignette. A
#' vector of experiment identifiers.
#' @author Keith Hughitt
#' @aliases availableEuPathDB
#' @examples start_eupathdb()
#' @export
start_eupathdb <- function(type="GRanges") {
  ## Renamed from the package name to dodge an R CMD check complaint about
  ## man-page case mismatches: there is a roxygen block for 'EuPathDB', so a
  ## same-named function confuses the check.
  utils::vignette("reference", package="EuPathDB")
  ## Collect the metadata csv files bundled with the package and arbitrarily
  ## take the first one whose filename matches the requested resource type.
  available <- list.files(path=system.file("extdata", package="EuPathDB"))
  chosen <- available[grep(pattern=type, x=available)[1]]
  message("Showing species in metadata file: ", chosen)
  csv_path <- system.file("extdata", chosen, package="EuPathDB")
  species <- read.csv(csv_path, stringsAsFactors = FALSE)[["Species"]]
  return(sort(species))
}
#' Pipe operator
#'
#' Shamelessly scabbed from Hadley: https://github.com/sckott/analogsea/issues/32
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
#' R CMD check is super annoying about :::.
#'
#' In a fit of pique, I did a google search to see if anyone else has been
#' annoyed in the same way as I. I was not surprised to see that Yihui
#' Xie was, and in his email to r-devel in 2013 he proposed a game of
#' hide-and-seek; a game which I am repeating here.
#'
#' This just implements ::: as an infix operator that will not trip check.
#'
#' @param pkg on the left hand side
#' @param fun on the right hand side
`%:::%` <- function(pkg, fun) {
  ## Fetch `fun` straight out of pkg's namespace (exported or not) without
  ## writing `pkg:::fun`, which would trip R CMD check.
  ns <- asNamespace(pkg)
  get(fun, envir = ns, inherits = FALSE)
}
## Pull four un-exported helper functions out of the GenomicFeatures
## namespace (via the `%:::%` operator above) so they can be called locally
## without a literal ':::' appearing in the code.
getMaintainer <- "GenomicFeatures" %:::% ".getMaintainer"
getMetaDataValue <- "GenomicFeatures" %:::% ".getMetaDataValue"
getTxDbVersion <- "GenomicFeatures" %:::% ".getTxDbVersion"
normAuthor <- "GenomicFeatures" %:::% ".normAuthor"
## EOF
|
/R/eupathdb.R
|
no_license
|
hupef/EuPathDB
|
R
| false
| false
| 4,022
|
r
|
#' EuPathDB: Access EuPathDB annotations using AnnotationHub
#'
#' EuPathDB provides an R interface for retrieving annotation resources from
#' the EuPathDB databases: AmoebaDB, CryptoDB, FungiDB, GiardiaDB,
#' MicrosporidiaDB, PiroplasmaDB, PlasmoDB, ToxoDB, TrichDB, and TriTrypDB
#' using the Bioconductor AnnotationHub framework.
#'
#' There are currently two types of Bioconductor resources which can be
#' retrieved for 194 supported organisms from the various EuPathDB databases:
#'
#' \itemize{
#' \item OrgDB resources
#' \item GRanges resources
#' }
#'
#' The OrgDB resources provides gene level information including chromosome,
#' location, name, description, orthologs, and associated GO terms.
#'
#' The GRanges resources provide transcript-level information such as known
#' exons and their corresponding locations.
#'
#' Each of these resources are generated using information obtained from the
#' EuPathDB GFF files along with queries made through the various EuPathDB web
#' APIs.
#'
#' For examples of how EuPathDB can be used to query and interact with
#' EuPathDB.org resources, take a look at the vignette:
#' \code{browseVignettes(package="EuPathDB")}
#'
#' Use \code{availableEuPathDB()} to get a vector of available organisms.
#'
#' @docType package
#' @name EuPathDB
#' @import jsonlite
#' @import dplyr
#' @import httr
#' @import GenomeInfoDbData
#' @import rvest
#' @import xml2
#' @import utils
#' @importFrom data.table data.table
#' @importFrom dplyr filter group_by n summarise
#' @importFrom foreach foreach
#' @importFrom glue glue glue_data
#' @seealso \code{\link{AnnotationHub}}
#' @seealso \code{\link{GRanges}}
#' @seealso \url{http://eupathdb.org/eupathdb/}
#' @author Keith Hughitt and Ashton Belew
NULL
#' Get started with EuPathDB
#'
#' This function has always been here. To be honest, I am not completely sure of its purpose.
#'
#' @param type Choose this type of metadatum to open.
#' @return Used for its side-effect of opening the package vignette. A
#' vector of experiment identifiers.
#' @author Keith Hughitt
#' @aliases availableEuPathDB
#' @examples start_eupathdb()
#' @export
start_eupathdb <- function(type="GRanges") {
  ## Renamed from the package name to dodge an R CMD check complaint about
  ## man-page case mismatches: there is a roxygen block for 'EuPathDB', so a
  ## same-named function confuses the check.
  utils::vignette("reference", package="EuPathDB")
  ## Collect the metadata csv files bundled with the package and arbitrarily
  ## take the first one whose filename matches the requested resource type.
  available <- list.files(path=system.file("extdata", package="EuPathDB"))
  chosen <- available[grep(pattern=type, x=available)[1]]
  message("Showing species in metadata file: ", chosen)
  csv_path <- system.file("extdata", chosen, package="EuPathDB")
  species <- read.csv(csv_path, stringsAsFactors = FALSE)[["Species"]]
  return(sort(species))
}
#' Pipe operator
#'
#' Shamelessly scabbed from Hadley: https://github.com/sckott/analogsea/issues/32
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
#' R CMD check is super annoying about :::.
#'
#' In a fit of pique, I did a google search to see if anyone else has been
#' annoyed in the same way as I. I was not surprised to see that Yihui
#' Xie was, and in his email to r-devel in 2013 he proposed a game of
#' hide-and-seek; a game which I am repeating here.
#'
#' This just implements ::: as an infix operator that will not trip check.
#'
#' @param pkg on the left hand side
#' @param fun on the right hand side
`%:::%` <- function(pkg, fun) {
  ## Fetch `fun` straight out of pkg's namespace (exported or not) without
  ## writing `pkg:::fun`, which would trip R CMD check.
  ns <- asNamespace(pkg)
  get(fun, envir = ns, inherits = FALSE)
}
## Pull four un-exported helper functions out of the GenomicFeatures
## namespace (via the `%:::%` operator above) so they can be called locally
## without a literal ':::' appearing in the code.
getMaintainer <- "GenomicFeatures" %:::% ".getMaintainer"
getMetaDataValue <- "GenomicFeatures" %:::% ".getMetaDataValue"
getTxDbVersion <- "GenomicFeatures" %:::% ".getTxDbVersion"
normAuthor <- "GenomicFeatures" %:::% ".normAuthor"
## EOF
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dendro.resample.R
\name{dendro.resample}
\alias{dendro.resample}
\title{Resampling temporal resolution of dendrometer data}
\usage{
dendro.resample(df, by, value)
}
\arguments{
\item{df}{dataframe with first column containing date and time in the format \code{yyyy-mm-dd HH:MM:SS}.}
\item{by}{either \emph{H, D, W} or \emph{M} to resample data into hourly, daily, weekly or monthly resolution.}
\item{value}{either \emph{max, min} or \emph{mean} for the resampling value.}
}
\value{
Dataframe with resampled data.
}
\description{
This function is designed to change the temporal resolution of data. Depending on the objective, the user can define either maximum, minimum, or mean values to resample data in hourly, daily, weekly or monthly frequency.
}
\examples{
library(dendRoAnalyst)
data(nepa17)
# To resample monthly with maximum value
resample_M<-dendro.resample(df=nepa17[,1:2], by='M', value='max')
head(resample_M,10)
}
|
/man/dendro.resample.Rd
|
no_license
|
sugam72-os/dendRoAnalyst-1
|
R
| false
| true
| 1,010
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dendro.resample.R
\name{dendro.resample}
\alias{dendro.resample}
\title{Resampling temporal resolution of dendrometer data}
\usage{
dendro.resample(df, by, value)
}
\arguments{
\item{df}{dataframe with first column containing date and time in the format \code{yyyy-mm-dd HH:MM:SS}.}
\item{by}{either \emph{H, D, W} or \emph{M} to resample data into hourly, daily, weekly or monthly resolution.}
\item{value}{either \emph{max, min} or \emph{mean} for the resampling value.}
}
\value{
Dataframe with resampled data.
}
\description{
This function is designed to change the temporal resolution of data. Depending on the objective, the user can define either maximum, minimum, or mean values to resample data in hourly, daily, weekly or monthly frequency.
}
\examples{
library(dendRoAnalyst)
data(nepa17)
# To resample monthly with maximum value
resample_M<-dendro.resample(df=nepa17[,1:2], by='M', value='max')
head(resample_M,10)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utilities.R
\name{findQTLPeaks}
\alias{findQTLPeaks}
\title{Find QTL peaks}
\usage{
findQTLPeaks(qtls, mrk, pcutoff = 0.05, peak_sigma = 25,
peak_threshold = 1, ...)
}
\arguments{
\item{qtls}{}
}
\value{
Data Frame of peaks
}
\description{
The function \code{\link{findQTLPeaks}} finds QTL peaks.
}
|
/man/findQTLPeaks.Rd
|
no_license
|
scalefreegan/clustQTL
|
R
| false
| false
| 371
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utilities.R
\name{findQTLPeaks}
\alias{findQTLPeaks}
\title{Find QTL peaks}
\usage{
findQTLPeaks(qtls, mrk, pcutoff = 0.05, peak_sigma = 25,
peak_threshold = 1, ...)
}
\arguments{
\item{qtls}{}
}
\value{
Data Frame of peaks
}
\description{
The function \code{\link{findQTLPeaks}}
}
|
#HW 6 Kevin Niemann
setwd('C:/Users/kevin/Google Drive/datasci/HW6')
#install.packages("pls")
#install.packages("glmnet")
library(pls)
library(glmnet)
set.seed(20) # You will need to set the seed to 20 for this to work.
# Retrieve Breast Cancer Expression Data From the following Study:
#http://www.ncbi.nlm.nih.gov/pubmed/21532620
micro_data=read.table("MicroArray.txt", header=TRUE)
dim(micro_data)
# Normalize each column
micro_data = scale(micro_data)
# Breast Cancer Samples:
cancer_samples = c(0,0,0,0,0,1,0,0,0,1,0,1,0,0,1,0,0,0,1)
# Think of this as ~ 10,000 possible variables (genes) to predict 19 outcomes.
# Convert to data.frame
micro_frame = data.frame(t(micro_data))
micro_frame$outcomes = cancer_samples
##-----Lasso Regression-----
# user glmnet, where family = 'binomial'
inputs = model.matrix(outcomes ~ ., data = micro_frame)
cancer_lasso = glmnet(inputs, micro_frame$outcomes, family='binomial', alpha = 1)
# See how the variables vary with the different regularization factor, lambda
coef(cancer_lasso)[,20][coef(cancer_lasso)[,20]>1e-10]
plot(cancer_lasso, xvar="lambda")
# Now use cv.glmnet to test different lasso cutoffs
cancer_lasso_cv = cv.glmnet(inputs,micro_frame$outcomes,alpha=1,family='binomial')
plot(cancer_lasso_cv)
# find the minumum lambda.min
best_lambda = cancer_lasso_cv$lambda.min
# Find the coefficients that are nonzero at the best lambda.
# Fix: use abs() so negative (nonzero) coefficients are kept too; the
# original `best_coef > 1e-10` silently discarded them.
best_coef = coef(cancer_lasso)[, cancer_lasso$lambda == best_lambda]
best_coef = best_coef[abs(best_coef) > 1e-10]
# Plug this into the glm(...,family='binomial') to get the logistic outcome.
# Fix: the original built `inputFormula` but never used it, and instead
# pasted the gene names element-wise (a character *vector*, one string per
# gene) into as.formula. Join the names with '+' and build one formula.
inputFormula = paste(names(best_coef)[-1], collapse = " + ")
formula = as.formula(paste("outcomes ~", inputFormula))
has_cancer = glm(formula, family = 'binomial', data = micro_frame)
# Compare with the real outcome, cancer_samples above
|
/qtr2/hw6/HW6.R
|
no_license
|
kniemann/data-projects
|
R
| false
| false
| 1,892
|
r
|
#HW 6 Kevin Niemann
setwd('C:/Users/kevin/Google Drive/datasci/HW6')
#install.packages("pls")
#install.packages("glmnet")
library(pls)
library(glmnet)
set.seed(20) # You will need to set the seed to 20 for this to work.
# Retrieve Breast Cancer Expression Data From the following Study:
# http://www.ncbi.nlm.nih.gov/pubmed/21532620
micro_data <- read.table("MicroArray.txt", header = TRUE)
dim(micro_data)
# Normalize each column (mean 0, sd 1) so genes are on a comparable scale.
micro_data <- scale(micro_data)
# Breast Cancer Samples (1 = cancer, 0 = normal):
cancer_samples <- c(0,0,0,0,0,1,0,0,0,1,0,1,0,0,1,0,0,0,1)
# Think of this as ~ 10,000 possible variables (genes) to predict 19 outcomes.
# Transpose so rows = samples and columns = genes, then attach the outcome.
micro_frame <- data.frame(t(micro_data))
micro_frame$outcomes <- cancer_samples
##-----Lasso Regression-----
# Use glmnet with family = 'binomial' (logistic lasso, alpha = 1).
inputs <- model.matrix(outcomes ~ ., data = micro_frame)
cancer_lasso <- glmnet(inputs, micro_frame$outcomes, family = 'binomial', alpha = 1)
# See how the variables vary with the different regularization factor, lambda
coef(cancer_lasso)[, 20][coef(cancer_lasso)[, 20] > 1e-10]
plot(cancer_lasso, xvar = "lambda")
# Now use cv.glmnet to test different lasso cutoffs
cancer_lasso_cv <- cv.glmnet(inputs, micro_frame$outcomes, alpha = 1, family = 'binomial')
plot(cancer_lasso_cv)
# Find the lambda that minimizes the cross-validated deviance.
best_lambda <- cancer_lasso_cv$lambda.min
# Keep the coefficients greater than zero at best_lambda.
# NOTE(review): this also discards *negative* nonzero lasso coefficients;
# use abs(best_coef) > 1e-10 if those genes should be retained as well.
best_coef <- coef(cancer_lasso)[, cancer_lasso$lambda == best_lambda]
best_coef <- best_coef[best_coef > 1e-10]
# Refit the selected genes with glm(..., family='binomial') to get the
# logistic outcome. BUG FIX: the formula must use the collapsed
# "g1 + g2 + ..." string (inputFormula); the original pasted the raw name
# vector, producing a character vector that as.formula() cannot use, and
# leaving inputFormula computed but unused.
inputFormula <- paste(names(best_coef[-1]), collapse = " + ")
formula <- as.formula(paste("outcomes ~", inputFormula))
has_cancer <- glm(formula, family = 'binomial', data = micro_frame)
# Compare with the real outcome, cancer_samples above
|
# install.packages('rvest')
library(rvest)
# Scrape the article titles from the Liberty Times (ltn) baseball section.
title=read_html("http://sports.ltn.com.tw/baseball")
title=html_nodes(title,".boxTitle .listA .list_title")
title=html_text(title) # keep only the text content of the matched nodes
# title=iconv(title,"UTF-8")
title
# Scrape the corresponding article links (href attributes) from the same page.
url=read_html("http://sports.ltn.com.tw/baseball")
url=html_nodes(url,".boxTitle .listA a")
url=html_attr(url,"href")
url
# NOTE(review): this loop is an unfinished placeholder -- the bare string
# expression has no side effect, so nothing is fetched or stored.
for (i in c(1:9)) {
"http://sports.ltn.com.tw/baseball/7"
}
|
/week_3/Week 3加強.R
|
no_license
|
PeterChiu1202/Politics-and-Information
|
R
| false
| false
| 434
|
r
|
# install.packages('rvest')
library(rvest)
# Scrape the article titles from the Liberty Times (ltn) baseball section.
title=read_html("http://sports.ltn.com.tw/baseball")
title=html_nodes(title,".boxTitle .listA .list_title")
title=html_text(title) # keep only the text content of the matched nodes
# title=iconv(title,"UTF-8")
title
# Scrape the corresponding article links (href attributes) from the same page.
url=read_html("http://sports.ltn.com.tw/baseball")
url=html_nodes(url,".boxTitle .listA a")
url=html_attr(url,"href")
url
# NOTE(review): this loop is an unfinished placeholder -- the bare string
# expression has no side effect, so nothing is fetched or stored.
for (i in c(1:9)) {
"http://sports.ltn.com.tw/baseball/7"
}
|
#' @export
print.json <- function( x, ... ) cat( x )
#' @export
print.ndjson <- function( x, ... ) cat( x )
#' Pretty Json
#'
#' Adds indentation to a JSON string
#'
#' @param json string of JSON
#' @param ... other arguments passed to \link{to_json}
#'
#' @examples
#'
#' df <- data.frame(id = 1:10, val = rnorm(10))
#' js <- to_json( df )
#' pretty_json(js)
#'
#' ## can also use directly on an R object
#' pretty_json( df )
#'
#' @export
pretty_json <- function( json, ... ) UseMethod("pretty_json")
#' @export
pretty_json.json <- function( json, ... ) rcpp_pretty_json( json )
#' @export
pretty_json.character <- function( json, ... ) pretty_json( as.json( json ) )
# Default method: serialise the R object to JSON first, then pretty-print.
#' @export
pretty_json.default <- function( json, ... ) {
  js <- to_json( json, ... )
  rcpp_pretty_json( js )
}
#' Minify Json
#'
#' Removes indentation from a JSON string
#'
#' @param json string of JSON
#' @param ... other arguments passed to \link{to_json}
#'
#' @examples
#'
#' df <- data.frame(id = 1:10, val = rnorm(10))
#' js <- to_json( df )
#' jsp <- pretty_json(js)
#' minify_json( jsp )
#'
#' @export
minify_json <- function( json, ... ) UseMethod("minify_json")
#' @export
minify_json.json <- function( json, ... ) rcpp_minify_json( json )
#' @export
minify_json.character <- function( json, ... ) minify_json( as.json( json ) )
# Default method: to_json() already emits minified JSON, so serialising is enough.
#' @export
minify_json.default <- function( json, ... ) to_json( json, ... )
|
/R/pretty.R
|
permissive
|
SymbolixAU/jsonify
|
R
| false
| false
| 1,419
|
r
|
#' @export
print.json <- function( x, ... ) cat( x )
#' @export
print.ndjson <- function( x, ... ) cat( x )
#' Pretty Json
#'
#' Adds indentation to a JSON string
#'
#' @param json string of JSON
#' @param ... other arguments passed to \link{to_json}
#'
#' @examples
#'
#' df <- data.frame(id = 1:10, val = rnorm(10))
#' js <- to_json( df )
#' pretty_json(js)
#'
#' ## can also use directly on an R object
#' pretty_json( df )
#'
#' @export
pretty_json <- function( json, ... ) UseMethod("pretty_json")
#' @export
pretty_json.json <- function( json, ... ) rcpp_pretty_json( json )
#' @export
pretty_json.character <- function( json, ... ) pretty_json( as.json( json ) )
# Default method: serialise the R object to JSON first, then pretty-print.
#' @export
pretty_json.default <- function( json, ... ) {
  js <- to_json( json, ... )
  rcpp_pretty_json( js )
}
#' Minify Json
#'
#' Removes indentation from a JSON string
#'
#' @param json string of JSON
#' @param ... other arguments passed to \link{to_json}
#'
#' @examples
#'
#' df <- data.frame(id = 1:10, val = rnorm(10))
#' js <- to_json( df )
#' jsp <- pretty_json(js)
#' minify_json( jsp )
#'
#' @export
minify_json <- function( json, ... ) UseMethod("minify_json")
#' @export
minify_json.json <- function( json, ... ) rcpp_minify_json( json )
#' @export
minify_json.character <- function( json, ... ) minify_json( as.json( json ) )
# Default method: to_json() already emits minified JSON, so serialising is enough.
#' @export
minify_json.default <- function( json, ... ) to_json( json, ... )
|
##' QGIS Algorithm provided by QGIS (native c++) Extract M values (native:extractmvalues)
##'
##' @title QGIS algorithm Extract M values
##'
##' @param INPUT `source` - Input layer. Path to a vector layer.
##' @param SUMMARIES `enum` of `("First", "Last", "Count", "Sum", "Mean", "Median", "St dev (pop)", "Minimum", "Maximum", "Range", "Minority", "Majority", "Variety", "Q1", "Q3", "IQR")` - Summaries to calculate. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.
##' @param COLUMN_PREFIX `string` - Output column prefix. String value.
##' @param OUTPUT `sink` - Extracted. Path for new vector layer.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifying if complete output of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or first output (most likely the main) should be read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * OUTPUT - outputVector - Extracted
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
qgis_extractmvalues <- function(INPUT = qgisprocess::qgis_default_value(), SUMMARIES = qgisprocess::qgis_default_value(), COLUMN_PREFIX = qgisprocess::qgis_default_value(), OUTPUT = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Fails early if the QGIS installation does not provide this algorithm.
  check_algorithm_necessities("native:extractmvalues")
  output <- qgisprocess::qgis_run_algorithm("native:extractmvalues", `INPUT` = INPUT, `SUMMARIES` = SUMMARIES, `COLUMN_PREFIX` = COLUMN_PREFIX, `OUTPUT` = OUTPUT,...)
  if (.complete_output) {
    return(output)
  }
  else{
    # Only extract the primary OUTPUT element of the algorithm result.
    qgisprocess::qgis_output(output, "OUTPUT")
  }
}
|
/R/qgis_extractmvalues.R
|
permissive
|
VB6Hobbyst7/r_package_qgis
|
R
| false
| false
| 1,691
|
r
|
##' QGIS Algorithm provided by QGIS (native c++) Extract M values (native:extractmvalues)
##'
##' @title QGIS algorithm Extract M values
##'
##' @param INPUT `source` - Input layer. Path to a vector layer.
##' @param SUMMARIES `enum` of `("First", "Last", "Count", "Sum", "Mean", "Median", "St dev (pop)", "Minimum", "Maximum", "Range", "Minority", "Majority", "Variety", "Q1", "Q3", "IQR")` - Summaries to calculate. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.
##' @param COLUMN_PREFIX `string` - Output column prefix. String value.
##' @param OUTPUT `sink` - Extracted. Path for new vector layer.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifying if complete output of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or first output (most likely the main) should be read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * OUTPUT - outputVector - Extracted
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
qgis_extractmvalues <- function(INPUT = qgisprocess::qgis_default_value(), SUMMARIES = qgisprocess::qgis_default_value(), COLUMN_PREFIX = qgisprocess::qgis_default_value(), OUTPUT = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Fails early if the QGIS installation does not provide this algorithm.
  check_algorithm_necessities("native:extractmvalues")
  output <- qgisprocess::qgis_run_algorithm("native:extractmvalues", `INPUT` = INPUT, `SUMMARIES` = SUMMARIES, `COLUMN_PREFIX` = COLUMN_PREFIX, `OUTPUT` = OUTPUT,...)
  if (.complete_output) {
    return(output)
  }
  else{
    # Only extract the primary OUTPUT element of the algorithm result.
    qgisprocess::qgis_output(output, "OUTPUT")
  }
}
|
# Copyright 2015 Morningstar, Inc.
#library(xlsx)
#library(gdata)
library(XLConnect)
library(ggplot2)
# RBSA methodology https://corporate.morningstar.com/ib/documents/MethodologyDocuments/IBBAssociates/ReturnsBasedAnalysis.pdf
# data dump from PresentationStudio
# Load and reshape the RBSA spreadsheet: r securities x p periods, six style
# weight columns per (security, period) cell. Result: one long data.frame with
# a row per security-period; the six weights are validated to sum to 100.
ds<-(function(){
  xlsFile<-"OE Equity 1 - RBSA_20141010.xls"
  wb<-loadWorkbook(xlsFile)
  r=12  # number of securities (data rows in the sheet)
  p=26  # number of periods; each period after the first spans an 8-column stride
  endCol = 2+1*8+(p-1)*8
  names<-readWorksheet(wb, 1, startRow=9, endRow=9, startCol=1, endCol=endCol, header=FALSE)
  names<-c(names[1:8], "period")
  # Rows 7-8 hold each period's start/end month; join them as "start~end".
  months<-readWorksheet(wb, 1, startRow=7, endRow=8, startCol=1, endCol=endCol, header=FALSE)
  months<-paste(months[1,], months[2,], sep="~")[c(1,(1:(p-1))*8+2)]
  data<-readWorksheet(wb, 1, startRow=12, endRow=12+r-1, startCol=1, endCol=endCol, header=FALSE)
  ds<-data.frame()
  for(i in 1:p){
    # Columns 1-2 are id/name; period 1's six weights start at column 3,
    # later periods are offset by the 8-column stride.
    t<-data[,c(1, 2, 1:6+ifelse(i==1, 2, 2+1+(i-1)*8))]
    t$period<-months[i]
    names(t)<-names
    ds<-rbind(ds, t)
  }
  # Sanity check: the six style weights must sum to 100% for every row.
  if(any(round(rowSums(ds[,3:8])) != 100)){
    stop("input should be rescaled")
  }
  ds
})()
# The goal is to show style index and subject security on the same chart,
# also to show style drift. There is no clear meaning of X or Y axis, unlike
# the HB style charts. The chart simply assigns 2-D positions to represent
# the centroids, or center of masses. When the security is white noise data,
# cannot be explained by any of the style indexes, the center of mass should
# be (0, 0). As long as this criteria is met, the style index can be arranged
# in any position. The position of a security is also its center of mass, thus
# it is a weighted average of all masses of style indexes, with the weight
# provided in the dataset. When the assigned-position of style index changed,
# the chart may show different shape.
# 4, 6 or 9. In type "9", there is one style index whose assigned position is
# (0, 0), which means it has no mass and does not affect the shape of chart
# NOTE(review): this assignment shadows base::matrix() for the rest of the
# script; any later call to matrix(...) would hit this data.frame instead.
matrix<-(makeMatrix<-function(type=6){
  # merge() of two plain vectors yields their Cartesian product, i.e. the
  # grid of assigned (x, y) anchor positions for the style indexes.
  switch(as.character(type),
         "4" = merge(c(-1, 1), c(1, -1)),
         "6" = merge(c(-1, 1), c(1, 0, -1)),
         "9" = merge(c(-1, 0, 1), c(1, 0, -1)))
})()
# points: each security-period position is the weight-averaged centroid of the
# style-index anchors (weights are percentages, hence the division by 100).
xy<-(function(){
  xy<-as.data.frame(as.matrix(ds[,3:8])%*%as.matrix(matrix))
  cbind(SecId=ds$SecId, Name=ds[,2], Period=ds$period, as.data.frame(xy/100))
})()
# plot: centroids sized by period and coloured by security, with the fixed
# style-index anchors overlaid; rendered to screen and to rbsa.PNG.
(function(){
  rbsa<-(function(){
    r=1.5  # half-width of the square plotting window
    nameColors=sub("^Benchmark(.*?):\\s*", "", xy$Name)
    # Only label the (lexicographically) first and last period in the legend.
    periodSizes=c(min(levels(xy$Period)), max(levels(xy$Period)))
    ggplot(xy)+
      geom_point(aes(x, y,
                     size=Period,
                     shape=grepl("^Benchmark", Name),
                     color=sub("^Benchmark(.*?):\\s*", "", Name)),
                 alpha=0.4)+
      scale_color_discrete(name="Security",
                           breaks=nameColors)+
      scale_shape_discrete(name="Benchmark")+
      scale_size_discrete(breaks=periodSizes)+
      guides(color = guide_legend(override.aes = list(size=3))) +
      guides(shape = guide_legend(override.aes = list(size=3))) +
      xlab("")+
      ylab("")+
      coord_fixed()+
      xlim(-r, r)+
      ylim(-r,r)+
      theme(legend.key = element_rect(fill="#ffffff"))+
      geom_point(data=matrix,
                 aes(matrix[1], matrix[2]),
                 size=3,
                 color="#000000",
                 alpha=0.3)+
      geom_text(data=matrix,
                aes(matrix[1], matrix[2], label=names(ds)[3:8]),
                size=4,
                vjust=1.2)
  })()
  # plot to screen, then to file
  print(rbsa)
  png("rbsa.PNG", width=800, height=600)
  print(rbsa)
  dev.off()
})()
|
/rbsa.R
|
no_license
|
racoon971/dss.Morningstar.RBSA
|
R
| false
| false
| 3,585
|
r
|
# Copyright 2015 Morningstar, Inc.
#library(xlsx)
#library(gdata)
library(XLConnect)
library(ggplot2)
# RBSA methodology https://corporate.morningstar.com/ib/documents/MethodologyDocuments/IBBAssociates/ReturnsBasedAnalysis.pdf
# data dump from PresentationStudio
# Load and reshape the RBSA spreadsheet: r securities x p periods, six style
# weight columns per (security, period) cell. Result: one long data.frame with
# a row per security-period; the six weights are validated to sum to 100.
ds<-(function(){
  xlsFile<-"OE Equity 1 - RBSA_20141010.xls"
  wb<-loadWorkbook(xlsFile)
  r=12  # number of securities (data rows in the sheet)
  p=26  # number of periods; each period after the first spans an 8-column stride
  endCol = 2+1*8+(p-1)*8
  names<-readWorksheet(wb, 1, startRow=9, endRow=9, startCol=1, endCol=endCol, header=FALSE)
  names<-c(names[1:8], "period")
  # Rows 7-8 hold each period's start/end month; join them as "start~end".
  months<-readWorksheet(wb, 1, startRow=7, endRow=8, startCol=1, endCol=endCol, header=FALSE)
  months<-paste(months[1,], months[2,], sep="~")[c(1,(1:(p-1))*8+2)]
  data<-readWorksheet(wb, 1, startRow=12, endRow=12+r-1, startCol=1, endCol=endCol, header=FALSE)
  ds<-data.frame()
  for(i in 1:p){
    # Columns 1-2 are id/name; period 1's six weights start at column 3,
    # later periods are offset by the 8-column stride.
    t<-data[,c(1, 2, 1:6+ifelse(i==1, 2, 2+1+(i-1)*8))]
    t$period<-months[i]
    names(t)<-names
    ds<-rbind(ds, t)
  }
  # Sanity check: the six style weights must sum to 100% for every row.
  if(any(round(rowSums(ds[,3:8])) != 100)){
    stop("input should be rescaled")
  }
  ds
})()
# The goal is to show style index and subject security on the same chart,
# also to show style drift. There is no clear meaning of X or Y axis, unlike
# the HB style charts. The chart simply assigns 2-D positions to represent
# the centroids, or center of masses. When the security is white noise data,
# cannot be explained by any of the style indexes, the center of mass should
# be (0, 0). As long as this criteria is met, the style index can be arranged
# in any position. The position of a security is also its center of mass, thus
# it is a weighted average of all masses of style indexes, with the weight
# provided in the dataset. When the assigned-position of style index changed,
# the chart may show different shape.
# 4, 6 or 9. In type "9", there is one style index whose assigned position is
# (0, 0), which means it has no mass and does not affect the shape of chart
# NOTE(review): this assignment shadows base::matrix() for the rest of the
# script; any later call to matrix(...) would hit this data.frame instead.
matrix<-(makeMatrix<-function(type=6){
  # merge() of two plain vectors yields their Cartesian product, i.e. the
  # grid of assigned (x, y) anchor positions for the style indexes.
  switch(as.character(type),
         "4" = merge(c(-1, 1), c(1, -1)),
         "6" = merge(c(-1, 1), c(1, 0, -1)),
         "9" = merge(c(-1, 0, 1), c(1, 0, -1)))
})()
# points: each security-period position is the weight-averaged centroid of the
# style-index anchors (weights are percentages, hence the division by 100).
xy<-(function(){
  xy<-as.data.frame(as.matrix(ds[,3:8])%*%as.matrix(matrix))
  cbind(SecId=ds$SecId, Name=ds[,2], Period=ds$period, as.data.frame(xy/100))
})()
# plot: centroids sized by period and coloured by security, with the fixed
# style-index anchors overlaid; rendered to screen and to rbsa.PNG.
(function(){
  rbsa<-(function(){
    r=1.5  # half-width of the square plotting window
    nameColors=sub("^Benchmark(.*?):\\s*", "", xy$Name)
    # Only label the (lexicographically) first and last period in the legend.
    periodSizes=c(min(levels(xy$Period)), max(levels(xy$Period)))
    ggplot(xy)+
      geom_point(aes(x, y,
                     size=Period,
                     shape=grepl("^Benchmark", Name),
                     color=sub("^Benchmark(.*?):\\s*", "", Name)),
                 alpha=0.4)+
      scale_color_discrete(name="Security",
                           breaks=nameColors)+
      scale_shape_discrete(name="Benchmark")+
      scale_size_discrete(breaks=periodSizes)+
      guides(color = guide_legend(override.aes = list(size=3))) +
      guides(shape = guide_legend(override.aes = list(size=3))) +
      xlab("")+
      ylab("")+
      coord_fixed()+
      xlim(-r, r)+
      ylim(-r,r)+
      theme(legend.key = element_rect(fill="#ffffff"))+
      geom_point(data=matrix,
                 aes(matrix[1], matrix[2]),
                 size=3,
                 color="#000000",
                 alpha=0.3)+
      geom_text(data=matrix,
                aes(matrix[1], matrix[2], label=names(ds)[3:8]),
                size=4,
                vjust=1.2)
  })()
  # plot to screen, then to file
  print(rbsa)
  png("rbsa.PNG", width=800, height=600)
  print(rbsa)
  dev.off()
})()
|
## R
# https://stackoverflow.com/questions/25136059/how-to-show-working-directory-in-r-prompt
# Build a short prompt string "host:~/a/b/lastdir/> " from the current working
# directory: the home prefix is replaced by "~" and every intermediate
# directory is abbreviated to its first letter (the last one kept in full).
myRPrompt <- function(...) {
  verbose <- FALSE  # was `F`: T/F are reassignable, use the literals
  p <- getwd() # absolute path as pwd
  if (verbose) message("getwd(): ", getwd())
  #home <- regexpr(path.expand("~"), p)
  # Resolve symlinks so the match still works when $HOME is itself a symlink.
  home <- regexpr(system("readlink -f ~", intern=TRUE), p)
  if (home != -1) { # remove home part
    p <- substr(p, start=home + attr(home, "match.length"), stop=nchar(p))
    if (p == "") p <- "/" # if in home itself (~/)
    if (verbose) message("home: ", p)
  }
  # shorten directory names: only one letter after every .Platform$file.sep except the last directory
  seps <- gregexpr(.Platform$file.sep, p)[[1]]
  if (length(seps) > 1) {
    if (verbose) message("seps: ", paste0(seps, collapse=", "))
    # `times=` spelled out: the original's `t=` relied on partial matching.
    ps <- rep(NA, times=length(seps) - 1)
    for (i in seq_len(length(seps) - 1)) { # keep the last (top) directory full name
      ps[i] <- substr(p, start=seps[i] + 1, stop=seps[i] + 1)
    }
    p <- paste0(.Platform$file.sep, paste0(ps, collapse=.Platform$file.sep), .Platform$file.sep, basename(p))
    if (verbose) message("seps p: ", p)
  }
  # add "~" if on home in front
  if (home != -1) p <- paste0("~", p)
  # add trailing slash if not in "/" or "~/"
  if (substr(p, nchar(p), nchar(p)) != "/") p <- paste0(p, "/")
  # add machine and trailing R>
  #p <- paste0(Sys.info()[4], ":", p, " R>")
  p <- paste0(Sys.info()[4], ":", p, ">")
  # apply color in bash style
  # unfortunately, this breaks arrow up/down behavior in R terminal:
  # https://github.com/jalvesaq/colorout/issues/7#issuecomment-207849620
  if (FALSE) {
    if (FALSE) { # bash style
      p <- paste0("\x1b[34m", p, "\x1b[0m") # blue
    } else if (TRUE) { # # apply color from crayon package
      #library(crayon)
      if (any(search() == "package:crayon")) p <- crayon::blue(p)
    }
  }
  # attach trailing space and return the finished prompt string
  p <- paste0(p, " ")
  p
} # myRPrompt
# overwrite base::setwd to refresh the prompt on every directory change
setwd <- function(...) {
  base::setwd(...)
  options(prompt=myRPrompt())
}
|
/myRPrompt.r
|
no_license
|
chrisdane/functions
|
R
| false
| false
| 2,149
|
r
|
## R
# https://stackoverflow.com/questions/25136059/how-to-show-working-directory-in-r-prompt
# Build a short prompt string "host:~/a/b/lastdir/> " from the current working
# directory: the home prefix is replaced by "~" and every intermediate
# directory is abbreviated to its first letter (the last one kept in full).
myRPrompt <- function(...) {
  verbose <- FALSE  # was `F`: T/F are reassignable, use the literals
  p <- getwd() # absolute path as pwd
  if (verbose) message("getwd(): ", getwd())
  #home <- regexpr(path.expand("~"), p)
  # Resolve symlinks so the match still works when $HOME is itself a symlink.
  home <- regexpr(system("readlink -f ~", intern=TRUE), p)
  if (home != -1) { # remove home part
    p <- substr(p, start=home + attr(home, "match.length"), stop=nchar(p))
    if (p == "") p <- "/" # if in home itself (~/)
    if (verbose) message("home: ", p)
  }
  # shorten directory names: only one letter after every .Platform$file.sep except the last directory
  seps <- gregexpr(.Platform$file.sep, p)[[1]]
  if (length(seps) > 1) {
    if (verbose) message("seps: ", paste0(seps, collapse=", "))
    # `times=` spelled out: the original's `t=` relied on partial matching.
    ps <- rep(NA, times=length(seps) - 1)
    for (i in seq_len(length(seps) - 1)) { # keep the last (top) directory full name
      ps[i] <- substr(p, start=seps[i] + 1, stop=seps[i] + 1)
    }
    p <- paste0(.Platform$file.sep, paste0(ps, collapse=.Platform$file.sep), .Platform$file.sep, basename(p))
    if (verbose) message("seps p: ", p)
  }
  # add "~" if on home in front
  if (home != -1) p <- paste0("~", p)
  # add trailing slash if not in "/" or "~/"
  if (substr(p, nchar(p), nchar(p)) != "/") p <- paste0(p, "/")
  # add machine and trailing R>
  #p <- paste0(Sys.info()[4], ":", p, " R>")
  p <- paste0(Sys.info()[4], ":", p, ">")
  # apply color in bash style
  # unfortunately, this breaks arrow up/down behavior in R terminal:
  # https://github.com/jalvesaq/colorout/issues/7#issuecomment-207849620
  if (FALSE) {
    if (FALSE) { # bash style
      p <- paste0("\x1b[34m", p, "\x1b[0m") # blue
    } else if (TRUE) { # # apply color from crayon package
      #library(crayon)
      if (any(search() == "package:crayon")) p <- crayon::blue(p)
    }
  }
  # attach trailing space and return the finished prompt string
  p <- paste0(p, " ")
  p
} # myRPrompt
# overwrite base::setwd to refresh the prompt on every directory change
setwd <- function(...) {
  base::setwd(...)
  options(prompt=myRPrompt())
}
|
\name{rlmDD}
\alias{rlmDD}
\title{
Data driven robust methods
}
\description{
Robust estimation often relies on a dispersion function that is more slowly varying at large values than the squared function. However, the choice of tuning constant in dispersion
function may impact the estimation efficiency to a great extent. For a given family of dispersion functions, we suggest obtaining the `best' tuning constant from the data so that the asymptotic efficiency is maximized.
This library provides a robust linear regression with a tuning parameter being automatically chosen to provide the necessary resistance against outliers. The robust (loss) functions include the Huber, Tukey bisquare and the exponential loss.
}
\usage{
rlmDD(yy, xx, beta0, betaR, method, plot)
}
\arguments{
\item{yy}{Vector representing the response variable
}
\item{xx}{Design matrix of the covariates excluding the intercept in the first column
}
\item{beta0}{Initial parameter estimate using \code{lm}
}
\item{betaR}{Robust estimate of beta with a fixed tuning constant using \code{rlm}
}
\item{method}{Huber, Bisquare or Exponential
}
\item{plot}{"Y" gives a plot: the efficiency factor versus a range of tunning parameter values.
}
}
%\details{
%efgdgd
%}
\value{
The function returns a list including
\item{esti}{ Value of the robust estimate}
\item{Std.Error}{ Standard error of the robust estimate}
\item{tunning}{ Optimum tuning parameter}
\item{R2}{ R-squared value}
}
\references{
Wang, Y-G., Lin, X., Zhu, M., & Bai, Z. (2007). Robust estimation using the Huber function with a data-dependent tuning constant. Journal of Computational and Graphical Statistics, 16(2), 468-481.
Wang, X., Jiang, Y., Huang, M., & Zhang, H. (2013). Robust variable selection with exponential squared loss. Journal of the American Statistical Association, 108, 632-643.
Wang, N., Wang, Y-G., Hu, S., Hu, Z. H., Xu, J., Tang, H., & Jin, G. (2018). Robust Regression with Data-Dependent Regularization Parameters and Autoregressive Temporal Correlations. Environmental Modeling & Assessment, in press.
}
\author{
You-Gan Wang, Na Wang
}
\seealso{
\code{rlm} function from package \code{MASS}
}
\examples{
library(MASS)
data(stackloss)
LS <- lm(stack.loss ~ stack.x)
RB <- rlm(stack.loss ~ stack.x, psi = psi.huber, k = 1.345)
DD1 <- rlmDD(stack.loss, stack.x, LS$coef, RB$coef, method = "Huber",
plot = "Y")
LS <- lm(stack.loss ~ stack.x)
RB <- rlm(stack.loss ~ stack.x, psi = psi.bisquare, c = 4.685)
DD2 <- rlmDD(stack.loss, stack.x, LS$coef, RB$coef, method = "Bisquare",
plot = "Y")
LS <- lm(stack.loss ~ stack.x)
RB <- rlm(stack.loss ~ stack.x, psi = psi.huber, k = 1.345)
DD3 <- rlmDD(stack.loss, stack.x, LS$coef, RB$coef, method = "Exponential",
plot = "Y")
## Plasma dataset
data(plasma)
y <- plasma$y
x <- cbind(plasma$calories, plasma$dietary)
LS <- lm(y ~ x)
RB <- rlm(y ~ x, psi = psi.huber, k = 1.345)
DD.h <- rlmDD(y, x, LS$coef, RB$coef, method = "Huber", plot = "Y")
LS <- lm(y ~ x)
RB <- rlm(y ~ x, psi = psi.bisquare, c = 4.685)
DD.b <- rlmDD(y, x, LS$coef, RB$coef, method = "Bisquare", plot = "Y")
LS <- lm(y ~ x)
RB <- rlm(y ~ x, psi = psi.huber, k = 1.345)
DD.e <- rlmDD(y, x, LS$coef, RB$coef, method = "Exponential", plot = "Y")
}
\keyword{regression}
|
/man/rlmDD.Rd
|
no_license
|
e-123456/rlmDataDriven
|
R
| false
| false
| 3,291
|
rd
|
\name{rlmDD}
\alias{rlmDD}
\title{
Data driven robust methods
}
\description{
Robust estimation often relies on a dispersion function that is more slowly varying at large values than the squared function. However, the choice of tuning constant in dispersion
function may impact the estimation efficiency to a great extent. For a given family of dispersion functions, we suggest obtaining the `best' tuning constant from the data so that the asymptotic efficiency is maximized.
This library provides a robust linear regression with a tuning parameter being automatically chosen to provide the necessary resistance against outliers. The robust (loss) functions include the Huber, Tukey bisquare and the exponential loss.
}
\usage{
rlmDD(yy, xx, beta0, betaR, method, plot)
}
\arguments{
\item{yy}{Vector representing the response variable
}
\item{xx}{Design matrix of the covariates excluding the intercept in the first column
}
\item{beta0}{Initial parameter estimate using \code{lm}
}
\item{betaR}{Robust estimate of beta with a fixed tuning constant using \code{rlm}
}
\item{method}{Huber, Bisquare or Exponential
}
\item{plot}{"Y" gives a plot: the efficiency factor versus a range of tunning parameter values.
}
}
%\details{
%efgdgd
%}
\value{
The function returns a list including
\item{esti}{ Value of the robust estimate}
\item{Std.Error}{ Standard error of the robust estimate}
\item{tunning}{ Optimum tuning parameter}
\item{R2}{ R-squared value}
}
\references{
Wang, Y-G., Lin, X., Zhu, M., & Bai, Z. (2007). Robust estimation using the Huber function with a data-dependent tuning constant. Journal of Computational and Graphical Statistics, 16(2), 468-481.
Wang, X., Jiang, Y., Huang, M., & Zhang, H. (2013). Robust variable selection with exponential squared loss. Journal of the American Statistical Association, 108, 632-643.
Wang, N., Wang, Y-G., Hu, S., Hu, Z. H., Xu, J., Tang, H., & Jin, G. (2018). Robust Regression with Data-Dependent Regularization Parameters and Autoregressive Temporal Correlations. Environmental Modeling & Assessment, in press.
}
\author{
You-Gan Wang, Na Wang
}
\seealso{
\code{rlm} function from package \code{MASS}
}
\examples{
library(MASS)
data(stackloss)
LS <- lm(stack.loss ~ stack.x)
RB <- rlm(stack.loss ~ stack.x, psi = psi.huber, k = 1.345)
DD1 <- rlmDD(stack.loss, stack.x, LS$coef, RB$coef, method = "Huber",
plot = "Y")
LS <- lm(stack.loss ~ stack.x)
RB <- rlm(stack.loss ~ stack.x, psi = psi.bisquare, c = 4.685)
DD2 <- rlmDD(stack.loss, stack.x, LS$coef, RB$coef, method = "Bisquare",
plot = "Y")
LS <- lm(stack.loss ~ stack.x)
RB <- rlm(stack.loss ~ stack.x, psi = psi.huber, k = 1.345)
DD3 <- rlmDD(stack.loss, stack.x, LS$coef, RB$coef, method = "Exponential",
plot = "Y")
## Plasma dataset
data(plasma)
y <- plasma$y
x <- cbind(plasma$calories, plasma$dietary)
LS <- lm(y ~ x)
RB <- rlm(y ~ x, psi = psi.huber, k = 1.345)
DD.h <- rlmDD(y, x, LS$coef, RB$coef, method = "Huber", plot = "Y")
LS <- lm(y ~ x)
RB <- rlm(y ~ x, psi = psi.bisquare, c = 4.685)
DD.b <- rlmDD(y, x, LS$coef, RB$coef, method = "Bisquare", plot = "Y")
LS <- lm(y ~ x)
RB <- rlm(y ~ x, psi = psi.huber, k = 1.345)
DD.e <- rlmDD(y, x, LS$coef, RB$coef, method = "Exponential", plot = "Y")
}
\keyword{regression}
|
# Auto-generated fuzz replay: feed a stored argument list to the internal
# mgss:::MVP_normalfactor_rcpp routine and dump the result's structure.
testlist <- list(A = structure(c(3.1838324823962e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 5:6), left = 0L, right = 0L, x = numeric(0))
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result)
|
/mgss/inst/testfiles/MVP_normalfactor_rcpp/AFL_MVP_normalfactor_rcpp/MVP_normalfactor_rcpp_valgrind_files/1615948858-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 265
|
r
|
# Auto-generated fuzz replay: feed a stored argument list to the internal
# mgss:::MVP_normalfactor_rcpp routine and dump the result's structure.
testlist <- list(A = structure(c(3.1838324823962e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 5:6), left = 0L, right = 0L, x = numeric(0))
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result)
|
# Auto-generated fuzz replay: feed a stored argument list to the internal
# IntervalSurgeon:::rcpp_pile routine and dump the result's structure.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1843231103L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609874808-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 729
|
r
|
# Auto-generated fuzz replay: feed a stored argument list to the internal
# IntervalSurgeon:::rcpp_pile routine and dump the result's structure.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1843231103L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
#### Preamble ####
# Purpose: Prepare and clean the survey data downloaded from IPUMS USA 2018 5-years ACS.
# Author: Hanrui Dou, Hanjing Huang, Hairuo Wang, Xuan Zhong - Group 161
# Date: 2 November 2020
# Contact: hairuo.wang@mail.utoronto.ca, dhr1142638924@gmail.com, hanjing.huang@mail.utoronto.ca, xuan.zhong@mail.utoronto.ca
# License: MIT
# Pre-requisites:
# - Need to have downloaded the ACS data and saved it to inputs/data
#### Workspace setup ####
library(haven)
library(tidyverse)
# Read in the raw data.
setwd("~/Desktop/STA304")
raw_data <- read_dta("usa_00005.dta")
# Add the labels: labelled IPUMS columns (age, sex, region, ...) become factors.
raw_data <- labelled::to_factor(raw_data)
# Keep some variables that we are going to use to build model and predict.
# Collapse the nine detailed census divisions into four general regions.
reduced_data <-
  raw_data %>%
  select(region,
         sex,
         age,
         labforce,
         empstat) %>%
  mutate(region_new = case_when(`region` == 'new england division' ~ 'northeast',
                                `region` == 'middle atlantic division' ~ 'northeast',
                                `region` == 'east north central div' ~ 'midwest',
                                `region` == 'west north central div' ~ 'midwest',
                                `region` == 'south atlantic division' ~ 'south',
                                `region` == 'east south central div' ~ 'south',
                                `region` == 'west south central div' ~ 'south',
                                `region` == 'mountain division' ~ 'west',
                                `region` == 'pacific division' ~ 'west'))
#### What's next? ####
# Split into post-stratification cells by age, sex, region and employment status.
reduced_data <-
  reduced_data %>%
  count(age, sex, region_new, empstat) %>%
  group_by(age, sex, region_new, empstat)
# Drop cells with non-numeric age labels and missing employment status.
reduced_data <-
  reduced_data %>%
  filter(age != "less than 1 year old") %>%
  filter(age != "90 (90+ in 1980 and 1990)") %>%
  filter(empstat != 'n/a')
# BUG FIX: `age` is a factor here, and as.integer() on a factor returns the
# internal level codes (1, 2, 3, ...) rather than the actual ages. Going
# through as.character() recovers the real numeric age values.
reduced_data$age <- as.integer(as.character(reduced_data$age))
# Saving the census data as a csv file in my
# working directory
write_csv(reduced_data, "census_data.csv")
|
/01-data_cleaning-post-strat1.R
|
no_license
|
HairuoWang/STA304-PS3-G161
|
R
| false
| false
| 2,202
|
r
|
#### Preamble ####
# Purpose: Prepare and clean the survey data downloaded from IPUMS USA 2018 5-years ACS.
# Author: Hanrui Dou, Hanjing Huang, Hairuo Wang, Xuan Zhong - Group 161
# Date: 2 November 2020
# Contact: hairuo.wang@mail.utoronto.ca, dhr1142638924@gmail.com, hanjing.huang@mail.utoronto.ca, xuan.zhong@mail.utoronto.ca
# License: MIT
# Pre-requisites:
# - Need to have downloaded the ACS data and saved it to inputs/data
#### Workspace setup ####
library(haven)
library(tidyverse)
# Read in the raw data.
setwd("~/Desktop/STA304")
raw_data <- read_dta("usa_00005.dta")
# Add the labels: labelled IPUMS columns (age, sex, region, ...) become factors.
raw_data <- labelled::to_factor(raw_data)
# Keep some variables that we are going to use to build model and predict.
# Collapse the nine detailed census divisions into four general regions.
reduced_data <-
  raw_data %>%
  select(region,
         sex,
         age,
         labforce,
         empstat) %>%
  mutate(region_new = case_when(`region` == 'new england division' ~ 'northeast',
                                `region` == 'middle atlantic division' ~ 'northeast',
                                `region` == 'east north central div' ~ 'midwest',
                                `region` == 'west north central div' ~ 'midwest',
                                `region` == 'south atlantic division' ~ 'south',
                                `region` == 'east south central div' ~ 'south',
                                `region` == 'west south central div' ~ 'south',
                                `region` == 'mountain division' ~ 'west',
                                `region` == 'pacific division' ~ 'west'))
#### What's next? ####
# Split into post-stratification cells by age, sex, region and employment status.
reduced_data <-
  reduced_data %>%
  count(age, sex, region_new, empstat) %>%
  group_by(age, sex, region_new, empstat)
# Drop cells with non-numeric age labels and missing employment status.
reduced_data <-
  reduced_data %>%
  filter(age != "less than 1 year old") %>%
  filter(age != "90 (90+ in 1980 and 1990)") %>%
  filter(empstat != 'n/a')
# BUG FIX: `age` is a factor here, and as.integer() on a factor returns the
# internal level codes (1, 2, 3, ...) rather than the actual ages. Going
# through as.character() recovers the real numeric age values.
reduced_data$age <- as.integer(as.character(reduced_data$age))
# Saving the census data as a csv file in my
# working directory
write_csv(reduced_data, "census_data.csv")
|
# Convert Illinois State Plane East (EPSG:2790) X/Y coordinates to
# WGS84 longitude/latitude.
#
# X, Y   : numeric vectors of easting/northing values.
# metric : chooses the unit suffix appended to the source CRS string
#          (TRUE -> "+units=ft", FALSE -> "+units=us-ft").
#          NOTE(review): despite the name, both branches declare feet --
#          confirm the intended meaning of `metric` with the author.
#
# Returns a data.table containing the original X and Y columns plus
# `latitude` and `longitude`; rows with any missing input keep NA in the
# new columns.
stateplane2latlon <- function(X, Y, metric=TRUE){
# latlon <- data.table(longitude = c(1148703.5804669, 1148721.69534794,
#                           1148718.58006945, 1148719.92031838,
#                           1148722.28519294),
#              latitude = c(1938916.16105645, 1938209.79458671,
#                           1938315.99708976, 1938270.3270949,
#                           1938189.60711051))
# browser()
latlon <- data.table(longitude = X, latitude = Y)
# Logical index of rows where both coordinates are present; only these
# rows are transformed, the rest stay NA in the output.
ii <- apply(latlon, 1, function(x) !any(is.na(x)))
latlon <- latlon[ii]
# Promote the table to a SpatialPoints object so sp can reproject it.
coordinates(latlon) <- c("longitude", "latitude")
if(metric){
proj4string(latlon) <- CRS("+init=epsg:2790 +units=ft")
} else {
proj4string(latlon) <- CRS("+init=epsg:2790 +units=us-ft")
}
# Reproject to WGS84 and extract the coordinate matrix.
latlon <- coordinates(spTransform(latlon, CRS("+proj=longlat +datum=WGS84")))
# Re-attach the transformed values to the full-length input, filling only
# the rows that had complete X/Y pairs.
ret <- data.table(X, Y)
ret <- ret[ii, latitude := coordinates(latlon)[ , 'latitude']][]
ret <- ret[ii, longitude := coordinates(latlon)[ , 'longitude']][]
return(ret)
}
# Manual test harness: wrapped in if(FALSE) so nothing runs when the file
# is sourced. Execute the body interactively to round-trip a handful of
# reference points through latlon2stateplane() and stateplane2latlon()
# under both unit settings.
if(FALSE){
rm(list=ls())
source("functions/latlon2stateplane.R")
source("functions/stateplane2latlon.R")
# WGS84 reference coordinates.
lon <- c(-87.728428378, -87.728380101, -87.7283888,
-87.728385057, -87.728378456)
lat <- c(41.988313918, 41.986375258, 41.986666744,
41.986541397, 41.986319851)
# Corresponding State Plane eastings/northings.
spx <- c(1148703.5804669, 1148721.69534794, 1148718.58006945,
1148719.92031838, 1148722.28519294)
spy <- c(1938916.16105645, 1938209.79458671, 1938315.99708976,
1938270.3270949, 1938189.60711051)
# Forward and inverse conversions with metric = TRUE, then FALSE.
latlon2stateplane(lat, lon, T)
stateplane2latlon(spx, spy, T)
latlon2stateplane(lat, lon, F)
stateplane2latlon(spx, spy, F)
}
|
/west-nile-virus-predictions/R/functions/stateplane2latlon.R
|
no_license
|
mohcinemadkour/west-nile-virus-predictions
|
R
| false
| false
| 1,802
|
r
|
stateplane2latlon <- function(X, Y, metric=TRUE){
# latlon <- data.table(longitude = c(1148703.5804669, 1148721.69534794,
# 1148718.58006945, 1148719.92031838,
# 1148722.28519294),
# latitude = c(1938916.16105645, 1938209.79458671,
# 1938315.99708976, 1938270.3270949,
# 1938189.60711051))
# browser()
latlon <- data.table(longitude = X, latitude = Y)
ii <- apply(latlon, 1, function(x) !any(is.na(x)))
latlon <- latlon[ii]
coordinates(latlon) <- c("longitude", "latitude")
if(metric){
proj4string(latlon) <- CRS("+init=epsg:2790 +units=ft")
} else {
proj4string(latlon) <- CRS("+init=epsg:2790 +units=us-ft")
}
latlon <- coordinates(spTransform(latlon, CRS("+proj=longlat +datum=WGS84")))
ret <- data.table(X, Y)
ret <- ret[ii, latitude := coordinates(latlon)[ , 'latitude']][]
ret <- ret[ii, longitude := coordinates(latlon)[ , 'longitude']][]
return(ret)
}
if(FALSE){
rm(list=ls())
source("functions/latlon2stateplane.R")
source("functions/stateplane2latlon.R")
lon <- c(-87.728428378, -87.728380101, -87.7283888,
-87.728385057, -87.728378456)
lat <- c(41.988313918, 41.986375258, 41.986666744,
41.986541397, 41.986319851)
spx <- c(1148703.5804669, 1148721.69534794, 1148718.58006945,
1148719.92031838, 1148722.28519294)
spy <- c(1938916.16105645, 1938209.79458671, 1938315.99708976,
1938270.3270949, 1938189.60711051)
latlon2stateplane(lat, lon, T)
stateplane2latlon(spx, spy, T)
latlon2stateplane(lat, lon, F)
stateplane2latlon(spx, spy, F)
}
|
#' @title Summarise draw-level cell predictions
#' @description Collapses a cells-by-draws matrix of (raw or raked)
#'   predictions into one summary value per cell, optionally returned as a
#'   RasterBrick aligned to a mask raster.
#' @param draw_level_cell_pred Cells-by-draws matrix, as output by
#'   predict_mbg() or rake_predictions().
#' @param mask Raster used to place the summarised cells, Default: simple_raster
#' @param return_as_raster If TRUE return a RasterBrick, otherwise the raw
#'   summary vector, Default: TRUE
#' @param summary_stat Name of the function applied across draws for each
#'   cell (e.g. 'mean', 'sd', 'cirange', 'quantile'), Default: 'mean'
#' @param ... Additional arguments passed on to `summary_stat`
#'   (e.g. probs for quantile).
#' @return A RasterBrick with one layer per period when return_as_raster is
#'   TRUE, otherwise a numeric vector with one entry per cell-period.
#' @details The number of raster layers is inferred as
#'   nrow(draw_level_cell_pred) / length(cellIdx(mask)).
#' @examples
#' \dontrun{
#' if (interactive()) {
#'  # EXAMPLE1
#'  }
#' }
#' @rdname make_cell_pred_summary
#' @export
make_cell_pred_summary <- function(draw_level_cell_pred,
mask = simple_raster,
return_as_raster = TRUE,
summary_stat = "mean",
...) {
#################################################################################
### Takes in raked or raw draw-level estimates and makes stat summary rasters
## Inputs:
# draw_level_cell_pred: Cells by Draws matrix which is output from predict_mbg() or from rake_predictions()
# mask: Should be the simple_raster
# return_as_raster: If TRUE returns as raster, else as table
# summary_stat: ie mean, cirange, quantile, sd
## Outputs: Summary table or raster of the cell_pred table put in
#################################################################################
# make summary: apply the chosen statistic across draws (row-wise).
summ <- apply(draw_level_cell_pred, 1, summary_stat, ...)
# put it in a raster, one layer per period.
if (return_as_raster) {
yrs <- dim(draw_level_cell_pred)[1] / length(cellIdx(mask))
message(sprintf("Making a RasterBrick with %i layers", yrs))
summ <- insertRaster(mask, matrix(summ, ncol = yrs))
}
return(summ)
}
|
/antibiotic_usage/mbg_central/LBDCore/R/make_cell_pred_summary.R
|
no_license
|
The-Oxford-GBD-group/antibiotic_modelling_code
|
R
| false
| false
| 1,691
|
r
|
#' @title FUNCTION_TITLE
#' @description FUNCTION_DESCRIPTION
#' @param draw_level_cell_pred PARAM_DESCRIPTION
#' @param mask PARAM_DESCRIPTION, Default: simple_raster
#' @param return_as_raster PARAM_DESCRIPTION, Default: TRUE
#' @param summary_stat PARAM_DESCRIPTION, Default: 'mean'
#' @param ... PARAM_DESCRIPTION
#' @return OUTPUT_DESCRIPTION
#' @details DETAILS
#' @examples
#' \dontrun{
#' if (interactive()) {
#' # EXAMPLE1
#' }
#' }
#' @rdname make_cell_pred_summary
#' @export
make_cell_pred_summary <- function(draw_level_cell_pred,
mask = simple_raster,
return_as_raster = TRUE,
summary_stat = "mean",
...) {
#################################################################################
### Takes in raked or raw draw-level estimates and makes stat summary rasters
## Inputs:
# draw_level_cell_pred: Cells by Draws matrix which is output from predict_mbg() or from rake_predictions()
# mask: Should be the simple_raster
# return_as_raster: If TRUE returns as raster, else as table
# summary_stat: ie mean, cirange, quantile, sd
## Outputs: Summary table or raster of the cell_pred table put in
#################################################################################
# make summary
summ <- apply(draw_level_cell_pred, 1, summary_stat, ...)
# put it in a raster
if (return_as_raster) {
yrs <- dim(draw_level_cell_pred)[1] / length(cellIdx(mask))
message(sprintf("Making a RasterBrick with %i layers", yrs))
summ <- insertRaster(mask, matrix(summ, ncol = yrs))
}
return(summ)
}
|
#' Quantify geneset signatures on a Seurat object
#'
#' Computes a per-cell module score (via Seurat's AddModuleScore) for each
#' geneset in `genesets` and stores the scores as a scaled 'genesets' assay
#' on the object.
#'
#' @param Seurat_obj Seurat: your Seurat object
#' @param genesets, list: named list with genesets; the names become the
#'   row names of the new assay
#' @param reset_genesets, bool: if you want reset already existing genesets
#'   with a new assay object
#'
#' @return Seurat object with geneset assay ('genesets' is set as the
#'   default assay and its data are scaled)
#'
#' @keywords geneset, gene signature,
#'
#' @examples
#'
#' @export
#'
quantify_genesets <- function(Seurat_obj, genesets, reset_genesets=FALSE){
  if (is.null(names(genesets))){
    # BUG FIX: `throw()` is not an R function; use stop() to signal the error.
    stop('genesets must be a named list')
  }
  # Keep only genes actually present in the RNA counts matrix.
  genesets <- lapply(genesets, function(gset) gset[gset %in% rownames(Seurat_obj@assays$RNA@counts)])
  DefaultAssay(Seurat_obj) <- 'RNA'
  signs <- AddModuleScore(Seurat_obj, genesets)
  # BUG FIX: the scores live on `signs` (the object returned above), not on
  # the base function `sign`. AddModuleScore writes one meta.data column per
  # geneset, prefixed "Cluster".
  signs_matrix <- t(signs@meta.data[grepl("Cluster", colnames(signs@meta.data))])
  rownames(signs_matrix) <- names(genesets)
  if (reset_genesets){
    # Drop any previously stored geneset assay before rebuilding it.
    Seurat_obj@assays$genesets <- NULL
  }
  if (is.null(Seurat_obj@assays$genesets)){
    # No existing assay: create a fresh one from the score matrix.
    Seurat_obj_signs <- CreateAssayObject(data = as.matrix(signs_matrix))
    Seurat_obj@assays$genesets <- Seurat_obj_signs
    DefaultAssay(Seurat_obj) <- 'genesets'
    Seurat_obj <- ScaleData(Seurat_obj)
    return(Seurat_obj)
  } else {
    # Existing assay: append the new scores, keeping the first occurrence
    # of any duplicated geneset name.
    signs_matrix <- rbind(Seurat_obj@assays$genesets@data, signs_matrix)
    signs_matrix <- signs_matrix[unique(rownames(signs_matrix)),]
    Seurat_obj@assays$genesets <- CreateAssayObject(data = signs_matrix)
    DefaultAssay(Seurat_obj) <- 'genesets'
    Seurat_obj <- ScaleData(Seurat_obj)
    return(Seurat_obj)
  }
}
|
/R/quantify_genesets.R
|
no_license
|
GrigoriiNos/rimmi.rnaseq
|
R
| false
| false
| 1,580
|
r
|
#'
#' This function allows you to calculate an average expression for the list of genes of your interest.
#'
#' @param Seurat_obj Seurat: your Seurat object
#' @param genesets, list: named list with genesets
#' @param reset_genesets, bool: if you want reset already existing genesets with a new assay object
#'
#' @return Seurat object with geneset assay
#'
#' @keywords geneset, gene signature,
#'
#' @examples
#'
#' @export
#'
quantify_genesets <- function(Seurat_obj, genesets, reset_genesets=FALSE){
if (is.null((names(genesets)))){
throw('genesets must be a names list')
}
genesets <- lapply(genesets, function(gset) gset[gset %in% rownames(Seurat_obj@assays$RNA@counts)])
DefaultAssay(Seurat_obj) <- 'RNA'
signs <- AddModuleScore(Seurat_obj, genesets)
signs_matrix <- t(sign@meta.data[grepl("Cluster" ,colnames(signs@meta.data))])
rownames(signs_matrix) <- names(genesets)
if (reset_genesets){
Seurat_obj@assays$genesets <- NULL
}
if (is.null(Seurat_obj@assays$genesets)){
Seurat_obj_signs <- CreateAssayObject(data = as.matrix(signs_matrix))
Seurat_obj@assays$genesets <- Seurat_obj_signs
DefaultAssay(Seurat_obj) <- 'genesets'
Seurat_obj <- ScaleData(Seurat_obj)
return(Seurat_obj)
} else {
signs_matrix <- rbind(Seurat_obj@assays$genesets@data, signs_matrix)
signs_matrix <- signs_matrix[unique(rownames(signs_matrix)),]
Seurat_obj@assays$genesets <- CreateAssayObject(data = signs_matrix)
DefaultAssay(Seurat_obj) <- 'genesets'
Seurat_obj <- ScaleData(Seurat_obj)
return(Seurat_obj)
}
}
|
# run_analysis.R -- build a tidy summary of the UCI HAR dataset.
# Reads the raw train/test splits from "UCI HAR Dataset/", merges them,
# keeps mean/std measurements, labels activities, and writes
# "tidy_data.txt" with per-subject, per-activity averages.
library("reshape2")

## Read Data
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
features <- read.table("UCI HAR Dataset/features.txt")

## Analysis
# 1. Merges the training and the test sets to create one data set.
dataSet <- rbind(X_train, X_test)

# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# Create a vector of only mean and std, use the vector to subset.
# NOTE(review): the pattern is unescaped, so "mean()|std()" matches any
# feature containing "mean" or "std" (including e.g. meanFreq) -- confirm
# this is the intended column set before tightening the regex.
only_mean_n_std <- grep("mean()|std()", features[, 2])
dataSet <- dataSet[, only_mean_n_std]

# 4. Appropriately labels the data set with descriptive activity names.
# Create vector of "clean" feature names by removing "()", then apply them
# to the data set as column labels.
feature_names <- sapply(features[, 2], function(x) {gsub("[()]", "", x)})
names(dataSet) <- feature_names[only_mean_n_std]

# Combine test and train subject/activity vectors and give descriptive labels.
subject <- rbind(subject_train, subject_test)
names(subject) <- 'subject'
activity <- rbind(y_train, y_test)
names(activity) <- 'activity'

# Combine subject, activity, and the mean/std-only measurements.
dataSet <- cbind(subject, activity, dataSet)

# 3. Uses descriptive activity names to name the activities in the data set:
# convert the activity codes to a factor and relabel its levels.
act_group <- factor(dataSet$activity)
levels(act_group) <- activity_labels[, 2]
dataSet$activity <- act_group

# 5. Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject. Melt to long format, cast
# means, and write the tidy data to the working directory.
baseData <- melt(dataSet, (id.vars = c("subject", "activity")))
second_data_set <- dcast(baseData, subject + activity ~ variable, mean)
# BUG FIX: write.table has no `row.name` argument; the original relied on
# partial argument matching. Spell out `row.names`.
write.table(second_data_set, "tidy_data.txt", sep = ",", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
tmpusr1/getcleandataproject
|
R
| false
| false
| 2,295
|
r
|
library("reshape2")
## Read Data
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
features <- read.table("UCI HAR Dataset/features.txt")
## Analysis
# 1. Merges the training and the test sets to create one data set.
dataSet <- rbind(X_train,X_test)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# Create a vector of only mean and std, use the vector to subset.
only_mean_n_std <- grep("mean()|std()", features[, 2])
dataSet <- dataSet[,only_mean_n_std]
# 4. Appropriately labels the data set with descriptive activity names.
# Create vector of "Clean" feature names by getting rid of "()" apply to the dataSet to rename labels.
feature_names <- sapply(features[, 2], function(x) {gsub("[()]", "",x)})
names(dataSet) <- feature_names[only_mean_n_std]
# combine test and train of subject data and activity data, give descriptive lables
subject <- rbind(subject_train, subject_test)
names(subject) <- 'subject'
activity <- rbind(y_train, y_test)
names(activity) <- 'activity'
# combine subject, activity, and mean and std only data set to create final data set.
dataSet <- cbind(subject,activity, dataSet)
# 3. Uses descriptive activity names to name the activities in the data set
# group the activity column of dataSet, re-name lable of levels with activity_levels, and apply it to dataSet.
act_group <- factor(dataSet$activity)
levels(act_group) <- activity_labels[,2]
dataSet$activity <- act_group
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# melt data to tall skinny data and cast means. Finally write the tidy data to the working directory as "tidy_data.txt"
baseData <- melt(dataSet,(id.vars=c("subject","activity")))
second_data_set <- dcast(baseData, subject + activity ~ variable, mean)
write.table(second_data_set, "tidy_data.txt", sep = ",", row.name=FALSE)
|
#########################################################
# 03 week 1 assignment
# April 26. 2014, Rae Woong Park
#
# Builds a tidy per-subject, per-activity summary of the UCI HAR dataset
# using data.table keyed joins.
#########################################################
library(data.table)
#setwd("C:/Users/Administrator/Documents/GitHub/GettingandCleaningData/")
#setwd("C:/Users/Administrator/Documents/data/UCI HAR Dataset/")

# 1. Merges the training and the test sets to create one data set.
# & 3. Uses descriptive activity names to name the activities in the data set
# & 4. Appropriately labels the data set with descriptive activity names.

# 3. Map the numeric activity codes to descriptive names via a keyed join.
activity_labels <- fread("activity_labels.TXT", sep="auto", header=FALSE)
trainYData <- fread("./train/y_train.txt", sep="auto", header=FALSE)
testYData <- fread("./test/y_test.txt", sep="auto", header=FALSE)
setkey(trainYData, V1)
setkey(testYData, V1)
trainYData <- trainYData[activity_labels]
testYData <- testYData[activity_labels]
setnames(trainYData, "V2", "activity")
setnames(testYData, "V2", "activity")

# Clean feature names: replace "(" and ")" with "_" so they are valid
# column names.
col.names <- read.table("features.TXT")
col.namesModified <- gsub("[()]", "_", col.names$V2)

trainSubjectData <- read.table("./train/subject_train.txt", col.names="subject", sep="", header=FALSE)
trainXData <- read.table("./train/X_train.txt", sep="", col.names=col.namesModified, header=FALSE)
# 4. Appropriately labels the data set with descriptive activity names.
trainData <- cbind(trainSubjectData, trainYData[,2,with=FALSE], trainXData)

testSubjectData <- read.table("./test/subject_test.txt", col.names="subject", sep="", header=FALSE)
testXData <- read.table("./test/X_test.txt", sep="", col.names=col.namesModified, header=FALSE)
# 4. Appropriately labels the data set with descriptive activity names.
testData <- cbind(testSubjectData, testYData[,2,with=FALSE], testXData)

# 1. Merges the training and the test sets to create one data set.
mergedData <- rbind(trainData, testData)
write.table(mergedData, file = "mergedData.txt", append = FALSE, quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)

# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# Only variables having "mean()" and "std()" in their names were included
# (after the rename above these read "mean__" and "std__").
meanStdData <- data.table(mergedData[, grep("subject|activity|mean__|std__", colnames(mergedData), value=TRUE, ignore.case = FALSE)])

# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# The expression "each activity and each subject" is confusing!
# I thought it a grouping problem, that is 'grouping by both subject and activity'.
# Thus each subject can have up to six rows (activities).
setkey(meanStdData, subject, activity)
# BUG FIX: lapply needs the function `mean` itself; `mean()` with no
# arguments raises an error before any averaging happens.
summaryDataWrite <- meanStdData[, lapply(.SD, mean), by="subject,activity"]
write.table(summaryDataWrite, file="tidyData.txt", sep="\t", row.names=FALSE, col.names=TRUE)
#########################################################
# End of Assignment
#########################################################
|
/run_analysis.R
|
no_license
|
rwpark99/GettingandCleaningData
|
R
| false
| false
| 3,094
|
r
|
#########################################################
#03 week 1 assignment
# April 26. 2014, Rae Woong Park
#########################################################
library(data.table)
#setwd("C:/Users/Administrator/Documents/GitHub/GettingandCleaningData/")
#setwd("C:/Users/Administrator/Documents/data/UCI HAR Dataset/")
# 1. Merges the training and the test sets to create one data set.
# & 3. Uses descriptive activity names to name the activities in the data set
# $ 4. Appropriately labels the data set with descriptive activity names.
# 3. Uses descriptive activity names to name the activities in the data set
activity_labels <- fread("activity_labels.TXT", sep="auto", header=FALSE)
trainYData <- fread("./train/y_train.txt", sep="auto", header=FALSE)
testYData <- fread("./test/y_test.txt", sep="auto", header=FALSE)
setkey(trainYData, V1)
setkey(testYData, V1)
trainYData <- trainYData[activity_labels]
testYData <- testYData[activity_labels]
setnames(trainYData, "V2", "activity")
setnames(testYData, "V2", "activity")
col.names = read.table("features.TXT")
col.namesModified <- gsub("[()]","_", col.names$V2) # replace "(", ")", "-" into "_"
trainSubjectData <- read.table("./train/subject_train.txt", col.names="subject", sep="", header=FALSE)
trainXData <- read.table("./train/X_train.txt", sep="", col.names=col.namesModified, header=FALSE)
# 4. Appropriately labels the data set with descriptive activity names.
trainData <- cbind(trainSubjectData, trainYData[,2,with=FALSE], trainXData)
testSubjectData <- read.table("./test/subject_test.txt", col.names="subject", sep="", header=FALSE)
testXData <- read.table("./test/X_test.txt", sep="", col.names=col.namesModified, header=FALSE)
# 4. Appropriately labels the data set with descriptive activity names.
testData <- cbind(testSubjectData, testYData[,2,with=FALSE], testXData)
# 1. Merges the training and the test sets to create one data set.
mergedData <-rbind(trainData, testData)
write.table(mergedData, file = "mergedData.txt", append = FALSE, quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# Only variables having "mean()" and "std()" in their names were included.
meanStdData <- data.table(mergedData[, grep("subject|activity|mean__|std__", colnames(mergedData), value=TRUE, ignore.case = FALSE)])
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# The expression "each activity and each subject" is confusing!
# I thought it a grouping problem, theat is 'grouping by both subject and activity'.
# Thus each subject can have up to six rows (ativities).
setkey(meanStdData, subject, activity)
summaryDataWrite <- meanStdData[, lapply(.SD, mean()), by="subject,activity"]
write.table(summaryDataWrite, file="tidyData.txt", sep="\t", row.names=FALSE, col.names=TRUE)
#########################################################
# End of Assignment
#########################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psr.R
\docType{package}
\name{psr}
\alias{psr}
\title{psr}
\description{
A package for computing various measures relating to reliability of performance science metrics. It
contains functions to compute the Typical Error (TE), Coefficient of Variation (CV), Standard Error of Measurement
(SEM), Smallest Worthwhile Change (SWC), Minimal Detectable Change (MDC), Intra-class Correlation Coefficient (ICC),
and Standard Ten Scores (STEN).
}
|
/man/psr.Rd
|
permissive
|
tommy-mcginn/psr
|
R
| false
| true
| 523
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psr.R
\docType{package}
\name{psr}
\alias{psr}
\title{psr}
\description{
A package for computing various measures relating to reliability of performance science metrics. It
contains functions to compute the Typical Error (TE), Coefficient of Variation (CV), Standard Error of Measurement
(SEM), Smallest Worthwhile Change (SWC), Minimal Detectable Change (MDC), Intra-class Correlation Coefficient (ICC),
and Standard Ten Scores (STEN).
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stach_extensions.R
\docType{class}
\name{StachExtensions}
\alias{StachExtensions}
\title{StachExtensions}
\format{
An \code{R6Class} generator object
}
\description{
The purpose of this class is to provide the helper methods for converting stach to Tabular format
}
\section{Methods}{
\describe{
\strong{ ConvertToDataFrame}
This function is used to convert stach to dataFrame.
\itemize{
\item \emph{ @param } package : Stach Data which is represented as a Package object.
\item \emph{ @returnType } dataFrames List : Returns the List of Table for the stach data.
}
}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-ConvertToDataFrame}{\code{StachExtensions$ConvertToDataFrame()}}
\item \href{#method-clone}{\code{StachExtensions$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ConvertToDataFrame"></a>}}
\if{latex}{\out{\hypertarget{method-ConvertToDataFrame}{}}}
\subsection{Method \code{ConvertToDataFrame()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{StachExtensions$ConvertToDataFrame(package)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{StachExtensions$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/auto-generated-sdk/man/StachExtensions.Rd
|
permissive
|
afernandes85/analyticsapi-engines-r-sdk
|
R
| false
| true
| 1,676
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stach_extensions.R
\docType{class}
\name{StachExtensions}
\alias{StachExtensions}
\title{StachExtensions}
\format{
An \code{R6Class} generator object
}
\description{
The purpose of this class is to provide the helper methods for converting stach to Tabular format
}
\section{Methods}{
\describe{
\strong{ ConvertToDataFrame}
This function is used to convert stach to dataFrame.
\itemize{
\item \emph{ @param } package : Stach Data which is represented as a Package object.
\item \emph{ @returnType } dataFrames List : Returns the List of Table for the stach data.
}
}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-ConvertToDataFrame}{\code{StachExtensions$ConvertToDataFrame()}}
\item \href{#method-clone}{\code{StachExtensions$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ConvertToDataFrame"></a>}}
\if{latex}{\out{\hypertarget{method-ConvertToDataFrame}{}}}
\subsection{Method \code{ConvertToDataFrame()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{StachExtensions$ConvertToDataFrame(package)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{StachExtensions$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the resampling folds (and hence the reported measures)
# are reproducible.
set.seed(1)
#:# data
# Download the "fri_c1_1000_25" benchmark dataset from OpenML.
dataset <- getOMLDataSet(data.name = "fri_c1_1000_25")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data are used directly.
head(dataset$data)
#:# model
# Binary classification task with penalized logistic regression
# ("classif.plr") predicting class probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.plr", par.vals = list(), predict.type = "prob")
#:# hash
#:# e1b7376588a14612daafe09b4bf0fbf4
# The hash of (task, learner) identifies this model configuration; the
# comment above records the expected value.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation, reporting accuracy, AUC, TNR, TPR, PPV and F1.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record the session (package versions etc.) alongside the results.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_fri_c1_1000_25/classification_binaryClass/e1b7376588a14612daafe09b4bf0fbf4/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 691
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "fri_c1_1000_25")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.plr", par.vals = list(), predict.type = "prob")
#:# hash
#:# e1b7376588a14612daafe09b4bf0fbf4
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConstraintTaxo2newick.R
\name{ConstraintTaxo2newick}
\alias{ConstraintTaxo2newick}
\title{Build a multifurcating topological constraint tree for RAxML}
\usage{
ConstraintTaxo2newick(
inputTaxo = NULL,
Taxo.Hier = "Phylum2Genus",
inputConst = NULL,
outputNewick = NULL
)
}
\arguments{
\item{inputTaxo}{a classification table, the first column is the species name (or
the name used as tip.label by the phylogenetic tree), followed by
the different hierarchical levels of the Linnaean classification
in the subsequent columns.}
\item{Taxo.Hier}{order of entry for the different hierarchical levels of the
Linnaean classification. "Phylum2Genus" is used by default, where the second column is the highest
level (e.g. Phylum) and the last column is the lowest classification level (e.g. Genus).
The reverse can also be used, using "Genus2Phylum" (i.e. the second column contains the lowest
level and the last column contains the highest).}
\item{inputConst}{a two column table: the first column refers to the hierarchical
level of the topological constraints (e.g. 'Family', or 'Order', or
'Subdivision'; note that the names of the hierarchical levels must be the same as
the headers of the classification table); the second column contains the name
of the taxa to be constrained as monophyletic (e.g. 'Aplodactylidae',
Aulopiformes', 'Percomorphaceae').}
\item{outputNewick}{name of the output multifurcating newick tree that will
be exported in a .txt file (can also include the path to the folder).}
}
\value{
This function exports into the R environment a list of two objects. The first
object is the taxonomic table modified to include the constraints, and the
second object is the multifurcating tree converted into a 'phylo' object.
The function also exports a newick tree as a txt document that can be used to constrain
the topology in RAxML.
}
\description{
This function builds a multifurcating phylogenetic tree
from a classification table and a table of phylogenetic constraints ready to be
used by RAxML as a constraint tree (option -g in RAxML) to guide the
reconstruction of the molecular phylogenetic tree.
}
\details{
Warning: branch lengths of the multifurcating tree are misleading, only the
topology matters.
}
\examples{
# Load the table listing the topological constraints (first object of the list)
# and the classification table (second object of the list).
\dontrun{
data(TopoConstraints)
# The table details 22 topological constraints overall,
# including constraints at the Family, Order, Series, Subdivision, Division,
# Subsection, Subcohort, Cohort, Supercohort, Infraclass, and Subclass.
#
# The classification table includes 16 species from the New Zealand marine
# ray-finned fish species list.
# Create a temporary folder to store the outputs of the function.
dir.create("TempDir.TopoConstraints")
# Run the function considering all the constraints.
BackBoneTreeAll = ConstraintTaxo2newick(inputTaxo = TopoConstraints[[2]],
inputConst = TopoConstraints[[1]], outputNewick = "TempDir.TopoConstraints/BackboneTreeAll")
# Plot the constraining tree (the branch length do not matter, only the topology matters).
plot(BackBoneTreeAll[[2]], cex=0.8)
# Use only the constraints at the Family level.
FamilyConst=TopoConstraints[[1]][TopoConstraints[[1]][,1]=="Family",]
# Run the function considering only the constraints at the family level.
BackBoneTreeFamily = ConstraintTaxo2newick(inputTaxo = TopoConstraints[[2]],
inputConst = FamilyConst, outputNewick = "TempDir.TopoConstraints/BackboneTreeFamily")
# Plot the constraining tree (the branch length does not matter,
# only the topology matters), notice that only constrained taxa
# are present on the guiding tree, the other (unconstrained) taxa will
# be positioned on the tree based on their molecular affinities.
plot(BackBoneTreeFamily[[2]], cex=0.8)
# Use only the constraints at the Family and Series levels.
FamilySeriesConst=TopoConstraints[[1]][c(which(TopoConstraints[[1]][,1] == "Family"),
which(TopoConstraints[[1]][,1] == "Series")),]
# Run the function considering only the constraints at the Family and Order levels.
BackBoneTreeFamilySeries = ConstraintTaxo2newick(inputTaxo = TopoConstraints[[2]],
inputConst = FamilySeriesConst, outputNewick = "TempDir.TopoConstraints/BackboneTreeFamilySeries")
# Plot the constraining tree (the branch length does not matter,
# only the topology matters). Notice that only constrained taxa
# are present on the guiding tree, the other (unconstrained) taxa will
# be positioned on the tree based on their molecular affinities.
plot(BackBoneTreeFamilySeries[[2]], cex=0.8)
# To remove the files created while running the example do the following:
unlink("TempDir.TopoConstraints", recursive = TRUE)
}
}
|
/man/ConstraintTaxo2newick.Rd
|
no_license
|
dvdeme/regPhylo
|
R
| false
| true
| 4,829
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConstraintTaxo2newick.R
\name{ConstraintTaxo2newick}
\alias{ConstraintTaxo2newick}
\title{Build a multifurcating topological constraint tree for RAxML}
\usage{
ConstraintTaxo2newick(
inputTaxo = NULL,
Taxo.Hier = "Phylum2Genus",
inputConst = NULL,
outputNewick = NULL
)
}
\arguments{
\item{inputTaxo}{a classification table, the first column is the species name (or
the name used as tip.label by the phylogenetic tree), followed by
the different hierarchical levels of the Linnaean classification
in the subsequent columns.}
\item{Taxo.Hier}{order of entry for the different hierarchical levels of the
Linnaean classification. "Phylum2Genus" is used by default, where the second column is the highest
level (e.g. Phylum) and the last column is the lowest classification level (e.g. Genus).
The reverse can also be used, using "Genus2Phylum" (i.e. the second column contains the lowest
level and the last column contains the highest).}
\item{inputConst}{a two column table: the first column refers to the hierarchical
level of the topological constraints (e.g. 'Family', or 'Order', or
'Subdivision'; note that the names of the hierarchical levels must be the same as
the headers of the classification table); the second column contains the name
of the taxa to be constrained as monophyletic (e.g. 'Aplodactylidae',
Aulopiformes', 'Percomorphaceae').}
\item{outputNewick}{name of the output multifurcating newick tree that will
be exported in a .txt file (can also include the path to the folder).}
}
\value{
This function exports into the R environment a list of two objects. The first
object is the taxonomic table modified to include the constraints, and the
second object is the multifurcating tree converted into a 'phylo' object.
The function also exports a newick tree as a txt document that can be used to constrain
the topology in RAxML.
}
\description{
This function builds a multifurcating phylogenetic tree
from a classification table and a table of phylogenetic constraints ready to be
used by RAxML as a constraint tree (option -g in RAxML) to guide the
reconstruction of the molecular phylogenetic tree.
}
\details{
Warning: branch lengths of the multifurcating tree are misleading, only the
topology matters.
}
\examples{
# Load the table listing the topological constraints (first object of the list)
# and the classification table (second object of the list).
\dontrun{
data(TopoConstraints)
# The table details 22 topological constraints overall,
# including constraints at the Family, Order, Series, Subdivision, Division,
# Subsection, Subcohort, Cohort, Supercohort, Infraclass, and Subclass.
#
# The classification table includes 16 species from the New Zealand marine
# ray-finned fish species list.
# Create a temporary folder to store the outputs of the function.
dir.create("TempDir.TopoConstraints")
# Run the function considering all the constraints.
BackBoneTreeAll = ConstraintTaxo2newick(inputTaxo = TopoConstraints[[2]],
inputConst = TopoConstraints[[1]], outputNewick = "TempDir.TopoConstraints/BackboneTreeAll")
# Plot the constraining tree (the branch lengths do not matter, only the topology matters).
plot(BackBoneTreeAll[[2]], cex=0.8)
# Use only the constraints at the Family level.
FamilyConst=TopoConstraints[[1]][TopoConstraints[[1]][,1]=="Family",]
# Run the function considering only the constraints at the family level.
BackBoneTreeFamily = ConstraintTaxo2newick(inputTaxo = TopoConstraints[[2]],
inputConst = FamilyConst, outputNewick = "TempDir.TopoConstraints/BackboneTreeFamily")
# Plot the constraining tree (the branch length does not matter,
# only the topology matters), notice that only constrained taxa
# are present on the guiding tree, the other (unconstrained) taxa will
# be positioned on the tree based on their molecular affinities.
plot(BackBoneTreeFamily[[2]], cex=0.8)
# Use only the constraints at the Family and Series levels.
FamilySeriesConst=TopoConstraints[[1]][c(which(TopoConstraints[[1]][,1] == "Family"),
which(TopoConstraints[[1]][,1] == "Series")),]
# Run the function considering only the constraints at the Family and Series levels.
BackBoneTreeFamilySeries = ConstraintTaxo2newick(inputTaxo = TopoConstraints[[2]],
inputConst = FamilySeriesConst, outputNewick = "TempDir.TopoConstraints/BackboneTreeFamilySeries")
# Plot the constraining tree (the branch length does not matter,
# only the topology matters). Notice that only constrained taxa
# are present on the guiding tree, the other (unconstrained) taxa will
# be positioned on the tree based on their molecular affinities.
plot(BackBoneTreeFamilySeries[[2]], cex=0.8)
# To remove the files created while running the example do the following:
unlink("TempDir.TopoConstraints", recursive = TRUE)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_credibility_trends.R
\name{get_credibility_trends}
\alias{get_credibility_trends}
\title{Get data credibility and trends from the BBS analysis results}
\usage{
get_credibility_trends(url = "https://www.mbr-pwrc.usgs.gov/cgi-bin/atlasa15.pl?FLA&2&15&csrfmiddlewaretoken=3YKakk7LxT2ki6NSpl4mstudYCqdW02C")
}
\arguments{
\item{url}{Web address of the region or state for which species' population trends and credibility ratings are to be scraped. The default URL points at the BBS results for Florida, USA (region code 'FLA').}
}
\description{
The BBS provides regional and state-wide species population trend estimates. They also include a 'data credibility' rating (low, medium, or high deficiencies) for each species-region combination. This function uses web scraping to capture population trends and their credibility ratings.
}
|
/man/get_credibility_trends.Rd
|
permissive
|
ethanwhite/bbsAssistant
|
R
| false
| true
| 869
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_credibility_trends.R
\name{get_credibility_trends}
\alias{get_credibility_trends}
\title{Get data credibility and trends from the BBS analysis results}
\usage{
get_credibility_trends(url = "https://www.mbr-pwrc.usgs.gov/cgi-bin/atlasa15.pl?FLA&2&15&csrfmiddlewaretoken=3YKakk7LxT2ki6NSpl4mstudYCqdW02C")
}
\arguments{
\item{url}{Web address of the region or state for which species' population trends and credibility ratings are to be scraped. Default example is 'Kansas,USA'.}
}
\description{
The BBS provides regional and state-wide species population trend estimates. They also include a 'data credibility' rating (low, medium, or high deficiencies) for each species-region combination. This function uses web scraping to capture population trends and their credibility ratings.
}
|
#' Creates bootstrap samples of the parameters
#'
#' \code{parbootstrap} creates bootstrap samples of the parameters.
#' @param qp output from quickpsy
#' @export
parbootstrap <- function(qp) {
  # Use the user-supplied initial parameter values only when they were
  # actually set and are a structured (non-atomic) object; otherwise fall
  # back to the fitted parameters.
  # Bug fix: the original tested is.atomic(parini) before 'parini' had been
  # assigned, which errors (or silently picks up a global of the same name)
  # whenever qp$pariniset is TRUE; it must inspect qp$parini.
  if (qp$pariniset && !is.atomic(qp$parini)) {
    parini <- qp$parini
    pariniset <- TRUE
  } else {
    parini <- qp$par
    pariniset <- FALSE
  }
  # Group the bootstrap averages by the experimental conditions (if any)
  # and by the bootstrap sample number.
  if (length(qp$groups) == 0)
    avboot <- qp$avbootstrap %>% group_by_('sample')
  else
    avboot <- qp$avbootstrap %>%
      group_by_(.dots = c(qp$groups, 'sample'))
  # Refit within each group/sample to obtain the bootstrapped parameters.
  avboot %>%
    do(one_parameters(., qp$x, qp$k, qp$n, qp$psyfunguesslapses, qp$funname,
                      parini, pariniset, qp$guess, qp$lapses,
                      qp$optimization, qp$groups))
}
|
/R/parbootstrap.R
|
no_license
|
cran/quickpsy
|
R
| false
| false
| 807
|
r
|
#' Creates bootstrap samples of the parameters
#'
#' \code{parbootstrap} creates bootstrap samples of the parameters.
#' @param qp output from quickpsy
#' @export
parbootstrap <- function(qp) {
if (qp$pariniset) {
if (is.atomic(parini)) {
parini <- qp$par
pariniset <- FALSE
}
else{
parini <- qp$parini
pariniset <- TRUE
}
}
else {
parini <- qp$par
pariniset <- FALSE
}
if (length(qp$groups) == 0)
avboot <- qp$avbootstrap %>% group_by_('sample')
else
avboot <- qp$avbootstrap %>%
group_by_(.dots = c(qp$groups, 'sample'))
avboot %>%
do(one_parameters(., qp$x, qp$k, qp$n, qp$psyfunguesslapses, qp$funname,
parini, pariniset, qp$guess, qp$lapses,
qp$optimization, qp$groups))
}
|
## Put comments here that give an overall description of what your
## functions do
## Construct a matrix wrapper that can memoise its inverse.
## Returns a list of accessor closures: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse. Replacing the matrix via
## set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse: NULL until stored, reset whenever the matrix changes.
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## Return the inverse of the special "cache matrix" object 'x'.
## On the first call the inverse is computed with solve() and stored in the
## cache; later calls announce "getting cached data" and reuse it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
thxthn/ProgrammingAssignment2
|
R
| false
| false
| 1,208
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
## Initilize the result vector
inv <- NULL
## Set the value of the Cache Matrix
set <- function(y) {
x <<- y
inv <<- NULL
}
## Get the value of the Cache Matrix
get <- function() x
## Set the inverse of the Cache Matrix
setinverse <- function(inverse) inv <<- inverse
## Get the inverse of the Cache Matrix
getinverse <- function() inv
## Store the functions into a list to be used easily
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Initilize the result
inv <- x$getinverse()
## if there is a cached inverse, return it
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
## Otherwise, read x
data <- x$get()
## Take the inverse of x
inv <- solve(data, ...)
## Store the inverse
x$setinverse(inv)
## Return a matrix that is the inverse of 'x'
inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flextable_sizes.R
\name{height}
\alias{height}
\alias{height_all}
\title{Set flextable rows height}
\usage{
height(x, i = NULL, height, part = "body")
height_all(x, height, part = "all")
}
\arguments{
\item{x}{flextable object}
\item{i}{rows selection}
\item{height}{height in inches}
\item{part}{partname of the table}
}
\description{
control rows height for a part
of the flextable.
}
\section{height_all}{
\code{height_all} is a convenient function for
setting the same height to all rows (selected
with argument \code{part}).
}
\examples{
ft <- flextable(iris)
ft <- height(ft, height = .3)
ft <- flextable(iris)
ft <- height_all(ft, height = .3)
}
\seealso{
Other flextable dimensions:
\code{\link{autofit}()},
\code{\link{dim.flextable}()},
\code{\link{dim_pretty}()},
\code{\link{fit_to_width}()},
\code{\link{flextable_dim}()},
\code{\link{width}()}
}
\concept{flextable dimensions}
|
/man/height.Rd
|
no_license
|
pvictor/flextable
|
R
| false
| true
| 980
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flextable_sizes.R
\name{height}
\alias{height}
\alias{height_all}
\title{Set flextable rows height}
\usage{
height(x, i = NULL, height, part = "body")
height_all(x, height, part = "all")
}
\arguments{
\item{x}{flextable object}
\item{i}{rows selection}
\item{height}{height in inches}
\item{part}{partname of the table}
}
\description{
control rows height for a part
of the flextable.
}
\section{height_all}{
\code{height_all} is a convenient function for
setting the same height to all rows (selected
with argument \code{part}).
}
\examples{
ft <- flextable(iris)
ft <- height(ft, height = .3)
ft <- flextable(iris)
ft <- height_all(ft, height = .3)
}
\seealso{
Other flextable dimensions:
\code{\link{autofit}()},
\code{\link{dim.flextable}()},
\code{\link{dim_pretty}()},
\code{\link{fit_to_width}()},
\code{\link{flextable_dim}()},
\code{\link{width}()}
}
\concept{flextable dimensions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{sass_options}
\alias{sass_options}
\title{Compiler Options for Sass}
\usage{
sass_options(precision = 5, output_style = "expanded",
indented_syntax = FALSE, include_path = "",
source_comments = FALSE, indent_type = "space", indent_width = 2,
linefeed = "lf", output_path = "", source_map_file = "",
source_map_root = "", source_map_embed = FALSE,
source_map_contents = FALSE, omit_source_map_url = FALSE)
}
\arguments{
\item{precision}{Number of decimal places.}
\item{output_style}{Bracketing and formatting style of the CSS output.
Possible styles: \code{"nested"}, \code{"expanded"}, \code{"compact"}, and
\code{"compressed"}.}
\item{indented_syntax}{Enables the compiler to parse Sass Indented Syntax in
strings. Note that the compiler automatically overrides this option to
\code{TRUE} or \code{FALSE} for files with .sass and .scss file extensions
respectively.}
\item{include_path}{Vector of paths used to resolve \code{@import}. Multiple
paths are possible using a character vector of paths.}
\item{source_comments}{Annotates CSS output with line and file comments from
Sass file for debugging.}
\item{indent_type}{Specifies the indent type as \code{'space'} or
\code{'tab'}.}
\item{indent_width}{Number of tabs or spaces used for indentation. Maximum
10.}
\item{linefeed}{Specifies how new lines should be delimited. Possible values:
\code{'lf'}, \code{'cr'}, \code{'lfcr'}, and \code{'crlf'}.}
\item{output_path}{Specifies the location of the output file. Note: this
option will not write the file on disk. It is only for internal reference
with the source map.}
\item{source_map_file}{Specifies the location for Sass to write the source
map.}
\item{source_map_root}{Value will be included as source root in the source
map information.}
\item{source_map_embed}{Embeds the source map as a data URI.}
\item{source_map_contents}{Includes the contents in the source map
information.}
\item{omit_source_map_url}{Disable the inclusion of source map information in
the output file. Note: must specify \code{output_path} when \code{TRUE}.}
}
\value{
List of Sass compiler options to be used with
\code{\link{compile_sass}}.
}
\description{
Set compiler options for Sass. Used with \code{\link{compile_sass}}.
}
\examples{
compile_sass(
text = "foo { margin: 122px * .3; }",
options = sass_options(output_style = "compact")
)
}
|
/man/sass_options.Rd
|
permissive
|
schloerke/sass
|
R
| false
| true
| 2,453
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{sass_options}
\alias{sass_options}
\title{Compiler Options for Sass}
\usage{
sass_options(precision = 5, output_style = "expanded",
indented_syntax = FALSE, include_path = "",
source_comments = FALSE, indent_type = "space", indent_width = 2,
linefeed = "lf", output_path = "", source_map_file = "",
source_map_root = "", source_map_embed = FALSE,
source_map_contents = FALSE, omit_source_map_url = FALSE)
}
\arguments{
\item{precision}{Number of decimal places.}
\item{output_style}{Bracketing and formatting style of the CSS output.
Possible styles: \code{"nested"}, \code{"expanded"}, \code{"compact"}, and
\code{"compressed"}.}
\item{indented_syntax}{Enables the compiler to parse Sass Indented Syntax in
strings. Note that the compiler automatically overrides this option to
\code{TRUE} or \code{FALSE} for files with .sass and .scss file extensions
respectively.}
\item{include_path}{Vector of paths used to resolve \code{@import}. Multiple
paths are possible using a character vector of paths.}
\item{source_comments}{Annotates CSS output with line and file comments from
Sass file for debugging.}
\item{indent_type}{Specifies the indent type as \code{'space'} or
\code{'tab'}.}
\item{indent_width}{Number of tabs or spaces used for indentation. Maximum
10.}
\item{linefeed}{Specifies how new lines should be delimited. Possible values:
\code{'lf'}, \code{'cr'}, \code{'lfcr'}, and \code{'crlf'}.}
\item{output_path}{Specifies the location of the output file. Note: this
option will not write the file on disk. It is only for internal reference
with the source map.}
\item{source_map_file}{Specifies the location for Sass to write the source
map.}
\item{source_map_root}{Value will be included as source root in the source
map information.}
\item{source_map_embed}{Embeds the source map as a data URI.}
\item{source_map_contents}{Includes the contents in the source map
information.}
\item{omit_source_map_url}{Disable the inclusion of source map information in
the output file. Note: must specify \code{output_path} when \code{TRUE}.}
}
\value{
List of Sass compiler options to be used with
\code{\link{compile_sass}}.
}
\description{
Set compiler options for Sass. Used with \code{\link{compile_sass}}.
}
\examples{
compile_sass(
text = "foo { margin: 122px * .3; }",
options = sass_options(output_style = "compact")
)
}
|
# Draw n lowercase letters uniformly at random, with replacement.
#
# Args:
#   n: number of letters to sample.
# Returns:
#   A character vector of length n containing letters from "a" to "z".
alphabet <-
function(n) {
  # 'letters' is R's built-in constant c("a", ..., "z"); no need to
  # spell the alphabet out by hand. TRUE instead of the reassignable T.
  sample(letters, n, replace = TRUE)
}
|
/R/alphabet.R
|
no_license
|
dtreisman/Alphabet
|
R
| false
| false
| 213
|
r
|
alphabet <-
function(n) {
letters <- c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z")
sample(letters, n, replace = T)
}
|
# Auto-generated fuzz-test input (appears to come from RcppDeepState) for the
# internal icosa C++ routine icosa:::xxxxyyyyzzzz_.
# 'f' is a 10x10 numeric matrix (.Dim = c(10L, 10L)) filled with extreme
# magnitudes (values down to ~1e-290 and up to ~1e+303); 'v' is a 1x3 row
# vector containing a finite value, -Inf and NaN -- deliberately
# pathological inputs to exercise numeric edge cases.
testlist <- list(f = structure(c(9.41578138663341e-290, 1.56770832976572e-142, 1.55224558988362e-270, 1.51948358578169e-262, 6.52422101813805e+104, 2.82187483866939e-13, 5.84306544121847e-111, 2.30980258295413e-182, 7.7138684480244e+222, 7.63196218141027e-179, 1.5169999407476e-149, 8.84503376600421e+276, 8.24884434978864e+80, 6.98711896721922e-223, 3.06215421439979e-53, 1.47931183638417e-54, 5.23903260497259e+92, 3.77278919375484e+117, 1.31852720680206e-92, 3.87494901729954e-252, 4.06346240791837e-59, 3.52963133774942e-120, 3.2583626303841e+189, 2.55974175423769e-54, 5.13409978679011e-200, 3.78577134867092e-20, 1.67916440514118e+53, 1.94040154058885e+23, 4.22494255461838e+112, 1.37322168644335e+180, 3.09178888012862e-63, 4.15406653965674e+299, 5.54631615997981e+126, 1.10287023948386e+163, 6.83110214643533e+156, 1.34693537674841e-281, 2.48646550692624e-24, 1.15215786912982e-54, 3.12940686153516e+182, 1.67814989841151e-244, 1.31545419953992e+119, 2.17840625421554e-188, 3.2817955527698e-228, 9.24462841296526e-269, 3.61640802511807e-138, 5.56262456606563e+109, 1.19057123213934e-138, 5.82654064337691e+204, 8.58756681157123e+51, 2.09165528866342e+130, 1.07433804645239e+96, 9.27460001364049e-43, 1.39177659084603e-50, 1.19456164871963e-123, 5.61827089511087e+110, 1.02828862471477e+24, 8.1232206665293e+215, 7.13126394354316e-113, 2.15259518937072e-192, 7.59210260176964e-108, 1.22472620008728e+289, 1.54444331246716e+99, 1.15360506611592e-33, 9.11476566499204e-58, 1.80483739573767e-156, 1.39326849264925e-36, 3.48354762517695e+255, 1.20700687787189e+126, 1.53032245185955e-27, 4.91542968496158e-166, 1.24279905288058e-267, 8.47469211199934e+152, 1.97320653469229e+115, 7.64698378903856e-93, 1.76215164845093e-16, 4.97226305365223e+295, 2.5052630118839e-188, 1.35406190526319e+52, 1.65192878120407e+49, 2.2289329856724e-33, 6.07866588705862e-110, 6.9641909804063e-125, 2.89301997698689e+22, 9.78958800543847e+117, 9.62174506775901e+283, 2.50042891555153e+303, 1.69240313679047e+200, 
8.34715733039902e-268, 6.98348600431917e-217, 1.6639332501716e-09, 6.51576676372527e-208, 3.23616378445778e+80, 1.55125952520241e+215, 4.50248588047856, 1.17611175783937e+269, 1.65450211778226e+112, 2.43541603291262e+236, 3.73517734990962e+57, 4.4701153699702e+199, 1.77430475709026e+84), .Dim = c(10L, 10L )), v = structure(c(2.58035513749295e-206, -Inf, NaN), .Dim = c(1L, 3L)))
# Invoke the routine with the fuzzed argument list and dump the structure of
# whatever comes back (used to detect crashes / undefined behaviour).
result <- do.call(icosa:::xxxxyyyyzzzz_,testlist)
str(result)
|
/issuestests/icosa/inst/testfiles/xxxxyyyyzzzz_/xxxxyyyyzzzz__output/log_d54768b7cb0aa48ca59c3cb777a9613367bd064c/xxxxyyyyzzzz_-test.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false
| false
| 2,472
|
r
|
testlist <- list(f = structure(c(9.41578138663341e-290, 1.56770832976572e-142, 1.55224558988362e-270, 1.51948358578169e-262, 6.52422101813805e+104, 2.82187483866939e-13, 5.84306544121847e-111, 2.30980258295413e-182, 7.7138684480244e+222, 7.63196218141027e-179, 1.5169999407476e-149, 8.84503376600421e+276, 8.24884434978864e+80, 6.98711896721922e-223, 3.06215421439979e-53, 1.47931183638417e-54, 5.23903260497259e+92, 3.77278919375484e+117, 1.31852720680206e-92, 3.87494901729954e-252, 4.06346240791837e-59, 3.52963133774942e-120, 3.2583626303841e+189, 2.55974175423769e-54, 5.13409978679011e-200, 3.78577134867092e-20, 1.67916440514118e+53, 1.94040154058885e+23, 4.22494255461838e+112, 1.37322168644335e+180, 3.09178888012862e-63, 4.15406653965674e+299, 5.54631615997981e+126, 1.10287023948386e+163, 6.83110214643533e+156, 1.34693537674841e-281, 2.48646550692624e-24, 1.15215786912982e-54, 3.12940686153516e+182, 1.67814989841151e-244, 1.31545419953992e+119, 2.17840625421554e-188, 3.2817955527698e-228, 9.24462841296526e-269, 3.61640802511807e-138, 5.56262456606563e+109, 1.19057123213934e-138, 5.82654064337691e+204, 8.58756681157123e+51, 2.09165528866342e+130, 1.07433804645239e+96, 9.27460001364049e-43, 1.39177659084603e-50, 1.19456164871963e-123, 5.61827089511087e+110, 1.02828862471477e+24, 8.1232206665293e+215, 7.13126394354316e-113, 2.15259518937072e-192, 7.59210260176964e-108, 1.22472620008728e+289, 1.54444331246716e+99, 1.15360506611592e-33, 9.11476566499204e-58, 1.80483739573767e-156, 1.39326849264925e-36, 3.48354762517695e+255, 1.20700687787189e+126, 1.53032245185955e-27, 4.91542968496158e-166, 1.24279905288058e-267, 8.47469211199934e+152, 1.97320653469229e+115, 7.64698378903856e-93, 1.76215164845093e-16, 4.97226305365223e+295, 2.5052630118839e-188, 1.35406190526319e+52, 1.65192878120407e+49, 2.2289329856724e-33, 6.07866588705862e-110, 6.9641909804063e-125, 2.89301997698689e+22, 9.78958800543847e+117, 9.62174506775901e+283, 2.50042891555153e+303, 1.69240313679047e+200, 
8.34715733039902e-268, 6.98348600431917e-217, 1.6639332501716e-09, 6.51576676372527e-208, 3.23616378445778e+80, 1.55125952520241e+215, 4.50248588047856, 1.17611175783937e+269, 1.65450211778226e+112, 2.43541603291262e+236, 3.73517734990962e+57, 4.4701153699702e+199, 1.77430475709026e+84), .Dim = c(10L, 10L )), v = structure(c(2.58035513749295e-206, -Inf, NaN), .Dim = c(1L, 3L)))
result <- do.call(icosa:::xxxxyyyyzzzz_,testlist)
str(result)
|
# evaluate xgboost on benchmark datasets
# Cross-validated evaluation of xgboost (tuned via caret's inner 5-fold CV)
# on one UCI benchmark dataset; records the per-fold test error and the
# selected colsample_bytree value, saving results after every fold.
# NOTE(review): rm(list=ls()) and hard-coded home-directory paths make this
# a machine-specific, one-off experiment script.
rm(list=ls())
options(scipen = 999)
rerfPath <- "~/work/tyler/"
dataPath <- "~/work/tyler/Data/uci/processed/"
library(xgboost)
library(caret)
library(plyr)
# GetFolds() (project utility) reads the pre-computed CV partition file.
source(paste0(rerfPath, "RandomerForest/R/Code/Utils/GetFolds.R"))
testError <- list()
colSample <- list()
dataSet <- "chess_krvk"
fold <- GetFolds(paste0(dataPath, "cv_partitions/", dataSet, "_partitions.txt"))
nFolds <- length(fold)
# Last column of the CSV is the class label; the rest are features.
X <- as.matrix(read.table(paste0(dataPath, "data/", dataSet, ".csv"), header = F, sep = ",", quote = "", row.names = NULL))
catMap <- NULL
p <- ncol(X) - 1L
# Shift labels to start at 1 and prefix with "Y" so they form
# syntactically valid factor level names (required by caret with
# classProbs = TRUE).
Y <- as.integer(X[, p + 1L]) + 1L
Y <- paste0("Y",as.character(Y))
X <- X[, -(p + 1L)]
# Standardise features to zero mean / unit variance.
X <- scale(X)
X[] <- apply(X, 2, as.numeric)
testError[[dataSet]] <- numeric(nFolds)
colSample[[dataSet]] <- integer(nFolds)
# loop over folds
for (k in seq.int(nFolds)) {
  print(paste0("fold ", k))
  # Fold k is held out for testing; all other folds form the training set.
  trainIdx <- unlist(fold[-k])
  testIdx <- fold[[k]]
  # evaluate models
  # Only try subsampling rates below 1 when there is enough training data.
  if (length(trainIdx) < 100) {
    subSample = 1
  } else {
    subSample = c(0.5,0.75,1)
  }
  # Hyperparameter grid searched by caret's inner cross-validation.
  xgb_grid <- expand.grid(
    nrounds = c(100, 1000),
    eta = c(0.001, 0.01),
    subsample = subSample,
    colsample_bytree = c(0.4,0.6,0.8,1),
    min_child_weight = 1,
    max_depth = c(4, 6, 8, 10, 100000),
    gamma = 0
  )
  nClasses <- length(unique(Y[trainIdx]))
  if (nClasses > 2) {
    # Multiclass problem: softprob objective, Accuracy as tuning metric.
    # pack the training control parameters
    xgb_trcontrol <- trainControl(
      method = "cv",
      number = 5,
      verboseIter = FALSE,
      returnData = FALSE,
      returnResamp = "all", # save losses across all models
      classProbs = TRUE, # set to TRUE for AUC to be computed
      allowParallel = TRUE
    )
    obj <- "multi:softprob"
    Met <- "Accuracy"
    # train the model for each parameter combination in the grid, using CV to evaluate
    xgb_train <- train(
      x = X[trainIdx, ],
      y = as.factor(Y[trainIdx]),
      trControl = xgb_trcontrol,
      tuneGrid = xgb_grid,
      method = "xgbTree",
      objective = obj,
      num_class = nClasses,
      metric = Met,
      nthread = 24
    )
  } else {
    # Binary problem: logistic objective, ROC AUC as tuning metric.
    # pack the training control parameters
    xgb_trcontrol <- trainControl(
      method = "cv",
      number = 5,
      verboseIter = FALSE,
      returnData = FALSE,
      returnResamp = "all", # save losses across all models
      classProbs = TRUE, # set to TRUE for AUC to be computed
      summaryFunction = twoClassSummary,
      allowParallel = TRUE
    )
    obj <- "binary:logistic"
    Met <- "ROC"
    # train the model for each parameter combination in the grid, using CV to evaluate
    xgb_train <- train(
      x = X[trainIdx, ],
      y = as.factor(Y[trainIdx]),
      trControl = xgb_trcontrol,
      tuneGrid = xgb_grid,
      method = "xgbTree",
      objective = obj,
      metric = Met,
      nthread = 24
    )
  }
  # Score the held-out fold with the tuned final model.
  scores <- predict(xgb_train$finalModel,X[testIdx, ])
  if(nClasses > 2) {
    # multi:softprob returns one flat vector of per-class probabilities
    # per observation; reshape into an n x nClasses matrix.
    scores <- matrix(scores, nrow = length(testIdx), ncol = nClasses, byrow = TRUE)
  } else {
    # Single probability per observation; build a two-column score matrix.
    # NOTE(review): this assumes column 1 corresponds to
    # xgb_train$levels[1] -- verify the level ordering against caret.
    scores <- cbind(scores, 1 - scores)
  }
  # Predicted class = level with the highest score; error = misclassification rate.
  predictions <- xgb_train$levels[max.col(scores)]
  testError[[dataSet]][k] <- sum(predictions != Y[testIdx])/length(testIdx)
  colSample[[dataSet]][k] <- xgb_train$finalModel$tuneValue$colsample_bytree
  # Overwrite the results file after every fold so partial runs are preserved.
  save(testError, colSample, file = paste0(rerfPath, "RandomerForest/R/Results/2018.01.31/", dataSet, "_xgb_2018_01_31.RData"))
}
|
/R/Code/Experiments/2018.01.31/task19.R
|
no_license
|
shlpu/RandomerForest
|
R
| false
| false
| 3,622
|
r
|
# evaluate xgboost on benchmark datasets
rm(list=ls())
options(scipen = 999)
rerfPath <- "~/work/tyler/"
dataPath <- "~/work/tyler/Data/uci/processed/"
library(xgboost)
library(caret)
library(plyr)
source(paste0(rerfPath, "RandomerForest/R/Code/Utils/GetFolds.R"))
testError <- list()
colSample <- list()
dataSet <- "chess_krvk"
fold <- GetFolds(paste0(dataPath, "cv_partitions/", dataSet, "_partitions.txt"))
nFolds <- length(fold)
X <- as.matrix(read.table(paste0(dataPath, "data/", dataSet, ".csv"), header = F, sep = ",", quote = "", row.names = NULL))
catMap <- NULL
p <- ncol(X) - 1L
Y <- as.integer(X[, p + 1L]) + 1L
Y <- paste0("Y",as.character(Y))
X <- X[, -(p + 1L)]
X <- scale(X)
X[] <- apply(X, 2, as.numeric)
testError[[dataSet]] <- numeric(nFolds)
colSample[[dataSet]] <- integer(nFolds)
# loop over folds
for (k in seq.int(nFolds)) {
print(paste0("fold ", k))
trainIdx <- unlist(fold[-k])
testIdx <- fold[[k]]
# evaluate models
if (length(trainIdx) < 100) {
subSample = 1
} else {
subSample = c(0.5,0.75,1)
}
xgb_grid <- expand.grid(
nrounds = c(100, 1000),
eta = c(0.001, 0.01),
subsample = subSample,
colsample_bytree = c(0.4,0.6,0.8,1),
min_child_weight = 1,
max_depth = c(4, 6, 8, 10, 100000),
gamma = 0
)
nClasses <- length(unique(Y[trainIdx]))
if (nClasses > 2) {
# pack the training control parameters
xgb_trcontrol <- trainControl(
method = "cv",
number = 5,
verboseIter = FALSE,
returnData = FALSE,
returnResamp = "all", # save losses across all models
classProbs = TRUE, # set to TRUE for AUC to be computed
allowParallel = TRUE
)
obj <- "multi:softprob"
Met <- "Accuracy"
# train the model for each parameter combination in the grid, using CV to evaluate
xgb_train <- train(
x = X[trainIdx, ],
y = as.factor(Y[trainIdx]),
trControl = xgb_trcontrol,
tuneGrid = xgb_grid,
method = "xgbTree",
objective = obj,
num_class = nClasses,
metric = Met,
nthread = 24
)
} else {
# pack the training control parameters
xgb_trcontrol <- trainControl(
method = "cv",
number = 5,
verboseIter = FALSE,
returnData = FALSE,
returnResamp = "all", # save losses across all models
classProbs = TRUE, # set to TRUE for AUC to be computed
summaryFunction = twoClassSummary,
allowParallel = TRUE
)
obj <- "binary:logistic"
Met <- "ROC"
# train the model for each parameter combination in the grid, using CV to evaluate
xgb_train <- train(
x = X[trainIdx, ],
y = as.factor(Y[trainIdx]),
trControl = xgb_trcontrol,
tuneGrid = xgb_grid,
method = "xgbTree",
objective = obj,
metric = Met,
nthread = 24
)
}
scores <- predict(xgb_train$finalModel,X[testIdx, ])
if(nClasses > 2) {
scores <- matrix(scores, nrow = length(testIdx), ncol = nClasses, byrow = TRUE)
} else {
scores <- cbind(scores, 1 - scores)
}
predictions <- xgb_train$levels[max.col(scores)]
testError[[dataSet]][k] <- sum(predictions != Y[testIdx])/length(testIdx)
colSample[[dataSet]][k] <- xgb_train$finalModel$tuneValue$colsample_bytree
save(testError, colSample, file = paste0(rerfPath, "RandomerForest/R/Results/2018.01.31/", dataSet, "_xgb_2018_01_31.RData"))
}
|
\name{dm2.refGene.LENGTH}
\docType{data}
\alias{dm2.refGene.LENGTH}
\title{Transcript length data for the organism dm}
\description{dm2.refGene.LENGTH is an R object which maps transcripts to the length (in bp) of their mature mRNA transcripts. Where available, it will also provide the mapping between a gene ID and its associated transcripts. The data is obtained from the UCSC table browser (http://genome.ucsc.edu/cgi-bin/hgTables) using the refGene table.
The data file was made by calling downloadLengthFromUCSC(dm2, refGene) on the date on which the package was last updated.}
\seealso{
\code{\link{downloadLengthFromUCSC}}}
\examples{
data(dm2.refGene.LENGTH)
head(dm2.refGene.LENGTH)
}
\keyword{datasets}
|
/man/dm2.refGene.LENGTH.Rd
|
no_license
|
nadiadavidson/geneLenDataBase
|
R
| false
| false
| 717
|
rd
|
\name{dm2.refGene.LENGTH}
\docType{data}
\alias{dm2.refGene.LENGTH}
\title{Transcript length data for the organism dm}
\description{dm2.refGene.LENGTH is an R object which maps transcripts to the length (in bp) of their mature mRNA transcripts. Where available, it will also provide the mapping between a gene ID and its associated transcripts. The data is obtained from the UCSC table browser (http://genome.ucsc.edu/cgi-bin/hgTables) using the refGene table.
The data file was made by calling downloadLengthFromUCSC(dm2, refGene) on the date on which the package was last updated.}
\seealso{
\code{\link{downloadLengthFromUCSC}}}
\examples{
data(dm2.refGene.LENGTH)
head(dm2.refGene.LENGTH)
}
\keyword{datasets}
|
#scoringKey.R
#Author: Morgan Strom
#Date: 18-08-2015
#Function to create scoring key from dataframe with scoring key
#Input:
#Data frame "key.df" containing item ID and keys
#(Name of Item ID column = "id", Key column = "key")
#Output:
#List containing scoring key
scoringKey <- function(key.df) {
  # Work on the keys as character strings (they may arrive as factors).
  keys <- as.character(key.df$key)
  # Open-form items encode their correct interval as "RANGE: <min> - <max>".
  is_range <- grepl("RANGE:", keys)
  # Preallocate the result; seq_along() also handles an empty key.df safely
  # (the original 1:nrow(key.df) broke on zero rows).
  key <- vector("list", length(keys))
  for (i in seq_along(keys)) {
    if (is_range[i]) {
      # Split at the dash: characters 8..(dash - 1) hold the minimum
      # (after the 7-character "RANGE: " prefix); the remainder holds the
      # maximum. nchar() replaces the original hard-coded stop = 100.
      dash <- regexpr("-", keys[i])
      key_min <- as.numeric(gsub(" ", "", substr(keys[i], 8, dash - 1)))
      key_max <- as.numeric(gsub(" ", "", substr(keys[i], dash + 1, nchar(keys[i]))))
      key[[i]] <- c(key_min, key_max)
    } else {
      # Closed-form item: the key is a single numeric answer.
      # (The original coerced RANGE strings too, producing NA warnings
      # before overwriting them; branching avoids that.)
      key[[i]] <- as.numeric(keys[i])
    }
  }
  # Name each entry after its item ID.
  names(key) <- as.character(key.df$id)
  # Return list
  key
}
|
/R/scoringKey.R
|
no_license
|
talentlens/talentlens
|
R
| false
| false
| 1,131
|
r
|
#scoringKey.R
#Author: Morgan Strom
#Date: 18-08-2015
#Function to create scoring key from dataframe with scoring key
#Input:
#Data frame "key.df" containing item ID and keys
#(Name of Item ID column = "id", Key column = "key")
#Output:
#List containing scoring key
scoringKey <- function(key.df) {
  #Make sure that the key column is in character format
  key.df$key <- as.character(key.df$key)
  #Identify scoring ranges for open form items (keys like "RANGE: 10 - 20")
  range <- grepl("RANGE:", key.df$key)
  #Extract minimum correct reply
  #start=8 skips the 7-character "RANGE: " prefix; stop is just before "-"
  min <- substr(key.df$key[range], start=8, stop=regexpr("-", key.df$key[range]) - 1)
  min <- as.numeric(gsub(" ", "", min))
  #Extract maximum correct reply (text after the "-" separator)
  max <- substr(key.df$key[range], start=regexpr("-", key.df$key[range]) + 1, stop=100)
  max <- as.numeric(gsub(" ", "", max))
  #Create key as list
  key <-list()
  #Closed form items
  #as.numeric() produces NA (with a warning) for the RANGE rows; those
  #placeholders are overwritten by the open-form loop below.
  #NOTE(review): 1:nrow(key.df) iterates over c(1, 0) when key.df has zero
  #rows and would fail on key[[0]]; seq_len(nrow(key.df)) would be safer.
  for(i in 1:nrow(key.df)) {
    key[[i]] = as.numeric(key.df[i, "key"])
  }
  #Open form items: replace NA placeholders with c(min, max)
  j <- 1
  for(i in which(range)) {
    key[[i]] = c(min[j], max[j])
    j <- j + 1
  }
  #Item names
  names(key) <- as.character(key.df$id)
  #Return the named scoring key list
  key
}
|
library('dplyr')
library('stringr')
library('ggplot2')
# Exploratory analysis of basecaller label files: each label file is assumed
# to contain space-separated rows of (start, end, label) — TODO confirm format.
# NOTE(review): hard-coded absolute path and setwd() make this script
# machine-specific; consider parameterizing the directory.
files <- list.files('/home/piotr/Uczelnia/PracaMagisterska/Dane/train')
setwd('/home/piotr/Uczelnia/PracaMagisterska/Dane/train')
files <- data.frame(filename=files, stringsAsFactors = FALSE)
# Keep only the files whose name contains 'label'
label_files <- files %>% filter(str_detect(filename, 'label'))
df_list <- list()
# Read every label file; V1/V2/V3 come from header = FALSE
data_list <- lapply(label_files$filename,function(x)
{
  read.csv(x, header = FALSE, sep=' ')
})
# Stack all files; .id='file_no' records which file each row came from
data_list <- bind_rows(data_list, .id='file_no')
# Segment length = end position minus start position
data_list$len <- data_list$V2 - data_list$V1
# Length statistics per label (V3)
data_list %>% group_by(V3) %>% summarize(Mean = mean(len), Std=sd(len))
# Length statistics per source file, sorted by mean length
read_means <- data_list %>% group_by(file_no) %>% summarise(Mean=mean(len), Std=sd(len), Max=max(len)) %>% arrange(desc(Mean))
# Upper-tail quantiles (90%..100%) of segment length
qs <- seq(0.9,1,0.01)
q_values <- quantile(data_list$len, qs)
|
/analysis.R
|
no_license
|
plubon/Basecaller
|
R
| false
| false
| 780
|
r
|
library('dplyr')
library('stringr')
library('ggplot2')
# Exploratory analysis of basecaller label files (duplicate copy of the
# script above in this dump). Label files are assumed to hold space-separated
# (start, end, label) rows — TODO confirm format.
files <- list.files('/home/piotr/Uczelnia/PracaMagisterska/Dane/train')
setwd('/home/piotr/Uczelnia/PracaMagisterska/Dane/train')
files <- data.frame(filename=files, stringsAsFactors = FALSE)
# Keep only files whose name contains 'label'
label_files <- files %>% filter(str_detect(filename, 'label'))
df_list <- list()
data_list <- lapply(label_files$filename,function(x)
{
  read.csv(x, header = FALSE, sep=' ')
})
# Stack all files; .id='file_no' tracks the source file of each row
data_list <- bind_rows(data_list, .id='file_no')
# Segment length = end - start
data_list$len <- data_list$V2 - data_list$V1
data_list %>% group_by(V3) %>% summarize(Mean = mean(len), Std=sd(len))
read_means <- data_list %>% group_by(file_no) %>% summarise(Mean=mean(len), Std=sd(len), Max=max(len)) %>% arrange(desc(Mean))
# Upper-tail (90%..100%) quantiles of segment length
qs <- seq(0.9,1,0.01)
q_values <- quantile(data_list$len, qs)
|
# Count duplicated digits across all rows and columns of the 9x9 `Solution`
# grid: for each line, table() tallies each value and (tally - 1) counts the
# repeats. A valid sudoku solution yields count == 0.
count <- 0
for(k in 1:9){
  row_r <- table(Solution[k,])
  col_r <- table(Solution[,k])
  count <- count + sum(row_r - 1) + sum(col_r - 1)
}
|
/test.R
|
no_license
|
Karl1992/Soduku-by-R
|
R
| false
| false
| 174
|
r
|
# Count duplicated digits in the 9x9 `Solution` grid (0 for a valid sudoku).
count <- 0
# For each row, table() tallies every value; (tally - 1) counts the repeats.
for(i in 1:9){
  row_r <- table(Solution[i,])
  count <- count + sum(row_r - 1)
}
# Same check applied to each column.
for(j in 1:9){
  col_r <- table(Solution[,j])
  count <- count + sum(col_r - 1)
}
|
#' Fetch StatsBomb lineups for several competition/season pairs.
#'
#' @param username,password StatsBomb API credentials.
#' @param competitionmatrix Matrix-like object; column 1 = competition_id,
#'   column 2 = season_id, one row per competition/season to fetch.
#' @param version API version string (default "v4").
#' @param baseurl API base URL.
#' @param parallel Whether alllineups() should fetch matches in parallel.
#' @param cores Number of cores. NOTE(review): currently unused; alllineups()
#'   presumably detects cores itself -- confirm before removing.
#' @return A single tibble of lineups for all requested competitions.
MultiCompLineups = function (username, password, competitionmatrix, version = "v4",
    baseurl = "https://data.statsbomb.com/api/", parallel = TRUE,
    cores = detectCores())
{
    # Collect one tibble per row and bind once at the end instead of growing
    # a tibble inside the loop (avoids repeated copying). seq_len() also
    # handles a zero-row competitionmatrix safely.
    lineup_list <- vector("list", nrow(competitionmatrix))
    for (i in seq_len(nrow(competitionmatrix))) {
        competition_id <- as.numeric(competitionmatrix[i, 1])
        season_id <- as.numeric(competitionmatrix[i, 2])
        matches <- matchesvector(username, password, season_id,
            competition_id, version, baseurl)
        # Bug fix: forward the caller's `parallel` choice instead of the
        # previously hard-coded parallel = TRUE.
        lineup_list[[i]] <- alllineups(username, password, matches,
            version, parallel = parallel)
    }
    return(bind_rows(lineup_list))
}
|
/R/MultiCompLineups.R
|
no_license
|
statsbomb/StatsBombR
|
R
| false
| false
| 711
|
r
|
# Fetch StatsBomb lineups for several competition/season pairs and bind them
# into one tibble. competitionmatrix: column 1 = competition_id,
# column 2 = season_id, one row per competition/season.
# NOTE(review): the `parallel` argument is ignored -- the alllineups() call
# below hard-codes parallel = TRUE; `cores` is never used either.
MultiCompLineups = function (username, password, competitionmatrix, version = "v4",
    baseurl = "https://data.statsbomb.com/api/", parallel = TRUE,
    cores = detectCores())
{
    events <- tibble()
    # NOTE(review): 1:dim(...)[1] misbehaves on a zero-row matrix, and
    # bind_rows() inside the loop grows the tibble repeatedly.
    for (i in 1:dim(competitionmatrix)[1]) {
        temp.lineups <- tibble()
        competition_id <- as.numeric(competitionmatrix[i, 1])
        season_id <- as.numeric(competitionmatrix[i, 2])
        matches <- matchesvector(username, password, season_id,
            competition_id, version, baseurl)
        temp.lineups <- alllineups(username, password, matches,
            version, parallel = TRUE)
        events <- bind_rows(events, temp.lineups)
    }
    return(events)
}
|
# MechaCar statistical analysis: multiple linear regression on mpg, summary
# statistics for suspension-coil PSI, and one-sample t-tests against a
# population mean of 1500 PSI.
# NOTE(review): install.packages() inside a script reinstalls on every run;
# usually done once interactively instead.
install.packages("tidyverse")
library(tidyverse)
library(dplyr)
demo_table1 <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
linear_regression <- lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data=demo_table1)#generate multiple linear regression model
summary(linear_regression)
coil_table <- read.csv(file='Suspension_Coil.csv',check.names=F,stringsAsFactors = F)
total_summary <- coil_table %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI)) #create summary table with multiple columns
lot_summary <- coil_table %>% group_by(Manufacturing_Lot)%>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI)) #create summary table with multiple columns
# One-sample t-tests: all coils, then each manufacturing lot, vs mu = 1500 PSI
t.test(coil_table$PSI,mu = 1500)
t.test(subset(coil_table, Manufacturing_Lot=="Lot1")$PSI,mu = 1500)
t.test(subset(coil_table, Manufacturing_Lot=="Lot2")$PSI,mu = 1500)
t.test(subset(coil_table, Manufacturing_Lot=="Lot3")$PSI,mu = 1500)
|
/MechaCarChallenge.RScript.R
|
no_license
|
worksm/MechaCar_Statistical_Analysis
|
R
| false
| false
| 995
|
r
|
# MechaCar statistical analysis (duplicate copy in this dump): mpg regression,
# suspension-coil PSI summaries, and one-sample t-tests vs mu = 1500 PSI.
install.packages("tidyverse")
library(tidyverse)
library(dplyr)
demo_table1 <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
linear_regression <- lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data=demo_table1)#generate multiple linear regression model
summary(linear_regression)
coil_table <- read.csv(file='Suspension_Coil.csv',check.names=F,stringsAsFactors = F)
total_summary <- coil_table %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI)) #create summary table with multiple columns
lot_summary <- coil_table %>% group_by(Manufacturing_Lot)%>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI)) #create summary table with multiple columns
# One-sample t-tests: all coils, then per manufacturing lot
t.test(coil_table$PSI,mu = 1500)
t.test(subset(coil_table, Manufacturing_Lot=="Lot1")$PSI,mu = 1500)
t.test(subset(coil_table, Manufacturing_Lot=="Lot2")$PSI,mu = 1500)
t.test(subset(coil_table, Manufacturing_Lot=="Lot3")$PSI,mu = 1500)
|
# Plot 1 of the ExData course project: histogram of Global Active Power for
# 2007-02-01 and 2007-02-02, written to plot1.png.
# na.strings="?" maps the dataset's missing-value marker to NA.
consumption <- read.csv("C:/Users/Erick/Downloads/household_power_consumption.txt", sep=";", na.strings="?",stringsAsFactors = FALSE)
# Combine the Date and Time columns into a single POSIXlt timestamp
consumption$DateTime<-strptime(paste(consumption$Date,consumption$Time),'%d/%m/%Y %H:%M:%S')
# Keep the two target days (the < 2007-02-03 bound excludes Feb 3rd)
studied_consumption<-subset(consumption,DateTime>=strptime('2007-02-01','%Y-%m-%d') & DateTime<strptime('2007-02-03','%Y-%m-%d'))
png(file="plot1.png", width=480, height=480)
hist(studied_consumption$Global_active_power, col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
ErickDany/ExData_Plotting1
|
R
| false
| false
| 541
|
r
|
# Histogram of Global Active Power for 2007-02-01/02, saved to plot1.png
# (duplicate copy of the script above in this dump).
consumption <- read.csv("C:/Users/Erick/Downloads/household_power_consumption.txt", sep=";", na.strings="?",stringsAsFactors = FALSE)
# Build a POSIXlt timestamp from the separate Date and Time columns
consumption$DateTime<-strptime(paste(consumption$Date,consumption$Time),'%d/%m/%Y %H:%M:%S')
# Two-day window; the strict < bound excludes 2007-02-03
studied_consumption<-subset(consumption,DateTime>=strptime('2007-02-01','%Y-%m-%d') & DateTime<strptime('2007-02-03','%Y-%m-%d'))
png(file="plot1.png", width=480, height=480)
hist(studied_consumption$Global_active_power, col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
\name{gmB}
\alias{gmB}
\docType{data}
\title{Graphical Model 5-Dim Binary Example Data}
\description{
This data set contains a matrix containing information on five binary
variables (coded as 0/1) and the corresponding DAG model.
}
\usage{data(gmB)}
\format{
The format is a list of two components
\describe{
\item{x:}{Int [1:5000, 1:5] 0 1 1 0 0 1 1 0 1 1 ...}
\item{g:}{Formal class 'graphNEL' [package "graph"] with 6 slots\cr
.. ..@ nodes : chr [1:5] "1" "2" "3" "4" ...\cr
.. ..@ edgeL :List of 5\cr
........
}
}
}
\details{
The data was generated using Tetrad in the following way. A random DAG
on five nodes was generated; binary variables were assigned to each
node; then conditional probability tables corresponding
to the structure of the generated DAG were constructed. Finally, 5000
samples were drawn using the conditional probability tables.
}
% \source{
% %% ~~ reference to a publication or URL from which the data were obtained ~~
% }
% \references{
% %% ~~ possibly secondary sources and usages ~~
% }
\examples{
data(gmB)
## maybe str(gmB) ; plot(gmB) ...
}
\keyword{datasets}
|
/man/gmB.Rd
|
no_license
|
cran/pcalg
|
R
| false
| false
| 1,148
|
rd
|
\name{gmB}
\alias{gmB}
\docType{data}
\title{Graphical Model 5-Dim Binary Example Data}
\description{
This data set contains a matrix containing information on five binary
variables (coded as 0/1) and the corresponding DAG model.
}
\usage{data(gmB)}
\format{
The format is a list of two components
\describe{
\item{x:}{Int [1:5000, 1:5] 0 1 1 0 0 1 1 0 1 1 ...}
\item{g:}{Formal class 'graphNEL' [package "graph"] with 6 slots\cr
.. ..@ nodes : chr [1:5] "1" "2" "3" "4" ...\cr
.. ..@ edgeL :List of 5\cr
........
}
}
}
\details{
The data was generated using Tetrad in the following way. A random DAG
on five nodes was generated; binary variables were assigned to each
node; then conditional probability tables corresponding
to the structure of the generated DAG were constructed. Finally, 5000
samples were drawn using the conditional probability tables.
}
% \source{
% %% ~~ reference to a publication or URL from which the data were obtained ~~
% }
% \references{
% %% ~~ possibly secondary sources and usages ~~
% }
\examples{
data(gmB)
## maybe str(gmB) ; plot(gmB) ...
}
\keyword{datasets}
|
# ui.R -- shinydashboard UI for the "Next Best Store" app.
# Layout: a control column (count slider + ranking criterion), a tabbed
# Google map panel, and a context column whose content depends on the
# selected map tab; a data table with sparklines sits below.
require(shiny)
require(shinythemes)
require(shinydashboard)
require(DT)
require(htmlwidgets)
require(sparkline) #AWS has the latest github version
suppressPackageStartupMessages(require(googleVis))
source("sparkline.R")
shinyUI( dashboardPage(
#fluidPage(theme = shinytheme("united"),
skin="yellow",
dashboardHeader(title = "Next Best Store"),
dashboardSidebar(disable=T),
dashboardBody(
tags$head(
# CSS fix so jquery-sparkline tooltips size correctly inside the dashboard
tags$style(HTML("
.jqstooltip{
box-sizing: content-box;
}"))
),
fluidRow(
column(2,
HTML('<a href="https://people.stanford.edu/yenlow/insight"><button type="button">Project website</button></a>'),
br(),br(),
# numericInput("num",label="Show (number of locations) with the",value=10),
sliderInput("num",label="Number of locations",
min=0,max=1000,value=100),
radioButtons("highOrFast", "",
c("Highest total revenue"="high",
"Fastest revenue growth"="fast"),
selected="high")
),
column(6,
tabBox(
title = "Map",color="yellow",
# The id lets us use input$mapTab on the server to find the current tab
id = "mapTab",
height = "370px", width="100%",
tabPanel("New stores", value="new", htmlOutput("newmap")),
tabPanel("Our stores", value="old", htmlOutput("oldmap")),
tabPanel("New vs our stores", value="compare", htmlOutput("comparemap"))
)
),
column(4, align="left",
#input.highOrFast instead of input$highOrFast
#if not existing locations, show potential revenue gain
conditionalPanel("input.mapTab != 'old'",
infoBoxOutput("gain",width=NULL),
align="center"
),
#if existing stores, show drugs sold
conditionalPanel("input.mapTab == 'old'",
textInput("zip",label = "Enter zip code to view the drugs sold"),
htmlOutput("zip")
),
#if comparing predicted and existing stores, show legend
conditionalPanel("input.mapTab == 'compare'",
# infoBoxOutput("gain",width=NULL),
imageOutput("legend",height=80),
h4("Markers sized by annual revenue")
)
)
),
fluidRow(
br(),br(),
h3("About the Location", align="center"),
sparklineOutput("spark"), #even though this line points to an orphaned output$spark, it's required for rendering sparklines
dataTableOutput("table")
)
))
)
|
/models/maps/ui.R
|
no_license
|
yenlow/NextBestStore
|
R
| false
| false
| 2,890
|
r
|
# ui.R -- "Next Best Store" dashboard UI (duplicate copy in this dump).
# Control column + tabbed map panel + tab-dependent context column, with a
# sparkline-enabled data table underneath.
require(shiny)
require(shinythemes)
require(shinydashboard)
require(DT)
require(htmlwidgets)
require(sparkline) #AWS has the latest github version
suppressPackageStartupMessages(require(googleVis))
source("sparkline.R")
shinyUI( dashboardPage(
#fluidPage(theme = shinytheme("united"),
skin="yellow",
dashboardHeader(title = "Next Best Store"),
dashboardSidebar(disable=T),
dashboardBody(
tags$head(
# CSS fix so jquery-sparkline tooltips size correctly
tags$style(HTML("
.jqstooltip{
box-sizing: content-box;
}"))
),
fluidRow(
column(2,
HTML('<a href="https://people.stanford.edu/yenlow/insight"><button type="button">Project website</button></a>'),
br(),br(),
# numericInput("num",label="Show (number of locations) with the",value=10),
sliderInput("num",label="Number of locations",
min=0,max=1000,value=100),
radioButtons("highOrFast", "",
c("Highest total revenue"="high",
"Fastest revenue growth"="fast"),
selected="high")
),
column(6,
tabBox(
title = "Map",color="yellow",
# The id lets us use input$mapTab on the server to find the current tab
id = "mapTab",
height = "370px", width="100%",
tabPanel("New stores", value="new", htmlOutput("newmap")),
tabPanel("Our stores", value="old", htmlOutput("oldmap")),
tabPanel("New vs our stores", value="compare", htmlOutput("comparemap"))
)
),
column(4, align="left",
#input.highOrFast instead of input$highOrFast
#if not existing locations, show potential revenue gain
conditionalPanel("input.mapTab != 'old'",
infoBoxOutput("gain",width=NULL),
align="center"
),
#if existing stores, show drugs sold
conditionalPanel("input.mapTab == 'old'",
textInput("zip",label = "Enter zip code to view the drugs sold"),
htmlOutput("zip")
),
#if comparing predicted and existing stores, show legend
conditionalPanel("input.mapTab == 'compare'",
# infoBoxOutput("gain",width=NULL),
imageOutput("legend",height=80),
h4("Markers sized by annual revenue")
)
)
),
fluidRow(
br(),br(),
h3("About the Location", align="center"),
sparklineOutput("spark"), #even though this line points to an orphaned output$spark, it's required for rendering sparklines
dataTableOutput("table")
)
))
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algorithms.R
\name{Be16NDTIblue}
\alias{Be16NDTIblue}
\title{Be16NDTIblue algorithm}
\usage{
Be16NDTIblue(w658, w458)
}
\arguments{
\item{w658}{numeric. Value at wavelength of 658 nm}
\item{w458}{numeric. Value at wavelength of 458 nm}
}
\value{
SpatRaster or numeric
}
\description{
Applies the Be16NDTIblue algorithm
}
\references{
Beck, R.; Xu, M.; Zhan, S.; Liu, H.; Johansen, R.A.; Tong, S.; Yang, B.; Shu, S.; Wu, Q.; Wang, S.; Berling, K.; Murray, A.; Emery, E.; Reif, M.; Harwood, J.; Young, J.; Martin, M.; Stillings, G.; Stumpf, R.; Su, H.; Ye, Z.; Huang, Y. Comparison of Satellite Reflectance Algorithms for Estimating Phycocyanin Values and Cyanobacterial Total Biovolume in a Temperate Reservoir Using Coincident Hyperspectral Aircraft Imagery and Dense Coincident Surface Observations. Remote Sens. 2017, 9, 538.
}
\seealso{
Other algorithms:
\code{\link{Al10SABI}()},
\code{\link{Am092Bsub}()},
\code{\link{Am09KBBI}()},
\code{\link{Be162B643sub629}()},
\code{\link{Be162B700sub601}()},
\code{\link{Be162BsubPhy}()},
\code{\link{Be16FLHBlueRedNIR_LS8}()},
\code{\link{Be16FLHBlueRedNIR_MERIS}()},
\code{\link{Be16FLHBlueRedNIR_OLCI}()},
\code{\link{Be16FLHBlueRedNIR_S2}()},
\code{\link{Be16FLHBlueRedNIR_WV2}()},
\code{\link{Be16FLHGreenRedNIR_LS8}()},
\code{\link{Be16FLHGreenRedNIR_MERIS}()},
\code{\link{Be16FLHGreenRedNIR_OLCI}()},
\code{\link{Be16FLHGreenRedNIR_S2}()},
\code{\link{Be16FLHGreenRedNIR_WV2}()},
\code{\link{Be16FLHVioletRedNIR_LS8}()},
\code{\link{Be16FLHVioletRedNIR_MERIS}()},
\code{\link{Be16FLHVioletRedNIR_OLCI}()},
\code{\link{Be16FLHVioletRedNIR_S2}()},
\code{\link{Be16FLHVioletRedNIR_WV2}()},
\code{\link{Be16FLHblue_LS8}()},
\code{\link{Be16FLHblue_MERIS}()},
\code{\link{Be16FLHblue_OLCI}()},
\code{\link{Be16FLHblue_S2}()},
\code{\link{Be16FLHblue_WV2}()},
\code{\link{Be16FLHviolet_LS8}()},
\code{\link{Be16FLHviolet_MERIS}()},
\code{\link{Be16FLHviolet_OLCI}()},
\code{\link{Be16FLHviolet_S2}()},
\code{\link{Be16FLHviolet_WV2}()},
\code{\link{Be16NDPhyI644over615}()},
\code{\link{Be16NDPhyI644over629}()},
\code{\link{Be16NDPhyI}()},
\code{\link{Be16NDTIviolet}()},
\code{\link{Be16Phy2BDA644over629}()},
\code{\link{Da052BDA}()},
\code{\link{De933BDA}()},
\code{\link{Gi033BDA}()},
\code{\link{Go04MCI}()},
\code{\link{HU103BDA}()},
\code{\link{Kn07KIVU}()},
\code{\link{MI092BDA}()},
\code{\link{MM092BDA}()},
\code{\link{MM12NDCIalt}()},
\code{\link{MM12NDCI}()},
\code{\link{MM143BDAopt}()},
\code{\link{SI052BDA}()},
\code{\link{SM122BDA}()},
\code{\link{SY002BDA}()},
\code{\link{TurbBe16GreenPlusRedBothOverViolet}()},
\code{\link{TurbBe16RedOverViolet}()},
\code{\link{TurbBow06RedOverGreen}()},
\code{\link{TurbChip09NIROverGreen}()},
\code{\link{TurbDox02NIRoverRed}()},
\code{\link{TurbFrohn09GreenPlusRedBothOverBlue}()},
\code{\link{TurbHarr92NIR}()},
\code{\link{TurbLath91RedOverBlue}()},
\code{\link{TurbMoore80Red}()},
\code{\link{Wy08CI}()}
}
\concept{algorithms}
|
/man/Be16NDTIblue.Rd
|
permissive
|
RAJohansen/waterquality
|
R
| false
| true
| 3,015
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algorithms.R
\name{Be16NDTIblue}
\alias{Be16NDTIblue}
\title{Be16NDTIblue algorithm}
\usage{
Be16NDTIblue(w658, w458)
}
\arguments{
\item{w658}{numeric. Value at wavelength of 658 nm}
\item{w458}{numeric. Value at wavelength of 458 nm}
}
\value{
SpatRaster or numeric
}
\description{
Applies the Be16NDTIblue algorithm
}
\references{
Beck, R.; Xu, M.; Zhan, S.; Liu, H.; Johansen, R.A.; Tong, S.; Yang, B.; Shu, S.; Wu, Q.; Wang, S.; Berling, K.; Murray, A.; Emery, E.; Reif, M.; Harwood, J.; Young, J.; Martin, M.; Stillings, G.; Stumpf, R.; Su, H.; Ye, Z.; Huang, Y. Comparison of Satellite Reflectance Algorithms for Estimating Phycocyanin Values and Cyanobacterial Total Biovolume in a Temperate Reservoir Using Coincident Hyperspectral Aircraft Imagery and Dense Coincident Surface Observations. Remote Sens. 2017, 9, 538.
}
\seealso{
Other algorithms:
\code{\link{Al10SABI}()},
\code{\link{Am092Bsub}()},
\code{\link{Am09KBBI}()},
\code{\link{Be162B643sub629}()},
\code{\link{Be162B700sub601}()},
\code{\link{Be162BsubPhy}()},
\code{\link{Be16FLHBlueRedNIR_LS8}()},
\code{\link{Be16FLHBlueRedNIR_MERIS}()},
\code{\link{Be16FLHBlueRedNIR_OLCI}()},
\code{\link{Be16FLHBlueRedNIR_S2}()},
\code{\link{Be16FLHBlueRedNIR_WV2}()},
\code{\link{Be16FLHGreenRedNIR_LS8}()},
\code{\link{Be16FLHGreenRedNIR_MERIS}()},
\code{\link{Be16FLHGreenRedNIR_OLCI}()},
\code{\link{Be16FLHGreenRedNIR_S2}()},
\code{\link{Be16FLHGreenRedNIR_WV2}()},
\code{\link{Be16FLHVioletRedNIR_LS8}()},
\code{\link{Be16FLHVioletRedNIR_MERIS}()},
\code{\link{Be16FLHVioletRedNIR_OLCI}()},
\code{\link{Be16FLHVioletRedNIR_S2}()},
\code{\link{Be16FLHVioletRedNIR_WV2}()},
\code{\link{Be16FLHblue_LS8}()},
\code{\link{Be16FLHblue_MERIS}()},
\code{\link{Be16FLHblue_OLCI}()},
\code{\link{Be16FLHblue_S2}()},
\code{\link{Be16FLHblue_WV2}()},
\code{\link{Be16FLHviolet_LS8}()},
\code{\link{Be16FLHviolet_MERIS}()},
\code{\link{Be16FLHviolet_OLCI}()},
\code{\link{Be16FLHviolet_S2}()},
\code{\link{Be16FLHviolet_WV2}()},
\code{\link{Be16NDPhyI644over615}()},
\code{\link{Be16NDPhyI644over629}()},
\code{\link{Be16NDPhyI}()},
\code{\link{Be16NDTIviolet}()},
\code{\link{Be16Phy2BDA644over629}()},
\code{\link{Da052BDA}()},
\code{\link{De933BDA}()},
\code{\link{Gi033BDA}()},
\code{\link{Go04MCI}()},
\code{\link{HU103BDA}()},
\code{\link{Kn07KIVU}()},
\code{\link{MI092BDA}()},
\code{\link{MM092BDA}()},
\code{\link{MM12NDCIalt}()},
\code{\link{MM12NDCI}()},
\code{\link{MM143BDAopt}()},
\code{\link{SI052BDA}()},
\code{\link{SM122BDA}()},
\code{\link{SY002BDA}()},
\code{\link{TurbBe16GreenPlusRedBothOverViolet}()},
\code{\link{TurbBe16RedOverViolet}()},
\code{\link{TurbBow06RedOverGreen}()},
\code{\link{TurbChip09NIROverGreen}()},
\code{\link{TurbDox02NIRoverRed}()},
\code{\link{TurbFrohn09GreenPlusRedBothOverBlue}()},
\code{\link{TurbHarr92NIR}()},
\code{\link{TurbLath91RedOverBlue}()},
\code{\link{TurbMoore80Red}()},
\code{\link{Wy08CI}()}
}
\concept{algorithms}
|
#!/usr/bin/env Rscript
# Batch driver for LTN mixed-effects analysis.
# Command-line args: 1 = work dir, 2 = MCMC iterations, 3 = covariate index
# into `covariates` below, 4 = lambda. Results (posterior probabilities,
# alpha means, timing, memory) are written under <WORK_DIR>/results/application.
library(LTN)
argv=commandArgs(TRUE)
WORK_DIR=argv[1]
#source(paste0(WORK_DIR,"/src/utility/utility.R"))
#source(paste0(WORK_DIR,"/src/utility/mixed_effects.R"))
niter=as.numeric(argv[2])
covariate_index=as.numeric(argv[3])
#model_index=as.numeric(argv[4])
# model_index hard-coded to 2 (the commented line shows it was once a CLI arg)
model_index=2
#lambda=as.numeric(argv[5])
lambda=as.numeric(argv[4])
#gprior_m=100
#modelspec=c('diagonal','sparse')[model_index]
dietcovariates=c("BF","Solid_Food","Eggs","Fish","Soy_Prod","Rye","Barley","Buckwheat_Millet")
covariates=c("Case_Control","post_seroconversion",dietcovariates)
# NOTE(review): mixing "control", logical F and "false" in one vector coerces
# F to the string "FALSE" -- confirm ltnme() accepts all three spellings.
baseline=c("control",F,rep("false",length(dietcovariates)))
covariate=covariates[covariate_index]
datadir=paste0(WORK_DIR,"/cache/")
resdir=paste0(WORK_DIR,"/results/application/")
system(paste0('mkdir -p ',resdir,'/pmap'))
system(paste0('mkdir -p ',resdir,'/pjap'))
system(paste0('mkdir -p ',resdir,'/alpha'))
system(paste0('mkdir -p ',resdir,'/time'))
system(paste0('mkdir -p ',resdir,'/memory'))
filenam=paste0(covariate,'_lambda',lambda,'.RData')
# Skip the run if results for this covariate/lambda combination already exist
if (!file.exists(paste0(resdir,'/pjap/',filenam))){
  data=readRDS(paste0(datadir,"ps_otu50.RData"))
  # Build the model formula: the covariate of interest comes first; the
  # remaining covariates are adjusted for, with a random intercept per subject.
  if (covariate=="Case_Control"){
    formula=as.formula(paste0('~',covariate,'+log(Age_at_Collection)+Country+post_seroconversion+Gender+',paste(dietcovariates,collapse = '+'),'+(1 | Subject_ID)'))
  }else if (covariate=="post_seroconversion"){
    formula=as.formula(paste0('~',covariate,'+log(Age_at_Collection)+Country+Gender+Case_Control+',paste(dietcovariates,collapse = '+'),'+(1 | Subject_ID)'))
  }else if (covariate %in% dietcovariates){
    formula=as.formula(paste0('~',covariate,'+log(Age_at_Collection)+Country+post_seroconversion+Gender+Case_Control+',paste(dietcovariates[-which(dietcovariates==covariate)],collapse = '+'),'+(1 | Subject_ID)'))
  }else{warning('Unknown covariate')}
  st<-system.time(result<-ltnme(formula=formula,
                                data=data,
                                test_baseline=baseline[covariate_index],
                                niter=niter,
                                reffcov = model_index,
                                SEED = 1,
                                save_alpha_only = T,
                                gprior_m = 100,
                                pnull = 0.5,
                                lambda = lambda))
  cat(paste0(covariate,': ',st[3],'s'))
  saveRDS(result$PMAP,paste0(resdir,'/pmap/',filenam))
  saveRDS(result$PJAP,paste0(resdir,'/pjap/',filenam))
  saveRDS(result$alpha_mean,paste0(resdir,'/alpha/',filenam))
  saveRDS(st,paste0(resdir,'/time/',filenam))
  saveRDS(object.size(result),paste0(resdir,'/memory/',filenam))
}
|
/src/application/application.R
|
no_license
|
ZhuoqunWang0120/LTN_analysis-JASA_1st_submission
|
R
| false
| false
| 2,387
|
r
|
#!/usr/bin/env Rscript
# Batch driver for LTN mixed-effects analysis (duplicate copy in this dump).
# CLI args: 1 = work dir, 2 = iterations, 3 = covariate index, 4 = lambda.
library(LTN)
argv=commandArgs(TRUE)
WORK_DIR=argv[1]
#source(paste0(WORK_DIR,"/src/utility/utility.R"))
#source(paste0(WORK_DIR,"/src/utility/mixed_effects.R"))
niter=as.numeric(argv[2])
covariate_index=as.numeric(argv[3])
#model_index=as.numeric(argv[4])
# model_index hard-coded to 2 (formerly a CLI argument, see comment above)
model_index=2
#lambda=as.numeric(argv[5])
lambda=as.numeric(argv[4])
#gprior_m=100
#modelspec=c('diagonal','sparse')[model_index]
dietcovariates=c("BF","Solid_Food","Eggs","Fish","Soy_Prod","Rye","Barley","Buckwheat_Millet")
covariates=c("Case_Control","post_seroconversion",dietcovariates)
baseline=c("control",F,rep("false",length(dietcovariates)))
covariate=covariates[covariate_index]
datadir=paste0(WORK_DIR,"/cache/")
resdir=paste0(WORK_DIR,"/results/application/")
system(paste0('mkdir -p ',resdir,'/pmap'))
system(paste0('mkdir -p ',resdir,'/pjap'))
system(paste0('mkdir -p ',resdir,'/alpha'))
system(paste0('mkdir -p ',resdir,'/time'))
system(paste0('mkdir -p ',resdir,'/memory'))
filenam=paste0(covariate,'_lambda',lambda,'.RData')
# Skip the run if results for this covariate/lambda already exist
if (!file.exists(paste0(resdir,'/pjap/',filenam))){
  data=readRDS(paste0(datadir,"ps_otu50.RData"))
  # Covariate of interest first; remaining covariates adjusted for, plus a
  # random intercept per subject.
  if (covariate=="Case_Control"){
    formula=as.formula(paste0('~',covariate,'+log(Age_at_Collection)+Country+post_seroconversion+Gender+',paste(dietcovariates,collapse = '+'),'+(1 | Subject_ID)'))
  }else if (covariate=="post_seroconversion"){
    formula=as.formula(paste0('~',covariate,'+log(Age_at_Collection)+Country+Gender+Case_Control+',paste(dietcovariates,collapse = '+'),'+(1 | Subject_ID)'))
  }else if (covariate %in% dietcovariates){
    formula=as.formula(paste0('~',covariate,'+log(Age_at_Collection)+Country+post_seroconversion+Gender+Case_Control+',paste(dietcovariates[-which(dietcovariates==covariate)],collapse = '+'),'+(1 | Subject_ID)'))
  }else{warning('Unknown covariate')}
  st<-system.time(result<-ltnme(formula=formula,
                                data=data,
                                test_baseline=baseline[covariate_index],
                                niter=niter,
                                reffcov = model_index,
                                SEED = 1,
                                save_alpha_only = T,
                                gprior_m = 100,
                                pnull = 0.5,
                                lambda = lambda))
  cat(paste0(covariate,': ',st[3],'s'))
  saveRDS(result$PMAP,paste0(resdir,'/pmap/',filenam))
  saveRDS(result$PJAP,paste0(resdir,'/pjap/',filenam))
  saveRDS(result$alpha_mean,paste0(resdir,'/alpha/',filenam))
  saveRDS(st,paste0(resdir,'/time/',filenam))
  saveRDS(object.size(result),paste0(resdir,'/memory/',filenam))
}
|
library(readxl)
library(zoo)
library(openxlsx)
library(forecast)
library(lmtest)
library(tseries)
library(ggplot2)
library(scales)
library(psych)
# Functions ----------------------------
# Wrap a numeric vector as an annual time series starting in 1996 and pair
# it with its training window (observations through 2019).
# Returns a data frame with columns `dn` (full series) and `dnt` (training).
mutate_data <- function(df) {
  full_series <- ts(df, start = c(1996), frequency = 1)
  training <- window(full_series, end = 2019)
  data.frame(dn = full_series, dnt = training)
}
# Combine an observed series, its fitted values and a forecast object into one
# data frame suitable for ggplot. `dn` is a ts; `fcast` is presumably a
# forecast-package object with $mean, $fitted and 80/95% intervals -- confirm.
gcast <- function(dn, fcast) {
  en <- max(time(fcast$mean)) # extract the max date used in the forecast
  #Extract Source and Training Data
  ds <- as.data.frame(window(dn, end = en, extend = TRUE))
  names(ds) <- 'observed'
  ds$date <- as.Date(time(window(dn, end = en, extend = TRUE)))
  # Extract the Fitted Values (need to figure out how to grab confidence intervals)
  dfit <- as.data.frame(fcast$fitted)
  dfit$date <- as.Date(time(fcast$fitted))
  names(dfit)[1] <- 'fitted'
  ds <- merge(ds, dfit, all.x = TRUE) # Merge fitted values with source and training data
  # Exract the Forecast values and confidence intervals
  dfcastn <- as.data.frame(fcast)
  # Forecast rows are keyed by year in the row names; zoo::as.yearmon converts
  # the numeric year into a date for merging -- requires zoo to be attached.
  dfcastn$date <- as.Date(as.yearmon(as.numeric(row.names(dfcastn))))
  names(dfcastn) <- c('forecast','lo80','hi80','lo95','hi95','date')
  pd <- merge(ds, dfcastn, all.x = TRUE) # final data.frame for use in ggplot
  return(pd)
}
# Build the prediction plot for one series: observed points (purple),
# forecast points (black) and a shaded 95% confidence ribbon.
# `df` is the data frame produced by gcast(); `name` labels the title.
get_plot <- function(df, name) {
  ggplot(data = df, aes(x = date, y = observed)) +
    geom_point(col = 'purple') +
    ggtitle(paste('Prediction for', name),
            subtitle = 'Purple - data points, black - forecasted points, shadow - 95% conf. interval') +
    xlab('Year') +
    ylab('Value') +
    geom_point(aes(y = forecast)) +
    geom_ribbon(aes(ymin = lo95, ymax = hi95), alpha = 0.25) +
    theme_bw()
}
# Data manipulation --------------------
# Load the thesis dataset (1996-2019 annual macro series) and interpolate
# missing values by spline. NOTE(review): absolute local path.
data_transp_for_analysis <- read_excel("/Users/stanislavermohin/Desktop/NSU/Thesis/xlsx_output/data_transp_for_analysis.xlsx",
                                       col_types = c("numeric", "numeric", "numeric",
                                                     "numeric", "numeric", "numeric",
                                                     "numeric"))
data <- data.frame(na.spline.default(data_transp_for_analysis))
# write.xlsx(data, file = '~/Desktop/NSU/Thesis/data_splined.xlsx')
# Keep a mapping from short column codes to the original column names
real_names <- data.frame(name = names(data))
row.names(real_names) <- c('deflator', 'gdp', 'r&d', 'cuip', 'pa', 'fbs', 'ict')
names(data) <- rownames(real_names)
# View(data)
# Number of differences needed for stationarity, per series, under three
# unit-root tests (ADF, KPSS, PP). NOTE(review): the 'pp' column reuses
# test = 'adf' -- presumably a copy-paste slip; confirm intent.
test_for_diffs <- data.frame(name = names(data), adf = NA, kpss = NA, pp = NA)
test_for_diffs$adf <- apply(data, 2, ndiffs, test = 'adf')
test_for_diffs$kpss <- apply(data, 2, ndiffs, test = 'kpss')
test_for_diffs$pp <- apply(data, 2, ndiffs, test = 'adf')
row.names(test_for_diffs) <- test_for_diffs$name
test_for_diffs <- subset(test_for_diffs, select = -c(name))
# Worst case across the three tests decides the differencing order
test_for_diffs$max <- apply(test_for_diffs, 1, max)
data_diff1 <- data[test_for_diffs$max == 1]
row.names(data_diff1) <- 1996:2019
data_diff2 <- data[test_for_diffs$max == 2]
row.names(data_diff2) <- 1996:2019
# Apply first differences to I(1) series and second differences to I(2) ones
defed_data_1 <- data.frame(diff(as.matrix(data_diff1),
                                differences = 1))
defed_data_2 <- data.frame(diff(as.matrix(data_diff2),
                                differences = 2))
defed_data <- data.frame(defed_data_1[2:NROW(defed_data_1), ],
                         defed_data_2)
# View(defed_data)
# View(test_for_diffs)
apply(defed_data, 2, acf)
# Difference every series once so all of them can be checked as I(1)
defed_toI1_data <- data.frame(data_diff1[2:NROW(data_diff1), ],
                              diff(as.matrix(data_diff2), differences = 1))
# View(defed_toI1_data)
acf_data <- apply(defed_toI1_data, 2, acf, plot = FALSE, lag.max = 23) # all stationary (ro(k) (k -> Inf) -> 0) => all data ~ I(1)
acf_data$`r&d` <- acf_data$`r.d`
# Check for I(1) with ACF --------------
# Plot the ACF of each first-differenced series
for (col in names(acf_data)) {
  all <- data.frame(x = acf_data[[col]]$lag, y = acf_data[[col]]$acf)
  plot <- ggplot(all, aes(x = x, y = y)) + geom_line(aes(y = y), color = 'purple') +
    ggtitle(paste('ACF function for the first difference of'), subtitle = real_names$name[rownames(real_names) == col]) +
    ylab('ACF') +
    xlab('Lag') +
    theme_bw() +
    geom_hline(yintercept = 0, color = 'light blue')
  print(plot)
}
# Using data itself (no differencing) for co-integration tests
# Co-integration test ------------------
# Engle-Granger style: regress demeaned GDP on each demeaned regressor and
# inspect the residual ACF for stationarity.
gdp <- data$gdp
needed_data <- subset(data, select = -c(gdp, deflator))
# View(needed_data)
coint_res <- data.frame(t = 1996:2019)
coint_lambda <- data.frame(t = 1996:2019)
for (column in names(needed_data)) {
  fit <- lm(scale(gdp, scale = FALSE)~scale(needed_data[[column]], scale = FALSE))
  print(column)
  # print(bptest(fit))
  # print(summary(fit))
  coint_res[[column]] <- fit$residuals
  coint_lambda[[column]] <- fit$coefficients[[2]]
  a <- acf(fit$residuals,
           lag.max = NROW(fit$residuals),
           plot = FALSE)
  all <- data.frame(x = a$lag, y = a$acf)
  plot <- ggplot(all, aes(x = x, y = y)) + geom_line(aes(y = y), color = 'purple') +
    ggtitle(paste('ACF function for'), subtitle = real_names$name[rownames(real_names) == column]) +
    ylab('ACF') +
    xlab('Lag') +
    theme_bw() +
    geom_hline(yintercept = 0, color = 'light blue')
  # print(plot)
}
# ARIMA --------------------------------
# Three-year-ahead ARIMA forecasts for every series; the `t` column labels
# point forecasts (2020-2022) and their 80/95% interval rows.
forecasts <- data.frame(t = c(2020:2022,
                              'l802020', 'l952020',
                              'l802021', 'l952021',
                              'l802022', 'l952022'))
arima_fit_gdp <- auto.arima(gdp, max.p = 3, max.q = 3, d = 1, ic = 'aic')
summary(arima_fit_gdp)
coeftest(arima_fit_gdp)
prediction_gdp <- forecast(arima_fit_gdp, 3)
forecasts[['gdp']] <- prediction_gdp[[4]][1:3]
needed_data <- subset(data, select = -c(deflator, gdp))
for (col in names(needed_data)) {
  print(col)
  arima_fit <- auto.arima(needed_data[[col]], max.p = 3, max.q = 3, d = 1, ic = 'aic')
  print(coeftest(arima_fit))
  prediction <- forecast(arima_fit, 3)
  forecasts[[col]] <- c(prediction[[4]][1:3], prediction[[5]][1:3], prediction[[6]][1:3])
}
# write.xlsx(forecasts, '~/Desktop/NSU/Thesis/forecasts.xlsx')
# Plot forecast --------------------
# Refit on the training window and plot observed vs forecast for each series
all <- subset(data, select = -c(deflator))
for (column in names(all)) {
  data_to_go <- mutate_data(all[[column]])
  arima_fit <- auto.arima(data_to_go$dnt, max.p = 3, max.q = 3, d = 1, ic = 'aic')
  yfor <- forecast(arima_fit, 3)
  pd <- gcast(data_to_go$dn, yfor)
  plot <- get_plot(pd, column)
  print(plot)
}
# ECM ----------------------------------
corr_matrix <- cor(subset(all))
# View(corr_matrix)
# write.xlsx(corr_matrix, file = '~/Desktop/NSU/Thesis/corr_matrix.xlsx')
all$rd <- all$`r&d`
# Pre-built ECM regressors (GDP difference, lagged residual and lags);
# presumably exported from the steps above -- confirm against the xlsx.
data_go <- read_excel("~/Desktop/NSU/Thesis/to_analyze_data.xlsx",
                      col_types = c("numeric", "numeric", "numeric",
                                    "numeric"))
data_go <- data.frame(data_go)
actual_fit <- lm(data = data_go, gdpd~res1+gdpd1+cuipd1)
summary(actual_fit) # oh, yeeeeees, it's worked
# ECM prediction -----------------------
# One-step-ahead ECM prediction using hard-coded last-observation values,
# then three ways of averaging it with the ARIMA GDP forecast.
next_gdpd <- predict.lm(actual_fit, newdata = data.frame(res1 = 1.31374812, gdpd1 = -0.015568068, cuipd1 = -6.166148502))
next_gdp_1 <- harmonic.mean(c(next_gdpd + data$gdp[NROW(data$gdp)], forecasts$gdp[1]))
next_gdp_2 <- geometric.mean(c(next_gdpd + data$gdp[NROW(data$gdp)], forecasts$gdp[1]))
next_gdp_3 <- mean(c(next_gdpd + data$gdp[NROW(data$gdp)], forecasts$gdp[1]))
# View(c(next_gdp_1, next_gdp_2, next_gdp_3))
|
/analysis.R
|
permissive
|
FunnyRabbitIsAHabbit/UniProject
|
R
| false
| false
| 7,578
|
r
|
library(readxl)
library(zoo)
library(openxlsx)
library(forecast)
library(lmtest)
library(tseries)
library(ggplot2)
library(scales)
library(psych)
# Functions ----------------------------
mutate_data <- function(df) {
dn <- ts(df, start = c(1996), frequency = 1)
dnt <- window(dn, end = 2019)
return(data.frame(dn, dnt))
}
# Combine an observed time series with a forecast-package object into one
# "long" data.frame suitable for ggplot: columns observed / fitted /
# forecast / lo80 / hi80 / lo95 / hi95, merged on `date`. Pre-forecast rows
# have NA forecast columns and forecast rows have NA observed values.
#   dn    - the full observed ts
#   fcast - a forecast object fitted on (a window of) dn
gcast <- function(dn, fcast) {
  en <- max(time(fcast$mean)) # last date covered by the forecast horizon
  # Observed series, extended with NAs up to the end of the forecast horizon
  ds <- as.data.frame(window(dn, end = en, extend = TRUE))
  names(ds) <- 'observed'
  ds$date <- as.Date(time(window(dn, end = en, extend = TRUE)))
  # In-sample fitted values of the model
  dfit <- as.data.frame(fcast$fitted)
  dfit$date <- as.Date(time(fcast$fitted))
  names(dfit)[1] <- 'fitted'
  ds <- merge(ds, dfit, all.x = TRUE) # merge fitted values with observed data
  # Extract the forecast values and confidence intervals
  dfcastn <- as.data.frame(fcast)
  # Row names of the forecast frame are numeric time points; zoo's
  # as.yearmon converts them to dates.
  dfcastn$date <- as.Date(as.yearmon(as.numeric(row.names(dfcastn))))
  names(dfcastn) <- c('forecast','lo80','hi80','lo95','hi95','date')
  pd <- merge(ds, dfcastn, all.x = TRUE) # final data.frame for use in ggplot
  return(pd)
}
# Plot observed points, forecast points and the 95% confidence ribbon from a
# data.frame produced by gcast().
#   df   - gcast() output (columns date, observed, forecast, lo95, hi95)
#   name - series name used in the plot title
get_plot <- function(df, name) {
  ggplot(data = df, aes(x = date, y = observed)) +
    geom_point(col = 'purple') +
    ggtitle(paste('Prediction for', name),
            subtitle = 'Purple - data points, black - forecasted points, shadow - 95% conf. interval') +
    xlab('Year') +
    ylab('Value') +
    geom_point(aes(y = forecast)) +
    geom_ribbon(aes(ymin = lo95, ymax = hi95), alpha = 0.25) +
    theme_bw()
}
# Data manipulation --------------------
# Read the raw indicator table (7 numeric columns, yearly 1996-2019).
data_transp_for_analysis <- read_excel("/Users/stanislavermohin/Desktop/NSU/Thesis/xlsx_output/data_transp_for_analysis.xlsx",
                                       col_types = c("numeric", "numeric", "numeric",
                                                     "numeric", "numeric", "numeric",
                                                     "numeric"))
# Fill missing observations by cubic-spline interpolation (zoo).
data <- data.frame(na.spline.default(data_transp_for_analysis))
# write.xlsx(data, file = '~/Desktop/NSU/Thesis/data_splined.xlsx')
# Keep the original (long) column names keyed by short codes, then rename
# the working columns to those short codes.
real_names <- data.frame(name = names(data))
row.names(real_names) <- c('deflator', 'gdp', 'r&d', 'cuip', 'pa', 'fbs', 'ict')
names(data) <- rownames(real_names)
# View(data)
# Stationarity testing -----------------
# Number of differences each series needs to become stationary, according to
# three unit-root tests (ADF, KPSS, Phillips-Perron).
test_for_diffs <- data.frame(name = names(data), adf = NA, kpss = NA, pp = NA)
test_for_diffs$adf <- apply(data, 2, ndiffs, test = 'adf')
test_for_diffs$kpss <- apply(data, 2, ndiffs, test = 'kpss')
# BUG FIX: the `pp` column previously re-ran the ADF test (test = 'adf'),
# silently duplicating the `adf` column; it now uses the Phillips-Perron test.
test_for_diffs$pp <- apply(data, 2, ndiffs, test = 'pp')
row.names(test_for_diffs) <- test_for_diffs$name
test_for_diffs <- subset(test_for_diffs, select = -c(name))
# Conservative choice: difference each series as many times as the most
# demanding of the three tests requires.
test_for_diffs$max <- apply(test_for_diffs, 1, max)
# Split columns by the differencing order required (from test_for_diffs$max).
data_diff1 <- data[test_for_diffs$max == 1]
row.names(data_diff1) <- 1996:2019
data_diff2 <- data[test_for_diffs$max == 2]
row.names(data_diff2) <- 1996:2019
# First and second differences, respectively.
defed_data_1 <- data.frame(diff(as.matrix(data_diff1),
                                differences = 1))
defed_data_2 <- data.frame(diff(as.matrix(data_diff2),
                                differences = 2))
# Align lengths: the twice-differenced data is one row shorter, so drop the
# first row of the once-differenced data before combining.
defed_data <- data.frame(defed_data_1[2:NROW(defed_data_1), ],
                         defed_data_2)
# View(defed_data)
# View(test_for_diffs)
apply(defed_data, 2, acf)  # quick visual ACF check of the differenced series
# Reduce everything to (at most) first differences.
# NOTE(review): the max==1 columns enter here in LEVELS (rows 2..n), not as
# first differences - confirm that this is intended before trusting the
# "first difference" ACF plots below.
defed_toI1_data <- data.frame(data_diff1[2:NROW(data_diff1), ],
                              diff(as.matrix(data_diff2), differences = 1))
# View(defed_toI1_data)
acf_data <- apply(defed_toI1_data, 2, acf, plot = FALSE, lag.max = 23) # all stationary (ro(k) (k -> Inf) -> 0) => all data ~ I(1)
# data.frame() mangled 'r&d' into 'r.d'; restore it under the original key.
acf_data$`r&d` <- acf_data$`r.d`
# Check for I(1) with ACF --------------
# Plot the ACF of each series in acf_data; a decaying ACF supports the
# series being stationary after one difference (i.e. ~ I(1)).
for (col in names(acf_data)) {
  all <- data.frame(x = acf_data[[col]]$lag, y = acf_data[[col]]$acf)
  plot <- ggplot(all, aes(x = x, y = y)) + geom_line(aes(y = y), color = 'purple') +
    ggtitle(paste('ACF function for the first difference of'), subtitle = real_names$name[rownames(real_names) == col]) +
    ylab('ACF') +
    xlab('Lag') +
    theme_bw() +
    geom_hline(yintercept = 0, color = 'light blue')
  print(plot)
}
# Using data itself (no differencing) for co-integration tests
# Co-integration test ------------------
gdp <- data$gdp
needed_data <- subset(data, select = -c(gdp, deflator))
# View(needed_data)
coint_res <- data.frame(t = 1996:2019)
coint_lambda <- data.frame(t = 1996:2019)
# Engle-Granger style first step: regress (mean-centred) GDP on each
# candidate series and keep the residuals; stationary residuals would
# indicate co-integration.
for (column in names(needed_data)) {
  fit <- lm(scale(gdp, scale = FALSE)~scale(needed_data[[column]], scale = FALSE))
  print(column)
  # print(bptest(fit))
  # print(summary(fit))
  coint_res[[column]] <- fit$residuals
  # Slope of the co-integrating regression (a single scalar, recycled to
  # fill all 24 rows of coint_lambda).
  coint_lambda[[column]] <- fit$coefficients[[2]]
  # ACF of the residuals as an informal stationarity check.
  a <- acf(fit$residuals,
           lag.max = NROW(fit$residuals),
           plot = FALSE)
  all <- data.frame(x = a$lag, y = a$acf)
  plot <- ggplot(all, aes(x = x, y = y)) + geom_line(aes(y = y), color = 'purple') +
    ggtitle(paste('ACF function for'), subtitle = real_names$name[rownames(real_names) == column]) +
    ylab('ACF') +
    xlab('Lag') +
    theme_bw() +
    geom_hline(yintercept = 0, color = 'light blue')
  # print(plot)
}
# ARIMA --------------------------------
# Row labels: 3 point-forecast years (2020-2022) followed by 80%/95%
# interval labels for each year (9 rows in total).
forecasts <- data.frame(t = c(2020:2022,
                              'l802020', 'l952020',
                              'l802021', 'l952021',
                              'l802022', 'l952022'))
# GDP gets its own fit; d = 1 consistent with the I(1) finding above.
arima_fit_gdp <- auto.arima(gdp, max.p = 3, max.q = 3, d = 1, ic = 'aic')
summary(arima_fit_gdp)
coeftest(arima_fit_gdp)
prediction_gdp <- forecast(arima_fit_gdp, 3)
# NOTE(review): 3 values assigned into a 9-row frame - R recycles them
# silently, so rows 4-9 repeat the point forecasts; confirm intended.
forecasts[['gdp']] <- prediction_gdp[[4]][1:3]
needed_data <- subset(data, select = -c(deflator, gdp))
for (col in names(needed_data)) {
  print(col)
  arima_fit <- auto.arima(needed_data[[col]], max.p = 3, max.q = 3, d = 1, ic = 'aic')
  print(coeftest(arima_fit))
  prediction <- forecast(arima_fit, 3)
  # Point forecasts plus lower/upper interval bounds (forecast-object list
  # slots 4 = mean, 5 = lower, 6 = upper).
  forecasts[[col]] <- c(prediction[[4]][1:3], prediction[[5]][1:3], prediction[[6]][1:3])
}
# write.xlsx(forecasts, '~/Desktop/NSU/Thesis/forecasts.xlsx')
# Plot forecast --------------------
all <- subset(data, select = -c(deflator))
# Re-fit an ARIMA per series on the training window (<= 2019) and plot the
# observed values against the 3-year forecast with 95% bands.
for (column in names(all)) {
  data_to_go <- mutate_data(all[[column]])
  arima_fit <- auto.arima(data_to_go$dnt, max.p = 3, max.q = 3, d = 1, ic = 'aic')
  yfor <- forecast(arima_fit, 3)
  pd <- gcast(data_to_go$dn, yfor)
  plot <- get_plot(pd, column)
  print(plot)
}
# ECM ----------------------------------
corr_matrix <- cor(subset(all))  # subset() with no condition is a no-op here
# View(corr_matrix)
# write.xlsx(corr_matrix, file = '~/Desktop/NSU/Thesis/corr_matrix.xlsx')
all$rd <- all$`r&d`  # syntactically safe alias for use in formulas
# NOTE(review): the regression columns (gdpd, res1, gdpd1, cuipd1) are
# prepared outside this script in Excel - presumably GDP growth, the lagged
# co-integration residual and lagged regressors; verify against the workbook.
data_go <- read_excel("~/Desktop/NSU/Thesis/to_analyze_data.xlsx",
                      col_types = c("numeric", "numeric", "numeric",
                                    "numeric"))
data_go <- data.frame(data_go)
# Error-correction model estimated by OLS.
actual_fit <- lm(data = data_go, gdpd~res1+gdpd1+cuipd1)
summary(actual_fit) # oh, yeeeeees, it's worked
# ECM prediction -----------------------
next_gdpd <- predict.lm(actual_fit, newdata = data.frame(res1 = 1.31374812, gdpd1 = -0.015568068, cuipd1 = -6.166148502))
# Combine the ECM-implied GDP level with the pure-ARIMA forecast using three
# alternative averages (harmonic / geometric / arithmetic, from psych).
next_gdp_1 <- harmonic.mean(c(next_gdpd + data$gdp[NROW(data$gdp)], forecasts$gdp[1]))
next_gdp_2 <- geometric.mean(c(next_gdpd + data$gdp[NROW(data$gdp)], forecasts$gdp[1]))
next_gdp_3 <- mean(c(next_gdpd + data$gdp[NROW(data$gdp)], forecasts$gdp[1]))
# View(c(next_gdp_1, next_gdp_2, next_gdp_3))
|
# install.packages('keras')
# install.packages('purrr')
# install.packages('functional')
library(IsolationForest)
library(MASS)
library(caret)
library(fGarch)
library(fitdistrplus)
library(pracma)
library(BBmisc)
library(functional)
library(dplyr)
library(keras)
library(lubridate)
library(tensorflow)
# Environment setup: give the session a moment, then (re-)install the
# TensorFlow backend needed by keras on every run.
Sys.sleep(5)
install_tensorflow(restart_session = FALSE)
setwd("/home/jonghyeon3/extension_AD/evaluations/data")
fn<-list.files(getwd())  # available input files (not referenced later in this script)
#data load and preprocess
{
  input = data.frame(read.csv('creditcard-0.1-0.csv', header=T))
  # Split events by label to compare whole-case activity sequences.
  normal= input[which(input$anomaly_type =="normal"),]
  anomaly= input[which(input$anomaly_type !="normal"),]
  # One activity sequence (trace) per case.
  normal_seq = aggregate(normal$Activity, by=list(normal$Case), FUN=paste0)
  anomaly_seq = aggregate(anomaly$Activity, by=list(anomaly$Case), FUN=paste0)
  # Drop anomalous cases whose trace also occurs among normal cases
  # (label-ambiguous traces).
  delete_case= anomaly_seq[which(is.element(anomaly_seq$x , normal_seq$x)),'Group.1']
  input = input[which(!is.element(input$Case, delete_case)),]
  input$Event = 1:nrow(input)
  input$Event = as.factor(input$Event)
  # Per-case event counter minus one: 0 for the first event of a case,
  # 1 for the second, etc.; everything not equal to 1 is zeroed below.
  # NOTE(review): as written, 'start' therefore flags the SECOND event of
  # each case, not the first - confirm this is intended.
  one= rep(1, nrow(input))
  input[,'start'] = ave(one, by= input$Case, FUN= cumsum) -1
  input[which(input$start !=1),'start'] =0
}
####
#functions
{
fun_itree = function(x){
if(sum( apply(x, 2, FUN= function(x){length(unique(x))} ) > 1 )>0){
tr<-IsolationTrees(as.data.frame(x), ntree=100, nmin=5)
as<-AnomalyScore(x,tr)
return(as$outF)
}else{
as = rep(0, nrow(x))
return(as)
}
}
  # Derive a dense embedding vector per activity level using a small Keras
  # network with an embedding layer.
  # NOTE(review): the model is compiled but never fit, so the returned
  # embeddings are the randomly initialised layer weights - confirm this is
  # intended (or that training happens elsewhere).
  fun_embedding = function(ActivityID, embedding_size){
    model <- keras_model_sequential()
    # input_dim reserves one extra row (index 0) used as padding ("none").
    model %>% layer_embedding(input_dim = length(unique(ActivityID))+1, output_dim = embedding_size, input_length = 1, name="embedding") %>%
      layer_flatten() %>%
      layer_dense(units=40, activation = "relu") %>%
      layer_dense(units=10, activation = "relu") %>%
      layer_dense(units=1)
    model %>% compile(loss = "mse", optimizer = "sgd", metric="accuracy")
    layer <- get_layer(model, "embedding")
    embeddings <- data.frame(layer$get_weights()[[1]])
    # Row 1 corresponds to the padding index; remaining rows map to the
    # factor levels of ActivityID.
    embeddings$ActivityID <- c("none", levels(ActivityID) )
    return(embeddings)
  }
fun_onehot = function(data){
if(length(levels(data$ActivityID))>1){
a<- model.matrix(~ActivityID, data = data)
A<- as.numeric(data[,2])
A[which(A!=1)] <- 0
a<- cbind(ActivityID1 = A, a[,-1])
onehot<- as.data.frame(a)
}else{
A<- as.numeric(data[,2])
A[which(A!=1)] <- 0
a<- cbind(ActivityID1 = A)
onehot<- as.data.frame(a)
}
return(onehot)
}
fun_batch_remove_TRUE = function(input, Min, start_index, Max, until, embedding_size_p, remove_threshold ){}
  # Batch anomaly scoring over an event stream WITHOUT case removal.
  #
  #   input            : event log with at least the columns Case, Activity,
  #                      timestamp, order and anomaly_type.
  #   Min, Max         : bounds on the number of cases kept in the sliding
  #                      window; Max is forced up to Min+1 if smaller.
  #   start_index      : index of the first event to score (all earlier
  #                      events form the initial window).
  #   until            : how many further events to score (0 = to the end).
  #   embedding_size_p : > 0 selects deep-embedding encoding with
  #                      round(#activities * embedding_size_p) dimensions;
  #                      0 selects one-hot encoding.
  #
  # Returns the reordered log `pre` with added columns, notably:
  #   leverage : per-event anomaly score (-1 = never computed, 0 = prefix
  #              had no peer of equal length to compare against),
  #   time     : per-event scoring runtime,
  #   tn       : TRUE when the score exceeds mean + sd of its batch.
  fun_batch_remove_FALSE = function(input, Min,start_index, Max, until, embedding_size_p ){
    # prepare data: flag case starts and order events by timestamp
    pre<-input
    pre= pre[ with(pre, order(Case,timestamp)),]
    one= rep(1, nrow(pre))
    # NOTE(review): cumsum-1 is 0 for the first event of a case and 1 for
    # the second, so 'start' flags the SECOND event per case - confirm.
    pre[,'start'] = ave(one, by= pre$Case, FUN= cumsum) -1
    pre[which(pre$start !=1),'start'] =0
    pre= pre[ with(pre, order(timestamp)),]
    pre[,'Event'] = as.factor(1:nrow(pre))
    pre[,'num_case'] = cumsum(pre$start)    # running count of flagged cases
    pre[,'leverage'] = rep(-1, nrow(pre))   # -1 marks "not scored"
    pre[,'t1'] = rep(0, nrow(pre))
    pre[,'t2'] = rep(0, nrow(pre))
    pre[,'t3'] = rep(0, nrow(pre))
    pre[,'tn']= rep(0, nrow(pre))
    pre[,'time'] = rep(0, nrow(pre))
    event_num = nrow(pre)
    case_num= length(unique(pre$Case))
    start_index = start_index
    last_index = nrow(pre)
    leverage_start <- Sys.time()
    # initial window: every event up to start_index
    pre2 = pre[1:start_index,]
    cur_len = sum(pre2$start)
    data<- pre2[,c("Case","Activity","order")]
    names(data)[1:2] <- c("ID", "ActivityID")
    #basic: Max should be larger than Min or equal
    if(Max< (Min+1)){
      Max=Min+1
    }
    # Max option: shrink the window by dropping the oldest cases entirely
    if(cur_len > Max ){
      del_case = pre[which(pre$start==1),'Case'][1:(cur_len-Max)]
      pre = pre[which(!is.element(pre$Case, del_case)),]
      pre[,'num_case'] = cumsum(pre$start)
      event_num = nrow(pre)
      case_num= length(unique(pre$Case))
      last_index = nrow(pre)
      pre2 = pre2[which(!is.element(pre2$Case, del_case)),]
      data<- pre2[,c("Case","Activity","order")]
      names(data)[1:2] <- c("ID", "ActivityID")
      cur_len = sum(pre2$start)
      start_index = nrow(pre2)
      last_index = nrow(pre)
    }
    if(start_index == last_index){
      #skip: nothing left to stream
    }else{
      # ---- score the event at start_index itself ----
      if(embedding_size_p>0){
        num_act= length(unique(data$ActivityID))
        embedding_size = round(num_act*embedding_size_p)
        # deep embedding encoding
        embeddings = fun_embedding(as.factor(data$ActivityID), embedding_size)
        object_case = pre2$Case[nrow(pre2)]
        object_event = pre2$Event[nrow(pre2)]
        data$ID <- as.factor(data$ID)
        data$ActivityID <- as.factor(data$ActivityID)
        n= length(unique(data[,1]))
        m = max(table(data[,1]))
        data$order = as.character(data$order)
        data$ID = as.character(data$ID)
        # attach each event's embedding vector and restore event order
        all3 = merge(data, embeddings, by='ActivityID', all.x=T)
        all3= all3[ with(all3, order(ID, order)),]
        all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
        num_event = nrow(all3)
        max<- m*(embedding_size)    # widest possible flattened prefix
        c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
        label = as.character(c[,2])
        # prefix encoding: one flattened (zero-padded) row per event prefix
        prefixL = as.numeric()
        newdat2<- matrix(NA, nrow=num_event , ncol=max)
        for(j in 1:num_event){
          cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
          if(class(cut)=='numeric'){
            prefixL[j] = 1
          }else{
            prefixL[j] = nrow(cut)
          }
          save2 <- as.vector(t(cut))
          newdat2[j,1:length(save2)] <- save2
        }
        newdat2[which(is.na(newdat2))] <- 0 # zero-padding
        newdat2_save= newdat2
        newdat3 = data.frame(cbind(Case=as.character(all3[,1]), label= as.character(pre2$anomaly_type), newdat2))
        # compare only prefixes of the same length as the current event's
        x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
        x2 = x2[,1:(prefixL[start_index]*embedding_size)]
      }else{
        object_case = pre2$Case[nrow(pre2)]
        object_event = pre2$Event[nrow(pre2)]
        data$ID <- as.factor(data$ID)
        data$ActivityID <- as.factor(data$ActivityID)
        # One-hot encoding
        data1 <- fun_onehot(data)
        newdat <- cbind(data[,1], data1)
        newdat[,1] <- as.factor(newdat[,1])
        n<- length(levels((newdat[,1]))) # the number of cases
        m<-max(table((newdat[,1]))) # maximum trace length
        num_act= ncol(newdat)-1
        num_event = nrow(newdat)
        max<- m*num_act
        c=unique(pre2[,c("Case","anomaly_type")])
        # prefix encoding: one flattened (zero-padded) row per event prefix
        prefixL = as.numeric()
        newdat2<- matrix(NA, nrow=num_event , ncol=max)
        for(j in 1:num_event){
          cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
          if(class(cut)=='numeric'){
            prefixL[j] = 1
          }else{
            prefixL[j] = nrow(cut)
          }
          save2 <- as.vector(t(cut))
          newdat2[j,1:length(save2)] <- save2
        }
        newdat2[which(is.na(newdat2))] <- 0 # zero-padding
        newdat2_save= newdat2
        act_save = names(newdat) #change 1
        newdat3 = data.frame(cbind(Case=as.character(newdat[,1]), label= as.character(pre2$anomaly_type), newdat2))
        # compare only prefixes of the same length as the current event's
        x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
        x2 = x2[,1:(prefixL[start_index]*num_act)]
      }
      # Calculate leverage (anomaly score) for the start event; it is the
      # last row of the comparison matrix.
      x= as.matrix(sapply(x2, as.numeric))
      h_diag <- fun_itree(x)
      pre[start_index, 'leverage'] = h_diag[length(h_diag)]
      leverage_end <- Sys.time()
      pre[start_index, 'time'] = (leverage_end-leverage_start)
      pre[start_index, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
      # Set escape option: translate `until` into an absolute end index
      if(until==0 | start_index+until>last_index){
        until = last_index
      }else{
        until= start_index+until
      }
      # Start event stream: score one incoming event per iteration
      for(i in (start_index+1):until){ # last_index
        print(paste("Start to calculate leverage score of ", i ,"-th event (total ",event_num," events)" ,sep=''))
        leverage_start <- Sys.time()
        pre2 = rbind(pre2, pre[i,])
        cur_len = sum(pre2$start)
        data<- pre2[,c("Case","Activity",'order')]
        names(data)[1:2] <- c("ID", "ActivityID")
        # Max option: drop oldest cases, but never the current event's case
        object_case = pre2$Case[nrow(pre2)]
        object_event = pre2$Event[nrow(pre2)]
        if(cur_len > Max ){
          del_case = pre2[which(pre2$start==1),'Case']
          del_case = del_case[1:(cur_len-Max)]
          del_case= del_case[which(!is.element(del_case, object_case))]
          data = data[which(!is.element(data[,1], del_case)),]
          pre3= pre2[which(!is.element(pre2[,1], del_case)),]
          label = as.character(pre3[,c("anomaly_type")])
          c=unique(pre3[,c("Case","anomaly_type")]) #revision2
        }else{
          label = as.character(pre2[,c("anomaly_type")])
          pre3=pre2; c=unique(pre3[,c("Case","anomaly_type")]) #revision2
        }
        if(embedding_size_p>0){
          num_act= length(unique(data$ActivityID))
          embedding_size = round(num_act*embedding_size_p)
          # embedding encoding (embeddings recomputed every event)
          embeddings = fun_embedding( as.factor(data$ActivityID), embedding_size)
          object_case = pre2$Case[nrow(pre2)]
          object_event = pre2$Event[nrow(pre2)]
          data$ID <- as.factor(data$ID)
          data$ActivityID <- as.factor(data$ActivityID)
          n= length(unique(data[,1]))
          m = max(table(data[,1]))
          data$order = as.character(data$order)
          data$ID = as.character(data$ID)
          all3 = merge(data, embeddings, by='ActivityID', all.x=T)
          all3= all3[ with(all3, order(ID, order)),]
          all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
          num_event = nrow(all3)
          max<- m*(embedding_size)
          c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
          label = as.character(c[,2])
          { # update event: rebuild the flattened prefix matrix
            newdat2<- matrix(NA, nrow=num_event , ncol=max)
            prefixL = as.numeric()
            for(j in 1:num_event){
              cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
              if(class(cut)=='numeric'){
                prefixL[j] = 1
              }else{
                prefixL[j] = nrow(cut)
              }
              save2 <- as.vector(t(cut))
              newdat2[j,1:length(save2)] <- save2
            }
          }
          # Max option: apply the case drop to the encoded matrices too
          if(cur_len > Max ){
            del_case = pre2[which(pre2$start==1),'Case'][1:(cur_len-Max)]
            del_case= del_case[which(!is.element(del_case, object_case))]
            pre2 = pre2[which(!is.element(all3[,1], del_case)),]
            newdat2 = newdat2[which(!is.element(all3[,1], del_case)),]
            label= label[which(!is.element(all3[,1], del_case))]
            prefixL= prefixL[which(!is.element(all3[,1], del_case))]
            all3 = all3[which(!is.element(all3[,1], del_case)),]
          }
          newdat2[which(is.na(newdat2))] <- 0 # zero-padding
          newdat2_save= newdat2
          newdat3 <-data.frame(cbind(Case= as.character(all3[,1]), label= label, newdat2))
          # peers = prefixes with the same length as the newest event's
          x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
          x2 = x2[,1:(prefixL[length(prefixL)]*embedding_size)]
        }else{
          object_case = pre3$Case[nrow(pre3)] #revision2
          object_event = pre3$Event[nrow(pre3)] #revision2
          data$ID <- as.factor(data$ID)
          data$ActivityID <- as.factor(data$ActivityID)
          #revision2
          # One-hot encoding
          data1 <- fun_onehot(data)
          newdat <- cbind(data[,1], data1)
          newdat[,1] <- as.factor(newdat[,1])
          n<- length(levels((newdat[,1]))) # the number of cases
          m<-max(table((newdat[,1]))) # maximum trace length
          num_act= ncol(newdat)-1
          num_event = nrow(newdat)
          max<- m*num_act
          # one flattened row per CASE here (not per event); prefixL is the
          # number of one-hot hits, i.e. the case's current trace length
          newdat2<- matrix(NA, nrow=n , ncol=max)
          prefixL = as.numeric()
          for(j in 1:n){
            cut = newdat[which(newdat[,1]== c[j,1] ),-1]
            save2 <- as.vector(t(cut))
            prefixL[j] = sum(save2)
            newdat2[j,1:length(save2)] <- save2
          }
          # CP = current trace length of the case the new event belongs to;
          # compare only against cases at least that long, truncated to CP
          CP = prefixL[which(c[,1]== object_case)]
          newdat2 = newdat2[which(prefixL >= CP),]
          if( length(which(prefixL >= CP)) != 1 ){
            newdat2 = newdat2[, 1:(CP*num_act)]
            loc = which(c[which(prefixL >= CP),1] == object_case)
            newdat2[which(is.na(newdat2))] <- 0 # zero-padding
            newdat2_save= newdat2
            newdat3= cbind(c[which(prefixL >= CP),], newdat2)
            act_save = names(newdat) #change 1
            # newdat3 <-data.frame(cbind(Case= as.character(newdat[,1]), label= label, newdat2))
            x2= newdat3[,-(1:2)]
          }else{
            # no peer case of comparable length -> nothing to score against
            x2= NA
          }
        }
        #revision2
        # NOTE(review): x2 is usually a data.frame here; is.na() on a
        # data.frame yields a matrix, and if() with a length > 1 condition
        # errors on R >= 4.2 - consider guarding with is.data.frame(x2).
        if(is.na(x2)){
          pre[i, 'leverage'] = 0
        }else{
          #Calculate leverage
          x= as.matrix(sapply(x2, as.numeric))
          h_diag <- fun_itree(x)
          pre[i, 'leverage'] = h_diag[loc]
        }
        leverage_end <- Sys.time()
        # NOTE(review): when x2 was NA this print (and the tn update below)
        # reuses h_diag/loc from a previous iteration - confirm intended.
        print(paste("Anomaly score of", i ,"-th event = ", round( h_diag[loc],5), " (CaseID=",object_case,")" ,sep=''))
        pre[i, 'time'] = (leverage_end-leverage_start)
        pre[i, 'tn'] = (h_diag[loc] > (mean(h_diag)+sd(h_diag)))
      }
      return(pre)
    }
  }
}
  # Stubs: non-batch variants are not implemented in this script; their
  # empty bodies return NULL when streaming_score() is called with batch=FALSE.
  fun_remove_TRUE = function(input, Min,start_index, Max, until,embedding_size_p, remove_threshold ){}
  fun_remove_FALSE = function(input, Min, start_index, Max, until, embedding_size_p){}
streaming_score = function(input, Min = 100, start_index = start_index, Max = 0, until=0, batch = TRUE ,embedding_size_p = 0, remove=TRUE, remove_threshold = 0.2){
total_start <- Sys.time()
if(remove==TRUE){
if(batch==TRUE){ #
pre=fun_batch_remove_TRUE(input=input, Min=Min, start_index= start_index, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
}else{
pre=fun_remove_TRUE(input=input, Min=Min, start_index= start_index, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
}
}else{
if(batch==TRUE){
pre=fun_batch_remove_FALSE(input=input, Min=Min, start_index=start_index, Max=Max, until=until, embedding_size_p=embedding_size_p )
}else{
pre=fun_remove_FALSE(input=input, Min=Min,start_index=start_index, Max=Max, until=until, embedding_size_p=embedding_size_p )
}
}
total_end <- Sys.time()
print(total_end - total_start)
return(pre)
}
}
#Result
{
  # Begin scoring once 101 case-start flags have accumulated (initial window).
  start_index = which(cumsum(input$start) == 101)[1]
  last_index = nrow(input)
  # Process the stream in chunks of 1000 events, scoring up to ~300 events
  # per chunk (the start event plus until = 299 successors); the trailing
  # partial chunk is dropped.
  part = seq(start_index, last_index, 1000 )
  part = part[-length(part)]
  output_total = data.frame()
  for(i in part){
    output = streaming_score(input, Min=100, start_index= i, Max=1000, until = 299, batch=TRUE, remove= FALSE, embedding_size_p=0) # onehot
    if(is.null(output) == 0 ){
      # Keep only events that actually received a score (leverage >= 0).
      output = output[order(output$timestamp),]
      start = min(which(output$leverage >=0))
      loc = which(output$leverage>=0)
      output = output[loc,]
      output_total = rbind(output_total, output)
    }
  }
  setwd("/home/jonghyeon3/extension_AD/evaluations/bs2/result")
  write.csv(output_total, "result_itree_creditcard.csv", row.names= FALSE)
}
# plot(see$leverage, ylim= c(0,1),
# col= ifelse(see$label==1 ,'red', 'black' ), cex= ifelse(see$label==1 ,1.0, 0.5), pch= ifelse(see$label==1 ,9, 1)
# , ylab= 'Anomaly score')
#
# plot(see2$leverage, ylim= c(0,1),
# col= ifelse(see2$label==1 ,'red', 'black' ), cex= ifelse(see2$label==1 ,1.0, 0.5), pch= ifelse(see2$label==1 ,9, 1)
# , ylab= 'Anomaly score')
|
/bs2/Model2_creditcard.R
|
no_license
|
paai-lab/Online-Anomaly-Detection-Extension-2021
|
R
| false
| false
| 16,338
|
r
|
# install.packages('keras')
# install.packages('purrr')
# install.packages('functional')
library(IsolationForest)
library(MASS)
library(caret)
library(fGarch)
library(fitdistrplus)
library(pracma)
library(BBmisc)
library(functional)
library(dplyr)
library(keras)
library(lubridate)
library(tensorflow)
Sys.sleep(5)
install_tensorflow(restart_session = FALSE)
setwd("/home/jonghyeon3/extension_AD/evaluations/data")
fn<-list.files(getwd())
#data load and preprocess
{
input = data.frame(read.csv('creditcard-0.1-0.csv', header=T))
normal= input[which(input$anomaly_type =="normal"),]
anomaly= input[which(input$anomaly_type !="normal"),]
normal_seq = aggregate(normal$Activity, by=list(normal$Case), FUN=paste0)
anomaly_seq = aggregate(anomaly$Activity, by=list(anomaly$Case), FUN=paste0)
delete_case= anomaly_seq[which(is.element(anomaly_seq$x , normal_seq$x)),'Group.1']
input = input[which(!is.element(input$Case, delete_case)),]
input$Event = 1:nrow(input)
input$Event = as.factor(input$Event)
one= rep(1, nrow(input))
input[,'start'] = ave(one, by= input$Case, FUN= cumsum) -1
input[which(input$start !=1),'start'] =0
}
####
#functions
{
fun_itree = function(x){
if(sum( apply(x, 2, FUN= function(x){length(unique(x))} ) > 1 )>0){
tr<-IsolationTrees(as.data.frame(x), ntree=100, nmin=5)
as<-AnomalyScore(x,tr)
return(as$outF)
}else{
as = rep(0, nrow(x))
return(as)
}
}
fun_embedding = function(ActivityID, embedding_size){
model <- keras_model_sequential()
model %>% layer_embedding(input_dim = length(unique(ActivityID))+1, output_dim = embedding_size, input_length = 1, name="embedding") %>%
layer_flatten() %>%
layer_dense(units=40, activation = "relu") %>%
layer_dense(units=10, activation = "relu") %>%
layer_dense(units=1)
model %>% compile(loss = "mse", optimizer = "sgd", metric="accuracy")
layer <- get_layer(model, "embedding")
embeddings <- data.frame(layer$get_weights()[[1]])
embeddings$ActivityID <- c("none", levels(ActivityID) )
return(embeddings)
}
fun_onehot = function(data){
if(length(levels(data$ActivityID))>1){
a<- model.matrix(~ActivityID, data = data)
A<- as.numeric(data[,2])
A[which(A!=1)] <- 0
a<- cbind(ActivityID1 = A, a[,-1])
onehot<- as.data.frame(a)
}else{
A<- as.numeric(data[,2])
A[which(A!=1)] <- 0
a<- cbind(ActivityID1 = A)
onehot<- as.data.frame(a)
}
return(onehot)
}
fun_batch_remove_TRUE = function(input, Min, start_index, Max, until, embedding_size_p, remove_threshold ){}
fun_batch_remove_FALSE = function(input, Min,start_index, Max, until, embedding_size_p ){
#prepare data
pre<-input
pre= pre[ with(pre, order(Case,timestamp)),]
one= rep(1, nrow(pre))
pre[,'start'] = ave(one, by= pre$Case, FUN= cumsum) -1
pre[which(pre$start !=1),'start'] =0
pre= pre[ with(pre, order(timestamp)),]
pre[,'Event'] = as.factor(1:nrow(pre))
pre[,'num_case'] = cumsum(pre$start)
pre[,'leverage'] = rep(-1, nrow(pre))
pre[,'t1'] = rep(0, nrow(pre))
pre[,'t2'] = rep(0, nrow(pre))
pre[,'t3'] = rep(0, nrow(pre))
pre[,'tn']= rep(0, nrow(pre))
pre[,'time'] = rep(0, nrow(pre))
event_num = nrow(pre)
case_num= length(unique(pre$Case))
start_index = start_index
last_index = nrow(pre)
leverage_start <- Sys.time()
pre2 = pre[1:start_index,]
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
#basic: Max should be larger than Min or equal
if(Max< (Min+1)){
Max=Min+1
}
# Max option
if(cur_len > Max ){
del_case = pre[which(pre$start==1),'Case'][1:(cur_len-Max)]
pre = pre[which(!is.element(pre$Case, del_case)),]
pre[,'num_case'] = cumsum(pre$start)
event_num = nrow(pre)
case_num= length(unique(pre$Case))
last_index = nrow(pre)
pre2 = pre2[which(!is.element(pre2$Case, del_case)),]
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
cur_len = sum(pre2$start)
start_index = nrow(pre2)
last_index = nrow(pre)
}
if(start_index == last_index){
#skip
}else{
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# deep embedding encoding
embeddings = fun_embedding(as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 = data.frame(cbind(Case=as.character(all3[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*embedding_size)]
}else{
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
c=unique(pre2[,c("Case","anomaly_type")])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
act_save = names(newdat) #change 1
newdat3 = data.frame(cbind(Case=as.character(newdat[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*num_act)]
}
#Caculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_itree(x)
pre[start_index, 'leverage'] = h_diag[length(h_diag)]
leverage_end <- Sys.time()
pre[start_index, 'time'] = (leverage_end-leverage_start)
pre[start_index, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
#Set escape option
if(until==0 | start_index+until>last_index){
until = last_index
}else{
until= start_index+until
}
#Start event steam
for(i in (start_index+1):until){ # last_index
print(paste("Start to calculate leverage score of ", i ,"-th event (total ",event_num," events)" ,sep=''))
leverage_start <- Sys.time()
pre2 = rbind(pre2, pre[i,])
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity",'order')]
names(data)[1:2] <- c("ID", "ActivityID")
# Max option
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case']
del_case = del_case[1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
data = data[which(!is.element(data[,1], del_case)),]
pre3= pre2[which(!is.element(pre2[,1], del_case)),]
label = as.character(pre3[,c("anomaly_type")])
c=unique(pre3[,c("Case","anomaly_type")]) #revision2
}else{
label = as.character(pre2[,c("anomaly_type")])
pre3=pre2; c=unique(pre3[,c("Case","anomaly_type")]) #revision2
}
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# embedding encoding
embeddings = fun_embedding( as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
{ # update event
newdat2<- matrix(NA, nrow=num_event , ncol=max)
prefixL = as.numeric()
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
}
# Max option
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case'][1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
pre2 = pre2[which(!is.element(all3[,1], del_case)),]
newdat2 = newdat2[which(!is.element(all3[,1], del_case)),]
label= label[which(!is.element(all3[,1], del_case))]
prefixL= prefixL[which(!is.element(all3[,1], del_case))]
all3 = all3[which(!is.element(all3[,1], del_case)),]
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 <-data.frame(cbind(Case= as.character(all3[,1]), label= label, newdat2))
x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
x2 = x2[,1:(prefixL[length(prefixL)]*embedding_size)]
}else{
object_case = pre3$Case[nrow(pre3)] #revision2
object_event = pre3$Event[nrow(pre3)] #revision2
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
#revision2
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
newdat2<- matrix(NA, nrow=n , ncol=max)
prefixL = as.numeric()
for(j in 1:n){
cut = newdat[which(newdat[,1]== c[j,1] ),-1]
save2 <- as.vector(t(cut))
prefixL[j] = sum(save2)
newdat2[j,1:length(save2)] <- save2
}
CP = prefixL[which(c[,1]== object_case)]
newdat2 = newdat2[which(prefixL >= CP),]
if( length(which(prefixL >= CP)) != 1 ){
newdat2 = newdat2[, 1:(CP*num_act)]
loc = which(c[which(prefixL >= CP),1] == object_case)
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3= cbind(c[which(prefixL >= CP),], newdat2)
act_save = names(newdat) #change 1
# newdat3 <-data.frame(cbind(Case= as.character(newdat[,1]), label= label, newdat2))
x2= newdat3[,-(1:2)]
}else{
x2= NA
}
}
#revision2
if(is.na(x2)){
pre[i, 'leverage'] = 0
}else{
#Calculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_itree(x)
pre[i, 'leverage'] = h_diag[loc]
}
leverage_end <- Sys.time()
print(paste("Anomaly score of", i ,"-th event = ", round( h_diag[loc],5), " (CaseID=",object_case,")" ,sep=''))
pre[i, 'time'] = (leverage_end-leverage_start)
pre[i, 'tn'] = (h_diag[loc] > (mean(h_diag)+sd(h_diag)))
}
return(pre)
}
}
fun_remove_TRUE = function(input, Min,start_index, Max, until,embedding_size_p, remove_threshold ){}
fun_remove_FALSE = function(input, Min, start_index, Max, until, embedding_size_p){}
streaming_score = function(input, Min = 100, start_index = start_index, Max = 0, until=0, batch = TRUE ,embedding_size_p = 0, remove=TRUE, remove_threshold = 0.2){
total_start <- Sys.time()
if(remove==TRUE){
if(batch==TRUE){ #
pre=fun_batch_remove_TRUE(input=input, Min=Min, start_index= start_index, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
}else{
pre=fun_remove_TRUE(input=input, Min=Min, start_index= start_index, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
}
}else{
if(batch==TRUE){
pre=fun_batch_remove_FALSE(input=input, Min=Min, start_index=start_index, Max=Max, until=until, embedding_size_p=embedding_size_p )
}else{
pre=fun_remove_FALSE(input=input, Min=Min,start_index=start_index, Max=Max, until=until, embedding_size_p=embedding_size_p )
}
}
total_end <- Sys.time()
print(total_end - total_start)
return(pre)
}
}
#Result
# Driver: score the event stream window-by-window and write the combined result to CSV.
{
  # First event index after the 101st trace has started (Min = 100 warm-up cases).
  start_index = which(cumsum(input$start) == 101)[1]
  last_index = nrow(input)
  # Window start offsets, 1000 events apart; drop the final (possibly partial) one.
  part = seq(start_index, last_index, 1000 )
  part = part[-length(part)]
  output_total = data.frame()
  for(i in part){
    output = streaming_score(input, Min=100, start_index= i, Max=1000, until = 299, batch=TRUE, remove= FALSE, embedding_size_p=0) # onehot
    if(is.null(output) == 0 ){
      # Keep only events that actually received a score (leverage >= 0), in time order.
      output = output[order(output$timestamp),]
      start = min(which(output$leverage >=0))   # NOTE(review): `start` is computed but never used
      loc = which(output$leverage>=0)
      output = output[loc,]
      # NOTE(review): rbind inside a loop grows quadratically; collect in a list
      # and bind once if the number of windows grows large.
      output_total = rbind(output_total, output)
    }
  }
  # NOTE(review): hard-coded absolute output path -- adjust per machine.
  setwd("/home/jonghyeon3/extension_AD/evaluations/bs2/result")
  write.csv(output_total, "result_itree_creditcard.csv", row.names= FALSE)
}
# plot(see$leverage, ylim= c(0,1),
# col= ifelse(see$label==1 ,'red', 'black' ), cex= ifelse(see$label==1 ,1.0, 0.5), pch= ifelse(see$label==1 ,9, 1)
# , ylab= 'Anomaly score')
#
# plot(see2$leverage, ylim= c(0,1),
# col= ifelse(see2$label==1 ,'red', 'black' ), cex= ifelse(see2$label==1 ,1.0, 0.5), pch= ifelse(see2$label==1 ,9, 1)
# , ylab= 'Anomaly score')
|
##plot1 -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## Read data from file. The raw file encodes missing values as "?", so declare
## them as NA up front instead of relying on a later as.numeric() coercion
## (which would emit warnings). stringsAsFactors = FALSE keeps the text columns
## as character on every R version.
hpc <- read.csv("~/household_power_consumption.txt", sep = ";",
                na.strings = "?", stringsAsFactors = FALSE)
## Subset required data (only two dates):
hpc2 <- subset(hpc, Date == "1/2/2007" | Date == "2/2/2007")
## Convert date and time formats:
hpc3 <- transform(hpc2, Date = as.Date(hpc2$Date, format = "%d/%m/%Y"))
hpc4 <- transform(hpc3, Time = strptime(paste(hpc3$Date, hpc3$Time),
                                        format = "%Y-%m-%d %H:%M:%S"))
## Convert the global active power to numeric:
gap1 <- as.vector(hpc4$Global_active_power)
gap2 <- as.numeric(gap1)
## Create the histogram (gap2 is already numeric; the original wrapped it in a
## redundant second as.numeric()):
hist(gap2,
     xlab = "Global Active Power (kilowatts)",
     col = "red",
     main = "Global Active Power")
## Copy the on-screen plot to a png file:
dev.copy(png, file = "plot1.png")
dev.off()
|
/plot1.R
|
no_license
|
pinakmishra/ExData_Plotting1
|
R
| false
| false
| 779
|
r
|
##plot1 -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## Read data from file:
## NOTE(review): the raw file marks missing values with "?"; they survive this
## read as text and only become NA (with a warning) in the as.numeric() call below.
hpc <- read.csv("~/household_power_consumption.txt", sep=";")
## Subset required data (only two dates):
hpc2 <- subset(hpc, Date == "1/2/2007" | Date == "2/2/2007")
## Convert date and time formats:
hpc3 <- transform(hpc2, Date = as.Date(hpc2$Date, format = "%d/%m/%Y"))
hpc4 <- transform(hpc3, Time = strptime(paste(hpc3$Date, hpc3$Time),
                                        format = "%Y-%m-%d %H:%M:%S"))
## Convert the global active power to numeric:
gap1 <- as.vector(hpc4$Global_active_power)
gap2 <- as.numeric(gap1)
## Create the histogram (the inner as.numeric() is redundant: gap2 is already numeric):
hist(as.numeric(gap2),
     xlab = "Global Active Power (kilowatts)",
     col = "red",
     main = "Global Active Power")
## Copy the on-screen plot to a png file:
dev.copy(png, file = "plot1.png")
dev.off()
|
#' Space headway \code{h} between the lead and following vehicles at time \code{t}.
#' The lead vehicle follows a \code{gbm} model; the following vehicle travels at
#' constant speed.
#'
#' @param xl0 location of the lead vehicle at \code{t} = 0, a number
#' @param ul0 pre-breakdown speed of the lead vehicle, a number
#' @param theta a \code{gbm} model parameter, a number
#' @param xf1 location of the following vehicle at \code{t} = \code{t1}, a number
#' @param t1 deceleration time, a number
#' @param t time, a number
#' @usage f1(xl0, ul0, theta, xf1, t, t1)
# #' @examples
# #' f1(-689.6, 92.4, -0.0154, -240, 4, 0)
f1 <- function(xl0, ul0, theta, xf1, t, t1) {
  # Headway = lead-vehicle position (gbm trajectory from xl0) minus
  # following-vehicle position (constant speed ul0 from xf1, started at t1).
  lead_position   <- xl0 + gbmx(ul0, theta, t)
  follow_position <- xfollow(xf1, ul0, t, t1)
  lead_position - follow_position
}
|
/R/f1.R
|
permissive
|
PJOssenbruggen/Basic
|
R
| false
| false
| 749
|
r
|
#' Space headway \code{h} between the lead and following vehicles at time \code{t}. Lead and following vehicles use \code{gbm} and constant speed models, respectively.
#'
#' @param xl0 location of lead vehicle at \code{t} = 0, a number
#' @param ul0 pre-breakdown speed of lead vehicle, a number
#' @param theta a \code{gbm} model parameter, a number
#' @param xf1 location of following vehicle at \code{t} = \code{t1}, a number
#' @param t1 deceleration time, a number
#' @param t time, a number
#' @usage f1(xl0, ul0, theta, xf1, t, t1)
# #' @examples
# #' f1(-689.6, 92.4, -0.0154, -240, 4, 0)
f1 <- function(xl0, ul0, theta, xf1, t, t1) {
  # Map arguments onto the constant-speed follower model's parameter names.
  x0 <- xf1
  u <- ul0
  t0 <- t1
  # Headway: lead position under the gbm model minus the follower's position.
  h <- xl0 + gbmx(ul0, theta, t) - xfollow(x0, u, t, t0)
  return(h)
}
|
#-------------------------------------------server.R -----------------------------------------#
# Backend of the Shiny application: receives requests from the UI, processes them, and        #
# returns the results to the interface for display. All variables defined here are local     #
# to the server function, not global.                                                         #
#---------------------------------------------------------------------------------------------#
#--------------------- Load the contents / variables of the global.R file --------------------#
source("./global.r", local = TRUE)
#--------------------------- Beginning of the server (backend) logic -------------------------#
shinyServer(function(input, output) {
  #---- Shorten some long category labels so they fit in legends and plots ----
  Type_Cara$CARACT_NOM_FR[which(Type_Cara$CARACT_NOM_FR == "Services automobile et stationnement")] <- rep("Auto_Parking")
  Type_Cara$CARACT_NOM_FR[which(Type_Cara$CARACT_NOM_FR == "Modes de paiement")] <- rep("Paiement")
  Adr_Cara$CARACT_NOM_FR[which(Adr_Cara$CARACT_NOM_FR == "Services automobile et stationnement")] <- rep("Auto_Parking")
  Adr_Cara$CARACT_NOM_FR[which(Adr_Cara$CARACT_NOM_FR == "Modes de paiement")] <- rep("Paiement")
  #---- KPI: total number of establishments in the province ----
  output$total_hotel_box <- renderValueBox({
    nbr_hbrg <- length((unique(ETBL_Adr$ETBL_ID))) # count of distinct establishment ids
    valueBox(h3(nbr_hbrg),"Total Établissement",icon = icon("home"),color = "aqua") # inject the count into the value box
  })
  #---- KPI: total number of municipalities ----
  output$total_City_box <- renderValueBox({
    nbr_muni <- length(unique(ETBL_Adr$ADR_MUNICIPALITE))
    valueBox(h3(nbr_muni),"Total Municipalité", icon = icon("map"), color = "blue")
  })
  #---- KPI: total number of establishment types ----
  output$total_type_box <- renderValueBox({
    nbr_type_hbrg <- length(unique(Type_Cara$ETBL_TYPE_FR))
    valueBox(h3(nbr_type_hbrg),"Total Type", icon = icon("th"), color = "aqua")
  })
  #---- Bar chart of establishment types ----
  # Built either province-wide ("Tous") or restricted to the selected municipality.
  output$bar_chart_type <- renderPlot({
    if (input$choix_ville == "Tous") {
      tampon <- data.frame(table(unique(Type_Cara[,c(2,3)])$ETBL_TYPE_FR)) # frequency of each type
    } else {
      tmp <- unique(Adr_Cara[which(Adr_Cara$ADR_MUNICIPALITE == input$choix_ville),2])
      tampon <- unique(Type_Cara[which(Type_Cara$ETBL_ID %in% tmp),c(2,3)])$ETBL_TYPE_FR
      tampon <- data.frame(table(tampon))
    }
    names(tampon) <- c("Type","Frequence") # rename the columns
    tampon$Type <- as.character(tampon$Type) # convert to character so long labels can be replaced
    tampon$Type[which(tampon$Type == "Chalet / appartement / résidence de tourisme")] <- rep("Chalet/App/Residence") # shorten the label
    tamp_graph_static <- tampon
    #rm(tampon)
    graph <- ggplot(tamp_graph_static, aes(x = Type, y = Frequence)) +
      geom_bar(stat = "identity", aes(fill = Type)) +
      geom_text(aes(label = Frequence), vjust = 1, size = 4.0) +
      theme(axis.text.x = element_blank()) +
      # Title depends on whether a single municipality is selected
      if (input$choix_ville == "Tous") {
        ggtitle(paste("Repartition par type d'établissement au Québec"))
      } else {ggtitle(paste("Repartition par type d'établissement à",input$choix_ville))}
    graph
  })
  #---- Bar chart of the selected characteristic (input$filtre) ----
  output$bar_chart <- renderPlot({
    if (input$choix_ville == "Tous") {
      tmp_interactif <- data.frame(table(Type_Cara[which(Type_Cara$CARACT_NOM_FR == input$filtre),6]))
      names(tmp_interactif) <- c("Caracteristique","Frequence") # rename the columns
      graph <- ggplot(tmp_interactif, aes(x = Caracteristique, y = Frequence)) +
        geom_bar(stat = "identity", aes(fill = Caracteristique)) +
        geom_text(aes(label = Frequence), vjust = 1, size = 3.0) +
        theme(axis.text.x = element_blank()) +
        ggtitle(paste("Repartition des établissements par",input$filtre))
      graph # render the plot
    } else {
      tmp_interactif <- Adr_Cara[which(Adr_Cara$ADR_MUNICIPALITE == input$choix_ville),c(4,5)]
      tmp_interactif <- data.frame(table(tmp_interactif[which(tmp_interactif$CARACT_NOM_FR == input$filtre),2]))
      names(tmp_interactif) <- c("Caracteristique","Frequence")
      graph <- ggplot(tmp_interactif, aes(x = Caracteristique, y = Frequence)) +
        geom_bar(stat = "identity", aes(fill = Caracteristique)) +
        geom_text(aes(label = Frequence), vjust = 1, size = 3.0) +
        theme(axis.text.x = element_blank()) +
        ggtitle(paste("Repartition par",input$filtre,"pour la ville de",input$choix_ville))
      graph # render the plot
    }
  })
  #---- Table: list of establishments ----
  # Title of the establishments table
  output$titre_tab <- renderText({
    if (input$choix_ville == "Tous") {
      titre <- "Liste des établissements au Québec"
      titre
    } else {
      titre <- paste("Liste des établissements à",input$choix_ville)
      titre
    }
  })
  # Render the establishments table (5 rows per page)
  output$list_hbgr <- DT::renderDT({
    if (input$choix_ville == "Tous") { # establishments of every municipality
      tp <- ETBL_Adr[,c(3,4)]
      tp
    } else {# establishments of the selected municipality only
      tp <- ETBL_Adr[which(ETBL_Adr$ADR_MUNICIPALITE == input$choix_ville),c(3,4)]
      tp
    }
  }, options = list(pageLength = 5))
  #---- Map of the establishments ----
  # Map title
  output$titre_map <- renderText({
    if (input$choix_ville == "Tous") {
      titre <- "Regroupement des établissements au Québec"
      titre
    } else {
      titre <- paste("Map des établissement à",input$choix_ville)
      titre
    }
  })
  # Build the leaflet map
  output$map <- renderLeaflet({
    # Positions displayed depend on the municipality filter
    if (input$choix_ville == "Tous") { # province-wide view: clustered circle markers
      map_graph <- leaflet()
      map_graph <- addTiles(map_graph)
      map_graph <- addCircleMarkers(map_graph, lat = ETBL_Adr$ADR_LATITUDE,lng = ETBL_Adr$ADR_LONGITUDE, radius = 4, color = "#0073B7",
                                    clusterOptions = markerClusterOptions(),label = as.character(ETBL_Adr$ETBL_NOM_FR))
      map_graph <- setView(map_graph,
                           lat = mean(ETBL_Adr$ADR_LATITUDE, na.rm = TRUE),
                           lng = mean(ETBL_Adr$ADR_LONGITUDE, na.rm = TRUE), zoom = 7)
      # display the map
      map_graph
    } else {# a specific municipality: plain markers, tighter zoom
      map_graph <- leaflet()
      map_graph <- addTiles(map_graph)
      # Coordinates of the selected municipality's establishments
      tampon_choix <- ETBL_Adr[which(ETBL_Adr$ADR_MUNICIPALITE == input$choix_ville),c(2,3,5,7,8)]
      #map_graph <- addCircleMarkers(map_graph, lat = tampon_choix$ADR_LATITUDE,lng = tampon_choix$ADR_LONGITUDE, radius = 4, color = "#0073B7",
      #clusterOptions = markerClusterOptions(),label = as.character(tampon_choix$ETBL_NOM_FR))
      map_graph <- addMarkers(map_graph,lat = tampon_choix$ADR_LATITUDE,lng = tampon_choix$ADR_LONGITUDE, label = as.character(tampon_choix$ETBL_NOM_FR))
      map_graph <- setView(map_graph,
                           lat = mean(tampon_choix$ADR_LATITUDE, na.rm = TRUE),
                           lng = mean(tampon_choix$ADR_LONGITUDE, na.rm = TRUE), zoom = 10)
      # display the map
      map_graph
    }
  })
  #---- Tables shown in the second "Data" tab ----
  output$data_viz_out <- DT::renderDT({
    # Name of the table selected in the UI
    tab <- input$data_viz_in
    # Choose which data frame to display
    if (tab == "JOINTURE ETBL & ADRESSES") {
      tab_out <- ETBL_Adr[,c(-1)]
      colnames(tab_out) <- c("ID","NOM","ADRESS CIVIQUE","MUNICIPALITÉ","CODE POSTAL","LATITUDE","LONGITUDE") # rename the columns
    } else {
      tab_out <- Type_Cara[,c(-1)]
    }
    # render the selected table
    tab_out
  }, filter = "top")
})
#-------------------------------------- THE END -------------------------------------------
|
/server.R
|
no_license
|
yamess/R-shiny-Dashboard
|
R
| false
| false
| 9,197
|
r
|
#-------------------------------------------server.R -----------------------------------------#
# Backend of the Shiny application: receives requests from the UI, processes them, and        #
# returns the results to the interface for display. All variables defined here are local     #
# to the server function, not global.                                                         #
#---------------------------------------------------------------------------------------------#
#--------------------- Load the contents / variables of the global.R file --------------------#
source("./global.r", local = TRUE)
#--------------------------- Beginning of the server (backend) logic -------------------------#
shinyServer(function(input, output) {
  #---- Shorten some long category labels so they fit in legends and plots ----
  Type_Cara$CARACT_NOM_FR[which(Type_Cara$CARACT_NOM_FR == "Services automobile et stationnement")] <- rep("Auto_Parking")
  Type_Cara$CARACT_NOM_FR[which(Type_Cara$CARACT_NOM_FR == "Modes de paiement")] <- rep("Paiement")
  Adr_Cara$CARACT_NOM_FR[which(Adr_Cara$CARACT_NOM_FR == "Services automobile et stationnement")] <- rep("Auto_Parking")
  Adr_Cara$CARACT_NOM_FR[which(Adr_Cara$CARACT_NOM_FR == "Modes de paiement")] <- rep("Paiement")
  #---- KPI: total number of establishments in the province ----
  output$total_hotel_box <- renderValueBox({
    nbr_hbrg <- length((unique(ETBL_Adr$ETBL_ID))) # count of distinct establishment ids
    valueBox(h3(nbr_hbrg),"Total Établissement",icon = icon("home"),color = "aqua") # inject the count into the value box
  })
  #---- KPI: total number of municipalities ----
  output$total_City_box <- renderValueBox({
    nbr_muni <- length(unique(ETBL_Adr$ADR_MUNICIPALITE))
    valueBox(h3(nbr_muni),"Total Municipalité", icon = icon("map"), color = "blue")
  })
  #---- KPI: total number of establishment types ----
  output$total_type_box <- renderValueBox({
    nbr_type_hbrg <- length(unique(Type_Cara$ETBL_TYPE_FR))
    valueBox(h3(nbr_type_hbrg),"Total Type", icon = icon("th"), color = "aqua")
  })
  #---- Bar chart of establishment types ----
  # Built either province-wide ("Tous") or restricted to the selected municipality.
  output$bar_chart_type <- renderPlot({
    if (input$choix_ville == "Tous") {
      tampon <- data.frame(table(unique(Type_Cara[,c(2,3)])$ETBL_TYPE_FR)) # frequency of each type
    } else {
      tmp <- unique(Adr_Cara[which(Adr_Cara$ADR_MUNICIPALITE == input$choix_ville),2])
      tampon <- unique(Type_Cara[which(Type_Cara$ETBL_ID %in% tmp),c(2,3)])$ETBL_TYPE_FR
      tampon <- data.frame(table(tampon))
    }
    names(tampon) <- c("Type","Frequence") # rename the columns
    tampon$Type <- as.character(tampon$Type) # convert to character so long labels can be replaced
    tampon$Type[which(tampon$Type == "Chalet / appartement / résidence de tourisme")] <- rep("Chalet/App/Residence") # shorten the label
    tamp_graph_static <- tampon
    #rm(tampon)
    graph <- ggplot(tamp_graph_static, aes(x = Type, y = Frequence)) +
      geom_bar(stat = "identity", aes(fill = Type)) +
      geom_text(aes(label = Frequence), vjust = 1, size = 4.0) +
      theme(axis.text.x = element_blank()) +
      # Title depends on whether a single municipality is selected
      if (input$choix_ville == "Tous") {
        ggtitle(paste("Repartition par type d'établissement au Québec"))
      } else {ggtitle(paste("Repartition par type d'établissement à",input$choix_ville))}
    graph
  })
  #---- Bar chart of the selected characteristic (input$filtre) ----
  output$bar_chart <- renderPlot({
    if (input$choix_ville == "Tous") {
      tmp_interactif <- data.frame(table(Type_Cara[which(Type_Cara$CARACT_NOM_FR == input$filtre),6]))
      names(tmp_interactif) <- c("Caracteristique","Frequence") # rename the columns
      graph <- ggplot(tmp_interactif, aes(x = Caracteristique, y = Frequence)) +
        geom_bar(stat = "identity", aes(fill = Caracteristique)) +
        geom_text(aes(label = Frequence), vjust = 1, size = 3.0) +
        theme(axis.text.x = element_blank()) +
        ggtitle(paste("Repartition des établissements par",input$filtre))
      graph # render the plot
    } else {
      tmp_interactif <- Adr_Cara[which(Adr_Cara$ADR_MUNICIPALITE == input$choix_ville),c(4,5)]
      tmp_interactif <- data.frame(table(tmp_interactif[which(tmp_interactif$CARACT_NOM_FR == input$filtre),2]))
      names(tmp_interactif) <- c("Caracteristique","Frequence")
      graph <- ggplot(tmp_interactif, aes(x = Caracteristique, y = Frequence)) +
        geom_bar(stat = "identity", aes(fill = Caracteristique)) +
        geom_text(aes(label = Frequence), vjust = 1, size = 3.0) +
        theme(axis.text.x = element_blank()) +
        ggtitle(paste("Repartition par",input$filtre,"pour la ville de",input$choix_ville))
      graph # render the plot
    }
  })
  #---- Table: list of establishments ----
  # Title of the establishments table
  output$titre_tab <- renderText({
    if (input$choix_ville == "Tous") {
      titre <- "Liste des établissements au Québec"
      titre
    } else {
      titre <- paste("Liste des établissements à",input$choix_ville)
      titre
    }
  })
  # Render the establishments table (5 rows per page)
  output$list_hbgr <- DT::renderDT({
    if (input$choix_ville == "Tous") { # establishments of every municipality
      tp <- ETBL_Adr[,c(3,4)]
      tp
    } else {# establishments of the selected municipality only
      tp <- ETBL_Adr[which(ETBL_Adr$ADR_MUNICIPALITE == input$choix_ville),c(3,4)]
      tp
    }
  }, options = list(pageLength = 5))
  #---- Map of the establishments ----
  # Map title
  output$titre_map <- renderText({
    if (input$choix_ville == "Tous") {
      titre <- "Regroupement des établissements au Québec"
      titre
    } else {
      titre <- paste("Map des établissement à",input$choix_ville)
      titre
    }
  })
  # Build the leaflet map
  output$map <- renderLeaflet({
    # Positions displayed depend on the municipality filter
    if (input$choix_ville == "Tous") { # province-wide view: clustered circle markers
      map_graph <- leaflet()
      map_graph <- addTiles(map_graph)
      map_graph <- addCircleMarkers(map_graph, lat = ETBL_Adr$ADR_LATITUDE,lng = ETBL_Adr$ADR_LONGITUDE, radius = 4, color = "#0073B7",
                                    clusterOptions = markerClusterOptions(),label = as.character(ETBL_Adr$ETBL_NOM_FR))
      map_graph <- setView(map_graph,
                           lat = mean(ETBL_Adr$ADR_LATITUDE, na.rm = TRUE),
                           lng = mean(ETBL_Adr$ADR_LONGITUDE, na.rm = TRUE), zoom = 7)
      # display the map
      map_graph
    } else {# a specific municipality: plain markers, tighter zoom
      map_graph <- leaflet()
      map_graph <- addTiles(map_graph)
      # Coordinates of the selected municipality's establishments
      tampon_choix <- ETBL_Adr[which(ETBL_Adr$ADR_MUNICIPALITE == input$choix_ville),c(2,3,5,7,8)]
      #map_graph <- addCircleMarkers(map_graph, lat = tampon_choix$ADR_LATITUDE,lng = tampon_choix$ADR_LONGITUDE, radius = 4, color = "#0073B7",
      #clusterOptions = markerClusterOptions(),label = as.character(tampon_choix$ETBL_NOM_FR))
      map_graph <- addMarkers(map_graph,lat = tampon_choix$ADR_LATITUDE,lng = tampon_choix$ADR_LONGITUDE, label = as.character(tampon_choix$ETBL_NOM_FR))
      map_graph <- setView(map_graph,
                           lat = mean(tampon_choix$ADR_LATITUDE, na.rm = TRUE),
                           lng = mean(tampon_choix$ADR_LONGITUDE, na.rm = TRUE), zoom = 10)
      # display the map
      map_graph
    }
  })
  #---- Tables shown in the second "Data" tab ----
  output$data_viz_out <- DT::renderDT({
    # Name of the table selected in the UI
    tab <- input$data_viz_in
    # Choose which data frame to display
    if (tab == "JOINTURE ETBL & ADRESSES") {
      tab_out <- ETBL_Adr[,c(-1)]
      colnames(tab_out) <- c("ID","NOM","ADRESS CIVIQUE","MUNICIPALITÉ","CODE POSTAL","LATITUDE","LONGITUDE") # rename the columns
    } else {
      tab_out <- Type_Cara[,c(-1)]
    }
    # render the selected table
    tab_out
  }, filter = "top")
})
#-------------------------------------- THE END -------------------------------------------
|
#' ChainNetwork
#'
#' Spawn a chain network covariance matrix
#'
#' @param p Positive integer. The desired number of dimensions.
#' @param n_perm Positive integer. The first n_perm dimensions will be permuted randomly.
#' @param a Positive float between 0 and 1. Scale parameter for the elements of the covariance matrix.
#' @param prec_mat Should the precision matrix be returned? If FALSE the covariance matrix will be returned (default).
#' @param scaled If TRUE the created precision matrix will be inverted and scaled to a correlation matrix.
#'
#' @return A covariance or precision matrix.
#' @export
#'
#' @importFrom stats runif cov2cor
#'
#' @examples
#' ChainNetwork(50)
ChainNetwork <- function(p, n_perm = p, a = 0.5, prec_mat = FALSE, scaled = TRUE) {
  stopifnot(p >= n_perm)
  # Node "positions" along the chain: cumulative sums of U(0.5, 1) draws.
  s_vec <- cumsum(runif(p, 0.5, 1))
  if (!is.null(n_perm) && n_perm >= 0) {
    # Randomly permute the first n_perm positions; keep the rest in order.
    perm_inds <- sample(1:n_perm, n_perm, replace = FALSE)
    if (n_perm < p) perm_inds <- c(perm_inds, (n_perm + 1):p)
    s_vec <- s_vec[perm_inds]
  }
  # omega[i, j] = exp(-a * |s_i - s_j|); vectorized via outer() instead of
  # the original O(p^2) double for-loop (identical values, no element loop).
  omega <- exp(-a * abs(outer(s_vec, s_vec, "-")))
  if (scaled) {
    # Invert and rescale to a correlation matrix.
    sigma <- cov2cor(solve(omega))
    if (prec_mat) solve(sigma) else sigma
  } else {
    if (prec_mat) omega else solve(omega)
  }
}
#' ScaleNetwork
#'
#' Spawn a scale-free (hub) network precision matrix via preferential attachment.
#'
#' @inheritParams ChainNetwork
#' @inheritParams RandomNetwork
#' @param preferential_power Power coefficient alpha for weighting of degree number as alpha in preferential attachment mechanism.
#'
#' @return A covariance or precision matrix.
#' @export
#'
#' @examples
#' ScaleNetwork(50)
ScaleNetwork <- function(p, preferential_power = 1, u = 0.1, v = 0.3, prec_mat = TRUE, scaled = FALSE) {
  # The seed graph needs at least two nodes.
  stopifnot(p >= 2)
  theta <- matrix(0, p, p)
  theta[1, 2] <- theta[2, 1] <- 1   # seed edge between the first two nodes
  # Preferential attachment: each new node i attaches to one existing node,
  # chosen with probability proportional to degree^preferential_power.
  # Guarded with `p >= 3` because seq(3, p) counts *down* for p < 3, which made
  # the original version error out on p = 2.
  if (p >= 3) {
    for (i in 3:p) {
      probs <- colSums(theta)^preferential_power
      probs <- probs / sum(probs)
      edges <- sample.int(i - 1, 1, prob = probs[1:(i - 1)])
      theta[edges, i] <- theta[i, edges] <- 1
    }
  }
  diag(theta) <- 0
  omega <- theta * v
  # Shift the diagonal past the spectral radius to guarantee positive definiteness.
  diag(omega) <- abs(min(eigen(omega)$values)) + u
  if (scaled) {
    sigma <- cov2cor(solve(omega))
    if (prec_mat) solve(sigma) else sigma
  } else {
    if (prec_mat) omega else solve(omega)
  }
}
#' RandomNetwork
#'
#' Spawn an Erdos-Renyi type random network covariance matrix
#'
#' @inheritParams ChainNetwork
#' @param prob Probability that a pair of nodes have a common edge.
#' @param u Constant added to the diagonal elements of the precision matrix for controlling the magnitude of partial correlations.
#' @param v Constant added to the off diagonal of the precision matrix for controlling the magnitude of partial correlations.
#'
#' @return A covariance or precision matrix.
#' @export
#'
#' @examples
#' RandomNetwork(50)
RandomNetwork <- function(p, prob = min(1, 5 / p), u = 0.1, v = 0.3, prec_mat = FALSE, scaled = FALSE) {
  theta <- matrix(0, p, p)
  tmp <- matrix(runif(p^2, 0, 0.5), p, p)
  tmp <- tmp + t(tmp)        # symmetrize so the edge indicator below is symmetric
  theta[tmp < prob] <- 1     # Bernoulli-style edge indicator
  diag(theta) <- 0
  omega <- theta * v
  # Shift the diagonal past the spectral radius to guarantee positive definiteness.
  diag(omega) <- abs(min(eigen(omega)$values)) + u
  if (scaled) {
    sigma <- cov2cor(solve(omega))
    if (prec_mat) solve(sigma) else sigma
  } else {
    if (prec_mat) omega else solve(omega)
  }
}
#' DiagMatrix
#'
#' Spawn a p-dimensional identity matrix.
#'
#' @inheritParams ChainNetwork
#'
#' @return A covariance matrix (the p x p identity).
#' @export
#'
#' @examples
#' DiagMatrix(50)
DiagMatrix <- function(p) {
  # Identity covariance: ones on the diagonal, zeros elsewhere.
  diag(nrow = p)
}
#' MoveEdges
#'
#' Randomly move a share of the edges in a random graph
#'
#' In order to create a slightly different graph with the same level of sparsity
#' the selected share of edges will be randomly moved to positions where no edge existed
#' before. Make sure to choose share_moves low for dense graphs.
#'
#' @param x A precision matrix
#' @param share_moves Share of edges to be moved.
#' @param tol Tolerance for zero values.
#'
#' @export
MoveEdges <- function(x, share_moves = 0.1, tol = 1e-16) {
  if (share_moves == 0) return(x)
  # Upper-triangle indices of the current edges and of the empty slots.
  edges <- which(upper.tri(x) & abs(x) >= tol)
  not_edges <- which(upper.tri(x) & x == 0)
  n_moves <- floor(share_moves * length(edges))
  if (length(not_edges) <= n_moves) {
    n_moves <- length(not_edges)
    warning("Cannot move edges because graph is not sparse enough. Will move as many as possible.")
  }
  d <- diag(x)
  # ensure that matrix is not pd to begin with
  diag(x) <- diag(x) - abs(min(eigen(x)$values)) - 0.1
  # sample until matrix is pd again
  # NOTE(review): x is not reset inside the loop, so moves accumulate across
  # rejected attempts -- confirm this cumulative behaviour is intended.
  # NOTE(review): sample(v, n) samples from 1:v when length(v) == 1 (R's
  # scalar-sample footgun); guard with v[sample.int(length(v), n)] if
  # single-edge inputs are possible.
  while (any(eigen(x)$values <= 0)) {
    sel_edges <- sample(edges, n_moves)
    x[sample(not_edges, n_moves)] <- x[sel_edges]
    x[sel_edges] <- 0
    # make symmetric again
    x[lower.tri(x)] <- t(x)[lower.tri(x)]
    diag(x) <- d   # restore the original diagonal before the pd check
  }
  x
}
#' RegrowNetwork
#'
#' Prune graph and regrow edges according to a scale-free attachment scheme
#'
#' Note that the v and preferential_power arguments need to be equal to the ones
#' which initially created omega.
#'
#' @param omega A precision matrix as created by ScaleNetwork
#' @param n_nodes Number of nodes to prune and regrow. Default is 0.1 of all nodes.
#' @inheritParams ScaleNetwork
#' @inheritParams RandomNetwork
#'
#' @export
#'
#' @examples
#' omega <- ScaleNetwork(20, v = 1)
#' omega_hat <- RegrowNetwork(omega, v = 1)
#' omega
#' omega_hat
RegrowNetwork <- function(omega, n_nodes = ncol(omega) * 0.1, preferential_power = 1, v = 0.3) {
  d <- diag(omega)
  diag(omega) <- 0               # work on the off-diagonal (edge) structure only
  n_nodes <- floor(n_nodes)
  p <- ncol(omega)
  # prune: repeatedly remove a randomly chosen degree-1 (leaf) node
  pruned <- numeric(n_nodes)
  for (i in seq(1, n_nodes)) {
    candidates <- which(colSums(omega != 0) == 1)
    if (length(candidates) == 0) stop("Not enough nodes to prune from graph!")
    # NOTE(review): sample(candidates, 1) samples from 1:candidates when only
    # one candidate remains (R's scalar-sample footgun) -- verify leaf counts.
    pruned[i] <- sample(candidates, 1)
    omega[, pruned[i]] <- omega[pruned[i], ] <- 0
  }
  # regrow: reattach the pruned nodes (in reverse order) by preferential attachment
  pruned <- rev(pruned)
  probs <- numeric(p)
  omega_temp <- omega
  # Regrow network and discard permutation if it leads to non pd matrix
  # NOTE(review): no iteration cap -- this can loop for a long time if pd
  # regrowths are rare for the given v and topology.
  while (any(eigen(omega)$values <= 0)) {
    omega <- omega_temp            # reset to the pruned graph before retrying
    for (i in seq(1, n_nodes)) {
      probs <- colSums(omega != 0)^preferential_power
      if (sum(probs) == 0 | any(is.na(probs))) probs <- rep(1, p)   # uniform fallback
      probs <- probs / sum(probs)
      edges <- sample.int(p, 1, prob = probs)
      omega[edges, pruned[i]] <- omega[pruned[i], edges] <- v
    }
    diag(omega) <- d               # restore the original diagonal before the pd check
  }
  omega
}
|
/R/models.R
|
no_license
|
lorenzha/hdcd
|
R
| false
| false
| 6,537
|
r
|
#' ChainNetwork
#'
#' Spawn a chain network covariance matrix
#'
#' @param p Positive integer. The desired number of dimensions.
#' @param n_perm Positive integer. The first n_perm dimensions will be permuted randomly.
#' @param a Positive float between 0 and 1. Scale parameter for the elements of the covariance matrix
#' @param prec_mat Should the precision matrix be returned? If false the covariance matrix will be returned (default).
#' @param scaled If TRUE the created precision matrix will be inverted and scaled to a correlation matrix.
#'
#' @return A covariance or precision matrix.
#' @export
#'
#' @importFrom stats runif cov2cor
#'
#' @examples
#' ChainNetwork(50)
ChainNetwork <- function(p, n_perm = p, a = 0.5, prec_mat = F, scaled = T) {
  stopifnot(p >= n_perm)
  # Node "positions" along the chain: cumulative sums of U(0.5, 1) draws.
  s_vec <- cumsum(runif(p, 0.5, 1))
  if (!is.null(n_perm) && n_perm >= 0) {
    # Randomly permute the first n_perm positions; keep the rest in order.
    perm_inds <- sample(1:n_perm, n_perm, replace = FALSE)
    if (n_perm < p) perm_inds <- c(perm_inds, (n_perm + 1):p)
    s_vec <- s_vec[perm_inds]
  }
  omega <- matrix(0, nrow = p, ncol = p)
  # omega[i, j] = exp(-a * |s_i - s_j|): entries decay with chain distance.
  # NOTE(review): this double loop could be vectorized with outer().
  for (i in seq_len(p)) {
    for (j in seq_len(p)) {
      omega[i, j] <- exp(-a * abs(s_vec[i] - s_vec[j]))
    }
  }
  # NOTE(review): T/F literals in the defaults (prec_mat = F, scaled = T) are
  # fragile -- TRUE/FALSE cannot be shadowed and are preferred.
  if (scaled) {
    # Invert and rescale to a correlation matrix.
    sigma <- cov2cor(solve(omega))
    if (prec_mat) {
      solve(sigma)
    } else {
      sigma
    }
  } else {
    if (prec_mat) {
      omega
    } else {
      solve(omega)
    }
  }
}
#' ScaleNetwork
#'
#' Spawn a scale-free (hub) network precision matrix via preferential attachment.
#'
#' @inheritParams ChainNetwork
#' @inheritParams RandomNetwork
#' @param preferential_power Power coefficient alpha for weighting of degree number as alpha in preferential attachment mechanism.
#'
#' @return A covariance or precision matrix.
#' @export
#'
#' @examples
#' ScaleNetwork(50)
ScaleNetwork <- function(p, preferential_power = 1, u = 0.1, v = 0.3, prec_mat = T, scaled = F) {
  theta <- matrix(0, p, p)
  probs <- numeric(p)   # NOTE(review): overwritten before first use inside the loop
  theta[1, 2] <- theta[2, 1] <- TRUE   # seed edge (logical TRUE coerces to 1)
  # Preferential attachment: each new node i attaches to one existing node,
  # chosen with probability proportional to degree^preferential_power.
  # NOTE(review): seq(3, p) counts *down* for p < 3, so p = 2 errors here --
  # guard the loop with `if (p >= 3)`.
  for (i in seq(3, p)) {
    probs <- colSums(theta)^preferential_power
    probs <- probs / sum(probs)
    edges <- sample.int(i - 1, 1, prob = probs[1:(i - 1)])
    theta[edges, i] <- theta[i, edges] <- TRUE
  }
  diag(theta) <- 0
  omega <- theta * v   # scale edge indicators to partial-correlation weights
  # Diagonal shift past the spectral radius guarantees positive definiteness.
  diag(omega) <- abs(min(eigen(omega)$values)) + u
  if (scaled) {
    # Invert and rescale to a correlation matrix.
    sigma <- cov2cor(solve(omega))
    if (prec_mat) {
      solve(sigma)
    } else {
      sigma
    }
  } else {
    if (prec_mat) {
      omega
    } else {
      solve(omega)
    }
  }
}
#' RandomNetwork
#'
#' Spawn an Erdos-Renyi type random network covariance matrix
#'
#' @inheritParams ChainNetwork
#' @param prob Probability that a pair of nodes have a common edge.
#' @param u Constant added to the diagonal elements of the precision matrix for controlling the magnitude of partial correlations.
#' @param v Constant added to the off diagonal of the precision matrix for controlling the magnitude of partial correlations.
#'
#' @return A covariance matrix.
#' @export
#'
#' @examples
#' RandomNetwork(50)
RandomNetwork <- function(p, prob = min(1, 5 / p), u = 0.1, v = 0.3, prec_mat = F, scaled = F) {
  theta <- matrix(0, p, p)
  tmp <- matrix(runif(p^2, 0, 0.5), p, p)
  tmp <- tmp + t(tmp)      # symmetrize so the edge indicator below is symmetric
  theta[tmp < prob] <- 1   # Bernoulli-style edge indicator
  diag(theta) <- 0
  omega <- theta * v
  # Diagonal shift past the spectral radius guarantees positive definiteness.
  diag(omega) <- abs(min(eigen(omega)$values)) + u
  if (scaled) {
    # Invert and rescale to a correlation matrix.
    sigma <- cov2cor(solve(omega))
    if (prec_mat) {
      solve(sigma)
    } else {
      sigma
    }
  } else {
    if (prec_mat) {
      omega
    } else {
      solve(omega)
    }
  }
}
#' DiagMatrix
#'
#' Spawn a p-dimensional identity matrix.
#'
#' @inheritParams ChainNetwork
#'
#' @return A covariance matrix (the p x p identity).
#' @export
#'
#' @examples
#' DiagMatrix(50)
DiagMatrix <- function(p) {
  # Identity covariance: ones on the diagonal, zeros elsewhere.
  diag(nrow = p)
}
#' MoveEdges
#'
#' Randomly move a share of the edges in a random graph
#'
#' In order to create a slightly different graph with the same level of sparsity
#' the selected share of edges will be randomly moved to positions where no edge existed
#' before. Make sure to choose share_moves low for dense graphs.
#'
#' @param x A precision matrix
#' @param share_moves Share of edges to be moved.
#' @param tol Tolerance for zero values.
#'
#' @export
MoveEdges <- function(x, share_moves = 0.1, tol = 1e-16) {
  if (share_moves == 0) return(x)
  # Upper-triangle indices of the current edges and of the empty slots.
  edges <- which(upper.tri(x) & abs(x) >= tol)
  not_edges <- which(upper.tri(x) & x == 0)
  n_moves <- floor(share_moves * length(edges))
  if (length(not_edges) <= n_moves) {
    n_moves <- length(not_edges)
    warning("Cannot move edges because graph is not sparse enough. Will move as many as possible.")
  }
  d <- diag(x)
  # ensure that matrix is not pd to begin with
  diag(x) <- diag(x) - abs(min(eigen(x)$values)) - 0.1
  # sample until matrix is pd again
  # NOTE(review): x is not reset inside the loop, so moves accumulate across
  # rejected attempts -- confirm this cumulative behaviour is intended.
  # NOTE(review): sample(v, n) samples from 1:v when length(v) == 1 (R's
  # scalar-sample footgun); guard with v[sample.int(length(v), n)] if
  # single-edge inputs are possible.
  while (any(eigen(x)$values <= 0)) {
    sel_edges <- sample(edges, n_moves)
    x[sample(not_edges, n_moves)] <- x[sel_edges]
    x[sel_edges] <- 0
    # make symmetric again
    x[lower.tri(x)] <- t(x)[lower.tri(x)]
    diag(x) <- d   # restore the original diagonal before the pd check
  }
  x
}
#' RegrowNetwork
#'
#' Prune graph and regrow edges according to a scale-free attachment scheme
#'
#' Note that the v and preferential_power arguments need to be equal to the ones
#' which initially created omega.
#'
#' @param omega A precision matrix as created by ScaleNetwork
#' @param n_nodes Number of nodes to prune and regrow. Default is 0.1 of all nodes.
#' @inheritParams ScaleNetwork
#' @inheritParams RandomNetwork
#'
#' @export
#'
#' @examples
#' omega <- ScaleNetwork(20, v = 1)
#' omega_hat <- RegrowNetwork(omega, v = 1)
#' omega
#' omega_hat
RegrowNetwork <- function(omega, n_nodes = ncol(omega) * 0.1, preferential_power = 1, v = 0.3) {
  d <- diag(omega)
  diag(omega) <- 0               # work on the off-diagonal (edge) structure only
  n_nodes <- floor(n_nodes)
  p <- ncol(omega)
  # prune: repeatedly remove a randomly chosen degree-1 (leaf) node
  pruned <- numeric(n_nodes)
  for (i in seq(1, n_nodes)) {
    candidates <- which(colSums(omega != 0) == 1)
    if (length(candidates) == 0) stop("Not enough nodes to prune from graph!")
    # NOTE(review): sample(candidates, 1) samples from 1:candidates when only
    # one candidate remains (R's scalar-sample footgun) -- verify leaf counts.
    pruned[i] <- sample(candidates, 1)
    omega[, pruned[i]] <- omega[pruned[i], ] <- 0
  }
  # regrow: reattach the pruned nodes (in reverse order) by preferential attachment
  pruned <- rev(pruned)
  probs <- numeric(p)
  omega_temp <- omega
  # Regrow network and discard permutation if it leads to non pd matrix
  # NOTE(review): no iteration cap -- this can loop for a long time if pd
  # regrowths are rare for the given v and topology.
  while (any(eigen(omega)$values <= 0)) {
    omega <- omega_temp            # reset to the pruned graph before retrying
    for (i in seq(1, n_nodes)) {
      probs <- colSums(omega != 0)^preferential_power
      if (sum(probs) == 0 | any(is.na(probs))) probs <- rep(1, p)   # uniform fallback
      probs <- probs / sum(probs)
      edges <- sample.int(p, 1, prob = probs)
      omega[edges, pruned[i]] <- omega[pruned[i], edges] <- v
    }
    diag(omega) <- d               # restore the original diagonal before the pd check
  }
  omega
}
# ==============================================================================
# ==============================================================================
# Functions for getting and setting DEFAULT values for visual properties,
# organized into sections:
#
# I. General functions for setting node, edge and network defaults
# II. Specific functions for setting particular node, edge and network defaults
#
# ==============================================================================
# I. General Functions
# ------------------------------------------------------------------------------
#' Updates the default values of visual properties in a style
#'
#' @description Updates visual property defaults, overriding any prior settings. See mapVisualProperty for
#' the list of visual properties.
#' @param style.name (char) name for style
#' @param defaults (list) a list of visual property default settings
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return server response
#' @examples
#' \donttest{
#' updateStyleDefaults('myStyle',list('node fill color'='#0000FF','node size'=50))
#' }
#' @export
#' @seealso mapVisualProperty
updateStyleDefaults <- function(style.name, defaults, base.url = .defaultBaseUrl) {
    # Fall back to the "default" style when no name is provided.
    if (is.null(style.name)) {
        style.name <- 'default'
        message('style.name not specified, so updating "default" style.')
    }
    # Translate each human-readable property name (e.g. "node fill color")
    # into its CyREST identifier and pair it with the supplied value.
    def.list <- lapply(seq_along(defaults), function(i) {
        vp.name <- toupper(gsub("\\s+", "_", names(defaults)[i]))
        # Map legacy aliases onto the canonical CyREST property names.
        vp.name <- switch(vp.name,
                          'EDGE_COLOR' = 'EDGE_UNSELECTED_PAINT',
                          'EDGE_THICKNESS' = 'EDGE_WIDTH',
                          'NODE_BORDER_COLOR' = 'NODE_BORDER_PAINT',
                          vp.name)
        list(visualProperty = vp.name, value = defaults[[i]])
    })
    invisible(cyrestPUT(paste('styles', style.name, 'defaults', sep = '/'),
                        body = def.list, base.url = base.url))
}
# ------------------------------------------------------------------------------
#' @title Get Visual Property Default
#'
#' @description Retrieve the default value for a visual property.
#' @param property Name of property, e.g., NODE_FILL_COLOR (see \link{getVisualPropertyNames})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return The default value of the specified visual property.
#' @examples \donttest{
#' getVisualPropertyDefault('NODE_SIZE')
#' }
#' @export
getVisualPropertyDefault <- function(property, style.name=NULL, base.url=.defaultBaseUrl) {
    # Fall back to the "default" style when no name is provided.
    if (is.null(style.name)) {
        style.name <- 'default'
        message('style.name not specified, so accessing "default" style.')
    }
    reply <- cyrestGET(paste("styles", as.character(style.name), "defaults",
                             property, sep = "/"),
                       base.url = base.url)
    # The CyREST reply pairs the property name with its value; the second
    # element is the value itself.
    reply[[2]]
}
# ------------------------------------------------------------------------------
#' @title Set Visual Property Default
#'
#' @description Set the default value for a visual property.
#' @param style.string A named list including "visualProperty" and "value"
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setVisualPropertyDefault(list(visualProperty = "NODE_SIZE", value = 35))
#' }
#' @export
setVisualPropertyDefault <- function(style.string, style.name=NULL, base.url=.defaultBaseUrl) {
    # Fall back to the "default" style when no name is provided.
    if (is.null(style.name)) {
        style.name <- 'default'
        message('style.name not specified, so updating "default" style.')
    }
    resp <- cyrestPUT(paste("styles", as.character(style.name), "defaults", sep = "/"),
                      body = list(style.string),
                      base.url = base.url)
    # Give Cytoscape a moment to propagate the change through its view model.
    Sys.sleep(get(".MODEL_PROPAGATION_SECS", envir=RCy3env)) ## NOTE: TEMPORARY SLEEP "FIX"
    invisible(resp)
}
# ==============================================================================
# II. Specific Functions
# ==============================================================================
# II.a. Node Properties
# Pattern A: (1) prepare input value as named list, (2) call setVisualPropertyDefault()
# Pattern B: (1) call getVisualPropertyDefault()
# ------------------------------------------------------------------------------
#' @title Set Node Border Color Default
#'
#' @description Set the default node border color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeBorderColorDefault('#FD5903')
#' }
#' @export
setNodeBorderColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    # Validate the hex code before contacting Cytoscape.
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_BORDER_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Border Width Default
#'
#' @description Set the default node border width.
#' @param new.width Numeric value for width
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeBorderWidthDefault(2)
#' }
#' @export
setNodeBorderWidthDefault <- function(new.width, style.name=NULL,
                                      base.url=.defaultBaseUrl) {
    # Apply the width as the style's unmapped NODE_BORDER_WIDTH default.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_BORDER_WIDTH", value = new.width),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Border Opacity Default
#'
#' @description Set defaults opacity value for all unmapped node borders.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeBorderOpacityDefault(50)
#' }
#' @export
setNodeBorderOpacityDefault <- function(new.opacity, style.name=NULL,
                                        base.url=.defaultBaseUrl) {
    # Reject values outside 0-255 before contacting Cytoscape.
    .checkOpacity(new.opacity)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_BORDER_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Color Default
#'
#' @description Set the default node color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeColorDefault('#FD5903')
#' }
#' @export
setNodeColorDefault <- function(new.color, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    # Validate the hex code before contacting Cytoscape.
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_FILL_COLOR", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Bar Chart
#'
#' @description Makes a bar chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed, in order.
#' @param type Type of bar chart: GROUPED (default), STACKED, HEAT_STRIPS, or UP_DOWN
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param orientation (optional) HORIZONTAL or VERTICAL (default).
#' @param colAxis (optional) Show axis with column labels. Default is FALSE.
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param separation (optional) Distance between bars. Default is 0.0.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomBarChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomBarChart <- function(columns, type="GROUPED", colors=NULL,
                                  range=NULL, orientation="VERTICAL",
                                  colAxis=FALSE, rangeAxis=FALSE, zeroLine=FALSE,
                                  axisWidth=0.25, axisColor = "#000000",
                                  axisFontSize=1, separation=0.0,
                                  slot=1, style.name=NULL,
                                  base.url=.defaultBaseUrl){
    if (!type %in% c('GROUPED','STACKED','HEAT_STRIPS','UP_DOWN'))
        stop ('type must be one of the following: GROUPED, STACKED, HEAT_STRIPS, or UP_DOWN')
    .checkSlot(slot)
    vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep = '_')
    # Brewer-derived fallback palette, chosen per chart type.
    if (is.null(colors)) {
        colors <- switch(type,
                         GROUPED = ,
                         STACKED = rep(.cyPalette('set1'), length.out = length(columns)),
                         HEAT_STRIPS = .cyPalette('rdbu')[c(2, 6, 10)],
                         .cyPalette('rdbu')[c(2, 10)])
    }
    # Default the range to the observed min/max across the selected columns,
    # ignoring rows with missing values.
    if (is.null(range)) {
        vals <- na.omit(getTableColumns(columns = columns, base.url = base.url))
        range <- c(min(apply(vals, 2, min)), max(apply(vals, 2, max)))
    }
    # Assemble the chart specification expected by the CyREST BarChart factory.
    chart <- list(cy_dataColumns = columns,
                  cy_type = type,
                  cy_orientation = orientation,
                  cy_showDomainAxis = colAxis,
                  cy_showRangeAxis = rangeAxis,
                  cy_showRangeZeroBaseline = zeroLine,
                  cy_axisWidth = axisWidth,
                  cy_axisColor = axisColor,
                  cy_axisLabelFontSize = axisFontSize,
                  cy_separation = separation,
                  cy_colors = colors,
                  cy_colorScheme = "Custom",
                  cy_range = range)
    setVisualPropertyDefault(
        list(visualProperty = vp,
             value = paste("org.cytoscape.BarChart", toJSON(chart), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Box Chart
#'
#' @description Makes a box chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param orientation (optional) HORIZONTAL or VERTICAL (default).
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomBoxChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomBoxChart <- function(columns, colors=NULL,
                                  range=NULL, orientation="VERTICAL",
                                  rangeAxis=FALSE, zeroLine=FALSE,
                                  axisWidth=0.25, axisColor = "#000000",
                                  axisFontSize=1,
                                  slot=1, style.name=NULL,
                                  base.url=.defaultBaseUrl){
    .checkSlot(slot)
    vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep = '_')
    # One palette color per column by default.
    if (is.null(colors))
        colors <- rep(.cyPalette('set1'), length.out = length(columns))
    # Default the range to the observed min/max across the selected columns.
    if (is.null(range)) {
        vals <- na.omit(getTableColumns(columns = columns, base.url = base.url))
        range <- c(min(apply(vals, 2, min)), max(apply(vals, 2, max)))
    }
    # Assemble the chart specification expected by the CyREST BoxChart factory.
    chart <- list(cy_dataColumns = columns,
                  cy_orientation = orientation,
                  cy_showRangeAxis = rangeAxis,
                  cy_showRangeZeroBaseline = zeroLine,
                  cy_axisWidth = axisWidth,
                  cy_axisColor = axisColor,
                  cy_axisLabelFontSize = axisFontSize,
                  cy_colors = colors,
                  cy_colorScheme = "Custom",
                  cy_range = range)
    setVisualPropertyDefault(
        list(visualProperty = vp,
             value = paste("org.cytoscape.BoxChart", toJSON(chart), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom HeatMap Chart
#'
#' @description Makes a heatmap chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param orientation (optional) VERTICAL or HORIZONTAL (default).
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomHeatMapChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomHeatMapChart <- function(columns, colors=NULL,
                                      range=NULL, orientation="HORIZONTAL",
                                      rangeAxis=FALSE, zeroLine=FALSE,
                                      axisWidth=0.25, axisColor = "#000000",
                                      axisFontSize=1,
                                      slot=1, style.name=NULL,
                                      base.url=.defaultBaseUrl){
    .checkSlot(slot)
    vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep = '_')
    # Diverging palette plus a gray for missing values by default.
    if (is.null(colors))
        colors <- c(.cyPalette('rdbu')[c(2, 6, 10)], "#888888")
    # Default the range to the observed min/max across the selected columns.
    if (is.null(range)) {
        vals <- na.omit(getTableColumns(columns = columns, base.url = base.url))
        range <- c(min(apply(vals, 2, min)), max(apply(vals, 2, max)))
    }
    # Assemble the chart specification expected by the CyREST HeatMapChart
    # factory. Columns are reversed for left-to-right ordering.
    chart <- list(cy_dataColumns = rev(columns),
                  cy_orientation = orientation,
                  cy_showRangeAxis = rangeAxis,
                  cy_showRangeZeroBaseline = zeroLine,
                  cy_axisWidth = axisWidth,
                  cy_axisColor = axisColor,
                  cy_axisLabelFontSize = axisFontSize,
                  cy_colors = colors,
                  cy_colorScheme = "Custom",
                  cy_range = range)
    setVisualPropertyDefault(
        list(visualProperty = vp,
             value = paste("org.cytoscape.HeatMapChart", toJSON(chart), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Line Chart
#'
#' @description Makes a line chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param lineWidth (optional) Width of chart line. Default is 1.0.
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomLineChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomLineChart <- function(columns, colors=NULL,
                                   range=NULL, lineWidth=1.0,
                                   rangeAxis=FALSE, zeroLine=FALSE,
                                   axisWidth=0.25, axisColor = "#000000",
                                   axisFontSize=1,
                                   slot=1, style.name=NULL,
                                   base.url=.defaultBaseUrl){
    .checkSlot(slot)
    vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep = '_')
    # One palette color per column by default.
    if (is.null(colors))
        colors <- rep(.cyPalette('set1'), length.out = length(columns))
    # Default the range to the observed min/max across the selected columns.
    if (is.null(range)) {
        vals <- na.omit(getTableColumns(columns = columns, base.url = base.url))
        range <- c(min(apply(vals, 2, min)), max(apply(vals, 2, max)))
    }
    # Assemble the chart specification expected by the CyREST LineChart factory.
    chart <- list(cy_dataColumns = columns,
                  cy_lineWidth = lineWidth,
                  cy_showRangeAxis = rangeAxis,
                  cy_showRangeZeroBaseline = zeroLine,
                  cy_axisWidth = axisWidth,
                  cy_axisColor = axisColor,
                  cy_axisLabelFontSize = axisFontSize,
                  cy_colors = colors,
                  cy_colorScheme = "Custom",
                  cy_range = range)
    setVisualPropertyDefault(
        list(visualProperty = vp,
             value = paste("org.cytoscape.LineChart", toJSON(chart), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Pie Chart
#'
#' @description Makes a pie chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param startAngle (optional) Angle to start filling pie. Default is 0.0.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomPieChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
setNodeCustomPieChart <- function(columns, colors=NULL,
                                  startAngle=0.0,
                                  slot=1, style.name=NULL,
                                  base.url=.defaultBaseUrl){
    .checkSlot(slot)
    # One palette color per column by default.
    if (is.null(colors))
        colors <- rep(.cyPalette('set1'), length.out = length(columns))
    # Assemble the chart specification expected by the CyREST PieChart factory.
    chart <- list(cy_dataColumns = columns,
                  cy_startAngle = startAngle,
                  cy_colors = colors,
                  cy_colorScheme = "Custom")
    setVisualPropertyDefault(
        list(visualProperty = paste('NODE_CUSTOMGRAPHICS', as.character(slot),
                                    sep = '_'),
             value = paste("org.cytoscape.PieChart", toJSON(chart), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Ring Chart
#'
#' @description Makes a ring chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param startAngle (optional) Angle to start filling ring. Default is 0.0.
#' @param holeSize (optional) Size of hole in ring. Ranges 0-1. Default is 0.5.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomRingChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
setNodeCustomRingChart<-function(columns, colors=NULL,
                                 startAngle=0.0, holeSize = 0.5,
                                 slot=1, style.name=NULL,
                                 base.url=.defaultBaseUrl){
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Assemble the chart specification expected by the CyREST RingChart factory.
    chart <- list(cy_dataColumns = columns,
                  cy_startAngle = startAngle,
                  cy_holeSize = holeSize)
    # One palette color per column by default.
    if (is.null(colors))
        colors<-rep(.cyPalette('set1'),length.out=length(columns))
    chart[['cy_colors']] <- colors
    chart[['cy_colorScheme']] <- "Custom"
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.RingChart",toJSON(chart),sep = ":"))
    # Pass base.url through so non-default CyREST locations are honored;
    # it was previously omitted, silently ignoring the base.url argument.
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Linear Gradient
#'
#' @description Makes a gradient fill per node by setting a default custom
#' graphic style.
#' @param colors (optional) List of colors to define gradient
#' @param anchors (optional) Position of colors from 0.0 to 1.0.
#' @param angle (optional) Angle of gradient. Default is 0 (left-to-right).
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomLinearGradient()
#' }
#' @export
#' @importFrom RJSONIO toJSON
setNodeCustomLinearGradient<-function(colors=c("#DDDDDD","#888888"), anchors=c(0.0,1.0), angle=0.0,
                                      slot=1, style.name=NULL,
                                      base.url=.defaultBaseUrl){
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Assemble the gradient specification expected by the CyREST
    # LinearGradient factory.
    chart <- list(cy_angle = angle,
                  cy_gradientColors = colors,
                  cy_gradientFractions = anchors)
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.LinearGradient",toJSON(chart),sep = ":"))
    # Pass base.url through so non-default CyREST locations are honored;
    # it was previously omitted, silently ignoring the base.url argument.
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Radial Gradient
#'
#' @description Makes a gradient fill per node by setting a default custom
#' graphic style.
#' @param colors (optional) List of colors to define gradient
#' @param anchors (optional) Position of colors from 0.0 to 1.0.
#' @param xCenter (optional) X position for center of radial effect from 0.0
#' to 1.0. Default is 0.5.
#' @param yCenter (optional) Y position for center of radial effect from 0.0
#' to 1.0. Default is 0.5.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomRadialGradient()
#' }
#' @export
#' @importFrom RJSONIO toJSON
setNodeCustomRadialGradient<-function(colors=c("#DDDDDD","#888888"), anchors=c(0.0,1.0),
                                      xCenter=0.5, yCenter=0.5,
                                      slot=1, style.name=NULL,
                                      base.url=.defaultBaseUrl){
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Assemble the gradient specification expected by the CyREST
    # RadialGradient factory.
    chart <- list(cy_gradientColors = colors,
                  cy_gradientFractions = anchors,
                  cy_center = list(x = xCenter,
                                   y = yCenter))
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.RadialGradient",toJSON(chart),sep = ":"))
    # Pass base.url through so non-default CyREST locations are honored;
    # it was previously omitted, silently ignoring the base.url argument.
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Position
#'
#' @description Adjust the position of a custom graphic relative to its node.
#' @param nodeAnchor Position on node to place the graphic: NW,N,NE,E,SE,S,SW,W
#' or C for center (default)
#' @param graphicAnchor Position on graphic to place on node: NW,N,NE,E,SE,S,SW,W
#' or C for center (default)
#' @param justification Positioning of content within graphic: l,r,c (default)
#' @param xOffset Additional offset in the x direction
#' @param yOffset Additional offset in the y direction
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomPosition()
#' }
#' @export
setNodeCustomPosition <- function(nodeAnchor="C", graphicAnchor="C", justification="c",
                                  xOffset=0.0, yOffset=0.0, slot=1, style.name=NULL,
                                  base.url = .defaultBaseUrl){
    .checkSlot(slot)
    # The position is encoded as a single comma-separated CyREST string.
    position <- paste(nodeAnchor, graphicAnchor, justification,
                      xOffset, yOffset, sep = ",")
    setVisualPropertyDefault(
        list(visualProperty = paste('NODE_CUSTOMGRAPHICS_POSITION',
                                    as.character(slot), sep = '_'),
             value = position),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Remove Node Custom Graphics
#'
#' @description Remove the default custom charts, images and gradients.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' removeNodeCustomGraphics()
#' }
#' @export
removeNodeCustomGraphics <- function(slot=1, style.name=NULL,
                                     base.url = .defaultBaseUrl){
    .checkSlot(slot)
    # Clearing the slot's default is done by writing a NULL value.
    setVisualPropertyDefault(
        list(visualProperty = paste('NODE_CUSTOMGRAPHICS', as.character(slot),
                                    sep = '_'),
             value = NULL),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Fill Opacity Default
#'
#' @description Set default opacity value for all unmapped nodes.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeFillOpacityDefault(50)
#' }
#' @export
setNodeFillOpacityDefault <- function(new.opacity, style.name=NULL,
                                      base.url=.defaultBaseUrl) {
    # Reject values outside 0-255 before contacting Cytoscape.
    .checkOpacity(new.opacity)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Font Face Default
#'
#' @description Set the default node font.
#' @param new.font String specification of font face, style and size, e.g.,
#' "SansSerif,plain,12" or "Dialog,plain,10"
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeFontFaceDefault("Dialog,plain,10")
#' }
#' @export
setNodeFontFaceDefault <- function(new.font, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    # The font string is passed through verbatim as "face,style,size".
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_FONT_FACE", value = new.font),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Font Size Default
#'
#' @description Set the default node font size.
#' @param new.size Numeric value for size
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeFontSizeDefault(12)
#' }
#' @export
setNodeFontSizeDefault <- function(new.size, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    # Apply the size as the style's unmapped NODE_LABEL_FONT_SIZE default.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_FONT_SIZE", value = new.size),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Height Default
#'
#' @description Sets the height used by all unmapped nodes.
#' @param new.height Numeric value for height.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeHeightDefault(35)
#' }
#' @export
setNodeHeightDefault <- function(new.height, style.name=NULL,
                                 base.url=.defaultBaseUrl) {
    # Height can only differ from width when the dimensions are unlocked.
    lockNodeDimensions(FALSE, style.name, base.url)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_HEIGHT", value = new.height),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Label Default
#'
#' @description Sets the label text shown on all unmapped nodes.
#' @param new.label String label for unmapped nodes.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeLabelDefault('unknown')
#' }
#' @export
setNodeLabelDefault <- function(new.label, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    label.setting <- list(visualProperty = "NODE_LABEL", value = new.label)
    setVisualPropertyDefault(label.setting, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Label Color Default
#'
#' @description Sets the label color used by all unmapped nodes.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeLabelColorDefault('#FD5903')
#' }
#' @export
setNodeLabelColorDefault <- function(new.color, style.name=NULL,
                                     base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_COLOR", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Label Opacity Default
#'
#' @description Sets the label transparency applied to every node label without
#' an opacity mapping.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeLabelOpacityDefault(50)
#' }
#' @export
setNodeLabelOpacityDefault <- function(new.opacity, style.name=NULL,
                                       base.url=.defaultBaseUrl) {
    .checkOpacity(new.opacity)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Get Node Selection Color Default
#'
#' @description Retrieve the default selection node color.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return The current default value of the NODE_SELECTED_PAINT visual
#' property, i.e., the color applied to selected nodes.
#' @examples \donttest{
#' getNodeSelectionColorDefault()
#' }
#' @export
getNodeSelectionColorDefault <- function(style.name=NULL, base.url=.defaultBaseUrl) {
    return(getVisualPropertyDefault('NODE_SELECTED_PAINT', style.name, base.url))
}
# ------------------------------------------------------------------------------
#' @title Set Node Selection Color Default
#'
#' @description Sets the fill color applied to selected nodes.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeSelectionColorDefault('#FD5903')
#' }
#' @export
setNodeSelectionColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_SELECTED_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Shape Default
#'
#' @description Sets the shape used by all unmapped nodes. The shape name is
#' upper-cased and checked against the shapes supported by Cytoscape.
#' @param new.shape Name of shape, e.g., ELLIPSE, RECTANGLE, etc (see \link{getNodeShapes})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeShapeDefault('ELLIPSE')
#' }
#' @export
setNodeShapeDefault <- function(new.shape, style.name=NULL, base.url=.defaultBaseUrl) {
    new.shape <- toupper(new.shape)
    # Guard clause: reject anything Cytoscape does not recognize.
    if (!(new.shape %in% getNodeShapes(base.url))) {
        stop (sprintf ('%s is not a valid shape. Use getNodeShapes() to find valid values.', new.shape))
    }
    setVisualPropertyDefault(
        list(visualProperty = "NODE_SHAPE", value = new.shape),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Size Default
#'
#' @description Sets the size used by all unmapped nodes.
#' @param new.size Numeric value for size
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeSizeDefault(35)
#' }
#' @export
setNodeSizeDefault <- function(new.size, style.name=NULL,
                               base.url=.defaultBaseUrl) {
    # A single size only applies when width and height are locked together.
    lockNodeDimensions(TRUE, style.name, base.url)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_SIZE", value = new.size),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Width Default
#'
#' @description Sets the width used by all unmapped nodes.
#' @param new.width Numeric value for width.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeWidthDefault(35)
#' }
#' @export
setNodeWidthDefault <- function(new.width, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    # Width can only differ from height when the dimensions are unlocked.
    lockNodeDimensions(FALSE, style.name, base.url)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_WIDTH", value = new.width),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Tooltip Default
#'
#' @description Sets the tooltip text shown for all unmapped nodes.
#' @param new.tooltip String tooltip for unmapped nodes.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeTooltipDefault('unknown')
#' }
#' @export
setNodeTooltipDefault <- function(new.tooltip, style.name=NULL,
                                  base.url=.defaultBaseUrl) {
    tooltip.setting <- list(visualProperty = "NODE_TOOLTIP", value = new.tooltip)
    setVisualPropertyDefault(tooltip.setting, style.name, base.url)
}
# ==============================================================================
# II.b. Edge Properties
# Pattern A: (1) prepare input value as named list, (2) call setVisualPropertyDefault()
# Pattern B: (1) call getVisualPropertyDefault()
# ------------------------------------------------------------------------------
#' @title Set Edge Color Default
#'
#' @description Sets the color used by all unmapped edges. Both the generic and
#' the stroke-specific unselected paint properties are updated so the color
#' applies regardless of the style's arrow-color dependency setting.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeColorDefault('#FD5903')
#' }
#' @export
setEdgeColorDefault <- function(new.color, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    for (prop in c("EDGE_UNSELECTED_PAINT", "EDGE_STROKE_UNSELECTED_PAINT")) {
        setVisualPropertyDefault(
            list(visualProperty = prop, value = new.color),
            style.name, base.url)
    }
}
# ------------------------------------------------------------------------------
#' @title Set Edge Font Face Default
#'
#' @description Sets the font used for edge labels that have no font mapping.
#' @param new.font String specification of font face, style and size, e.g.,
#' "SansSerif,plain,12" or "Dialog,plain,10"
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeFontFaceDefault("Dialog,plain,10")
#' }
#' @export
setEdgeFontFaceDefault <- function(new.font, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL_FONT_FACE", value = new.font),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Font Size Default
#'
#' @description Sets the label font size used by all unmapped edges.
#' @param new.size Numeric value for size
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeFontSizeDefault(12)
#' }
#' @export
setEdgeFontSizeDefault <- function(new.size, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    font.setting <- list(visualProperty = "EDGE_LABEL_FONT_SIZE", value = new.size)
    setVisualPropertyDefault(font.setting, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Label Default
#'
#' @description Sets the label text shown on all unmapped edges.
#' @param new.label String label for unmapped edges.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLabelDefault('unknown')
#' }
#' @export
setEdgeLabelDefault <- function(new.label, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    label.setting <- list(visualProperty = "EDGE_LABEL", value = new.label)
    setVisualPropertyDefault(label.setting, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Label Color Default
#'
#' @description Sets the label color used by all unmapped edges.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLabelColorDefault("#FD5903")
#' }
#' @export
setEdgeLabelColorDefault <- function(new.color, style.name=NULL,
                                     base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL_COLOR", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Label Opacity Default
#'
#' @description Sets the label transparency applied to every edge label without
#' an opacity mapping.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLabelOpacityDefault(50)
#' }
#' @export
setEdgeLabelOpacityDefault <- function(new.opacity, style.name=NULL,
                                       base.url=.defaultBaseUrl) {
    .checkOpacity(new.opacity)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Line Width Default
#'
#' @description Sets the line width used by all unmapped edges.
#' @param new.width Numeric value for width
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLineWidthDefault(3)
#' }
#' @export
setEdgeLineWidthDefault <- function(new.width, style.name=NULL,
                                    base.url=.defaultBaseUrl) {
    width.setting <- list(visualProperty = "EDGE_WIDTH", value = new.width)
    setVisualPropertyDefault(width.setting, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Line Style Default
#'
#' @description Set the default edge style. The style name is validated
#' against the line styles supported by Cytoscape, for consistency with
#' \code{setNodeShapeDefault}.
#' @param new.line.style Name of line style, e.g., SOLID, LONG_DASH, etc (see \link{getLineStyles})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLineStyleDefault('LONG_DASH')
#' }
#' @export
setEdgeLineStyleDefault <- function(new.line.style, style.name=NULL,
                                    base.url=.defaultBaseUrl) {
    # Validate up front; previously an invalid name was silently forwarded to
    # Cytoscape, matching the validation pattern used by setNodeShapeDefault.
    if (!(new.line.style %in% getLineStyles(base.url))) {
        stop(sprintf('%s is not a valid line style. Use getLineStyles() to find valid values.',
                     new.line.style))
    }
    style <- list(visualProperty = "EDGE_LINE_TYPE", value = new.line.style)
    setVisualPropertyDefault(style, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Opacity Default
#'
#' @description Sets the transparency applied to every edge without an opacity
#' mapping.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeOpacityDefault(50)
#' }
#' @export
setEdgeOpacityDefault <- function(new.opacity, style.name=NULL,
                                  base.url=.defaultBaseUrl) {
    .checkOpacity(new.opacity)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Get Edge Selection Color Default
#'
#' @description Retrieve the default selected edge color.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return The current default selected-edge color, read from either
#' EDGE_SELECTED_PAINT or EDGE_STROKE_SELECTED_PAINT depending on the style's
#' "arrowColorMatchesEdge" dependency.
#' @examples \donttest{
#' getEdgeSelectionColorDefault()
#' }
#' @export
getEdgeSelectionColorDefault <- function(style.name=NULL, base.url=.defaultBaseUrl) {
    # Fix: forward base.url (previously dropped, so a custom CyREST host was
    # ignored for this lookup).
    matched <- unname(getStyleDependencies(base.url = base.url)['arrowColorMatchesEdge'])
    # isTRUE() guards against a missing/NA dependency value, which would make
    # a bare if(matched) error out.
    if (isTRUE(matched))
        return(getVisualPropertyDefault('EDGE_SELECTED_PAINT',style.name, base.url))
    else
        return(getVisualPropertyDefault('EDGE_STROKE_SELECTED_PAINT',style.name, base.url))
}
# ------------------------------------------------------------------------------
#' @title Set Edge Selection Color Default
#'
#' @description Set the default selected edge color. Both the generic and the
#' stroke-specific selected paint properties are updated so the color applies
#' regardless of the style's "arrowColorMatchesEdge" dependency.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeSelectionColorDefault('#FD5903')
#' }
#' @export
setEdgeSelectionColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    style = list(visualProperty = "EDGE_SELECTED_PAINT", value = new.color)
    setVisualPropertyDefault(style, style.name, base.url)
    # Fix: was "EDGE_STROKE_ELECTED_PAINT" (typo), an invalid visual property
    # name, so the stroke selection color was never actually updated.
    style = list(visualProperty = "EDGE_STROKE_SELECTED_PAINT", value = new.color)
    setVisualPropertyDefault(style, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Source Arrow Color Default
#'
#' @description Sets the source arrow color used by all unmapped edges.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeSourceArrowColorDefault('#FD5903')
#' }
#' @export
setEdgeSourceArrowColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_SOURCE_ARROW_UNSELECTED_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Target Arrow Color Default
#'
#' @description Sets the target arrow color used by all unmapped edges.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeTargetArrowColorDefault('#FD5903')
#' }
#' @export
setEdgeTargetArrowColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_TARGET_ARROW_UNSELECTED_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Source Arrow Shape Default
#'
#' @description Set the default edge source arrow shape. The shape name is
#' upper-cased and validated against the arrow shapes supported by Cytoscape,
#' for consistency with \code{setNodeShapeDefault}.
#' @param new.shape Name of shape, e.g., ARROW, T, etc (see \link{getArrowShapes})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeSourceArrowShapeDefault('ARROW')
#' }
#' @export
setEdgeSourceArrowShapeDefault <- function(new.shape, style.name=NULL, base.url=.defaultBaseUrl) {
    # Validate up front; previously an invalid name was silently forwarded.
    new.shape <- toupper(new.shape)
    if (!(new.shape %in% getArrowShapes(base.url))) {
        stop(sprintf('%s is not a valid shape. Use getArrowShapes() to find valid values.',
                     new.shape))
    }
    style <- list(visualProperty = "EDGE_SOURCE_ARROW_SHAPE", value = new.shape)
    setVisualPropertyDefault(style, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Target Arrow Shape Default
#'
#' @description Set the default edge target arrow shape. The shape name is
#' upper-cased and validated against the arrow shapes supported by Cytoscape,
#' for consistency with \code{setNodeShapeDefault}.
#' @param new.shape Name of shape, e.g., ARROW, T, etc (see \link{getArrowShapes})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeTargetArrowShapeDefault('ARROW')
#' }
#' @export
setEdgeTargetArrowShapeDefault <- function(new.shape, style.name=NULL, base.url=.defaultBaseUrl) {
    # Validate up front; previously an invalid name was silently forwarded.
    new.shape <- toupper(new.shape)
    if (!(new.shape %in% getArrowShapes(base.url))) {
        stop(sprintf('%s is not a valid shape. Use getArrowShapes() to find valid values.',
                     new.shape))
    }
    style <- list(visualProperty = "EDGE_TARGET_ARROW_SHAPE", value = new.shape)
    setVisualPropertyDefault(style, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Tooltip Default
#'
#' @description Sets the tooltip text shown for all unmapped edges.
#' @param new.tooltip String tooltip for unmapped edges.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeTooltipDefault('unknown')
#' }
#' @export
setEdgeTooltipDefault <- function(new.tooltip, style.name=NULL,
                                  base.url=.defaultBaseUrl) {
    tooltip.setting <- list(visualProperty = "EDGE_TOOLTIP", value = new.tooltip)
    setVisualPropertyDefault(tooltip.setting, style.name, base.url)
}
# ==============================================================================
# II.c. Network Properties
# Pattern A: (1) prepare input value as named list, (2) call setVisualPropertyDefault()
# Pattern B: (1) call getVisualPropertyDefault()
# ------------------------------------------------------------------------------
#' @title Get Background Color Default
#'
#' @description Retrieve the default background color.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return The current default value of the NETWORK_BACKGROUND_PAINT visual
#' property, i.e., the network background color.
#' @examples \donttest{
#' getBackgroundColorDefault()
#' }
#' @export
getBackgroundColorDefault <- function(style.name=NULL, base.url=.defaultBaseUrl) {
    return(getVisualPropertyDefault('NETWORK_BACKGROUND_PAINT',style.name, base.url))
}
# ------------------------------------------------------------------------------
#' @title Set Background Color Default
#'
#' @description Sets the network background color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setBackgroundColorDefault('#888888')
#' }
#' @export
setBackgroundColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NETWORK_BACKGROUND_PAINT", value = new.color),
        style.name, base.url)
}
|
/R/StyleDefaults.R
|
permissive
|
kozo2/RCy3
|
R
| false
| false
| 61,937
|
r
|
# ==============================================================================
# Functions for getting and setting DEFAULT values for visual properties,
# organized into sections:
#
# I. General functions for setting node, edge and network defaults
# II. Specific functions for setting particular node, edge and network defaults
#
# ==============================================================================
# I. General Functions
# ------------------------------------------------------------------------------
#' Updates the default values of visual properties in a style
#'
#' @description Updates visual property defaults, overriding any prior settings. See mapVisualProperty for
#' the list of visual properties.
#' @param style.name (char) name for style
#' @param defaults (list) a list of visual property default settings
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return server response
#' @examples
#' \donttest{
#' updateStyleDefaults('myStyle',list('node fill color'='#0000FF','node size'=50))
#' }
#' @export
#' @seealso mapVisualProperty
updateStyleDefaults <- function(style.name, defaults,base.url=.defaultBaseUrl){
    # set default style
    if(is.null(style.name)){
        style.name <- 'default'
        message('style.name not specified, so updating "default" style.')
    }
    # Preallocate instead of growing the list inside the loop.
    def.list <- vector("list", length(defaults))
    # seq_along() is safe for empty input (seq_len(length(x)) was equivalent
    # but less idiomatic).
    for (i in seq_along(defaults)) {
        # Normalize friendly names like "node fill color" to NODE_FILL_COLOR.
        visual.prop.name <- toupper(gsub("\\s+", "_", names(defaults)[i]))
        # Map user-facing aliases to the actual CyREST property names.
        visual.prop.name <- switch(visual.prop.name,
                                   'EDGE_COLOR'='EDGE_UNSELECTED_PAINT',
                                   'EDGE_THICKNESS'='EDGE_WIDTH',
                                   'NODE_BORDER_COLOR'='NODE_BORDER_PAINT',
                                   visual.prop.name)
        def.list[[i]] <- list(visualProperty=visual.prop.name,
                              value=defaults[[i]])
    }
    invisible(cyrestPUT(paste('styles', style.name,'defaults', sep = '/'),
                        body=def.list, base.url = base.url))
}
# ------------------------------------------------------------------------------
#' @title Get Visual Property Default
#'
#' @description Retrieve the default value for a visual property.
#' @param property Name of property, e.g., NODE_FILL_COLOR (see \link{getVisualPropertyNames})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return The default value of the requested visual property.
#' @examples \donttest{
#' getVisualPropertyDefault('NODE_SIZE')
#' }
#' @export
getVisualPropertyDefault <- function(property, style.name=NULL, base.url=.defaultBaseUrl) {
    # Fall back to the "default" style when none is given.
    if(is.null(style.name)){
        style.name <- 'default'
        message('style.name not specified, so accessing "default" style.')
    }
    endpoint <- paste("styles", as.character(style.name), "defaults", property, sep="/")
    res <- cyrestGET(endpoint, base.url=base.url)
    # The CyREST payload's second element holds the property value.
    res[[2]]
}
# ------------------------------------------------------------------------------
#' @title Set Visual Property Default
#'
#' @description Set the default value for a visual property.
#' @param style.string A named list including "visualProperty" and "value"
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setVisualPropertyDefault(list(visualProperty = "NODE_SIZE", value = 35))
#' }
#' @export
setVisualPropertyDefault <- function(style.string, style.name=NULL, base.url=.defaultBaseUrl) {
    # Fall back to the "default" style when none is given.
    if(is.null(style.name)){
        style.name <- 'default'
        message('style.name not specified, so updating "default" style.')
    }
    endpoint <- paste("styles", as.character(style.name), "defaults", sep="/")
    resp <- cyrestPUT(endpoint,
                      body = list(style.string),
                      base.url=base.url)
    # Give the Cytoscape model time to propagate the change.
    Sys.sleep(get(".MODEL_PROPAGATION_SECS", envir=RCy3env)) ## NOTE: TEMPORARY SLEEP "FIX"
    invisible(resp)
}
# ==============================================================================
# II. Specific Functions
# ==============================================================================
# II.a. Node Properties
# Pattern A: (1) prepare input value as named list, (2) call setVisualPropertyDefault()
# Pattern B: (1) call getVisualPropertyDefault()
# ------------------------------------------------------------------------------
#' @title Set Node Border Color Default
#'
#' @description Sets the border color used by all unmapped nodes.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeBorderColorDefault('#FD5903')
#' }
#' @export
setNodeBorderColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_BORDER_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Border Width Default
#'
#' @description Sets the border width used by all unmapped nodes.
#' @param new.width Numeric value for width
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeBorderWidthDefault(2)
#' }
#' @export
setNodeBorderWidthDefault <- function(new.width, style.name=NULL,
                                      base.url=.defaultBaseUrl) {
    width.setting <- list(visualProperty = "NODE_BORDER_WIDTH", value = new.width)
    setVisualPropertyDefault(width.setting, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Border Opacity Default
#'
#' @description Set defaults opacity value for all unmapped node borders.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeBorderOpacityDefault(50)
#' }
#' @export
setNodeBorderOpacityDefault <- function(new.opacity, style.name=NULL,
                                        base.url=.defaultBaseUrl) {
    # Opacity must be a value in [0, 255]; 0 is fully transparent.
    .checkOpacity(new.opacity)
    # Cytoscape stores border opacity as NODE_BORDER_TRANSPARENCY.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_BORDER_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Color Default
#'
#' @description Set the default node color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeColorDefault('#FD5903')
#' }
#' @export
setNodeColorDefault <- function(new.color, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    # Reject anything that is not a hex color string (e.g. '#FD5903').
    .checkHexColor(new.color)
    # Node fill color is the NODE_FILL_COLOR visual property.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_FILL_COLOR", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Bar Chart
#'
#' @description Makes a bar chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed, in order.
#' @param type Type of bar chart: GROUPED (default), STACKED, HEAT_STRIPS, or UP_DOWN
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param orientation (optional) HORIZONTAL or VERTICAL (default).
#' @param colAxis (optional) Show axis with column labels. Default is FALSE.
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param separation (optional) Distance between bars. Default is 0.0.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomBarChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomBarChart<-function(columns, type="GROUPED", colors=NULL,
                                range=NULL, orientation="VERTICAL",
                                colAxis=FALSE, rangeAxis=FALSE, zeroLine=FALSE,
                                axisWidth=0.25, axisColor = "#000000",
                                axisFontSize=1, separation=0.0,
                                slot=1, style.name=NULL,
                                base.url=.defaultBaseUrl){
    # Reject unsupported chart types up front.
    if (!type %in% c('GROUPED','STACKED','HEAT_STRIPS','UP_DOWN'))
        stop ('type must be one of the following: GROUPED, STACKED, HEAT_STRIPS, or UP_DOWN')
    # Charts occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Chart definition consumed by Cytoscape's BarChart custom graphic.
    chart <- list(cy_dataColumns = columns,
                  cy_type = type,
                  cy_orientation = orientation,
                  cy_showDomainAxis = colAxis,
                  cy_showRangeAxis = rangeAxis,
                  cy_showRangeZeroBaseline = zeroLine,
                  cy_axisWidth = axisWidth,
                  cy_axisColor = axisColor,
                  cy_axisLabelFontSize = axisFontSize,
                  cy_separation = separation)
    # Pick a type-appropriate default palette when none is supplied:
    # a qualitative set for grouped/stacked bars, a diverging red-blue
    # scheme for heat strips and up/down charts.
    if (is.null(colors)){
        if (type %in% c("GROUPED","STACKED"))
            colors<-rep(.cyPalette('set1'),length.out=length(columns))
        else if (type == "HEAT_STRIPS")
            colors<-.cyPalette('rdbu')[c(2,6,10)]
        else
            colors<-.cyPalette('rdbu')[c(2,10)]
    }
    chart[['cy_colors']] <- colors
    chart[['cy_colorScheme']] <- "Custom"
    # Use the caller-supplied range, or derive it from the actual data:
    # overall min and max across the specified columns, ignoring NA rows.
    if (!is.null(range))
        chart[['cy_range']] <- range
    else{
        cols<-getTableColumns(columns=columns, base.url = base.url)
        min<-min(apply(na.omit(cols),2,min))
        max<-max(apply(na.omit(cols),2,max))
        chart[['cy_range']] <- c(min,max)
    }
    # Serialize as "org.cytoscape.BarChart:{...}" and install it as the
    # slot's default visual property value.
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.BarChart",toJSON(chart),sep = ":"))
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Box Chart
#'
#' @description Makes a box chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param orientation (optional) HORIZONTAL or VERTICAL (default).
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomBoxChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomBoxChart<-function(columns, colors=NULL,
                                range=NULL, orientation="VERTICAL",
                                rangeAxis=FALSE, zeroLine=FALSE,
                                axisWidth=0.25, axisColor = "#000000",
                                axisFontSize=1,
                                slot=1, style.name=NULL,
                                base.url=.defaultBaseUrl){
    # Box charts occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    target.vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep='_')
    # Chart definition consumed by Cytoscape's BoxChart custom graphic.
    box.def <- list(cy_dataColumns = columns,
                    cy_orientation = orientation,
                    cy_showRangeAxis = rangeAxis,
                    cy_showRangeZeroBaseline = zeroLine,
                    cy_axisWidth = axisWidth,
                    cy_axisColor = axisColor,
                    cy_axisLabelFontSize = axisFontSize)
    # Default palette: Brewer set1, recycled to cover every column.
    if (is.null(colors)) {
        colors <- rep(.cyPalette('set1'), length.out = length(columns))
    }
    box.def[['cy_colors']] <- colors
    box.def[['cy_colorScheme']] <- "Custom"
    # Use the caller-supplied range, or derive overall min/max from the
    # specified data columns (ignoring rows with NA).
    if (is.null(range)) {
        cols <- getTableColumns(columns = columns, base.url = base.url)
        col.min <- min(apply(na.omit(cols), 2, min))
        col.max <- max(apply(na.omit(cols), 2, max))
        box.def[['cy_range']] <- c(col.min, col.max)
    } else {
        box.def[['cy_range']] <- range
    }
    # Serialize as "org.cytoscape.BoxChart:{...}" and install as the slot's
    # default visual property value.
    setVisualPropertyDefault(
        list(visualProperty = target.vp,
             value = paste("org.cytoscape.BoxChart", toJSON(box.def), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom HeatMap Chart
#'
#' @description Makes a heatmap chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param orientation (optional) VERTICAL or HORIZONTAL (default).
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomHeatMapChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomHeatMapChart<-function(columns, colors=NULL,
                                    range=NULL, orientation="HORIZONTAL",
                                    rangeAxis=FALSE, zeroLine=FALSE,
                                    axisWidth=0.25, axisColor = "#000000",
                                    axisFontSize=1,
                                    slot=1, style.name=NULL,
                                    base.url=.defaultBaseUrl){
    # Heatmaps occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Chart definition consumed by Cytoscape's HeatMapChart custom graphic.
    chart <- list(cy_dataColumns = rev(columns), #rev for left-to-right ordering
                  cy_orientation = orientation,
                  cy_showRangeAxis = rangeAxis,
                  cy_showRangeZeroBaseline = zeroLine,
                  cy_axisWidth = axisWidth,
                  cy_axisColor = axisColor,
                  cy_axisLabelFontSize = axisFontSize)
    # Default palette: diverging red-blue triple plus a gray fourth entry.
    if (is.null(colors))
        colors<-c(.cyPalette('rdbu')[c(2,6,10)],"#888888")
    chart[['cy_colors']] <- colors
    chart[['cy_colorScheme']] <- "Custom"
    # Use the caller-supplied range, or derive overall min/max from the
    # specified data columns (ignoring rows with NA).
    if (!is.null(range))
        chart[['cy_range']] <- range
    else{
        cols<-getTableColumns(columns=columns, base.url = base.url)
        min<-min(apply(na.omit(cols),2,min))
        max<-max(apply(na.omit(cols),2,max))
        chart[['cy_range']] <- c(min,max)
    }
    # Serialize as "org.cytoscape.HeatMapChart:{...}" and install as the
    # slot's default visual property value.
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.HeatMapChart",toJSON(chart),sep = ":"))
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Line Chart
#'
#' @description Makes a line chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param range (optional) Min and max values of chart. Default is to use min
#' and max from specified data columns.
#' @param lineWidth (optional) Width of chart line. Default is 1.0.
#' @param rangeAxis (optional) Show axis with range of values. Default is FALSE.
#' @param zeroLine (optional) Show a line at zero. Default is FALSE.
#' @param axisWidth (optional) Width of axis lines, if shown. Default is 0.25.
#' @param axisColor (optional) Color of axis lines, if shown. Default is black.
#' @param axisFontSize (optional) Font size of axis labels, if shown. Default
#' is 1.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomLineChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
#' @importFrom stats na.omit
setNodeCustomLineChart<-function(columns, colors=NULL,
                                 range=NULL, lineWidth=1.0,
                                 rangeAxis=FALSE, zeroLine=FALSE,
                                 axisWidth=0.25, axisColor = "#000000",
                                 axisFontSize=1,
                                 slot=1, style.name=NULL,
                                 base.url=.defaultBaseUrl){
    # Line charts occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    target.vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep='_')
    # Chart definition consumed by Cytoscape's LineChart custom graphic.
    line.def <- list(cy_dataColumns = columns,
                     cy_lineWidth = lineWidth,
                     cy_showRangeAxis = rangeAxis,
                     cy_showRangeZeroBaseline = zeroLine,
                     cy_axisWidth = axisWidth,
                     cy_axisColor = axisColor,
                     cy_axisLabelFontSize = axisFontSize)
    # Default palette: Brewer set1, recycled to cover every column.
    if (is.null(colors)) {
        colors <- rep(.cyPalette('set1'), length.out = length(columns))
    }
    line.def[['cy_colors']] <- colors
    line.def[['cy_colorScheme']] <- "Custom"
    # Use the caller-supplied range, or derive overall min/max from the
    # specified data columns (ignoring rows with NA).
    if (is.null(range)) {
        cols <- getTableColumns(columns = columns, base.url = base.url)
        col.min <- min(apply(na.omit(cols), 2, min))
        col.max <- max(apply(na.omit(cols), 2, max))
        line.def[['cy_range']] <- c(col.min, col.max)
    } else {
        line.def[['cy_range']] <- range
    }
    # Serialize as "org.cytoscape.LineChart:{...}" and install as the slot's
    # default visual property value.
    setVisualPropertyDefault(
        list(visualProperty = target.vp,
             value = paste("org.cytoscape.LineChart", toJSON(line.def), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Pie Chart
#'
#' @description Makes a pie chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param startAngle (optional) Angle to start filling pie. Default is 0.0.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomPieChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
setNodeCustomPieChart<-function(columns, colors=NULL,
                                startAngle=0.0,
                                slot=1, style.name=NULL,
                                base.url=.defaultBaseUrl){
    # Pie charts occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    target.vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep='_')
    # Default palette: Brewer set1, recycled to cover every column.
    if (is.null(colors)) {
        colors <- rep(.cyPalette('set1'), length.out = length(columns))
    }
    # Chart definition consumed by Cytoscape's PieChart custom graphic;
    # key order matches the original construction (columns, angle, colors).
    pie.def <- list(cy_dataColumns = columns,
                    cy_startAngle = startAngle,
                    cy_colors = colors,
                    cy_colorScheme = "Custom")
    setVisualPropertyDefault(
        list(visualProperty = target.vp,
             value = paste("org.cytoscape.PieChart", toJSON(pie.def), sep = ":")),
        style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Ring Chart
#'
#' @description Makes a ring chart per node using specified node table columns by
#' setting a default custom graphic style.
#' @param columns List of node column names to be displayed.
#' @param colors (optional) List of colors to be matched with columns or with
#' range, depending on type. Default is a set of colors from an appropriate
#' Brewer palette.
#' @param startAngle (optional) Angle to start filling ring. Default is 0.0.
#' @param holeSize (optional) Size of hole in ring. Ranges 0-1. Default is 0.5.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomRingChart(c("data1","data2","data3"))
#' }
#' @seealso setNodeCustomPosition, removeNodeCustomGraphics
#' @export
#' @importFrom RJSONIO toJSON
setNodeCustomRingChart<-function(columns, colors=NULL,
                                 startAngle=0.0, holeSize = 0.5,
                                 slot=1, style.name=NULL,
                                 base.url=.defaultBaseUrl){
    # Ring charts occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Chart definition consumed by Cytoscape's RingChart custom graphic.
    chart <- list(cy_dataColumns = columns,
                  cy_startAngle = startAngle,
                  cy_holeSize = holeSize)
    # Default palette: Brewer set1, recycled to cover every column.
    if (is.null(colors))
        colors<-rep(.cyPalette('set1'),length.out=length(columns))
    chart[['cy_colors']] <- colors
    chart[['cy_colorScheme']] <- "Custom"
    # Serialize as "org.cytoscape.RingChart:{...}" and install as the slot's
    # default visual property value.
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.RingChart",toJSON(chart),sep = ":"))
    # Fix: forward base.url (previously dropped), so the chart is applied via
    # the caller-specified CyREST endpoint instead of always the default.
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Linear Gradient
#'
#' @description Makes a gradient fill per node by setting a default custom
#' graphic style.
#' @param colors (optional) List of colors to define gradient
#' @param anchors (optional) Position of colors from 0.0 to 1.0.
#' @param angle (optional) Angle of gradient. Default is 0 (left-to-right).
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomLinearGradient()
#' }
#' @export
setNodeCustomLinearGradient<-function(colors=c("#DDDDDD","#888888"), anchors=c(0.0,1.0), angle=0.0,
                                      slot=1, style.name=NULL,
                                      base.url=.defaultBaseUrl){
    # Gradients occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Gradient definition consumed by Cytoscape's LinearGradient custom
    # graphic: angle plus parallel color/fraction (anchor) lists.
    chart <- list(cy_angle = angle,
                  cy_gradientColors = colors,
                  cy_gradientFractions = anchors)
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.LinearGradient",toJSON(chart),sep = ":"))
    # Fix: forward base.url (previously dropped), so the gradient is applied
    # via the caller-specified CyREST endpoint instead of always the default.
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Radial Gradient
#'
#' @description Makes a gradient fill per node by setting a default custom
#' graphic style.
#' @param colors (optional) List of colors to define gradient
#' @param anchors (optional) Position of colors from 0.0 to 1.0.
#' @param xCenter (optional) X position for center of radial effect from 0.0
#' to 1.0. Default is 0.5.
#' @param yCenter (optional) Y position for center of radial effect from 0.0
#' to 1.0. Default is 0.5.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomRadialGradient()
#' }
#' @export
setNodeCustomRadialGradient<-function(colors=c("#DDDDDD","#888888"), anchors=c(0.0,1.0),
                                      xCenter=0.5, yCenter=0.5,
                                      slot=1, style.name=NULL,
                                      base.url=.defaultBaseUrl){
    # Gradients occupy one of nine node custom graphics slots (1-9).
    .checkSlot(slot)
    vp<-paste('NODE_CUSTOMGRAPHICS',as.character(slot),sep='_')
    # Gradient definition consumed by Cytoscape's RadialGradient custom
    # graphic: parallel color/fraction lists plus the radial center point.
    chart <- list(cy_gradientColors = colors,
                  cy_gradientFractions = anchors,
                  cy_center = list(x = xCenter,
                                   y = yCenter))
    style.string = list(visualProperty = vp, value = paste("org.cytoscape.RadialGradient",toJSON(chart),sep = ":"))
    # Fix: forward base.url (previously dropped), so the gradient is applied
    # via the caller-specified CyREST endpoint instead of always the default.
    setVisualPropertyDefault(style.string, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Custom Position
#'
#' @description Adjust the position of a custom graphic relative to its node.
#' @param nodeAnchor Position on node to place the graphic: NW,N,NE,E,SE,S,SW,W
#' or C for center (default)
#' @param graphicAnchor Position on graphic to place on node: NW,N,NE,E,SE,S,SW,W
#' or C for center (default)
#' @param justification Positioning of content within graphic: l,r,c (default)
#' @param xOffset Additional offset in the x direction
#' @param yOffset Additional offset in the y direction
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeCustomPosition()
#' }
#' @export
setNodeCustomPosition<-function(nodeAnchor="C", graphicAnchor="C", justification="c",
                                xOffset=0.0, yOffset=0.0, slot=1, style.name=NULL,
                                base.url = .defaultBaseUrl){
    .checkSlot(slot)
    # Position properties are suffixed with the slot number, e.g.
    # NODE_CUSTOMGRAPHICS_POSITION_1.
    vp <- paste('NODE_CUSTOMGRAPHICS_POSITION', as.character(slot), sep='_')
    # Cytoscape encodes the position as a comma-separated 5-tuple:
    # nodeAnchor,graphicAnchor,justification,xOffset,yOffset
    position.value <- paste(nodeAnchor, graphicAnchor, justification,
                            xOffset, yOffset, sep = ",")
    setVisualPropertyDefault(list(visualProperty = vp, value = position.value),
                             style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Remove Node Custom Graphics
#'
#' @description Remove the default custom charts, images and gradients.
#' @param slot (optional) Which custom graphics slot to modify. Slots 1-9 are
#' available for independent charts, gradients and images. Default is 1.
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' removeNodeCustomGraphics()
#' }
#' @export
removeNodeCustomGraphics<-function(slot=1, style.name=NULL,
                                   base.url = .defaultBaseUrl){
    .checkSlot(slot)
    # Setting the slot's visual property default to NULL clears whatever
    # chart, gradient or image is currently assigned to it.
    vp <- paste('NODE_CUSTOMGRAPHICS', as.character(slot), sep='_')
    cleared <- list(visualProperty = vp, value = NULL)
    setVisualPropertyDefault(cleared, style.name, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Fill Opacity Default
#'
#' @description Set default opacity value for all unmapped nodes.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeFillOpacityDefault(50)
#' }
#' @export
setNodeFillOpacityDefault <- function(new.opacity, style.name=NULL,
                                      base.url=.defaultBaseUrl) {
    # Opacity must be a value in [0, 255]; 0 is fully transparent.
    .checkOpacity(new.opacity)
    # Node fill opacity is the NODE_TRANSPARENCY visual property.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Font Face Default
#'
#' @description Set the default node font.
#' @param new.font String specification of font face, style and size, e.g.,
#' "SansSerif,plain,12" or "Dialog,plain,10"
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeFontFaceDefault("Dialog,plain,10")
#' }
#' @export
setNodeFontFaceDefault <- function(new.font, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    # new.font is a "face,style,size" string, e.g. "Dialog,plain,10".
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_FONT_FACE", value = new.font),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Font Size Default
#'
#' @description Set the default node font size.
#' @param new.size Numeric value for size
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeFontSizeDefault(12)
#' }
#' @export
setNodeFontSizeDefault <- function(new.size, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    # Delegate to the generic default-setter for NODE_LABEL_FONT_SIZE.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_FONT_SIZE", value = new.size),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Height Default
#'
#' @description Set the default node height.
#' @param new.height Numeric value for height.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeHeightDefault(35)
#' }
#' @export
setNodeHeightDefault <- function(new.height, style.name=NULL,
                                 base.url=.defaultBaseUrl) {
    # Height can only be set independently when width/height are unlocked.
    lockNodeDimensions(FALSE, style.name, base.url)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_HEIGHT", value = new.height),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Label Default
#'
#' @description Set the default node label.
#' @param new.label String label for unmapped nodes.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeLabelDefault('unknown')
#' }
#' @export
setNodeLabelDefault <- function(new.label, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    # Label shown on every node that has no label mapping.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL", value = new.label),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Label Color Default
#'
#' @description Set the default node label color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeLabelColorDefault('#FD5903')
#' }
#' @export
setNodeLabelColorDefault <- function(new.color, style.name=NULL,
                                     base.url=.defaultBaseUrl) {
    # Reject anything that is not a hex color string (e.g. '#FD5903').
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_COLOR", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Label Opacity Default
#'
#' @description Set default opacity value for all unmapped node labels.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeLabelOpacityDefault(50)
#' }
#' @export
setNodeLabelOpacityDefault <- function(new.opacity, style.name=NULL,
                                       base.url=.defaultBaseUrl) {
    # Opacity must be a value in [0, 255]; 0 is fully transparent.
    .checkOpacity(new.opacity)
    # Label opacity is the NODE_LABEL_TRANSPARENCY visual property.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_LABEL_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Get Node Selection Color Default
#'
#' @description Retrieve the default selection node color.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return Current default selection node color as hex code
#' @examples \donttest{
#' getNodeSelectionColorDefault()
#' }
#' @export
getNodeSelectionColorDefault <- function(style.name=NULL, base.url=.defaultBaseUrl) {
    # Query the style's default value for the NODE_SELECTED_PAINT property.
    getVisualPropertyDefault('NODE_SELECTED_PAINT', style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Selection Color Default
#'
#' @description Set the default selection node color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name (optional) Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeSelectionColorDefault('#FD5903')
#' }
#' @export
setNodeSelectionColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    # Reject anything that is not a hex color string (e.g. '#FD5903').
    .checkHexColor(new.color)
    # Selected-node color is the NODE_SELECTED_PAINT visual property.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_SELECTED_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Shape Default
#'
#' @description Set the default node shape.
#' @param new.shape Name of shape, e.g., ELLIPSE, RECTANGLE, etc (see \link{getNodeShapes})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeShapeDefault('ELLIPSE')
#' }
#' @export
setNodeShapeDefault <- function(new.shape, style.name=NULL, base.url=.defaultBaseUrl) {
    # Shape names are case-insensitive on input; Cytoscape expects upper case.
    new.shape <- toupper(new.shape)
    # Guard clause: only shapes known to the running Cytoscape are accepted.
    if (!new.shape %in% getNodeShapes(base.url)) {
        stop (sprintf ('%s is not a valid shape. Use getNodeShapes() to find valid values.', new.shape))
    }
    setVisualPropertyDefault(
        list(visualProperty = "NODE_SHAPE", value = new.shape),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Size Default
#'
#' @description Set the default node size.
#' @param new.size Numeric value for size
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeSizeDefault(35)
#' }
#' @export
setNodeSizeDefault <- function(new.size, style.name=NULL,
                               base.url=.defaultBaseUrl) {
    # NODE_SIZE is only honored when width and height are locked together,
    # so lock the dimensions first.
    lockNodeDimensions(TRUE, style.name, base.url)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_SIZE", value = new.size),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Width Default
#'
#' @description Set the default node width.
#' @param new.width Numeric value for width.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeWidthDefault(35)
#' }
#' @export
setNodeWidthDefault <- function(new.width, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    # Width can only be set independently when node dimensions are unlocked.
    lockNodeDimensions(FALSE, style.name, base.url)
    setVisualPropertyDefault(
        list(visualProperty = "NODE_WIDTH", value = new.width),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Node Tooltip Default
#'
#' @description Set the default node tooltip
#' @param new.tooltip String tooltip for unmapped nodes.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setNodeTooltipDefault('unknown')
#' }
#' @export
setNodeTooltipDefault <- function(new.tooltip, style.name=NULL,
                                  base.url=.defaultBaseUrl) {
    # Delegate to the generic default-setter with the NODE_TOOLTIP property.
    setVisualPropertyDefault(
        list(visualProperty = "NODE_TOOLTIP", value = new.tooltip),
        style.name, base.url)
}
# ==============================================================================
# II.b. Edge Properties
# Pattern A: (1) prepare input value as named list, (2) call setVisualPropertyDefault()
# Pattern B: (1) call getVisualPropertyDefault()
# ------------------------------------------------------------------------------
#' @title Set Edge Color Default
#'
#' @description Set the default edge color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeColorDefault('#FD5903')
#' }
#' @export
setEdgeColorDefault <- function(new.color, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    # Update both unselected paint properties; which one Cytoscape renders
    # depends on the style's arrow-color dependency setting.
    for (vp in c("EDGE_UNSELECTED_PAINT", "EDGE_STROKE_UNSELECTED_PAINT")) {
        setVisualPropertyDefault(
            list(visualProperty = vp, value = new.color),
            style.name, base.url)
    }
}
# ------------------------------------------------------------------------------
#' @title Set Edge Font Face Default
#'
#' @description Set the default edge font.
#' @param new.font String specification of font face, style and size, e.g.,
#' "SansSerif,plain,12" or "Dialog,plain,10"
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeFontFaceDefault("Dialog,plain,10")
#' }
#' @export
setEdgeFontFaceDefault <- function(new.font, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    # Delegate to the generic default-setter with the EDGE_LABEL_FONT_FACE property.
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL_FONT_FACE", value = new.font),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Font Size Default
#'
#' @description Set the default edge font size.
#' @param new.size Numeric value for size
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeFontSizeDefault(12)
#' }
#' @export
setEdgeFontSizeDefault <- function(new.size, style.name=NULL,
                                   base.url=.defaultBaseUrl) {
    # Delegate to the generic default-setter with the EDGE_LABEL_FONT_SIZE property.
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL_FONT_SIZE", value = new.size),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Label Default
#'
#' @description Set the default edge label.
#' @param new.label String label for unmapped edges.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLabelDefault('unknown')
#' }
#' @export
setEdgeLabelDefault <- function(new.label, style.name=NULL,
                                base.url=.defaultBaseUrl) {
    # Delegate to the generic default-setter with the EDGE_LABEL property.
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL", value = new.label),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Label Color Default
#'
#' @description Set the default edge label color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLabelColorDefault("#FD5903")
#' }
#' @export
setEdgeLabelColorDefault <- function(new.color, style.name=NULL,
                                     base.url=.defaultBaseUrl) {
    # Validate the hex color before touching the style.
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL_COLOR", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Label Opacity Default
#'
#' @description Set default opacity value for all unmapped edge labels.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLabelOpacityDefault(50)
#' }
#' @export
setEdgeLabelOpacityDefault <- function(new.opacity, style.name=NULL,
                                       base.url=.defaultBaseUrl) {
    # Reject values outside the 0-255 opacity range up front.
    .checkOpacity(new.opacity)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LABEL_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Line Width Default
#'
#' @description Set the default edge width.
#' @param new.width Numeric value for width
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLineWidthDefault(3)
#' }
#' @export
setEdgeLineWidthDefault <- function(new.width, style.name=NULL,
                                    base.url=.defaultBaseUrl) {
    # Delegate to the generic default-setter with the EDGE_WIDTH property.
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_WIDTH", value = new.width),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Line Style Default
#'
#' @description Set the default edge style.
#' @param new.line.style Name of line style, e.g., SOLID, LONG_DASH, etc (see \link{getLineStyles})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeLineStyleDefault('LONG_DASH')
#' }
#' @export
setEdgeLineStyleDefault <- function(new.line.style, style.name=NULL,
                                    base.url=.defaultBaseUrl) {
    # NOTE(review): the style name is not validated here (cf. setNodeShapeDefault);
    # invalid values are passed through to CyREST unchanged.
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_LINE_TYPE", value = new.line.style),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Opacity Default
#'
#' @description Set default opacity value for all unmapped edges.
#' @param new.opacity Numeric values between 0 and 255; 0 is invisible.
#' @param style.name Name of style; default is "default" style.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeOpacityDefault(50)
#' }
#' @export
setEdgeOpacityDefault <- function(new.opacity, style.name=NULL,
                                  base.url=.defaultBaseUrl) {
    # Reject values outside the 0-255 opacity range up front.
    .checkOpacity(new.opacity)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_TRANSPARENCY", value = new.opacity),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Get Edge Selection Color Default
#'
#' @description Retrieve the default selected edge color.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return Color as hex code, e.g., #FD5903
#' @examples \donttest{
#' getEdgeSelectionColorDefault()
#' }
#' @export
getEdgeSelectionColorDefault <- function(style.name=NULL, base.url=.defaultBaseUrl) {
    # Bug fix: forward base.url so a custom CyREST location is honored
    # (previously getStyleDependencies() always queried the default URL).
    matched <- unname(getStyleDependencies(base.url = base.url)['arrowColorMatchesEdge'])
    # Which property holds the selection color depends on whether arrow color
    # is locked to the edge color in this style.
    if(matched)
        return(getVisualPropertyDefault('EDGE_SELECTED_PAINT',style.name, base.url))
    else
        return(getVisualPropertyDefault('EDGE_STROKE_SELECTED_PAINT',style.name, base.url))
}
# ------------------------------------------------------------------------------
#' @title Set Edge Selection Color Default
#'
#' @description Set the default selected edge color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeSelectionColorDefault('#FD5903')
#' }
#' @export
setEdgeSelectionColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    .checkHexColor(new.color)
    # Set both selection paint properties so the color applies whether or not
    # the style's arrow-color dependency is enabled.
    style = list(visualProperty = "EDGE_SELECTED_PAINT", value = new.color)
    setVisualPropertyDefault(style, style.name, base.url)
    # Bug fix: property name was misspelled "EDGE_STROKE_ELECTED_PAINT",
    # so the stroke selection color was never actually updated.
    style = list(visualProperty = "EDGE_STROKE_SELECTED_PAINT", value = new.color)
    setVisualPropertyDefault(style, style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Source Arrow Color Default
#'
#' @description Set the default edge source arrow color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeSourceArrowColorDefault('#FD5903')
#' }
#' @export
setEdgeSourceArrowColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    # Validate the hex color before touching the style.
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_SOURCE_ARROW_UNSELECTED_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Target Arrow Color Default
#'
#' @description Set the default edge target arrow color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeTargetArrowColorDefault('#FD5903')
#' }
#' @export
setEdgeTargetArrowColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    # Validate the hex color before touching the style.
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_TARGET_ARROW_UNSELECTED_PAINT", value = new.color),
        style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Edge Source Arrow Shape Default
#'
#' @description Set the default edge source arrow shape.
#' @param new.shape Name of shape, e.g., ARROW, T, etc (see \link{getArrowShapes})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeSourceArrowShapeDefault('ARROW')
#' }
#' @export
setEdgeSourceArrowShapeDefault <- function(new.shape, style.name=NULL, base.url=.defaultBaseUrl) {
    # Consistency fix: normalize case and validate the shape name, matching
    # setNodeShapeDefault(), instead of passing bad values through to CyREST.
    new.shape <- toupper(new.shape)
    if (new.shape %in% getArrowShapes(base.url)){
        style = list(visualProperty = "EDGE_SOURCE_ARROW_SHAPE", value = new.shape)
        setVisualPropertyDefault(style, style.name, base.url)
    }else{
        stop (sprintf ('%s is not a valid shape. Use getArrowShapes() to find valid values.', new.shape))
    }
}
# ------------------------------------------------------------------------------
#' @title Set Edge Target Arrow Shape Default
#'
#' @description Set the default edge target arrow shape.
#' @param new.shape Name of shape, e.g., ARROW, T, etc (see \link{getArrowShapes})
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeTargetArrowShapeDefault('ARROW')
#' }
#' @export
setEdgeTargetArrowShapeDefault <- function(new.shape, style.name=NULL, base.url=.defaultBaseUrl) {
    # Consistency fix: normalize case and validate the shape name, matching
    # setNodeShapeDefault(), instead of passing bad values through to CyREST.
    new.shape <- toupper(new.shape)
    if (new.shape %in% getArrowShapes(base.url)){
        style = list(visualProperty = "EDGE_TARGET_ARROW_SHAPE", value = new.shape)
        setVisualPropertyDefault(style, style.name, base.url)
    }else{
        stop (sprintf ('%s is not a valid shape. Use getArrowShapes() to find valid values.', new.shape))
    }
}
# ------------------------------------------------------------------------------
#' @title Set Edge Tooltip Default
#'
#' @description Set the default edge tooltip
#' @param new.tooltip String tooltip for unmapped edges.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setEdgeTooltipDefault('unknown')
#' }
#' @export
setEdgeTooltipDefault <- function(new.tooltip, style.name=NULL,
                                  base.url=.defaultBaseUrl) {
    # Delegate to the generic default-setter with the EDGE_TOOLTIP property.
    setVisualPropertyDefault(
        list(visualProperty = "EDGE_TOOLTIP", value = new.tooltip),
        style.name, base.url)
}
# ==============================================================================
# II.c. Network Properties
# Pattern A: (1) prepare input value as named list, (2) call setVisualPropertyDefault()
# Pattern B: (1) call getVisualPropertyDefault()
# ------------------------------------------------------------------------------
#' @title Get Background Color Default
#'
#' @description Retrieve the default background color.
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return Color as hex code, e.g., #FD5903
#' @examples \donttest{
#' getBackgroundColorDefault()
#' }
#' @export
getBackgroundColorDefault <- function(style.name=NULL, base.url=.defaultBaseUrl) {
    # Query the style's default network background paint.
    getVisualPropertyDefault('NETWORK_BACKGROUND_PAINT', style.name, base.url)
}
# ------------------------------------------------------------------------------
#' @title Set Background Color Default
#'
#' @description Set the default background color.
#' @param new.color Color as hex code, e.g., #FD5903
#' @param style.name Name of style; default is "default" style
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' setBackgroundColorDefault('#888888')
#' }
#' @export
setBackgroundColorDefault <- function(new.color, style.name=NULL, base.url=.defaultBaseUrl) {
    # Validate the hex color before touching the style.
    .checkHexColor(new.color)
    setVisualPropertyDefault(
        list(visualProperty = "NETWORK_BACKGROUND_PAINT", value = new.color),
        style.name, base.url)
}
|
# K-fold cross-validation of a Keras feed-forward classifier (64- and 32-unit
# L1-regularized hidden layers, 26-way softmax output) trained with a
# class-balanced batch generator.
# NOTE(review): depends on objects created earlier in the analysis
# (ksplits, xv.kfold, x.Liver, y.Liver, y.train) -- confirm upstream code ran.
# Number of splits (copy from above)
ksplits = ksplits
# Initialize empty lists (one slot per fold)
modell = list()             # fitted keras models
modell.predict = list()     # predicted vs. true labels
modell.predict.p = list()   # class probability matrices
history = list()            # training histories
# Loop over k-folds
for (fold in 1:ksplits) {
  cat('Fold: ' ,fold, '\n')
  # extract held-out row indices for this fold from the CV split list
  ix.test <- as.numeric(unlist(lapply(xv.kfold, function(x){x[[fold]]})))
  # extract training and test data for fold
  x.train <- x.Liver[-ix.test, ]
  x.test <- x.Liver[ ix.test, ]
  # FUNCTIONAL API (DOES allow dropout in input)
  # Define the model: input layer sized to the feature count
  inputs <- layer_input(shape = ncol(x.train))
  predictions <- inputs %>%
    # layer_dropout(rate = 0.5) %>% # Dropout
    layer_dense(units = 64, activation = 'relu',
                kernel_regularizer = regularizer_l1(l=0.001)) %>% # Dense (64)
    layer_dense(units = 32, activation = 'relu',
                kernel_regularizer = regularizer_l1(l=0.001)) %>% # Dense (32)
    layer_dense(units = 26, activation = 'softmax') # Softmax (26)
  # Put pieces together
  modell[[fold]] <- keras_model(inputs = inputs, outputs = predictions)
  # Compile the model
  modell[[fold]] %>% compile(
    loss = 'categorical_crossentropy',
    optimizer = optimizer_rmsprop(),
    metrics = c('accuracy')
  )
  # Batch generator: samples equally across all classes (16 examples per
  # class via tapply/sample), so each batch is class-balanced.
  gen <- function(){
    ix.batch <- tapply(1:nrow(x.train), y.Liver[-ix.test], sample, 16) %>% unlist %>% as.vector
    x.batch <- x.train[ix.batch,]
    y.batch <- y.train[-ix.test,][ix.batch,]
    return(list(x.batch, y.batch))
  }
  # Train the model, validating on the held-out fold each epoch
  history[[fold]] <-
    modell[[fold]] %>% fit_generator(
      generator = gen,
      steps_per_epoch = 256,
      epochs = 1000,
      validation_data = list(x.test, y.train[ix.test,]),
      verbose = 0)
  # print final-epoch metrics (incl. validation accuracy)
  print(history[[fold]]$metrics %>% lapply(tail, 1))
  # predict probabilities for softmax classification
  modell.predict.p[[fold]] <- modell[[fold]] %>% predict(x.test)
  # Predict labels for validation data (argmax over class probabilities)
  x.test.class <- modell.predict.p[[fold]] %>% apply(1, function(x){which.max(x)})
  modell.predict[[fold]] <-
    list( predicted = factor(x.test.class, labels = levels(y.Liver)),
          true = y.Liver[ix.test] )
  rm(x.test.class)
}
|
/R/NN_fitGenerator_2hidden_64-32nodes.R
|
permissive
|
passt/bloodinthedish
|
R
| false
| false
| 2,293
|
r
|
# K-fold cross-validation of a Keras feed-forward classifier (64- and 32-unit
# L1-regularized hidden layers, 26-way softmax output) trained with a
# class-balanced batch generator.
# NOTE(review): depends on objects created earlier in the analysis
# (ksplits, xv.kfold, x.Liver, y.Liver, y.train) -- confirm upstream code ran.
# Number of splits (copy from above)
ksplits = ksplits
# Initialize empty lists (one slot per fold)
modell = list()             # fitted keras models
modell.predict = list()     # predicted vs. true labels
modell.predict.p = list()   # class probability matrices
history = list()            # training histories
# Loop over k-folds
for (fold in 1:ksplits) {
  cat('Fold: ' ,fold, '\n')
  # extract held-out row indices for this fold from the CV split list
  ix.test <- as.numeric(unlist(lapply(xv.kfold, function(x){x[[fold]]})))
  # extract training and test data for fold
  x.train <- x.Liver[-ix.test, ]
  x.test <- x.Liver[ ix.test, ]
  # FUNCTIONAL API (DOES allow dropout in input)
  # Define the model: input layer sized to the feature count
  inputs <- layer_input(shape = ncol(x.train))
  predictions <- inputs %>%
    # layer_dropout(rate = 0.5) %>% # Dropout
    layer_dense(units = 64, activation = 'relu',
                kernel_regularizer = regularizer_l1(l=0.001)) %>% # Dense (64)
    layer_dense(units = 32, activation = 'relu',
                kernel_regularizer = regularizer_l1(l=0.001)) %>% # Dense (32)
    layer_dense(units = 26, activation = 'softmax') # Softmax (26)
  # Put pieces together
  modell[[fold]] <- keras_model(inputs = inputs, outputs = predictions)
  # Compile the model
  modell[[fold]] %>% compile(
    loss = 'categorical_crossentropy',
    optimizer = optimizer_rmsprop(),
    metrics = c('accuracy')
  )
  # Batch generator: samples equally across all classes (16 examples per
  # class via tapply/sample), so each batch is class-balanced.
  gen <- function(){
    ix.batch <- tapply(1:nrow(x.train), y.Liver[-ix.test], sample, 16) %>% unlist %>% as.vector
    x.batch <- x.train[ix.batch,]
    y.batch <- y.train[-ix.test,][ix.batch,]
    return(list(x.batch, y.batch))
  }
  # Train the model, validating on the held-out fold each epoch
  history[[fold]] <-
    modell[[fold]] %>% fit_generator(
      generator = gen,
      steps_per_epoch = 256,
      epochs = 1000,
      validation_data = list(x.test, y.train[ix.test,]),
      verbose = 0)
  # print final-epoch metrics (incl. validation accuracy)
  print(history[[fold]]$metrics %>% lapply(tail, 1))
  # predict probabilities for softmax classification
  modell.predict.p[[fold]] <- modell[[fold]] %>% predict(x.test)
  # Predict labels for validation data (argmax over class probabilities)
  x.test.class <- modell.predict.p[[fold]] %>% apply(1, function(x){which.max(x)})
  modell.predict[[fold]] <-
    list( predicted = factor(x.test.class, labels = levels(y.Liver)),
          true = y.Liver[ix.test] )
  rm(x.test.class)
}
|
# not exported
# Expand a vector computed on a reduced series (positions given by `index`)
# back onto the full grid 1..n+1: out[1] takes x[1], out[index+1] takes x[-1],
# a leading gap is back-filled with x[2], and remaining gaps carry the last
# observed value forward. If x is already full length, it is returned as-is.
reconstruct.vector<-function(x,index,n)
{
  # Already full length: nothing to reconstruct.
  if (length(x) == n + 1) {
    return(x)
  }
  out <- rep(NA, n + 1)
  out[1] <- x[1]
  out[index + 1] <- x[-1]
  # Locate the first filled position after the start.
  pos <- 1
  while (is.na(out[pos + 1])) {
    pos <- pos + 1
  }
  # Back-fill the leading gap with the first observed value.
  # (A previous variant filled with 0 instead; see package history.)
  if (pos < n) {
    out[2:(pos + 1)] <- x[2]
  }
  # Carry the last observed value forward through remaining gaps.
  for (pos in 2:n) {
    if (is.na(out[pos + 1])) {
      out[pos + 1] <- out[pos]
    }
  }
  return(out)
}
# not exported
# Expand a PELT result computed on a series with missing values back onto the
# full time index 1..n. `pelt.out` is a list with elements `lastchangelike`
# (costs, expanded via reconstruct.vector) and `cpts` (changepoints, mapped
# through `index` back to original positions). Returned unchanged when the
# result is already full length.
reconstruct.pelt.result<-function(pelt.out,index,n)
{
  if(length(pelt.out$lastchangelike) == n+1) { return(pelt.out) }
  # update the output of the last-change likelihood vector.
  # Idiom fix: the local was previously named "F", which shadows FALSE.
  filled<-reconstruct.vector(pelt.out$lastchangelike,index,n)
  cpts=pelt.out$cpts
  if(length(cpts)>1){
    # map changepoint positions from the reduced series back to the full index
    cpts=index[cpts]
  }
  cpts[length(cpts)]<-n # last changepoint is always the end of the data
  return(list("lastchangelike"=filled,"cpts"=cpts))
}
#' Most Recent Changepoints.
#'
#' Detects the Most Recent Changepoints (mrc) for panel data consisting of many related univariate timeseries (Bardwell, Eckley, Fearnhead and Smith, 2016).
#' The method first determines the most recent univariate changepoints using PELT (Killick, Fearnhead and Eckley 2012) and then pools information across the
#' time-series by solving the K-median problem using \link[tbart]{tb.raw} (Teitz and Bart, 1968).
#'
#' @param data An \eqn{n \times N} matrix or data frame representing a length \eqn{n} time series containing observations of \eqn{N} variables. The data can contain missing values
#' which are indicated by NA.
#' @param cost A string indicating which cost function to use. Possible choices are "mean" (change in mean) or "var" (change in variance).
#' The default value is \code{cost="mean"}.
#' @param alpha The variable-specific penalty used to penalise the addition of a given changepoint into a given variable. This can be specified as a real positive value
#' or as a function of \eqn{n}. The latter form is used when the data contains missing values which leads to time series of different lengths.
#'
#' Default value \code{alpha=function(n) 1.5*log(n)}.
#' @param indexed Boolean value indicating that the first column of \code{data} is an index variable. If \code{indexed=FALSE} an index variable will automatically be generated.
#' Default value is \code{indexed=FALSE}.
#' @param pmax Maximum number of most recent changepoints to search for. Default value \code{pmax=5}.
#' @param mad Boolean value indicating if the variates should be scaled by an estimate of the their standard deviation obtained using mean absolute deviation (Zhang, Nancy, Siegmund and David 2007).
#' This is useful in conjunction with \code{cost="mean"} for which unit variance is assumed. Default value is \code{mad=FALSE}.
#' @param phi Lag 1 autocorrelation to model the temporal dependence in the residuals of the time series assuming a MA(1) process. Default \code{phi=0}.
#'
#' @references \insertRef{doi:10.1080/00401706.2018.1438926}{changepoint.mv}
#' @references \insertRef{OR:TEITZBART}{changepoint.mv}
#' @references \insertRef{doi:10.1080/01621459.2012.737745}{changepoint.mv}
#'
#' @examples
#' library(changepoint.mv)
#' data(mrcexample)
#' res<-mrc(mrcexample,pmax=2)
#' MDL(res) # MDL == pmax (possibly under-estimating MDL, retry)
#' res<-mrc(mrcexample,pmax=6)
#' MDL(res) # MDL = 5 (< pmax)
#' # view the most recent changepoints (corresponding to pmax = 5)
#' unique(cpts.mr(res,p=5)[,1])
#' summary(res) # summary of result
#'
#' @export
mrc <- function(data,cost="mean",alpha=function(n) 1.5*log(n),pmax=5,indexed=FALSE,mad=FALSE,phi=0.0)
{
    # no of time points (rows); series count is derived later as N
    n <- nrow(data)
    usr.data<-process.data(data,indexed)
    penalty_function<-NULL
    # set the penalty function: accept either a function of n or a scalar
    if(is_function(alpha))
    {
        penalty_function<-alpha
    }
    else if(is_numeric(alpha))
    {
        penalty_function<-function(n) {alpha}
    }
    else
    {
        stop("alpha should be scalar or a function")
    }
    # phi correction - only moving average for now (MA(1) inflates penalty by 1+2*phi)
    correction<-NULL
    if(is.na(phi))
    {
        correction<-1
    }
    else if(is_scalar(phi) && 0 <= phi && phi <= 1)
    {
        correction<-1+2*phi
    }
    else
    {
        stop("phi must be a scalar with 0 <= phi <= 1")
    }
    if(!(cost == "mean" || cost == "var"))
    {
        stop("mrc only supports cost = mean or var in this version.")
    }
    # variance cost needs at least 2 points per segment
    min.dist<-0
    if(cost=="var")
    {
        min.dist<-1
    }
    # apply mad scaling if necessary (unit variance assumed by cost="mean")
    if(mad)
    {
        data<-cbind(usr.data[,1],data.frame(Map(function(i) usr.data[,i]*sqrt(2)/mad(diff(usr.data[,i][!is.na(usr.data[,i])])),2:ncol(usr.data))))
        names(data)<-names(usr.data)
    }
    else
    {
        data<-usr.data
    }
    # drop the index column and work on the value matrix
    data<-as.matrix(data[-1])
    not.missing.indices<-Map(function(col) (1:nrow(data))[!is.na(data[,col])],1:ncol(data))
    # run univariate PELT per series (on non-missing values only), then map
    # each result back onto the full time index
    pelt.results<-tryCatch(
    {
        Map(function(X,index)
        {
            pelt.result<-rcppeigen_peltuv(X,cost,correction*penalty_function(length(X)),min.dist)
            pelt.result$lastchangelike[1:length(X)]<-pelt.result$lastchangelike[1:length(X)]+rcppeigen_tail_costs(X,cost,min.dist)
            pelt.result$cpts<-pelt.result$cpts[-1]
            pelt.result<-reconstruct.pelt.result(pelt.result,index,nrow(data))
            return(list(pelt.result$lastchangelike[-(nrow(data)+1)],pelt.result$cpts))
        },
        Map(function(u) data[,u][!is.na(data[,u])],1:ncol(data)),
        not.missing.indices
        )
    },
    # Bug fix: the handler previously assigned e$message and then called stop()
    # with no arguments, which raised an *empty* error and discarded the
    # intended message. Signal the intended message directly instead.
    error = function(e) {stop("mrc stopped because of user interrupt", call. = FALSE)}
    )
    # G[i, t]: cost for series i of placing its most recent changepoint at time t
    G <- matrix(unlist(Map(function(X) X[[1]],pelt.results)),ncol(data),nrow(data),byrow=TRUE)
    if(!is.na(phi))
    {
        G <- G/correction
    }
    uv.cpts <- Map(function(X) X[[2]] ,pelt.results)
    N <- dim(G)[1]
    location.vec <- 0:(n-1)
    penalised.cost <- numeric( pmax )
    mmrc <- vector( "list" , pmax )
    affected <- vector( "list" , pmax )
    # p=1 separate as simpler
    mmrc[[1]] <- tb.raw( G , c(1) )
    # say all series are affected by this 1 change
    index <- rep(1,times=N)
    # find which affected (more evidence above threshold)
    affected[[1]] <- index
    # objective cost
    penalised.cost[1] <- sum( G[ , mmrc[[1]] ] )
    if (pmax>1){
        for (p in 2:pmax){
            # mmrc[[i]] gives locations of the i best locations
            mmrc[[p]] <- tb.raw( G , c(1:p) )
            # affected[[i]] gives each dimension a label from 1:i
            # depending on which change it is associated with
            # (a stray bare "0" expression that preceded this line was removed)
            affected[[p]] <- apply( G[ , mmrc[[p]] ] , 1 , which.min )
            # penalised.cost[[i]] gives the objective cost for solving with i different changes/sets
            csum <- 0
            for (i in 1:p){
                wa <- which( affected[[p]] == i)
                csum <- csum + sum( G[ wa , mmrc[[p]][i] ] )
            }
            penalised.cost[p] <- csum
        }
    }
    locations <- vector("list",pmax)
    for ( i in 1:pmax ){
        locations[[i]] <- location.vec[ mmrc[[i]] ]
    }
    # NOTE(review): this MDL value is computed but never used or returned here;
    # presumably the MDL() accessor recomputes it from penalised.costs -- confirm
    # before removing.
    MDL <- which.min(penalised.cost + (N*log(1:pmax)+ (1:pmax)*log(n))/log(2)) ##MDL
    # prepend 0 (series start) to each univariate changepoint vector
    cpts.uv<-Map(function(cpts) c(0,cpts),uv.cpts)
    cpts.mv<-list()
    # for each candidate p, replace each series' most recent changepoint with
    # its assigned pooled location
    for ( i in 1:pmax )
    {
        cpts.mr<-locations[[i]][affected[[i]]]
        cpts.mv[[i]]<-Map(function(uv,mr) if(length(uv) > 2) {uv[length(uv)-1]<-mr;return(uv);} else {return(uv);},cpts.uv,cpts.mr)
    }
    return(changepoint.mv.mrc.class(data=usr.data,pmax=pmax,cpts.uv=cpts.uv,cost=cost,mad=mad,cpts.mv=cpts.mv,alpha=penalty_function(n),
                                    penalised.costs=penalised.cost,locations=locations,affected=affected))
}
|
/fuzzedpackages/changepoint.mv/R/mrc.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 7,855
|
r
|
# not exported
# Expand a vector computed on a reduced series (positions given by `index`)
# back onto the full grid 1..n+1: out[1] takes x[1], out[index+1] takes x[-1],
# a leading gap is back-filled with x[2], and remaining gaps carry the last
# observed value forward. If x is already full length, it is returned as-is.
reconstruct.vector<-function(x,index,n)
{
  # Already full length: nothing to reconstruct.
  if (length(x) == n + 1) {
    return(x)
  }
  out <- rep(NA, n + 1)
  out[1] <- x[1]
  out[index + 1] <- x[-1]
  # Locate the first filled position after the start.
  pos <- 1
  while (is.na(out[pos + 1])) {
    pos <- pos + 1
  }
  # Back-fill the leading gap with the first observed value.
  # (A previous variant filled with 0 instead; see package history.)
  if (pos < n) {
    out[2:(pos + 1)] <- x[2]
  }
  # Carry the last observed value forward through remaining gaps.
  for (pos in 2:n) {
    if (is.na(out[pos + 1])) {
      out[pos + 1] <- out[pos]
    }
  }
  return(out)
}
# not exported
# Expand a PELT result computed on a series with missing values back onto the
# full time index 1..n. `pelt.out` is a list with elements `lastchangelike`
# (costs, expanded via reconstruct.vector) and `cpts` (changepoints, mapped
# through `index` back to original positions). Returned unchanged when the
# result is already full length.
reconstruct.pelt.result<-function(pelt.out,index,n)
{
  if(length(pelt.out$lastchangelike) == n+1) { return(pelt.out) }
  # update the output of the last-change likelihood vector.
  # Idiom fix: the local was previously named "F", which shadows FALSE.
  filled<-reconstruct.vector(pelt.out$lastchangelike,index,n)
  cpts=pelt.out$cpts
  if(length(cpts)>1){
    # map changepoint positions from the reduced series back to the full index
    cpts=index[cpts]
  }
  cpts[length(cpts)]<-n # last changepoint is always the end of the data
  return(list("lastchangelike"=filled,"cpts"=cpts))
}
#' Most Recent Changepoints.
#'
#' Detects the Most Recent Changepoints (mrc) for panel data consisting of many related univariate timeseries (Bardwell, Eckley, Fearnhead and Smith, 2016).
#' The method first determines the most recent univariate changepoints using PELT (Killick, Fearnhead and Eckley 2012) and then pools information across the
#' time-series by solving the K-median problem using \link[tbart]{tb.raw} (Teitz and Bart, 1968).
#'
#' @param data An \eqn{n \times N} matrix or data frame representing a length \eqn{n} time series containing observations of \eqn{N} variables. The data can contain missing values
#' which are indicated by NA.
#' @param cost A string indicating which cost function to use. Possible choices are "mean" (change in mean) or "var" (change in variance).
#' The default value is \code{cost="mean"}.
#' @param alpha The variable-specific penalty used to penalise the addition of a given changepoint into a given variable. This can be specified as a real positive value
#' or as a function of \eqn{n}. The latter form is used when the data contains missing values which leads to time series of different lengths.
#'
#' Default value \code{alpha=function(n) 1.5*log(n)}.
#' @param indexed Boolean value indicating that the first column of \code{data} is an index variable. If \code{indexed=FALSE} an index variable will automatically be generated.
#' Default value is \code{indexed=FALSE}.
#' @param pmax Maximum number of most recent changepoints to search for. Default value \code{pmax=5}.
#' @param mad Boolean value indicating if the variates should be scaled by an estimate of the their standard deviation obtained using mean absolute deviation (Zhang, Nancy, Siegmund and David 2007).
#' This is useful in conjunction with \code{cost="mean"} for which unit variance is assumed. Default value is \code{mad=FALSE}.
#' @param phi Lag 1 autocorrelation to model the temporal dependence in the residuals of the time series assuming a MA(1) process. Default \code{phi=0}.
#'
#' @references \insertRef{doi:10.1080/00401706.2018.1438926}{changepoint.mv}
#' @references \insertRef{OR:TEITZBART}{changepoint.mv}
#' @references \insertRef{doi:10.1080/01621459.2012.737745}{changepoint.mv}
#'
#' @examples
#' library(changepoint.mv)
#' data(mrcexample)
#' res<-mrc(mrcexample,pmax=2)
#' MDL(res) # MDL == pmax (possibly under-estimating MDL, retry)
#' res<-mrc(mrcexample,pmax=6)
#' MDL(res) # MDL = 5 (< pmax)
#' # view the most recent changepoints (corresponding to pmax = 5)
#' unique(cpts.mr(res,p=5)[,1])
#' summary(res) # summary of result
#'
#' @export
mrc <- function(data,cost="mean",alpha=function(n) 1.5*log(n),pmax=5,indexed=FALSE,mad=FALSE,phi=0.0)
{
    # Number of time points in each series.
    n <- nrow(data)
    # Ensure the data carries an index column (generated when indexed=FALSE).
    usr.data<-process.data(data,indexed)
    penalty_function<-NULL
    # Normalise alpha into a function of n: a supplied function is used
    # as-is, a numeric scalar becomes a constant function.
    if(is_function(alpha))
    {
        penalty_function<-alpha
    }
    else if(is_numeric(alpha))
    {
        penalty_function<-function(n) {alpha}
    }
    else
    {
        stop("alpha should be scalar or a function")
    }
    # Penalty inflation factor for MA(1) temporal dependence in the
    # residuals; phi = 0 (or NA) means no correction.
    correction<-NULL
    if(is.na(phi))
    {
        correction<-1
    }
    else if(is_scalar(phi) && 0 <= phi && phi <= 1)
    {
        correction<-1+2*phi
    }
    else
    {
        stop("phi must be a scalar with 0 <= phi <= 1")
    }
    if(!(cost == "mean" || cost == "var"))
    {
        stop("mrc only supports cost = mean or var in this version.")
    }
    # A change in variance needs at least two points per segment.
    min.dist<-0
    if(cost=="var")
    {
        min.dist<-1
    }
    # Optionally rescale each variate by a MAD-based estimate of its
    # standard deviation (the "mean" cost assumes unit variance).
    if(mad)
    {
        data<-cbind(usr.data[,1],data.frame(Map(function(i) usr.data[,i]*sqrt(2)/mad(diff(usr.data[,i][!is.na(usr.data[,i])])),2:ncol(usr.data))))
        names(data)<-names(usr.data)
    }
    else
    {
        data<-usr.data
    }
    # Drop the index column; record, per variate, which rows are observed
    # (series may have different effective lengths due to NAs).
    data<-as.matrix(data[-1])
    not.missing.indices<-Map(function(col) (1:nrow(data))[!is.na(data[,col])],1:ncol(data))
    # Run PELT on each univariate series (missing values removed), add the
    # tail costs, then map the results back onto the full time grid.
    # NOTE(review): the error handler rewrites every failure as a user
    # interrupt; consider propagating conditionMessage(e) instead.
    pelt.results<-tryCatch(
    {
        Map(function(X,index)
        {
            pelt.result<-rcppeigen_peltuv(X,cost,correction*penalty_function(length(X)),min.dist)
            pelt.result$lastchangelike[1:length(X)]<-pelt.result$lastchangelike[1:length(X)]+rcppeigen_tail_costs(X,cost,min.dist)
            pelt.result$cpts<-pelt.result$cpts[-1]
            pelt.result<-reconstruct.pelt.result(pelt.result,index,nrow(data))
            return(list(pelt.result$lastchangelike[-(nrow(data)+1)],pelt.result$cpts))
        },
        Map(function(u) data[,u][!is.na(data[,u])],1:ncol(data)),
        not.missing.indices
        )
    },
    error = function(e) {e$message<-"mrc stopped because of user interrupt";stop();}
    )
    # G[i, t] = cost for series i when its most recent change is at time t.
    G <- matrix(unlist(Map(function(X) X[[1]],pelt.results)),ncol(data),nrow(data),byrow=TRUE)
    if(!is.na(phi))
    {
        G <- G/correction
    }
    uv.cpts <- Map(function(X) X[[2]] ,pelt.results)
    N <- dim(G)[1]
    location.vec <- 0:(n-1)
    penalised.cost <- numeric( pmax )
    mmrc <- vector( "list" , pmax )
    affected <- vector( "list" , pmax )
    # p = 1 handled separately as it is simpler: a single shared most
    # recent change to which every series is assigned.
    mmrc[[1]] <- tb.raw( G , c(1) )
    index <- rep(1,times=N)
    affected[[1]] <- index
    # Objective cost of explaining the data with one shared change.
    penalised.cost[1] <- sum( G[ , mmrc[[1]] ] )
    if (pmax>1){
        for (p in 2:pmax){
            # mmrc[[p]] gives the locations of the p best change locations
            # (Teitz-Bart K-median solution on the cost matrix G).
            mmrc[[p]] <- tb.raw( G , c(1:p) )
            # affected[[p]] labels each series 1:p according to which of
            # the p changes it is associated with (its cheapest option).
            affected[[p]] <- apply( G[ , mmrc[[p]] ] , 1 , which.min )
            # penalised.cost[p] is the objective cost of explaining the
            # data with p distinct most recent changes.
            csum <- 0
            for (i in 1:p){
                wa <- which( affected[[p]] == i)
                csum <- csum + sum( G[ wa , mmrc[[p]][i] ] )
            }
            penalised.cost[p] <- csum
        }
    }
    # Translate matrix column indices into time locations (0-based grid).
    locations <- vector("list",pmax)
    for ( i in 1:pmax ){
        locations[[i]] <- location.vec[ mmrc[[i]] ]
    }
    # NOTE(review): MDL is computed here but neither used below nor
    # returned; callers appear to obtain it via an MDL() accessor on the
    # result class -- confirm before removing.
    MDL <- which.min(penalised.cost + (N*log(1:pmax)+ (1:pmax)*log(n))/log(2)) ##MDL
    # Prepend 0 to each univariate changepoint vector and, for each p,
    # splice the assigned most recent change into the penultimate slot.
    cpts.uv<-Map(function(cpts) c(0,cpts),uv.cpts)
    cpts.mv<-list()
    for ( i in 1:pmax )
    {
        cpts.mr<-locations[[i]][affected[[i]]]
        cpts.mv[[i]]<-Map(function(uv,mr) if(length(uv) > 2) {uv[length(uv)-1]<-mr;return(uv);} else {return(uv);},cpts.uv,cpts.mr)
    }
    return(changepoint.mv.mrc.class(data=usr.data,pmax=pmax,cpts.uv=cpts.uv,cost=cost,mad=mad,cpts.mv=cpts.mv,alpha=penalty_function(n),
                                    penalised.costs=penalised.cost,locations=locations,affected=affected))
}
|
# Plot the languages listed in strange.csv on an interactive map, colouring
# the point borders by the type of "strange" nasal each language exhibits.
library(lingtypology)

nasal_data <- read.csv("strange.csv", sep = ";", header = TRUE, encoding = "UTF-8")

map.feature(languages = nasal_data$language,
            features = nasal_data$language,
            title = "Languages",
            stroke.features = nasal_data$strange,
            stroke.title = "Strange nasals",
            popup = nasal_data$biblio,      # bibliography shown on click
            label = nasal_data$language,
            label.hide = TRUE,
            radius = 7,
            legend = TRUE,
            stroke.legend = TRUE,
            stroke.legend.position = "topleft")
|
/mapafrica_strange_nasals.R
|
no_license
|
sudarikova/cartography
|
R
| false
| false
| 529
|
r
|
# Plot the languages listed in strange.csv on an interactive map, colouring
# the point borders by the type of "strange" nasal each language exhibits.
library(lingtypology)

nasal_data <- read.csv("strange.csv", sep = ";", header = TRUE, encoding = "UTF-8")

map.feature(languages = nasal_data$language,
            features = nasal_data$language,
            title = "Languages",
            stroke.features = nasal_data$strange,
            stroke.title = "Strange nasals",
            popup = nasal_data$biblio,      # bibliography shown on click
            label = nasal_data$language,
            label.hide = TRUE,
            radius = 7,
            legend = TRUE,
            stroke.legend = TRUE,
            stroke.legend.position = "topleft")
|
# Kaggle Titanic entry: fits a random-forest model (via caret) and a logistic
# regression on the same predictors, then writes one submission file for each.
# Packages: caret, randomForest; tidyverse for data wrangling.
# See the titanic_EDA upload for the exploratory analysis referenced below.
library(caret)
library(randomForest)
library(tidyverse)

train <- read.csv("/Titanic_Kaggle_GitHub/train.csv")
test <- read.csv("/Titanic_Kaggle_GitHub/test.csv")

# The outcome must be a factor for caret to treat this as classification.
train$Survived <- factor(train$Survived)

# Seed for reproducibility of the CV folds and the forest.
set.seed(66)

# Random forest with 5-fold cross-validation on a handful of predictors.
model <- train(Survived ~ Pclass + Sex + SibSp + Embarked + Parch + Fare,
               data = train,
               method = "rf",
               trControl = trainControl(method = "cv", number = 5))
model

# First prediction attempt: this errors because test$Fare contains an NA
# (see the EDA). We impute the column mean below and retry -- note that mean
# imputation is generally not best practice.
test$Survived <- predict(model, newdata = test)
test$Fare[is.na(test$Fare)] <- mean(test$Fare, na.rm = TRUE)
test$Survived <- predict(model, newdata = test)
head(test)

submission <- test %>% select("PassengerId", "Survived")
write.table(submission, file = "submission.csv", col.names = TRUE, row.names = FALSE, sep = ",")

# Logistic-regression alternative and its submission file.
LMmodel <- glm(Survived ~ Pclass + Sex + SibSp + Embarked + Parch + Fare,
               data = train, family = "binomial")
summary(LMmodel)
# The p-values do not inspire confidence, but we complete the submission anyway.

# predict(type = "response") yields probabilities; threshold at 50% to get a
# 0/1 survival indicator.
test$LMSurvived <- predict(LMmodel, newdata = test, type = "response")
table(test$LMSurvived)
test$LMSurvived <- as.numeric(test$LMSurvived >= 0.5)
table(test$LMSurvived)

LMsubmission <- test %>% select(PassengerId, Survived = LMSurvived)
write.table(LMsubmission, file = "LMsubmission.csv", col.names = TRUE, row.names = FALSE, sep = ",")
# This submission scored lower than the random-forest model above, as expected
# given the weak predictors seen in the model summary.
|
/titanic_prediction_script.R
|
no_license
|
Holt-Williams/Titanic_Kaggle
|
R
| false
| false
| 2,455
|
r
|
# Kaggle Titanic entry: fits a random-forest model (via caret) and a logistic
# regression on the same predictors, then writes one submission file for each.
# Packages: caret, randomForest; tidyverse for data wrangling.
# See the titanic_EDA upload for the exploratory analysis referenced below.
library(caret)
library(randomForest)
library(tidyverse)

train <- read.csv("/Titanic_Kaggle_GitHub/train.csv")
test <- read.csv("/Titanic_Kaggle_GitHub/test.csv")

# The outcome must be a factor for caret to treat this as classification.
train$Survived <- factor(train$Survived)

# Seed for reproducibility of the CV folds and the forest.
set.seed(66)

# Random forest with 5-fold cross-validation on a handful of predictors.
model <- train(Survived ~ Pclass + Sex + SibSp + Embarked + Parch + Fare,
               data = train,
               method = "rf",
               trControl = trainControl(method = "cv", number = 5))
model

# First prediction attempt: this errors because test$Fare contains an NA
# (see the EDA). We impute the column mean below and retry -- note that mean
# imputation is generally not best practice.
test$Survived <- predict(model, newdata = test)
test$Fare[is.na(test$Fare)] <- mean(test$Fare, na.rm = TRUE)
test$Survived <- predict(model, newdata = test)
head(test)

submission <- test %>% select("PassengerId", "Survived")
write.table(submission, file = "submission.csv", col.names = TRUE, row.names = FALSE, sep = ",")

# Logistic-regression alternative and its submission file.
LMmodel <- glm(Survived ~ Pclass + Sex + SibSp + Embarked + Parch + Fare,
               data = train, family = "binomial")
summary(LMmodel)
# The p-values do not inspire confidence, but we complete the submission anyway.

# predict(type = "response") yields probabilities; threshold at 50% to get a
# 0/1 survival indicator.
test$LMSurvived <- predict(LMmodel, newdata = test, type = "response")
table(test$LMSurvived)
test$LMSurvived <- as.numeric(test$LMSurvived >= 0.5)
table(test$LMSurvived)

LMsubmission <- test %>% select(PassengerId, Survived = LMSurvived)
write.table(LMsubmission, file = "LMsubmission.csv", col.names = TRUE, row.names = FALSE, sep = ",")
# This submission scored lower than the random-forest model above, as expected
# given the weak predictors seen in the model summary.
|
#########################################
#
# R code for generating simulated
# TTE data using numerical integration
# of hazard in R.
#
#########################################
library(mrgsolve)
library(dplyr)
library(multidplyr)
library(readr)
library(XML)
#library(flexsurv)
# NOTE(review): flexsurv is not attached, but flexsurv::flexsurvspline() is
# called below, so the package must still be installed.
library(survival)
library(survminer)
#'
#' First, we read the dataset used for fitting model in NONMEM, keeping only the
#' event records.
#'
dd = read_csv('./run6/TTEdat_v11.csv',na='.',guess_max = 100000) %>%
  filter(EVID == 0 & STIME == 0) %>%
  rename(DOSE=dose, X=x, AUC=AUCss)
#'
#' Let's look at a snippet of the data and plot the Kaplan-Meier estimates
#' of the survival curves.
#'
head(dd)
fit_original = survfit(Surv(TIME,DV)~DOSE, data=dd)
survminer::ggsurvplot(fit_original)
#'
#' Define the model using mrgsolve notation. We will point to the NONMEM
#' output in the $NMXML block. For more information about defining models
#' using mrgsolve see the mrgsolve user's guide at
#' http://mrgsolve.github.io/user_guide/.
#'
#path = './run6'
modelCode <- '
$NMXML
file = "./run6/run6.xml"
theta=TRUE, omega=TRUE,sigma=FALSE
$INIT
CUMHAZ = 0
$PARAM
X=0, AUC=0
$MAIN
double EMAX = THETA1;
double EC50 = THETA2;
double COEFF = THETA3;
double DELTA = THETA4;
double LAMBDA = THETA5 * exp(ETA(1));
double COV = COEFF*X; // binary covariate effect
double DEFF = EMAX * AUC / (EC50 + AUC); // exposure effect
double DEL = 1E-6 ; // to keep from taking 0**power
//
// ODE block
//
$ODE
double BASE = DELTA*pow(LAMBDA,DELTA)*pow((SOLVERTIME+DEL),(DELTA-1)); // TV baseline weibell hazard
dxdt_CUMHAZ = BASE * exp(DEFF + COV); // Hazard model + covariate
//
// Table
//
$TABLE
double SURV = exp(-CUMHAZ);
$CAPTURE CUMHAZ SURV
'
#'
#' Next, we compile the mrgsolve model and interrogate the parameter values.
#'
mrgsolveModel <- mcode(model="tte", code=modelCode,project='./run6/',preclean=TRUE)
param(mrgsolveModel)
#'
#' Next, we integrate hazard function for each subject in the trial.
#' We will get the cumulative hazard at a grid of times that are
#' spaced every 0.5 hour. Given the time scale of the original data
#' this grid is sufficiently dense.
survivalCurves = mrgsolveModel %>%
  idata_set(dd) %>%
  mrgsim(delta=(1/7)/2, carry.out='X,DOSE')
head(survivalCurves)
#'
#' Next, we define function for simulating nsims survival outcomes (T)
#' for a single survival curve.
# Inverse-CDF sampling: draw u ~ U(0,1) and invert the survival curve by
# linear interpolation to obtain one event time per draw.
simTimes = function(dd,nsims) {
  uvec = runif(nsims)
  simTTE = sapply(uvec, function(u) approx(x=dd$SURV,y=dd$time,xout=u)$y)
  return(data.frame(ID=dd$ID[1],irep=1:nsims,simTTE=simTTE))
}
#'
#' We will parallelize the simulations using the multidplyr package.
#' To do this, we will set-up a cluster, split the data across clusters,
#' install dplyr library and copy simTimes function
# to each node
mycluster = create_cluster(cores = parallel::detectCores())
survivalCurves = survivalCurves %>%
  partition(ID,cluster= mycluster) %>%
  cluster_library('dplyr') %>%
  cluster_assign_value('simTimes',simTimes)
#'
#' Finally, we will simulate 1000 outcomes for each patient,
#' then recombine and rearrange by simulation number (irep) and ID.
#'
simulatedTimes = survivalCurves %>%
  do(simTimes(.,nsims=1000)) %>%
  collect() %>%
  arrange(irep,ID)
#'
#' Now, we need to simulate the censoring times. To do this, we will
#' use a cubic spline estimate of the cumulative hazard (and survival
#' function) for the censoring process. This is effectively a parametric
#' model which is a close approximation to a Kaplan-Meier estimate
#' of the survival function.
#'
#' For this example, we choose the number of internal knots for the spline
#' through trial-and-error. More formal methods for choosing the number
#' and location of the spline knots could have be used. For this example,
#' four internal knots provides a good fit to the censoring distribution.
#'
censorModel = flexsurv::flexsurvspline(Surv(TIME,1-DV)~1, data=dd, k=4)
plot(censorModel, xlab='Time', ylab='Survival function for censoring process')
#'
#' We will define an administrative censoring time to be the last observed
#' censoring time. Subjects whose simulated censoring time is beyond the
#' last observed censoring time will be set to this administrative censoring
#' time.
#'
adminCensorTime = max(dd$TIME[dd$DV==0])
#'
#' Next, we extract the estimated censoring survival values at a grid of
#' times. We will interpolate between these values and use the inverse CDF
#' method to simulate censoring times.
#'
survEst = summary(censorModel,
                  ci=FALSE,
                  t=seq(0,max(dd$TIME),length=200)) %>%
  as.data.frame()
# Draws of u that fall outside the interpolation grid make approxfun()
# return NA; those are replaced by the administrative censoring time below.
ucens = runif(nrow(simulatedTimes))
simulatedTimes <- simulatedTimes %>%
  ungroup() %>%
  mutate(censorTime = approxfun(x=survEst$est, y=survEst$time)(ucens),
         censorTime = ifelse(is.na(censorTime), adminCensorTime, censorTime),
         simDV = as.numeric(simTTE < censorTime),
         simTIME = ifelse(simDV==1, simTTE, censorTime)) %>%
  left_join(dd %>% select(ID,DOSE,X))
head(simulatedTimes)
#'
#' As an example, we plot the simulated survival times from the first
#' simulated study.
#'
fit1 = survfit(Surv(simTIME,simDV) ~ DOSE, data=filter(simulatedTimes,irep==1))
survminer::ggsurvplot(fit1)
save(simulatedTimes, file='simulatedTimesForVPC.Rdata')
write_csv(simulatedTimes,path = './simulatedTimesForVPC.csv',na='.',col_names = TRUE)
|
/final_mod/mrgsolveVPC_finalModel.r
|
no_license
|
frenchjl/TTEmanuscript
|
R
| false
| false
| 5,451
|
r
|
#########################################
#
# R code for generating simulated
# TTE data using numerical integration
# of hazard in R.
#
#########################################
library(mrgsolve)
library(dplyr)
library(multidplyr)
library(readr)
library(XML)
#library(flexsurv)
# NOTE(review): flexsurv is not attached, but flexsurv::flexsurvspline() is
# called below, so the package must still be installed.
library(survival)
library(survminer)
#'
#' First, we read the dataset used for fitting model in NONMEM, keeping only the
#' event records.
#'
dd = read_csv('./run6/TTEdat_v11.csv',na='.',guess_max = 100000) %>%
  filter(EVID == 0 & STIME == 0) %>%
  rename(DOSE=dose, X=x, AUC=AUCss)
#'
#' Let's look at a snippet of the data and plot the Kaplan-Meier estimates
#' of the survival curves.
#'
head(dd)
fit_original = survfit(Surv(TIME,DV)~DOSE, data=dd)
survminer::ggsurvplot(fit_original)
#'
#' Define the model using mrgsolve notation. We will point to the NONMEM
#' output in the $NMXML block. For more information about defining models
#' using mrgsolve see the mrgsolve user's guide at
#' http://mrgsolve.github.io/user_guide/.
#'
#path = './run6'
modelCode <- '
$NMXML
file = "./run6/run6.xml"
theta=TRUE, omega=TRUE,sigma=FALSE
$INIT
CUMHAZ = 0
$PARAM
X=0, AUC=0
$MAIN
double EMAX = THETA1;
double EC50 = THETA2;
double COEFF = THETA3;
double DELTA = THETA4;
double LAMBDA = THETA5 * exp(ETA(1));
double COV = COEFF*X; // binary covariate effect
double DEFF = EMAX * AUC / (EC50 + AUC); // exposure effect
double DEL = 1E-6 ; // to keep from taking 0**power
//
// ODE block
//
$ODE
double BASE = DELTA*pow(LAMBDA,DELTA)*pow((SOLVERTIME+DEL),(DELTA-1)); // TV baseline weibell hazard
dxdt_CUMHAZ = BASE * exp(DEFF + COV); // Hazard model + covariate
//
// Table
//
$TABLE
double SURV = exp(-CUMHAZ);
$CAPTURE CUMHAZ SURV
'
#'
#' Next, we compile the mrgsolve model and interrogate the parameter values.
#'
mrgsolveModel <- mcode(model="tte", code=modelCode,project='./run6/',preclean=TRUE)
param(mrgsolveModel)
#'
#' Next, we integrate hazard function for each subject in the trial.
#' We will get the cumulative hazard at a grid of times that are
#' spaced every 0.5 hour. Given the time scale of the original data
#' this grid is sufficiently dense.
survivalCurves = mrgsolveModel %>%
  idata_set(dd) %>%
  mrgsim(delta=(1/7)/2, carry.out='X,DOSE')
head(survivalCurves)
#'
#' Next, we define function for simulating nsims survival outcomes (T)
#' for a single survival curve.
# Inverse-CDF sampling: draw u ~ U(0,1) and invert the survival curve by
# linear interpolation to obtain one event time per draw.
simTimes = function(dd,nsims) {
  uvec = runif(nsims)
  simTTE = sapply(uvec, function(u) approx(x=dd$SURV,y=dd$time,xout=u)$y)
  return(data.frame(ID=dd$ID[1],irep=1:nsims,simTTE=simTTE))
}
#'
#' We will parallelize the simulations using the multidplyr package.
#' To do this, we will set-up a cluster, split the data across clusters,
#' install dplyr library and copy simTimes function
# to each node
mycluster = create_cluster(cores = parallel::detectCores())
survivalCurves = survivalCurves %>%
  partition(ID,cluster= mycluster) %>%
  cluster_library('dplyr') %>%
  cluster_assign_value('simTimes',simTimes)
#'
#' Finally, we will simulate 1000 outcomes for each patient,
#' then recombine and rearrange by simulation number (irep) and ID.
#'
simulatedTimes = survivalCurves %>%
  do(simTimes(.,nsims=1000)) %>%
  collect() %>%
  arrange(irep,ID)
#'
#' Now, we need to simulate the censoring times. To do this, we will
#' use a cubic spline estimate of the cumulative hazard (and survival
#' function) for the censoring process. This is effectively a parametric
#' model which is a close approximation to a Kaplan-Meier estimate
#' of the survival function.
#'
#' For this example, we choose the number of internal knots for the spline
#' through trial-and-error. More formal methods for choosing the number
#' and location of the spline knots could have be used. For this example,
#' four internal knots provides a good fit to the censoring distribution.
#'
censorModel = flexsurv::flexsurvspline(Surv(TIME,1-DV)~1, data=dd, k=4)
plot(censorModel, xlab='Time', ylab='Survival function for censoring process')
#'
#' We will define an administrative censoring time to be the last observed
#' censoring time. Subjects whose simulated censoring time is beyond the
#' last observed censoring time will be set to this administrative censoring
#' time.
#'
adminCensorTime = max(dd$TIME[dd$DV==0])
#'
#' Next, we extract the estimated censoring survival values at a grid of
#' times. We will interpolate between these values and use the inverse CDF
#' method to simulate censoring times.
#'
survEst = summary(censorModel,
                  ci=FALSE,
                  t=seq(0,max(dd$TIME),length=200)) %>%
  as.data.frame()
# Draws of u that fall outside the interpolation grid make approxfun()
# return NA; those are replaced by the administrative censoring time below.
ucens = runif(nrow(simulatedTimes))
simulatedTimes <- simulatedTimes %>%
  ungroup() %>%
  mutate(censorTime = approxfun(x=survEst$est, y=survEst$time)(ucens),
         censorTime = ifelse(is.na(censorTime), adminCensorTime, censorTime),
         simDV = as.numeric(simTTE < censorTime),
         simTIME = ifelse(simDV==1, simTTE, censorTime)) %>%
  left_join(dd %>% select(ID,DOSE,X))
head(simulatedTimes)
#'
#' As an example, we plot the simulated survival times from the first
#' simulated study.
#'
fit1 = survfit(Surv(simTIME,simDV) ~ DOSE, data=filter(simulatedTimes,irep==1))
survminer::ggsurvplot(fit1)
save(simulatedTimes, file='simulatedTimesForVPC.Rdata')
write_csv(simulatedTimes,path = './simulatedTimesForVPC.csv',na='.',col_names = TRUE)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{load.trees}
\alias{load.trees}
\title{Custom functions to load tree lists so that rwty can do basic processing on the way in.}
\usage{
load.trees(file, type = "nexus", gens.per.tree = NA, trim = 1,
skiplines.p = 1)
}
\arguments{
\item{file}{A path to a .t file containing an MCMC chain of trees}
\item{type}{An argument that designates the type of tree file. If "nexus",
trees are loaded using ape's read.nexus function. Otherwise, it's read.tree.}
\item{gens.per.tree}{The number of generations separating trees. If not provided, RWTY will attempt to calculate it automatically.}
\item{trim}{Used for thinning the chain. If a number N is provided, RWTY keeps every Nth tree.}
\item{skiplines.p}{The number of lines that must be skipped to get to the header of the p file.
MrBayes, for instance, prints a comment line at the top of the p file, so MrBayes files should be
read in with a skiplines.p value of 1.}
}
\value{
output An rwty.trees object containing the multiPhylo and the table of values from the .p file if available.
}
\description{
Loads trees, looks for a .p file of tree likelihoods, returns an rwty.trees object containing both
}
\examples{
#load.trees(file="mytrees.nex", type="nexus")
}
\keyword{MCMC,}
\keyword{Phylogenetics,}
\keyword{load}
|
/rwty/man/load.trees.Rd
|
no_license
|
jamiepg1/RWTY
|
R
| false
| false
| 1,333
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{load.trees}
\alias{load.trees}
\title{Custom functions to load tree lists so that rwty can do basic processing on the way in.}
\usage{
load.trees(file, type = "nexus", gens.per.tree = NA, trim = 1,
skiplines.p = 1)
}
\arguments{
\item{file}{A path to a .t file containing an MCMC chain of trees}
\item{type}{An argument that designates the type of tree file. If "nexus",
trees are loaded using ape's read.nexus function. Otherwise, it's read.tree.}
\item{gens.per.tree}{The number of generations separating trees. If not provided, RWTY will attempt to calculate it automatically.}
\item{trim}{Used for thinning the chain. If a number N is provided, RWTY keeps every Nth tree.}
\item{skiplines.p}{The number of lines that must be skipped to get to the header of the p file.
MrBayes, for instance, prints a comment line at the top of the p file, so MrBayes files should be
read in with a skiplines.p value of 1.}
}
\value{
output An rwty.trees object containing the multiPhylo and the table of values from the .p file if available.
}
\description{
Loads trees, looks for a .p file of tree likelihoods, returns an rwty.trees object containing both
}
\examples{
#load.trees(file="mytrees.nex", type="nexus")
}
\keyword{MCMC,}
\keyword{Phylogenetics,}
\keyword{load}
|
# Example from the BayesDA package: the `cow` dataset, data from an
# experiment where treatment assignment was based on covariates.
library(BayesDA)

data(cow)
summary(cow)
names(cow)

# Check balance of the pretreatment variables across treatment levels:
# mean lactation and mean age within each level.
with(cow, tapply(lactation, level, mean))
with(cow, tapply(age, level, mean))
|
/data/genthat_extracted_code/BayesDA/examples/cow.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 344
|
r
|
# Example from the BayesDA package: the `cow` dataset, data from an
# experiment where treatment assignment was based on covariates.
library(BayesDA)

data(cow)
summary(cow)
names(cow)

# Check balance of the pretreatment variables across treatment levels:
# mean lactation and mean age within each level.
with(cow, tapply(lactation, level, mean))
with(cow, tapply(age, level, mean))
|
#!/usr/bin/env Rscript
# Command-line wrapper around maftools::annovarToMaf: converts ANNOVAR
# annotation output (TSV, or '-' for stdin) into MAF format.
library("optparse")
library("data.table")
V="Version: 1.0"
D="Depends: R (>= 3.4.0), optparse, data.table"
a = commandArgs(trailingOnly=TRUE)
option_list = list(
    make_option(c("--output_prefix"), type="character", default="annovarToMaf",
        help="If provided writes resulting MAF file to an output file [%default].", metavar="character"),
    make_option(c("--center"), type="character", default="NA",
        help="Center field in MAF file will be filled with this value [%default].", metavar="character"),
    make_option(c("--refBuild"), type="character", default="hg19",
        help="NCBI_Build field in MAF file will be filled with this value [%default].", metavar="character"),
    make_option(c("--tsbCol"), type="character", default="Tumor_Sample_Barcode",
        help="column name containing Tumor_Sample_Barcode or sample names in input file [%default].", metavar="character"),
    make_option(c("--ref_table"), type="character", default="refGene",
        help="reference table used for gene-based annotations. Can be 'ensGene' or 'refGene' [%default].", metavar="character")
)
opt_parser <- OptionParser(usage="usage: %prog [options] <input.tsv> \n\t<input.tsv> is a TSV file (separator: '\\t', stdin: '-').", option_list=option_list, description = paste(V, D, sep="\n"))
opt <- parse_args(opt_parser, args=a, positional_arguments=TRUE)
output_prefix <- opt$options$output_prefix
center <- opt$options$center
refBuild <- opt$options$refBuild
tsbCol <- opt$options$tsbCol
ref_table <- opt$options$ref_table
# '-' as the positional argument means read the table from stdin.
d <- ifelse(opt$args[1]=='-', 'file:///dev/stdin', opt$args[1]) # Data_path
# main
# Prepend the user library (~/lib) so a locally installed maftools is found
# before any site-wide version; must run before library("maftools").
.libPaths(c("~/lib", .libPaths()))
library("maftools")
ann_maf = annovarToMaf(annovar = d, Center = center, refBuild = refBuild,
                       tsbCol = tsbCol, table = ref_table, basename = output_prefix)
### THE END ###
|
/maftools_annovarToMaf.R
|
permissive
|
chunyangbao/cbao_utilities
|
R
| false
| false
| 1,869
|
r
|
#!/usr/bin/env Rscript
# Command-line wrapper around maftools::annovarToMaf: converts ANNOVAR
# annotation output (TSV, or '-' for stdin) into MAF format.
library("optparse")
library("data.table")
V="Version: 1.0"
D="Depends: R (>= 3.4.0), optparse, data.table"
a = commandArgs(trailingOnly=TRUE)
option_list = list(
    make_option(c("--output_prefix"), type="character", default="annovarToMaf",
        help="If provided writes resulting MAF file to an output file [%default].", metavar="character"),
    make_option(c("--center"), type="character", default="NA",
        help="Center field in MAF file will be filled with this value [%default].", metavar="character"),
    make_option(c("--refBuild"), type="character", default="hg19",
        help="NCBI_Build field in MAF file will be filled with this value [%default].", metavar="character"),
    make_option(c("--tsbCol"), type="character", default="Tumor_Sample_Barcode",
        help="column name containing Tumor_Sample_Barcode or sample names in input file [%default].", metavar="character"),
    make_option(c("--ref_table"), type="character", default="refGene",
        help="reference table used for gene-based annotations. Can be 'ensGene' or 'refGene' [%default].", metavar="character")
)
opt_parser <- OptionParser(usage="usage: %prog [options] <input.tsv> \n\t<input.tsv> is a TSV file (separator: '\\t', stdin: '-').", option_list=option_list, description = paste(V, D, sep="\n"))
opt <- parse_args(opt_parser, args=a, positional_arguments=TRUE)
output_prefix <- opt$options$output_prefix
center <- opt$options$center
refBuild <- opt$options$refBuild
tsbCol <- opt$options$tsbCol
ref_table <- opt$options$ref_table
# '-' as the positional argument means read the table from stdin.
d <- ifelse(opt$args[1]=='-', 'file:///dev/stdin', opt$args[1]) # Data_path
# main
# Prepend the user library (~/lib) so a locally installed maftools is found
# before any site-wide version; must run before library("maftools").
.libPaths(c("~/lib", .libPaths()))
library("maftools")
ann_maf = annovarToMaf(annovar = d, Center = center, refBuild = refBuild,
                       tsbCol = tsbCol, table = ref_table, basename = output_prefix)
### THE END ###
|
library(readr)

# Dataset: column 1 is assumed to be a label/index, columns 2:14 the features.
dataset <- read_csv("Downloads/dataset.csv", col_names = FALSE)
data <- dataset[, 2:14]
data_norm <- scale(data)

# Density-based noise detection (DBSCAN-style core-point test): a point is
# noise when fewer than minPoints observations (itself included, since its
# self-distance of 0 always counts) lie within epsilon of it.
# Uses the full Euclidean distance matrix instead of the original O(n^2)
# per-pair dist() calls.
minPoints <- 3
epsilon <- 3
dist_mat <- as.matrix(dist(data_norm))
noise <- which(rowSums(dist_mat < epsilon) < minPoints)

# Drop noise rows before clustering. Guard the empty case: with no noise,
# `data_norm[-integer(0), ]` would silently drop every row.
if (length(noise) > 0) {
  data_norm <- data_norm[-noise, , drop = FALSE]
}

# Ward-linkage hierarchical clustering cut into 3 clusters.
c_ward <- hclust(dist(data_norm), method = "ward.D2")
ward_cut <- cutree(c_ward, 3)

# PCA projection of the full dataset (noise included) for visualisation.
pca <- prcomp(data, center = TRUE, scale. = TRUE)
dim_red <- predict(pca, newdata = data)

# Colour vector over all rows (generalised from the hard-coded 1:178):
# 0 marks noise, otherwise the cluster label of the corresponding row.
col <- numeric(nrow(data))
col[setdiff(seq_len(nrow(data)), noise)] <- ward_cut

library('plotly')
plot_ly(data.frame(dim_red), x = dim_red[, 1], y = dim_red[, 2], z = dim_red[, 3], color = col)
|
/ex07/unknowndata.r
|
no_license
|
SimonGiebenhain/AnaVis
|
R
| false
| false
| 874
|
r
|
library(readr)

# Dataset: column 1 is assumed to be a label/index, columns 2:14 the features.
dataset <- read_csv("Downloads/dataset.csv", col_names = FALSE)
data <- dataset[, 2:14]
data_norm <- scale(data)

# Density-based noise detection (DBSCAN-style core-point test): a point is
# noise when fewer than minPoints observations (itself included, since its
# self-distance of 0 always counts) lie within epsilon of it.
# Uses the full Euclidean distance matrix instead of the original O(n^2)
# per-pair dist() calls.
minPoints <- 3
epsilon <- 3
dist_mat <- as.matrix(dist(data_norm))
noise <- which(rowSums(dist_mat < epsilon) < minPoints)

# Drop noise rows before clustering. Guard the empty case: with no noise,
# `data_norm[-integer(0), ]` would silently drop every row.
if (length(noise) > 0) {
  data_norm <- data_norm[-noise, , drop = FALSE]
}

# Ward-linkage hierarchical clustering cut into 3 clusters.
c_ward <- hclust(dist(data_norm), method = "ward.D2")
ward_cut <- cutree(c_ward, 3)

# PCA projection of the full dataset (noise included) for visualisation.
pca <- prcomp(data, center = TRUE, scale. = TRUE)
dim_red <- predict(pca, newdata = data)

# Colour vector over all rows (generalised from the hard-coded 1:178):
# 0 marks noise, otherwise the cluster label of the corresponding row.
col <- numeric(nrow(data))
col[setdiff(seq_len(nrow(data)), noise)] <- ward_cut

library('plotly')
plot_ly(data.frame(dim_red), x = dim_red[, 1], y = dim_red[, 2], z = dim_red[, 3], color = col)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primary-keys.R
\name{cdm_get_pk}
\alias{cdm_get_pk}
\title{Retrieve the name of the primary key column of a \code{dm} table}
\usage{
cdm_get_pk(dm, table)
}
\arguments{
\item{dm}{A \code{dm} object.}
\item{table}{A table in the \code{dm}.}
}
\description{
\code{cdm_get_pk()} returns the name of the
column marked as primary key of a table of a \code{\link{dm}} object.
If no primary key is
set for the table, an empty character vector is returned.
}
\examples{
library(dplyr)
nycflights_dm <- cdm_nycflights13()
nycflights_dm \%>\%
cdm_get_pk(planes)
}
\seealso{
Other primary key functions:
\code{\link{cdm_add_pk}()},
\code{\link{cdm_get_all_pks}()},
\code{\link{cdm_has_pk}()},
\code{\link{cdm_rm_pk}()},
\code{\link{enum_pk_candidates}()}
}
\concept{primary key functions}
|
/man/cdm_get_pk.Rd
|
permissive
|
bbecane/dm
|
R
| false
| true
| 861
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primary-keys.R
\name{cdm_get_pk}
\alias{cdm_get_pk}
\title{Retrieve the name of the primary key column of a \code{dm} table}
\usage{
cdm_get_pk(dm, table)
}
\arguments{
\item{dm}{A \code{dm} object.}
\item{table}{A table in the \code{dm}.}
}
\description{
\code{cdm_get_pk()} returns the name of the
column marked as primary key of a table of a \code{\link{dm}} object.
If no primary key is
set for the table, an empty character vector is returned.
}
\examples{
library(dplyr)
nycflights_dm <- cdm_nycflights13()
nycflights_dm \%>\%
cdm_get_pk(planes)
}
\seealso{
Other primary key functions:
\code{\link{cdm_add_pk}()},
\code{\link{cdm_get_all_pks}()},
\code{\link{cdm_has_pk}()},
\code{\link{cdm_rm_pk}()},
\code{\link{enum_pk_candidates}()}
}
\concept{primary key functions}
|
# Regression input (valgrind fuzz case) for multivariance:::match_rows.
# A is a 1x2 matrix of extreme doubles; B is 5x7 with one extreme value.
inputs <- list(
  A = structure(c(2.17107980817984e+205, 9.53818252179844e+295), .Dim = 1:2),
  B = structure(c(2.19477796793271e+294, rep(0, 34)), .Dim = c(5L, 7L))
)
out <- do.call(multivariance:::match_rows, inputs)
str(out)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613122592-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 323
|
r
|
# Regression input (valgrind fuzz case) for multivariance:::match_rows.
# A is a 1x2 matrix of extreme doubles; B is 5x7 with one extreme value.
inputs <- list(
  A = structure(c(2.17107980817984e+205, 9.53818252179844e+295), .Dim = 1:2),
  B = structure(c(2.19477796793271e+294, rep(0, 34)), .Dim = c(5L, 7L))
)
out <- do.call(multivariance:::match_rows, inputs)
str(out)
|
# Partition the NY property data by information completeness:
# rows with few missing (zero) indicator values go to the "low score"
# set, the rest to the "high score" set.
load("mydata.rda")
data = datacsv1

## Replace NA with 0 in the indicator columns.
quality_cols <- c(9:14, 19)
for (i in quality_cols) {
  data[, i] = ifelse(is.na(data[, i]), 0, data[, i])
}
data_index = data[quality_cols]

## Score each row: +1 for every indicator column equal to 0
## (higher score = more missing information).
data_index$Score = 0
for (i in seq_along(quality_cols)) {
  data_index$Score = data_index$Score + ifelse(data_index[, i] == 0, 1, 0)
}

## Drop the character columns of data.
data = data[, -c(2, 5, 6, 18, 20, 28:30)]

## Partition on the completeness score.
data_lowscore = subset(data, data_index$Score <= 2.5); nrow(data_lowscore)
data_highscore = subset(data, data_index$Score > 2.5); nrow(data_highscore)

## Save the two datasets.
write.csv(data_lowscore, "data_lowscore.csv", row.names = FALSE)
write.csv(data_highscore, "data_highscore.csv", row.names = FALSE)

# Visualize the distribution of the score.
library(ggplot2)
ggplot(data = data_index, aes(x = as.factor(Score))) +
  geom_bar() +
  ggtitle("Distribution of Score")
|
/Project_Data_Partition.R
|
no_license
|
YurongJiang/Unsupervised-NY-property-fraud-detection-model
|
R
| false
| false
| 948
|
r
|
# Partition the NY property data by information completeness:
# rows with few missing (zero) indicator values go to the "low score"
# set, the rest to the "high score" set.
load("mydata.rda")
data = datacsv1

## Replace NA with 0 in the indicator columns.
quality_cols <- c(9:14, 19)
for (i in quality_cols) {
  data[, i] = ifelse(is.na(data[, i]), 0, data[, i])
}
data_index = data[quality_cols]

## Score each row: +1 for every indicator column equal to 0
## (higher score = more missing information).
data_index$Score = 0
for (i in seq_along(quality_cols)) {
  data_index$Score = data_index$Score + ifelse(data_index[, i] == 0, 1, 0)
}

## Drop the character columns of data.
data = data[, -c(2, 5, 6, 18, 20, 28:30)]

## Partition on the completeness score.
data_lowscore = subset(data, data_index$Score <= 2.5); nrow(data_lowscore)
data_highscore = subset(data, data_index$Score > 2.5); nrow(data_highscore)

## Save the two datasets.
write.csv(data_lowscore, "data_lowscore.csv", row.names = FALSE)
write.csv(data_highscore, "data_highscore.csv", row.names = FALSE)

# Visualize the distribution of the score.
library(ggplot2)
ggplot(data = data_index, aes(x = as.factor(Score))) +
  geom_bar() +
  ggtitle("Distribution of Score")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.plsRcox.R
\name{cv.plsRcox}
\alias{cv.plsRcox}
\title{Cross-validating a plsRcox-Model}
\usage{
cv.plsRcox(
data,
method = c("efron", "breslow"),
nfold = 5,
nt = 10,
plot.it = TRUE,
se = TRUE,
givefold,
scaleX = TRUE,
folddetails = FALSE,
allCVcrit = FALSE,
details = FALSE,
namedataset = "data",
save = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{data}{A list of three items: \itemize{ \item\code{x} the explanatory
variables passed to \code{\link{plsRcox}}'s \code{Xplan} argument,
\item\code{time} passed to \code{\link{plsRcox}}'s \code{time} argument,
\item\code{status} \code{\link{plsRcox}}'s \code{status} argument. }}
\item{method}{A character string specifying the method for tie handling. If
there are no tied death times all the methods are equivalent. The Efron
approximation is used as the default here, it is more accurate when dealing
with tied death times, and is as efficient computationally.}
\item{nfold}{The number of folds to use to perform the cross-validation
process.}
\item{nt}{The number of components to include in the model. If this is not
supplied, 10 components are fitted.}
\item{plot.it}{Shall the results be displayed on a plot ?}
\item{se}{Should standard errors be plotted ?}
\item{givefold}{Explicit list of omitted values in each fold can be provided
using this argument.}
\item{scaleX}{Shall the predictors be standardized ?}
\item{folddetails}{Should values and completion status for each folds be
returned ?}
\item{allCVcrit}{Should the other 13 CV criteria be evaluated and returned ?}
\item{details}{Should all results of the functions that perform error
computations be returned ?}
\item{namedataset}{Name to use to craft temporary results names}
\item{save}{Should temporary results be saved ?}
\item{verbose}{Should some CV details be displayed ?}
\item{\dots}{Other arguments to pass to \code{\link{plsRcox}}.}
}
\value{
\item{nt}{The number of components requested}
\item{cv.error1}{Vector with the mean values, across folds, of, per fold
unit, Cross-validated log-partial-likelihood for models with 0 to nt
components.} \item{cv.error2}{Vector with the mean values, across folds, of,
per fold unit, van Houwelingen Cross-validated log-partial-likelihood for
models with 0 to nt components.} \item{cv.error3}{Vector with the mean
values, across folds, of iAUC_CD for models with 0 to nt components.}
\item{cv.error4}{Vector with the mean values, across folds, of iAUC_hc for
models with 0 to nt components.} \item{cv.error5}{Vector with the mean
values, across folds, of iAUC_sh for models with 0 to nt components.}
\item{cv.error6}{Vector with the mean values, across folds, of iAUC_Uno for
models with 0 to nt components.} \item{cv.error7}{Vector with the mean
values, across folds, of iAUC_hz.train for models with 0 to nt components.}
\item{cv.error8}{Vector with the mean values, across folds, of iAUC_hz.test
for models with 0 to nt components.} \item{cv.error9}{Vector with the mean
values, across folds, of iAUC_survivalROC.train for models with 0 to nt
components.} \item{cv.error10}{Vector with the mean values, across folds, of
iAUC_survivalROC.test for models with 0 to nt components.}
\item{cv.error11}{Vector with the mean values, across folds, of iBrierScore
unw for models with 0 to nt components.} \item{cv.error12}{Vector with the
mean values, across folds, of iSchmidScore (robust BS) unw for models with 0
to nt components.} \item{cv.error13}{Vector with the mean values, across
folds, of iBrierScore w for models with 0 to nt components.}
\item{cv.error14}{Vector with the mean values, across folds, of iSchmidScore
(robust BS) w for models with 0 to nt components.} \item{cv.se1}{Vector with
the standard error values, across folds, of, per fold unit, Cross-validated
log-partial-likelihood for models with 0 to nt components.}
\item{cv.se2}{Vector with the standard error values, across folds, of, per
fold unit, van Houwelingen Cross-validated log-partial-likelihood for models
with 0 to nt components.} \item{cv.se3}{Vector with the standard error
values, across folds, of iAUC_CD for models with 0 to nt components.}
\item{cv.se4}{Vector with the standard error values, across folds, of
iAUC_hc for models with 0 to nt components.} \item{cv.se5}{Vector with the
standard error values, across folds, of iAUC_sh for models with 0 to nt
components.} \item{cv.se6}{Vector with the standard error values, across
folds, of iAUC_Uno for models with 0 to nt components.} \item{cv.se7}{Vector
with the standard error values, across folds, of iAUC_hz.train for models
with 0 to nt components.} \item{cv.se8}{Vector with the standard error
values, across folds, of iAUC_hz.test for models with 0 to nt components.}
\item{cv.se9}{Vector with the standard error values, across folds, of
iAUC_survivalROC.train for models with 0 to nt components.}
\item{cv.se10}{Vector with the standard error values, across folds, of
iAUC_survivalROC.test for models with 0 to nt components.}
\item{cv.se11}{Vector with the standard error values, across folds, of
iBrierScore unw for models with 0 to nt components.} \item{cv.se12}{Vector
with the standard error values, across folds, of iSchmidScore (robust BS)
unw for models with 0 to nt components.} \item{cv.se13}{Vector with the
standard error values, across folds, of iBrierScore w for models with 0 to
nt components.} \item{cv.se14}{Vector with the standard error values, across
folds, of iSchmidScore (robust BS) w for models with 0 to nt components.}
\item{folds}{Explicit list of the values that were omitted in each
fold.} \item{lambda.min1}{Vector with the standard error values, across
folds, of, per fold unit, Cross-validated log-partial-likelihood for models
with 0 to nt components.} \item{lambda.min2}{Vector with the standard error
values, across folds, of, per fold unit, van Houwelingen Cross-validated
log-partial-likelihood for models with 0 to nt components.}
\item{lambda.min1}{Optimal Nbr of components, min Cross-validated
log-partial-likelihood criterion.} \item{lambda.se1}{Optimal Nbr of
components, min+1se Cross-validated log-partial-likelihood criterion.}
\item{lambda.min2}{Optimal Nbr of components, min van Houwelingen
Cross-validated log-partial-likelihood.} \item{lambda.se2}{Optimal Nbr of
components, min+1se van Houwelingen Cross-validated log-partial-likelihood.}
\item{lambda.min3}{Optimal Nbr of components, max iAUC_CD criterion.}
\item{lambda.se3}{Optimal Nbr of components, max+1se iAUC_CD criterion.}
\item{lambda.min4}{Optimal Nbr of components, max iAUC_hc criterion.}
\item{lambda.se4}{Optimal Nbr of components, max+1se iAUC_hc criterion.}
\item{lambda.min5}{Optimal Nbr of components, max iAUC_sh criterion.}
\item{lambda.se5}{Optimal Nbr of components, max+1se iAUC_sh criterion.}
\item{lambda.min6}{Optimal Nbr of components, max iAUC_Uno criterion.}
\item{lambda.se6}{Optimal Nbr of components, max+1se iAUC_Uno criterion.}
\item{lambda.min7}{Optimal Nbr of components, max iAUC_hz.train criterion.}
\item{lambda.se7}{Optimal Nbr of components, max+1se iAUC_hz.train
criterion.} \item{lambda.min8}{Optimal Nbr of components, max iAUC_hz.test
criterion.} \item{lambda.se8}{Optimal Nbr of components, max+1se
iAUC_hz.test criterion.} \item{lambda.min9}{Optimal Nbr of components, max
iAUC_survivalROC.train criterion.} \item{lambda.se9}{Optimal Nbr of
components, max+1se iAUC_survivalROC.train criterion.}
\item{lambda.min10}{Optimal Nbr of components, max iAUC_survivalROC.test
criterion.} \item{lambda.se10}{Optimal Nbr of components, max+1se
iAUC_survivalROC.test criterion.} \item{lambda.min11}{Optimal Nbr of
components, min iBrierScore unw criterion.} \item{lambda.se11}{Optimal Nbr
of components, min+1se iBrierScore unw criterion.}
\item{lambda.min12}{Optimal Nbr of components, min iSchmidScore unw
criterion.} \item{lambda.se12}{Optimal Nbr of components, min+1se
iSchmidScore unw criterion.} \item{lambda.min13}{Optimal Nbr of components,
min iBrierScore w criterion.} \item{lambda.se13}{Optimal Nbr of components,
min+1se iBrierScore w criterion.} \item{lambda.min14}{Optimal Nbr of
components, min iSchmidScore w criterion.} \item{lambda.se14}{Optimal Nbr of
components, min+1se iSchmidScore w criterion.} \item{errormat1-14}{If
\code{details=TRUE}, matrices with the error values for every folds across
each of the components and each of the criteria} \item{completed.cv1-14}{If
\code{details=TRUE}, matrices with logical values for every folds across
each of the components and each of the criteria: \code{TRUE} if the
computation was completed and \code{FALSE} if it failed.}
\item{All_indics}{All results of the functions that perform error
computation, for each fold, each component and error criterion.}
}
\description{
This function cross-validates \link{plsRcox} models.\cr
}
\details{
It only computes the recommended iAUCSH criterion. Set \code{allCVcrit=TRUE}
to retrieve the 13 other ones.
}
\examples{
data(micro.censure)
data(Xmicro.censure_compl_imp)
set.seed(123456)
X_train_micro <- apply((as.matrix(Xmicro.censure_compl_imp)),FUN="as.numeric",MARGIN=2)[1:80,]
X_train_micro_df <- data.frame(X_train_micro)
Y_train_micro <- micro.censure$survyear[1:80]
C_train_micro <- micro.censure$DC[1:80]
#Should be run with a higher value of nt (at least 10)
(cv.plsRcox.res=cv.plsRcox(list(x=X_train_micro,time=Y_train_micro,status=C_train_micro),nt=3))
}
\references{
plsRcox, Cox-Models in a high dimensional setting in R, Frederic
Bertrand, Philippe Bastien, Nicolas Meyer and Myriam Maumy-Bertrand (2014).
Proceedings of User2014!, Los Angeles, page 152.\cr
Deviance residuals-based sparse PLS and sparse kernel PLS regression for
censored data, Philippe Bastien, Frederic Bertrand, Nicolas Meyer and Myriam
Maumy-Bertrand (2015), Bioinformatics, 31(3):397-404,
doi:10.1093/bioinformatics/btu660.
Cross validating extensions of kernel, sparse or regular partial least
squares regression models to censored data, Bertrand, F., Bastien, Ph. and
Maumy-Bertrand, M. (2018), \url{https://arxiv.org/abs/1810.01005}.
}
\seealso{
See Also \code{\link{plsRcox}}
}
\author{
Frédéric Bertrand\cr
\email{frederic.bertrand@utt.fr}\cr
\url{http://www-irma.u-strasbg.fr/~fbertran/}
}
\keyword{models}
\keyword{regression}
|
/man/cv.plsRcox.Rd
|
no_license
|
cran/plsRcox
|
R
| false
| true
| 10,322
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.plsRcox.R
\name{cv.plsRcox}
\alias{cv.plsRcox}
\title{Cross-validating a plsRcox-Model}
\usage{
cv.plsRcox(
data,
method = c("efron", "breslow"),
nfold = 5,
nt = 10,
plot.it = TRUE,
se = TRUE,
givefold,
scaleX = TRUE,
folddetails = FALSE,
allCVcrit = FALSE,
details = FALSE,
namedataset = "data",
save = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{data}{A list of three items: \itemize{ \item\code{x} the explanatory
variables passed to \code{\link{plsRcox}}'s \code{Xplan} argument,
\item\code{time} passed to \code{\link{plsRcox}}'s \code{time} argument,
\item\code{status} \code{\link{plsRcox}}'s \code{status} argument. }}
\item{method}{A character string specifying the method for tie handling. If
there are no tied death times all the methods are equivalent. The Efron
approximation is used as the default here, it is more accurate when dealing
with tied death times, and is as efficient computationally.}
\item{nfold}{The number of folds to use to perform the cross-validation
process.}
\item{nt}{The number of components to include in the model. If this is not
supplied, 10 components are fitted.}
\item{plot.it}{Shall the results be displayed on a plot ?}
\item{se}{Should standard errors be plotted ?}
\item{givefold}{Explicit list of omitted values in each fold can be provided
using this argument.}
\item{scaleX}{Shall the predictors be standardized ?}
\item{folddetails}{Should values and completion status for each folds be
returned ?}
\item{allCVcrit}{Should the other 13 CV criteria be evaluated and returned ?}
\item{details}{Should all results of the functions that perform error
computations be returned ?}
\item{namedataset}{Name to use to craft temporary results names}
\item{save}{Should temporary results be saved ?}
\item{verbose}{Should some CV details be displayed ?}
\item{\dots}{Other arguments to pass to \code{\link{plsRcox}}.}
}
\value{
\item{nt}{The number of components requested}
\item{cv.error1}{Vector with the mean values, across folds, of, per fold
unit, Cross-validated log-partial-likelihood for models with 0 to nt
components.} \item{cv.error2}{Vector with the mean values, across folds, of,
per fold unit, van Houwelingen Cross-validated log-partial-likelihood for
models with 0 to nt components.} \item{cv.error3}{Vector with the mean
values, across folds, of iAUC_CD for models with 0 to nt components.}
\item{cv.error4}{Vector with the mean values, across folds, of iAUC_hc for
models with 0 to nt components.} \item{cv.error5}{Vector with the mean
values, across folds, of iAUC_sh for models with 0 to nt components.}
\item{cv.error6}{Vector with the mean values, across folds, of iAUC_Uno for
models with 0 to nt components.} \item{cv.error7}{Vector with the mean
values, across folds, of iAUC_hz.train for models with 0 to nt components.}
\item{cv.error8}{Vector with the mean values, across folds, of iAUC_hz.test
for models with 0 to nt components.} \item{cv.error9}{Vector with the mean
values, across folds, of iAUC_survivalROC.train for models with 0 to nt
components.} \item{cv.error10}{Vector with the mean values, across folds, of
iAUC_survivalROC.test for models with 0 to nt components.}
\item{cv.error11}{Vector with the mean values, across folds, of iBrierScore
unw for models with 0 to nt components.} \item{cv.error12}{Vector with the
mean values, across folds, of iSchmidScore (robust BS) unw for models with 0
to nt components.} \item{cv.error13}{Vector with the mean values, across
folds, of iBrierScore w for models with 0 to nt components.}
\item{cv.error14}{Vector with the mean values, across folds, of iSchmidScore
(robust BS) w for models with 0 to nt components.} \item{cv.se1}{Vector with
the standard error values, across folds, of, per fold unit, Cross-validated
log-partial-likelihood for models with 0 to nt components.}
\item{cv.se2}{Vector with the standard error values, across folds, of, per
fold unit, van Houwelingen Cross-validated log-partial-likelihood for models
with 0 to nt components.} \item{cv.se3}{Vector with the standard error
values, across folds, of iAUC_CD for models with 0 to nt components.}
\item{cv.se4}{Vector with the standard error values, across folds, of
iAUC_hc for models with 0 to nt components.} \item{cv.se5}{Vector with the
standard error values, across folds, of iAUC_sh for models with 0 to nt
components.} \item{cv.se6}{Vector with the standard error values, across
folds, of iAUC_Uno for models with 0 to nt components.} \item{cv.se7}{Vector
with the standard error values, across folds, of iAUC_hz.train for models
with 0 to nt components.} \item{cv.se8}{Vector with the standard error
values, across folds, of iAUC_hz.test for models with 0 to nt components.}
\item{cv.se9}{Vector with the standard error values, across folds, of
iAUC_survivalROC.train for models with 0 to nt components.}
\item{cv.se10}{Vector with the standard error values, across folds, of
iAUC_survivalROC.test for models with 0 to nt components.}
\item{cv.se11}{Vector with the standard error values, across folds, of
iBrierScore unw for models with 0 to nt components.} \item{cv.se12}{Vector
with the standard error values, across folds, of iSchmidScore (robust BS)
unw for models with 0 to nt components.} \item{cv.se13}{Vector with the
standard error values, across folds, of iBrierScore w for models with 0 to
nt components.} \item{cv.se14}{Vector with the standard error values, across
folds, of iSchmidScore (robust BS) w for models with 0 to nt components.}
\item{folds}{Explicit list of the values that were omitted in each
fold.} \item{lambda.min1}{Vector with the standard error values, across
folds, of, per fold unit, Cross-validated log-partial-likelihood for models
with 0 to nt components.} \item{lambda.min2}{Vector with the standard error
values, across folds, of, per fold unit, van Houwelingen Cross-validated
log-partial-likelihood for models with 0 to nt components.}
\item{lambda.min1}{Optimal Nbr of components, min Cross-validated
log-partial-likelihood criterion.} \item{lambda.se1}{Optimal Nbr of
components, min+1se Cross-validated log-partial-likelihood criterion.}
\item{lambda.min2}{Optimal Nbr of components, min van Houwelingen
Cross-validated log-partial-likelihood.} \item{lambda.se2}{Optimal Nbr of
components, min+1se van Houwelingen Cross-validated log-partial-likelihood.}
\item{lambda.min3}{Optimal Nbr of components, max iAUC_CD criterion.}
\item{lambda.se3}{Optimal Nbr of components, max+1se iAUC_CD criterion.}
\item{lambda.min4}{Optimal Nbr of components, max iAUC_hc criterion.}
\item{lambda.se4}{Optimal Nbr of components, max+1se iAUC_hc criterion.}
\item{lambda.min5}{Optimal Nbr of components, max iAUC_sh criterion.}
\item{lambda.se5}{Optimal Nbr of components, max+1se iAUC_sh criterion.}
\item{lambda.min6}{Optimal Nbr of components, max iAUC_Uno criterion.}
\item{lambda.se6}{Optimal Nbr of components, max+1se iAUC_Uno criterion.}
\item{lambda.min7}{Optimal Nbr of components, max iAUC_hz.train criterion.}
\item{lambda.se7}{Optimal Nbr of components, max+1se iAUC_hz.train
criterion.} \item{lambda.min8}{Optimal Nbr of components, max iAUC_hz.test
criterion.} \item{lambda.se8}{Optimal Nbr of components, max+1se
iAUC_hz.test criterion.} \item{lambda.min9}{Optimal Nbr of components, max
iAUC_survivalROC.train criterion.} \item{lambda.se9}{Optimal Nbr of
components, max+1se iAUC_survivalROC.train criterion.}
\item{lambda.min10}{Optimal Nbr of components, max iAUC_survivalROC.test
criterion.} \item{lambda.se10}{Optimal Nbr of components, max+1se
iAUC_survivalROC.test criterion.} \item{lambda.min11}{Optimal Nbr of
components, min iBrierScore unw criterion.} \item{lambda.se11}{Optimal Nbr
of components, min+1se iBrierScore unw criterion.}
\item{lambda.min12}{Optimal Nbr of components, min iSchmidScore unw
criterion.} \item{lambda.se12}{Optimal Nbr of components, min+1se
iSchmidScore unw criterion.} \item{lambda.min13}{Optimal Nbr of components,
min iBrierScore w criterion.} \item{lambda.se13}{Optimal Nbr of components,
min+1se iBrierScore w criterion.} \item{lambda.min14}{Optimal Nbr of
components, min iSchmidScore w criterion.} \item{lambda.se14}{Optimal Nbr of
components, min+1se iSchmidScore w criterion.} \item{errormat1-14}{If
\code{details=TRUE}, matrices with the error values for every folds across
each of the components and each of the criteria} \item{completed.cv1-14}{If
\code{details=TRUE}, matrices with logical values for every folds across
each of the components and each of the criteria: \code{TRUE} if the
computation was completed and \code{FALSE} if it failed.}
\item{All_indics}{All results of the functions that perform error
computation, for each fold, each component and error criterion.}
}
\description{
This function cross-validates \link{plsRcox} models.\cr
}
\details{
It only computes the recommended iAUCSH criterion. Set \code{allCVcrit=TRUE}
to retrieve the 13 other ones.
}
\examples{
data(micro.censure)
data(Xmicro.censure_compl_imp)
set.seed(123456)
X_train_micro <- apply((as.matrix(Xmicro.censure_compl_imp)),FUN="as.numeric",MARGIN=2)[1:80,]
X_train_micro_df <- data.frame(X_train_micro)
Y_train_micro <- micro.censure$survyear[1:80]
C_train_micro <- micro.censure$DC[1:80]
#Should be run with a higher value of nt (at least 10)
(cv.plsRcox.res=cv.plsRcox(list(x=X_train_micro,time=Y_train_micro,status=C_train_micro),nt=3))
}
\references{
plsRcox, Cox-Models in a high dimensional setting in R, Frederic
Bertrand, Philippe Bastien, Nicolas Meyer and Myriam Maumy-Bertrand (2014).
Proceedings of User2014!, Los Angeles, page 152.\cr
Deviance residuals-based sparse PLS and sparse kernel PLS regression for
censored data, Philippe Bastien, Frederic Bertrand, Nicolas Meyer and Myriam
Maumy-Bertrand (2015), Bioinformatics, 31(3):397-404,
doi:10.1093/bioinformatics/btu660.
Cross validating extensions of kernel, sparse or regular partial least
squares regression models to censored data, Bertrand, F., Bastien, Ph. and
Maumy-Bertrand, M. (2018), \url{https://arxiv.org/abs/1810.01005}.
}
\seealso{
See Also \code{\link{plsRcox}}
}
\author{
Frédéric Bertrand\cr
\email{frederic.bertrand@utt.fr}\cr
\url{http://www-irma.u-strasbg.fr/~fbertran/}
}
\keyword{models}
\keyword{regression}
|
#' Determine bearish engulfing pattern using a OHLC price series
#'
#' A bearish engulfing pattern occurs when a down (bearish) candle's real
#' body completely engulfs the previous up (bullish) candle's real body.
#'
#' @param x OHLC prices.
#' @return TRUE if bearish engulfing pattern detected
#' @export
bearish.engulf <- function(x) {
  body_top <- CandleBodyTop(x)
  body_bottom <- CandleBodyBottom(x)
  is_down <- bearish.candle(x)
  prev_was_up <- quantmod::Lag(bullish.candle(x))
  # Current candle is bearish, previous was bullish, and the current body
  # extends beyond the previous body on both ends.
  engulfed <- is_down &
    prev_was_up &
    body_top >= quantmod::Lag(body_top) &
    body_bottom <= quantmod::Lag(body_bottom)
  result <- xts::reclass(engulfed, x)
  colnames(result) <- "bearish engulfing"
  return(result)
}
|
/R/bearish.engulf.R
|
no_license
|
Roshan2540/CandleStickPattern
|
R
| false
| false
| 578
|
r
|
#' Determine bearish engulfing pattern using a OHLC price series
#'
#' A bearish engulfing pattern occurs when a down (bearish) candle's real
#' body completely engulfs the previous up (bullish) candle's real body.
#'
#' @param x OHLC prices.
#' @return TRUE if bearish engulfing pattern detected
#' @export
bearish.engulf <- function(x) {
  body_top <- CandleBodyTop(x)
  body_bottom <- CandleBodyBottom(x)
  is_down <- bearish.candle(x)
  prev_was_up <- quantmod::Lag(bullish.candle(x))
  # Current candle is bearish, previous was bullish, and the current body
  # extends beyond the previous body on both ends.
  engulfed <- is_down &
    prev_was_up &
    body_top >= quantmod::Lag(body_top) &
    body_bottom <= quantmod::Lag(body_bottom)
  result <- xts::reclass(engulfed, x)
  colnames(result) <- "bearish engulfing"
  return(result)
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
##########################
# E D A #
##########################
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
sep <- reactive({
return(input$separator)
})
load_data <- reactive({
req( input$dataLoad )
# inference <- get.delim(input$dataLoad$datapath, 1000, comment = '#',
# delims = c("\t","\t| +", " ", ";", ","))
#
#podmienka, ak bude delimiter v subore ',' precitame to ako csv
if( sep() == 'csv') {
read.csv(input$dataLoad$datapath, header = ifelse(!is.null(input$header), TRUE, FALSE))
#podmienka, ak bude delimiter v subore '\' precitame to ako delim
} else if (sep() == 'tab') {
read.delim(input$dataLoad$datapath, header = ifelse(!is.null(input$header), TRUE, FALSE))
#podmienka, ak bude delimiter v subore ' ' precitame to ako table
} else {
read.table(input$dataLoad$datapath, header = ifelse(!is.null(input$header), TRUE, FALSE))
}
})
col <- reactive({
input$variables
})
output$plot <- renderPlot({
req(input$upload,input$show_descr)
#plot podla zvoleneho typu plotu
if ( input$plotType == 'Histogram' ) {
breaks <- hist(load_data()[,col()])$breaks
if( mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
c <- sample(1:657,1)
ggplot( data = load_data(), aes( x = load_data()[,col()] )) +
geom_histogram(fill = colors()[ifelse(c %in% seq(152,359,1),sample(1:657,1),c)],aes(y = ..count../sum(..count..)),
breaks = breaks, color = "black") +
xlab(col()) +
ylab('Proportions of data') +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'ScatterPlot' ) {
var1 <- input$variables
var2 <- input$variables_one
if(mode(load_data()[,col()]) == 'numeric') {
ggplot( data = load_data(), aes( x = load_data()[,var1] ,
y = load_data()[,var2] ) ) +
geom_point() +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'BarPlot' ) {
if(mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
ggplot( data = load_data(), aes( x = load_data()[,col()] )) +
geom_bar() +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'Pie' ) {
if(mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
ggplot( data = load_data(), aes( x = load_data()[,col()]) ) +
geom_bar() + coord_polar() +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'Boxplot' ) {
if(mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
ggplot( data = load_data(), aes( x = load_data()[,col()] ,
y = load_data()[,col()] ) ) +
geom_boxplot() +
theme_app()
} else {
return(NULL)
}
}
})
output$data <- renderTable({
input$upload
isolate({load_data()})
})
#zobrazi mi drop down menu, kde budu na vyber premenne, ktore sa vykreslia
output$test <- renderUI({
req(input$dataLoad, input$separator, input$upload)
if(input$plotType == 'ScatterPlot') {
isolate({
tagList(div(style = "display:inline-block",selectInput( inputId = "variables",
label = "Choose x-axis variable",
choices = c(colnames(load_data())),
width = '50%')),
div(style = "display:inline-block",selectInput( inputId = "variables_one",
label = "Choose y-axis variable",
choices = c(colnames(load_data())),
width = '50%')
))})
} else {
selectInput( inputId = "variables",
label = "Choose x-axis variable",
choices = c(colnames(load_data())))
}
})
output$descr <- renderPrint({
if (!is.null(input$descr_load)) {
readLines(input$descr_load$datapath)
}
})
variables <- reactive({
cols <- colnames(load_data())
cls <- vector(mode = 'character', length = length(cols))
for (i in 1:length(cols)) {
cls[i] <- class(load_data()[,cols[i]])
}
df <- as.data.frame(cbind(cols,cls))
colnames(df) <- c('Variable','Class')
df
})
output$vars <- renderTable({
variables()
})
summary_vals <- reactive({
  # Summary statistics for the currently selected column.  Recomputed
  # whenever the data, the selected variable or the related UI inputs change.
  # NOTE(review): skewness()/kurtosis() and data.table() are used here but
  # only library(shiny) is loaded in this file -- presumably moments/e1071
  # and data.table are attached elsewhere; confirm.
  req(input$dataLoad, input$variables, input$header, input$show_descr)
  col.name <- col()
  stat.names <- c('Variable','Min','Max','Median','Mean','1st Quartile','3rd Quartile',
                  'Standard deviation','Skewness','Kurtosis')
  if (mode(load_data()[,col.name]) == 'numeric' & class(load_data()[,col.name]) != 'factor') {
    # Numeric (non-factor) column: compute the full set of descriptive stats.
    vals <- load_data()[, col.name]
    df <- data.table(col.name,
                     min(vals), max(vals), median(vals), mean(vals),
                     quantile(vals, probs = 0.25, type = 1),
                     quantile(vals, probs = 0.75, type = 1),
                     sd(vals), skewness(vals), kurtosis(vals))
    colnames(df) <- stat.names
  } else if (typeof(load_data()[,col.name]) == 'character') {
    # Character column: no numeric summaries apply, show placeholders.
    # BUG FIX: the placeholder row must contain one cell per summary column
    # (10), not one cell per selected variable (1); the original
    # rep('-', length(col.name)) made the colnames() assignment fail.
    df <- data.table(t(c(col.name, rep('-', 9))))
    colnames(df) <- stat.names
  } else if (class(load_data()[,col.name]) == 'factor') {
    # Factor column: list up to the first 10 levels instead of statistics.
    string <- levels(load_data()[,col.name])
    df <- data.table(string)
    colnames(df) <- col.name
    rownames(df) <- paste("V", 1:length(string), sep = "")
    df <- head(df, 10)
  }
  df
})
# Render the summary table built above.
output$characteristics <- renderTable({
  summary_vals()
})
##########################
# M O D E L #
##########################
observe({
updateSelectInput(session,"pred_var",choices = colnames(load_data()))
})
observeEvent(input$depend_var,{
showModal(modalDialog( title = "Choose features",
checkboxGroupInput( inputId = "test_check",
label = "Features",
choices = c( colnames( load_data() ) ) ) ) )
})
output$apply_model <- renderUI({
if (length(input$test_check) != 0) {
actionButton(inputId = "apply_ml",
label = "Apply technique")
} else {
return(NULL)
}
})
output$regression <- renderPrint({
req(input$test_check,input$apply_ml)
isolate({
pred.var <- input$pred_var
features <- input$test_check
formula <- as.formula(paste(pred.var,paste(features,collapse = "+"), sep = "~"))
model.lm <- lm(formula, data = load_data())
return(summary(model.lm))
})
})
##########################
# HYPOTHESIS #
##########################
observe({
updateSelectInput(session,"test_var",choices = colnames(load_data()))
updateSelectInput(session,"test_var1",choices = colnames(load_data()))
})
observeEvent(input$hyp_var, {
  # Open a modal that lets the user configure the hypothesis to test.
  # The KS test needs its own dialog (reference distribution + parameters);
  # all other tests only need the direction of the alternative and mu0.
  if (input$one_s_test == "One sample Kolomgorov - Smirnov test") {
    showModal(modalDialog( title = "Type of hypothesis",
                           radioButtons( inputId = "hyp_check",
                                         label = "Hypothesis",
                                         choices = c("H0 : FX = F0 vs H1: FX != F0")),
                           selectInput(inputId = "dist_type",
                                       label = "Which type of distribution you want you data compare to?",
                                       choices = c("Normal",
                                                   "Uniform",
                                                   "Exponential")),
                           conditionalPanel(
                             condition = "input.dist_type == 'Normal'",
                             numericInput(inputId = "norm_mean",
                                          label = "Set mean of normal distribution",
                                          value = 0),
                             numericInput(inputId = "norm_var",
                                          label = "Set variance of normal distribution",
                                          value = 1)),
                           conditionalPanel(
                             condition = "input.dist_type == 'Exponential'",
                             numericInput(inputId = "exp_mean",
                                          label = "Set mean of exponential distribution",
                                          value = 1)),
                           conditionalPanel(
                             condition = "input.dist_type == 'Uniform'",
                             sliderInput(inputId = "unif_pars",
                                         label = "Set interval of uniform distribution",
                                         value = c(0,1),
                                         min = -100,
                                         max = 100))))
  } else {
    # BUG FIX: the third choice used to read "mu => mu0", but the dispatch
    # in output$testOutput compares against "mu >= mu0", so the one-sided
    # "less" alternative could never be selected.  The label now matches the
    # strings checked downstream.
    showModal(modalDialog( title = "Type of hypothesis",
                           radioButtons( inputId = "hyp_check",
                                         label = "Hypothesis",
                                         choices = c("H0 : mu = mu0 vs H1: mu != mu0",
                                                     "H0 : mu <= mu0 vs H1: mu > mu0",
                                                     "H0 : mu >= mu0 vs H1: mu < mu0")),
                           numericInput(inputId = "hyp_par",
                                        label = "Set mu0",
                                        value = 0)))
  }
})
mu0 <- reactive({
return(input$hyp_par)
})
X <- reactive({
return(assign(input$test_var, load_data()[,colnames(load_data()) == input$test_var]))
})
Y <- reactive({
return(assign(input$test_var1, load_data()[,colnames(load_data()) == input$test_var1]))
})
distr <- reactive({
if(input$dist_type == "Normal") {
return("pnorm")
} else if (input$dist_type == "Exponential") {
return("pexp")
} else {
return("punif")
}
})
params <- reactive({
  # Parameters forwarded to the reference CDF used by ks.test().
  if (input$dist_type == "Normal") {
    # NOTE(review): ks.test() forwards these positionally to pnorm(), whose
    # second parameter is the *standard deviation*; "norm_var" is labelled a
    # variance in the UI -- confirm which is intended.
    c(input$norm_mean, input$norm_var)
  } else if (input$dist_type == "Exponential") {
    input$exp_mean
  } else {
    # BUG FIX: the Uniform case previously fell through and returned NULL,
    # which broke the KS test against punif().  Return the slider's
    # c(min, max) interval instead.
    input$unif_pars
  }
})
output$testOutput <- renderPrint({
h1 <- "H0 : mu = mu0 vs H1: mu != mu0"
h2 <- "H0 : mu <= mu0 vs H1: mu > mu0"
h3 <- "H0 : mu >= mu0 vs H1: mu < mu0"
#One sample
if (input$tests == "One sample for quantitative data") {
# One sample T test
if(input$one_s_test == "One sample T-test") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(t.test(assign(input$test_var,X()),mu = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(t.test(assign(input$test_var,X()),mu = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(t.test(assign(input$test_var,X()),mu = mu0(), alternative = "less"))
}
# One sample wilcoxon
} else if (input$one_s_test == "One sample Wilcoxon rank sum test") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "less"))
}
#One sample signed test
} else if (input$one_s_test == "One sample signed test") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "less"))
}
#One sample KS test
} else if (input$one_s_test == "One sample Kolomgorov - Smirnov test") {
req(input$dist_type,input$hyp_check)
return(ks.test(X(),distr(),params()[1],params()[2]))
} else if (input$one_s_test == "One sample signed test") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "less"))
}
#One sample chi square test
} else if (input$one_s_test == "One sample chi square test on sample variance") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(wilcox.test(assign(input$test_var,X()),mu = mu0(), alternative = "less"))
}
}
#Two sample tests
} else if (input$tests == "Two sample for quantitative data") {
if (input$tw_s_test == "Two sample T-test") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(t.test(assign(input$test_var,X()),assign(input$test_var,Y()),mu = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(t.test(assign(input$test_var,X()),assign(input$test_var,Y()),mu = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(t.test(assign(input$test_var,X()),assign(input$test_var,Y()),mu = mu0(), alternative = "less"))
}
} else if (input$tw_s_test == "Two sample Wilcoxon rank sum test") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(wilcox.test(assign(input$test_var,X()),assign(input$test_var,Y()),mu = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(wilcox.test(assign(input$test_var,X()),assign(input$test_var,Y()),mu = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(wilcox.test(assign(input$test_var,X()),assign(input$test_var,Y()),mu = mu0(), alternative = "less"))
}
} else if (input$tw_s_test == "Two sample Kolomgorov - Smirnov test") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(wilcox.test(assign(input$test_var,X()),assign(input$test_var,Y())))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(wilcox.test(assign(input$test_var,X()),assign(input$test_var,Y()), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(wilcox.test(assign(input$test_var,X()),assign(input$test_var,Y()),mu = mu0(), alternative = "less"))
}
} else if (input$tw_s_test == "Two sample chi square test on equal sample variances") {
req(input$hyp_var,input$hyp_check,input$hyp_par)
if (input$hyp_check == "H0 : mu = mu0 vs H1: mu != mu0") {
return(var.test(assign(input$test_var,X()),assign(input$test_var,Y()),ratio = mu0()))
} else if (input$hyp_check == "H0 : mu <= mu0 vs H1: mu > mu0") {
return(var.test(assign(input$test_var,X()),assign(input$test_var,Y()),ratio = mu0(), alternative = "greater"))
} else if (input$hyp_check == "H0 : mu >= mu0 vs H1: mu < mu0") {
return(var.test(assign(input$test_var,X()),assign(input$test_var,Y()),ratio = mu0(), alternative = "less"))
}
}
}
})
})
|
/Application/server.R
|
no_license
|
tomasj12/ShinyApp
|
R
| false
| false
| 17,599
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
##########################
# E D A #
##########################
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
sep <- reactive({
  # File format chosen by the user: "csv", "tab" or anything else
  # (whitespace separated).
  input$separator
})
load_data <- reactive({
  # Read the uploaded file with the reader matching the chosen separator.
  # Shiny caches the result; the file is re-read only when inputs change.
  req( input$dataLoad )
  # TRUE whenever the header checkbox input exists.  (The original
  # `ifelse(!is.null(x), TRUE, FALSE)` is just `!is.null(x)`.)
  has_header <- !is.null(input$header)
  if( sep() == 'csv') {
    read.csv(input$dataLoad$datapath, header = has_header)
  } else if (sep() == 'tab') {
    read.delim(input$dataLoad$datapath, header = has_header)
  } else {
    read.table(input$dataLoad$datapath, header = has_header)
  }
})
col <- reactive({
  # Name of the x-axis variable currently selected in the UI.
  input$variables
})
# Main EDA plot: renders one of five plot types for the selected column.
# Non-numeric / factor columns yield NULL (no plot) for most plot types.
output$plot <- renderPlot({
req(input$upload,input$show_descr)
# plot according to the selected plot type (translated from Slovak)
if ( input$plotType == 'Histogram' ) {
# NOTE(review): hist() both draws a base-graphics plot and returns the break
# points; only the breaks are wanted here -- consider hist(..., plot = FALSE).
breaks <- hist(load_data()[,col()])$breaks
if( mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
# Random fill colour index; values in 152-359 are re-rolled (presumably to
# skip an unwanted band of the colors() palette -- confirm).
c <- sample(1:657,1)
ggplot( data = load_data(), aes( x = load_data()[,col()] )) +
geom_histogram(fill = colors()[ifelse(c %in% seq(152,359,1),sample(1:657,1),c)],aes(y = ..count../sum(..count..)),
breaks = breaks, color = "black") +
xlab(col()) +
ylab('Proportions of data') +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'ScatterPlot' ) {
# Scatter plot of the two user-selected variables.
var1 <- input$variables
var2 <- input$variables_one
if(mode(load_data()[,col()]) == 'numeric') {
ggplot( data = load_data(), aes( x = load_data()[,var1] ,
y = load_data()[,var2] ) ) +
geom_point() +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'BarPlot' ) {
if(mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
ggplot( data = load_data(), aes( x = load_data()[,col()] )) +
geom_bar() +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'Pie' ) {
# NOTE(review): this draws a bar chart on polar coordinates; a classic pie
# chart would map the variable to fill and use coord_polar("y").
if(mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
ggplot( data = load_data(), aes( x = load_data()[,col()]) ) +
geom_bar() + coord_polar() +
theme_app()
} else {
return(NULL)
}
} else if ( input$plotType == 'Boxplot' ) {
# Boxplot of the selected variable against itself (single-group boxplot).
if(mode(load_data()[,col()]) == 'numeric' & class(load_data()[,col()]) != 'factor') {
ggplot( data = load_data(), aes( x = load_data()[,col()] ,
y = load_data()[,col()] ) ) +
geom_boxplot() +
theme_app()
} else {
return(NULL)
}
}
})
# Preview table of the uploaded data; refreshed when "upload" is pressed.
output$data <- renderTable({
  input$upload              # take a reactive dependency on the button only
  isolate(load_data())      # ...but not on the data itself
})
# Shows drop-down menu(s) for selecting which variable(s) get plotted
# (translated from Slovak).  ScatterPlot needs both an x and a y variable;
# every other plot type only needs x.
output$test <- renderUI({
req(input$dataLoad, input$separator, input$upload)
if(input$plotType == 'ScatterPlot') {
isolate({
tagList(div(style = "display:inline-block",selectInput( inputId = "variables",
label = "Choose x-axis variable",
choices = c(colnames(load_data())),
width = '50%')),
div(style = "display:inline-block",selectInput( inputId = "variables_one",
label = "Choose y-axis variable",
choices = c(colnames(load_data())),
width = '50%')
))})
} else {
selectInput( inputId = "variables",
label = "Choose x-axis variable",
choices = c(colnames(load_data())))
}
})
# Echo the uploaded plain-text description file, one element per line.
output$descr <- renderPrint({
if (!is.null(input$descr_load)) {
readLines(input$descr_load$datapath)
}
})
variables <- reactive({
  # Two-column overview of the data set: each variable's name and class.
  cols <- colnames(load_data())
  # vapply(..., character(1)) replaces the grow-by-index for-loop over
  # 1:length(cols); [1] guards against classes of length > 1 (e.g.
  # c("data.table", "data.frame")), which made the original
  # `cls[i] <- class(...)` assignment error out.
  cls <- vapply(cols, function(nm) class(load_data()[, nm])[1], character(1),
                USE.NAMES = FALSE)
  df <- data.frame(cols, cls, stringsAsFactors = FALSE)
  colnames(df) <- c('Variable', 'Class')
  df
})
# Render the variable/class overview table.
output$vars <- renderTable({
  variables()
})
summary_vals <- reactive({
  # Summary statistics for the currently selected column.  Recomputed
  # whenever the data, the selected variable or the related UI inputs change.
  # NOTE(review): skewness()/kurtosis() and data.table() are used here but
  # only library(shiny) is loaded in this file -- presumably moments/e1071
  # and data.table are attached elsewhere; confirm.
  req(input$dataLoad, input$variables, input$header, input$show_descr)
  col.name <- col()
  stat.names <- c('Variable','Min','Max','Median','Mean','1st Quartile','3rd Quartile',
                  'Standard deviation','Skewness','Kurtosis')
  if (mode(load_data()[,col.name]) == 'numeric' & class(load_data()[,col.name]) != 'factor') {
    # Numeric (non-factor) column: compute the full set of descriptive stats.
    vals <- load_data()[, col.name]
    df <- data.table(col.name,
                     min(vals), max(vals), median(vals), mean(vals),
                     quantile(vals, probs = 0.25, type = 1),
                     quantile(vals, probs = 0.75, type = 1),
                     sd(vals), skewness(vals), kurtosis(vals))
    colnames(df) <- stat.names
  } else if (typeof(load_data()[,col.name]) == 'character') {
    # Character column: no numeric summaries apply, show placeholders.
    # BUG FIX: the placeholder row must contain one cell per summary column
    # (10), not one cell per selected variable (1); the original
    # rep('-', length(col.name)) made the colnames() assignment fail.
    df <- data.table(t(c(col.name, rep('-', 9))))
    colnames(df) <- stat.names
  } else if (class(load_data()[,col.name]) == 'factor') {
    # Factor column: list up to the first 10 levels instead of statistics.
    string <- levels(load_data()[,col.name])
    df <- data.table(string)
    colnames(df) <- col.name
    rownames(df) <- paste("V", 1:length(string), sep = "")
    df <- head(df, 10)
  }
  df
})
# Render the summary table built above.
output$characteristics <- renderTable({
  summary_vals()
})
##########################
#      M O D E L         #
##########################
# Keep the dependent-variable picker in sync with the data's column names.
observe({
updateSelectInput(session,"pred_var",choices = colnames(load_data()))
})
# When the "choose dependent variable" control fires, open a modal with one
# checkbox per column so the user can pick the model's features.
observeEvent(input$depend_var,{
showModal(modalDialog( title = "Choose features",
checkboxGroupInput( inputId = "test_check",
label = "Features",
choices = c( colnames( load_data() ) ) ) ) )
})
# Show the "Apply technique" button only once at least one feature is ticked.
output$apply_model <- renderUI({
if (length(input$test_check) != 0) {
actionButton(inputId = "apply_ml",
label = "Apply technique")
} else {
return(NULL)
}
})
# Fit a linear model <pred_var> ~ <feature1> + <feature2> + ... and print its
# summary.  Only the inputs read outside isolate() (test_check, apply_ml)
# trigger a refit; pred_var alone does not.
output$regression <- renderPrint({
req(input$test_check,input$apply_ml)
isolate({
pred.var <- input$pred_var
features <- input$test_check
formula <- as.formula(paste(pred.var,paste(features,collapse = "+"), sep = "~"))
model.lm <- lm(formula, data = load_data())
return(summary(model.lm))
})
})
##########################
#      HYPOTHESIS        #
##########################
# Keep both test-variable pickers in sync with the data's column names.
observe({
  choice_set <- colnames(load_data())
  updateSelectInput(session, "test_var", choices = choice_set)
  updateSelectInput(session, "test_var1", choices = choice_set)
})
observeEvent(input$hyp_var, {
  # Open a modal that lets the user configure the hypothesis to test.
  # The KS test needs its own dialog (reference distribution + parameters);
  # all other tests only need the direction of the alternative and mu0.
  if (input$one_s_test == "One sample Kolomgorov - Smirnov test") {
    showModal(modalDialog( title = "Type of hypothesis",
                           radioButtons( inputId = "hyp_check",
                                         label = "Hypothesis",
                                         choices = c("H0 : FX = F0 vs H1: FX != F0")),
                           selectInput(inputId = "dist_type",
                                       label = "Which type of distribution you want you data compare to?",
                                       choices = c("Normal",
                                                   "Uniform",
                                                   "Exponential")),
                           conditionalPanel(
                             condition = "input.dist_type == 'Normal'",
                             numericInput(inputId = "norm_mean",
                                          label = "Set mean of normal distribution",
                                          value = 0),
                             numericInput(inputId = "norm_var",
                                          label = "Set variance of normal distribution",
                                          value = 1)),
                           conditionalPanel(
                             condition = "input.dist_type == 'Exponential'",
                             numericInput(inputId = "exp_mean",
                                          label = "Set mean of exponential distribution",
                                          value = 1)),
                           conditionalPanel(
                             condition = "input.dist_type == 'Uniform'",
                             sliderInput(inputId = "unif_pars",
                                         label = "Set interval of uniform distribution",
                                         value = c(0,1),
                                         min = -100,
                                         max = 100))))
  } else {
    # BUG FIX: the third choice used to read "mu => mu0", but the dispatch
    # in output$testOutput compares against "mu >= mu0", so the one-sided
    # "less" alternative could never be selected.  The label now matches the
    # strings checked downstream.
    showModal(modalDialog( title = "Type of hypothesis",
                           radioButtons( inputId = "hyp_check",
                                         label = "Hypothesis",
                                         choices = c("H0 : mu = mu0 vs H1: mu != mu0",
                                                     "H0 : mu <= mu0 vs H1: mu > mu0",
                                                     "H0 : mu >= mu0 vs H1: mu < mu0")),
                           numericInput(inputId = "hyp_par",
                                        label = "Set mu0",
                                        value = 0)))
  }
})
# The user-supplied null value mu0.
mu0 <- reactive({
  input$hyp_par
})
# First sample: the data column whose name matches input$test_var.
# (The original wrapped this in assign(); the binding it created was local
# to the reactive expression and discarded, so only the value matters.)
X <- reactive({
  load_data()[, colnames(load_data()) == input$test_var]
})
# Second sample (two-sample tests): column matching input$test_var1.
Y <- reactive({
  load_data()[, colnames(load_data()) == input$test_var1]
})
# Name of the reference CDF for the one-sample Kolmogorov-Smirnov test;
# anything other than Normal/Exponential falls through to punif.
distr <- reactive({
  switch(input$dist_type,
         "Normal" = "pnorm",
         "Exponential" = "pexp",
         "punif")
})
params <- reactive({
  # Parameters forwarded to the reference CDF used by ks.test().
  if (input$dist_type == "Normal") {
    # NOTE(review): ks.test() forwards these positionally to pnorm(), whose
    # second parameter is the *standard deviation*; "norm_var" is labelled a
    # variance in the UI -- confirm which is intended.
    c(input$norm_mean, input$norm_var)
  } else if (input$dist_type == "Exponential") {
    input$exp_mean
  } else {
    # BUG FIX: the Uniform case previously fell through and returned NULL,
    # which broke the KS test against punif().  Return the slider's
    # c(min, max) interval instead.
    input$unif_pars
  }
})
output$testOutput <- renderPrint({
  # Run the hypothesis test selected in the UI and print its result object.
  # The strings compared against input$hyp_check must match the radioButton
  # choices built in the observeEvent(input$hyp_var) modal byte for byte.
  # (The assign() wrappers around X()/Y() in the original were dropped; they
  # only polluted the printed `data:` label of the htest result.)
  two.sided <- "H0 : mu = mu0 vs H1: mu != mu0"
  greater <- "H0 : mu <= mu0 vs H1: mu > mu0"
  less <- "H0 : mu >= mu0 vs H1: mu < mu0"
  # ---------------- One sample tests ----------------
  if (input$tests == "One sample for quantitative data") {
    if (input$one_s_test == "One sample T-test") {
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      if (input$hyp_check == two.sided) {
        return(t.test(X(), mu = mu0()))
      } else if (input$hyp_check == greater) {
        return(t.test(X(), mu = mu0(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(t.test(X(), mu = mu0(), alternative = "less"))
      }
    } else if (input$one_s_test == "One sample Wilcoxon rank sum test") {
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      if (input$hyp_check == two.sided) {
        return(wilcox.test(X(), mu = mu0()))
      } else if (input$hyp_check == greater) {
        return(wilcox.test(X(), mu = mu0(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(wilcox.test(X(), mu = mu0(), alternative = "less"))
      }
    } else if (input$one_s_test == "One sample signed test") {
      # NOTE(review): identical to the Wilcoxon branch above (wilcox.test is
      # the signed *rank* test).  The original file also contained a second,
      # unreachable copy of this branch, which has been removed.
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      if (input$hyp_check == two.sided) {
        return(wilcox.test(X(), mu = mu0()))
      } else if (input$hyp_check == greater) {
        return(wilcox.test(X(), mu = mu0(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(wilcox.test(X(), mu = mu0(), alternative = "less"))
      }
    } else if (input$one_s_test == "One sample Kolomgorov - Smirnov test") {
      req(input$dist_type, input$hyp_check)
      # BUG FIX: parameters used to be forwarded as params()[1], params()[2],
      # which injected an NA for pexp() (length-1 parameter vector) and broke
      # punif() (no parameters at all).  do.call() forwards exactly as many
      # parameters as the chosen reference distribution provides.
      return(do.call(ks.test, c(list(X(), distr()), as.list(params()))))
    } else if (input$one_s_test == "One sample chi square test on sample variance") {
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      # TODO(review): despite its label this branch runs a Wilcoxon test, not
      # a chi-square test on the sample variance; behaviour kept as-is
      # pending clarification of the intended statistic.
      if (input$hyp_check == two.sided) {
        return(wilcox.test(X(), mu = mu0()))
      } else if (input$hyp_check == greater) {
        return(wilcox.test(X(), mu = mu0(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(wilcox.test(X(), mu = mu0(), alternative = "less"))
      }
    }
  # ---------------- Two sample tests ----------------
  } else if (input$tests == "Two sample for quantitative data") {
    if (input$tw_s_test == "Two sample T-test") {
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      if (input$hyp_check == two.sided) {
        return(t.test(X(), Y(), mu = mu0()))
      } else if (input$hyp_check == greater) {
        return(t.test(X(), Y(), mu = mu0(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(t.test(X(), Y(), mu = mu0(), alternative = "less"))
      }
    } else if (input$tw_s_test == "Two sample Wilcoxon rank sum test") {
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      if (input$hyp_check == two.sided) {
        return(wilcox.test(X(), Y(), mu = mu0()))
      } else if (input$hyp_check == greater) {
        return(wilcox.test(X(), Y(), mu = mu0(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(wilcox.test(X(), Y(), mu = mu0(), alternative = "less"))
      }
    } else if (input$tw_s_test == "Two sample Kolomgorov - Smirnov test") {
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      # BUG FIX: this branch previously called wilcox.test(); ks.test() is
      # the two-sample Kolmogorov-Smirnov test that the label promises.
      if (input$hyp_check == two.sided) {
        return(ks.test(X(), Y()))
      } else if (input$hyp_check == greater) {
        return(ks.test(X(), Y(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(ks.test(X(), Y(), alternative = "less"))
      }
    } else if (input$tw_s_test == "Two sample chi square test on equal sample variances") {
      req(input$hyp_var, input$hyp_check, input$hyp_par)
      # var.test() is the F test for equality of variances; mu0 is reused
      # here as the hypothesised variance ratio.
      if (input$hyp_check == two.sided) {
        return(var.test(X(), Y(), ratio = mu0()))
      } else if (input$hyp_check == greater) {
        return(var.test(X(), Y(), ratio = mu0(), alternative = "greater"))
      } else if (input$hyp_check == less) {
        return(var.test(X(), Y(), ratio = mu0(), alternative = "less"))
      }
    }
  }
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRangesUtils.R
\name{filterChrGR}
\alias{filterChrGR}
\title{Filters unwanted seqlevels from a Genomic Ranges object or similar object}
\usage{
filterChrGR(
gr = NULL,
remove = NULL,
underscore = TRUE,
standard = TRUE,
pruningMode = "coarse"
)
}
\arguments{
\item{gr}{A \code{GRanges} object or another object containing \code{seqlevels}.}
\item{remove}{A character vector indicating the seqlevels that should be removed if manual removal is desired for certain seqlevels.
If no manual removal is desired, \code{remove} should be set to \code{NULL}.}
\item{underscore}{A boolean value indicating whether to remove all seqlevels whose names contain an underscore (for example "chr11_KI270721v1_random").}
\item{standard}{A boolean value indicating whether only standard chromosomes should be kept. Standard chromosomes are defined by
\code{GenomeInfoDb::keepStandardChromosomes()}.}
\item{pruningMode}{The name of the pruning method to use (from \code{GenomeInfoDb::seqinfo()}) when seqlevels must be removed from a \code{GRanges} object.
When some of the seqlevels to drop from the given \code{GRanges} object are in use (i.e. have ranges on them), the ranges on these sequences need
to be removed before the seqlevels can be dropped. Four pruning modes are currently defined: "error", "coarse", "fine", and "tidy".}
}
\description{
This function allows for removal of manually designated or more broadly undesirable seqlevels from a Genomic Ranges object or similar object
}
|
/man/filterChrGR.Rd
|
permissive
|
GreenleafLab/ArchR
|
R
| false
| true
| 1,567
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRangesUtils.R
\name{filterChrGR}
\alias{filterChrGR}
\title{Filters unwanted seqlevels from a Genomic Ranges object or similar object}
\usage{
filterChrGR(
gr = NULL,
remove = NULL,
underscore = TRUE,
standard = TRUE,
pruningMode = "coarse"
)
}
\arguments{
\item{gr}{A \code{GRanges} object or another object containing \code{seqlevels}.}
\item{remove}{A character vector indicating the seqlevels that should be removed if manual removal is desired for certain seqlevels.
If no manual removal is desired, \code{remove} should be set to \code{NULL}.}
\item{underscore}{A boolean value indicating whether to remove all seqlevels whose names contain an underscore (for example "chr11_KI270721v1_random").}
\item{standard}{A boolean value indicating whether only standard chromosomes should be kept. Standard chromosomes are defined by
\code{GenomeInfoDb::keepStandardChromosomes()}.}
\item{pruningMode}{The name of the pruning method to use (from \code{GenomeInfoDb::seqinfo()}) when seqlevels must be removed from a \code{GRanges} object.
When some of the seqlevels to drop from the given \code{GRanges} object are in use (i.e. have ranges on them), the ranges on these sequences need
to be removed before the seqlevels can be dropped. Four pruning modes are currently defined: "error", "coarse", "fine", and "tidy".}
}
\description{
This function allows for removal of manually designated or more broadly undesirable seqlevels from a Genomic Ranges object or similar object
}
|
# Exercise 4: complex Shiny UI layouts
# Make shiny's functions available to both the UI and the server scripts.
library("shiny")
# Execute `app_ui.R` and `app_server.R`; running them defines the `ui`
# value and the `server` function used below.  The layout itself is to be
# filled in inside `app_ui.R`; the app is run through this file.
source("app_ui.R")
source("app_server.R")
# Launch the application from the pieces defined above.
shinyApp(ui = ui, server = server)
|
/exercise-4/app.R
|
permissive
|
jstnwoo-1623155/chapter-19-exercises
|
R
| false
| false
| 487
|
r
|
# Exercise 4: complex Shiny UI layouts
# Load libraries so they are available
library("shiny")
# Use source() to execute the `app_ui.R` and `app_server.R` files. These will
# define the UI value and server function respectively.
source("app_ui.R")
source("app_server.R")
# You will need to fill in the `app_ui.R` file to create the layout.
# Run the app through this file.
# Create a new `shinyApp()` using the loaded `ui` and `server` variables
shinyApp(ui = ui, server = server)
|
# Auto-generated fuzz/regression test case (the file path suggests an
# RcppDeepState/valgrind harness -- confirm).  It calls the internal
# multivariance:::match_rows() with a fixed 5x7 matrix A containing extreme
# double values and a 1x1 matrix B, then prints the structure of the result.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22808535475903e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613100292-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 343
|
r
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22808535475903e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe}
\usage{
}
\description{
Pipe a data structure forward into a function call.
}
\details{
\code{x \%>\% f()} becomes \code{f(x)}.
The pipe takes \code{x} and inserts it into the first argument of \code{f()}:
\code{x \%>\% f(y)} becomes \code{f(x, y)}
You can use a period \code{.} to dictate exactly where you want the piped data to be inserted:
\code{x \%>\% f(y, .)} becomes \code{f(y, x)}
You can do function composition using multiple pipes:
\code{x \%>\% f() \%>\% g()} becomes \code{g(f(x))}
}
\examples{
tibble(
x = 1:5,
y = 6:10
) \%>\%
mutate(z = x + y) \%>\%
filter(z > 10)
# becomes:
# filter(mutate(tibble(x = 1:5, y = 6:10), z = x + y), z > 10)
#> # A tibble: 3 x 3
#> x y z
#>   <int> <int> <int>
#> 3 8 11
#> 4 9 13
#> 5 10 15
-----------------------------------
library(gapminder)
gapminder \%>\%
group_by(year) \%>\%
summarize(lifeExp_mean = mean(lifeExp))
# becomes:
# summarize(group_by(gapminder, year), lifeExp_mean = mean(lifeExp))
#> # A tibble: 12 x 2
#> year lifeExp_mean
#>    <int>        <dbl>
#> 1 1952 49.1
#> 2 1957 51.5
#> 3 1962 53.6
}
\seealso{
dplyr verbs: \code{\link[=group_by]{group_by()}}, \code{\link[=summarize]{summarize()}}, \code{\link[=mutate]{mutate()}}, \code{\link[=filter]{filter()}}, \code{\link[=arrange]{arrange()}}, \code{\link[=select]{select()}}
}
|
/man/grapes-greater-than-grapes.Rd
|
permissive
|
rhenyu/qelp
|
R
| false
| true
| 1,515
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe}
\usage{
}
\description{
Pipe a data structure forward into a function call.
}
\details{
\code{x \%>\% f()} becomes \code{f(x)}.
The pipe takes \code{x} and inserts it into the first argument of \code{f()}:
\code{x \%>\% f(y)} becomes \code{f(x, y)}
You can use a period \code{.} to dictate exactly where you want the piped data to be inserted:
\code{x \%>\% f(y, .)} becomes \code{f(y, x)}
You can do function composition using multiple pipes:
\code{x \%>\% f() \%>\% g()} becomes \code{g(f(x))}
}
\examples{
tibble(
x = 1:5,
y = 6:10
) \%>\%
mutate(z = x + y) \%>\%
filter(z > 10)
# becomes:
# filter(mutate(tibble(x = 1:5, y = 6:10), z = x + y), z > 10)
#> # A tibble: 3 x 3
#> x y z
<int> <int> <int>
#> 3 8 11
#> 4 9 13
#> 5 10 15
-----------------------------------
library(gapminder)
gapminder \%>\%
group_by(year) \%>\%
summarize(lifeExp_mean = mean(lifeExp))
# becomes:
# summarize(group_by(gapminder, year), lifeExp_mean = mean(lifeExp))
#> # A tibble: 12 x 2
#> year lifeExp_mean
<int> <dbl>
#> 1 1952 49.1
#> 2 1957 51.5
#> 3 1962 53.6
}
\seealso{
dplyr verbs: \code{\link[=group_by]{group_by()}}, \code{\link[=summarize]{summarize()}}, \code{\link[=mutate]{mutate()}}, \code{\link[=filter]{filter()}}, \code{\link[=arrange]{arrange()}}, \code{\link[=select]{select()}}
}
|
## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix in a list of accessor closures so that its inverse, once
  ## computed, can be stored alongside it and reused by cachemean().
  cached_inv <- NULL
  list(
    ## Replace the stored matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inv <<- NULL
    },
    ## Return the stored matrix.
    get = function() x,
    ## Store a freshly computed inverse.
    setinv = function(inverse) cached_inv <<- inverse,
    ## Return the cached inverse (NULL if none has been stored yet).
    getinv = function() cached_inv
  )
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix" above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the "cachemean" should retrieve the inverse from the cache.
## Compute (and cache) the inverse of the special "matrix" object created
## by makeCacheMatrix(). On the first call the inverse is computed with
## solve() and stored in the object's cache; subsequent calls return the
## cached value (with a message) instead of recomputing it.
##
## x   : list produced by makeCacheMatrix()
## ... : further arguments forwarded to solve()
## Returns the matrix inverse of x$get().
cachemean <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinv()
  ## Just return the inverse if its already set
  if(!is.null(m)) {
    message("Getting cached data")
    return(m)
  }
  ## Get the matrix from our object
  data <- x$get()
  ## BUG FIX: forward ... to solve(); the original accepted extra
  ## arguments in the signature but silently discarded them.
  m <- solve(data, ...)
  ## Set the inverse to the object
  x$setinv(m)
  ## Return the matrix
  m
}
|
/cachematrix_v2.R
|
no_license
|
ashwanibhagra/ProgrammingAssignment2
|
R
| false
| false
| 1,409
|
r
|
## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
## Create a special "matrix" object that caches its inverse.
## Returns a list of accessor closures: set/get for the matrix itself,
## setinv/getinv for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
## Cache slot for the inverse; NULL means "not computed yet".
m <- NULL
## Replace the stored matrix; the cached inverse is invalidated because
## it no longer corresponds to the new matrix.
set <- function(y) {
x <<- y
m <<- NULL
}
## Return the currently stored matrix.
get <- function() x
## Store a computed inverse in the cache.
setinv <- function(inverse) m <<- inverse
## Return the cached inverse, or NULL if none has been stored.
getinv <- function() m
## Expose the four closures by name so callers can use x$set(), x$get(), etc.
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix" above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the "cachemean" should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse has already been calculated (and the matrix has not
## changed), the cached value is returned instead of recomputing it.
##
## x   : list returned by makeCacheMatrix()
## ... : extra arguments forwarded to solve()
cachemean <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinv()
  ## Cache hit: return the stored inverse without recomputation.
  if (!is.null(m)) {
    message("Getting cached data")
    return(m)
  }
  data <- x$get()
  ## FIX: pass ... through to solve(); the original dropped these
  ## arguments despite accepting them in the signature.
  m <- solve(data, ...)
  x$setinv(m)
  m
}
|
#-------- packages --------
library(tidyverse)
library(spdep)
library(ggthemes)
library(ggmap)
library(viridis)
library(lubridate)
library(gstat)
library(sp)
library(sf)
library(classInt)
library(lmtest)
library(tseries)
library(broom)
library(mgcv)
#-------- data and directory --------
paste0(here::here(), "/final project/data") %>% setwd()
fares <- read_csv("fares.csv")
trips <- read_csv("trips.csv")
taxi_data <- trips %>%
left_join(fares, by = c("medallion", "hack_license", "vendor_id", "pickup_datetime")) %>%
select(-hack_license, -vendor_id, -rate_code, -store_and_fwd_flag) %>%
filter(total_amount > 0) %>%
mutate(total_amount = log(total_amount)) %>%
filter(pickup_latitude >= 40.70, pickup_latitude <= 40.83,
pickup_longitude >= -74.025, pickup_longitude <= -73.93) %>%
mutate(hour = hour(pickup_datetime),
wday = wday(pickup_datetime, label = TRUE),
month = month(pickup_datetime, label = TRUE))
taxi_data <- taxi_data %>%
select(total_amount, pickup_longitude, pickup_latitude, passenger_count, trip_distance, wday, hour, month)
manhattan <- readRDS("manhattan.rds")
taxi_data_sample <- sample_n(taxi_data, 1000) %>%
select(total_amount, pickup_longitude, pickup_latitude, passenger_count, trip_distance, wday, hour, month)
spdf <- SpatialPointsDataFrame(coords = taxi_data[, 2:3], data = taxi_data,
proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
taxi_sf <- st_as_sf(spdf)
weights <- knn2nb(knearneigh(taxi_sf, k = 50))
weights <- nb2listw(weights, zero.policy = TRUE)
#-------- functions --------
## Fit an ordinary least-squares model with lm().
## data    : data frame containing the model variables
## formula : model formula, e.g. y ~ x1 + x2
## Returns the fitted "lm" object.
linear_regression <- function(data, formula) {
  fit <- lm(formula, data)
  fit
}
## Map facet heights of a numeric matrix to colors for persp() shading.
##
## data     : numeric matrix of surface heights (as passed to persp())
## palette  : vector of colors interpolated via colorRampPalette()
## n_colors : number of interpolation steps (default 100)
## Returns one color per surface facet, i.e. a vector of length
## (nrow(data) - 1) * (ncol(data) - 1), suitable for persp(col = ...).
persp_color <- function(data, palette, n_colors = 100) {
  ## BUG FIX: the original called is_numeric(), purrr's deprecated helper
  ## (undefined in base R). is.numeric() is a scalar test on the whole
  ## matrix, so the any()/na.rm wrapper was a no-op anyway.
  if (!is.numeric(data)) {
    stop("data contains non-numeric values.")
  }
  n_rows <- nrow(data)
  n_cols <- ncol(data)
  jet.colors <- colorRampPalette(palette)
  color <- jet.colors(n_colors)
  z <- data
  ## Height of each facet = sum of its four corner heights; only the
  ## relative ordering matters for the cut() binning below.
  zfacet <- z[-1, -1] + z[-1, -n_cols] + z[-n_rows, -1] + z[-n_rows, -n_cols]
  facetcol <- cut(zfacet, n_colors)
  return(color[facetcol])
}
#-------- visualization and summary--------
ggmap(manhattan, darken = 0.5) +
scale_fill_viridis(option = 'plasma') +
geom_bin2d(data = taxi_data_sample, aes(pickup_longitude, pickup_latitude), bins = 50, alpha = 0.6) +
labs(x = "longitude",
y = "latitude",
fill = "count")
taxi_data %>%
ggplot(aes(hour, total_amount, color = wday)) +
geom_boxplot() +
facet_wrap(~month) +
theme_linedraw()
#-------- krigging --------
set.seed(1234)
# GAM
gam <- gam(total_amount ~ s(pickup_longitude) + s(pickup_latitude) + passenger_count + s(trip_distance) + wday + hour + month, family = "gaussian", data = taxi_data)
cbind(fitted(gam), residuals(gam)) %>%
as_tibble() %>%
ggplot(aes(V1, V2)) +
geom_point()
# data
taxi_sf_complete <- bind_cols(taxi_sf, residuals(gam), fitted(gam)) %>%
rename("residuals" = "...10",
"fitted" = "...11")
sample_size <- floor(0.80*nrow(taxi_sf_complete))
select <- sample(seq_len(nrow(taxi_sf_complete)), size = sample_size)
test <- taxi_sf_complete[select,]
validation <- taxi_sf_complete[-select,]
combined_data <- bind_rows(test %>%
mutate(type = "test"),
validation %>%
mutate(type = "validation"))
# variograms
taxi_variogram <- variogram(residuals ~ 1,
data = select(taxi_sf_complete, -fitted),
cutoff = 10)
plot(taxi_variogram)
spherical_fit <- fit.variogram(taxi_variogram, vgm(0.05, "Sph", 5, 0.05))
plot(taxi_variogram, spherical_fit)
# ordinary kriging
geosp <- as_Spatial(select(taxi_sf_complete, -fitted))
prediction_grid <- spsample(geosp, nrow(taxi_sf), type = "regular")
test_sp <- as_Spatial(select(test, -fitted))
predict <- as_Spatial(select(validation, -fitted))
spherical_krige <- krige(formula = residuals ~ 1, locations=geosp, model=spherical_fit,
newdata=prediction_grid, nmax=15)
plot_data <- spherical_krige %>% as_tibble() %>%
mutate(fit = "spherical")
final_data <- bind_cols(head(taxi_sf_complete, -84), spherical_krige$var1.pred) %>%
mutate(fitted = fitted + ...12) %>%
select(-...12)
# validation
spherical_validation <- krige(total_amount ~ 1, locations=test_sp, newdata= validation, model=spherical_fit)
exponential_validation <- krige(total_amount ~ 1, locations=test_sp, newdata= validation, model=exponential_fit)
spherical_difference <- validation$total_amount - spherical_validation$var1.pred
exponential_difference <- validation$total_amount - exponential_validation$var1.pred
rmseSph <- sqrt(sum(spherical_difference^2)/length(spherical_difference))
MESph <- sum(spherical_difference/length(spherical_difference))
rmseExp <- sqrt(sum(exponential_difference^2)/length(exponential_difference))
MEExp <- sum(exponential_difference/length(exponential_difference))
# cross-validation
spherical_cross_validation <- krige.cv(ni ~ 1, test_sp, model=spherical_fit, nfold=nrow(test_sp))
exponential_cross_validation <- krige.cv(ni ~ 1, test_sp, model=exponential_fit, nfold=nrow(test_sp))
sphcvmean <- mean(spherical_cross_validation$residual)
expcvmean <- mean(exponential_cross_validation$residual)
sphcvrmse <- sqrt(mean(spherical_cross_validation$residual^2))
expcvrmse <- sqrt(mean(exponential_cross_validation$residual^2))
sphcvmsdr <- mean((spherical_cross_validation$residual^2)/ (spherical_cross_validation$var1.var))
expcvmsdr <- mean((exponential_cross_validation$residual^2)/ (exponential_cross_validation$var1.var))
ggplot(combined_data, aes(type, total_amount)) +
geom_boxplot(fill = "gold", color = "black") +
stat_summary(fun=mean, geom="point", shape = 21, size = 3, fill = "white") +
theme_par() +
theme(axis.title.x = element_blank())
ggplot(combined_data, aes(total_amount, color = type, fill = type)) +
geom_histogram(alpha = 0.8, show.legend = F) +
facet_wrap(~ type) +
theme_par() +
scale_color_ptol() +
scale_fill_ptol()
tibble(SSE = c(attr(spherical_fit, "SSErr"), attr(exponential_fit, "SSErr")),
`variogram fit` = c("spherical", "exponential")) %>%
knitr::kable(caption = "Sum of squared errors for 2 variogram model fits", digits = 5)
combined_data %>%
filter(type == "test") %>%
ggplot(aes(pickup_longitude, pickup_latitude)) +
geom_bin2d(aes(color=total_amount), bins = 50, alpha = 0.6) +
coord_equal() +
scale_color_gradient_tableau() +
theme_par() +
labs(color = "total amount") +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position = "bottom",
legend.key.width=unit(1.5,"cm"),
axis.text.y = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank())
final_data %>%
ggplot(aes(pickup_longitude, pickup_latitude)) +
geom_point(aes(color=total_amount), show.legend = FALSE) +
coord_equal() +
scale_color_gradient_tableau() +
theme_map()
plot_data %>%
ggplot(aes(x1, x2)) +
geom_tile(aes(fill=var1.var)) +
coord_equal() +
facet_wrap(~ fit) +
scale_fill_gradient_tableau() +
theme_par() +
labs(fill = "variance") +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position = "bottom",
legend.key.width=unit(1.5,"cm"),
axis.text.y = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank())
summary(spherical_krige)$data %>%
knitr::kable(caption = "Spherical kriging predicted values and variances", digits = 5)
#-------- autocorrelation --------
ggplot(final_data, aes(pickup_longitude, pickup_latitude, color = exp(fitted), alpha = exp(fitted)))+
geom_point(size = 0.1, show.legend = F) +
scale_color_gradient2_tableau() +
coord_fixed(ratio = 1.1) +
theme_map() +
labs(fill = "fare") +
theme(legend.position = "top")
ggplot(final_data) +
geom_histogram(aes(exp(total_amount)), bins = 100, position = "dodge") +
#geom_histogram(aes(exp(fitted)), bins = 100, alpha = 0.6, color = "red")
theme_minimal()
model <- taxi_data %>% linear_regression(total_amount ~ pickup_longitude + pickup_latitude + hour + wday + month + trip_distance + trip_time_in_secs)
model %>%
broom::tidy() %>%
knitr::kable()
model %>%
broom::glance() %>%
knitr::kable()
bind_rows(bptest(model) %>% broom::tidy(),
jarque.bera.test(model$residuals) %>% broom::tidy()) %>%
knitr::kable()
final_data %>%
mutate(residuals2 = total_amount - fitted) %>%
ggplot() +
geom_point(aes(x=fitted, y=residuals2)) +
theme_minimal() +
geom_hline(aes(yintercept = 0), color = "royalblue")
taxi_data %>%
bind_cols(model$residuals) %>%
rename("residuals" = "...21") %>%
ggplot(aes(pickup_longitude, pickup_latitude, color = residuals, alpha = residuals))+
geom_point(size = 0.1, show.legend = F) +
viridis::scale_color_viridis() +
coord_fixed(ratio = 1.1) +
theme_map() +
labs(fill = "residual") +
theme(legend.position = "top")
moran.test(model$residuals, listw = weights, zero.policy = TRUE) %>%
broom::tidy() %>%
knitr::kable()
moran <- moran.mc(model$residuals, nsim = 100, listw = weights, zero.policy = TRUE)
moran %>%
broom::tidy() %>%
knitr::kable()
moran %>%
plot()
#-------- models --------
formula <- as.formula(total_amount ~ pickup_longitude + pickup_latitude + hour + wday + month + trip_distance + trip_time_in_secs)
model <- glm(formula, family = "gaussian", taxi_data)
lapply(lm.LMtests(model, listw = weights, zero.policy = TRUE, test = "all"), tidy) %>%
bind_rows(.id = "test") -> lm_results
sp_error_model <- errorsarlm(model, data = taxi_sf, listw = weights, zero.policy = TRUE, na.action = na.omit)
sp_lag_model <- lagsarlm(formula, taxi_sf, listw = weights, zero.policy = TRUE)
car_model <- spautolm(model, listw = weights, family = "CAR", zero.policy = TRUE)
model_names <- c("OLS", "SARerr", "SARlag", "CAR")
models <- list(model, sp_error_model, sp_lag_model, car_model)
names(models) <- model_names
comparison <- tibble(model = model_names,
`Log Likelihood` = sapply(models, logLik),
AIC = sapply(models, AIC),
BIC = sapply(models, BIC))
lapply(models, function(x) moran.test(residuals(x), weights, zero.policy = TRUE) %>% tidy()) %>%
bind_rows(.id = "model") -> moran_results
lapply(models, function(x) jarque.bera.test(residuals(x)) %>% tidy()) %>%
bind_rows(.id = "model") -> jarque_berra_results
lapply(models, function(x) shapiro.test(residuals(x)) %>% tidy()) %>%
bind_rows(.id = "model") -> shapiro_results
bind_rows(models$OLS %>% bptest() %>% tidy(),
models$SARerr %>% bptest.sarlm() %>% tidy(),
models$SARlag %>% bptest.sarlm() %>% tidy()) %>%
mutate(model = c("OLS", "SARerr", "SARlag")) %>%
select(model, statistic, p.value, parameter) -> bp_results
model_outputs <- lapply(models, residuals) %>%
bind_rows(.id = "model") %>%
pivot_longer(cols = 2:191, names_to = "row", values_to = "residual") %>%
left_join(lapply(models, fitted) %>%
bind_rows(.id = "model") %>%
pivot_longer(cols = 2:191, names_to = "row", values_to = "fitted"),
by = c("model", "row"))
comparison %>% kable(caption = "Diagnostic result comparison", digits = 5)
lm_results[, -5] %>% kable(caption = "Lagrange multiplier diagnostics for spatial dependence", digits = 5)
jarque_berra_results[, -5] %>% kable(caption = "Jarque Bera test",
digits = 5)
shapiro_results[, -4] %>% kable(caption = "Shapiro-Wilk normality test",
digits = 5)
model_outputs %>%
ggplot(aes(residual, color = model, fill = model)) +
geom_histogram(position = "dodge", alpha = 0.7, show.legend = F) +
facet_wrap(~model) +
scale_color_ptol() +
scale_fill_ptol() +
theme_light()
bp_results %>% kable(caption = "Breusch-Pagan test", digits = 5)
model_outputs %>%
ggplot(aes(fitted, residual, color = model, fill = model)) +
geom_point(alpha = 0.7, show.legend = F) +
facet_wrap(~model) +
scale_color_ptol() +
scale_fill_ptol() +
theme_light()
moran_results[, -c(7:8)] %>% kable(caption = "Moran I test under randomisation", digits = 5)
model %>% tidy() %>% kable(caption = "Ordinary least squares model (OLS)", digits = 5)
sp_error_model %>% tidy() %>% kable(caption = "Simultaneous autoregressive error model (SARerr)", digits = 5)
sp_lag_model %>% tidy() %>% kable(caption = "Simultaneous autoregressive lag model (SARlag)", digits = 5)
summary(car_model)$Coef %>% kable(caption = "Conditionally autoregressive model (CAR)", digits = 5)
gls_model <- gls(total_amount ~ pickup_longitude + pickup_latitude, correlation = corSpher(form = ~pickup_longitude + pickup_latitude, nugget = TRUE), data = taxi_sf)
bind_rows(bptest(gls_model) %>% broom::tidy(),
jarque.bera.test(gls_model$residuals) %>% broom::tidy()) %>%
knitr::kable()
jarque.bera.test(gls_model$residuals)
soilm <- taxi_data_sample %>%
select(row = pickup_longitude, column = pickup_latitude, total_amount) %>%
pivot_wider(names_from = "column", values_from = "total_amount") %>%
select(-row) %>%
as.matrix()
trend <- medpolish(soilm)
fit <- matrix(nrow = length(x), ncol = length(y))
for (i in seq_along(x)){
for (j in seq_along(y)){
fit[i,j] <- trend$row[i] + trend$col[j] + trend$overall
}}
|
/final project/scripts/data import.R
|
no_license
|
antoniojurlina/spatial_analysis
|
R
| false
| false
| 13,139
|
r
|
#-------- packages --------
library(tidyverse)
library(spdep)
library(ggthemes)
library(ggmap)
library(viridis)
library(lubridate)
library(gstat)
library(sp)
library(sf)
library(classInt)
library(lmtest)
library(tseries)
library(broom)
library(mgcv)
#-------- data and directory --------
paste0(here::here(), "/final project/data") %>% setwd()
fares <- read_csv("fares.csv")
trips <- read_csv("trips.csv")
taxi_data <- trips %>%
left_join(fares, by = c("medallion", "hack_license", "vendor_id", "pickup_datetime")) %>%
select(-hack_license, -vendor_id, -rate_code, -store_and_fwd_flag) %>%
filter(total_amount > 0) %>%
mutate(total_amount = log(total_amount)) %>%
filter(pickup_latitude >= 40.70, pickup_latitude <= 40.83,
pickup_longitude >= -74.025, pickup_longitude <= -73.93) %>%
mutate(hour = hour(pickup_datetime),
wday = wday(pickup_datetime, label = TRUE),
month = month(pickup_datetime, label = TRUE))
taxi_data <- taxi_data %>%
select(total_amount, pickup_longitude, pickup_latitude, passenger_count, trip_distance, wday, hour, month)
manhattan <- readRDS("manhattan.rds")
taxi_data_sample <- sample_n(taxi_data, 1000) %>%
select(total_amount, pickup_longitude, pickup_latitude, passenger_count, trip_distance, wday, hour, month)
spdf <- SpatialPointsDataFrame(coords = taxi_data[, 2:3], data = taxi_data,
proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
taxi_sf <- st_as_sf(spdf)
weights <- knn2nb(knearneigh(taxi_sf, k = 50))
weights <- nb2listw(weights, zero.policy = TRUE)
#-------- functions --------
## Fit an ordinary least-squares model with lm().
## data    : data frame holding the model variables
## formula : model formula, e.g. y ~ x1 + x2
## Returns the fitted "lm" object.
linear_regression <- function(data, formula){
lm(formula, data)
}
## Color helper for persp(): bins facet heights of a numeric matrix into
## n_colors interpolated palette steps.
##
## data     : numeric matrix of surface heights
## palette  : colors interpolated via colorRampPalette()
## n_colors : number of interpolation steps (default 100)
## Returns a color per facet: length (nrow(data)-1) * (ncol(data)-1).
persp_color <- function(data, palette, n_colors = 100) {
  ## FIX: replace purrr's deprecated is_numeric() (undefined in base R)
  ## with base is.numeric(); the test is scalar, so any(..., na.rm=TRUE)
  ## added nothing.
  if (!is.numeric(data)) {
    stop("data contains non-numeric values.")
  }
  n_rows <- nrow(data)
  n_cols <- ncol(data)
  jet.colors <- colorRampPalette(palette)
  color <- jet.colors(n_colors)
  z <- data
  ## Facet height = sum of the four corner heights; cut() bins these
  ## sums into n_colors intervals to pick a color per facet.
  zfacet <- z[-1, -1] + z[-1, -n_cols] + z[-n_rows, -1] + z[-n_rows, -n_cols]
  facetcol <- cut(zfacet, n_colors)
  return(color[facetcol])
}
#-------- visualization and summary--------
ggmap(manhattan, darken = 0.5) +
scale_fill_viridis(option = 'plasma') +
geom_bin2d(data = taxi_data_sample, aes(pickup_longitude, pickup_latitude), bins = 50, alpha = 0.6) +
labs(x = "longitude",
y = "latitude",
fill = "count")
taxi_data %>%
ggplot(aes(hour, total_amount, color = wday)) +
geom_boxplot() +
facet_wrap(~month) +
theme_linedraw()
#-------- krigging --------
set.seed(1234)
# GAM
gam <- gam(total_amount ~ s(pickup_longitude) + s(pickup_latitude) + passenger_count + s(trip_distance) + wday + hour + month, family = "gaussian", data = taxi_data)
cbind(fitted(gam), residuals(gam)) %>%
as_tibble() %>%
ggplot(aes(V1, V2)) +
geom_point()
# data
taxi_sf_complete <- bind_cols(taxi_sf, residuals(gam), fitted(gam)) %>%
rename("residuals" = "...10",
"fitted" = "...11")
sample_size <- floor(0.80*nrow(taxi_sf_complete))
select <- sample(seq_len(nrow(taxi_sf_complete)), size = sample_size)
test <- taxi_sf_complete[select,]
validation <- taxi_sf_complete[-select,]
combined_data <- bind_rows(test %>%
mutate(type = "test"),
validation %>%
mutate(type = "validation"))
# variograms
taxi_variogram <- variogram(residuals ~ 1,
data = select(taxi_sf_complete, -fitted),
cutoff = 10)
plot(taxi_variogram)
spherical_fit <- fit.variogram(taxi_variogram, vgm(0.05, "Sph", 5, 0.05))
plot(taxi_variogram, spherical_fit)
# ordinary kriging
geosp <- as_Spatial(select(taxi_sf_complete, -fitted))
prediction_grid <- spsample(geosp, nrow(taxi_sf), type = "regular")
test_sp <- as_Spatial(select(test, -fitted))
predict <- as_Spatial(select(validation, -fitted))
spherical_krige <- krige(formula = residuals ~ 1, locations=geosp, model=spherical_fit,
newdata=prediction_grid, nmax=15)
plot_data <- spherical_krige %>% as_tibble() %>%
mutate(fit = "spherical")
final_data <- bind_cols(head(taxi_sf_complete, -84), spherical_krige$var1.pred) %>%
mutate(fitted = fitted + ...12) %>%
select(-...12)
# validation
spherical_validation <- krige(total_amount ~ 1, locations=test_sp, newdata= validation, model=spherical_fit)
exponential_validation <- krige(total_amount ~ 1, locations=test_sp, newdata= validation, model=exponential_fit)
spherical_difference <- validation$total_amount - spherical_validation$var1.pred
exponential_difference <- validation$total_amount - exponential_validation$var1.pred
rmseSph <- sqrt(sum(spherical_difference^2)/length(spherical_difference))
MESph <- sum(spherical_difference/length(spherical_difference))
rmseExp <- sqrt(sum(exponential_difference^2)/length(exponential_difference))
MEExp <- sum(exponential_difference/length(exponential_difference))
# cross-validation
spherical_cross_validation <- krige.cv(ni ~ 1, test_sp, model=spherical_fit, nfold=nrow(test_sp))
exponential_cross_validation <- krige.cv(ni ~ 1, test_sp, model=exponential_fit, nfold=nrow(test_sp))
sphcvmean <- mean(spherical_cross_validation$residual)
expcvmean <- mean(exponential_cross_validation$residual)
sphcvrmse <- sqrt(mean(spherical_cross_validation$residual^2))
expcvrmse <- sqrt(mean(exponential_cross_validation$residual^2))
sphcvmsdr <- mean((spherical_cross_validation$residual^2)/ (spherical_cross_validation$var1.var))
expcvmsdr <- mean((exponential_cross_validation$residual^2)/ (exponential_cross_validation$var1.var))
ggplot(combined_data, aes(type, total_amount)) +
geom_boxplot(fill = "gold", color = "black") +
stat_summary(fun=mean, geom="point", shape = 21, size = 3, fill = "white") +
theme_par() +
theme(axis.title.x = element_blank())
ggplot(combined_data, aes(total_amount, color = type, fill = type)) +
geom_histogram(alpha = 0.8, show.legend = F) +
facet_wrap(~ type) +
theme_par() +
scale_color_ptol() +
scale_fill_ptol()
tibble(SSE = c(attr(spherical_fit, "SSErr"), attr(exponential_fit, "SSErr")),
`variogram fit` = c("spherical", "exponential")) %>%
knitr::kable(caption = "Sum of squared errors for 2 variogram model fits", digits = 5)
combined_data %>%
filter(type == "test") %>%
ggplot(aes(pickup_longitude, pickup_latitude)) +
geom_bin2d(aes(color=total_amount), bins = 50, alpha = 0.6) +
coord_equal() +
scale_color_gradient_tableau() +
theme_par() +
labs(color = "total amount") +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position = "bottom",
legend.key.width=unit(1.5,"cm"),
axis.text.y = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank())
final_data %>%
ggplot(aes(pickup_longitude, pickup_latitude)) +
geom_point(aes(color=total_amount), show.legend = FALSE) +
coord_equal() +
scale_color_gradient_tableau() +
theme_map()
plot_data %>%
ggplot(aes(x1, x2)) +
geom_tile(aes(fill=var1.var)) +
coord_equal() +
facet_wrap(~ fit) +
scale_fill_gradient_tableau() +
theme_par() +
labs(fill = "variance") +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position = "bottom",
legend.key.width=unit(1.5,"cm"),
axis.text.y = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank())
summary(spherical_krige)$data %>%
knitr::kable(caption = "Spherical kriging predicted values and variances", digits = 5)
#-------- autocorrelation --------
ggplot(final_data, aes(pickup_longitude, pickup_latitude, color = exp(fitted), alpha = exp(fitted)))+
geom_point(size = 0.1, show.legend = F) +
scale_color_gradient2_tableau() +
coord_fixed(ratio = 1.1) +
theme_map() +
labs(fill = "fare") +
theme(legend.position = "top")
ggplot(final_data) +
geom_histogram(aes(exp(total_amount)), bins = 100, position = "dodge") +
#geom_histogram(aes(exp(fitted)), bins = 100, alpha = 0.6, color = "red")
theme_minimal()
model <- taxi_data %>% linear_regression(total_amount ~ pickup_longitude + pickup_latitude + hour + wday + month + trip_distance + trip_time_in_secs)
model %>%
broom::tidy() %>%
knitr::kable()
model %>%
broom::glance() %>%
knitr::kable()
bind_rows(bptest(model) %>% broom::tidy(),
jarque.bera.test(model$residuals) %>% broom::tidy()) %>%
knitr::kable()
final_data %>%
mutate(residuals2 = total_amount - fitted) %>%
ggplot() +
geom_point(aes(x=fitted, y=residuals2)) +
theme_minimal() +
geom_hline(aes(yintercept = 0), color = "royalblue")
taxi_data %>%
bind_cols(model$residuals) %>%
rename("residuals" = "...21") %>%
ggplot(aes(pickup_longitude, pickup_latitude, color = residuals, alpha = residuals))+
geom_point(size = 0.1, show.legend = F) +
viridis::scale_color_viridis() +
coord_fixed(ratio = 1.1) +
theme_map() +
labs(fill = "residual") +
theme(legend.position = "top")
moran.test(model$residuals, listw = weights, zero.policy = TRUE) %>%
broom::tidy() %>%
knitr::kable()
moran <- moran.mc(model$residuals, nsim = 100, listw = weights, zero.policy = TRUE)
moran %>%
broom::tidy() %>%
knitr::kable()
moran %>%
plot()
#-------- models --------
formula <- as.formula(total_amount ~ pickup_longitude + pickup_latitude + hour + wday + month + trip_distance + trip_time_in_secs)
model <- glm(formula, family = "gaussian", taxi_data)
lapply(lm.LMtests(model, listw = weights, zero.policy = TRUE, test = "all"), tidy) %>%
bind_rows(.id = "test") -> lm_results
sp_error_model <- errorsarlm(model, data = taxi_sf, listw = weights, zero.policy = TRUE, na.action = na.omit)
sp_lag_model <- lagsarlm(formula, taxi_sf, listw = weights, zero.policy = TRUE)
car_model <- spautolm(model, listw = weights, family = "CAR", zero.policy = TRUE)
model_names <- c("OLS", "SARerr", "SARlag", "CAR")
models <- list(model, sp_error_model, sp_lag_model, car_model)
names(models) <- model_names
comparison <- tibble(model = model_names,
`Log Likelihood` = sapply(models, logLik),
AIC = sapply(models, AIC),
BIC = sapply(models, BIC))
lapply(models, function(x) moran.test(residuals(x), weights, zero.policy = TRUE) %>% tidy()) %>%
bind_rows(.id = "model") -> moran_results
lapply(models, function(x) jarque.bera.test(residuals(x)) %>% tidy()) %>%
bind_rows(.id = "model") -> jarque_berra_results
lapply(models, function(x) shapiro.test(residuals(x)) %>% tidy()) %>%
bind_rows(.id = "model") -> shapiro_results
bind_rows(models$OLS %>% bptest() %>% tidy(),
models$SARerr %>% bptest.sarlm() %>% tidy(),
models$SARlag %>% bptest.sarlm() %>% tidy()) %>%
mutate(model = c("OLS", "SARerr", "SARlag")) %>%
select(model, statistic, p.value, parameter) -> bp_results
model_outputs <- lapply(models, residuals) %>%
bind_rows(.id = "model") %>%
pivot_longer(cols = 2:191, names_to = "row", values_to = "residual") %>%
left_join(lapply(models, fitted) %>%
bind_rows(.id = "model") %>%
pivot_longer(cols = 2:191, names_to = "row", values_to = "fitted"),
by = c("model", "row"))
comparison %>% kable(caption = "Diagnostic result comparison", digits = 5)
lm_results[, -5] %>% kable(caption = "Lagrange multiplier diagnostics for spatial dependence", digits = 5)
jarque_berra_results[, -5] %>% kable(caption = "Jarque Bera test",
digits = 5)
shapiro_results[, -4] %>% kable(caption = "Shapiro-Wilk normality test",
digits = 5)
model_outputs %>%
ggplot(aes(residual, color = model, fill = model)) +
geom_histogram(position = "dodge", alpha = 0.7, show.legend = F) +
facet_wrap(~model) +
scale_color_ptol() +
scale_fill_ptol() +
theme_light()
bp_results %>% kable(caption = "Breusch-Pagan test", digits = 5)
model_outputs %>%
ggplot(aes(fitted, residual, color = model, fill = model)) +
geom_point(alpha = 0.7, show.legend = F) +
facet_wrap(~model) +
scale_color_ptol() +
scale_fill_ptol() +
theme_light()
moran_results[, -c(7:8)] %>% kable(caption = "Moran I test under randomisation", digits = 5)
model %>% tidy() %>% kable(caption = "Ordinary least squares model (OLS)", digits = 5)
sp_error_model %>% tidy() %>% kable(caption = "Simultaneous autoregressive error model (SARerr)", digits = 5)
sp_lag_model %>% tidy() %>% kable(caption = "Simultaneous autoregressive lag model (SARlag)", digits = 5)
summary(car_model)$Coef %>% kable(caption = "Conditionally autoregressive model (CAR)", digits = 5)
gls_model <- gls(total_amount ~ pickup_longitude + pickup_latitude, correlation = corSpher(form = ~pickup_longitude + pickup_latitude, nugget = TRUE), data = taxi_sf)
bind_rows(bptest(gls_model) %>% broom::tidy(),
jarque.bera.test(gls_model$residuals) %>% broom::tidy()) %>%
knitr::kable()
jarque.bera.test(gls_model$residuals)
soilm <- taxi_data_sample %>%
select(row = pickup_longitude, column = pickup_latitude, total_amount) %>%
pivot_wider(names_from = "column", values_from = "total_amount") %>%
select(-row) %>%
as.matrix()
trend <- medpolish(soilm)
fit <- matrix(nrow = length(x), ncol = length(y))
for (i in seq_along(x)){
for (j in seq_along(y)){
fit[i,j] <- trend$row[i] + trend$col[j] + trend$overall
}}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fix the RNG seed so resampling folds (and the audit metrics) are reproducible.
set.seed(1)
#:# data
# Download the "lupus" dataset from OpenML (requires network access).
dataset <- getOMLDataSet(data.name = "lupus")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data is inspected and used as-is.
head(dataset$data)
#:# model
# Binary classification of STATUS with a BART machine (100 trees, 25 burn-in
# iterations), predicting class probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "STATUS")
lrn = makeLearner("classif.bartMachine", par.vals = list(num_trees = 100, num_burn_in = 25), predict.type = "prob")
#:# hash
#:# 818e601221dccc5501618cd5d8f51482
# Digest of (task, learner) identifies this exact model configuration; it
# should match the hash recorded on the line above.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation; report accuracy, AUC, TNR, TPR, precision and F1.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record the R session (package versions, platform) for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_lupus/classification_STATUS/818e601221dccc5501618cd5d8f51482/code.R
|
no_license
|
lukaszbrzozowski/CaseStudies2019S
|
R
| false
| false
| 753
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed keeps the CV folds and audit metrics reproducible.
set.seed(1)
#:# data
# Fetch the "lupus" dataset from OpenML (network access required).
dataset <- getOMLDataSet(data.name = "lupus")
head(dataset$data)
#:# preprocessing
# No preprocessing step; the raw data is used directly.
head(dataset$data)
#:# model
# Classify STATUS with a BART machine (100 trees, 25 burn-in iterations),
# with probability predictions enabled.
task = makeClassifTask(id = "task", data = dataset$data, target = "STATUS")
lrn = makeLearner("classif.bartMachine", par.vals = list(num_trees = 100, num_burn_in = 25), predict.type = "prob")
#:# hash
#:# 818e601221dccc5501618cd5d8f51482
# Digest of (task, learner) fingerprints this model configuration; it should
# reproduce the hash recorded on the line above.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold CV with accuracy, AUC, TNR, TPR, precision and F1 measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Capture the R session (package versions, platform) for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
# Produce a calibration plot for heterogeneous-treatment-effect estimates.
#
# Bins predicted treatment effects (PTEs) from a causal forest into 20
# quantile buckets, estimates the experimental ITT within each bucket, and
# plots bucket ITTs against the bucket's mean PTE. A 45-degree line marks
# perfect calibration; the subtitle reports a PTE x treatment interaction
# test from a separate linear model.
#
# forest              : list with a `tau_df` element of per-sid grf
#                       predictions (column `grf_tau_hat`)
# outcome_of_interest : character name of the outcome column in `master`
# master              : analysis data frame (defaults to global master_pool)
# Returns a ggplot object.
make_calibration_plot <- function(forest, outcome_of_interest,
                                  master = master_pool) {
  # -> get tau's
  tau_df <- forest$tau_df

  # -> add ventiles: quantile-based equal-frequency binning into n buckets
  qcut <- function(x, n) {
    cut(x, quantile(x, seq(0, 1, length = n + 1)), labels = seq_len(n),
        include.lowest = TRUE)
  }
  n_calibration_quantiles <- 20
  calibration_df <- tau_df %>%
    mutate(calibration_quantiles = grf_tau_hat %>% qcut(n_calibration_quantiles))
  # Attach the quantile assignment to the analysis data; blocknum becomes a
  # factor so it enters the regressions as fixed effects.
  master <- master %>%
    left_join(calibration_df, by = 'sid') %>%
    mutate(blocknum = as_factor(blocknum))

  # Baseline covariates controlled for in every ITT regression below.
  itt_vars_calibration <- c("d13andunder","d14","d15","d16","d17andover","dlearningdisabled","dfreelunch",
                            "dblack","dhispanic","dother","dgrade9","dgrade10","gpa_pre_zeros",
                            "numAs_pre","numBs_pre","numCs_pre","numDs_pre","numFs_pre","missing_gpa_pre",
                            "days_absent_pre_zeros","missing_attend_pre","mathxil_z_pre_np_zeros","readxil_z_pre_np_zeros",
                            "mathxil_z_pre_missing","readxil_z_pre_missing","oss_dis_pre_zeros","incidents_pre_zeros",
                            "any_arrests_pre","violent_pre","property_pre","drug_pre", "blocknum")

  # Per-quantile ITT: coefficients on the dmatch x quantile interaction dummies.
  quantile_itts <- lm(paste0(outcome_of_interest, " ~ ",
                             "dmatch:calibration_quantiles + calibration_quantiles +",
                             paste(itt_vars_calibration, collapse = '+')),
                      data = master) %>%
    broom::tidy() %>%
    filter(term %>% startsWith('dmatch:')) %>%
    mutate(term = term %>% stringr::str_replace('dmatch:calibration_quantiles', "")) %>%
    rename(calibration_quantiles = term)

  # get avg pte for each bucket
  avg_pte_df <- calibration_df %>%
    group_by(calibration_quantiles) %>%
    summarise(pte = mean(grf_tau_hat))

  # get the statistic from the linear test; this will be the plot subtitle
  calibration_test_statistic <- lm(paste0(outcome_of_interest, " ~ ",
                                          "dmatch + grf_tau_hat + grf_tau_hat*dmatch +",
                                          paste0(itt_vars_calibration, collapse = "+")),
                                   data = master) %>%
    broom::tidy() %>%
    filter(term %in% "dmatch:grf_tau_hat")
  calibration_test_note <- paste0("PTE x Treatment interaction estimate is ",
                                  calibration_test_statistic$estimate %>% round(3),
                                  " (", calibration_test_statistic$std.error %>% round(3), "), with a p-value of ",
                                  calibration_test_statistic$p.value %>% round(3))

  # FIX: make the join key explicit -- the original relied on a natural
  # join, which emits a "Joining, by = ..." message and silently changes
  # behavior if the column sets of the two frames ever drift.
  quantile_calibration_plot <- quantile_itts %>%
    left_join(avg_pte_df, by = "calibration_quantiles") %>%
    ggplot(aes(x = pte, y = estimate)) +
    geom_point() +
    geom_smooth(method = 'lm', se = F, color = 'black', linetype = 'dashed') +
    geom_abline(intercept = 0, slope = 1) +
    xlab("Average Quantile Individual PTE") +
    ylab("ITT Estimate for Quantile") +
    ggtitle(paste0("Calibration Plot: ", outcome_of_interest),
            subtitle = calibration_test_note)
  return(quantile_calibration_plot)
}
|
/analysis_utils/calibration_plots.R
|
no_license
|
noahrsebek/match_ml_hte_mirror
|
R
| false
| false
| 3,273
|
r
|
# Produce a calibration plot for heterogeneous-treatment-effect estimates:
# bin predicted treatment effects (PTEs) from a causal forest into 20
# quantile buckets, estimate the experimental ITT within each bucket, and
# plot bucket ITTs against mean bucket PTE (45-degree line = perfect
# calibration). Returns a ggplot object.
#
# forest              : list with a `tau_df` element of per-sid grf
#                       predictions (column `grf_tau_hat`)
# outcome_of_interest : character name of the outcome column in `master`
# master              : analysis data frame (defaults to global master_pool)
# outcome_of_interest <- names(final_forests_missingness)[1]
make_calibration_plot <- function(forest, outcome_of_interest,
master=master_pool){
# -> get tau's
tau_df <- forest$tau_df
# -> add ventiles
# Quantile-based equal-frequency binning of x into n labelled buckets.
qcut <- function(x, n) {
cut(x, quantile(x, seq(0, 1, length = n + 1)), labels = seq_len(n),
include.lowest = TRUE)
}
n_calibration_quantiles <- 20
calibration_df <- tau_df %>% mutate(calibration_quantiles = grf_tau_hat %>% qcut(n_calibration_quantiles))
# Attach the quantile assignment to the analysis data; blocknum becomes a
# factor so it enters the regressions as fixed effects.
master <- master %>% left_join(calibration_df, by='sid') %>% mutate(blocknum = as_factor(blocknum))
# Baseline covariates controlled for in every ITT regression below.
itt_vars_calibration <- c("d13andunder","d14","d15","d16","d17andover","dlearningdisabled","dfreelunch",
"dblack","dhispanic","dother","dgrade9","dgrade10","gpa_pre_zeros",
"numAs_pre","numBs_pre","numCs_pre","numDs_pre","numFs_pre","missing_gpa_pre",
"days_absent_pre_zeros","missing_attend_pre","mathxil_z_pre_np_zeros","readxil_z_pre_np_zeros",
"mathxil_z_pre_missing","readxil_z_pre_missing","oss_dis_pre_zeros","incidents_pre_zeros",
"any_arrests_pre","violent_pre","property_pre","drug_pre", "blocknum")
# add
# Per-quantile ITT: coefficients on the dmatch x quantile interaction terms,
# relabelled so `calibration_quantiles` holds the bucket number.
quantile_itts <- lm(paste0(outcome_of_interest, " ~ ",
"dmatch:calibration_quantiles + calibration_quantiles +",
paste(itt_vars_calibration, collapse='+')),
data=master) %>% broom::tidy() %>% filter(term %>% startsWith('dmatch:')) %>%
mutate( term = term %>% stringr::str_replace('dmatch:calibration_quantiles', "")) %>%
rename(calibration_quantiles = term)
# get avg pte for each bucket
avg_pte_df <- calibration_df %>% group_by(calibration_quantiles) %>% summarise(pte = mean(grf_tau_hat))
# get the statistic from the linear test; this will be the plot subtitle
calibration_test_statistic <- lm(paste0(outcome_of_interest, " ~ ",
"dmatch + grf_tau_hat + grf_tau_hat*dmatch +",
paste0(itt_vars_calibration, collapse="+")),
data=master) %>% broom::tidy() %>% filter(term %in% "dmatch:grf_tau_hat")
calibration_test_note <- paste0("PTE x Treatment interaction estimate is ",
calibration_test_statistic$estimate %>% round(3),
" (", calibration_test_statistic$std.error %>% round(3), "), with a p-value of ",
calibration_test_statistic$p.value %>% round(3))
# NOTE(review): this left_join relies on a natural join (no `by`); it works
# because the only shared column is calibration_quantiles, but an explicit
# key would be safer -- confirm before changing.
quantile_calibration_plot <- quantile_itts %>% left_join(avg_pte_df) %>%
ggplot(aes(x=pte, y=estimate)) +
geom_point() +
geom_smooth(method='lm', se=F, color='black', linetype = 'dashed') +
geom_abline(intercept = 0, slope = 1) +
xlab("Average Quantile Individual PTE") +
ylab("ITT Estimate for Quantile") +
ggtitle(paste0("Calibration Plot: ", outcome_of_interest),
subtitle = calibration_test_note)
return(quantile_calibration_plot)
}
|
#' @include widget.r serializer.r
NULL
#' Widget_Function
#'
#' R6 widget that exposes a user-defined R function (looked up by name in
#' the global environment) to the client side.  It answers 'sync' events by
#' sending the function's signature and 'invoke' events by calling the
#' function with client-supplied arguments and sending back the serialized
#' result.
Widget_Function <- R6Class(
    'Widget_Function',
    inherit = Widget,
    public = list(
        # Serializer used to encode invocation results before sending.
        serializer = NULL,
        # Optional result limit forwarded with signature/invocation handling
        # (semantics defined by the serializer / parent Widget -- TODO confirm).
        limit = NULL,
        # Name of the backing function in .GlobalEnv.
        function_name = NULL,

        # Handle a backbone sync message carrying `function_name` and/or
        # `limit` in its sync data.
        handle_backbone = function(msg) {
            msg_limit <- msg$sync_data$limit
            msg_name <- msg$sync_data$function_name
            if(!is.null(msg_name)) {
                self$register_function(msg_name)
            } else {
                log_info("No name value provided for Widget_Function")
            }
            if(!is.null(msg_limit)) {
                self$register_limit(msg_limit)
            } else {
                log_info("No limit value provided for Widget_Function")
            }
        },

        # Dispatch custom comm events: 'sync' re-sends the signature,
        # 'invoke' calls the function with the message's arguments.
        handle_custom = function(msg) {
            if(!is.null(msg$event)) {
                if(msg$event == 'sync') {
                    self$send_signature(self$function_name, self$limit)
                } else if (msg$event == 'invoke') {
                    self$handle_invoke(msg, self$function_name, self$limit)
                } else {
                    log_info(paste("Unhandled custom event: ", msg$event))
                }
            } else {
                # Fixed copy/paste: this class is Widget_Function, not
                # Widget_Dataframe.
                log_info("No event value in custom Widget_Function comm message")
            }
        },

        # Invoke `func_name` with msg$args; on success send the result and
        # an OK status, otherwise report an error to the client and the log.
        handle_invoke = function(msg, func_name, limit) {
            if(!is.null(msg$args)) {
                result <- NULL
                tryCatch({
                    result <- self$invoke_function(func_name, msg$args, limit)
                    self$send_update("result", result)
                    self$send_ok()
                }, error = function(e) {
                    err_msg <- paste("Error invoking function", func_name)
                    self$send_error(err_msg)
                    log_error(e)
                    log_error(err_msg)
                })
            } else {
                err_msg <- "No arguments were provided for Widget_Function invocation!"
                self$send_error(err_msg)
                log_info(err_msg)
            }
        },

        # Record the function name and push its signature to the client.
        register_function = function(func_name) {
            self$function_name <- func_name
            response <- self$send_signature(func_name, self$limit)
            self$handle_function_response(response)
        },

        # Look up `func_name` in the user's global environment, merge the
        # client args with the function's declared defaults, call it, and
        # return the serialized result.
        invoke_function = function(func_name, args, limit=self$limit) {
            func <- get(func_name, envir = .GlobalEnv)
            converted_args <- self$convert_args(func, args)
            # convert_args() yields a named list in formal-parameter order.
            result <- do.call(func, converted_args)
            serialized_result <- self$serializer$serialize(result)
            return (serialized_result)
        },

        # Resolve the function's formal parameters against the client args:
        # a required parameter (no default) must come from the client; an
        # optional one uses the client value when present, else its default.
        # Values are coerced to the class of the declared default where a
        # simple mapping exists.
        convert_args = function(func, args) {
            func_param_list <- formals(func)
            the_converted_args <- list()
            for (param in names(func_param_list)) {
                default <- func_param_list[[param]]
                is_required <- self$func_variable_is_required(default)
                if(is_required || param %in% names(args)) {
                    # Required, or explicitly supplied by the client.
                    client_arg <- args[[param]]
                } else {
                    # Optional and not supplied: fall back to the default.
                    client_arg <- default
                }
                the_converted_args[[param]] <- switch(
                    class(default),
                    numeric = as.numeric(client_arg),
                    character = as.character(client_arg),
                    logical = as.logical(client_arg),
                    client_arg
                )
            }
            return (the_converted_args)
        },

        # Compute and send the function signature; returns TRUE on success
        # or an error-message string (consumed by handle_function_response).
        send_signature = function(func_name, limit) {
            signature <- self$get_signature(func_name)
            if(!is.null(signature)) {
                self$send_update("signature", signature)
                return (TRUE)
            } else {
                # Fixed typo: "determing" -> "determine".
                err_msg <- paste("Could not determine signature for function:", func_name)
                log_info(err_msg)
                return (err_msg)
            }
        },

        # A formal is "required" when it has no default value, i.e. its
        # default is the empty symbol.  Checking is.name() first avoids
        # comparing non-scalar defaults (e.g. a `list()` call) with `==`.
        func_variable_is_required = function(var) {
            return (is.name(var) && as.character(var) == '')
        },

        # Describe the signature of `func_name` as a named list: for each
        # parameter, whether it is required, its default value (an empty
        # list when required) and a client-friendly type name.
        get_signature = function(func_name) {
            # Built as `sig` rather than `names` so base::names() is not
            # shadowed by a non-function binding.
            sig <- list()
            tryCatch({
                func <- get(func_name, envir = .GlobalEnv)
                func_param_list <- formals(func)
                for(param in names(func_param_list)) {
                    sig[[param]] <- list()
                    default <- func_param_list[[param]]
                    is_required <- self$func_variable_is_required(default)
                    sig[[param]][['required']] <- is_required
                    if(!is_required) {
                        sig[[param]][['value']] <- default
                    } else {
                        sig[[param]][['value']] <- list()
                    }
                    sig[[param]][['type']] <- switch(
                        class(default),
                        numeric = "Number",
                        character = "String",
                        logical = "Boolean",
                        list = "Array",
                        class(default)
                    )
                }
            }, error = function(e) {
                # Fixed: the original logged an undefined `err_msg` here,
                # which itself raised an error inside the handler.
                log_error(paste("Error getting signature of function:", func_name))
                log_error(conditionMessage(e))
            })
            return (sig)
        },

        # @param comm        comm channel consumed by the parent Widget.
        # @param serializer  serializer used for invocation results.
        initialize = function(comm, serializer) {
            super$initialize(comm)
            self$serializer <- serializer
        }
    )
)
|
/kernel-r/declarativewidgets/R/widget_function.r
|
permissive
|
marami52/declarativewidgets
|
R
| false
| false
| 7,062
|
r
|
#' @include widget.r serializer.r
NULL
#' Widget_Function
#'
#' R6 widget that exposes a user-defined R function (looked up by name in
#' the global environment) to the client side.  It answers 'sync' events by
#' sending the function's signature and 'invoke' events by calling the
#' function with client-supplied arguments and sending back the serialized
#' result.
Widget_Function <- R6Class(
    'Widget_Function',
    inherit = Widget,
    public = list(
        # Serializer used to encode invocation results before sending.
        serializer = NULL,
        # Optional result limit forwarded with signature/invocation handling
        # (semantics defined by the serializer / parent Widget -- TODO confirm).
        limit = NULL,
        # Name of the backing function in .GlobalEnv.
        function_name = NULL,

        # Handle a backbone sync message carrying `function_name` and/or
        # `limit` in its sync data.
        handle_backbone = function(msg) {
            msg_limit <- msg$sync_data$limit
            msg_name <- msg$sync_data$function_name
            if(!is.null(msg_name)) {
                self$register_function(msg_name)
            } else {
                log_info("No name value provided for Widget_Function")
            }
            if(!is.null(msg_limit)) {
                self$register_limit(msg_limit)
            } else {
                log_info("No limit value provided for Widget_Function")
            }
        },

        # Dispatch custom comm events: 'sync' re-sends the signature,
        # 'invoke' calls the function with the message's arguments.
        handle_custom = function(msg) {
            if(!is.null(msg$event)) {
                if(msg$event == 'sync') {
                    self$send_signature(self$function_name, self$limit)
                } else if (msg$event == 'invoke') {
                    self$handle_invoke(msg, self$function_name, self$limit)
                } else {
                    log_info(paste("Unhandled custom event: ", msg$event))
                }
            } else {
                # Fixed copy/paste: this class is Widget_Function, not
                # Widget_Dataframe.
                log_info("No event value in custom Widget_Function comm message")
            }
        },

        # Invoke `func_name` with msg$args; on success send the result and
        # an OK status, otherwise report an error to the client and the log.
        handle_invoke = function(msg, func_name, limit) {
            if(!is.null(msg$args)) {
                result <- NULL
                tryCatch({
                    result <- self$invoke_function(func_name, msg$args, limit)
                    self$send_update("result", result)
                    self$send_ok()
                }, error = function(e) {
                    err_msg <- paste("Error invoking function", func_name)
                    self$send_error(err_msg)
                    log_error(e)
                    log_error(err_msg)
                })
            } else {
                err_msg <- "No arguments were provided for Widget_Function invocation!"
                self$send_error(err_msg)
                log_info(err_msg)
            }
        },

        # Record the function name and push its signature to the client.
        register_function = function(func_name) {
            self$function_name <- func_name
            response <- self$send_signature(func_name, self$limit)
            self$handle_function_response(response)
        },

        # Look up `func_name` in the user's global environment, merge the
        # client args with the function's declared defaults, call it, and
        # return the serialized result.
        invoke_function = function(func_name, args, limit=self$limit) {
            func <- get(func_name, envir = .GlobalEnv)
            converted_args <- self$convert_args(func, args)
            # convert_args() yields a named list in formal-parameter order.
            result <- do.call(func, converted_args)
            serialized_result <- self$serializer$serialize(result)
            return (serialized_result)
        },

        # Resolve the function's formal parameters against the client args:
        # a required parameter (no default) must come from the client; an
        # optional one uses the client value when present, else its default.
        # Values are coerced to the class of the declared default where a
        # simple mapping exists.
        convert_args = function(func, args) {
            func_param_list <- formals(func)
            the_converted_args <- list()
            for (param in names(func_param_list)) {
                default <- func_param_list[[param]]
                is_required <- self$func_variable_is_required(default)
                if(is_required || param %in% names(args)) {
                    # Required, or explicitly supplied by the client.
                    client_arg <- args[[param]]
                } else {
                    # Optional and not supplied: fall back to the default.
                    client_arg <- default
                }
                the_converted_args[[param]] <- switch(
                    class(default),
                    numeric = as.numeric(client_arg),
                    character = as.character(client_arg),
                    logical = as.logical(client_arg),
                    client_arg
                )
            }
            return (the_converted_args)
        },

        # Compute and send the function signature; returns TRUE on success
        # or an error-message string (consumed by handle_function_response).
        send_signature = function(func_name, limit) {
            signature <- self$get_signature(func_name)
            if(!is.null(signature)) {
                self$send_update("signature", signature)
                return (TRUE)
            } else {
                # Fixed typo: "determing" -> "determine".
                err_msg <- paste("Could not determine signature for function:", func_name)
                log_info(err_msg)
                return (err_msg)
            }
        },

        # A formal is "required" when it has no default value, i.e. its
        # default is the empty symbol.  Checking is.name() first avoids
        # comparing non-scalar defaults (e.g. a `list()` call) with `==`.
        func_variable_is_required = function(var) {
            return (is.name(var) && as.character(var) == '')
        },

        # Describe the signature of `func_name` as a named list: for each
        # parameter, whether it is required, its default value (an empty
        # list when required) and a client-friendly type name.
        get_signature = function(func_name) {
            # Built as `sig` rather than `names` so base::names() is not
            # shadowed by a non-function binding.
            sig <- list()
            tryCatch({
                func <- get(func_name, envir = .GlobalEnv)
                func_param_list <- formals(func)
                for(param in names(func_param_list)) {
                    sig[[param]] <- list()
                    default <- func_param_list[[param]]
                    is_required <- self$func_variable_is_required(default)
                    sig[[param]][['required']] <- is_required
                    if(!is_required) {
                        sig[[param]][['value']] <- default
                    } else {
                        sig[[param]][['value']] <- list()
                    }
                    sig[[param]][['type']] <- switch(
                        class(default),
                        numeric = "Number",
                        character = "String",
                        logical = "Boolean",
                        list = "Array",
                        class(default)
                    )
                }
            }, error = function(e) {
                # Fixed: the original logged an undefined `err_msg` here,
                # which itself raised an error inside the handler.
                log_error(paste("Error getting signature of function:", func_name))
                log_error(conditionMessage(e))
            })
            return (sig)
        },

        # @param comm        comm channel consumed by the parent Widget.
        # @param serializer  serializer used for invocation results.
        initialize = function(comm, serializer) {
            super$initialize(comm)
            self$serializer <- serializer
        }
    )
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_amm_from_matrix.R
\name{plot_amm_from_matrix}
\alias{plot_amm_from_matrix}
\title{plot an ancestry matrix (or multiple such matrices) from its (their) matrix form}
\usage{
plot_amm_from_matrix(X)
}
\arguments{
\item{X}{input tibble with a factor or character column \code{ID} that gives
the "name" of the ancestry matrix that will be used if you want to facet
over the values in \code{ID}. And also \code{X} must have a list column \code{anc_match_matrix} each
element of which is a logical ancestry match matrix. \code{X} may have a list column
of tibbles called \code{psa_tibs} that says which cells are the primary shared ancestors.}
}
\value{
\code{plot_amm_from_matrix()} returns a ggplot object: each facet is an image of the
ancestry match matrix. It is facet-wrapped over the values in the ID column of \code{X}.
}
\description{
For illustration purposes, if you want to simply plot an ancestry
matrix (or several) to show particular values, then this is the
handy function for you.
}
\examples{
# get some input: all the 2-generation AMMs in `example_amms`
X <- example_amms[stringr::str_detect(names(example_amms), "2gen$")] \%>\%
tibble::enframe(name = "ID", value = "anc_match_matrix")
# plot those
g <- plot_amm_from_matrix(X) +
ggplot2::facet_wrap(~ ID)
}
|
/man/plot_amm_from_matrix.Rd
|
no_license
|
cran/CKMRpop
|
R
| false
| true
| 1,365
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_amm_from_matrix.R
\name{plot_amm_from_matrix}
\alias{plot_amm_from_matrix}
\title{plot an ancestry matrix (or multiple such matrices) from its (their) matrix form}
\usage{
plot_amm_from_matrix(X)
}
\arguments{
\item{X}{input tibble with a factor or character column \code{ID} that gives
the "name" of the ancestry matrix that will be used if you want to facet
over the values in \code{ID}. And also \code{X} must have a list column \code{anc_match_matrix} each
element of which is a logical ancestry match matrix. \code{X} may have a list column
of tibbles called \code{psa_tibs} that says which cells are the primary shared ancestors.}
}
\value{
\code{plot_amm_from_matrix()} returns a ggplot object: each facet is an image of the
ancestry match matrix. It is facet-wrapped over the values in the ID column of \code{X}.
}
\description{
For illustration purposes, if you want to simply plot an ancestry
matrix (or several) to show particular values, then this is the
handy function for you.
}
\examples{
# get some input: all the 2-generation AMMs in `example_amms`
X <- example_amms[stringr::str_detect(names(example_amms), "2gen$")] \%>\%
tibble::enframe(name = "ID", value = "anc_match_matrix")
# plot those
g <- plot_amm_from_matrix(X) +
ggplot2::facet_wrap(~ ID)
}
|
# Packages required by the plotting scripts.
list.of.packages <- c("ggplot2", "showtext", "reshape2", "plyr", "stringr")

# Install any of `packages` that are not yet present, then attach them all.
ensure.packages <- function(packages=list.of.packages) {
    installed <- installed.packages()[, "Package"]
    missing <- packages[!(packages %in% installed)]
    if (length(missing)) {
        install.packages(missing)
    }
    for (pkg in packages) {
        library(pkg, character.only=TRUE)
    }
}
ensure.packages()
# Raw confusion table.  Each row reads:
#   keyword  its-vowel  confusable-vowel  confusable-keyword
# e.g. "beat /i:/" paired with "bit /ɪ/".
bvd_raw = "
beat i: ɪ bit
poot u: ʊ put
bot ɒ ɑ: bart
bot ɒ ʌ but
bert ɜ: ɑ: bart
but ʌ ɑ: bart
bird ɜ: eə bared
boat əʊ aʊ bout
bait eɪ e bet
bait eɪ aɪ bite
bite aɪ ɪ bit
bat æ ʌ but
beard ɪə eə bared
buoyed ɔɪ ɔ: board
"
# Split each row into its four whitespace-separated fields
# (V2=word1, V3=vowel1, V4=vowel2, V5=word2; V1 is the full match).
matches <- as.data.frame(
    str_match_all(bvd_raw, "([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)")[[1]])
# Named lookup word -> vowel symbol.  `{}` evaluates to NULL, so `bvd`
# starts empty and grows via the `<<-` assignments inside apply().
bvd <- {}
invisible(apply(matches, 1, FUN=function(row){
    bvd[row["V2"]] <<- row["V3"]
    bvd[row["V5"]] <<- row["V4"]
}))
# Raw list of hVd keywords with their vowels in /slashes/.
hvd_raw <- "heed /i:/ hid /ɪ/ head /e/ heard /ɜ:/ had /æ/ hud /ʌ/ hard /ɑ:/ hod /ɒ/ hoard /ɔ:/ whod /u:/ hood /ʊ/"
# Extract (word, vowel) pairs: V2 is the word, V3 the vowel symbol.
matches <- as.data.frame(
    str_match_all(hvd_raw, "([a-z]+)\\s+/([^/]{1,2})/")[[1]])
# Named lookup word -> vowel; `{}` evaluates to NULL (empty start).
hvd <- {}
# The hVd words are also collected, in order, under `monophthongs`.
monophthongs <- c()
invisible(apply(matches, 1, FUN=function(row){
    hvd[row["V2"]] <<- row["V3"]
    monophthongs <<- c(monophthongs, row["V2"])
}))
# Build a hashed environment from a braced block of `name = value`
# expressions, e.g.  d <- dict({ a = 1; b = "x" })  gives d[["a"]] == 1.
#
# content  unevaluated braced block of `name = value` pairs (optional).
# Returns a new hashed environment with one binding per pair.
# Note: values are evaluated with eval() in this function's frame, so
# they should be self-contained literals/expressions.
dict <- function(content=NULL) {
    dct <- new.env(hash=TRUE)
    if (!is.null(content)) {
        # Recover the literal expressions passed as `content` without
        # relying on the block's evaluated value.
        expressions <- parse(text = deparse(substitute(content)))[[1]]
        # Element 1 is the `{` symbol; assignments start at index 2.
        # seq_len() (rather than seq(2, length(...))) keeps the loop
        # empty for a length-1 call instead of iterating c(2, 1).
        for (i in seq_len(length(expressions) - 1L) + 1L) {
            expression <- expressions[[i]]
            name <- as.character(expression[[2]])
            value <- expression[[3]]
            dct[[name]] <- eval(value)
        }
    }
    return (dct)
}
# Set up fonts
# Register the project fonts with showtext so plot output can use them;
# paths are relative to the script's working directory.
font_add("Cabin", "../fonts/Cabin/Cabin-Regular.ttf")
font_add("Cabin-Italic", "../fonts/Cabin/Cabin-Italic.ttf")
font_add("DejaVuSans", "../fonts/dejavu-fonts-ttf-2.37/ttf/DejaVuSans.ttf")
# Route all subsequent text rendering through showtext automatically.
showtext_auto()
# Convert an angle from degrees to radians.
rad <- function(a) {
    a * (pi / 180)
}
# Lobanov (z-score) vowel normalisation.
#
# For each group (columns named in `group`), formant values are centred on
# the group mean and scaled by the group standard deviation; the transform
# is then applied vowel-by-vowel.  With reduce=TRUE each vowel collapses
# to the median of its normalised tokens.
#
# df      data frame of formant measurements
# f1, f2  column names holding the first/second formant values
# vowel   column name identifying each token's vowel
# group   grouping columns (e.g. speaker) over which means/sds are
#         computed; the default c() normalises over the whole data frame
# reduce  collapse each vowel to a single median point?
# Returns a data frame with normalised f1/f2 columns (plus the grouping
# and vowel columns added by plyr::ddply).
lobanov <- function(df, f1="f1", f2="f2", vowel="vowel", group=c(), reduce=TRUE) {
    ddply(df, group, function(df.grp) {
        # Group-level statistics driving the z-transform.
        f1.grp <- df.grp[,f1]
        f2.grp <- df.grp[,f2]
        mn.f1.grp <- mean(f1.grp, na.rm=TRUE)
        mn.f2.grp <- mean(f2.grp, na.rm=TRUE)
        sd.f1.grp <- sd(f1.grp, na.rm=TRUE)
        sd.f2.grp <- sd(f2.grp, na.rm=TRUE)
        ddply(df.grp, vowel, function(df.vwl) {
            f1.vwl <- df.vwl[,f1]
            f2.vwl <- df.vwl[,f2]
            # z-score against the *group* statistics, not the vowel's own.
            f1.vwl.nrm <- (f1.vwl - mn.f1.grp) / sd.f1.grp
            f2.vwl.nrm <- (f2.vwl - mn.f2.grp) / sd.f2.grp
            if (reduce) {
                f1.vwl.nrm <- median(f1.vwl.nrm)
                f2.vwl.nrm <- median(f2.vwl.nrm)
            }
            data.frame(f1=f1.vwl.nrm, f2=f2.vwl.nrm)
        })
    })
}
# Environment holding the colour palette shared by the plotting scripts.
colors <- new.env()
colors$pre <- "#F8BBD0"
colors$post <- "#E91E63"
colors$arrow <- "#444444"
colors$hvd <- "#176FC1" # "#0288D1"
colors$ssbe <- "#cccccc"
colors$ssbe.label <- "#cccccc"
colors$panel.background <- "#eeeeee"
colors$panel.grid <- "#ffffff"
# Output / input directories for generated figures and source data.
outDir <- "images"
inDir <- "data"
dataDir <- inDir
# Plot resolution, overridable via the DPI environment variable.
# Sys.getenv() returns "" when the variable is unset; as.numeric("")
# yields NA with a coercion warning, so suppress the warning and fall
# back to the default below.
DPI <- suppressWarnings(as.numeric(Sys.getenv("DPI")))
if (is.na(DPI)) {
    DPI <- 300
}
cat(sprintf("Using %d dpi\n", DPI))
|
/src/plots/R/settings.R
|
no_license
|
mwibrow/baap-2018-poster
|
R
| false
| false
| 3,044
|
r
|
# Packages required by the plotting scripts.
list.of.packages <- c("ggplot2", "showtext", "reshape2", "plyr", "stringr")

# Install any of `packages` that are not yet present, then attach them all.
ensure.packages <- function(packages=list.of.packages) {
    installed <- installed.packages()[, "Package"]
    missing <- packages[!(packages %in% installed)]
    if (length(missing)) {
        install.packages(missing)
    }
    for (pkg in packages) {
        library(pkg, character.only=TRUE)
    }
}
ensure.packages()
# Raw confusion table.  Each row reads:
#   keyword  its-vowel  confusable-vowel  confusable-keyword
# e.g. "beat /i:/" paired with "bit /ɪ/".
bvd_raw = "
beat i: ɪ bit
poot u: ʊ put
bot ɒ ɑ: bart
bot ɒ ʌ but
bert ɜ: ɑ: bart
but ʌ ɑ: bart
bird ɜ: eə bared
boat əʊ aʊ bout
bait eɪ e bet
bait eɪ aɪ bite
bite aɪ ɪ bit
bat æ ʌ but
beard ɪə eə bared
buoyed ɔɪ ɔ: board
"
# Split each row into its four whitespace-separated fields
# (V2=word1, V3=vowel1, V4=vowel2, V5=word2; V1 is the full match).
matches <- as.data.frame(
    str_match_all(bvd_raw, "([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)")[[1]])
# Named lookup word -> vowel symbol.  `{}` evaluates to NULL, so `bvd`
# starts empty and grows via the `<<-` assignments inside apply().
bvd <- {}
invisible(apply(matches, 1, FUN=function(row){
    bvd[row["V2"]] <<- row["V3"]
    bvd[row["V5"]] <<- row["V4"]
}))
# Raw list of hVd keywords with their vowels in /slashes/.
hvd_raw <- "heed /i:/ hid /ɪ/ head /e/ heard /ɜ:/ had /æ/ hud /ʌ/ hard /ɑ:/ hod /ɒ/ hoard /ɔ:/ whod /u:/ hood /ʊ/"
# Extract (word, vowel) pairs: V2 is the word, V3 the vowel symbol.
matches <- as.data.frame(
    str_match_all(hvd_raw, "([a-z]+)\\s+/([^/]{1,2})/")[[1]])
# Named lookup word -> vowel; `{}` evaluates to NULL (empty start).
hvd <- {}
# The hVd words are also collected, in order, under `monophthongs`.
monophthongs <- c()
invisible(apply(matches, 1, FUN=function(row){
    hvd[row["V2"]] <<- row["V3"]
    monophthongs <<- c(monophthongs, row["V2"])
}))
# Build a hashed environment from a braced block of `name = value`
# expressions, e.g.  d <- dict({ a = 1; b = "x" })  gives d[["a"]] == 1.
#
# content  unevaluated braced block of `name = value` pairs (optional).
# Returns a new hashed environment with one binding per pair.
# Note: values are evaluated with eval() in this function's frame, so
# they should be self-contained literals/expressions.
dict <- function(content=NULL) {
    dct <- new.env(hash=TRUE)
    if (!is.null(content)) {
        # Recover the literal expressions passed as `content` without
        # relying on the block's evaluated value.
        expressions <- parse(text = deparse(substitute(content)))[[1]]
        # Element 1 is the `{` symbol; assignments start at index 2.
        # seq_len() (rather than seq(2, length(...))) keeps the loop
        # empty for a length-1 call instead of iterating c(2, 1).
        for (i in seq_len(length(expressions) - 1L) + 1L) {
            expression <- expressions[[i]]
            name <- as.character(expression[[2]])
            value <- expression[[3]]
            dct[[name]] <- eval(value)
        }
    }
    return (dct)
}
# Set up fonts
# Register the project fonts with showtext so plot output can use them;
# paths are relative to the script's working directory.
font_add("Cabin", "../fonts/Cabin/Cabin-Regular.ttf")
font_add("Cabin-Italic", "../fonts/Cabin/Cabin-Italic.ttf")
font_add("DejaVuSans", "../fonts/dejavu-fonts-ttf-2.37/ttf/DejaVuSans.ttf")
# Route all subsequent text rendering through showtext automatically.
showtext_auto()
# Convert an angle from degrees to radians.
rad <- function(a) {
    a * (pi / 180)
}
# Lobanov (z-score) vowel normalisation.
#
# For each group (columns named in `group`), formant values are centred on
# the group mean and scaled by the group standard deviation; the transform
# is then applied vowel-by-vowel.  With reduce=TRUE each vowel collapses
# to the median of its normalised tokens.
#
# df      data frame of formant measurements
# f1, f2  column names holding the first/second formant values
# vowel   column name identifying each token's vowel
# group   grouping columns (e.g. speaker) over which means/sds are
#         computed; the default c() normalises over the whole data frame
# reduce  collapse each vowel to a single median point?
# Returns a data frame with normalised f1/f2 columns (plus the grouping
# and vowel columns added by plyr::ddply).
lobanov <- function(df, f1="f1", f2="f2", vowel="vowel", group=c(), reduce=TRUE) {
    ddply(df, group, function(df.grp) {
        # Group-level statistics driving the z-transform.
        f1.grp <- df.grp[,f1]
        f2.grp <- df.grp[,f2]
        mn.f1.grp <- mean(f1.grp, na.rm=TRUE)
        mn.f2.grp <- mean(f2.grp, na.rm=TRUE)
        sd.f1.grp <- sd(f1.grp, na.rm=TRUE)
        sd.f2.grp <- sd(f2.grp, na.rm=TRUE)
        ddply(df.grp, vowel, function(df.vwl) {
            f1.vwl <- df.vwl[,f1]
            f2.vwl <- df.vwl[,f2]
            # z-score against the *group* statistics, not the vowel's own.
            f1.vwl.nrm <- (f1.vwl - mn.f1.grp) / sd.f1.grp
            f2.vwl.nrm <- (f2.vwl - mn.f2.grp) / sd.f2.grp
            if (reduce) {
                f1.vwl.nrm <- median(f1.vwl.nrm)
                f2.vwl.nrm <- median(f2.vwl.nrm)
            }
            data.frame(f1=f1.vwl.nrm, f2=f2.vwl.nrm)
        })
    })
}
# Environment holding the colour palette shared by the plotting scripts.
colors <- new.env()
colors$pre <- "#F8BBD0"
colors$post <- "#E91E63"
colors$arrow <- "#444444"
colors$hvd <- "#176FC1" # "#0288D1"
colors$ssbe <- "#cccccc"
colors$ssbe.label <- "#cccccc"
colors$panel.background <- "#eeeeee"
colors$panel.grid <- "#ffffff"
# Output / input directories for generated figures and source data.
outDir <- "images"
inDir <- "data"
dataDir <- inDir
# Plot resolution, overridable via the DPI environment variable.
# Sys.getenv() returns "" when the variable is unset; as.numeric("")
# yields NA with a coercion warning, so suppress the warning and fall
# back to the default below.
DPI <- suppressWarnings(as.numeric(Sys.getenv("DPI")))
if (is.na(DPI)) {
    DPI <- 300
}
cat(sprintf("Using %d dpi\n", DPI))
|
# Verify that plot_trajectory_types() can decorate an existing ggplot.
context("Test plotting trajectory types")

test_that("test plot_trajectory_type", {
  ids <- trajectory_types$id
  base_plot <- ggplot() + theme_void()
  decorated <- plot_trajectory_types(
    base_plot,
    ids,
    ymin = seq_along(ids),
    ymax = seq_along(ids) + 1,
    size = 1.5
  )
  testthat::expect_true(is.ggplot(decorated))
})
|
/package/tests/testthat/test-plotting_trajectory_types.R
|
permissive
|
dynverse/dynbenchmark
|
R
| false
| false
| 337
|
r
|
# Verify that plot_trajectory_types() can decorate an existing ggplot.
context("Test plotting trajectory types")

test_that("test plot_trajectory_type", {
  ids <- trajectory_types$id
  base_plot <- ggplot() + theme_void()
  decorated <- plot_trajectory_types(
    base_plot,
    ids,
    ymin = seq_along(ids),
    ymax = seq_along(ids) + 1,
    size = 1.5
  )
  testthat::expect_true(is.ggplot(decorated))
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.