blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
631cca8ead6ac1ebdc9808f19a27741df7abffbc | a78ce6dbca90fc5521d6f645c0df5284f7e8047c | /74688018/examplePackage/man/examplePackage-package.Rd | 96a2f31d19afd3c4dac58eebbf218e491c10fd71 | [] | no_license | eddelbuettel/stackoverflow | 9bf5cdc097510cb7239290dcb58255f3aa762b9f | de57106c911095509013e47dba8a9893fdaa3f46 | refs/heads/master | 2023-08-18T05:11:41.956688 | 2023-08-14T19:42:50 | 2023-08-14T19:42:50 | 157,002,433 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 364 | rd | examplePackage-package.Rd | \name{examplePackage-package}
\alias{examplePackage-package}
\alias{examplePackage}
\docType{package}
\title{\packageTitle{examplePackage}}
\description{\packageDescription{examplePackage}}
\section{Package Content}{\packageIndices{examplePackage}}
\author{\packageAuthor{examplePackage}}
\section{Maintainer}{\packageMaintainer{examplePackage}}
\keyword{package}
|
8f1c5cf5a138be29f2e006872fb02a273c7e2819 | 7139bcfbcf42995caf49d0127a773ecb665449a2 | /GetTrimData.R | 5cf0716eba0cc652edbcfa5746f46649ee632cea | [] | no_license | ioanvlad/ExData_Plotting1 | d43137fabcc9074f029f14471fd5923f63675c3e | 0249f7a379c7cfe5f026630beb7a21a55a239c3b | refs/heads/master | 2021-01-22T05:38:49.907671 | 2017-02-12T03:16:55 | 2017-02-12T03:16:55 | 81,687,857 | 0 | 0 | null | 2017-02-11T22:49:24 | 2017-02-11T22:49:24 | null | UTF-8 | R | false | false | 1,333 | r | GetTrimData.R | GetTrimData <- function() {
  # This function checks if the saved reduced data file needed for assignment
  # 1 exists. If it does, it loads the data from it. If it does not, it
  # downloads the zip file from the web, unzips it, extracts only the
  # subset corresponding to the relevant dates, saves it to a file for future
  # use, and returns it to the caller
  reduced_data_file <- 'reduced_data.txt'
  if(file.exists(reduced_data_file)) {
    # Cached copy found: read it back (semicolon-separated, with header)
    my_data <- read.table(reduced_data_file, header=TRUE, sep=';')
  } else {
    # No cache: download the raw household power consumption archive
    url <- paste0('https://d396qusza40orc.cloudfront.net/',
                  'exdata%2Fdata%2Fhousehold_power_consumption.zip')
    txt_file <- 'household_power_consumption.txt'
    zip_file <- paste0(txt_file, '.zip')
    download.file(url, zip_file)
    unzip(zip_file)
    file.remove(zip_file)
    all <- read.table(txt_file, header=TRUE, sep=';')
    # Keep only the two dates the assignment plots need (d/m/yyyy format)
    my_data <- subset(all, Date=='1/2/2007' | Date=='2/2/2007')
    # Cache the reduced subset so later calls skip the download
    write.table(my_data, file=reduced_data_file, sep=';', quote=FALSE,
                row.names = FALSE, col.names = TRUE)
    file.remove(txt_file)
    rm(reduced_data_file, url, txt_file, zip_file, all)
  }
  return(my_data)
}
|
21bcc06dbc0affcbab166ec739c4d58337cefdd4 | 39f88826e318b0b351667806602c6957d5ae01d0 | /man/get_data_sec_investment_companies.Rd | fee8189812cf828f007532f75ea1c9e4d85dc28a | [
"MIT"
] | permissive | jngod2011/fundManageR | 9b79169ba3b8157d8ae9ade3bd066c2b5f82c255 | cea3e117a217bb7c770670ddd440822094a728cc | refs/heads/master | 2020-03-28T07:22:15.138769 | 2018-07-29T16:26:53 | 2018-07-29T16:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,076 | rd | get_data_sec_investment_companies.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sec_functions.R
\name{get_data_sec_investment_companies}
\alias{get_data_sec_investment_companies}
\title{SEC registered investment companies}
\usage{
get_data_sec_investment_companies(nest_data = TRUE,
return_message = TRUE)
}
\arguments{
\item{nest_data}{\code{TRUE} return a nested \code{data_frame}}

\item{return_message}{\code{TRUE} return a message after data import}
}
\value{
nested \code{data_frame} or \code{data_frame} if \code{nest_data = FALSE}
}
\description{
This function returns information about SEC
registered investment companies including:\itemize{
\item Insurance Companies
\item Insurance Accounts
\item Mutual Funds
\item Closed-end Funds
}
}
\examples{
get_data_sec_investment_companies(nest_data = TRUE, return_message = TRUE)
}
\references{
\href{http://sec.gov}{The Securities and Exchange Commission}
}
\seealso{
Other SEC: \code{\link{get_data_edgar_filing_streams}},
\code{\link{get_data_mmf_owned_debt_securities}},
\code{\link{get_data_recent_insider_trades}},
\code{\link{get_data_rf_leis}},
\code{\link{get_data_rf_sec_13F_companies}},
\code{\link{get_data_sec_bankruptcies}},
\code{\link{get_data_sec_broker_dealers}},
\code{\link{get_data_sec_ciks}},
\code{\link{get_data_sec_closed_end_funds}},
\code{\link{get_data_sec_cusips}},
\code{\link{get_data_sec_failed_to_deliver_securities}},
\code{\link{get_data_sec_filer}},
\code{\link{get_data_sec_filing_entities}},
\code{\link{get_data_sec_foia_requests}},
\code{\link{get_data_sec_money_market_funds}},
\code{\link{get_data_sec_municipal_advisors}},
\code{\link{get_data_sec_securities_filing_counts}},
\code{\link{get_data_sec_securities_metrics_by_exchange}},
\code{\link{get_data_securities_offerings}},
\code{\link{get_data_us_public_companies}},
\code{\link{get_dictionary_sec_filing_codes}},
\code{\link{get_dictionary_sec_form_codes}},
\code{\link{get_dictionary_sic_codes}}
Other entity search: \code{\link{get_data_adv_managers_current_period_summary}},
\code{\link{get_data_adv_managers_filings}},
\code{\link{get_data_adv_managers_metadata}},
\code{\link{get_data_adv_managers_periods_summaries}},
\code{\link{get_data_finra_entities}},
\code{\link{get_data_nareit_entities}},
\code{\link{get_data_reit_funds}},
\code{\link{get_data_rf_leis}},
\code{\link{get_data_rf_sec_13F_companies}},
\code{\link{get_data_sec_bankruptcies}},
\code{\link{get_data_sec_broker_dealers}},
\code{\link{get_data_sec_ciks}},
\code{\link{get_data_sec_closed_end_funds}},
\code{\link{get_data_sec_cusips}},
\code{\link{get_data_sec_filer}},
\code{\link{get_data_sec_filing_entities}},
\code{\link{get_data_sec_money_market_funds}},
\code{\link{get_data_sec_municipal_advisors}},
\code{\link{get_data_securities_offerings}},
\code{\link{get_data_us_public_companies}},
\code{\link{get_data_ycombinator_alumni}}
Other fund search: \code{\link{get_data_sec_filer}}
}
\concept{SEC}
\concept{entity search}
\concept{fund search}
|
04573ce31ac9109f4c2aca7094404abdc96b5cc4 | 6ef25123fac82d300307f2ecc785cac407b89eb1 | /tests/testthat.R | 6fb0d8f06fc13986b69ba8766fe96d5a0c725500 | [] | no_license | cran/fecR | 988e3953c1a7f0e0251de8fa6065c9b0add741c2 | 38c0ea017c4719699aefc518a275e7944c9dda71 | refs/heads/master | 2021-01-12T10:52:09.937971 | 2017-09-09T13:19:53 | 2017-09-09T13:19:53 | 72,737,800 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 52 | r | testthat.R | library(testthat)
library(fecR)
# Run the complete testthat suite bundled with the fecR package
test_check("fecR")
|
ea8d63162f9a29b6d450b4113662116d09688e08 | 3982d0e473e189cebc52944722aa0aa43568f70e | /plot_heatmaps.R | 45ec9476569e9d34069875560e2e2dd265678fbf | [] | no_license | Joshmoss11/TKO | e887894617b506283a951371b4e78a0de0d53fff | c9cd94e5400bbc5ed9d4f56f9256514985addaf2 | refs/heads/master | 2020-12-25T14:34:07.220876 | 2016-09-27T14:12:02 | 2016-09-27T14:12:02 | 66,776,313 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,193 | r | plot_heatmaps.R | #mat.dir <- '/mnt/lustre/hms-01/fs01/joshua.moss/cedar/embryo/tko_analysis/heatmapper_results'
# Exploratory heatmaps of the TKO H3K4me3 signal matrix.
# Reads a tab-separated matrix and renders it three different ways
# (base image(), base heatmap(), gplots::heatmap.2()).
mat.dir <- '~/tko_analysis'
tko.k4.f <- file.path(mat.dir,'tko_h3k4me3_matrix.tab')
# Skip the two header lines of the .tab file; nrows=-1 reads everything
tko.k4 <- as.matrix(read.table(tko.k4.f,header=F,skip=2,nrows=-1))
# image() draws rows bottom-up, so rotate to keep the original orientation
rotate <- function(x) t(apply(x, 2, rev))
library(RColorBrewer)
#bk = c(seq(-0.1,3, length=100),seq(3.1,387.2,length=100))
# Cap the signal at 30 so a few extreme values do not wash out the scale
tko.k4.p <- tko.k4; tko.k4.p[tko.k4>30] <- 30
bk = c(seq(0,30,length=100))
hmcols<- colorRampPalette(c("white","red"))(length(bk)-1)
png(file.path(mat.dir,"tko_k4_heatmap.png"), width=300, height = 800)
image(rotate(tko.k4.p),useRaster = TRUE,axes=F,col=hmcols,breaks=bk)
dev.off()
# NOTE(review): the two png() calls below reuse the same file name (in the
# working directory, unlike the file.path() above) and overwrite each other —
# confirm which output is actually wanted.
png("tko_k4_heatmap.png", width=300, height = 800)
heatmap(tko.k4[nrow(tko.k4):1,],scale='none',Rowv=NA,Colv=NA,col=hmcols,breaks=bk)
dev.off()
library(gplots)
png("tko_k4_heatmap.png", width=300, height = 800)
bk = unique(c(seq(-0.1,3, length=100),seq(3,9.7,length=100)))
# bug fix: "while" is not a valid colour name and made colorRampPalette()
# fail at run time; "white" restores the intended red-to-white ramp
hmcols<- colorRampPalette(c("red","white"))(length(bk)-1)
heatmap.2(tko.k4, col=hmcols,breaks=bk, Rowv= FALSE , Colv=FALSE, dendrogram="none", useRaster = TRUE, symkey=FALSE, symm=F, symbreaks=T, scale="none", trace="none", labRow=NA, labCol=NA,key=F)
dev.off()
|
695e784d28f53702c12d7fa63bbeedbfa5cab76a | 9ba17a398ef142f8edd71f591bdb494615d043fe | /decisionTree/preprocess/NAs.R | 395eb9da0d3e587802ef3ede1d4423abe36ea6a6 | [] | no_license | jaumeCloquellCapo/data-mining-preprocess-and-classification | 6fff17f635c0a07e77a6ec8cb2654a31c0d46cb3 | 62e19b26ed9453e6da25ddc913d22975c51dbb48 | refs/heads/master | 2020-04-28T23:29:08.736974 | 2019-03-14T16:18:39 | 2019-03-14T16:18:39 | 175,656,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,349 | r | NAs.R | library(mice)
library(missForest)
# Drop every observation (row) that contains at least one NA value
na.delete <- function(dataset){
  na.omit(dataset)
}
imputacionKnnTotal <- function(x){
  #' Imputes, one column at a time, every variable of `x` that contains
  #' missing values (NA or ""), using a K-NN model trained on complete rows.
  #' A first helper function, imputacionKnn, is defined inside.
  imputacionKnn <- function(x, y){
    #' Receives a matrix or data.frame `x` and the index `y` of one of its
    #' columns. It looks up the missing instances of that column and, for
    #' those, the other variables that also have missing values, so that
    #' prediction does not fail; since there is no more than one missing
    #' value per instance there will be at most as many such variables as
    #' missing instances.
    #' Instances with missing values in those variables are located and
    #' omitted from the training set (the ones to be predicted are obviously
    #' among them).
    #' Train and test sets are built and a K-NN model is fitted with 10-fold
    #' CV over 10 candidate values of k (tuneLength = 10); the result is the
    #' original matrix or data.frame with that column imputed.
    require(caret)
    # Missing instances of the target column (NA or empty string)
    instanciasPerdidas <- which(is.na(x[,y])|x[,y]=="")
    # Other variables with missing data in those instances
    # NOTE(review): which() returns positions relative to x with column y
    # removed — verify that the downstream indexing into x expects that.
    variablesPerdidas <- which(sapply((1:dim(x)[2])[-y], function(z) any(is.na(x[instanciasPerdidas,z]) | x[instanciasPerdidas,z]=="")))
    # Find the instances with missing values, skipping (when present) the
    # discarded variables
    if(length(variablesPerdidas)!=0){
      instanciasX <- sapply(1:dim(x)[1], function(z) sum(is.na(x[z,-variablesPerdidas]))+sum(x[z,-variablesPerdidas]=="", na.rm = TRUE))
    } else {
      instanciasX <- sapply(1:dim(x)[1], function(z) sum(is.na(x[z,]))+sum(x[z,]=="", na.rm = TRUE))
    }
    # Keep only the indices of the instances with missing values
    instanciasX <- which(instanciasX!=0)
    if(length(variablesPerdidas)!=0){
      train <- x[-instanciasX, -c(y,variablesPerdidas)]
      test <- x[instanciasPerdidas, -c(y,variablesPerdidas)]
    } else {
      train <- x[-instanciasX,-y]
      test <- x[instanciasPerdidas,-y]
    }
    train.class <- x[-instanciasX,y]
    variablesNumericas <- which(sapply(1:dim(train)[2], function(z) is.numeric(train[,z])))
    # Remove the class in case it is among them
    # NOTE(review): variablesNumericas indexes `train` (which no longer
    # contains column y), so comparing against `y` may not remove what is
    # intended — confirm.
    variablesNumericas <- variablesNumericas[!variablesNumericas==y]
    modelo <- caret::train(train[,variablesNumericas], train.class,
                           method = "knn",
                           tuneLength = 10,
                           trControl = trainControl(method = "cv"))
    modelo.predict <- predict(modelo,test[,variablesNumericas])
    if(is.factor(modelo.predict)){
      x[instanciasPerdidas,y] <- as.character(modelo.predict)
    } else {
      x[instanciasPerdidas,y] <- modelo.predict
    }
    x
  }
  #' Second part of the function:
  #' every variable with missing values is passed to the helper above. The
  #' same data.frame with the original data is passed on each iteration, and
  #' in each one the missing values of a copy of the data.frame are replaced
  #' with the imputations.
  y <- x
  variablesPerdidas <- which(sapply((1:dim(x)[2]), function(z) any(is.na(x[,z]) | x[,z]=="")))
  for(i in variablesPerdidas){
    print(i)
    n <- imputacionKnn(x,i)
    y[,i] <- n[,i]
  }
  y
} |
a25c6e41a9977d345e4afbf6a654a0bbb164def1 | ba945883b338fa643b6edddf530083efcc09e184 | /R/8_experiment.R | ca2b34866000abcd2f7b83fef26c02905d600a55 | [
"MIT"
] | permissive | passt/dyngen | 592abe0d299464c9e71b0b60af117d1b8b3772ef | 0967ed8af06a318d01971983fd05b87572995a4e | refs/heads/master | 2022-12-30T20:44:45.136214 | 2020-07-15T09:18:19 | 2020-07-15T09:18:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,863 | r | 8_experiment.R | #' Sample cells from the simulations
#'
#' [generate_experiment()] runs samples cells along the different simulations.
#' [experiment_snapshot()] assumes that cells are sampled from a heterogeneous pool of cells.
#' Cells will thus be sampled uniformily from the trajectory.
#' [experiment_synchronised()] assumes that all the cells are synchronised and
#' are sampled at different timepoints.
#'
#' @param model A dyngen intermediary model for which the simulations have been run with [generate_cells()].
#' @param weight_bw \[snapshot\] A bandwidth parameter for determining the distribution of
#' cells along each edge in order to perform weighted sampling.
#' @param num_timepoints \[synchronised\] The number of time points used in the experiment.
#' @param pct_between \[synchronised\] The percentage of 'unused' simulation time.
#' @param realcount The name of a dataset in [realcounts]. If `NULL`, a random
#' dataset will be sampled from [realcounts].
#' @param sample_capture_rate A function that samples values for the simulated capture rates of genes.
#'
#' @rdname generate_experiment
#'
#' @importFrom stats rmultinom rnorm quantile
#' @export
generate_experiment <- function(model) {
  # satisfy r cmd check
  cell_id <- step_ix <- sim_time <- simulation_i <- from <- to <- time <- NULL

  if (model$verbose) cat("Simulating experiment\n")

  # first sample the cells from the simulations, using the desired number of cells
  step_ixs <- .generate_experiment_sample_cells(model)

  # look up the simulation metadata of every sampled step
  cell_info <-
    model$simulations$meta[step_ixs, , drop = FALSE] %>%
    mutate(
      step_ix = step_ixs
    )

  # a "group" attribute on the sampled indices means the sampler already
  # assigned cells to groups (and ordered them)
  if ("group" %in% names(attributes(step_ixs))) {
    cell_info$cell_group <- attr(step_ixs, "group")
  } else {
    # only shuffle if step sampling is not used
    cell_info <- cell_info %>% sample_n(n())
  }

  # assign stable cell identifiers after the (optional) shuffle
  cell_info <-
    cell_info %>%
    mutate(
      cell_id = paste0("cell", row_number())
    ) %>%
    select(cell_id, step_ix, simulation_i, sim_time, from, to, time, everything())
  step_ixs <- cell_info$step_ix

  # collect true simulated counts of sampled cells
  tsim_counts <- model$simulations$counts[step_ixs, , drop = FALSE]

  # fetch real expression data
  realcount <- .generate_experiment_fetch_realcount(model)

  # simulate library size variation from real data
  count_simulation <- .simulate_counts_from_realcounts(tsim_counts, realcount, cell_info, sample_capture_rate = model$experiment_params$sample_capture_rate)
  sim_counts <- count_simulation$sim_counts
  cell_info <- count_simulation$cell_info
  # NOTE(review): mol_info is extracted but never stored in model$experiment
  # below — confirm whether it should be returned as well.
  mol_info <- count_simulation$mol_info

  # split up molecules into premRNA / mRNA / protein count matrices
  sim_wcounts <- sim_counts[, model$feature_info$mol_premrna, drop = FALSE]
  sim_xcounts <- sim_counts[, model$feature_info$mol_mrna, drop = FALSE]
  sim_ycounts <- sim_counts[, model$feature_info$mol_protein, drop = FALSE]
  dimnames(sim_wcounts) <-
    dimnames(sim_xcounts) <-
    dimnames(sim_ycounts) <-
    list(cell_info$cell_id, model$feature_info$feature_id)

  # carry over cell-wise GRN values for the sampled steps, if computed
  if (model$simulation_params$compute_cellwise_grn) {
    sim_cellwise_grn <- model$simulations$cellwise_grn[step_ixs, , drop = FALSE]
    rownames(sim_cellwise_grn) <- cell_info$cell_id
  } else {
    sim_cellwise_grn <- NULL
  }

  # carry over RNA velocity values for the sampled steps, if computed
  if (model$simulation_params$compute_rna_velocity) {
    sim_rna_velocity <- model$simulations$rna_velocity[step_ixs, , drop = FALSE]
    rownames(sim_rna_velocity) <- cell_info$cell_id
  } else {
    sim_rna_velocity <- NULL
  }

  # combine into final count matrix
  model$experiment <- list(
    counts_premrna = sim_wcounts,
    counts_mrna = sim_xcounts,
    counts_protein = sim_ycounts,
    feature_info = model$feature_info,
    cell_info = cell_info,
    cellwise_grn = sim_cellwise_grn,
    rna_velocity = sim_rna_velocity
  )

  model
}
.simulate_counts_from_realcounts <- function(
  tsim_counts,
  realcount,
  cell_info = tibble(cell_id = rownames(tsim_counts)),
  sample_capture_rate = function(n) rnorm(n, 1, 0.05) %>% pmax(0)
) {
  # satisfy r cmd check
  num_molecules <- mult <- id <- NULL

  # simulate library size variation from real data: distribution of
  # per-cell totals in the real dataset, relative to its mean
  realsums <- Matrix::rowSums(realcount)
  dist_vals <- realsums / mean(realsums)
  # NOTE(review): this lib_size is never used (it is recomputed inside
  # mutate() below), but removing it would shift the RNG stream and change
  # downstream results — confirm before cleaning it up.
  lib_size <- quantile(dist_vals, runif(nrow(cell_info)))

  # draw a library-size multiplier per cell; the sort()/order(order())
  # pairing rank-matches the drawn sizes to the cells' true molecule counts
  cell_info <-
    cell_info %>%
    mutate(
      num_molecules = Matrix::rowSums(tsim_counts),
      mult = quantile(dist_vals, runif(n())),
      lib_size = sort(round(mean(num_molecules) * mult))[order(order(num_molecules))]
    )

  # simulate gene capture variation: one capture rate per molecule column
  mol_info <-
    tibble(
      id = colnames(tsim_counts),
      capture_rate = sample_capture_rate(length(id))
    )

  # simulate sampling of molecules: for each cell, draw lib_size molecules
  # from a multinomial weighted by true counts x capture rates, operating
  # directly on the slots of the transposed sparse matrix
  tsim_counts_t <- Matrix::t(tsim_counts)
  for (cell_i in seq_len(nrow(cell_info))) {
    pi <- tsim_counts_t@p[[cell_i]] + 1
    pj <- tsim_counts_t@p[[cell_i + 1]]
    gene_is <- tsim_counts_t@i[pi:pj] + 1
    gene_vals <- tsim_counts_t@x[pi:pj]
    lib_size <- cell_info$lib_size[[cell_i]]
    cap_rates <- mol_info$capture_rate[gene_is]
    probs <- cap_rates * gene_vals
    probs[probs < 0] <- 0 # sometimes these can become zero due to rounding errors
    new_vals <- rmultinom(1, lib_size, probs)
    tsim_counts_t@x[pi:pj] <- new_vals
  }
  sim_counts <- Matrix::drop0(Matrix::t(tsim_counts_t))

  lst(
    sim_counts,
    cell_info,
    mol_info
  )
}
#' @export
#' @rdname generate_experiment
#' @importFrom GillespieSSA2 ssa_etl
list_experiment_samplers <- function() {
  # Named list of the available cell-sampling strategies
  list(
    snapshot = experiment_snapshot,
    synchronised = experiment_synchronised
  )
}
#' @rdname generate_experiment
#' @export
experiment_snapshot <- function(
  realcount = NULL,
  sample_capture_rate = function(n) rnorm(n, 1, .05) %>% pmax(0),
  weight_bw = 0.1
) {
  # satisfy r cmd check
  realcounts <- NULL

  # When no dataset is specified, draw one of the bundled real datasets at random
  if (is.null(realcount)) {
    data(realcounts, package = "dyngen", envir = environment())
    realcount <- sample(realcounts$name, 1)
  }

  # Parameter bundle consumed by generate_experiment(); `fun` is the sampler
  list(
    realcount = realcount,
    sample_capture_rate = sample_capture_rate,
    fun = .generate_experiment_snapshot,
    weight_bw = weight_bw
  )
}
#' @rdname generate_experiment
#' @export
experiment_synchronised <- function(
  realcount = NULL,
  sample_capture_rate = function(n) rnorm(n, 1, .05) %>% pmax(0),
  num_timepoints = 8,
  pct_between = .75
) {
  # satisfy r cmd check
  realcounts <- NULL

  # When no dataset is specified, draw one of the bundled real datasets at random
  if (is.null(realcount)) {
    data(realcounts, package = "dyngen", envir = environment())
    realcount <- sample(realcounts$name, 1)
  }

  # Parameter bundle consumed by generate_experiment(); `fun` is the sampler
  list(
    realcount = realcount,
    sample_capture_rate = sample_capture_rate,
    fun = .generate_experiment_synchronised,
    num_timepoints = num_timepoints,
    pct_between = pct_between
  )
}
.generate_experiment_sample_cells <- function(model) {
  # satisfy r cmd check
  sim_time <- to <- time <- NULL

  network <-
    model$gold_standard$network

  # terminal milestones: appear as a target but never as a source
  end_states <- setdiff(unique(network$to), unique(network$from))

  # candidate steps: drop burn-in (sim_time < 0) and steps that have fully
  # arrived at an end state (time >= 1); remember the original row index
  sim_meta <-
    model$simulations$meta %>%
    mutate(orig_ix = row_number()) %>%
    filter(sim_time >= 0, !to %in% end_states | time < 1)

  params <- model$experiment_params

  # dispatch to the configured sampler (snapshot or synchronised)
  params$fun(
    network = network,
    sim_meta = sim_meta,
    params = model$experiment_params,
    num_cells = model$numbers$num_cells
  )
}
#' @importFrom stats approxfun density
.generate_experiment_snapshot <- function(
  network,
  sim_meta,
  params,
  num_cells
) {
  # satisfy r cmd check
  pct <- cum_pct <- from <- to <- time <- `.` <- NULL

  # allocate the requested number of cells over the edges proportionally to
  # edge length; the diff(c(0, round(cumsum))) trick makes the per-edge
  # counts sum to exactly num_cells despite rounding
  network <-
    network %>%
    mutate(
      pct = length / sum(length),
      cum_pct = cumsum(pct),
      num_cells = diff(c(0, round(cum_pct * num_cells)))
    ) %>%
    select(-pct, -cum_pct)

  # for each edge, sample steps with weights inversely proportional to the
  # local density of their time values, approximating a uniform draw along
  # the edge
  map(
    seq_len(nrow(network)),
    function(i) {
      edge <- network %>% slice(i)
      meta <-
        inner_join(sim_meta, edge %>% select(from, to), c("from", "to"))
      if (nrow(meta) > 1) {
        meta %>%
          mutate(
            density = approxfun(density(time, bw = params$weight_bw))(time),
            weight = 1 / density
          ) %>%
          {sample(.$orig_ix, size = edge$num_cells, replace = TRUE, prob = .$weight)}
      } else {
        # 0 or 1 matching steps: density() is undefined, skip this edge
        NULL
      }
    }
  ) %>%
    unlist()
}
.generate_experiment_synchronised <- function(
  network,
  sim_meta,
  params,
  num_cells
) {
  # satisfy r cmd check (timepoint_group appears twice in this chain in the
  # original source; harmless)
  sim_time <- t_scale <- timepoint_group <- selectable <- pct <- cum_pct <- timepoint_group <- orig_ix <- NULL

  # rescale simulation time to [0, num_timepoints) (the 1e-10 keeps the
  # maximum strictly below num_timepoints), bin steps per time point, and
  # keep only the first (1 - pct_between) fraction of each bin
  sim_meta2 <-
    sim_meta %>%
    mutate(
      t_scale = sim_time / (max(sim_time)+1e-10) * params$num_timepoints,
      timepoint_group = floor(t_scale),
      selectable = (t_scale - timepoint_group) < (1 - params$pct_between)
    ) %>%
    filter(selectable)

  # allocate cells across time points proportionally to the number of
  # selectable steps; diff(c(0, round(cumsum))) makes counts sum to num_cells
  numbers <-
    sim_meta2 %>%
    group_by(timepoint_group) %>%
    summarise(n = n()) %>%
    mutate(
      pct = n / sum(n),
      cum_pct = cumsum(pct),
      num_cells = diff(c(0, round(cum_pct * num_cells)))
    )

  # sample (with replacement) the allotted number of steps per time point
  map2(
    numbers$timepoint_group,
    numbers$num_cells,
    function(gr, num) {
      sim_meta2 %>% filter(timepoint_group == gr) %>% pull(orig_ix) %>% sample(size = num, replace = TRUE)
    }
  ) %>%
    unlist()
}
.generate_experiment_fetch_realcount <- function(model) {
  # satisfy r cmd check
  realcounts <- NULL

  realcount_ <- model$experiment_params$realcount

  # download realcount if needed: a character value is treated as the name
  # of a bundled dataset and fetched (with on-disk caching); a matrix or
  # sparse matrix is used as-is
  realcount <-
    if (is.character(realcount_)) {
      data(realcounts, package = "dyngen", envir = environment())
      assert_that(realcount_ %all_in% realcounts$name)
      url <- realcounts$url[[match(realcount_, realcounts$name)]]
      .download_cacheable_file(
        url = url,
        cache_dir = model$download_cache_dir,
        verbose = model$verbose
      )
    } else if (is.matrix(realcount_) || is_sparse(realcount_)) {
      realcount_
    } else {
      stop("realcount should be a url from dyngen::realcounts, or a sparse count matrix.")
    }

  realcount
} |
b977f2776d1b791f71ab82daf9b1ef8b641e7664 | ab3fd5416be6772a9e9c6a979b5d6d1681620883 | /man/apply_stylerignore.Rd | 89be09c4a2e37453b9102aacc6585aaeeaf693d7 | [] | no_license | jb388/styler | ff8eccda9eeeab52d7b7a5d0fe45790799a53b33 | e49afbf6eec54990f6bf07fad16ae61bbc1b37bd | refs/heads/master | 2021-01-02T19:00:34.486266 | 2020-02-10T23:48:28 | 2020-02-10T23:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 709 | rd | apply_stylerignore.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stylerignore.R
\name{apply_stylerignore}
\alias{apply_stylerignore}
\title{Ensure correct positional information for stylerignore expressions}
\usage{
apply_stylerignore(flattened_pd)
}
\arguments{
\item{flattened_pd}{A flattened parse table.}
}
\description{
Ensure correct positional information for stylerignore expressions
}
\details{
\itemize{
\item Get the positional information for tokens with a stylerignore tag from
\code{env_current}, which recorded that information from the input text.
\item Replace the computed lag_newlines and lag_spaces information in the parse
table with this information.
}
}
\keyword{internal}
|
51edd6a6590701871880ce741ee9eba7652fe53e | cbb2bd4a136c87a0799983385cee8584a3e85add | /tests/testthat/test-3agglomerate.R | ab6d83f737d5202263aed8d281e8fdc50913197a | [] | no_license | microbiome/MicrobiomeExperiment | b4220f7210871ea1579c552d384ee9f060e4e6a5 | 3012ee51ff8a9082d161130bc4fb962bb6757d07 | refs/heads/master | 2023-01-03T16:25:14.820550 | 2020-10-25T11:32:15 | 2020-10-25T11:32:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,575 | r | test-3agglomerate.R | context("agglomerate")
test_that("agglomerate", {
gr <- GRanges("chr1",rep("1-6",11))
df <- DataFrame(n = c(1:11))
mcols(gr) <- df
grl <- splitAsList(gr,1:11)
mat <- matrix(1:110, nrow = 11)
xtse <- MicrobiomeExperiment(assays = list(mat = mat),
rowRanges = unname(grl))
tax_data <- DataFrame(Phylum = c(rep("a",3),rep("b",3),rep("c",3),rep("b",2)),
score = 1:11,
Family = c("c",NA,"d","e","f","g","h",NA,"h","e","f"),
n = 7:17)
rowData(xtse) <- tax_data
# mergeRows for agglomerateByRank
tax_factors <- MicrobiomeExperiment:::.get_tax_groups(xtse, col = 2)
actual_family <- actual <- mergeRows(xtse, f = tax_factors)
expect_s4_class(actual,class(xtse))
expect_equal(dim(actual),c(8,10))
expect_equal(assays(actual)$mat[8,1],c(8))
expect_equal(assays(actual)$mat[7,1],c(16))
tax_factors <- MicrobiomeExperiment:::.get_tax_groups(xtse, col = 1)
actual_phylum <- actual <- mergeRows(xtse, f = tax_factors)
expect_s4_class(actual,class(xtse))
expect_equal(dim(actual),c(3,10))
expect_equal(assays(actual)$mat[1,1],c(6))
expect_equal(assays(actual)$mat[2,1],c(36))
expect_equal(assays(actual)$mat[3,1],c(24))
#
expect_error(agglomerateByRank(xtse,"",na.rm=FALSE),
"'rank' must be an non empty single character value")
expect_error(agglomerateByRank(xtse,"Family",na.rm=""),
"'na.rm' must be TRUE or FALSE")
expect_error(agglomerateByRank(xtse,"Family",na.rm=FALSE,agglomerateTree=""),
"'agglomerateTree' must be TRUE or FALSE")
xtse2 <- xtse
rowData(xtse2) <- NULL
expect_error(agglomerateByRank(xtse2,"Family",na.rm=FALSE),
"taxonomyData needs to be populated")
#
actual <- agglomerateByRank(xtse,"Family",na.rm=FALSE)
expect_equivalent(rowData(actual),rowData(actual_family))
actual <- agglomerateByRank(xtse,"Phylum",na.rm=FALSE)
expect_equivalent(rowData(actual),rowData(actual_phylum))
#
actual <- agglomerateByRank(xtse,"Family", na.rm = TRUE)
expect_equal(dim(actual),c(6,10))
expect_equal(rowData(actual)$Family,c("c","d","e","f","g","h"))
actual <- agglomerateByRank(xtse,"Family", na.rm = FALSE) # the default
expect_equal(dim(actual),c(8,10))
expect_equal(rowData(actual)$Family,c("c",NA,"d","e","f","g","h",NA))
actual <- agglomerateByRank(xtse,"Phylum")
expect_equivalent(rowData(actual),rowData(actual_phylum))
})
|
4a5530c917fc187db29d16e414bdb4a210e2bbc8 | 2b379acc87342a02c4c607cff8575dc822261962 | /cachematrix.R | 42eb5e11d0ce9c59c0183b38fb334c4d74f482c2 | [] | no_license | yangweiliu/ProgrammingAssignment2 | 4009c432acf9f9cb6a63f5ab46efb9697802710c | 4375273dda5ef3fb751e174fc3d5397f62c3bca0 | refs/heads/master | 2021-01-25T02:29:59.010648 | 2015-08-23T23:04:43 | 2015-08-23T23:04:43 | 41,267,173 | 0 | 0 | null | 2015-08-23T21:00:55 | 2015-08-23T21:00:54 | null | UTF-8 | R | false | false | 1,318 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## The following is to create a matrix object that can cache its inverse.
## Returns a list of four accessor closures; the matrix itself and the
## cached inverse live in this function's environment.
makeCacheMatrix <- function(x=matrix()) {
  ## Cached inverse; NULL until cacheSolve() stores one
  i <- NULL
  ## Replace the stored matrix and invalidate the cached inverse.
  ## bug fix: assign to the closed-over `x` (the stored matrix) — the
  ## original assigned to an undefined global `m`.
  set <- function(matrix) {
    x <<- matrix
    i <<- NULL
  }
  ## Return the stored matrix (the original read an undefined `m`)
  get <- function(){
    x
  }
  ## Cache a computed inverse
  set_Inverse <- function(inverse){
    i <<- inverse
  }
  ## Return the cached inverse (NULL when not yet computed)
  get_Inverse <- function() {
    i
  }
  ## Return method list
  list(set=set, get=get, set_Inverse=set_Inverse, get_Inverse=get_Inverse)
}
## Compute inverse of the matrix returned by makeCacheMatrix.
## If inverse is already calculated (and the matrix has not changed),
## then cacheSolve retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$get_Inverse()
  ## If already done, then just return the inverse
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  ## Get matrix from object
  data <- x$get()
  ## Calculate the inverse. Bug fix: the original computed
  ## solve(data) %*% data, which is the identity matrix, not the inverse.
  ## Extra arguments are forwarded to solve().
  m <- solve(data, ...)
  ## Cache it for subsequent calls
  x$set_Inverse(m)
  ## Return matrix
  m
}
|
b3606735b7eddd6725a33c19978058e91eae8bfa | 6a2836aa5d43e5925c7d6f66f89121f8d55a4ee9 | /Code/logitModell/man/maxLikeEst.Rd | 1204c0dd102c9b8032d3f671bb9ac7bd902d9869 | [] | no_license | xuansonle/LogisticRegressionFromScratch | 3ab73b077c8757c00fb4b5a9ccc813c4faa36134 | 2a3b58c247e2f34e0a86ec382fd64cce3c0338e1 | refs/heads/master | 2022-08-01T23:52:19.567470 | 2020-05-18T09:53:37 | 2020-05-18T09:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 834 | rd | maxLikeEst.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logitModell.R
\name{maxLikeEst}
\alias{maxLikeEst}
\title{Maximum Likelihood}
\usage{
maxLikeEst(y, X)
}
\arguments{
\item{y}{a matrix/vector containing the dependent variable}
\item{X}{a matrix containing the intercept and all independent variables}
}
\value{
a list with maximum likelihood estimation results
}
\description{
This function calculates the maximum likelihood for binary logistic regression
}
\examples{
testData <- read.csv("https://stats.idre.ucla.edu/stat/data/binary.csv")[1:100,]
testData$rank <- factor(testData$rank)
testModell <- as.formula("admit ~ gre + gpa + rank")
testModelFrame <- model.frame(admit ~ gre + gpa + rank, testData)
maxLikeEst(X = model.matrix(testModell, testModelFrame), y = model.response(testModelFrame))
}
|
b963f7f5dbc2b62d85dba160982cb0691df92a33 | 7b36e0401cfb3d56f0f812a1e37c36a00cd86033 | /homework2/server.R | 62b657b997daea650eb04dbdc8109a21de32f2aa | [] | no_license | mong2/msan622 | d6c587d2fbb07634e41ac2a7de8a68dac0bcf899 | fa18e6032ea82f857f741afc2fd9a4ec1a1e4fcc | refs/heads/master | 2020-12-06T23:27:33.420498 | 2014-05-16T07:00:19 | 2014-05-16T07:00:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,767 | r | server.R | library(ggplot2)
library(shiny)
# Load global data to be shared by all sessions: the ggplot2 `movies`
# data set reduced to the columns the plots need, with a derived single
# `genre` label per film.
loadData <- function(){
  data("movies", package = "ggplot2")
  # Keep only films with a known budget and a non-empty MPAA rating
  movies <- subset(movies, budget > 0)
  movies <- subset(movies, mpaa != "")
  # Collapse the seven genre indicator columns (18:24) into one label;
  # films flagged with several genres become "Mixed", none become "None"
  genre <- rep(NA, nrow(movies))
  count <- rowSums(movies[, 18:24])
  genre[which(count > 1)] <- "Mixed"
  genre[which(count < 1)] <- "None"
  genre[which(count == 1 & movies$Action == 1)] <- "Action"
  genre[which(count == 1 & movies$Animation == 1)] <- "Animation"
  genre[which(count == 1 & movies$Comedy == 1)] <- "Comedy"
  genre[which(count == 1 & movies$Drama == 1)] <- "Drama"
  # bug fix: label was misspelled "Documleveentary"
  genre[which(count == 1 & movies$Documentary == 1)] <- "Documentary"
  genre[which(count == 1 & movies$Romance == 1)] <- "Romance"
  genre[which(count == 1 & movies$Short == 1)] <- "Short"
  movies$genre <- genre
  # Only the columns used by getPlot()
  df <- movies[, c("budget", "rating", "mpaa", "genre")]
  return(df)
}
# Create plotting function: budget vs IMDB-rating scatter plot.
#
# localFrame  -- data.frame with columns budget, rating, mpaa, genre
# MpaaRating  -- "All" colours points by MPAA rating; any other value filters
#                to one rating ("NC-17", "PG", "PG-13"; anything else -> "R")
#                and colours points by genre
# colorScheme -- name of a ColorBrewer palette, anything else -> flat grey
# DotSize, DotAlpha -- forwarded to geom_point()
getPlot <- function(localFrame, MpaaRating = "None", colorScheme = "None", DotSize,
                    DotAlpha){
  if (MpaaRating == "All"){
    # All ratings in one panel, coloured by MPAA rating
    localPlot <- ggplot(localFrame, aes(x = budget, y = rating, color = mpaa)) +
      geom_point(size = DotSize, alpha = DotAlpha, position = "jitter") +
      scale_x_continuous(breaks = c(0.0e+00, 5.0e+07, 1.0e+08, 1.5e+08, 2.0e+08),
                         labels = c(">0", ".5 Billion", "1 Billion", "1.5 Billion", "2 Billion")) +
      theme(axis.title.x = element_text(vjust = -0.03, face = "bold", color = "grey30"),
            axis.title.y = element_text(face = "bold", color = "grey30"),
            legend.title = element_text(color = "grey30"),
            # NOTE(review): legend.background is passed twice (element_rect
            # here and element_blank below); kept as-is to preserve the
            # original behaviour — confirm which one is intended.
            legend.background = element_rect(),
            legend.direction = "horizontal",
            legend.justification = c(0, 0),
            legend.position = c(0.665, 0),
            legend.background = element_blank(),
            plot.title = element_text(vjust = 1.4, size = 16),
            # panel.grid.minor.x = element_blank(),
            # panel.grid.minor.y = element_blank(),
            axis.ticks.x = element_blank()) +
      labs(color = "MPAA RATING") +
      xlab("Budget") +
      ylab("IMDB Rating")
  } else {
    # One specific rating: filter the data, then colour by genre. Values
    # other than the three named ratings fall through to "R", exactly like
    # the original if/else ladder.
    keep <- if (MpaaRating %in% c("NC-17", "PG", "PG-13")) MpaaRating else "R"
    localFrame <- subset(localFrame, mpaa == keep)
    localPlot <- ggplot(localFrame, aes(x = budget, y = rating, color = genre)) +
      geom_point(size = DotSize, alpha = DotAlpha, position = "jitter") +
      scale_x_continuous(breaks = c(0.0e+00, 5.0e+07, 1.0e+08, 1.5e+08, 2.0e+08),
                         labels = c(">0", ".5 Billion", "1 Billion", "1.5 Billion", "2 Billion")) +
      theme(axis.title.x = element_text(vjust = -0.03, face = "bold", color = "grey30"),
            axis.title.y = element_text(face = "bold", color = "grey30"),
            legend.title = element_text(color = "grey30"),
            legend.direction = "horizontal",
            legend.justification = c(0, 0),
            legend.position = "bottom",
            legend.background = element_blank(),
            plot.title = element_text(vjust = 1.4, size = 16),
            # panel.grid.minor.x = element_blank(),
            # panel.grid.minor.y = element_blank(),
            axis.ticks.x = element_blank()) +
      labs(color = "Movie Genres") +
      xlab("Budget") +
      ylab("IMDB Rating")
  }

  # Colour scheme: every supported ColorBrewer palette shares one call
  # (replaces the original eight-branch if/else ladder); anything
  # unrecognised falls back to the original flat grey.
  brewerPalettes <- c("Accent", "Set1", "Set2", "Set3", "Dark2", "Pastel1", "Pastel2")
  if (colorScheme %in% brewerPalettes) {
    localPlot <- localPlot + scale_color_brewer(palette = colorScheme)
  } else {
    localPlot <- localPlot + scale_color_grey(start = 0.2, end = 0.2)
  }
  return(localPlot)
}
#shared data
# Load the movie data once at app startup; every session shares this frame.
globalData <- loadData()
####Shiny Server #####
shinyServer(function(input, output){
    cat("Press \"ESC\" to exit ...\n")
    # Session-local reference to the shared data frame.
    localFrame <-globalData
    # Reactive plot: re-renders whenever any of the inputs read below change.
    output$moviePlot <- renderPlot(
        {
            # Restrict to the genres selected in the UI; NULL means no genre
            # filter has been chosen yet, so keep the full frame (the
            # self-assignment is a deliberate no-op placeholder).
            if(is.null(input$sortMovieGenres)){
                localFrame <-localFrame
            }
            else{
                localFrame <- subset(localFrame, localFrame$genre %in% input$sortMovieGenres)
            }
            # Build the ggplot from the UI controls: MPAA-rating filter, color
            # scheme, and point size / transparency.
            moviePlot <- getPlot(
                localFrame,
                input$MpaaRating,
                input$colorScheme,input$DotSize, input$DotAlpha)
            # NOTE(review): print.ggplot() has no width/height parameters, so
            # these two arguments appear to be silently ignored -- confirm,
            # then either drop them or size the plot via
            # renderPlot(width =, height =).
            print(moviePlot,width = 10, height = 15)
        }
    )
})
|
af540539954630551a9e188079e1f5c721bb858c | 4115c98348bf0e7fe944272d91fe351d58d22a96 | /R/get_RMF_from_NASIS_db.R | 662408e1199d1be40759707ce778e99ff8bda562 | [] | no_license | ncss-tech/soilDB | a933bf98a674fd54b5e1073a4497ee38a177bdf2 | 380440fc7b804b495aa711c130ab914c673a54be | refs/heads/master | 2023-09-02T14:19:17.348412 | 2023-09-02T00:56:16 | 2023-09-02T00:56:16 | 54,595,470 | 68 | 20 | null | 2023-09-01T19:00:48 | 2016-03-23T21:51:10 | R | UTF-8 | R | false | false | 2,391 | r | get_RMF_from_NASIS_db.R |
#' @title Get RMF data from local NASIS
#'
#' @description Prepare a list of `data.frame` objects with data from the "phrdxfeatures" and "phredoxfcolor" tables. These tables are related by "phrdxfiid" column, and related to horizon data via "phiid".
#'
#' @param SS logical, limit query to the selected set
#'
#' @param dsn optional path to local SQLite database containing NASIS table structure; default: `NULL`
#'
#' @return a `list` with two `data.frame` objects:
#' * `RMF`: contents of "phrdxfeatures" table, often >1 row per horizon
#' * `RMF_colors`: contents of "phredoxfcolor", usually >1 row per record in "phrdxfeatures"
#'
#' @export
get_RMF_from_NASIS_db <- function(SS = TRUE, dsn = NULL) {

  # Redoximorphic feature records; uniqueness is enforced via peiid
  # (pedon-level) and phiid (horizon-level).
  rmf.sql <- "SELECT peiid, phiid,
  rdxfeatpct, rdxfeatsize, rdxfeatcntrst, rdxfeathardness, rdxfeatshape, rdxfeatkind, rdxfeatlocation, rdxfeatboundary, phrdxfiid
  FROM
  pedon_View_1
  INNER JOIN phorizon_View_1 ON pedon_View_1.peiid = phorizon_View_1.peiidref
  INNER JOIN phrdxfeatures_View_1 ON phorizon_View_1.phiid = phrdxfeatures_View_1.phiidref
  ORDER BY phiid, rdxfeatkind;"

  # Colors associated with each redoximorphic feature record.
  color.sql <- "SELECT phrdxfiidref AS phrdxfiid,
  colorpct, colorhue, colorvalue, colorchroma, colormoistst
  FROM phredoxfcolor_View_1
  ORDER BY phrdxfiidref, colormoistst;"

  conn <- dbConnectNASIS(dsn)

  # On connection failure, return an empty (but correctly shaped) result
  # rather than erroring.
  if (inherits(conn, 'try-error')) {
    return(list(RMF = data.frame(), RMF_colors = data.frame()))
  }

  # Query the whole local database instead of the selected set: strip the
  # "_View_1" suffix so the base tables are hit.
  if (SS == FALSE) {
    rmf.sql <- gsub(pattern = '_View_1', replacement = '', x = rmf.sql, fixed = TRUE)
    color.sql <- gsub(pattern = '_View_1', replacement = '', x = color.sql, fixed = TRUE)
  }

  # Keep the connection open for the first query; the second call closes it
  # (dbQueryNASIS() default).
  rmf.data <- dbQueryNASIS(conn, rmf.sql, close = FALSE)
  color.data <- dbQueryNASIS(conn, color.sql)

  # Convert coded values to their text/factor representation.
  rmf.data <- uncode(rmf.data, dsn = dsn)
  color.data <- uncode(color.data, dsn = dsn)

  # uncode() yields factors; flatten the color fields back to character and
  # numeric (numeric conversion must pass through character so the factor
  # labels, not level indices, are used).
  color.data$colormoistst <- as.character(color.data$colormoistst)
  color.data$colorhue <- as.character(color.data$colorhue)
  color.data$colorvalue <- as.numeric(as.character(color.data$colorvalue))
  color.data$colorchroma <- as.numeric(as.character(color.data$colorchroma))

  list(RMF = rmf.data, RMF_colors = color.data)
}
|
6e09faf8c372540d4c9fb440e514d8e07457ea9f | c7ee5bc6a094dd513c2aba7a401495f6d93ec24c | /R/coefficient.song.solution.R | dd6dac098944887215875fe215d30522c4c579a6 | [] | no_license | cran/boussinesq | b6961abca782e0f188876c429bf74c860abbc723 | 3d77b848c8c156c162f8b7c967549bc388e326b3 | refs/heads/master | 2023-08-31T00:46:17.117323 | 2023-08-28T16:00:07 | 2023-08-28T16:34:06 | 17,694,868 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 964 | r | coefficient.song.solution.R | NULL
#'
#' Algorithm for resolution of the series coefficients \eqn{a_n} for the dimensionless formula for \eqn{H} in \code{\link{beq.song.dimensionless}}
#'
#' @param n approximation order (number of coefficients returned; a positive integer)
#' @param lambda dimensionless parameter related to \eqn{\alpha} see Song at al, 2007
#'
#'
#' @return the first \code{n} series coefficients \eqn{a_n}
#'
#' @note For major details, see Song at al, 2007
#'
#' @references Song, Zhi-yao;Li, Ling;David, Lockington. (2007), "Note on Barenblatt power series solution to Boussinesq equation",Applied Mathematics and Mechanics,
#' \url{https://link.springer.com/article/10.1007/s10483-007-0612-x} ,\doi{10.1007/s10483-007-0612-x}
#'
#' @export
#' @author Emanuele Cordano
#'
#'
#'
coefficient.song.solution <- function(n = 4, lambda = 0) {
  # BUG FIX: the original unconditionally ran `for (i in 3:n)`, which counts
  # DOWN when n < 3 (e.g. 3:2 == c(3, 2)), silently extending the result past
  # length n and filling it with corrupted values. The recurrence loops are
  # now guarded so any n >= 1 returns exactly n coefficients.
  stopifnot(is.numeric(n), length(n) == 1, n >= 1)
  a <- array(NA_real_, n)
  # Closed-form seeds of the series (Song et al., 2007).
  a[1] <- 1 / 4
  if (n >= 2) {
    a[2] <- (2 * lambda - 1) / 16
  }
  if (n >= 3) {
    for (i in 3:n) {
      # Leading term of the recurrence from the previous coefficient.
      a[i] <- (2 * lambda + 1 - i) / i^2 * a[i - 1]
      # Convolution correction over the previously computed coefficients.
      for (k in 2:(i - 1)) {
        a[i] <- a[i] - 2 * (i + 1) / i * a[k] * a[i + 1 - k]
      }
    }
  }
  return(a)
}
|
0ec7894bfa71e479cb4c5e7b77adebea58adb4dd | 377c8851390a7e85f2cca30246e0f9f6df7d13cc | /r_scripts/response_analysis_pilot.r | 216011454c02a03d0b34c30ac4c931be63db94cf | [] | no_license | GallupGovt/ngs2 | bb9eca850dc0d76b39d7aa16aeb1ef59d6d640fb | ed9443400bcd2a46907dae6701a7bd4580499772 | refs/heads/master | 2023-05-26T16:45:47.820293 | 2020-12-17T09:48:56 | 2020-12-17T09:48:56 | 147,573,574 | 4 | 4 | null | 2023-05-22T21:36:06 | 2018-09-05T20:07:10 | R | UTF-8 | R | false | false | 29,970 | r | response_analysis_pilot.r | # Empanelment stitching for attrition analysis within NGS2
# Author: Matt Hoover <matt_hoover@gallup.com>
# directory setup
if(Sys.info()['sysname'] == "Windows") {
setwd(paste0('C:/Users/pablo_diego-rosell/Desktop/Projects/DARPA/Cycle 1/',
'Research Protocols/Registration/Experiment 2/Mock data'))
} else {
if(Sys.info()['user'] == 'matt_hoover') {
setwd('~/git/NGS2')
} else {
setwd("~/Research/Gallup/GallupAnalytics/RandD/NGS2/NGS2-Experiment")
}
}
rm(list=ls())
# libraries
library(foreign)
library(reshape2)
# renaming dictionaries
EMPANEL_NUMERIC_VARS <- c(
'Q11_age',
'Q15_total_hours',
'Q25_adult_hh_num',
'Q26_child_hh_num',
'Q28_friends_num',
'Q34_internet_hours_day',
'Q36_social_media_people_num',
'Q37_social_media_hours_day'
)
EMPANEL_YESNO_VARS <- c(
'Q14_job',
'Q29_born_in_country',
'Q30_move_last5',
'Q35_social_networks',
'Q38_online_research',
'Q39_send_survey_invites'
)
NAME_MATCHES <- c(
'StartDate',
'EndDate',
'Status',
'IPAddress',
'Progress',
'Duration..in.seconds.',
'Finished',
'RecordedDate',
'ResponseId',
'RecipientLastName',
'RecipientFirstName',
'RecipientEmail',
'ExternalReference',
'LocationLatitude',
'LocationLongitude',
'DistributionChannel',
'UserLanguage',
'Q0_intro_screen',
'Q0_yins_screen',
'Q0_contact_screen',
'Q3_consent',
'Q6_1_extravert',
'Q6_2_critical',
'Q6_3_dependable',
'Q6_4_anxious',
'Q6_5_open',
'Q6_6_reserved',
'Q6_7_sympathetic',
'Q6_8_disorganized',
'Q6_9_calm',
'Q6_10_conventional',
'Q7_1_group_inferiority',
'Q7_2_use_force',
'Q7_3_more_chances',
'Q7_4_step_on',
'Q7_5_stay_place',
'Q7_6_top_bottom',
'Q7_7_inferior',
'Q7_8_kept_place',
'Q7_9_equal',
'Q7_10_equality_ideal',
'Q7_11_equal_chance',
'Q7_12_equalize_conditions',
'Q7_13_social_equality',
'Q7_14_fewer_problems',
'Q7_15_incomes_equal',
'Q7_16_no_dominate',
'Q8_1_neglect_needs',
'Q8_2_others_needs',
'Q8_3_sensitive_feelings',
'Q8_4_not_helpful',
'Q8_5_be_helpful',
'Q8_6_no_aid',
'Q8_7_be_responsive',
'Q8_8_help_others',
'Q8_9_no_involvement',
'Q8_10_no_help_others',
'Q8_11_turn_to_others',
'Q8_12_emotion_avoid',
'Q8_13_trouble_themselves',
'Q8_14_ignored_hurt',
'Q9_1_depend_on_self',
'Q9_2_rely_on_self',
'Q9_3_do_own_thing',
'Q9_4_personal_identity_important',
'Q9_5_do_job_better',
'Q9_6_winning_everything',
'Q9_7_competition_nature',
'Q9_8_others_better_tense',
'Q9_9_coworker_prize_proud',
'Q9_10_coworker_wellbeing',
'Q9_11_spend_time_others',
'Q9_12_good_cooperate_others',
'Q9_13_family_together',
'Q9_14_care_family_duty',
'Q9_15_family_stick_together',
'Q9_16_respect_group_decision',
'Q11_age',
'Q12_gender',
'Q13_education',
'Q14_job',
'Q15_total_hours',
'Q16_employer_selfemployed',
'Q17_occupation',
'Q18_employ_situation',
'Q18_employ_situation_text',
'Q19_income_US',
'Q20_income_MA',
'Q21_income_PH',
'Q22_income_feeling',
'Q23_religion',
'Q23_religion_text',
'Q24_religion_freq',
'Q25_adult_hh_num',
'Q26_child_hh_num',
'Q27_marital_status',
'Q28_friends_num',
'Q29_born_in_country',
'Q30_move_last5',
'Q31_country_origin',
'Q32_1_smartphone',
'Q32_2_computer',
'Q32_3_tablet',
'Q33_internet_where',
'Q34_internet_hours_day',
'Q35_social_networks',
'Q36_social_media_people_num',
'Q37_social_media_hours_day',
'Q38_online_research',
'Q39_send_survey_invites',
'Q40_mobile_number'
)
PANEL_VARNAMES <- c(
'Employee_Key_Value',
'DEMO_GENDER',
'DEMO_AGE',
'demo_division',
'demo_region',
'DEMO_EDUCATION_NEW',
'DEMO_EMPLOYMENT_STATUS',
'MEMBERSHIP_START_DATE',
'MEMBERSHIP_STATUS'
)
Q6_ANSWERS <- list(
'Disagree strongly' = 1,
'Disagree moderately' = 2,
'Disagree a little' = 3,
'Neither agree nor disagree' = 4,
'Agree a little' = 5,
'Agree moderately' = 6,
'Agree strongly' = 7
)
Q7_ANSWERS <- list(
'Very negative' = 1,
'Negative' = 2,
'Slightly negative' = 3,
'Neither positive nor negative' = 4,
'Slightly positive' = 5,
'Positive' = 6,
'Very positive' = 7
)
Q8_ANSWERS <- list(
'Extremely uncharacteristic of me (1)' = 1,
'2' = 2,
'3' = 3,
'4' = 4,
'5' = 5,
'6' = 6,
'Extremely characteristic of me (7)' = 7
)
Q9_ANSWERS <- list(
'Never or definitely no (1)' = 1,
'2' = 2,
'3' = 3,
'4' = 4,
'5' = 5,
'6' = 6,
'7' = 7,
'8' = 8,
'Always or definitely yes (9)' = 9
)
Q16_ANSWERS <- list(
'1' = 'Self-employed',
'2' = 'Work for an employer'
)
Q17_ANSWERS <- list(
'1' = 'Professional',
'2' = 'Managerial',
'3' = 'Secretarial of clerical',
'4' = 'Service or labor',
'5' = 'Sales or retail',
'6' = 'Farmer or rancher',
'7' = 'Military',
'8' = 'Other'
)
Q18_ANSWERS <- list(
'1' = 'Student',
'2' = 'Homemaker (including stay-at-home parents)',
'3' = 'Retired',
'4' = 'Disabled',
'5' = 'Unemployed',
'6' = 'Other'
)
Q19_ANSWERS <- list(
'1' = 'Less than $6,000 per year',
'2' = '$6,000-$11,999 per year',
'3' = '$12,000-$23,999 per year',
'4' = '$24,000-$35,999 per year',
'5' = '$36,000-$47,999 per year',
'6' = '$48,000-$59,999 per year',
'7' = '$60,000-$89,999 per year',
'8' = '$90,000-$119,999 per year',
'9' = '$120,000-$179,999 per year',
'10' = '$180,000-$239,999 per year',
'11' = '$240,000 or more per year',
'12' = 'Don\'t Know'
)
Q22_ANSWERS <- list(
'1' = 'Living comfortably on present income',
'2' = 'Getting by on present income',
'3' = 'Finding it difficult on present income',
'4' = 'Finding it very difficult on present income'
)
Q23_ANSWERS <- list(
'1' = 'Protestant',
'2' = 'Roman Catholic',
'3' = 'Mormon/Latter-Day Saints',
'4' = 'Other Christian Religion',
'5' = 'Jewish',
'6' = 'Muslim/Islam',
'7' = 'Islam/Muslim (Shiite)',
'8' = 'Islam/Muslim (Sunni)',
'9' = 'Hinduism',
'10' = 'Buddhism',
'11' = 'Sikhism',
'12' = 'Primal-indigenous/African Traditional and Diasporic/Animist/Nature Worship/Paganism',
'13' = 'Chinese Traditional Religion/Confucianism',
'14' = 'Spiritism',
'15' = 'Other Non-Christian Religion',
'16' = 'No Religion/Atheist/Agnostic',
'17' = 'Don\'t know',
'18' = 'Other (Write in:)'
)
Q24_ANSWERS <- list(
'1' = 'Weekly (at least once a week)',
'2' = 'Monthly (at least once a month)',
'3' = 'Annually (at least once a year)',
'4' = 'Do not regularly attend religious services'
)
Q32_ANSWERS <- list(
'1' = '7 days',
'2' = '6 days',
'3' = '5 days',
'4' = '4 days',
'5' = '3 days',
'6' = '2 days',
'7' = '1 day',
'8' = 'Less than once a week',
'9' = 'Never'
)
Q33_ANSWERS <- list(
'1' = 'On a mobile device',
'2' = 'On a computer or tablet at home',
'3' = 'On a computer or tablet at work',
'4' = 'On a computer or tablet at school',
'5' = 'On a computer or tablet at a public place (e.g., library)',
'6' = 'Other'
)
TIMING_VARNAMES <- c(
'StartDate',
'EndDate',
'Duration',
'RecordedDate',
'ResponseId',
'ExternalReference',
'Q1',
'Q2_Browser',
'Q2_Version',
'Q2_Operating.System',
'Q2_Resolution'
)
# function definitions
bb_decision_timing <- function(d, exp1 = FALSE) {
    # Calculate the average time, across rounds, that each player takes to
    # make a cooperation decision in a breadboard event log.
    #
    # Args:
    #   d: long-format breadboard log with columns id, event, datetime
    #      (already parsed via convert_time), data.name, data.value.
    #   exp1: TRUE for experiment 1 logs ('cooperationEvent' rows with a
    #      'round' field); FALSE for experiment 2 ('CooperationDecision' rows
    #      with a 'curRound' field).
    #
    # Returns: data.frame with one row per player: pid, secs_to_decision.
    # Reshape decision events wide: one row per event id, one column per
    # data.name key.
    if(exp1) {
        tmp <- dcast(subset(d, event == 'cooperationEvent'),
                     id + datetime ~ data.name, value.var = 'data.value')
    } else {
        tmp <- dcast(subset(d, event == 'CooperationDecision'),
                     id + datetime ~ data.name, value.var = 'data.value')
        # Experiment 2 names the round counter 'curRound'; normalize it.
        names(tmp)[which(names(tmp) == 'curRound')] <- 'round'
    }
    # Keep only real game rounds (numeric labels), dropping practice rounds
    # such as 'p1'.
    tmp <- tmp[grepl('^[0-9]', tmp$round), ]
    tmp$round <- as.numeric(tmp$round)
    # Within each round, the round "start" is the earliest event timestamp;
    # a player's decision time is their event time minus that start.
    res <- do.call(rbind, lapply(split(tmp, tmp$round), function(x) {
        start <- min(x$datetime)
        # Real players have ids of 4+ digits; drops empty/system rows.
        x <- subset(x, grepl('^[0-9]{4}', pid))
        if(exp1) {
            return(data.frame(round = unique(x$round), pid = as.numeric(x$pid),
                              secs = x$datetime - start))
        } else {
            # Experiment 2 can log several rows per player per round;
            # collapse to one row per player.
            return(do.call(rbind, lapply(split(x, x$pid), function(y) {
                return(data.frame(round = unique(y$round),
                                  pid = unique(as.numeric(y$pid)),
                                  secs = unique(y$datetime - start)))
            })))
        }
    }))
    # Average decision time per player across all rounds.
    # NOTE(review): `secs` is a difftime whose unit R picks automatically
    # (secs vs. mins); verify all rounds resolve to the same unit before
    # comparing players across games.
    res <- aggregate(res$secs, by = list(res$pid), mean, na.rm = TRUE)
    names(res) <- c('pid', 'secs_to_decision')
    return(res)
}
bb_end_scores <- function(d, recast = FALSE) {
    # Extract each participant's final score from a breadboard event log.
    #
    # Args:
    #   d: long-format breadboard log (id, event, datetime, data.name,
    #      data.value).
    #   recast: TRUE when scores are logged as pid/score key-value pairs that
    #      must be reshaped wide first (experiment 2); FALSE when data.name
    #      holds the player id and data.value the score (experiment 1).
    #
    # Returns: data.frame with columns pid (numeric) and end_score (numeric).
    tmp <- subset(d, event == 'FinalScore')
    if(recast) {
        # After the cast: one row per event id with 'pid' and 'score' columns.
        tmp <- dcast(tmp, id + datetime ~ data.name, value.var = 'data.value')
        # Keep rows whose pid looks numeric (real players), then normalize
        # the column name.
        tmp <- tmp[grep('^[0-9]', tmp$pid), c('id', 'datetime', 'pid', 'score')]
        names(tmp)[3] <- 'playerid'
    } else {
        # Here data.name is the player id and data.value the score.
        tmp <- tmp[grep('^[0-9]', tmp$data.name),
                   c('id', 'datetime', 'data.name', 'data.value')]
        names(tmp)[3:4] <- c('playerid', 'score')
    }
    return(data.frame(pid = as.numeric(tmp$playerid),
                      end_score = as.numeric(tmp$score)))
}
bb_gameplay <- function(d, exp1 = FALSE) {
    # Determine which rounds each player actually played, one column per
    # round.
    #
    # Args:
    #   d: long-format breadboard log (id, event, datetime, data.name,
    #      data.value).
    #   exp1: TRUE for experiment 1 ('cooperationEvent' rows), FALSE for
    #      experiment 2 ('CooperationDecision' rows).
    #
    # Returns: data.frame with one row per pid and columns r1, r2, ... For
    #   exp1 these are event counts per round; for experiment 2 they are
    #   collapsed to 0/1 participation flags.
    if(exp1) {
        tmp <- dcast(subset(d, event == 'cooperationEvent'),
                     id + datetime ~ data.name, value.var = 'data.value')
        # Keep rows where both pid and round are numeric; as.numeric() on
        # non-numeric labels (e.g. practice rounds) yields NA, with a
        # coercion warning.
        tmp <- tmp[!is.na(as.numeric(tmp$pid)) & !is.na(as.numeric(tmp$round)), ]
        tmp[, c('pid', 'round')] <- apply(tmp[, c('pid', 'round')], 2, function(x) {
            as.numeric(x)
        })
        # Count events per player per round, then label columns r<round>.
        tmp <- dcast(tmp, pid ~ round, fun.aggregate = length)
        names(tmp)[2:ncol(tmp)] <- paste0('r', names(tmp)[2:ncol(tmp)])
    } else {
        tmp <- dcast(subset(d, event == 'CooperationDecision'),
                     id + datetime ~ data.name, value.var = 'data.value')
        tmp <- tmp[!is.na(as.numeric(tmp$pid)), ]
        # Experiment 2 names the round counter 'curRound'.
        names(tmp)[which(names(tmp) == 'curRound')] <- 'round'
        tmp[, c('pid', 'round')] <- apply(tmp[, c('pid', 'round')], 2, function(x) {
            as.numeric(x)
        })
        tmp <- dcast(tmp, pid ~ round, fun.aggregate = length)
        # Collapse counts to 0/1 played-this-round indicators.
        tmp[, 2:ncol(tmp)] <- apply(tmp[, 2:ncol(tmp)], 2, function(x) {
            ifelse(x > 0, 1, 0)
        })
        names(tmp)[2:ncol(tmp)] <- paste0('r', names(tmp)[2:ncol(tmp)])
    }
    return(tmp)
}
bb_login <- function(d, end_time) {
    # Identify each player's last login at or before a reference time (players
    # may reconnect several times; only the most recent login before the
    # caller-supplied cutoff is kept).
    #
    # Args:
    #   d: long-format breadboard log (id, event, datetime, data.name,
    #      data.value).
    #   end_time: POSIX timestamp cutoff (callers pass a game init/start
    #      event's timestamp); logins after it are discarded.
    #
    # Returns: data.frame with pid (numeric), ip_address, and a constant
    #   logged_in = 1 flag for merging.
    tmp <- dcast(subset(d, event == 'clientLogIn'),
                 id + datetime ~ data.name, value.var = 'data.value')
    # Real players have numeric client ids.
    tmp <- tmp[grepl('^[0-9]', tmp$clientId), ]
    # Newest first, drop logins after the cutoff, then keep the first (most
    # recent) row per client.
    tmp <- tmp[order(tmp$datetime, decreasing = TRUE), ]
    tmp <- tmp[tmp$datetime <= end_time, ]
    tmp <- tmp[!duplicated(tmp$clientId), ]
    return(data.frame(pid = as.numeric(tmp$clientId),
                      ip_address = tmp$ipAddress, logged_in = 1))
}
bb_passed_training <- function(d, experiment = c(1, 2, 3)) {
    # Determine which players passed the pre-game training for a given
    # experiment.
    #
    # Args:
    #   d: long-format breadboard log (id, event, datetime, data.name,
    #      data.value).
    #   experiment: which experiment's log this is (1, 2, or 3); only the
    #      first element is used.
    #
    # Returns: data.frame with pid and a constant passed_training = 1 flag;
    #   for experiment 2 it also carries frac_q_correct, the fraction of the
    #   four comprehension questions answered correctly.
    #
    # BUG FIX: the default `experiment = c(1, 2, 3)` fed a length-3 vector
    # into the scalar `if()` below -- an error in R >= 4.2 (a warning plus
    # first-element behavior before that). Reduce to the first element
    # explicitly.
    experiment <- experiment[1]
    if(experiment == 1) {
        # Experiment 1: reaching practice round 'p1' means training passed.
        tmp <- dcast(subset(d, event == 'cooperationEvent'),
                     id + datetime ~ data.name, value.var = 'data.value')
        tmp <- subset(tmp, grepl('^[0-9]{4}', pid) & round == 'p1')
        return(data.frame(pid = as.numeric(tmp$pid), passed_training = 1))
    } else if(experiment == 2) {
        # Experiment 2: score the four comprehension questions (events named
        # 'q*'); each player's single non-NA answer per question is compared
        # against the answer key below.
        qcast <- dcast(subset(d, grepl('^q', event)),
                       id + datetime ~ data.name, value.var = 'data.value')
        q_success <- do.call(rbind, lapply(split(qcast, qcast$pid), function(x) {
            res <- apply(x[, 3:ncol(x)], 2, function(y) {
                return(y[!is.na(y)])
            })
            res <- sum(res %in% c('randomly', 'net_total', 'leave_neighborhood',
                                  'pay_100_gain_100')) / 4
            return(data.frame(pid = unique(x$pid), frac_q_correct = res))
        }))
        # Players who reached the 'ChooseGroup' step completed training.
        tmp <- dcast(subset(d, event == 'ChooseGroup'),
                     id + datetime ~ data.name, value.var = 'data.value')
        tmp <- subset(tmp, grepl('^[0-9]{4}', pid))
        # BUG FIX: base::merge() has no `on` argument (the original's
        # `on = 'pid'` was silently swallowed by `...` and the join fell back
        # to the intersect of column names, which happened to be 'pid').
        # Join on pid explicitly.
        return(merge(data.frame(pid = as.numeric(tmp$pid), passed_training = 1),
                     q_success, by = 'pid'))
    } else {
        # Experiment 3: a 'PlayerWaiting' event carries the player id of
        # everyone who finished training and is waiting for the game.
        tmp <- subset(d, event == 'PlayerWaiting')
        return(data.frame(pid = as.numeric(tmp$data.value), passed_training = 1))
    }
}
bb_data_merge <- function(login, training, play = NULL, decision = NULL,
                          scores = NULL, exp3 = FALSE, prefix = 'bb') {
    # Merge the per-player breadboard summary pieces into one wide data frame.
    #
    # Args:
    #   login: data.frame with a 'pid' column (one row per logged-in player).
    #   training: data.frame with a 'pid' column (players who passed training).
    #   play, decision, scores: additional per-player data.frames; required
    #      (non-NULL) unless exp3 = TRUE.
    #   exp3: if TRUE, only login and training are merged (experiment 3 has
    #      no round/decision/score summaries).
    #   prefix: string prepended (with '_') to every output column name.
    #
    # Returns: the full outer join of the inputs on 'pid', with every column
    #   renamed to '<prefix>_<name>'.
    #
    # BUG FIX: the original passed `on = 'pid'` to merge(); base::merge() has
    # no `on` argument, so it was silently swallowed by `...` and the join key
    # fell back to the default intersect() of column names. Join on 'pid'
    # explicitly so the key is what the caller intends.
    tmp <- merge(login, training, by = 'pid', all = TRUE)
    if (!exp3) {
        tmp <- merge(tmp, play, by = 'pid', all = TRUE)
        tmp <- merge(tmp, decision, by = 'pid', all = TRUE)
        tmp <- merge(tmp, scores, by = 'pid', all = TRUE)
    }
    names(tmp) <- paste(prefix, names(tmp), sep = '_')
    return(tmp)
}
convert_time <- function(d) {
    # Parse breadboard timestamp strings ("YYYY-MM-DD HH:MM:SS") into POSIXlt
    # date-times, interpreted in the current session time zone; unparseable
    # entries become NA.
    fmt <- '%Y-%m-%d %H:%M:%S'
    strptime(d, fmt)
}
drop_extra_rows <- function(d) {
    # Drop the Qualtrics metadata rows from the top of an exported data
    # frame. Qualtrics CSV exports repeat header information in leading rows;
    # the last such row holds JSON import ids (values starting with '{').
    #
    # Args:
    #   d: raw data.frame as read from a Qualtrics CSV export.
    # Returns: d with rows 1 through the JSON-metadata row removed.
    #
    # NOTE(review): apply() coerces the data.frame to a character matrix
    # before grepping. The stopifnot() guard requires every column to flag
    # the same single row; if any column matched zero or several rows,
    # apply() would return a list and the guard (or the row-drop below)
    # would fail.
    extra_rows <- apply(d, 2, function(x) {grep('^\\{', x)})
    stopifnot(length(unique(extra_rows)) == 1)
    # Remove everything from row 1 down to (and including) the metadata row.
    d <- d[-(1:unique(extra_rows)), ]
    return(d)
}
relabel_values <- function(d, regex, dict, single = FALSE) {
    # Recode survey-response labels for every column whose name matches a
    # pattern, using a lookup dictionary.
    #
    # Args:
    #   d: data.frame of survey responses (label-valued columns).
    #   regex: regular expression selecting the column(s) to recode
    #      (e.g. '^Q6_').
    #   dict: named list mapping response label -> replacement value (numeric
    #      codes such as Q6_ANSWERS, or display labels such as Q16_ANSWERS);
    #      labels absent from the dictionary (including '') become NA.
    #   single: TRUE when regex matches exactly one column, so d[, idx]
    #      yields a vector rather than a data.frame.
    #
    # Returns: the recoded vector (single = TRUE) or a matrix of recoded
    #   columns (single = FALSE), suitable for assignment back into d.
    #
    # Mechanics: dict[x] subsets the list by label, so ifelse() assembles a
    # list of matched values / NAs, which do.call(c, ...) flattens into a
    # vector.
    if(single) {
        tmp <- d[, grep(regex, names(d))]
        return(do.call(c, ifelse(tmp %in% names(dict), dict[tmp], NA)))
    } else {
        return(apply(d[, grep(regex, names(d))], 2, function(x) {
            do.call(c, ifelse(x %in% names(dict), dict[x], NA))
        }))
    }
}
reverse_code <- function(var, max) {
    # Reverse-code Likert items on a 1..max scale: 1 <-> max, 2 <-> max - 1,
    # and so on. Computed as abs(var - max) + 1, matching the original
    # formula exactly (including its behavior for out-of-range values); NA
    # propagates.
    distance_from_top <- abs(var - max)
    distance_from_top + 1
}
tipi_scale <- function(var1, var2) {
    # Row-wise mean of a TIPI item pair (one member is typically the
    # reverse-coded item), yielding one subscale score per respondent.
    #
    # Args:
    #   var1, var2: numeric vectors of equal length.
    # Returns: numeric vector of per-respondent means; NA when either item
    #   is NA.
    #
    # Replaced apply(cbind(...), 1, mean) with rowMeans(): identical result,
    # but vectorized instead of calling a closure for every row.
    rowMeans(cbind(var1, var2))
}
variable_rename <- function(d, dict) {
    # Rename columns of `d` according to a lookup table.
    #
    # Args:
    #   d: data.frame whose names may appear in dict$label.
    #   dict: data.frame with columns 'label' (current name) and 'verbose'
    #      (replacement name); labels are assumed unique.
    # Returns: `d` with every matched column renamed; unmatched names are
    #   left untouched.
    #
    # Vectorized with match() instead of looping which() over each name.
    # For a duplicated label this takes the first dictionary entry, where the
    # original per-name loop would have failed outright on the multi-element
    # replacement.
    hit <- match(names(d), dict$label)
    names(d)[!is.na(hit)] <- as.character(dict$verbose[hit[!is.na(hit)]])
    return(d)
}
# load all data
comps <- read.csv('data/ngs2_empanelment_pilot_completes.csv', header = TRUE,
sep = ',', stringsAsFactors = FALSE)
parts <- read.csv('data/ngs2_empanelment_pilot_partials.csv', header = TRUE,
sep = ',', stringsAsFactors = FALSE)
bb_ids <- read.csv('data/empanelment_breadboard_ids.csv', header = TRUE,
sep = ',', stringsAsFactors = FALSE)
dist1k <- read.csv('data/ngs2_empanelment_distribution_additional_1k.csv',
header = TRUE, sep = ',', stringsAsFactors = FALSE)
times <- read.csv('data/ngs2_pilot_timing_completes.csv', header = TRUE,
sep = ',', stringsAsFactors = FALSE)
panel <- read.spss('data/ngs2_pilot_panel.sav', to.data.frame = TRUE)
bb1 <- read.csv('NGS2-Cycle1-Experiment1/data/ngs2_e1_pilot_2017-07-12-01_9222.csv',
header = TRUE, sep = ',', stringsAsFactors = FALSE)
bb2 <- read.csv('NGS2-Cycle1-Experiment2/data/ngs2_e2_pilot_2017-07-12-01_10381.csv',
header = TRUE, sep = ',', stringsAsFactors = FALSE)
bb3 <- read.csv('NGS2-Cycle1-Experiment3/data/ngs2_e3_pilot_2017-07-12-01_9409.csv',
header = TRUE, sep = ',', stringsAsFactors = FALSE)
cdict <- read.csv('data/empanelment_dictionary.csv', header = TRUE, sep = ',',
stringsAsFactors = FALSE)
pdict <- read.csv('data/empanelment_partial_dictionary.csv', header = TRUE,
sep = ',', stringsAsFactors = FALSE)
# 1. deal with survey responses, both `completes` and `partials`
# rename variables for both `completes` and `partials`
comps <- variable_rename(comps, cdict)
parts <- variable_rename(parts, pdict)
# drop extraneous rows of redundant information
# note: the hard-coding sucks for `partials`, but not a great way around it
comps <- drop_extra_rows(comps)
parts <- parts[2:nrow(parts), ]
# make answer values numerics for `completes`
# note: qualtrics export for `partials` already outputs values as numerics,
# though due to the weird header rows, they do need to be made numeric
comps[, grep('^Q6_', names(comps))] <- relabel_values(comps, '^Q6_', Q6_ANSWERS)
comps[, grep('^Q7_', names(comps))] <- relabel_values(comps, '^Q7_', Q7_ANSWERS)
comps[, grep('^Q8_', names(comps))] <- relabel_values(comps, '^Q8_', Q8_ANSWERS)
comps[, grep('^Q9_', names(comps))] <- relabel_values(comps, '^Q9_', Q9_ANSWERS)
comps[, EMPANEL_NUMERIC_VARS] <- apply(comps[, EMPANEL_NUMERIC_VARS], 2,
function(x) {as.numeric(x)}
)
# comps[, EMPANEL_YESNO_VARS] <- apply(comps[, EMPANEL_YESNO_VARS], 2,
# function(x) {ifelse(x == '', NA, ifelse(grepl('Yes', x), 1, 0))}
# )
# convert values in `partials` to numeric
parts[, grep('^Q[0|6-9]_', names(parts))] <- apply(
parts[, grep('^Q[0|6-9]_', names(parts))], 2, function(x) {as.numeric(x)}
)
parts[, c('Status', 'Finished', 'Q3_consent')] <- apply(
parts[, c('Status', 'Finished', 'Q3_consent')], 2, function(x) {
as.numeric(x)
}
)
parts[, EMPANEL_NUMERIC_VARS] <- apply(parts[, EMPANEL_NUMERIC_VARS], 2,
function(x) {as.numeric(x)}
)
# parts[, EMPANEL_YESNO_VARS] <- apply(parts[, EMPANEL_YESNO_VARS], 2,
# function(x) {ifelse(x == '', NA, ifelse(grepl('Yes', x), 1, 0))}
# )
# convert `partials` to same format as the `completes`
parts$Status <- NA
parts$Finished <- ifelse(parts$Finished == 1, TRUE, FALSE)
parts$Q3_consent <- ifelse(parts$Q3_consent == 1,
'I consent to participate in this experiment.', NA)
parts[, grep('^Q16_', names(parts))] <- relabel_values(parts, '^Q16_',
Q16_ANSWERS, TRUE)
parts[, grep('^Q17_', names(parts))] <- relabel_values(parts, '^Q17_',
Q17_ANSWERS, TRUE)
parts[, grep('^Q18_.*n$', names(parts))] <- relabel_values(parts, '^Q18_.*n$',
Q18_ANSWERS, TRUE)
parts[, grep('^Q19_', names(parts))] <- relabel_values(parts, '^Q19_',
Q19_ANSWERS, TRUE)
parts[, grep('^Q22_', names(parts))] <- relabel_values(parts, '^Q22_',
Q22_ANSWERS, TRUE)
parts[, grep('^Q23_.*n$', names(parts))] <- relabel_values(parts, '^Q23_.*n$',
Q23_ANSWERS, TRUE)
parts[, grep('^Q24_', names(parts))] <- relabel_values(parts, '^Q24_',
Q24_ANSWERS, TRUE)
parts[, grep('^Q32_', names(parts))] <- relabel_values(parts, '^Q32_', Q32_ANSWERS)
parts[, grep('^Q33_', names(parts))] <- relabel_values(parts, '^Q33_',
Q33_ANSWERS, TRUE)
# split `partials` name variable into first and last names
parts$RecipientFirstName <- sapply(strsplit(parts$Name, ','), function(x) {
trimws(x[2])
})
parts$RecipientLastName <- sapply(strsplit(parts$Name, ','), function(x) {
trimws(x[1])
})
# create variables in `partials` for parity with `completes`
parts$Progress <- NA
parts$Duration..in.seconds. <- NA
parts$DistributionChannel <- NA
parts$UserLanguage <- NA
# create variables in `completes` for parity with `partials`
comps$Q0_intro_screen <- NA
comps$Q0_yins_screen <- NA
comps$Q0_contact_screen <- NA
# order `partials` and `completes` for appending
parts <- parts[, NAME_MATCHES]
qstart <- min(grep('^Q', names(comps)))
q0start <- min(grep('^Q0', names(comps)))
comps <- comps[, names(comps)[c(1:(qstart - 1), q0start:ncol(comps),
qstart:(q0start - 1))]]
parts$source <- 'partial'
comps$source <- 'complete'
# append `completes` and `partials`
d <- rbind(comps, parts)
# create scales
# tipi
# http://gosling.psy.utexas.edu/scales-weve-developed/ten-item-personality-measure-tipi/
# directions:
# 1. Reverse-code items (2, 4, 6, 8, 10)
# 2. Take average of item pairs ('R' reverse-scored):
# A. Extraversion: [1, 6R]
# B. Agreeableness: [2R, 7]
# C. Conscientiousness: [3, 8R]
# D. Emotional Stability: [4R, 9]
# E. Openness to Experiences: [5, 10R]
d$Q6_2_critical_REV <- reverse_code(d$Q6_2_critical, 7)
d$Q6_4_anxious_REV <- reverse_code(d$Q6_4_anxious, 7)
d$Q6_6_reserved_REV <- reverse_code(d$Q6_6_reserved, 7)
d$Q6_8_disorganized_REV <- reverse_code(d$Q6_8_disorganized, 7)
d$Q6_10_conventional_REV <- reverse_code(d$Q6_10_conventional, 7)
d$tipi_extraversion <- rowMeans(
d[, c('Q6_1_extravert', 'Q6_6_reserved_REV')]
)
d$tipi_agreeableness <- rowMeans(
d[, c('Q6_7_sympathetic', 'Q6_2_critical_REV')]
)
d$tipi_conscientiousness <- rowMeans(
d[, c('Q6_3_dependable', 'Q6_8_disorganized_REV')]
)
d$tipi_emot_stability <- rowMeans(
d[, c('Q6_9_calm', 'Q6_4_anxious_REV')]
)
d$tipi_open_experiences <- rowMeans(
d[, c('Q6_5_open', 'Q6_10_conventional_REV')]
)
# social dominance orientation
# https://dash.harvard.edu/bitstream/handle/1/3207711/Sidanius_SocialDominanceOrientation.pdf
# directions
# 1. Reverse-code items (9 through 16)
# 2. Take average
d$Q7_9_equal_REV <- reverse_code(d$Q7_9_equal, 7)
d$Q7_10_equality_ideal_REV <- reverse_code(d$Q7_10_equality_ideal, 7)
d$Q7_11_equal_chance_REV <- reverse_code(d$Q7_11_equal_chance, 7)
d$Q7_12_equalize_conditions_REV <- reverse_code(d$Q7_12_equalize_conditions, 7)
d$Q7_13_social_equality_REV <- reverse_code(d$Q7_13_social_equality, 7)
d$Q7_14_fewer_problems_REV <- reverse_code(d$Q7_14_fewer_problems, 7)
d$Q7_15_incomes_equal_REV <- reverse_code(d$Q7_15_incomes_equal, 7)
d$Q7_16_no_dominate_REV <- reverse_code(d$Q7_16_no_dominate, 7)
d$soc_dom_orient <- rowMeans(
d[, grep('Q7_[1-8]_|Q7_.*REV$', names(d))]
)
# communal orientation scale
# http://fetzer.org/sites/default/files/images/stories/pdf/selfmeasures/CollectiveOrientation.pdf
# directions
# 1. Reverse-code items (3, 4, 6, 9, 10, 12, 13)
# 2. Take average
d$Q8_3_sensitive_feelings_REV <- reverse_code(d$Q8_3_sensitive_feelings, 7)
d$Q8_4_not_helpful_REV <- reverse_code(d$Q8_4_not_helpful, 7)
d$Q8_6_no_aid_REV <- reverse_code(d$Q8_6_no_aid, 7)
d$Q8_9_no_involvement_REV <- reverse_code(d$Q8_9_no_involvement, 7)
d$Q8_10_no_help_others_REV <- reverse_code(d$Q8_10_no_help_others, 7)
d$Q8_12_emotion_avoid_REV <- reverse_code(d$Q8_12_emotion_avoid, 7)
d$Q8_13_trouble_themselves_REV <- reverse_code(d$Q8_13_trouble_themselves, 7)
d$comm_orient_scale <- rowSums(
d[, grep('^Q8_[12578]([14]|_)|Q8_.*_REV$', names(d))]
)
# cultural orientation scales
# http://fetzer.org/sites/default/files/images/stories/pdf/selfmeasures/CollectiveOrientation.pdf
# directions
# 1. Sum scores:
# A. Horizontal individualism: [1, 2, 3, 4]
# B. Vertical individualism: [5, 6, 7, 8]
# C. Horizontal collectivism: [9, 10, 11, 12]
# D. Vertical collectivism: [13, 14, 15, 16]
d$horiz_indiv <- rowSums(
d[, grep('Q9_[1234]_', names(d))]
)
d$vert_indiv <- rowSums(
d[, grep('Q9_[5678]_', names(d))]
)
d$horiz_collect <- rowSums(
d[, grep('Q9_(9|1[012])_', names(d))]
)
d$vert_collect <- rowSums(
d[, grep('Q9_1[3456]_', names(d))]
)
# 2. bring in breadboard ids for those that went from empanelment to experiment
# merge in breadboard ids
d <- merge(
d,
bb_ids[, c('LastName', 'FirstName', 'USERID')],
by.x = c('RecipientLastName', 'RecipientFirstName'),
by.y = c('LastName', 'FirstName'),
all.x = TRUE
)
names(d)[grep('USERID', names(d))] <- 'bb_id'
d$bb_id <- as.numeric(d$bb_id)
# 3. deal with the experiment timing data
# drop extraneous rows of redundant information
times <- drop_extra_rows(times)
# convert variables to numeric
times$Duration..in.seconds. <- as.numeric(times$Duration..in.seconds.)
times$Finished <- as.logical(times$Finished)
# keep only valid responses
times <- times[times$ExternalReference != '', ]
# rename survey duration variable
names(times)[grep('^Duration', names(times))] <- 'Duration'
times <- times[, TIMING_VARNAMES]
names(times) <- paste('pilot', names(times), sep = '_')
# merge timing data into empanelment
d <- merge(
d,
times,
by.x = 'ExternalReference',
by.y = 'pilot_ExternalReference',
all.x = TRUE
)
# 4. add in additional 1000 panel members contacted
names(dist1k) <- paste('dist1k', names(dist1k), sep = '_')
d <- merge(
d,
dist1k[, c('dist1k_Response.Id', 'dist1k_External.Data.Reference',
'dist1k_Status', 'dist1k_End.Date')],
by.x = 'ExternalReference',
by.y = 'dist1k_External.Data.Reference',
all = TRUE
)
d$source <- ifelse(is.na(d$source), 'dist1k', d$source)
# 5. add in panel demographics
panel <- panel[, PANEL_VARNAMES]
names(panel) <- paste('panel', names(panel), sep = '_')
# redo datetime variable from numeric
panel$panel_MEMBERSHIP_START_DATE <- as.POSIXct(panel$panel_MEMBERSHIP_START_DATE,
origin = '1582-10-14')
# merge panel in with existing data
d <- merge(
d,
panel,
by.x = 'ExternalReference',
by.y = 'panel_Employee_Key_Value',
all = TRUE
)
d$source <- ifelse(is.na(d$source), 'panel', d$source)
# 6. bring in breadboard data
# create datetimes from timestamps
bb1$datetime <- convert_time(bb1$datetime)
bb2$datetime <- convert_time(bb2$datetime)
bb3$datetime <- convert_time(bb3$datetime)
# identify logins
bb1_login <- bb_login(bb1, bb1$datetime[bb1$event == 'initStart'])
bb2_login <- bb_login(bb2, bb2$datetime[bb2$event == 'StepStart' &
bb2$data.value == 'initStep'])
bb3_login <- bb_login(bb3, bb3$datetime[bb3$event == 'GameStart'])
# determine who passed training
bb1_training <- bb_passed_training(bb1, experiment = 1)
bb2_training <- bb_passed_training(bb2, experiment = 2)
bb3_training <- bb_passed_training(bb3, experiment = 3)
# identify game play through rounds
bb1_play <- bb_gameplay(bb1, exp1 = TRUE)
bb2_play <- bb_gameplay(bb2, exp1 = FALSE)
# identify time for player decisions across rounds
bb1_decision <- bb_decision_timing(bb1, exp1 = TRUE)
bb2_decision <- bb_decision_timing(bb2, exp1 = FALSE)
# identify ending scores
bb1_scores <- bb_end_scores(bb1, recast = FALSE)
bb2_scores <- bb_end_scores(bb2, recast = TRUE)
# create final breadboard datasets
bb1_summary <- bb_data_merge(bb1_login, bb1_training, bb1_play,
bb1_decision, bb1_scores, exp3 = FALSE,
prefix = 'bb1')
bb2_summary <- bb_data_merge(bb2_login, bb2_training, bb2_play,
bb2_decision, bb2_scores, exp3 = FALSE,
prefix = 'bb2')
bb3_summary <- bb_data_merge(bb3_login, bb3_training, exp3 = TRUE,
prefix = 'bb3')
# merge breadboard summaries to data
d <- merge(d, bb1_summary, by.x = 'bb_id', by.y = 'bb1_pid', all = TRUE)
d <- merge(d, bb2_summary, by.x = 'bb_id', by.y = 'bb2_pid', all = TRUE)
d <- merge(d, bb3_summary, by.x = 'bb_id', by.y = 'bb3_pid', all = TRUE)
d$source <- ifelse(is.na(d$source), 'bb', d$source)
# 7. derive variables for analysis
# Contact batch: 1 = part of the additional-1k distribution, 2 = original
# batch.
d$sample_allocation <- ifelse(d$ExternalReference %in%
                                  dist1k$dist1k_External.Data.Reference, 1, 2)
# Panel tenure in days as of the pilot date (2017-07-12).
d$days_with_panel <- as.numeric(
    as.POSIXct('2017-07-12') -
        d$panel_MEMBERSHIP_START_DATE
)
# Attrition-funnel flags; each step is NA for records that never reached it.
d$began_empanelment <- ifelse(d$source == 'complete' | d$source == 'partial', 1, 0)
d$stop_prior_to_consent <- ifelse(d$began_empanelment == 1 & is.na(d$Q3_consent), 1,
                                  ifelse(d$began_empanelment == 1, 0, NA))
d$stop_at_consent <- ifelse(d$stop_prior_to_consent == 0 & is.na(d$Q6_1_extravert),
                            1, ifelse(d$stop_prior_to_consent == 0, 0, NA))
# Dropped out mid-survey: answered the first item (Q6_1) but completed < 95%
# of the items between Q6_1 and Q38 (column positions looked up by name so
# the range survives reordering).
start_stop <- c(which(names(d) == 'Q6_1_extravert'),
                which(names(d) == 'Q38_online_research'))
d$stop_at_other_point <- apply(d[, start_stop[1]:start_stop[2]], 1, function(x) {
    ifelse(is.na(x[1]), NA,
           ifelse(length(x[!is.na(x)]) / (start_stop[2] - start_stop[1] + 1) < .95,
                  1, 0))
})
# Completed the survey if the final item (Q39) was answered.
d$completed_empanelment <- ifelse(
    !is.na(d$Q39_send_survey_invites), 1,
    ifelse(d$source == 'partial' | d$source == 'complete', 0, NA)
)
# Experiment-scheduling survey: pilot_Q1 lists the session times agreed to;
# the 8pm/9pm/10pm sessions correspond to experiments 1-3.
d$completed_experiment_survey <- ifelse(d$source != 'complete', NA,
                                        ifelse(d$pilot_Q1 != '', 1, 0))
d$agreed_exp1 <- ifelse(d$completed_experiment_survey == 1,
                        ifelse(grepl('8pm', d$pilot_Q1), 1, 0), NA)
d$agreed_exp2 <- ifelse(d$completed_experiment_survey == 1,
                        ifelse(grepl('9pm', d$pilot_Q1), 1, 0), NA)
d$agreed_exp3 <- ifelse(d$completed_experiment_survey == 1,
                        ifelse(grepl('10pm', d$pilot_Q1), 1, 0), NA)

# write data to disk
# na = '' writes blanks instead of the literal string 'NA'.
write.csv(d, file = 'pilot_data_merged.csv', row.names = FALSE, na = '')
|
c24fe0ebcc2bc67ab1de206e24b082d42c88cb34 | f84555689fd60f3a8811c7a2dd63a04a4c141477 | /man/ets.low_F_NP.Rd | 7e3d7f7780246f3a01a1169596f55b86958d8bd9 | [] | no_license | raphael210/MaziBox | ca4edada9a187a603a7664e4cb5143537345eccd | 0a341bf9ba183383c36f2ab63cd8320138552828 | refs/heads/master | 2021-04-26T16:44:49.165127 | 2017-04-24T03:26:48 | 2017-04-24T03:26:48 | 79,436,863 | 0 | 0 | null | 2017-04-24T03:26:49 | 2017-01-19T09:23:48 | R | UTF-8 | R | false | true | 278 | rd | ets.low_F_NP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funset.R
\name{ets.low_F_NP}
\alias{ets.low_F_NP}
\title{get ETS of stocks with low forecasting}
\usage{
ets.low_F_NP()
}
\value{
ETS object.
}
\description{
get ETS of stocks with low forecasting
}
|
05e799546d6d2224b06658c7d13e5bb88303975b | c8b151735d2abc1fd181f1dabb4724936506dbd6 | /deathCauses_Rplot.R | 7cb7d8e15000c8682911423687bb05047d4a0e86 | [] | no_license | Jo11jo/The-4-Loop | 41128b1a93f0fe8c884b1376e454c09229cc8331 | 0c9e8cf060abd60980e31fa496f0c5192dd03953 | refs/heads/master | 2021-05-12T01:26:49.547612 | 2018-01-19T16:07:43 | 2018-01-19T16:07:43 | 117,559,652 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 465 | r | deathCauses_Rplot.R | library('tidyverse')
# Make sure ggplot2 is present before loading it.  The original script called
# install.packages('ggplot2') unconditionally *after* library(), which
# re-downloaded the package on every run and could never rescue a machine
# where it was missing (library() would already have failed).
if (!requireNamespace("ggplot2", quietly = TRUE)) {
  install.packages("ggplot2")
}
library("ggplot2")

# One row per cause of death with its total number of deaths.
common_deathCauses <- read.csv("common_deathCauses.csv")

# This makes a bar graph of the most common death causes in the years 1820 until 1920.
# Causes are ordered by death count and the axes flipped so the bars run horizontally.
# (The original passed `position = position_stack(reverse = TRUE)` to ggplot(),
# where extra arguments are not used; it is dropped here, leaving the rendered
# plot unchanged.)
ggplot(common_deathCauses, aes(x = reorder(cause, deaths), y = deaths)) +
  coord_flip() +
  theme(legend.position = "top") +
  geom_bar(stat = "identity", fill = "red") +
  labs(x = "Death Causes", y = "Deaths")
|
cdac17cfed1395bb9ed3ab4244169adc76cd7e13 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /diceR/inst/testfiles/connectivity_matrix/libFuzzer_connectivity_matrix/connectivity_matrix_valgrind_files/1609958623-test.R | acb36c95cec9860068f7272ab17f7635e028bdb5 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 429 | r | 1609958623-test.R | testlist <- list(x = c(1.65468513579984e-316, 1.12414655062708e+79, NaN, NaN, 1.64966219107908e-115, 3.522512689419e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Valgrind/libFuzzer regression replay: feed the captured argument list
# (`testlist`, defined verbatim above — its exact byte values, including the
# NaNs and denormals, are the point of the test) to the internal diceR
# connectivity-matrix routine and dump the structure of whatever comes back.
# Any non-crashing, non-leaking run is a pass.
result <- do.call(diceR:::connectivity_matrix,testlist)
str(result)
cfc8a127fc5cc9cfa11ddf884505901d33800f37 | bf90dbae3ffc02f046740589591afbd0288a62f8 | /R/bayesPiVector.r | a2dc2ce85b34161b2230f728b5ea3902d03056ea | [
"MIT"
] | permissive | atredennick/EoAR | 65a70d59a429a013e3f8453e638dc626c6aa8a1e | 068e9d70940ccd3f87b66e0567c27273ac71b6a8 | refs/heads/master | 2023-07-10T21:02:07.010382 | 2021-08-13T14:47:38 | 2021-08-13T14:47:38 | 289,321,390 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,631 | r | bayesPiVector.r | #' @export
#'
#' @title Baysian estimation of a multinomial
#' proportion vector assuming a conjugate
#' prior.
#'
#' @description This routine assumes you have an observation
#' from a multinomial distribution with k classes (k >= 2, parameter
#' \code{x}) and have assumed that the multinomial distribution's
#' proportion vector ("pi vector") follows a Dirichelet distribution.
#' If so, this routine estimates the proportion vector's
#' posterior distribution mean, variance, and mode.
#'
#' @param x An integer vector
#' containing the number of observed 'successes' in
#' each catagory of the multinomial. Total number of trials is \code{sum(x)}.
#' The number of catagories is K = \code{length(x)}.
#'
#' @param pseudoCounts A vector of real-valued "pseudo counts"
#' for the K catagories in the problem. This is sometimes called
#' the "concentration" parameter.
#'
#'
#' @details
#'
#' Computations are elementary because the Dirichlet(a1, a2, ..., aK) prior is
#' conjugate for the multinomial. Nearly every text on Bayesian
#' estimation shows that given values for \code{x} and \code{pseudoCounts},
#' the posterior distribution of the mulitinomial's p vector
#' is,
#' \deqn{Dirichlet(x1+a1, x2+a2, ..., xk+ak).}
#' Hence, the Bayes point estimator of the multinomial's proportions is,
#' \deqn{phat_i = (xi+ai) / sum(xi + ai),}
#' which is the mean of the posterior. Standard error of the
#' posterior is,
#' \deqn{se.phat_i=sqrt((xi+ai)*(A-(xi+ai))/(A^2*(A+1))).}
#' where A = sum(xi + ai). If \code{(xi+ai)>1} for all i, mode of the
#' posterior for the proportion vector is,
#' \deqn{(xi+ai-1)/(A-K).}
#'
#'
#' The default value for \code{pseudoCounts}
#' corresponds to the Jeffery's prior. The Jeffery's prior is
#' proportional to the root of Fisher's information and
#' is equal to Dirichlet(1/K,1/K, ..., 1/K).
#'
#' @return A data frame with number of rows equal to
#' \code{length(x)}
#' containing the Baysian point estimates for the proportion
#' in each catagory.
#' The data frame has the
#' following columns:
#' \enumerate{
#' \item \code{phat} : the Bayes point estimates equal to the
#' mean vector of the posterior distribution. This column sums to 1.0
#' \item \code{phat.mode} : if \code{xi+ai} > 1 for all i, this
#' column contains the mode vector of the posterior, i.e., the most
#' probable vector of proportions (the maximum of the posterior density).
#' If any \code{xi+ai} <= 1, \code{phat.mode = NA}.
#' \item \code{se.phat} : the standard
#' error vector of the posterior distribution.
#' \item \code{pseudoCounts} : the vector of pseudo counts
#' associated with the Dirichlet posterior. This vector
#' can be used to accumulate counts over multiple calls.
#' }
#'
#' @author Trent McDonald
#'
#'
#' @seealso \code{\link{agrestiCoullPhat}}
#'
#' @examples
#' bayesPiVector(c(1,5), c(.5,.5)) # Jeffery's prior
#' bayesPiVector(c(1,5), c(1, 1)) # flat prior
#'
#' # When prior data is available:
#' x.prior <- 5
#' n.prior <- 100
#' bayesPiVector(c(1,5), c(x.prior+0.5, n.prior-x.prior+0.5))
#'
#' # Simulation: point estimate bias
#' trueP <- c(0.01, 0.04, 0.95)
#' n <- 20
#' x <- rmultinom(1000, n, trueP)
#' baPhat <- apply(x, 2, function(xi) bayesPiVector(xi, pseudoCounts = rep(1, 3) / 3)$phat)
#' muBA <- rowMeans(baPhat)
#'
bayesPiVector <- function(x, pseudoCounts=rep(1,length(x))/length(x)){
  # Posterior Dirichlet concentration: observed counts plus prior pseudo counts.
  alpha.post <- x + pseudoCounts
  total.conc <- sum(alpha.post)

  # Posterior mean of each proportion, and its posterior standard error.
  post.mean <- alpha.post / total.conc
  post.se <- sqrt(alpha.post * (total.conc - alpha.post) /
                    (total.conc^2 * (total.conc + 1)))

  # The Dirichlet mode only exists when every concentration exceeds 1;
  # otherwise report NA (recycled across rows by data.frame below).
  post.mode <- if (any(alpha.post <= 1)) {
    NA
  } else {
    (alpha.post - 1) / (total.conc - length(alpha.post))
  }

  data.frame(phat = post.mean,
             se.phat = post.se,
             phat.mode = post.mode,
             pseudoCounts = alpha.post)
}
|
c6b70b730107f55bbe3c2cfd75bde0fa684700cc | 4d9a2b7d548a1787f7444b0247c4ee3f55b5cbed | /start_bn2.R | da834eea6c2939809e31cfcabab0d6f3099d9901 | [] | no_license | matteovadi/bit_app | 1d45aa5a3d9faa22d7bff5173b2f3051ee7c8cc6 | 9bfb46eec5eafef5cf4730e01f8b97e177af2111 | refs/heads/main | 2023-01-08T14:00:56.844921 | 2020-11-01T22:51:27 | 2020-11-01T22:51:27 | 296,031,716 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,581 | r | start_bn2.R | ## inizio.programma_bn ##
setwd("/Users/matteovadi/Desktop/bookintime/BIT")
# Initialization #
# NOTE(review): rm(list = ls()) wipes whatever was in the session before this
# script ran; all scheduler state is rebuilt from scratch below.
rm(list = ls())
library(RcppQuantuccia)  # supplies isWeekend(), used for the calendar checks
# Maximum press hours available per working day.
h.max = 8
# Hours still free today on each press ("residuo" = remainder): 6180 and 6250.
residuo6180 = h.max
residuo6250 = h.max
# Date each press is currently being scheduled on.
duedate6180_bn = as.Date(ISOdate(2020,9,30))
duedate6250_bn = as.Date(ISOdate(2020,9,30))
# Non-working holidays (2020-2021) that must be skipped when a press is
# advanced to its next working day; the skip loops below index exactly
# these 14 dates.
X = c(as.Date(ISOdate(2020,4,14)),
      as.Date(ISOdate(2020,4,25)),
      as.Date(ISOdate(2020,5,1)),
      as.Date(ISOdate(2020,6,2)),
      as.Date(ISOdate(2020,8,15)),
      as.Date(ISOdate(2020,11,1)),
      as.Date(ISOdate(2020,11,13)),
      as.Date(ISOdate(2020,12,8)),
      as.Date(ISOdate(2020,12,24)),
      as.Date(ISOdate(2020,12,25)),
      as.Date(ISOdate(2020,12,26)),
      as.Date(ISOdate(2020,12,31)),
      as.Date(ISOdate(2021,1,1)),
      as.Date(ISOdate(2021,1,6)))
###############################################################################################
############################################ Start ############################################
# Placeholder attributes for the current order; a real order overwrites these
# before the scheduling step below runs.
num_click = 0        # number of press impressions ("clicks") in the order
editore = "-"        # publisher
titolo = "INIZIO"    # title ("INIZIO" = start marker)
num_copie = 0        # number of copies
codice_ord = "0"     # order code
formato = "-"        # sheet format
# 32x44
# Click threshold routing an order to a press: below it -> 6180, else -> 6250.
valore.soglia = 12000
###############################################################################################
###############################################################################################
# Empty accumulators: the 9-column schedule, per-press histories of leftover
# hours and dates, and the running order index.
scadenziario_bn = data.frame(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
storia.residuo6180.bn = data.frame(NULL)
storia.residuo6250.bn = data.frame(NULL)
storia.date6180.bn = data.frame(NULL)
storia.date6250.bn = data.frame(NULL)
ordinare = data.frame(NULL)
# Route the order to a press by click volume, then consume daily capacity
# (h.max hours/day) until the whole job fits, splitting it across days and
# appending one schedule row per partial day.
# NOTE(review): after skipping a holiday or weekend the date is NOT re-checked
# from the top, so e.g. a jump past a weekend onto a holiday Monday is missed;
# confirm whether that is acceptable for this calendar.
if(num_click < valore.soglia){
  # Small job -> press 6180; throughput depends on the sheet format.
  macchina = "6180"
  if(formato == "25x35"| formato == "25X35"){
    prod.oraria = 6500
  } else {
    prod.oraria = 5200
  }
  # Hours of press time this order needs.
  tempo.lav = num_click/prod.oraria
  # While the job does not fit in what is left of the current day:
  while(residuo6180 - tempo.lav < 0){
    if(residuo6180 == 0){
      # Day already exhausted: just move to the next working day.
      duedate6180_bn = duedate6180_bn + 1
      # constraints on public holidays
      for(i in 1:14){
        if (duedate6180_bn == X[i]){
          duedate6180_bn = duedate6180_bn + 1
        }
      }
      # Saturday
      if(isWeekend(duedate6180_bn)) {
        duedate6180_bn = duedate6180_bn + 1
      }
      # Sunday
      if(isWeekend(duedate6180_bn)) {
        duedate6180_bn = duedate6180_bn + 1
      }
      residuo6180 = h.max
    } else {
      # Fill the rest of today: clicks that fit in the remaining hours.
      num_click_format = num_click - (tempo.lav - residuo6180)*prod.oraria
      # update the schedule (residual time 0: the day is now fully booked)
      duedate_format = format(duedate6180_bn, "%a %d %b %Y")
      if(nrow(scadenziario_bn) == 0){
        ordinare = data.frame(as.numeric(1))
        storia.residuo6180.bn = data.frame(0)
        storia.residuo6250.bn = data.frame(residuo6250)
        storia.date6180.bn = data.frame(as.character(duedate6180_bn))
        storia.date6250.bn = data.frame(as.character(duedate6250_bn))
        scadenziario_bn = data.frame(duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click_format,0), 0)
      } else {
        ordinare = rbind(ordinare, as.numeric(nrow(ordinare)+1))
        storia.residuo6180.bn = rbind(storia.residuo6180.bn, 0)
        storia.residuo6250.bn = rbind(storia.residuo6250.bn, residuo6250)
        storia.date6180.bn = rbind(storia.date6180.bn, as.character(duedate6180_bn))
        storia.date6250.bn = rbind(storia.date6250.bn, as.character(duedate6250_bn))
        scadenziario_bn = rbind(scadenziario_bn, c( duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click_format,0), 0))
      }
      # Carry the unfinished remainder of the job into the next working day.
      tempo.lav = tempo.lav - residuo6180
      num_click = tempo.lav*prod.oraria
      duedate6180_bn = duedate6180_bn + 1
      # constraints on public holidays
      for(i in 1:14){
        if (duedate6180_bn == X[i]){
          duedate6180_bn = duedate6180_bn + 1
        }
      }
      # Saturday
      if(isWeekend(duedate6180_bn)) {
        duedate6180_bn = duedate6180_bn + 1
      }
      # Sunday
      if(isWeekend(duedate6180_bn)) {
        duedate6180_bn = duedate6180_bn + 1
      }
      residuo6180 = h.max
    }
  }
  # The remaining work now fits today: book it and record the leftover hours.
  residuo6180 = residuo6180 - tempo.lav
  duedate_format = format(duedate6180_bn, "%a %d %b %Y")
  if(nrow(scadenziario_bn) == 0){
    ordinare = data.frame(as.numeric(1))
    storia.residuo6180.bn = data.frame(residuo6180)
    storia.residuo6250.bn = data.frame(residuo6250)
    storia.date6180.bn = data.frame(as.character(duedate6180_bn))
    storia.date6250.bn = data.frame(as.character(duedate6250_bn))
    scadenziario_bn = data.frame(duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click,0), round(residuo6180,2))
  } else {
    ordinare = rbind(ordinare, as.numeric(nrow(ordinare)+1))
    storia.residuo6180.bn = rbind(storia.residuo6180.bn, residuo6180)
    storia.residuo6250.bn = rbind(storia.residuo6250.bn, residuo6250)
    storia.date6180.bn = rbind(storia.date6180.bn, as.character(duedate6180_bn))
    storia.date6250.bn = rbind(storia.date6250.bn, as.character(duedate6250_bn))
    scadenziario_bn = rbind(scadenziario_bn, c(duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click,0), round(residuo6180,2)))
  }
} else { ##############
  # Large job -> press 6250: identical logic with the 6250 state variables
  # and that press's (higher) hourly throughput.
  macchina = "6250"
  if(formato == "25x35"| formato == "25X35"){
    prod.oraria = 9400
  } else {
    prod.oraria = 7400
  }
  tempo.lav = num_click/prod.oraria
  while(residuo6250 - tempo.lav < 0){
    if(residuo6250 == 0){
      duedate6250_bn = duedate6250_bn + 1
      # constraints on public holidays
      for(i in 1:14){
        if (duedate6250_bn == X[i]){
          duedate6250_bn = duedate6250_bn + 1
        }
      }
      # Saturday
      if(isWeekend(duedate6250_bn)) {
        duedate6250_bn = duedate6250_bn + 1
      }
      # Sunday
      if(isWeekend(duedate6250_bn)) {
        duedate6250_bn = duedate6250_bn + 1
      }
      residuo6250 = h.max
    } else {
      num_click_format = num_click - (tempo.lav - residuo6250)*prod.oraria
      # update the schedule
      duedate_format = format(duedate6250_bn, "%a %d %b %Y")
      if(nrow(scadenziario_bn) == 0){
        ordinare = data.frame(as.numeric(1))
        storia.residuo6180.bn = data.frame(residuo6180)
        storia.residuo6250.bn = data.frame(0)
        storia.date6180.bn = data.frame(as.character(duedate6180_bn))
        storia.date6250.bn = data.frame(as.character(duedate6250_bn))
        scadenziario_bn = data.frame(duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click_format,0), 0)
      } else {
        ordinare = rbind(ordinare, as.numeric(nrow(ordinare)+1))
        storia.residuo6180.bn = rbind(storia.residuo6180.bn, residuo6180)
        storia.residuo6250.bn = rbind(storia.residuo6250.bn, 0)
        storia.date6180.bn = rbind(storia.date6180.bn, as.character(duedate6180_bn))
        storia.date6250.bn = rbind(storia.date6250.bn, as.character(duedate6250_bn))
        scadenziario_bn = rbind(scadenziario_bn, c(duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click_format,0), 0))
      }
      tempo.lav = tempo.lav - residuo6250
      num_click = tempo.lav*prod.oraria
      duedate6250_bn = duedate6250_bn + 1
      # constraints on public holidays
      for(i in 1:14){
        if (duedate6250_bn == X[i]){
          duedate6250_bn = duedate6250_bn + 1
        }
      }
      # Saturday
      if(isWeekend(duedate6250_bn)) {
        duedate6250_bn = duedate6250_bn + 1
      }
      # Sunday
      if(isWeekend(duedate6250_bn)) {
        duedate6250_bn = duedate6250_bn + 1
      }
      residuo6250 = h.max
    }
  }
  residuo6250 = residuo6250 - tempo.lav
  duedate_format = format(duedate6250_bn, "%a %d %b %Y")
  if(nrow(scadenziario_bn) == 0){
    ordinare = data.frame(as.numeric(1))
    storia.residuo6180.bn = data.frame(residuo6180)
    storia.residuo6250.bn = data.frame(residuo6250)
    storia.date6180.bn = data.frame(as.character(duedate6180_bn))
    storia.date6250.bn = data.frame(as.character(duedate6250_bn))
    scadenziario_bn = data.frame(duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click,0), round(residuo6250,2))
  } else {
    ordinare = rbind(ordinare, as.numeric(nrow(ordinare)+1))
    storia.residuo6180.bn = rbind(storia.residuo6180.bn, residuo6180)
    storia.residuo6250.bn = rbind(storia.residuo6250.bn, residuo6250)
    storia.date6180.bn = rbind(storia.date6180.bn, as.character(duedate6180_bn))
    storia.date6250.bn = rbind(storia.date6250.bn, as.character(duedate6250_bn))
    scadenziario_bn = rbind(scadenziario_bn, c(duedate_format, codice_ord, macchina, formato, titolo, editore, num_copie, round(num_click,0), round(residuo6250,2)))
  }
} # end if: click threshold
# Prepend the running order number and label the schedule's columns
# (Italian headers: Date, Order, Machine, Format, Title, Publisher,
# Copies, Clicks, Remaining time).
scadenziario_bn = cbind(ordinare, scadenziario_bn)
colnames(scadenziario_bn) = c("#","Data", "Ordine", "Macchina", "Formato", "Titolo", "Editore", "Copie", "Click", "Tempo residuo")
colnames(storia.residuo6180.bn) = "Residuo6180"
colnames(storia.residuo6250.bn) = "Residuo6250"
colnames(storia.date6180.bn) = "Data6180"
colnames(storia.date6250.bn) = "Data6250"
colnames(ordinare) = "#"
# Persist the full scheduler state twice with identical contents:
# "xannulla_bn" appears to be an undo/rollback snapshot and "save_bn" the
# working copy — TODO confirm against the application that reloads them.
save(h.max, residuo6180, residuo6250, duedate6180_bn, duedate6250_bn, X, scadenziario_bn, storia.residuo6180.bn, storia.residuo6250.bn, storia.date6180.bn, storia.date6250.bn, valore.soglia, ordinare, file = "xannulla_bn.RData")
save(h.max, residuo6180, residuo6250, duedate6180_bn, duedate6250_bn, X, scadenziario_bn, storia.residuo6180.bn, storia.residuo6250.bn, storia.date6180.bn, storia.date6250.bn, valore.soglia, ordinare, file = "save_bn.RData")
# State now lives on disk only; clear the in-memory workspace.
rm(list = ls())
|
a6682ee63608ced604b87e7519789d608b0ab4a1 | b6eb519b8ef49e9466060b5fe8f287f97aca5bcc | /NYTIMES_API_WEEKLY_CUT.R | 43d1703b4babf85bc67ef108e7f8f4c64e55b796 | [] | no_license | ChangMinSeung/NYTIMES_API_WEEKLY_CUT__Practice | 9ce3959b972074e338be53ca481b8363dae12f8b | fac0f5231208af68b2178643c9be92428a55b754 | refs/heads/master | 2020-04-05T20:11:36.349526 | 2018-12-28T05:57:30 | 2018-12-28T05:57:30 | 157,168,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 831 | r | NYTIMES_API_WEEKLY_CUT.R |
#NYTIMES API 접속
Sys.setenv(NYTIMES_AS_KEY = "INTER_YOUR_KEY")
Sys.getenv("NYTIMES_AS_KEY")
install.packages("rtimes")
library('rtimes')
#NYTIMES API 데이터 요청
NYNK_6month <- as_search(q ="North Korea",
begin_date = "20180601",
end_date = "20180731",
all_results = TRUE)
#pub_date Y_M_D 변환
NYNK_6month_DF <- data.frame(NYNK_6month$data)
NYNK_6month_DF$pub_date <- as.Date(NYNK_6month_DF$pub_date, "%Y-%m-%d")
#주별로 나누기
NYNK_6month_DF$week <- cut(NYNK_6month_DF$pub_date, breaks = "week", start.on.monday = F)
#합계 계산 할 수 있게 카운트 입력
NYNK_6month_DF$count <- 1
#주별로 합계 구하기
install.packages("reshape2")
library(reshape2)
(y <- dcast(NYNK_6month_DF, week ~ ., value.var = "count", sum))
|
17a28c9fff8bbac4e1520baa97b66ad97fc2336f | 280275f0e84b1a15a5273dc1bcbb705d651ed573 | /MODIStspPruebas.R | 475a4081792b9a383510f091d7bb681aded2ca61 | [] | no_license | Carlitoshsh/Testing-R | c3c440470b6c76dea1ed40843029fd15b5640cf7 | d124ba4bd9701c4ece224f7e71a8137e74bdc551 | refs/heads/master | 2020-05-02T09:34:06.586009 | 2019-03-28T01:38:10 | 2019-03-28T01:38:10 | 177,875,065 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 326 | r | MODIStspPruebas.R | #Package available on https://github.com/ropensci/MODIStsp
# One-time setup, kept for reference (the GUI toolkit is only needed for
# interactive use):
#install.packages("gWidgetsRGtk2")
#library(gWidgetsRGtk2)
#install.packages("MODIStsp")
library(MODIStsp)
# Path to a previously saved MODIStsp options file (typically created once
# via the MODIStsp GUI).
options_file <- "C:/Users/carlo/Desktop/pruebas/options.json"
# --> Launch the processing
# gui = FALSE runs non-interactively using only the JSON options;
# verbose = TRUE prints progress messages.
MODIStsp(gui = FALSE, options_file = options_file, verbose = TRUE)
ea1f0d6abe235b3e7084960d3b05b4d0a58c769a | 0afd8381918f860af0238fae12377fe4b4e5b921 | /man/fars_map_state.Rd | d725ab60f044aae5dfb4b89a448a03d9dcd6d24f | [] | no_license | rafaelcb/FARS | 46b06de8f128621049f96db483965b27be417fea | c70c499cfbfa131c09d19e3ca8f3574d346706e5 | refs/heads/master | 2021-01-01T06:28:14.628023 | 2017-07-23T20:48:16 | 2017-07-23T20:48:16 | 97,430,175 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 969 | rd | fars_map_state.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Create map with Fatality Analysis Reporting System data}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{A numeric value with the state number to plot}
\item{year}{A numeric value with the year for which the data will be plotted.}
}
\value{
Plots a map with points marking the locations of accidents.
}
\description{
This function creates a map with fatalities in motor vehicle crashes for a
specific year and state. The function fails if an invalid year or state is
passed as input.
}
\note{
The csv files with the data need to be downloaded previously from the
US National Highway Traffic Safety Administration's Fatality Analysis
Reporting System website
(\url{https://www.nhtsa.gov/research-data/fatality-analysis-reporting-system-fars})
}
\examples{
\dontrun{fars_map_state(3, 2015)}
}
|
9cee6af3479ad8d2178ece6f7887191cec95b84d | 2e6ede33c821f7da4d2735b971a8a25e6b376c8f | /aufgabenpool/start.R | 9c7494326de864e892874915bc0f1c611bb2f40b | [] | no_license | japaweiss/TEST-aufgabenpool | 0dba28d41a9deb32ced7cd3725eea1190cb4f597 | 6d09503fc0371190c26653cc14abc389c2bc3063 | refs/heads/master | 2021-06-02T00:54:45.666915 | 2019-10-03T16:58:51 | 2019-10-03T16:58:51 | 39,435,022 | 0 | 0 | null | 2015-07-21T09:12:25 | 2015-07-21T08:56:46 | null | UTF-8 | R | false | false | 1,649 | r | start.R | ## Exams-Paket laden
## Load the exams package (generates randomized exercise sheets).
library("exams")
## Build the vector of exercise names (resolved inside `edir` below).
myexam <- c("ZRr_01_zinsJahr","ZRr_02_zinsMonat")
## Generate the PDF exam sheets together with the matching solution sheets
## (n = 3 random replications).
## NOTE(review): every path below is an absolute Windows path under
## C:/aufgabenpool/ — as written, the script only runs on a machine laid
## out exactly like this.
exams2pdf(myexam, n = 3, name = c("PDF-Aufgabenblatt", "PDF-Loesungen"),
          encoding = "UTF-8",
          dir = "C:/aufgabenpool/output",
          edir = "C:/aufgabenpool/exercises",
          template = c("C:/aufgabenpool/templates/exam.tex", "C:/aufgabenpool/templates/solution.tex"),
          header = list(
            Date = "2015-07-21",
            ID = function(i) formatC(i, width = 5, flag = "0")
          ))
## Further export options (not tested with these exercises); more information:
## https://cran.r-project.org/web/packages/exams/index.html
## generate a single PDF exam (shown in PDF viewer)
exams2pdf(myexam, n = 1,
          encoding = "UTF-8",
          edir = "C:/aufgabenpool/exercises",
          template = "C:/aufgabenpool/templates/exam.tex")
## generate a single HTML exam (shown in browser)
exams2html(myexam, n = 1,
           encoding = "UTF-8",
           edir = "C:/aufgabenpool/exercises",
           template = "C:/aufgabenpool/templates/plain.html")
## generate three HTML exams without solutions in output directory
exams2html(myexam, n = 3, name = "html-demo", solution = FALSE,
           encoding = "UTF-8",
           dir = "C:/aufgabenpool/output",
           edir = "C:/aufgabenpool/exercises",
           template = "C:/aufgabenpool/templates/plain.html")
## generate a Moodle exam
## NOTE(review): n = 1 here, i.e. a single replication per question — the
## original comment claimed "three replications"; confirm which was intended.
exams2moodle(myexam, n = 1, name = "moodle-demo",
             encoding = "UTF-8",
             dir = "C:/aufgabenpool/output",
             edir = "C:/aufgabenpool/exercises")
|
27968e2177078ea6eebe833f3d0f2cbc54b7f5cc | 91ddd0b092f72ddbb3442a4f0ad04873b8638015 | /man/construct_incompMat.Rd | 4532a31dabdcc853ebc3950e8188e6f5b3886417 | [
"MIT"
] | permissive | PMildenb/SteppedPower | d601258f53d7d399b17fedef03e451360b18bd4f | b7e0512714924142e8a6430ae45869f994eb3c56 | refs/heads/master | 2023-06-24T19:18:23.895589 | 2022-08-10T10:41:00 | 2022-08-10T10:41:00 | 156,396,213 | 1 | 0 | NOASSERTION | 2021-07-07T08:26:32 | 2018-11-06T14:29:19 | R | UTF-8 | R | false | true | 1,250 | rd | construct_incompMat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/construct_DesMat.R
\name{construct_incompMat}
\alias{construct_incompMat}
\title{Constructs a matrix of 0 and 1 for unobserved and observed cluster periods, respectively.}
\usage{
construct_incompMat(incomplete, dsntype, timepoints, Cl, trtmatrix = NULL)
}
\arguments{
\item{incomplete}{integer, either a scalar or vector (only for SWD) or a matrix.
A scalar or vector defines the number of periods before and after the switch from
control to intervention that are observed. A matrix consists of `1`s for
observed clusterperiods and `0`s or `NA` for unobserved clusterperiods.}
\item{dsntype}{character, defines the type of design. Options are "SWD",
"parallel" and "parallel_baseline", defaults to "SWD".}
\item{timepoints}{numeric (scalar or vector), number of timepoints (periods).
If design is swd, timepoints defaults to length(Cl)+1.
Defaults to 1 for parallel designs.}
\item{Cl}{integer (vector), number of clusters per sequence group (in SWD),
or number in control and intervention (in parallel designs)}
\item{trtmatrix}{an optional user defined matrix
to define treatment allocation}
}
\value{
a matrix
}
\description{
Mostly useful to build incomplete stepped wedge designs
}
|
32e12cdfccdd09defcfe6c7cf1eb60d3642e94c8 | c4e48240f47280fd3b57f1e682e50841aa527763 | /analysis/functions/func_qqplot.R | d08e8eb21271b7c76d4a65616b65435e4f15b5b7 | [] | no_license | bwtimmermans/SRS_wave_analysis | 66ba6739d31f108661de879a178febb71b9d0fd6 | ce06c2bbb8424988eec6fb699f0130c5154ec081 | refs/heads/master | 2022-10-14T13:05:27.067573 | 2022-09-22T20:21:57 | 2022-09-22T20:21:57 | 181,764,121 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,558 | r | func_qqplot.R | # qqplot function derived from extRemes package.
# Modified to allow changing font size of legend and line size of regression fit.
# Two-sample quantile-quantile plot with optional least-squares fit and
# ~95% confidence bands.  Derived from extRemes::qqplot; this copy adds
# `cex.leg` (legend text size) and `lwd.reg` (width of the band / 1-1 /
# regression lines), which the packaged version hard-codes.
#
# Arguments:
#   x, y       numeric samples to compare; NAs are dropped, values sorted.
#   pch        plotting character for the quantile points.
#   xlab, ylab axis labels.
#   regress    if TRUE, regress the y-quantiles on x and draw/return the fit.
#   make.plot  if FALSE, compute and return quantities without plotting.
#   cex.leg    legend text magnification (added parameter).
#   lwd.reg    line width for reference/band/regression lines (added).
#   ...        further graphical parameters forwarded to plot().
#
# Returns (invisibly) a list of class "qqplot": the call, input names,
# matched quantiles and band limits in $qdata, and (when regress = TRUE)
# the lm fit in $regression.
func_qqplot <- function (x, y, pch = 19, xlab = "x Quantiles", ylab = "y Quantiles",
    regress = TRUE, make.plot = TRUE, cex.leg = 3.0, lwd.reg = 3.0, ...)
{
    args <- list(...)
    out <- list()
    out$call <- match.call()
    out$names <- list(x = as.character(substitute(x)), y = as.character(substitute(y)))
    x <- sort(na.omit(x))
    y <- sort(na.omit(y))
    # Empirical quantile function of the y sample.
    qy <- extRemes::quantilefun(y)
    m <- length(x)
    n <- length(y)
    N <- m + n
    # Effective sample size used for the two-sample band width.
    M <- m * n/N
    # 1.36 is the usual two-sample Kolmogorov-Smirnov coefficient for ~95%
    # bands (consistent with the "95% confidence bands" legend text below).
    K <- 1.36
    # Plotting positions in [0, 1] for the m x order statistics.
    p <- (1:m - 1)/(m - 1)
    yq <- qy(p)
    #print(paste("qy:",qy(p - K/sqrt(M))))   # leftover debug output, disabled
    # Lower/upper confidence band: y-quantiles at shifted plotting positions.
    yl <- qy(p - K/sqrt(M))
    yu <- qy(p + K/sqrt(M))
    if (make.plot) {
        # Four plot() variants so user-supplied xlim/ylim in ... are not
        # clobbered by the defaults computed here.
        if (is.null(args$xlim) && is.null(args$ylim))
            plot(x, yq, pch = pch, xlim = range(x), ylim = range(yq,
                yl, yu, na.rm = TRUE), xlab = xlab, ylab = ylab,
                ...)
        else if (is.null(args$xlim))
            plot(x, yq, pch = pch, xlim = range(x), xlab = xlab,
                ylab = ylab, ...)
        else if (is.null(args$ylim))
            plot(x, yq, pch = pch, ylim = range(yq, yl, yu, na.rm = TRUE),
                xlab = xlab, ylab = ylab, ...)
        else plot(x, yq, pch = pch, xlab = xlab, ylab = ylab,
            ...)
        lines(x, yl, lty = 2, col = "gray", lwd = lwd.reg)
        lines(x, yu, lty = 2, col = "gray", lwd = lwd.reg)
        # 45-degree reference line: samples from equal distributions lie on it.
        abline(0, 1, lty = 2, col = "darkorange", lwd = lwd.reg)
    }
    if (regress) {
        # Least-squares fit of the matched y-quantiles on the x sample.
        fit <- lm(y ~ x, data = data.frame(x = x, y = yq))
        if (make.plot) {
            lines(x, predict(fit), col = "grey", lty = 1, lwd = lwd.reg)
            # Original extRemes legend call, kept for reference:
            #legend("topleft", legend = c("1-1 line", "regression line",
            #    "95% confidence bands"), col = c("darkorange",
            #    "grey", "gray"), lty = c(2, 1, 2), bty = "n", cex = cex.leg)
            legend("topleft", legend = c("1-1 line", "regression line","95% confidence bands"),
                   col = c("darkorange","grey","grey"), lty = c(2, 1, 2), lwd = c(lwd.reg, lwd.reg, lwd.reg), bty = "n", cex = cex.leg)
        }
        out$regression <- fit
    }
    else if (make.plot)
        # NOTE(review): this legend has 2 entries but passes 3 lwd values;
        # the extra one is ignored by legend() — confirm and trim.
        legend("bottomright", legend = c("1-1 line", "95% confidence bands"),
            col = c("darkorange", "gray"), lty = c(2, 2), lwd = c(lwd.reg, lwd.reg, lwd.reg), bty = "n", cex = cex.leg)
    out$qdata <- data.frame(x = x, y = yq, lower = yl, upper = yu)
    class(out) <- "qqplot"
    invisible(out)
}
|
b9d9075ac2561550ffda23eb5c10a3e67f4df017 | b34fbe86f91c485a05ca183df81a40a6bdcca6c6 | /R/knapsack_dynamic.R | 71a2b221d590baefb3a12ecd409d901b9a227ebb | [] | no_license | Jorisvdoorn/lab6group8 | 588ebec6ff93379ea31df986a4e8b2de921a89e4 | 982126f6f467e8c64ebb008695aab261766c6915 | refs/heads/master | 2020-08-07T09:55:07.717184 | 2019-10-13T18:45:52 | 2019-10-13T18:45:52 | 213,400,404 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,523 | r | knapsack_dynamic.R | #' Knapsack Dynamic
#'
#' Returns the maximum value given the capacity of knapsack by using dynamic programming.
#' @usage knapsack_dynamic(x, W)
#' @param x A \code{data.frame} consisting of two variables. \code{w} represents the object's weight and \code{v} is the value.
#' @param W the maximum capacity of the knapsack.
#' @return A \code{list} containing the maximum value and the elements.
#' @references \url{en.wikipedia.org/wiki/Knapsack_problem}
#' @name knapsack_dynamic
#' @export
knapsack_dynamic <- function(x, W) {
  # Contract: `x` is a data frame whose two columns are named "w" (weights)
  # and "v" (values); `W` is a non-negative knapsack capacity.
  stopifnot(is.data.frame(x), length(colnames(x)) == 2, colnames(x) == c("w", "v"), W >= 0)
  item_w <- x$w
  item_v <- x$v
  n_rows <- nrow(x) + 1  # row r of `dp` covers the first (r - 1) items
  n_cols <- W + 1        # column c of `dp` covers capacity (c - 1)
  # Fill the dynamic-programming table of best achievable values.
  dp <- matrix(0, nrow = n_rows, ncol = n_cols)
  for (r in 2:n_rows) {
    wt <- item_w[r - 1]
    vl <- item_v[r - 1]
    for (cap in seq_len(n_cols)) {
      if (wt >= cap) {
        # Item (r - 1) cannot fit at this capacity: inherit the best so far.
        dp[r, cap] <- dp[r - 1, cap]
      } else {
        # Either skip the item, or take it and spend its weight.
        dp[r, cap] <- max(dp[r - 1, cap], dp[r - 1, cap - wt] + vl)
      }
    }
  }
  # Walk the table backwards to recover which items were packed.
  ri <- n_rows
  ci <- n_cols
  chosen <- c()
  while (dp[ri, ci] > 0) {
    if (dp[ri, ci] == dp[ri - 1, ci]) {
      # The optimum is reachable without item (ri - 1): move up one row.
      ri <- ri - 1
    } else {
      # Item (ri - 1) belongs to the optimal set: record it, free its weight.
      ci <- ci - item_w[ri - 1]
      ri <- ri - 1
      chosen <- c(chosen, ri)
    }
  }
  list(value = round(dp[n_rows, n_cols]), elements = sort(chosen))
}
|
0e564ed2e825afc82c01a9c8d80d20691de7a5b9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/addreg/examples/conv.test.Rd.R | b0eb66af32d0521f7c80df107c29bdcb89a29643 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 278 | r | conv.test.Rd.R | library(addreg)
### Name: conv.test
### Title: Convergence Test Based on L2 Norm
### Aliases: conv.test
### Keywords: models misc
### ** Examples
# Two successive parameter vectors that differ only in the first component.
theta.old <- c(4,5,6)
theta.new <- c(4.05,5,6)
# Test the step under two tolerances; presumably the looser tolerance (0.01)
# is met and the tighter one (0.005) is not -- see ?conv.test in addreg for
# the exact L2-norm criterion.
conv.test(theta.old, theta.new, 0.01)
conv.test(theta.old, theta.new, 0.005)
|
2dd930a449374dae03d1a56f9aee2b2803d5616d | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /output/sources/authors/258/bipartite/mgen.R | 5529d5c1c9401345d4fd50a4f02d2609e32ae760 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,323 | r | mgen.R | mgen <- function(web, n=sum(web), keep.species=TRUE, rep.cell=TRUE, autotransform="sum", trials=100){
# Generate a quantitative bipartite network by randomly allocating `n`
# interactions according to a probability matrix.
# Original author: Diego Vazquez (brushed up for bipartite by CFD).
#
# Arguments (as used below):
#   web           - a probability matrix (entries summing to 1); if the
#                   entries do not sum to 1 it is converted first according
#                   to `autotransform`.
#   n             - number of interactions to allocate into the new matrix.
#   keep.species  - if TRUE, every row and every column is seeded with at
#                   least one interaction so no species drops out.
#   rep.cell      - if TRUE, a cell may accumulate multiple interactions;
#                   if FALSE, cells are binary and `n` must not exceed the
#                   number of cells.
#   autotransform - "sum": divide each entry by the matrix total; anything
#                   else: product of marginal probabilities.
#   trials        - abort with an error after trials * n placement attempts.
  if (sum(web) != 1) { # added by CFD
    message(paste("This is not a probability matrix! I will proceed after transforming the entries according to option 'autotransform':", autotransform, "!"))
    if (autotransform == "sum") {
      m <- web/sum(web)
    } else {# equiprobable, or anything else: outer product of the marginals
      m <- (rowSums(web)/sum(web)) %*% t(colSums(web)/sum(web))
    }
  } else m <- web
  if (rep.cell == FALSE & n > (nrow(m)*ncol(m))){
    # NOTE(review): this only emits a message and the function then returns
    # NULL invisibly; a stop() would make the failure explicit to callers.
    message("Argument n should be smaller than the number of cells in matrix!")
  }
  else{
    mac <- matrix(cumsum(m),nrow(m),ncol(m)) #Cumulative probability matrix
    mint <- matrix(0,nrow(m),ncol(m)) #Interaction matrix
    if (keep.species){
      # Seed one interaction in every row (column drawn with probability
      # proportional to the column marginals), then one in any column that
      # is still empty (row drawn by the row marginals).
      for (i in 1:nrow(m)){
        c1 <- sample(ncol(m), replace=TRUE, prob=colSums(m))
        c1 <- c1[1]
        mint[i, c1] <- 1
      }
      for (i in 1:ncol(m)){
        if(sum(mint[,i]) == 0){
          r1 <- sample(nrow(m), replace=TRUE, prob=rowSums(m))
          r1 <- r1[1]
          mint[r1, i] <- 1
        }
      }
    }
    while.counter <- 0
    # Inverse-CDF sampling on the cumulative matrix: draw a uniform number
    # and place the interaction in the first cell whose cumulative
    # probability reaches it, until `n` interactions are allocated.
    while (sum(mint) < n){
      rand <- runif(1, 0, 1)
      ri <- min(which(mac >= rand))
      if (rep.cell == TRUE) mint[ri] <- mint[ri] + 1
      if (rep.cell == FALSE) mint[ri] <- 1
      while.counter <- while.counter + 1
      # With rep.cell = FALSE repeated draws may hit occupied cells, so cap
      # the number of attempts as documented for `trials`.
      if (while.counter >= trials*n) stop("Cannot allocate the requested interactions in a reasonable amount of time! \n Either increase 'trials' or decrease 'n'.")
    }
    mint
  }
}
# mgen(web=Safariland)
# mgen(web=Safariland, autotransform="equiprobab")
# mgen(web=Safariland/sum(Safariland), n=sum(Safariland), keep.species=FALSE)
# mgen(web=Safariland/sum(Safariland), n=200, rep.cell=F) |
4f193ced394047fd4c95e4d47d45ef7daeaa47b8 | 21733d8a9b0d59e63f575623848e79d0e5062092 | /CourseProject/Capstone/capstone_project_app/server.R | f187980caae29b5b9b7edeb4d653707b031dc467 | [] | no_license | zhangjinge588/datasciencecoursera | f66c6bc679650464b138f0ac072ab0812546eb2f | df33e473858122a95cec2a8273d0e232b28ac4c3 | refs/heads/master | 2020-08-27T16:33:08.814478 | 2020-05-04T16:29:55 | 2020-05-04T16:29:55 | 217,433,692 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,631 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tm)
# Normalise raw text into a tm corpus ready for n-gram scoring. The cleaning
# steps run in a fixed order: lower-casing, number removal, URL stripping,
# punctuation removal, removal of leftover symbols, single letters and any
# remaining non-alphabetic characters, then whitespace collapsing.
# The `language` argument is currently unused (stemming and stop-word
# removal were disabled in the original pipeline).
create_corpus <- function(data, language='en') {
  cleaning_steps <- list(
    content_transformer(tolower),
    removeNumbers,
    content_transformer(function(txt) gsub("http.*", "", txt, ignore.case = TRUE)),
    function(doc) removePunctuation(doc, ucp=TRUE, preserve_intra_word_contractions=TRUE),
    content_transformer(function(txt) gsub("[^[:alnum:][:blank:]+?&/\\-]", "", txt)),
    content_transformer(function(txt) gsub(" *\\b[[:alpha:]]{1,1}\\b *", " ", txt)),
    content_transformer(function(txt) gsub("[^A-Za-z]", " ", txt)),
    stripWhitespace
  )
  corpus <- VCorpus(VectorSource(data))
  for (step in cleaning_steps) {
    corpus <- tm_map(corpus, step)
  }
  corpus
}
# Rank candidate next words for `input_vector` (a character vector of cleaned
# tokens) using the "stupid backoff" scheme: each vocabulary word is scored
# against the longest matching n-gram prefix, and the score is discounted by
# `lambda` for every level backed off. `word_frequency_list` is a list whose
# k-th element is a named numeric vector of k-gram counts (names are
# space-joined tokens); `backoff_unigram` is a named numeric vector of
# unigram scores. Returns a character vector of the `topN` best words.
stupid_backoff = function(input_vector, word_frequency_list, backoff_unigram, topN=5, lambda=0.4) {
  # we need to start with n-gram prefix.
  # e.g. We have 4-gram word frequency but input_vector has length 2, then we have to start with bigram prefix.
  # e.g. We have 4-gram word frequency but input_vector has length 10, then we have to start with trigram prefix.
  word_length = length(input_vector)
  n = min(length(word_frequency_list)-1, word_length)
  length_unigrams = length(backoff_unigram)
  if (n == 0) {
    # No usable context: return the first topN unigrams as given.
    # NOTE(review): assumes `backoff_unigram` is already sorted by score --
    # confirm against how the .rds artefact is built.
    names(backoff_unigram[1:topN])
  } else {
    # One row per vocabulary word. `score` starts at 1 and is overwritten
    # either by an n-gram match below or by the discounted unigram fallback.
    # Initialize final score to be 1.
    score_df = data.frame(word=names(backoff_unigram),
                          unigram_score = backoff_unigram,
                          score=replicate(length_unigrams, 1),
                          computed=replicate(length_unigrams, FALSE),
                          find_n_gram=replicate(length_unigrams, 1))
    discount = 1
    # Try prefixes from longest to shortest; `discount` shrinks by `lambda`
    # each time we back off one level.
    while (n >= 1) {
      prefix = paste(input_vector[(word_length-n+1):word_length], collapse = " ")
      # print (n)
      # print (prefix)
      # print ("###########")
      if (prefix %in% names(word_frequency_list[[n]])) {
        count_prefix = word_frequency_list[[n]][prefix]
        # Candidate (n+1)-grams: "<prefix> <word>" for every vocabulary word.
        score_df['temp_word'] = sapply(score_df['word'], function (x) paste(prefix, x))
        # will return na if not in word frequency list
        score_df['temp_score'] = sapply(score_df['temp_word'], function (x) word_frequency_list[[n+1]][x])
        # Score only words not already matched at a longer prefix and whose
        # (n+1)-gram actually occurs (non-NA lookup).
        needs_compute = score_df['computed'] == FALSE & !is.na(score_df['temp_score'])
        score_df[needs_compute,'score'] = discount / count_prefix * score_df[needs_compute, 'temp_score']
        # Diagnostic columns recording which n-gram produced each score.
        # Set the words after computing the score to be computed.
        score_df[needs_compute, 'find_n_gram'] = n+1
        score_df[needs_compute, 'n_gram'] = score_df[needs_compute, 'temp_word']
        score_df[needs_compute, 'n_gram_freq'] = score_df[needs_compute, 'temp_score']
        score_df[needs_compute, 'n_minus_1_gram'] = prefix
        score_df[needs_compute, 'n_minus_1_gram_freq'] = count_prefix
        score_df[needs_compute, 'discount'] = discount
        score_df[needs_compute, 'computed'] = TRUE
      }
      n = n - 1
      discount = discount * lambda
    }
    # Remaining uncomputed words will be replaced with unigram score * discount.
    # (`discount` is now lambda^levels-tried; 0.000001 further penalises the
    # pure-unigram fallback relative to any n-gram match.)
    score_df[score_df['computed']==FALSE, 'score'] = discount * 0.000001 * score_df[score_df['computed']==FALSE, 'unigram_score']
    # order() is stable, so ties keep their original vocabulary order.
    index = order(score_df$score, decreasing = TRUE)[1:topN]
    # final_score = score_df[index,'score']
    # names(final_score) = score_df[index,'word']
    # score_df[index,c("word", "unigram_score", "score",
    #                  "find_n_gram", "n_gram", "n_gram_freq", "n_minus_1_gram", "n_minus_1_gram_freq", "discount")]
    # Return only the words of the topN candidates.
    score_df[index,c("word")]
  }
}
# Clean a raw input string with create_corpus(), split it into tokens, and
# delegate next-word scoring to stupid_backoff().
score_input = function(input,word_frequency_list,backoff_unigram,topN=5,lambda=0.4) {
  cleaned_text <- trimws(create_corpus(input)[[1]]$content)
  tokens <- strsplit(cleaned_text, " ")[[1]]
  stupid_backoff(tokens, word_frequency_list, backoff_unigram, topN, lambda)
}
# Model artefacts loaded once when the app starts: the n-gram frequency list
# and the unigram vector consumed by stupid_backoff() via score_input().
filtered_word_freq_sample = readRDS("filtered_word_freq.rds")
backoff_unigram_sample_remove_stop_words = readRDS("unigram_probabilities.rds")
# Server logic: score the typed text and display the predicted next words.
# (The template comment "draw a histogram" was inaccurate.)
shinyServer(function(input, output) {
  # Recompute predictions only when the user presses the submit button.
  result = eventReactive(input$submit, {
    score_input(input$textInput,
                filtered_word_freq_sample,
                backoff_unigram_sample_remove_stop_words,
                topN=input$topN,
                lambda=input$discount)
  })
  # Render the candidate words as a table; row numbers give the ranking.
  output$value <- renderTable({
    result()
  }, colnames = FALSE, rownames = TRUE)
})
|
84dc6346c78d6935dd88cc15ba6b6699b1ee0292 | a53a47b66692362a3257c59afb754cf4d6777b07 | /inst/doc/nearestCentroid.R | 90b7507d642ec3b919b0ab40745bd5bb0ec12e16 | [] | no_license | cran/lolR | a755f8ebf1a6e5b9082d0ffc5d1f2f70034767b2 | 0e92f4f31cb5eccecdcc6451ddd9f6028cf6f06b | refs/heads/master | 2021-05-04T12:57:38.205418 | 2020-06-26T21:30:03 | 2020-06-26T21:30:03 | 120,304,157 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,321 | r | nearestCentroid.R | ## -----------------------------------------------------------------------------
# Vignette code: simulate two-class data, fit the lolR nearest-centroid
# classifier, then visualise the estimated centroids and the predictions.
require(lolR)
require(ggplot2)
require(MASS)
# 400 samples in 2 dimensions.
n=400
d=2
## -----------------------------------------------------------------------------
# Simulate data (lol.sims.mean_diff: classes separated by a mean difference).
testdat <- lol.sims.mean_diff(n, d)
X <- testdat$X
Y <- testdat$Y
data <- data.frame(x1=X[,1], x2=X[,2], y=Y)
data$y <- factor(data$y)
ggplot(data, aes(x=x1, y=x2, color=y)) +
  geom_point() +
  xlab("x1") +
  ylab("x2") +
  ggtitle("Simulated Data") +
  xlim(-4, 6) +
  ylim(-4, 4)
## -----------------------------------------------------------------------------
# Fit the classifier and overlay its estimated centroids, drawn larger
# (size = 5) than the data points (size = 1).
classifier <- lol.classify.nearestCentroid(X, Y)
data <- cbind(data, data.frame(size=1))
data <- rbind(data, data.frame(x1=classifier$centroids[,1], x2=classifier$centroids[,2], y="center", size=5))
ggplot(data, aes(x=x1, y=x2, color=y, size=size)) +
  geom_point() +
  xlab("x1") +
  ylab("x2") +
  ggtitle("Data with estimated Centers") +
  guides(size=FALSE) +
  xlim(-4, 6) +
  ylim(-4, 4)
## -----------------------------------------------------------------------------
# Predict on the training data. The last two rows of `data` are the appended
# centroid markers, so only the sample rows get their labels replaced.
Yhat <- predict(classifier, X)
data$y[1:(length(data$y) - 2)] <- Yhat
ggplot(data, aes(x=x1, y=x2, color=y, size=size)) +
  geom_point() +
  xlab("x1") +
  ylab("x2") +
  ggtitle("Data with Predictions") +
  guides(size=FALSE) +
  xlim(-4, 6) +
  ylim(-4, 4)
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating the location
  ## of the CSV files, relative to the current working directory.
  ## 'pollutant' is a character vector of length 1 naming the pollutant
  ## column to average; either "sulfate" or "nitrate".
  ## 'id' is an integer vector of monitor ID numbers to read.
  ##
  ## Returns the mean of the pollutant across all monitors listed in 'id',
  ## ignoring NA values. NOTE: the result is not rounded.
  # Read each monitor file once and take the pollutant column whole. This
  # replaces the previous per-row loop that grew a vector with append(),
  # which was O(n^2) in the number of observations.
  values_per_monitor <- lapply(id, function(i) {
    # Monitor files are named with zero-padded three-digit IDs, e.g. "007.csv".
    fileloc <- file.path(getwd(), directory, sprintf("%03d.csv", i))
    read.csv(fileloc)[[pollutant]]
  })
  # Drop NA values at the aggregation step rather than element by element.
  mean(unlist(values_per_monitor), na.rm = TRUE)
}
|
e1bcd7f4ef25540cf7f13fc5282833c645decc36 | 027165157365f3abed4569620b7a708f365208d8 | /man/delete_fmri_data.Rd | 30ff23e98d829b337f2d708c4904039418f581ea | [] | no_license | neuroconductor/kirby21.fmri | 2b4d0a1d498ae0d8adcb306e87717137be48c893 | 02ab8d98d6ce8e9095966b64086f99654e7a9f8d | refs/heads/master | 2021-07-06T07:32:20.562175 | 2021-05-15T01:04:13 | 2021-05-15T01:04:15 | 79,593,151 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 432 | rd | delete_fmri_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/delete_fmri_data.R
\name{delete_fmri_data}
\alias{delete_fmri_data}
\title{Delete fMRI Image Filenames}
\usage{
delete_fmri_data(...)
}
\arguments{
\item{...}{arguments to pass to \code{\link{delete_kirby21_data}},
\code{modalities = "fMRI"} so it cannot be specified}
}
\value{
Nothing is returned
}
\description{
Delete the files for the fMRI images
}
|
68d0d60c19685503bc8d6666ed8a84167fcc8fa2 | a712c2035083d98870728ce622b90f6bed0cc149 | /portopt/R/retrieve_close.R | 2a3c045997ad3e58a8d7e083e08da1e72113d020 | [] | no_license | Dom-Owens-UoB/FinProject | 59e2a5dd3da88e69b44c7d957dc346a542a39b15 | d960f84581202d41df1eff99ea8fd066154c1c3b | refs/heads/master | 2020-09-05T04:10:17.648126 | 2020-01-17T12:58:10 | 2020-01-17T12:58:10 | 219,978,477 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,149 | r | retrieve_close.R | #' Retreieve the close prices between specified dates
#' Columns containing missing values are dropped unless \code{missing} is
#' set to \code{FALSE}.
#' @param data A dataframe to be subsetted by date. The dataframe must have a column
#' with the name 'date'.
#' @param from A character string which gives the date of the earliest
#' entry e.g. 2018-01-01.
#' @param to A character string which gives the date of the latest
#' entry (exclusive) e.g. 2018-01-01.
#' @param missing Set equal to FALSE if you want to retain columns that
#' contain missing values.
#'
#' @return A dataframe, sorted by date, with observations between the
#' specified dates.
#' @export
#'
#' @importFrom magrittr %>%
#' @importFrom magrittr %<>%
#'
retrieve.close <- function(data,from,to,missing = TRUE){
  # Keep rows with from <= date < to.
  out <- data %>% dplyr::filter(as.Date(data$date) >= from) %>%
    dplyr::filter(as.Date(.$date) < to)
  # Next identify and drop the columns containing missing values.
  if (missing){
    to.remove <- colnames(out)[colSums(is.na(out)) > 0]
    # all_of() makes the external character vector an explicit selection;
    # the previous bare `-to.remove` is ambiguous and deprecated in dplyr.
    out %<>% dplyr::select(-dplyr::all_of(to.remove))
  }
  # Remove the first column (presumably an index column from the data
  # source); the output is incorrect otherwise.
  out <- out[,-1]
  out$date <- as.Date(out$date)
  out <- dplyr::arrange(out, date)
  return(out)
}
|
67d24ebbaad522504b9a1437f823e6b03469e780 | 160a8c80035360a176900fc3a7b9b56905a682a6 | /R/main.R | 66fa8934fbbb010b85653382673844ae981f3c62 | [] | no_license | marinegenomicslab/red_drum_map | 9635caef86f31c3bf557787129c8bfc9cdf2933f | bf2cf11cb70f0aa5367e0928be16dc68d197404b | refs/heads/master | 2020-05-23T03:53:18.913236 | 2016-10-17T13:02:29 | 2016-10-17T13:02:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 278 | r | main.R | # Create tables and figures
# See data/README for descriptions of data files
# Each step shells out to Rscript; the scripts are run in order.
# NOTE(review): system() exit codes are ignored here, so a failing script
# will not stop the pipeline -- check the return values if that matters.
# Create the figures
system("Rscript R/figure_1.R")
system("Rscript R/figure_2.R")
system("Rscript R/figure_3.R")
# Create the tables
system("Rscript R/table_1.R")
system("Rscript R/table_2.R")
|
3f4ac828902e3e05452650287c394fed8db0296d | 89dd5bf35cc8f2b3c66cfc5902e9db33445a77fd | /Scripts/all_sdgs_data_format.R | 7ff7e2bf42b0e7a7a8180c03a0a658ebd4a4f897 | [] | no_license | spoonerf/SDG_HBModels | 59e40994d1949ad460a4cdc1c16375abfb82c92c | 8388d01a833ee50698a6ca6ffcdacaa2bd40a8b1 | refs/heads/master | 2021-05-18T13:02:48.457631 | 2020-03-31T10:38:11 | 2020-03-31T10:38:11 | 251,253,596 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,512 | r | all_sdgs_data_format.R | library(dplyr)
library(readr)
library(janitor)
library(tidyr)
library(rnaturalearth)
library(rnaturalearthdata)
# NOTE(review): absolute, user-specific path -- breaks on other machines.
source("~/Documents/ECEHH/SDG_Tracker/sdg_useful.R")
source("Scripts/get_iso3.R")
# SDG indicator data by country; clean_names() converts headers to snake_case
# and ""/"NA" strings become real NAs.
sdg_df <-
  read.csv("Data/All_SDGS_by_Country.csv", na = c("", "NA"), stringsAsFactors = FALSE) %>% clean_names()
table(sdg_df$nature) #shows how the data was collected:
# C = Country Data
# CA = Country Adjusted Data
# E = Estimated Data
# G = Global Monitoring Data
# M = Modelled Data
# N = Non-Relevant
table(sdg_df$x_sex)
# Country names that could not be matched to an ISO3 code by get_iso().
unique(sdg_df$geo_area_name[is.na(get_iso(sdg_df$geo_area_name))])
# Keep only records with reporting type "G" and a resolvable ISO3 code,
# and rename columns to friendlier names.
np <- sdg_df %>%
  select(indicator,
         geo_area_name,
         time_period,
         value,
         x_age,
         x_sex,
         x_reporting_type) %>%
  mutate(iso_a3 = get_iso(geo_area_name)) %>%
  filter(x_reporting_type == "G" & !is.na(iso_a3)) %>% #removing estimated and modelled data
  select(-x_reporting_type) %>%
  select(indicator, country = geo_area_name, iso_a3, year = time_period, value, age = x_age,
         sex = x_sex) %>%
  arrange(indicator, country, year)
# Most recent year with data, per country and indicator.
most_recent <- np %>%
  group_by(country, iso_a3, indicator) %>%
  summarize(newest_data = max(year)) %>%
  arrange(newest_data)
# World polygons for mapping, joined to the recency table by ISO3 code.
world <-
  janitor::clean_names(rnaturalearth::ne_countries(scale = "medium", returnclass = "sf"))
data_check <- world %>%
  left_join(., most_recent, by = "iso_a3")
# Choropleth of how recent each country's newest observation is.
ggplot(data = data_check)+
  geom_sf(aes(fill = newest_data))+
  scale_fill_viridis_c(option = "plasma", trans = "sqrt")
|
82d02540059fe41f78e0594b325602bb6180ee28 | 2da593c68a2bb1fd6ee94b8e5e68a894d37f6c10 | /man/reference.gff.file.Rd | e1f3d06e6f45f8f477d6ede318c655fd5646d17c | [] | no_license | anu-bioinfo/neoantigenR | 78e3f1e3ceb63234d43f8a3495fd12364b7daa98 | f9a4533b18ba4086e5b5a37a26f940c5915083ad | refs/heads/master | 2020-04-05T03:42:03.272403 | 2018-04-18T20:38:55 | 2018-04-18T20:38:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 511 | rd | reference.gff.file.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conf.R
\docType{data}
\name{reference.gff.file}
\alias{reference.gff.file}
\title{the path for reference annotated gene database file}
\format{An object of class \code{character} of length 1.}
\usage{
reference.gff.file
}
\value{
the path for reference annotated gene database file
}
\description{
the path for reference annotated gene database file
}
\examples{
reference.gff.file="gencode.v19.annotation.gff3"
}
\keyword{datasets}
|
cd96837c059346d5b633b9290418faea1746f53a | 853ca7600c8146f64982399ec8d191febf42f6f1 | /010_DIAG.R | 913b856d0280f4e5ddd440ca707e61e111047e5c | [] | no_license | mvoigt87/CancerEdinburgh | 3839f55b9db2bb179cf1aa089c34f15eafbd0910 | 044e661279f34240baadd40aa39913f4e1876206 | refs/heads/master | 2020-03-30T16:38:04.182898 | 2018-10-10T14:59:01 | 2018-10-10T14:59:01 | 151,418,596 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,753 | r | 010_DIAG.R | ###
###
### A file for creating a mask for the final analysis
### - Part 1 - Time Series Analysis for stage at diagnosis -
###
### 0.1 - Load Packages
library(tidyverse)
library(forcats)
library(data.table)
library(broom)
library(survival)
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(MASS)
require(Hmisc)
### 0.2 - Load Data (load any data set)
# set path to data (placeholder path - adjust to the local data directory)
setwd("C:/Users/.../RCodeData")
load('BREAST.Rdata')
# give the data set a short name
d <- breastcure
# 1. Descriptives
table(d$sex1)
# low case number will probably lead to the exclusion of male cancer patients
d <- d %>% filter(sex1=="female")
### 1.1. variable of main interest - stage at diagnosis
table(d$dxstate1)
round(prop.table(table(d$dxstate1)),3)
### 1.1.2 Age at diagnosis/stage at diagnosis
# probably we will need to create meaningful age groups - using a age group variable (ageGr)
table(d$ageGr)
round(prop.table(table(d$ageGr)),3)
# column-wise proportions: stage distribution within each age group
round(prop.table(table(d$dxstate1, d$ageGr),2),3)
### 2. General Variables for Analysis
#####################################
# 0-1 Variable for stage at diagnosis (DxState <= 1 counts as "early")
d <- d %>% mutate(diag2 = ifelse(DxState<=1,"early","late"))
class(d$DxState) # in case it is not a factor = change
d$diag2 <- as.factor(d$diag2)
# Year by which to split the time series
d1 <- d %>% filter(DxYear<=2000)
d2 <- d %>% filter(DxYear>2000)
### 3. Missing Value treatment
##############################
d %>% count(is.na(d$Age)) # data.table package might be better
# Mean imputation of age. NOTE(review): prefer TRUE over T (T is reassignable).
d$Age[is.na(d$Age)] <- mean(d$Age,na.rm=T) # set to mean age
### Change reference categories for covariate analysis
### --------------------------------------------------
# Age at diagnosis
# d <- d %>% mutate(age = (DxYear+(DxMon/12)) - BirthYear)
# Environmental variables
# SES (Education)
# marital status variable: use "married" as the reference level
class(d$marstat1)
d <- within(d, marstat1 <- relevel(marstat1, ref = "married"))
### 4. Logistic regression - (early vs. late diagnosis)
#######################################################
model_1 <- glm(diag2 ~ marstat1 + ageGr , # add variables as it goes
               family=binomial(link='logit'),
               data=d)
summary(model_1)
# Obtain the odds ratios by exponentiating the log-odds coefficients
coeff <- exp(model_1$coefficients)
# 4.2 Model test (sequential likelihood-ratio tests)
anova(model_1, test="Chisq")
# 4.3 Comparing different times: refit the same model on each period
model_1A <- glm(diag2 ~ marstat1 + ageGr ,
                family=binomial(link='logit'),
                data=d1) # before 2000
summary(model_1A)
model_1B <- glm(diag2 ~ marstat1 + ageGr ,
                family=binomial(link='logit'),
                data=d2) # after 2000
summary(model_1B)
### 5. Ordered logistic regression
###################################
# (outcome can be an ordered factor variable, from early to late stage)
# prepare the outcome
class(d$dxstate1)
## fit ordered logit model and store results 'm'
m <- polr(dxstate1 ~ marstat1 + ageGr, data = d, Hess=TRUE)
## view a summary of the model
summary(m)
# Help for interpretation from: https://stats.idre.ucla.edu/r/dae/ordinal-logistic-regression/
# The summary shows: (1) coefficients with standard errors and t values
# (no p-values by default); (2) the intercepts ("cutpoints") where the
# latent variable is cut into the observed categories - generally not
# interpreted directly; (3) the residual deviance and AIC, both useful
# for model comparison.
## Odds ratio, coefficients, and p-values
exp(coef(m))
(ctable <- coef(summary(m)))
# two-sided p-values from the t values, assuming approximate normality
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
## combined table
(ctable <- cbind(ctable, "p value" = p))
## normally distributed CIs
confint.default(m)
## OR and CI (profile-likelihood confidence intervals)
(ci <- confint(m))
exp(cbind(OR = coef(m), ci))
### 6. Time Trend Analysis
##########################
# Counts of late and early diagnoses per year and age group, joined to
# compute the share of late diagnoses (perc).
X <- aggregate(d$diag2=="late", by=list(Year=d$DxYear, AgeGR=d$ageGr), FUN=sum)
X.2 <- aggregate(d$diag2=="early", by=list(Year=d$DxYear, AgeGR=d$ageGr), FUN=sum)
X <- X %>% inner_join(X.2, by= c("Year", "AgeGR")) %>% mutate(perc = x.x/(x.x+x.y))
X %>% ggplot(aes(x=Year, y=perc, color=AgeGR)) +
  geom_line() + # looks a little noisy with many age groups and few cases
  scale_y_continuous(name = "% late diagnosis")
# plot(X$Year,X$x, type = "l")
4835d9b51e1492eb199fccf64269da767255ecf5 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ggenealogy/examples/plotDegMatrix.Rd.R | b02a632fc9caa88c0fa63d4b700c3416a7e7942e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 439 | r | plotDegMatrix.Rd.R | library(ggenealogy)
### Name: plotDegMatrix
### Title: Returns the image object to show the heat map of degrees between
###   the inputted set of vertices
### Aliases: plotDegMatrix
### ** Examples
# Soybean genealogy data shipped with ggenealogy, converted to an igraph
# object with dfToIG().
data(sbGeneal)
ig <- dfToIG(sbGeneal)
# Varieties whose pairwise degrees will be shown in the heat map.
varieties <- c("Bedford", "Calland", "Narow", "Pella", "Tokyo", "Young", "Zane")
p <- plotDegMatrix(varieties, ig, sbGeneal)
# Recolour the heat map with a white-to-darkgreen gradient.
p + ggplot2::scale_fill_continuous(low = "white", high = "darkgreen")
|
c35a6f047c9911f417d7fc403f3ad252d6215a04 | 4c027025f70a9393c511cbfa066afb63614b2e2f | /IRIS.R | 39403904786cb322b5c6fcf535a85e9726cac44d | [] | no_license | Anandxu/PCA-and-K-means | f28cbe6b6b5991fc40973437a7ded6b5e3d15c95 | 9d94fd59979956adbbbd12d38545eac47de52a36 | refs/heads/master | 2020-08-27T18:53:52.629439 | 2019-10-25T06:18:42 | 2019-10-25T06:18:42 | 217,463,930 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 932 | r | IRIS.R | library(cluster)
library(factoextra)
library(NbClust)
library(fpc)
library(ggplot2)
library(ggfortify)
# Built-in iris data; keep only the four numeric measurement columns.
data(iris)
head(iris)
iris2 <- iris[1:4]
#PCA
# Principal component analysis of the (unscaled) measurements.
PCAtemp <- prcomp(iris2)
summary(PCAtemp)
PCAtemp$rotation
# Scree plot; the vertical line marks the chosen number of components.
screeplot(PCAtemp, type = "lines")
abline(v = 2,lty =2, col = "red")
autoplot(PCAtemp)
autoplot(PCAtemp, data = iris, col = "Species", loadings = TRUE, loadings.label = TRUE)
#k-means
# Elbow method: total within-cluster sum of squares for k = 1..15.
# NOTE(review): kmeans uses random starts - call set.seed() beforehand for
# reproducible results.
k.max <- 15
wss <- sapply(1:k.max, function(k){kmeans(iris2, k, nstart = 10)$tot.withinss})
plot(1:k.max,wss,type = "b",pch = 19, frame = FALSE,
     xlab = "Number of clusters K",ylab = "Total within-clusters sum of squares")
abline(v = 3,lty =2, col = "red")
# Silhouette-based choice of k.
fviz_nbclust(iris2, kmeans, method = c("silhouette"))
iris.kmeans <- kmeans(iris2,3)
autoplot(iris.kmeans, data = iris2, frame = TRUE, frame.type = "norm")
# Silhouette diagnostics for the 3-cluster solution (squared distances).
dis <- dist(iris2)^2
sil <- silhouette(iris.kmeans$cluster, dis)
plot(sil)
# Compare the true species against the cluster assignment.
plot(iris$Species,iris.kmeans$cluster)
|
ddbe129b4cfbd7bc60d09b9a83b6dc5e0f1b540d | 7de45ce117ac9f1e9df09ed3cd8aea73ac773857 | /R/xform.R | a397efed0920e1bdd9a3489cdef25485bc9c52f7 | [] | no_license | acamargofb/RNifti | 1043593ff757b1bdffc9a3f41e12bfbbad68c992 | 44e92773d30a6efc519dac141b1c56bf056f9868 | refs/heads/master | 2020-08-24T20:08:55.035931 | 2019-09-26T14:43:27 | 2019-09-26T14:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,210 | r | xform.R | #' Obtain or replace the ``xform'' transforms for an image
#'
#' These functions convert the ``qform'' or ``sform'' information in a NIfTI
#' header to or from a corresponding affine matrix. These two ``xform''
#' mechanisms are defined by the NIfTI standard, and may both be in use in a
#' particular image header. They define the relationship between the storage
#' order of the image and real space.
#'
#' Image orientation is indicated using a three-character string, with each
#' character indicating the approximate world-space direction of the positive
#' axes in the first, second and third dimensions, in order. Each character may
#' be `R' for left-to-right, `L' for right-to-left, `A' for posterior-to-
#' anterior, `P' for anterior-to-posterior, `S' for inferior-to-superior, or
#' `I' for superior-to-inferior. The default for NIfTI is RAS, meaning that the
#' first dimension points towards the right, the second towards the front and
#' the third towards the top. An xform matrix is an affine transform relative
#' to that default.
#'
#' The upper-left 3x3 matrix in a 3D affine transform governs scale, rotation
#' and skew, while the last column is a translation. (The \code{rotation}
#' function extracts the rotation part alone.) The final row is always
#' (0,0,0,1). Reorienting an image involves permuting and possibly reversing
#' some of the axes, both in the data and the metadata. The sense of the
#' translation may also need to be reversed, but this is only possible if the
#' image dimensions are known, which isn't the case when reorienting an xform
#' alone.
#'
#' @param image,x An image, in any acceptable form (see
#' \code{\link{retrieveNifti}}), or a 4x4 numeric xform matrix.
#' @param useQuaternionFirst A single logical value. If \code{TRUE}, the
#' ``qform'' matrix will be used first, if it is defined; otherwise the
#' ``sform'' matrix will take priority.
#' @param value A new 4x4 qform or sform matrix, or orientation string. If a
#' matrix has a \code{"code"} attribute, the appropriate qform or sform code
#' is also set.
#' @return For \code{xform}, an affine matrix corresponding to the ``qform''
#' or ``sform'' information in the image header. For \code{orientation}, a
#' string with three characters indicating the (approximate) orientation of
#' the image. The replacement forms return the modified object.
#'
#' @note The qform and sform replacement functions are for advanced users only.
#' Modifying the transforms without knowing what you're doing is usually
#' unwise, as you can make the image object inconsistent.
#'
#' @examples
#' im <- readNifti(system.file("extdata", "example.nii.gz", package="RNifti"))
#' xform(im)
#'
#' # Remove the qform information
#' qform(im) <- structure(diag(4), code=0L)
#'
#' # The same as above, since the sform is unmodified
#' xform(im)
#'
#' # The identity matrix corresponds to RAS orientation
#' orientation(diag(4))
#'
#' @author Jon Clayden <code@@clayden.org>
#' @references The NIfTI-1 standard (\url{http://www.nitrc.org/docman/view.php/26/64/nifti1.h})
#' is the definitive reference on ``xform'' conventions.
#' @export
xform <- function (image, useQuaternionFirst = TRUE) {
    # Delegate to the compiled RNifti routine, which assembles the affine
    # from the image's qform/sform fields; coerce the flag to a strict logical.
    .Call("getXform", image, isTRUE(useQuaternionFirst), PACKAGE = "RNifti")
}
#' @rdname xform
#' @export
"qform<-" <- function (x, value)
{
return (.Call("setXform", x, value, TRUE, PACKAGE="RNifti"))
}
#' @rdname xform
#' @export
"sform<-" <- function (x, value)
{
return (.Call("setXform", x, value, FALSE, PACKAGE="RNifti"))
}
#' @rdname xform
#' @export
orientation <- function (x, useQuaternionFirst = TRUE) {
    # Ask the compiled code for the three-character orientation string
    # (e.g. "RAS") implied by the image's xform.
    .Call("getOrientation", x, isTRUE(useQuaternionFirst), PACKAGE = "RNifti")
}
#' @rdname xform
#' @export
"orientation<-" <- function (x, value)
{
return (.Call("setOrientation", x, as.character(value), PACKAGE="RNifti"))
}
#' @rdname xform
#' @export
rotation <- function (x, useQuaternionFirst = TRUE) {
    # Extract just the rotation part of the xform via the compiled routine.
    .Call("getRotation", x, isTRUE(useQuaternionFirst), PACKAGE = "RNifti")
}
#' Transform points between voxel and ``world'' coordinates
#'
#' These functions are used to transform points from dimensionless pixel or
#' voxel coordinates to ``real-world'' coordinates, typically in millimetres,
#' and back. Actual pixel units can be obtained using the
#' \code{\link{pixunits}} function. The \code{origin} function gives the voxel
#' coordinates of the real-world origin.
#'
#' @param points A vector giving the coordinates of a point, or a matrix with
#' one point per row.
#' @param image The image in whose space the points are given, or a 4x4 numeric
#' xform matrix.
#' @param simple A logical value: if \code{TRUE} then the transformation is
#' performed simply by rescaling the points according to the voxel dimensions
#' recorded in the \code{image}. Otherwise the full xform matrix is used.
#' @param ... Additional arguments to \code{\link{xform}}.
#' @return A vector or matrix of transformed points.
#'
#' @note Voxel coordinates are assumed by these functions to use R's indexing
#' convention, beginning from 1.
#'
#' @examples
#' im <- readNifti(system.file("extdata", "example.nii.gz", package="RNifti"))
#'
#' # Find the origin
#' origin(im)
#'
#' @author Jon Clayden <code@@clayden.org>
#' @seealso \code{\link{xform}}, \code{\link{pixdim}}, \code{\link{pixunits}}
#' @export
voxelToWorld <- function (points, image, simple = FALSE, ...)
{
    # Map 1-based voxel coordinates to real-world ("mm") coordinates.
    # A single point may be given as a plain vector; internally we always
    # work with one point per row.
    pts <- if (is.matrix(points)) points else matrix(points, nrow = 1)
    if (simple) {
        # Simple mode: shift to 0-based indices and scale by voxel size only.
        vdims <- abs(pixdim(image)[seq_len(ncol(pts))])
        drop(t(apply(pts - 1, 1, function(p) p * vdims)))
    } else {
        # Full mode: apply the image's affine xform.
        affine <- xform(image, ...)
        nDims <- ncol(pts)
        if (nDims != 2 && nDims != 3)
            stop("Points must be two- or three-dimensional")
        if (nDims == 2)
            affine <- matrix(affine[c(1, 2, 4, 5, 6, 8, 13, 14, 16)],
                             ncol = 3, nrow = 3)
        # Homogeneous coordinates: 0-based voxel indices with a trailing 1.
        homog <- cbind(pts - 1, 1)
        mapped <- affine %*% t(homog)
        drop(t(mapped[seq_len(nDims), , drop = FALSE]))
    }
}
#' @rdname voxelToWorld
#' @export
worldToVoxel <- function (points, image, simple = FALSE, ...)
{
    # Map real-world coordinates back to 1-based voxel coordinates.
    # A single point may be given as a plain vector; internally we always
    # work with one point per row.
    pts <- if (is.matrix(points)) points else matrix(points, nrow = 1)
    if (simple) {
        # Simple mode: divide by voxel size, then shift to 1-based indices.
        vdims <- abs(pixdim(image)[seq_len(ncol(pts))])
        drop(t(apply(pts, 1, function(p) p / vdims) + 1))
    } else {
        # Full mode: invert the image's affine xform.
        affine <- solve(xform(image, ...))
        nDims <- ncol(pts)
        if (nDims != 2 && nDims != 3)
            stop("Points must be two- or three-dimensional")
        if (nDims == 2)
            affine <- matrix(affine[c(1, 2, 4, 5, 6, 8, 13, 14, 16)],
                             ncol = 3, nrow = 3)
        # Homogeneous coordinates: world points with a trailing 1.
        homog <- cbind(pts, 1)
        mapped <- affine %*% t(homog) + 1
        drop(t(mapped[seq_len(nDims), , drop = FALSE]))
    }
}
#' @rdname voxelToWorld
#' @export
origin <- function (image, ...)
{
    # The image origin is the voxel coordinate mapped to world-space (0,0,0).
    worldToVoxel(c(0, 0, 0), image, ...)
}
|
8267e270a0e8e409c959596e979910c5eef4bfa9 | 934f98f884deeb66925cee156022f6d6329b25a2 | /R/datashieh_CI.R | f606ad6ac39bd29ebedd5047f1842f00af672953 | [
"MIT"
] | permissive | arcaldwell49/deffectsize | 970f2b3908bb25ae15940d26c70b1ee66998187f | d3c4b58a3d72b686a6de95f7d9447b305e460d46 | refs/heads/main | 2023-03-27T17:43:20.614061 | 2021-03-23T09:50:27 | 2021-03-23T09:50:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,371 | r | datashieh_CI.R | #' Function to compute CI around Shieh's effect size estimators
#'
#' @param Group.1 a (non-empty) numeric vector of data values.
#' @param Group.2 a (non-empty) numeric vector of data values.
#' @param conf.level confidence level of the interval
#' @param unbiased a logical variable indicating whether to compute the biased or unbiased estimator.
#' If TRUE, unbiased estimator is computed (Hedges' g or Hedges' g'). Otherwise, bias estimator is computed (Cohen's d or Cohen's d').
#' @param alternative a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less".
#' @param na.rm set whether Missing Values should be excluded (na.rm = TRUE) or not (na.rm = FALSE) - defaults to TRUE.
#'
#' @export datashieh_CI
#'
#' @exportS3Method datashieh_CI default
#' @exportS3Method print datashieh_CI
#'
#' @keywords Cohen's effect sizes, confidence interval
#' @return Returns Cohen's estimators of effect size and (1-alpha)% confidence interval around it, standard error
#' @importFrom stats na.omit sd pt uniroot
datashieh_CI <- function(Group.1,Group.2,conf.level,unbiased, alternative,na.rm) UseMethod("datashieh_CI")
datashieh_CIEst <- function(Group.1,
                            Group.2,
                            conf.level = .95,
                            unbiased = TRUE,
                            alternative = "two.sided",
                            na.rm = TRUE){
  # Compute Shieh's standardised effect size for two independent groups and
  # a noncentral-t confidence interval around it.
  #
  # Args:
  #   Group.1, Group.2: numeric (or integer) vectors of observations.
  #   conf.level: confidence level of the interval.
  #   unbiased: if TRUE, apply Hedges' small-sample bias correction.
  #   alternative: "two.sided", "greater" or "less".
  #   na.rm: drop missing values before computing?
  #
  # Returns (invisibly): list(ES, conf.level, CI).
  if (isTRUE(na.rm)) {
    Group.1 <- na.omit(Group.1)
    Group.2 <- na.omit(Group.2)
  }
  if (!inherits(Group.1, c("numeric", "integer")) ||
      !inherits(Group.2, c("numeric", "integer")))
    stop("Data are neither numeric nor integer")
  n1 <- length(Group.1)
  n2 <- length(Group.2)
  N <- n1 + n2
  m1 <- mean(Group.1)
  m2 <- mean(Group.2)
  sd1 <- sd(Group.1)
  sd2 <- sd(Group.2)
  q1 <- n1/N
  q2 <- n2/N
  # Shieh's standardised mean difference (allows unequal variances and ns).
  shieh.d <- (m1 - m2)/sqrt(sd1^2/q1 + sd2^2/q2)
  # Welch-Satterthwaite degrees of freedom.
  df <- ((sd1^2/n1 + sd2^2/n2)^2) /
    ((sd1^2/n1)^2/(n1 - 1) + (sd2^2/n2)^2/(n2 - 1))
  # Welch's t statistic for the observed data.
  w_obs <- (m1 - m2)/sqrt(sd1^2/n1 + sd2^2/n2)
  if (isTRUE(unbiased)) {
    # Hedges' bias correction factor gamma(df/2)/(sqrt(df/2)*gamma((df-1)/2)),
    # evaluated on the log scale so that large df does not overflow gamma()
    # into NaN.
    corr <- exp(lgamma(df/2) - lgamma((df - 1)/2))/sqrt(df/2)
  } else {
    corr <- 1
  }
  ES <- shieh.d * corr
  if (alternative == "two.sided") {
    # Lower limit: the noncentrality lambda such that
    # 1 - pt(w_obs, df, ncp = lambda) = (1 - conf.level)/2.
    f <- function(lambda, rep) 1 - pt(q = w_obs, df = df, ncp = lambda) - rep
    out <- uniroot(f, c(0, 2), rep = (1 - conf.level)/2, extendInt = "yes")
    delta.1 <- out$root/sqrt(N)  # lambda = delta * sqrt(N) <--> delta = lambda/sqrt(N)
    # Upper limit: lambda such that pt(w_obs, df, ncp = lambda) = alpha/2.
    f <- function(lambda, rep) pt(q = w_obs, df = df, ncp = lambda) - rep
    out <- uniroot(f, c(0, 2), rep = (1 - conf.level)/2, extendInt = "yes")
    delta.2 <- out$root/sqrt(N)
    result <- c(delta.1 * corr, delta.2 * corr)
  } else if (alternative == "greater") {
    # One-sided: only the lower limit is informative; upper limit is +Inf.
    f <- function(lambda, rep) 1 - pt(q = w_obs, df = df, ncp = lambda) - rep
    out <- uniroot(f, c(0, 2), rep = 1 - conf.level, extendInt = "yes")
    delta.1 <- out$root/sqrt(N)
    result <- c(delta.1 * corr, Inf)
  } else if (alternative == "less") {
    # One-sided: only the upper limit is informative; lower limit is -Inf.
    f <- function(lambda, rep) pt(q = w_obs, df = df, ncp = lambda) - rep
    out <- uniroot(f, c(0, 2), rep = 1 - conf.level, extendInt = "yes")
    delta.2 <- out$root/sqrt(N)
    result <- c(-Inf, delta.2 * corr)
  } else {
    stop("alternative must be one of 'two.sided', 'greater' or 'less'")
  }
  # Return results in list()
  invisible(
    list(ES = ES,
         conf.level = conf.level,
         CI = result)
  )
}
# Adding a default method in defining a function called datashieh_CI.default
datashieh_CI.default <- function(
  Group.1,
  Group.2,
  conf.level=.95,
  unbiased=TRUE,
  alternative="two.sided",
  na.rm=TRUE){
  # Default method: estimate the effect size and interval, then package the
  # result as a "datashieh_CI" object carrying the matched call, so that the
  # dedicated print method can display it.
  res <- datashieh_CIEst(Group.1, Group.2, conf.level, unbiased,
                         alternative, na.rm)
  res$call <- match.call()
  oldClass(res) <- "datashieh_CI"
  res
}
print.datashieh_CI <- function(x, ...){
  # Display the originating call.
  cat("Call:\n")
  print(x$call)
  # Point estimate, rounded to three decimals for display.
  cat("\nEffect size estimate :\n")
  print(round(x$ES, 3))
  # Confidence interval, labelled with the level as a percentage.
  pct <- x$conf.level * 100
  cat(paste0("\n", pct, " % confidence interval around effect size estimate:\n"))
  print(round(x$CI, 3))
}
|
a4abc33be36e07cdd72d0ee1b41e31b42c4bd62f | 1790c48507e09f3c019ea2535afd0326e2b54c4a | /man/contig_1993.Rd | 921c4b3e9b51e04152f5daf86121c8815c1ba41e | [] | no_license | zalmquist/networkMethods | 2d7a06a23688d86863a5fc8385504979a1f0edac | 6d613882355180358b0e667e048e0a269380b46a | refs/heads/master | 2021-03-13T00:10:25.427612 | 2015-11-26T22:30:04 | 2015-11-26T22:30:04 | 39,535,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 594 | rd | contig_1993.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{contig_1993}
\alias{contig_1993}
\title{Correlates of War 1993: Contiguity among nations}
\format{A network object:
\describe{
\item{contig_1993}{network}
}}
\source{
\url{http://www.correlatesofwar.org/}.
data(contig_1993)
contig_1993
}
\usage{
contig_1993
}
\description{
Correlates of War 1993: Contiguity among nations
}
\keyword{datasets}
|
6078c065e96e8ec9ad1a9cc117b49489659cfbd2 | f65ef863249f0a1d1309f531d8f34cda029b2e60 | /man/genesbytbruceitimeseries.Rd | cef89caa01e60290ebe8d44985243c468bb60097 | [] | no_license | duncantl/REuPathDB | f01892a1685119f426e00683f33597ca7dc2953b | 8f9764c73df29e87cd7f779c8a5f9ee6aaa6904b | refs/heads/master | 2020-05-18T16:47:42.147123 | 2011-12-06T17:39:50 | 2011-12-06T17:39:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,570 | rd | genesbytbruceitimeseries.Rd | \name{genesbytbruceitimeseries}
\alias{GenesByTbruceiTimeSeries}
\title{Identify <i>T.brucei</i> based on fold change expression difference between a "comparison" time point stage and a reference time point.}
\description{Find genes based on fold change expression difference between a "comparison" time point stage and a reference time point.
<br><br>(1) Expression profiling of <i>T. brucei</i> microarray data.<br><br> Find genes based on their expression profile during in vitro differentiation from slender bloodstream forms to stumpy procyclic forms. For this study RNA from T. brucei strain EATRO1125 (clone AnTat 1.1) was isolated as follows: Low density (bf-ld) blood forms (pre-differentiation), high density (bf-hd) blood forms (time 0), 0.5, 1, 12, 24, 48 and 72hrs post differentiation.<br>For additional details please access the publication: <a href=http://www.ncbi.nlm.nih.gov/pubmed/19857263>http://www.ncbi.nlm.nih.gov/pubmed/19857263</a>
<br><br><br>(2) Expression profiling of <i>T. brucei</i> differentiation series.<br><br>Expression profile of all trypanosome genes in rodent-derived pleomorphic slender forms, stumpy forms and at 1h, 6h, 18h, and 48h through synchronous in vitro differentiation to procyclic forms. The dataset was derived by microarray analysis (JCVI vs 3), using 5 biological replicates and was carefully tethered into a biological chacterisation of the same samples during their differentiation.<br>For additional details please access the publication: <a href="http://www.ncbi.nlm.nih.gov/pubmed/19747379">http://www.ncbi.nlm.nih.gov/pubmed/19747379</a>}
\arguments{
\item{tb_fc_profile_pd}{Choose a time series}
\item{tb_ts_fc_two_pd}{Choose one or more time points. NOTE: if more than one is chosen, the fold change will be calculated using the average of all samples within the group.
Provide one or more values. Use comma as a delimiter.}
\item{tb_ts_fc_one_pd}{Choose one or more time points. NOTE: if more than one is chosen, the fold change will be calculated using the average of all samples within the group.
Provide one or more values. Use comma as a delimiter.}
\item{fold_change}{Enter a non-negative number. NOTE: Fold change is reported in the summary as positive numbers for up-regulated genes and negative numbers for down-regulated genes}
\item{regulated_dir}{For ConditionA vs. ConditionB, select up-regulated for genes where ConditionA > ConditionB and select down-regulated for genes where ConditionB > ConditionA.}
\item{protein_coding_only}{Should only protein coding genes be returned?}
\item{o-fields}{Single-valued attributes of the feature.
Provide one or more values. Use comma as a delimiter.}
\item{o-tables}{Multi-valued attributes of the feature.
Provide one or more values. Use comma as a delimiter.}
\item{.convert}{a logical value or a function that controls how the result of the method is returned. If this is a function, the character string or raw vector is passed to this function and it converts it appropriately. If this is a logical value and \code{TRUE}, then we attempt to convert the result based on its Content-Type returned by the Web server. If this is \code{FALSE}, the value from the Web server is returned as is.}
\item{.url}{the URL for the Web request. This defaults to the correct value, but can be specified by the caller if the method is available at a different URL, e.g. locally or in a mirror server.}
\item{.json}{a logical value controlling whether to use the JSON or the XML version of the method}}
\value{text/xml
text/plain}
\author{}
|
6b702dd2ca13021f03f9c40487a55bc1bf325c3b | cfcf2a2002bf6099ed5bbfcfa215f3c83efb14a2 | /07c_texmex_slimline.R | 6ef5374bf39ac8a1e822696cca08bf3334c2daef | [] | no_license | griffada/AQUACAT_UKCEH | c07dcbf1ac277cd4759929e3cc2fe121cdc68fb5 | cee49f0fa5a8b3d1fc7dab7f02da4f64648ffc5a | refs/heads/master | 2023-08-16T08:02:51.831710 | 2021-10-22T13:50:48 | 2021-10-22T13:50:48 | 281,631,316 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,894 | r | 07c_texmex_slimline.R | #~~~~~~~~~~~~~~~~~~~~~~~
# Adam Griffin, 2020-02-21
#
# Rewritten functions to perform HT modelling on big dataset.
# Takes texmex and pulled it apart, making use of a small number of internal
# texmex functions.
# Generally, this is an aim to reduce overheads by removing copying of large
# objects and large list structures.
#
# This is tested in slimline_testing2.R
#
# For aquaCAT, Project 07441.
#
# Created ABG 2020-08-14, edited 2020-09-03
#
# OUTPUTS: None
#
#~~~~~~~~~~~~~~~~~~~~~~~
library(texmex)
migpd_slim <- function(mth, mqu, penalty = "gaussian", maxit = 10000,
                       trace = 0, verbose = FALSE, priorParameters = NULL, cov = "observed",
                       family = gpd){
  # Extension of **migpd** from texmex, removing data from the arguments.
  # Reduces the number of outputs to lower overheads.
  # Fetches DATA from the global environment and fits an independent marginal
  # model (texmex::evm) to each column, above a per-column threshold.
  #
  # Args:
  #   mth: marginal thresholds, one per column (recycled to ncol(DATA)).
  #   mqu: marginal quantiles used to derive thresholds. Exactly one of
  #        mth and mqu must be supplied; the other is derived below.
  #   penalty, maxit, trace, priorParameters, cov, family: passed through
  #        to texmex::evm for each marginal fit.
  #   verbose: print progress per column?
  #
  # Requires global: DATA (one event per row, one location per column).
  # Returns (invisibly) a "migpd"-classed list: models, mth, mqu.
  if(!exists("DATA")){
    stop("Needs DATA object; one event per row, one location per column.")
  }
  theCall <- match.call()  # NOTE(review): captured but not returned/used
  if (is.null(colnames(DATA))) {
    colnames(DATA) <- paste(rep("Column", ncol(DATA)),
                            1:ncol(DATA), sep = "")
  }
  d <- dim(DATA)[2]
  # Exactly one of mth/mqu must be given. The missing() tests below rely on
  # mth/mqu only ever being assigned in the branch where they were supplied,
  # so the later missing() checks still reflect the original call.
  if (missing(mth) & missing(mqu))
    stop("you must provide one of mth or mqu")
  if (!missing(mth) & !missing(mqu))
    stop("you must provide precisely one of mth or mqu")
  if (!(family$name %in% c("GPD", "CGPD"))) {
    stop("family should be either gpd or cgpd")
  }
  if (!missing(mth))
    mth <- rep(mth, length = d)
  if (!missing(mqu))
    mqu <- rep(mqu, length = d)
  # Derive the one that was not supplied, column by column.
  if (missing(mqu))
    mqu <- sapply(1:d, function(i, mth) 1 - mean(DATA[,i] > mth[i]), mth = mth)
  if (missing(mth))
    mth <- sapply(1:d, function(i, prob) quantile(DATA[,i], prob = prob[i], na.rm=T), prob = mqu)
  # For penalised estimation, build default vague Gaussian priors when none
  # are supplied; otherwise validate that the supplied priors are named after
  # columns of DATA.
  if (penalty %in% c("quadratic", "gaussian") &
      is.null(priorParameters)) {
    gp = list(c(0, 0), matrix(c(100^2, 0, 0, 0.25), nrow = 2))
    priorParameters <- vector("list", length = length(mth))
    for (i in 1:length(mth)) priorParameters[[i]] <- gp
    names(priorParameters) <- dimnames(DATA)[[2]]
  }
  else if (penalty %in% c("quadratic", "gaussian")) {
    nm <- names(priorParameters)
    if (is.null(nm)) {
      stop("priorParameters must be a named list")
    }
    else if (any(!is.element(nm, dimnames(DATA)[[2]]))) {
      stop("the names of priorParameters must match the column names of the DATA")
    }
  }
  # Fit the marginal model for column i of DATA.
  wrapgpd_slim <- function(i, mth, penalty, maxit, verbose, trace,
                           priorParameters) {
    if (verbose)
      cat("Fitting model", i, "\n")
    # Select the prior entry matching this column's name.
    if (!is.null(priorParameters))
      priorParameters <-
        priorParameters[[(1:length(priorParameters))[
          names(priorParameters) == dimnames(DATA)[[2]][i]]]]
    x <- c(DATA[,i])
    mth <- mth[i]
    evm(x, th = mth, penalty = penalty, priorParameters = priorParameters,
        maxit = maxit, trace = trace, cov = cov, family = family)
  }
  # One marginal fit per column.
  modlist <- lapply(1:d, wrapgpd_slim, penalty = penalty,
                    mth = mth, verbose = verbose,
                    priorParameters = priorParameters,
                    maxit = maxit, trace = trace)
  if (length(dimnames(DATA)[[2]]) == dim(DATA)[[2]]) {
    names(modlist) <- dimnames(DATA)[[2]]
  }
  names(mth) <- names(mqu) <- dimnames(DATA)[[2]]
  res <- list(models = modlist, mth = mth, mqu = mqu)
  oldClass(res) <- "migpd"
  invisible(res)
}
mexTransform_slim <- function(marginfns, mth, r=NULL, method = "mixture",
                              divisor = "n+1", na.rm = TRUE){
    # Extension of **mexTransform** from texmex, removing data and models
    # from the arguments. Transforms each column of the global DATA matrix to
    # the target margin defined by marginfns$p2q, using a semiparametric CDF:
    # empirical below the marginal threshold, fitted GPD tail above it.
    #
    # Args:
    #   marginfns: list whose $p2q element maps probabilities to quantiles
    #       (Gumbel or Laplace).
    #   mth: vector of marginal thresholds, one per column of DATA.
    #   r: optional reference margin (output of
    #       makeReferenceMarginalDistribution); only its $mth is used here.
    #   method: "mixture" (empirical below threshold, GPD above) or
    #       "empirical" (empirical CDF everywhere).
    #   divisor: denominator of the empirical CDF, "n" or "n+1".
    #   na.rm: unused; retained for interface compatibility.
    #
    # Requires globals: DATA (events x locations) and MODELS (marginal GPD
    # fits from migpd_slim, one per column).
    #
    # Returns a list holding the transformed matrix under BOTH names
    # "transformed" and "TRANSFORMED". Previously only "TRANSFORMED" was
    # returned, but the downstream consumer (mexDependence_slim) reads
    # x$transformed, and $ partial matching is case-sensitive, so that access
    # silently yielded NULL; returning both names fixes this while remaining
    # backward-compatible.
    if(!exists("DATA")){
        stop("Needs DATA object; one event per row, one location per column.")
    }
    if(!exists("MODELS")){
        stop("Needs MODELS object; a list of one marginal model per location, output from migpd, $models item.")
    }
    if (!is.element(method, c("mixture", "empirical")))
        stop("method should be either 'mixture' or 'empirical'")
    if (!is.element(divisor, c("n", "n+1")))
        stop("divisor can be 'n' or 'n+1'")
    if (is.null(r)){
        # No reference margin supplied: thresholds come straight from mth.
        r <- list(mth = mth)
    }
    # Transform a single column of DATA to the uniform scale.
    transFun <- function(i, th, divisor, method){
        x <- DATA[,i]
        r <- DATA[,i]
        mod <- MODELS[[i]]
        th <- th[i]
        if (divisor == "n") divisor <- length(r)
        else if (divisor == "n+1") divisor <- length(r) + 1
        ox <- order(x)
        r <- sort(r)
        run <- rle(r)
        # Empirical CDF with tied values sharing a probability.
        p <- cumsum(run$lengths) / divisor
        p <- rep(p, run$lengths)
        Femp <- p[sapply(x, function(y) which.min(abs(r - y)))]
        if (method == "mixture"){
            # GPD tail above the threshold; coefficients[1] is log(sigma).
            sigma <- exp(mod$coefficients[1])
            xi <- mod$coefficients[2]
            Para <- (1 + xi * (x - th) / sigma) ^ (-1 / xi)
            Para <- 1 - mean(r > th) * Para
            Para[Para == 1] <- 1 - 1e-8 # keep strictly below 1; p2q takes logs
            res <- ifelse((x <= th), Femp, Para)
        }
        else res <- Femp
        # Make the transform exactly rank-preserving in x.
        res[ox] <- sort(res)
        res
    }
    res <- sapply(1:ncol(DATA), transFun, th = r$mth,
                  divisor = divisor, method = method)
    colnames(res) <- names(MODELS)
    q <- marginfns$p2q(res)
    list(transformed = q, TRANSFORMED = q)
}
PosGumb.Laplace.negloglik <- function (yex, ydep, a, b, m, s, constrain, v, aLow){
    # Negative log-likelihood of the Heffernan-Tawn conditional model:
    # ydep | yex ~ Normal(a*yex + m*yex^b, (s*yex^b)^2).
    # Inadmissible parameter values are penalised with a very large number so
    # that optim() steers away from them.
    big <- 10^40
    tiny <- 10^(-10)
    if (a < aLow[1] || s < tiny || a > 1 - tiny || b > 1 - tiny)
        return(big)
    mu <- a * yex + m * yex^b
    sig <- s * yex^b
    nll <- sum(0.5 * log(2 * pi) + log(sig) + 0.5 * ((ydep - mu)/sig)^2)
    if (is.infinite(nll)) {
        # Clamp +/-Inf to finite sentinels so the optimiser can proceed.
        nll <- if (nll < 0) -big else big
        warning("Infinite value of Q in mexDependence")
    } else if (constrain) {
        # Keskin-Tawn constraints on the residual ranges.
        zpos <- range(ydep - yex)
        z <- range((ydep - yex * a)/(yex^b))
        zneg <- range(ydep + yex)
        if (!texmex:::ConstraintsAreSatisfied(a, b, z, zpos, zneg, v))
            nll <- big
    }
    nll
}
PosGumb.Laplace.negProfileLogLik <- function (yex, ydep, a, b, constrain, v, aLow) {
    # Profile likelihood in (a, b): given the dependence parameters, the
    # optimal Gaussian nuisance parameters are the sample mean and SD of the
    # standardised residuals Z = (ydep - a*yex) / yex^b.
    resid <- (ydep - yex * a)/(yex^b)
    mhat <- mean(resid)
    shat <- sd(resid)
    nll <- PosGumb.Laplace.negloglik(yex, ydep, a, b, m = mhat, s = shat,
                                     constrain, v, aLow = aLow)
    list(profLik = nll, m = mhat, s = shat)
}
mexDependence_slim <- function (whch, dqu, mth, mqu=0.7, margins = "laplace",
                                constrain = TRUE, v = 10, maxit = 1e+06,
                                start = c(0.01, 0.01), marTransform = "mixture",
                                referenceMargin = NULL, marginsTransformed = NULL,
                                nOptim = 1, zspot=FALSE){
  # Extension of **mexDependence** from texmex, removing data from the
  # arguments. Fits the Heffernan-Tawn conditional dependence model for all
  # other columns of the global DATA matrix, conditioning on column `whch`
  # exceeding its dependence threshold (quantile dqu on the transformed
  # scale).
  #
  # Args:
  #   whch: index or column name of the conditioning variable.
  #   dqu: dependence threshold quantile (defaults to mqu[whch]).
  #   mth, mqu: marginal thresholds/quantiles from the marginal fits.
  #   margins: "laplace" or "gumbel" target margins.
  #   constrain, v: apply Keef-Papastathopoulos-Tawn constraints.
  #   maxit, start, nOptim: optimiser controls / starting values.
  #   marTransform, referenceMargin: passed to mexTransform_slim when
  #       marginsTransformed is not supplied.
  #   marginsTransformed: pre-transformed data matrix (skips transformation).
  #   zspot: if TRUE, print the dimension of the residual matrix.
  #
  # Side effects: writes the 6 x (d-1) coefficient matrix into the global
  # array COEFFS[,,whch] and the residual matrix into the global list
  # Z[[whch]] (both via <<-).
  #
  # Returns a list: margins (referenceMargin only), dependence (thresholds
  # and settings), errcode, and zspot (dimension of the residual matrix).
  if(!exists("DATA")){
    stop("Needs DATA object; one event per row, one location per column.")
  }
  theCall <- match.call()  # NOTE(review): captured but never used/returned
  errcode <- 0
  # Probability <-> quantile maps for the chosen target margin.
  marginfns <- list(casefold(margins),
                    p2q = switch(casefold(margins),
                                 gumbel = function(p) -log(-log(p)),
                                 laplace = function(p) ifelse(p < 0.5, log(2*p), -log(2 * (1-p)))),
                    q2p = switch(casefold(margins),
                                 gumbel = function(q) exp(-exp(-q)),
                                 laplace = function(q) ifelse(q < 0, exp(q)/2, 1 - 0.5 * exp(-q))))
  if(!is.null(marginsTransformed)){
    x <- list(transformed = marginsTransformed)
  }else{
    # NOTE(review): mexTransform_slim returns its matrix under the name
    # "TRANSFORMED" (upper case); the accesses below read x$transformed
    # (lower case), and $ matching is case-sensitive, so this branch appears
    # to yield NULL unless marginsTransformed is supplied -- confirm.
    x <- mexTransform_slim(marginfns = marginfns, mth = mth, method = marTransform,
                           r = referenceMargin)
  }
  x$referenceMargin <- referenceMargin
  if (marginfns[[1]] == "gumbel" & constrain) {
    warning("With Gumbel margins, you can't constrain, setting constrain=FALSE")
    constrain <- FALSE
  }
  # Resolve the conditioning column to a single numeric index.
  if (missing(whch)) {
    message("Missing 'which'. Conditioning on", dimnames(x$transformed)[[2]][1],
            "\n")
    whch <- 1
  }
  else if (length(whch) > 1)
    stop("which must be of length 1")
  else if (is.character(whch))
    whch <- match(whch, dimnames(x$transformed)[[2]])
  if (missing(dqu)) {
    message(paste("Assuming same quantile for dependence thesholding as was used\n",
                  "to fit corresponding marginal model...\n"))
    dqu <- mqu[whch]
  }
  # Dependence threshold on the transformed scale.
  dth <- quantile(x$transformed[, whch], dqu)
  dependent <- seq_len(ncol(DATA))[-whch]
  if (length(dqu) < length(dependent))
    dqu <- rep(dqu, length = length(dependent))
  # Lower bound for the 'a' parameter: ~0 for Gumbel, ~-1 for Laplace.
  aLow <- ifelse(margins[[1]] == "gumbel", 10^(-10),
                 -1 + 10^(-10))
  # Starting values: a vector of length 2 is recycled to one column per
  # dependent variable; a fitted "mex" object supplies its own coefficients.
  if (missing(start)) {
    start <- c(0.01, 0.01)
  }
  else if (inherits(start, "mex")) {
    start <- start$dependence$coefficients[1:2, ]
  }
  if (length(start) == 2) {
    start <- matrix(rep(start, length(dependent)), nrow = 2)
  }
  if (length(start) != 2 * length(dependent)) {
    stop(paste("start should be of type 'mex' or be a vector of length 2, or be a matrix",
               "with 2 rows and ncol equal to the number of dependence models to be estimated"))
  }
  # Fit one conditional model: X is the dependent column, yex the
  # conditioning column, wh the exceedance indicator. Returns the six model
  # parameters (a, b, c, d, m, s) followed by the optimised objective value.
  qfun <- function(X, yex, wh, aLow, margins, constrain, v, maxit, start) {
    # Profile negative log-likelihood in (a, b).
    Qpos <- function(param, yex, ydep, constrain, v, aLow) {
      a <- param[1]
      b <- param[2]
      res <- PosGumb.Laplace.negProfileLogLik(yex, ydep, a, b, constrain, v, aLow)
      res$profLik
    }
    o <- try(optim(par = start, fn = Qpos, control = list(maxit = maxit),
                   yex = yex[wh], ydep = X[wh], constrain = constrain,
                   v = v, aLow = aLow))
    # NOTE(review): the errcode assignments below create locals inside qfun
    # and do not update the enclosing function's errcode (no <<-), so codes
    # 0101-0105 are never reported by the return value -- confirm intent.
    if (inherits(o, "try-error")) {
      if(interactive()) browser()
      warning("Error in optim call from mexDependence 0101")
      errcode <- 101
      o <- as.list(o)
      o$par <- rep(NA,6)
      o$value <- NA
    }
    else if (o$convergence != 0) {
      warning("Non-convergence in mexDependence 0102")
      errcode <- 102
      o <- as.list(o)
      o$par <- rep(NA,6)
    }
    else if (nOptim > 1) {
      # Optional restarts from the previous optimum.
      for (i in 2:nOptim) {
        o <- try(optim(par = o$par, fn = Qpos, control = list(maxit = maxit),
                       yex = yex[wh], ydep = X[wh], constrain = constrain,
                       v = v, aLow = aLow), silent = TRUE)
        if (inherits(o, "try-error")) {
          warning("Error in optim call from mexDependence 0103")
          errcode <- 103
          o <- as.list(o)
          o$par <- rep(NA, 6)
          o$value <- NA
          (break)()
        }
        else if (o$convergence != 0) {
          warning("Non-convergence in mexDependence 0104")
          errcode <- 104
          o <- as.list(o)
          o$par <- rep(NA, 6)
          (break)()
        }
      }
    }
    if (!is.na(o$par[1])) {
      if (margins == "gumbel" & o$par[1] <= 10^(-5) &
          o$par[2] < 0) {
        # Gumbel margins with negative association: refit the alternative
        # (c, d) parameterisation by direct minimisation.
        lo <- c(10^(-10), -Inf, -Inf, 10^(-10), -Inf,
                10^(-10))
        Qneg <- function(yex, ydep, param) {
          param <- param[-1]
          b <- param[1]
          cee <- param[2]
          d <- param[3]
          m <- param[4]
          s <- param[5]
          obj <- function(yex, ydep, b, cee, d, m, s) {
            mu <- cee - d * log(yex) + m * yex^b
            sig <- s * yex^b
            log(sig) + 0.5 * ((ydep - mu)/sig)^2
          }
          res <- sum(obj(yex, ydep, b, cee, d, m, s))
          res
        }
        o <- try(optim(c(0, 0, 0, 0, 0, 1), Qneg, method = "L-BFGS-B",
                       lower = lo,
                       upper = c(1, 1 - 10^(-10), Inf, 1 - 10^(-10), Inf, Inf),
                       yex = yex[wh], ydep = X[wh]),
                 silent = TRUE)
        if (inherits(o, "try-error") || o$convergence !=
            0) {
          warning("Non-convergence in mexDependence 0105")
          errcode <- 105
          o <- as.list(o)
          o$par <- rep(NA, 6)
        }
      }
      else {
        # Append the profiled nuisance parameters (residual mean and SD).
        # NOTE(review): texmex's mexDependence builds this as
        # c(o$par[1:2], 0, 0, mean(Z), sd(Z)); using `start` here reports
        # the STARTING values of (a, b) rather than the optimised ones --
        # confirm against the texmex source.
        Z <- (X[wh] - yex[wh] * o$par[1])/(yex[wh]^o$par[2])
        o$par <- c(start, 0, 0, mean(Z), sd(Z))
      }
    }
    c(o$par[1:6], o$value)
  }
  # Conditioning variable on the transformed scale and its exceedances.
  yex <- c(x$transformed[, whch])
  wh <- yex > unique(dth)
  # Fit a conditional model for every dependent column.
  res <- sapply(1:length(dependent),
                function(X, dat, yex, wh, aLow, margins, constrain, v, maxit, start){
                  qfun(dat[, X], yex, wh, aLow, margins, constrain, v, maxit, start[, X])},
                dat = as.matrix(x$transformed[, dependent]), yex = yex,
                wh = wh, aLow = aLow, margins = marginfns[[1]], constrain = constrain,
                v = v, maxit = maxit, start = start)
  loglik <- -res[7, ]
  res <- matrix(res[1:6, ], nrow = 6)
  dimnames(res)[[1]] <- c(letters[1:4], "m", "s")
  dimnames(res)[[2]] <- dimnames(x$transformed)[[2]][dependent]
  #FUDGE TO FIX: columns where the fit failed (NA coefficients) are patched
  # with the coefficients of the preceding column (or the first successful
  # column when the failure is in column 1).
  ww <- which(is.na(res[,1]))
  ww0 <- ww-1
  if(any(ww0==0)){ww0[ww0==0] <- min(which(!is.na(res[,1])))}
  res[,ww] <- res[,ww0] #FUDGE TO FIX
  # Residuals Z for each dependent column, over the exceedance rows.
  gdata <- as.matrix(x$transformed[wh, -whch])
  tfun <- function(i, data_temp, yex, a, b, cee, d) {
    data_temp <- data_temp[, i]
    a <- a[i]
    b <- b[i]
    cee <- cee[i]
    d <- d[i]
    if (is.na(a))
      rep(NA, length(data_temp))
    else {
      # Near-zero a with negative b signals the alternative (c, d) form.
      if (a < 10^(-5) & b < 0)
        a <- cee - d * log(yex)
      else a <- a * yex
      (data_temp - a)/(yex^b)
    }
  }
  z <- try(sapply(1:(dim(gdata)[[2]]), tfun, data_temp = gdata,
                  yex = yex[wh], a = res[1, ], b = res[2, ],
                  cee = res[3,], d = res[4, ]))
  if (inherits(z, c("Error", "try-error"))) {
    errcode <- 106
    warning("Error in mexDependence 0106")
    z <- matrix(nrow = 0, ncol = dim(DATA)[[2]] - 1)
  }
  else if (!is.array(z)) {
    warning("Error in mexDependence 0107")
    z <- matrix(nrow = 0, ncol = dim(DATA)[[2]] - 1)
    errcode <- 107
  }
  dimnames(z) <- list(NULL, dimnames(x$transformed)[[2]][dependent])
  if(zspot){
    print(dim(z))
  }
  # Persist results into the global COEFFS array and Z list for later use by
  # mexMonteCarlo_slim / predict.mex_slim.
  COEFFS[,,whch] <<- res
  if(is.array(z) && !inherits(z, c("Error", "try-error"))){
    Z[[whch]] <<- z
  }
  res2 <- list(dth = unique(dth),
               dqu = unique(dqu), whch = whch,
               conditioningVariable = colnames(DATA)[whch],
               #loglik = loglik,
               marginfns = marginfns, constrain = constrain,
               v = v)
  #oldClass(res2) <- "mexDependence" # A bit of a lie, but helps things work.
  if(errcode != 0){print(errcode)}
  output <- list(margins = list(#transformed=x$transformed,
                                referenceMargin=x$referenceMargin),
                 dependence = res2,
                 errcode = errcode,
                 zspot = dim(z))
  #oldClass(output) <- "mex" # A bit of a lie but helps things work.
  output
}
mexMonteCarlo_slim <- function(marginfns, referenceMargin=NULL,
                               mth, mqu, nSample, mexList, mult = 10){
  # Extension of **mexMonteCarlo** from texmex, removing data from the
  # arguments. Generates a Monte Carlo sample of events: bootstrap rows of
  # DATA whose largest transformed component exceeds its dependence
  # threshold, then, for each conditioning site, replace the corresponding
  # rows with simulated events from the fitted H-T model (predict.mex_slim,
  # defined elsewhere in the project).
  #
  # Args:
  #   marginfns: margin transform functions (as built in mexDependence_slim).
  #   referenceMargin: optional reference margin, passed through.
  #   mth, mqu: marginal thresholds/quantiles, passed through.
  #   nSample: number of Monte Carlo events to generate.
  #   mexList: list of fitted dependence objects (one per site), each with
  #       $dependence$dth, $dependence$dqu and $dependence$constrain.
  #   mult: oversampling factor for the simulated events.
  #
  # Requires globals: DATA, TRANSFORMED (same shape as DATA), COEFFS and Z
  # (filled by mexDependence_slim), RCM (scenario identifier), and
  # predict.mex_slim. Uses the RNG (sample), so results depend on the seed.
  #
  # Returns a list: nR (replacements per site), MCsample (generated events),
  # whichMax, whichMaxAboveThresh.
  d <- length(mexList)
  # Per-site dependence thresholds and quantiles.
  dth <- sapply(mexList, function(l) l$dependence$dth)
  nData <- dim(DATA)[1]
  # For each observed event, the site with the largest transformed value,
  # and whether that maximum exceeds its site's dependence threshold.
  wMa <- apply(TRANSFORMED,1,which.max)
  wMaTa <- sapply(1:nData, function(i){TRANSFORMED[i, wMa[i]] >= dth[wMa[i]]})
  # Bootstrap nSample extreme events (with replacement).
  whch <- sample((1:nData)[wMaTa], size = nSample, replace = TRUE)
  MCsampleOriginal <- DATA[whch, ]
  MCsampleLaplace <- TRANSFORMED[whch, ]
  whichMax <- apply(MCsampleLaplace, 1, which.max)
  dqu <- sapply(mexList, function(l) l$dependence$dqu)
  whichMaxAboveThresh <- sapply(1:nSample,
                                function(i) MCsampleLaplace[i, whichMax[i]] >= dth[whichMax[i]])
  # Number of sampled events to replace per conditioning site.
  nReplace <- sapply(1:d, function(i){sum(whichMax==i & whichMaxAboveThresh)})
  if(RCM=="09"){
    # NOTE(review): hard-coded workaround -- site 676 is skipped for RCM
    # "09" (presumably a known-bad marginal/dependence fit); confirm this is
    # still required and that index 676 is valid for the current site set.
    nReplace[676] <- 0
  }
  nR <- rep(0, d)  # NOTE(review): initialised but never updated; nReplace is returned instead
  names(nR) <- names(DATA)
  # Timers for the progress readout below.
  STA <- Sys.time()
  STA0 <- Sys.time()
  for (i in which(nReplace>0)) {
    if((i < 10) | (i %% 10 == 0)){
      # Periodic progress/ETA readout.
      I <- difftime(Sys.time(), STA, units="secs")
      I0 <- difftime(Sys.time(), STA0, units="secs")
      print(paste("MMC processing, percent remaining", 100*round((d-i)/d ,2)))
      print(paste("Time remaining", round((d-i)/i * I0,2)))
      print(paste("Since last readout:", round(I,2)))
      STA <- Sys.time()
      print(STA)
    }
    replace <- whichMax == i & whichMaxAboveThresh
    if (nReplace[i] > 0) {
      # Simulate events conditioning on site i and overwrite the sampled
      # rows whose maximum occurs at site i. Wrapped in try() so a single
      # failing site does not abort the whole Monte Carlo run.
      try({
        MCsampleOriginal[replace, ] <- predict.mex_slim(whch=i,
                                                        referenceMargin=referenceMargin,
                                                        marginfns=marginfns,
                                                        constrain=mexList[[i]]$dependence$constrain,
                                                        coeffs_in = COEFFS[,,i],
                                                        z_in = Z[[i]],
                                                        pqu = dqu[i],
                                                        mth=mth,
                                                        mqu=mqu,
                                                        nsim = nSample * d * mult,
                                                        d=d, iii=i)[1:nReplace[i],]
      })
    }
  }
  res <- list(nR = nReplace, MCsample = MCsampleOriginal, whichMax = whichMax,
              whichMaxAboveThresh = whichMaxAboveThresh)
  # oldClass(res) <- "mexMC" # A bit of a lie, but keeps things smooth
  res
}
coef.migpd_slim <- function(mth, mqu, ...) {
  # Slimmed-down version of texmex's coef.migpd: the fitted marginal GPD
  # models are read from the global MODELS list (the $models item produced by
  # migpd) instead of being passed in, which keeps call overheads small.
  #
  # Returns a matrix with one column per location and rows:
  #   Threshold, P(X < threshold), sigma (back-transformed from log scale), xi,
  #   and the GPD upper end point.
  if (!exists("MODELS")) {
    stop("Needs MODELS object; a list of one marginal model per location, output from migpd, $models item.")
  }
  gpd_pars   <- sapply(MODELS, coef)                        # (phi, xi) per location
  upper_ends <- sapply(MODELS, endPoint, verbose = FALSE)   # GPD upper end points
  out <- rbind(mth, mqu, gpd_pars, upper_ends)
  dimnames(out) <- list(
    c("Threshold", "P(X < threshold)", "sigma", "xi", "Upper end point"),
    names(MODELS)
  )
  # The fitted scale is stored as phi = log(sigma); exponentiate row 3 in place.
  out["sigma", ] <- exp(out["sigma", ])
  out
}
revTransform_slim <- function (x, data_temp, qu, th = 0, sigma = 1, xi = 0,
                               method = c("mixture", "empirical")){
  # Extension of **revTransform** from texmex, removing data from the arguments.
  # Maps probabilities `x` back onto the data scale of `data_temp`: below the
  # marginal quantile `qu` an empirical quantile transform is used; above it,
  # when method = "mixture", the GPD(th, sigma, xi) tail is used instead.
  #
  # Args:
  #   x:         probabilities in (0, 1) to back-transform.
  #   data_temp: observed values for one margin (typically one column of DATA).
  #   qu:        marginal quantile level above which the GPD tail applies.
  #   th, sigma, xi: GPD threshold, scale and shape for the tail.
  #   method:    "mixture" (empirical body + GPD tail) or "empirical" only.
  #
  # Returns a numeric vector the same length as `x`, rank-matched to `x`.
  #
  # NOTE(review): DATA itself is never used here — only its existence is
  # checked (kept for backward compatibility with callers that rely on the
  # early failure). TODO: confirm the check is still wanted.
  if(!exists("DATA")){
    stop("Needs DATA object; one event per row, one location per column.")
  }
  method <- match.arg(method)
  n <- length(data_temp)
  # Empirical back-transform: map each probability to the order statistic with
  # (approximately) that plotting position, clamped to [1, n].
  px <- as.integer(pmax(pmin(round(x * (n + 1)), n), 1))
  res <- sort(data_temp)[px]
  if (method == "mixture") {
    i.rx <- (x >= qu) & (res > th)    # points in the GPD tail region
    if (is.na(i.rx[1])) {
      # Defensive branch: should not normally happen; drop into the debugger
      # when interactive, otherwise fall back to the purely empirical result.
      print(str(i.rx))
      if (interactive()) browser()
      i.rx <- rep(FALSE, length(i.rx))
    }
    # BUG FIX: was `if (sum(i.rx > 0))`, which only worked via the implicit
    # numeric->logical coercion of the count; the intended test is whether any
    # tail point exists.
    if (sum(i.rx) > 0) {
      # GPD quantiles for the tail probabilities, then splice them into the
      # tail positions of res while preserving the original ordering.
      wh <- texmex:::u2gpd(x[i.rx], p = 1 - qu, th = th, sigma = sigma,
                           xi = xi)
      rth <- res[i.rx]
      o <- order(rth)
      rth <- rth[o]
      rth[length(rth):(length(rth) - length(wh) + 1)] <- rev(sort(wh))
      rth <- rth[order(o)]
      res[i.rx] <- rth
    }
  }
  # Rank-match the output to x so the i-th smallest x gets the i-th smallest res.
  res[order(x)] <- sort(res)
  res
}
makeYsubMinusI <- function(i, z, v, y) {
  # Simulate the i-th unconditioned margin given the conditioning value y:
  # location term (from the dependence parameters in column i of v) plus the
  # scaled residual z. Columns of v are (a, b, c, d) as in the Heffernan-Tawn
  # model; column i of z holds the corresponding residuals.
  par  <- v[, i]
  zcol <- z[, i]
  if (is.na(par[1])) {
    # No fitted parameters for this margin: propagate NA through the sum.
    loc <- NA
  } else if (par[1] < 1e-5 && par[2] < 0) {
    # Near-zero a with negative b: use the c - d*log(y) parameterisation,
    # treating a negligible d as exactly zero.
    delta <- if (par[4] < 1e-5) 0 else par[4]
    loc <- par[3] - delta * log(y)
  } else {
    loc <- par[1] * y
  }
  loc + y^par[2] * zcol
}
predict.mex_slim <- function(whch, referenceMargin=NULL, marginfns,
                             constrain, coeffs_in, z_in,
                             mth, mqu, pqu = .99, nsim = 1000, trace=10,
                             smoothZdistribution=FALSE, d, iii, ...){
  # Extension of **predict.mex** from texmex, removing data from the arguments.
  # Reduces the number of outputs to lower overheads.
  # fetches DATA and TRANSFORMED from the global environment
  #
  # Simulates events in which column `whch` is the (extreme) conditioning
  # variable: draws the conditioning variable from its GPD tail and the other
  # columns from the fitted Heffernan-Tawn dependence model, then maps
  # everything back to the original data scale.
  #
  # Args:
  #   whch:      index of the conditioning column.
  #   marginfns: list with $p2q and $q2p transform functions (texmex-style).
  #   constrain: dependence-fit constraint flag (accepted for interface
  #              compatibility; not used directly below).
  #   coeffs_in: dependence coefficient matrix for this component (COEFFS[,,whch]).
  #   z_in:      residual matrix for this component (Z[[whch]]).
  #   mth, mqu:  per-column marginal thresholds / quantile levels.
  #   pqu:       quantile above which the conditioning variable is simulated.
  #   nsim:      number of candidate draws per rejection-sampling attempt.
  #   d:         total number of columns; iii: component index used in messages.
  #
  # Returns a matrix with columns reordered back to the original column order.
  if(!exists("DATA")){
    stop("Needs DATA object; one event per row, one location per column.")
  }
  if(!exists("TRANSFORMED")){
    stop("Needs TRANSFORMED object; laplace-transformed output from mexTransform, $transformed item")
  }
  #   if(is.null(referenceMargin)){
  #     migpd <- list(transformed=TRANSFORMED)
  #   } else {
  #     migpd <- referenceMargin
  #   }
  # NOTE(review): these self-assignments force evaluation of the argument
  # promises before MakeThrowData captures them; otherwise they are no-ops.
  marginfns <- marginfns
  constrain <- constrain
  ################################################################
  # Inner worker: rejection-samples events where the conditioning variable is
  # the componentwise maximum, then back-transforms to the data scale.
  MakeThrowData <- function(dco,z,coxi,coxmi){
    distFun <- marginfns$q2p
    # Drop all-NA rows/columns of the residuals, then bootstrap nsim rows.
    z <- z[!is.na(z[,1]), !is.na(z[1,])]
    z <- as.matrix(z[ sample(1:( dim(z)[1]), size=nsim, replace=TRUE) ,])
    if(smoothZdistribution){
      # Optional kernel smoothing of the resampled residual distribution.
      z <- apply(z,2,function(x)x + rnorm(length(x),0,bw.nrd(x)))
    }
    tick <- rep(FALSE, nsim)
    MAXIT <- 10
    nit <- 0
    KEEP <- TRUE
    # Retry until at least 2 draws have the conditioning variable as the
    # maximum, or MAXIT attempts have been made.
    while(sum(tick)<2 & nit < MAXIT){
      # Uniforms above both the dependence quantile and pqu, mapped to the
      # Laplace scale for the conditioning variable.
      ui <- runif(nsim , min = max(c(mqu[whch], pqu)))
      y <- marginfns$p2q(ui)
      # Simulate every other margin given y.
      ymi <- sapply(1:(dim(z)[[2]]), makeYsubMinusI, z=z, v=dco , y=y)
      # Accept draws where the conditioning variable exceeds all others.
      tick <- y > apply(ymi,1,max)
      tick[is.na(tick)] <- FALSE
      #print(sum(tick))
      if (sum(tick) >= 2) {
        ymi <- ymi[tick,]
        y <- y[tick]
        ui <- ui[tick]
      }
      nit <- nit+1
      if (nit == MAXIT) {print(paste("Lots of retries for", iii))}
    }
    if(nit > MAXIT-1){
      # Gave up: keep two placeholder rows so downstream indexing works, and
      # flag the result so it is replaced by NAs below.
      print(paste("MAXIT hit: sum(tick)= ",sum(tick)))
      KEEP <- FALSE
      tick[1:2] <- TRUE
      tick[-(1:2)] <- FALSE
      ymi <- ymi[1:2,]
      y <- y[1:2]
      ui <- ui[1:2]
    }
    # Back to probability scale; clamp just below 1 to keep revTransform sane.
    xmi <- apply(ymi, 2, distFun)
    xmi[xmi > (1 - 1e-10)] <- (1 - 1e-10) # ! # FUDGE TO AVOID EXACTLY 1
    # Conditioning variable: GPD quantile on the original data scale.
    xi <- texmex:::u2gpd( ui, p = 1 - mqu[whch], th = mth[whch],
                          sigma = coxi[1], xi = coxi[2] )
    # Remaining margins: empirical/GPD mixture back-transform, column by column.
    for( i in 1:( dim(xmi)[[2]] ) ){
      if(all(is.na(xmi[,i]))){next}
      xmi[, i] <- revTransform_slim(
        xmi[, i], as.matrix(DATA[, -whch])[, i],
        th = mth[-whch][i],
        qu = mqu[-whch][i],
        sigma=coxmi[1, i], xi=coxmi[2, i])
    }
    sim <- data.frame( xi , xmi)
    names( sim ) <- c( colnames( DATA )[ whch ],
                       colnames( DATA )[ -whch ])
    if(!KEEP){
      # Rejection sampling failed: signal with an all-NA 2-row matrix.
      sim <- matrix(NA, nrow=2, ncol=d)
    }
    sim
  }
  ################################################################
  bootRes <- NULL
  # Marginal GPD (sigma, xi) for the conditioning column and all the others.
  cox <- coef.migpd_slim(mth, mqu)[3:4, whch]
  coxmi <- as.matrix(coef.migpd_slim(mth, mqu)[3:4, -whch])
  sim <- MakeThrowData(dco=coeffs_in,
                       z=z_in,
                       coxi=cox,
                       coxmi=coxmi)
  # Columns come out as (whch, everything else); reorder to the original order.
  as.matrix(sim[, order(c(whch, c(1:d)[-whch]))])
  #res
}
|
f302f1ebea0fa4273b3d14d72891372efc69eefc | 26987ef5df4d9c081b77d438cd3443fc8c4a4ca2 | /plot3.R | 82a05d32714b0c7c3aab82637fc9aa130d3db1cc | [] | no_license | joexu28/Exploratory-Data-Analysis-Project1 | db7d8ec89f57cf62bbb69ae2f6690347a9b0b218 | 0522c8b16d1064f2cbd635748228e1024e421fb5 | refs/heads/master | 2021-04-05T06:30:35.871199 | 2020-03-19T14:57:18 | 2020-03-19T14:57:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,179 | r | plot3.R | library(dplyr)
library(data.table)
# Download the UCI household power consumption data once, caching the zip.
# NOTE(review): the zip is downloaded but never explicitly unzipped into
# "./exdata_data_household_power_consumption/" — the setwd() below assumes
# that directory already exists. TODO: confirm against the local layout.
if (!file.exists("HPC.zip")){
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "HPC.zip")
  unzip(zipfile = "HPC.zip")
}
setwd("./exdata_data_household_power_consumption/")
# Semicolon-separated file; "?" marks missing values.
HPC <- fread("household_power_consumption.txt", stringsAsFactors = FALSE, na.strings = "?")
# Keep only the two days of interest (dates are day/month/year).
patterns <- c("^1/2/2007", "^2/2/2007")
HPC <- filter(HPC, grepl(paste(patterns, collapse = "|"), HPC$Date) )
# Combine date and time into a single POSIXlt timestamp for the x axis.
HPC$dateTime <- strptime(paste(HPC$Date, HPC$Time, sep = " "),
                         format = "%d/%m/%Y %H:%M:%S")
png("plot3.png", width=480, height=480)
#windows(width = 480, height = 480)
# Empty frame first, then one line per sub-metering channel.
# NOTE(review): "Engery" in the y label is a typo in the output string; left
# unchanged here since it is user-facing plot text, not a comment.
plot(x=HPC$dateTime, y=HPC$Sub_metering_1, type = "n", xlab = "", ylab = "Engery sub metering")
points(x=HPC$dateTime, y=HPC$Sub_metering_1, col="black", type = "l")
points(x=HPC$dateTime, y=HPC$Sub_metering_2, col="red", type = "l")
points(x=HPC$dateTime, y=HPC$Sub_metering_3, col="blue", type = "l")
legend("topright", col = c("black", "red", "blue"), lty = c(1, 1), lwd = c(1, 1),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
ec461569649df020119f1562628ef13b0fc1c865 | e668b887e946b7c8ddee5d621cf4eadd0d4770cb | /basics/04_tests.R | 43b12bceaad753597cb30d82776e0077cda18f0d | [] | no_license | LisaGotzian/statistics-course-r | bfd4fa50aee97e248ab12b9bd16c82682a00f77e | 76127e36c3923084f6dd4d3687813a1a1a59d9fe | refs/heads/master | 2020-06-05T19:05:24.818607 | 2019-07-19T10:13:18 | 2019-07-19T10:13:18 | 192,519,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,097 | r | 04_tests.R | #------------------ Simple tests in R ------------------
#
# Goal: show t-tests and other simple tests
#
# Lisa Gotzian, April 24, 2019
# Shapiro-Wilk normality test ----
# Null hypothesis: the sample comes from a normal distribution.
sample1 <- rnorm(70)            # standard normal draws (mean = 0, sd = 1 are the defaults)
shapiro.test(sample1)           # check the slides: what does this test?

# F-test for equality of variances ----
# Compares the variances of two independent normal samples.
sample1 <- rnorm(70)
sample2 <- rnorm(80, mean = 1)  # shifted mean, same unit variance
var.test(sample1, sample2)      # what does this test?
# Chi-Square test of independence: coffee drinking vs. arrival time.
coffeeEarly <- as.table(rbind(c(40, 60), c(30, 20)))
dimnames(coffeeEarly) <- list(coffee = c("coffee", "non-coffee"),
                              time = c("early","late"))
chisq.test(coffeeEarly)
# BUG FIX: the original called chisq.test(M), but no object `M` is defined
# anywhere in this script; the contingency table is named coffeeEarly.
(Xsq <- chisq.test(coffeeEarly)) # Prints test summary
Xsq$observed   # observed counts (same as coffeeEarly)
Xsq$expected   # expected counts under the null
Xsq$residuals  # Pearson residuals
Xsq$stdres     # standardized residuals
# T tests ----
# Two independent samples of arrival-time deviations.
sample1 <- c(0.3, 5.0, 6.8, 7.0, -0.1, -0.5, 0.5, 0.0, 0.0, -2.0)
sample2 <- c(-3.0, -2.5, 0.0, 5.5, -0.1, -2.5, -3.0, -7.0, -4.5, 0.0)
t.test(sample1, sample2) # What's the result?

# Reshape to long format so both samples can share one boxplot.
arrival <- cbind(sample1, sample2)
library(reshape2)
arrival <- melt(arrival)
boxplot(value ~ Var2, data = arrival)

# Paired t-test on R's built-in sleep data (same subjects, two drugs).
data(sleep)
View(sleep)
boxplot(extra ~ group, data = sleep, main = "Effect of sleep-fostering drugs")
#abline(h = tapply(sleep$extra, sleep$group, mean))
t.test(extra ~ group, data = sleep, paired = TRUE)
plot(extra ~ group, data = sleep)
tapply(sleep$extra, sleep$group, mean) # mean by group
tapply(sleep$extra, sleep$group, hist) # one histogram per group
# Wilcoxon rank-sum test (Mann-Whitney U) ----
# Nonparametric comparison of two independent samples; no normality assumed.
sample1 <- c(1.3, 1.7, 3.7, 2.7, 3.7, 3.7, 2.3, 2.0, 1.3, 1.7)
sample2 <- c(2.3, 2.7, 2.0, 2.0, 1.7, 1.3, 3.0, 3.0, 1.3, 3.7)
# Ties are present, so R warns and uses the normal approximation for p.
wilcox.test(sample1, sample2)
|
30eea81ccd403daa3857f53e10da5909ab093c48 | f294979b4774239fad1b3487bcb01ff103e20a44 | /R/analysis.r | 6cb8bd3d0d08422d1fba1a3f4663d6d38fcb63d7 | [] | no_license | lvanden/fortune500 | 8a1b40a84e4659ae5dc6e0162a1e3a7611274f87 | 34693b4c2b20e69c1ed53039f80959239f43d30f | refs/heads/main | 2023-02-23T03:36:36.354761 | 2021-01-21T02:29:28 | 2021-01-21T02:29:28 | 325,390,241 | 0 | 0 | null | 2021-01-21T02:29:29 | 2020-12-29T21:26:10 | R | UTF-8 | R | false | false | 9,566 | r | analysis.r | library(dplyr)
library(tidyverse)
library(ggplot2)
library(viridis)
# read in csv file
company_list <- read_csv("fortune_500_companies.csv")

# Add empty bars for spacing
# BUG FIX: this helper was originally defined *after* the pipeline that calls
# it, which fails when the script is sourced top-to-bottom in a fresh session;
# it is now defined before its first use.
add_empty_bars <- function (company_df) {
  # Append two blank spacer rows per policy group so the circular bar chart
  # shows a visual gap between the policy groups.
  company_df %>%
    add_row(company = "", policy = "no DMARC record") %>%
    add_row(company = "", policy = "no DMARC record") %>%
    add_row(company = "", policy = "p=none") %>%
    add_row(company = "", policy = "p=none") %>%
    add_row(company = "", policy = "p=quarantine") %>%
    add_row(company = "", policy = "p=quarantine") %>%
    add_row(company = "", policy = "p=reject") %>%
    add_row(company = "", policy = "p=reject")
}

# parse DMARC record ----
company_df <- company_list %>%
  filter(rank <= 100) %>%
  dplyr::mutate(
    # remove quotes around record and remove new lines
    record_string = str_replace_all(gsub('"', '', dmarc_record), '\\n', ''),
    policy = str_extract(dmarc_record, "p=[a-z]+"),
    pct = str_extract(dmarc_record, "pct=\\d+"),
    rua = str_extract(record_string, "rua=mailto:[^;]+"),
    ruf = str_extract(record_string, "ruf=mailto:[^;]+")
  ) %>%
  # parse rua ----
  select(company, policy, domain, rank, rua) %>%
  # add empty bars function ----
  add_empty_bars() %>%
  dplyr::mutate(
    # split data for multiple addresses
    rua_domains = strsplit(rua, split = ","),
    # Missing policy means no DMARC record was published at all.
    policy = case_when(
      is.na(policy) ~ "no DMARC record",
      TRUE ~ policy
    )
  ) %>%
  arrange(policy, company)

# give each company a stable integer id (its angular position on the chart)
company_df$id <- seq_len(nrow(company_df))
# transform data ----
# transform data from wide to long: one row per (company, rua address) pair.
# Each metadata column is repeated once per address in that company's
# rua_domains list-column.
rua_df = data.frame(
  company = rep(company_df$company, sapply(company_df$rua_domains, length)),
  policy = rep(company_df$policy, sapply(company_df$rua_domains, length)),
  domain = rep(company_df$domain, sapply(company_df$rua_domains, length)),
  rank = rep(company_df$rank, sapply(company_df$rua_domains, length)),
  rua = rep(company_df$rua, sapply(company_df$rua_domains, length)),
  id = rep(company_df$id, sapply(company_df$rua_domains, length)),
  rua_domain = unlist(company_df$rua_domains)
) %>%
  # trim rua down to just domain
  group_by(company) %>%
  mutate(
    rua_count = row_number(),
    # Keep only the trailing "name.tld" part of the address.
    # NOTE(review): the dot in "\\w*.\\w*$" is unescaped, so it matches any
    # character — TODO confirm "\\w*\\.\\w*$" was intended.
    rua_domain = str_extract(rua_domain, "\\w*.\\w*$"),
    rua_count = as.character((rua_count)),
    # Constant bar height for every row.
    # NOTE(review): both case_when branches yield 30, so this is effectively
    # `value = 30`; presumably a leftover from an earlier variable-height idea.
    value = case_when(
      is.na(rua_domain) ~ 30,
      TRUE ~ 30
    ),
    # add report receivers: numeric code for the aggregate-report vendor
    # (2..7 = known vendors, 1 = self/other, NA = no reporting address).
    rua_id = dplyr::case_when(
      is.na(rua_domain) ~ NA_real_,
      rua_domain == "dmarcian.com" | rua_domain == "dmarcian.eu" ~ 2,
      rua_domain == "agari.com" ~ 3,
      rua_domain == "proofpoint.com" ~ 4,
      rua_domain == "vali.email" ~ 5,
      rua_domain == "cisco.com" ~ 6,
      rua_domain == "returnpath.net" ~ 7,
      TRUE ~ 1
    ),
    # add policy id (0 = no record / unknown)
    policy_id = dplyr::case_when(
      policy == "p=none" ~ 1,
      policy == "p=quarantine" ~ 2,
      policy == "p=reject" ~ 3,
      TRUE ~ 0
    )
  ) %>%
  dplyr::ungroup()

# make rua_id discrete for ggplot (so it maps onto a discrete fill scale)
rua_df$rua_id <- as.factor(rua_df$rua_id)
# Prepare data for plot
data <- rua_df

# Get the name and the y position of each label (company name), plus the
# text angle/justification so labels follow the circular layout.
# BUG FIX: the original mutate() referenced nrow(label_data) inside the very
# pipeline that creates label_data, which errors in a fresh session. After
# ungroup(), dplyr::n() gives the same total bar count.
label_data <- data %>%
  dplyr::group_by(id, company) %>%
  summarize(tot = sum(value)) %>%
  dplyr::ungroup() %>%
  mutate(
    number_of_bar = dplyr::n(),
    angle = 90 - 360 * (id - 0.5) / number_of_bar,
    # Flip labels on the left half of the circle so they stay readable.
    hjust = ifelse(angle < -90, 1, 0),
    angle = ifelse(angle < -90, angle + 180, angle)
  ) %>%
  dplyr::select(-number_of_bar)
# Set a number of 'empty bar' to add at the end of each group
empty_bar <- 2

# prepare a data frame for base lines: the start/end bar index of each policy
# group (offset by the spacer bars) and a midpoint for the group title.
base_data <- company_df %>%
  group_by(policy) %>%
  # empty bars for spacing ----
  summarize(start=min(id)+ empty_bar, end=max(id)) %>%
  #summarize(start=min(id), end=max(id)) %>%
  rowwise() %>%
  mutate(title=mean(c(start, end)))

# prepare a data frame for grid (scales)
grid_data <- base_data
# NOTE(review): `1:nrow(grid_data)-1` parses as (1:n) - 1, i.e. 0:(n-1); the
# zero index is silently dropped when subscripting, so this circularly shifts
# `end` by one group — the standard r-graph-gallery circular-barplot idiom.
grid_data$end <- grid_data$end[ c( nrow(grid_data), 1:nrow(grid_data)-1)] + 1
grid_data$start <- grid_data$start - 1
grid_data$end[1] <- 1
# Make the plot
# Colour-blind-friendly palette; one colour per rua_id level (1..7).
# NOTE(review): scale_fill_manual below lists 8 labels ("no reporting" last)
# against these 7 values — the 8th label presumably covers the NA fill; TODO
# confirm the legend renders as intended.
colorBlindGrey8 <- c("#999999", "#009E73", "#56B4E9", "#0072B2",
                     "#F0E442", "#E69F00", "#CC79A7")

# Create circular bar chart to display report receivers.
# Relies on the globals grid_data, label_data and base_data built above;
# only the bar data itself is passed in. Saves the figure as "test.png".
create_report_receivers_plot <- function(data) {
  p <- ggplot(data) +
    # Add the stacked bar
    geom_bar(aes(x=as.factor(id), y=value, fill=rua_id), stat="identity") +
    scale_fill_manual(
      values = colorBlindGrey8,
      name = "DMARC XML Receivers",
      labels = c("self",
                 "dmarcian",
                 "Agari",
                 "Proofpoint",
                 "Vailmail",
                 "Cisco",
                 "Validity",
                 "no reporting"
      )
    ) +
    # Add gray hash lines (circular gridlines at heights 0/30/60/90).
    geom_segment(data=grid_data, aes(x = end, y = 0, xend = start, yend = 0),
                 colour = "grey", alpha=1, size=0.5 , inherit.aes = FALSE ) +
    geom_segment(data=grid_data, aes(x = end, y = 30, xend = start, yend = 30),
                 colour = "grey", alpha=1, size=0.5 , inherit.aes = FALSE ) +
    geom_segment(data=grid_data, aes(x = end, y = 60, xend = start, yend = 60),
                 colour = "grey", alpha=1, size=0.5 , inherit.aes = FALSE ) +
    geom_segment(data=grid_data, aes(x = end, y = 90, xend = start, yend = 90),
                 colour = "grey", alpha=1, size=0.5 , inherit.aes = FALSE ) +
    # Negative lower limit leaves a hole in the middle of the circle.
    ylim(-200,max(label_data$tot, na.rm=T)+80) +
    theme_minimal() +
    theme(
      legend.key = element_rect(fill = "white", colour = "black"),
      legend.title = element_text(color = "black", size = 12),
      legend.text = element_text(color = "black", size = 10),
      axis.text = element_blank(),
      axis.title = element_blank(),
      panel.grid = element_blank(),
      plot.margin = unit(rep(-1,4), "cm")
    ) +
    coord_polar() +
    # NOTE(review): title says "Top 50" but the data was filtered to rank <= 100
    # earlier in the script — TODO reconcile.
    labs(title = "2020 DMARC Status for the Top 50 Fortune '500' Companies") +
    # Add labels on top of each bar (company name)
    geom_text(data=label_data, aes(x=id, y=tot+10, label=company, hjust=hjust),
              color="black",
              fontface="bold",
              alpha=0.6,
              size=4,
              angle=label_data$angle,
              inherit.aes = FALSE
    ) +
    # Add base line information (policy groups)
    geom_segment(data=base_data, aes(x = start, y = -5, xend = end, yend = -5),
                 colour = "black",
                 alpha=0.8,
                 size=0.6 ,
                 inherit.aes = FALSE
    ) +
    geom_text(data=base_data, aes(x = title, y = -18, label=policy),
              hjust=c(1,1,0,0),
              colour = "black",
              alpha=0.8,
              size=4,
              fontface="bold",
              inherit.aes = FALSE
    )

  # Save as png
  ggsave(p, file="test.png", width=15, height=10)
}
# Policy plot ----
# Circular bar chart coloured by DMARC policy (one bar per company).
# Relies on the globals label_data and base_data built above; saves the
# figure as "dmarc_policies.png".
create_policy_plot <- function(data) {
  # update data for empty bars
  # BUG FIX: the result of this pipeline was originally discarded (it was
  # never assigned back to `data`), so the spacer rows kept their per-group
  # placeholder policies instead of becoming "no DMARC record" (white).
  # Assign the result and drop the grouping before subsetting below.
  data <- data %>%
    group_by(id) %>%
    mutate(
      policy = if_else(
        company == "",
        "no DMARC record",
        policy
      )
    ) %>%
    ungroup()

  # Remove duplicate orgs so bar height is uniform
  data <- data[!duplicated(data$id), ]

  policy_plot <- ggplot(data) +
    # Add the stacked bar
    geom_bar(aes(x=as.factor(id), y=value, fill=policy), colour="black", stat="identity") +
    # One colour per policy level: white = no record, red/orange/green for
    # none/quarantine/reject.
    scale_fill_manual(
      values = c("white", "#E50F0F", "#EC9312", "#117733"),
      name = "Policy"
    ) +
    # Negative lower limit leaves a hole in the middle of the circle.
    ylim(-200,max(label_data$tot, na.rm=T)+80) +
    theme_minimal() +
    theme(
      legend.key = element_rect(fill = "white", colour = "black"),
      legend.title = element_text(color = "black", size = 18),
      legend.text = element_text(color = "black", size = 14),
      axis.text = element_blank(),
      axis.title = element_blank(),
      # BUG FIX: removed the trailing comma that followed this argument; an
      # empty trailing argument can raise an "argument is empty" error here.
      panel.grid = element_blank()
    ) +
    coord_polar() +
    # Add labels on top of each bar (company name)
    geom_text(data=label_data, aes(x=id, y=30+10, label=company, hjust=hjust),
              color="black",
              fontface="bold",
              alpha=0.6,
              size=4,
              angle=label_data$angle,
              inherit.aes = FALSE
    ) +
    # Add base line information (policy groups)
    geom_segment(data=base_data, aes(x = start, y = -5, xend = end, yend = -5),
                 colour="black",
                 alpha=0.8,
                 size=0.6 ,
                 inherit.aes = FALSE
    ) +
    geom_text(data=base_data, aes(x = title, y = -18, label=policy),
              hjust=c(1,1,0,0),
              colour = "black",
              alpha=0.8,
              size=4,
              fontface="bold",
              inherit.aes = FALSE
    )

  # Save as png
  ggsave(policy_plot, file="dmarc_policies.png", width=15, height=10)
}
# Call function to create bar plot
# either create_report_receivers_plot or create_policy_plot passing in data as an argument
# BUG FIX: the original called create_report_receivers(data), but no function
# of that name exists in this script; the plotting function defined above is
# create_report_receivers_plot.
create_report_receivers_plot(data)
|
b032851dd8def8c5483ab7f463c61ca0beb2809c | 83b714ef66b35b66e3e6f899b9f4d69a53cb7e9b | /pplots/figure/PPlot2.R | 013d75041ff0e25fd83449abc7bdf49b720cd377 | [] | no_license | ultraviolet3/pplots | 7fb278bb37083e460e04dea15300bcbf1e324fc3 | 3146d8f1e0150f071a12b1038b64468db9730a1b | refs/heads/master | 2021-01-10T02:02:22.446800 | 2015-10-09T13:52:29 | 2015-10-09T13:52:29 | 43,953,742 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 878 | r | PPlot2.R | ec <- function() {
  # Body of ec(): plot global active power for 1-2 Feb 2007 (Coursera EDA
  # "Plot 2"). The function header is on the preceding line.
  # Download the data only if the named zip is not present.
  # NOTE(review): the download goes to a tempfile and is unzipped into the
  # working directory — the zip named in the file.exists() check is never
  # created, so this branch re-downloads on every fresh run. TODO confirm.
  if(!file.exists("exdata-data-household_power_consumption.zip")) {
    temp <- tempfile()
    download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
    # unzip() returns the path(s) of the extracted file(s).
    file <- unzip(temp)
    unlink(temp)
  }
  else {
    # Zip already present: assume the txt was extracted into the working dir.
    wd=getwd()
    fil="household_power_consumption.txt"
    file=paste(wd,fil,sep="/")
  }
  # Semicolon-separated values; "?" marks missing data.
  pcon= read.csv(file,header=T,sep=";",stringsAsFactors = T, na.strings="?", blank.lines.skip = T)
  # Dates are day/month/year; keep only 1 and 2 February 2007.
  pcon$Date <- as.Date(pcon$Date, format="%d/%m/%Y")
  pcon= pcon[(pcon$Date=="2007-02-01") | (pcon$Date=="2007-02-02"),]
  # Combine date and time into a POSIXct timestamp for the x axis.
  # NOTE(review): the format string "%H: %M: %S" contains stray spaces;
  # strptime treats whitespace in the format leniently on most platforms, but
  # "%H:%M:%S" is what the data actually looks like — TODO confirm it parses.
  pcon$DT<-paste(pcon$Date,pcon$Time,sep=" ")
  pcon$DT<-as.POSIXct(strptime(pcon$DT, format="%Y-%m-%d %H: %M: %S"))
  plot(pcon$DT,pcon$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
  # Copy the on-screen plot to a PNG, then close the PNG device.
  dev.copy(png,file="Plot2.png",width=480,height=480)
  dev.off()
}
|
34278ba40f75a44580f5ef8d4a9efa701e9835fd | 144c098014b96b1c63daac5f0b369c3335ba6fb1 | /datawrangling.R | a38b99404199643fbad7f39753d457a7e731c909 | [] | no_license | MelainaWright/test | a03fe53ed9825cc93e59fb9d8ad40cb0f43a9722 | f467ba1c0c11abaa84c9590c3b915d1f742f313b | refs/heads/master | 2021-01-20T01:34:44.246038 | 2017-06-09T15:43:06 | 2017-06-09T15:43:06 | 89,299,563 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,880 | r | datawrangling.R | #############################################################
#ESM 262
#4/28/17
#datawrangling part 1 - importing and tidying
#book: R for data wrangling, Grolemund and Wickham
install.packages("tidyverse")
library(tidyverse)
###############################################################
# Section: tibbles ----
#tibbles
#tibble is like a data frame but easier to work with
X = as_tibble(iris) #alt minus to get the arrow
#factors can only have the names in the set list. it's a controlled character column. can't create a new column and make setosa = shark
#create tibble from scratch; columns are built sequentially, so z can refer
#to x and y defined just above it.
tibble(
  x=1:5,
  y=1,
  z=x^2 + y
) #length of y is 1 so it is recycled to length 5 to match x; y=1:3 wouldn't work; so unlike with dataframe, a column can have length 1 or the length of x
#can have a column name in a tibble that is a number or symbol (unlike dataframe)
tb <- tibble(
  ":)" = "smile")
df <- tibble(
  x=runif(5),
  y=rnorm(5))
#put in console
#Wright, Melaina (""sharklady""), "11,3 Foobar Terrace"
#make it understand that quotes are not to protect the data but are part of the data name
#doubled double quote = single quote. single double quote quotes everything until the next double quote
getwd()
# NOTE(review): hard-coded personal path; fails on any other machine.
setwd("C:/Users/melai/Documents/BrenSpring2017/ESM262/test2/wrangle")
heights = read.csv("data/heights.csv")
#can tell it how to read a column in as numeric or whatever
#####################################################################
# Section: importing messy files with readr ----
#importing terrible data
##excel would read the bottom stuff as a csv but the beginning metadata, it couldn't deal with this
#; kelp abundance in this transect
#; collected in 2001
#;
#transect,point,abundance
#786,8,5.5
# skip = n ignores the first n lines (metadata) of the input.
read_csv("The first line of metadata
  The second line of metadata
  x,y,z
  1,2,3", skip=2) #ignore first two lines of input
# comment = "#" drops any line starting with that character.
read_csv("# A comment I want to skip
  x,y,z
  1,2,3", comment= "#")
#has no column names, don't treat the first row as column names
read_csv("1,2,3\n4,5,6", col_names = FALSE)
#has no column names, don't treat the first row as column names, give it column names
read_csv("1,2,3\n4,5,6", col_names = c("x", "y", "z"))
##########################################################################
# Section: parsing ----
#importing terrible data example/parsing
ca = read_csv("data/CA_Features_20170401.zip")
#unzipped it but is not delimited correctly — the file is pipe-separated.
ca = read_delim("data/CA_Features_20170401.zip", delim="|")
#"parsed with" in the output reports which parser produced each column
View(ca)
#parsing - breaking text up into typed chunks and naming each chunk
#have different parse_* functions for each target type
x = parse_logical(c("TRUE","FALSE", "NA")) #works, returns a vector of logical values
str(x) #more verbose representation of what it is
y= parse_logical(c("TRUE","FALSE","NA", 42)) #didn't work: 42 is not a logical
problems(y)
View(problems(y)) #expected 1/0/T/F/TRUE/FALSE but got the value 42
#when reading characters, readr assumes they are UTF-8 encoded (look it up). R can read tildes and other non-ASCII language characters
#want to test whether a vector is a valid factor; build a vector of the acceptable values (levels)
fruit <- c("apple","banana")
parse_factor(c("apple","banana","sharks"), levels = fruit) #see if they are factors that meet our constraint of having to be apple and banana
Z = parse_factor(c("apple","banana","banana"), levels = fruit) #prints the levels when you type Z into the console
parse_datetime("2010-10-01") #ISO order: year, month, day
#check whether a vector has the types of data you think it does, then convert if possible
#convert a vector of character values into other types where possible
guess_parser("2017-04-28") #tells you what it thinks it is. it says it is a date
guess_parser("3/7/2017") #thinks it is a character, not a date
str(parse_guess("2017-04-28")) #says it knows it is a date
#force read_csv to apply specific column types on import
challenge <- read_csv(readr_example("challenge.csv")) #there are issues starting in row1001 where the x column doesn't look like an integer, instead it has "trailing characters" so use a double (a number like 1.123 or 45849; don't have to have decimals)
#force columns to be read as a different type; read the x column with this parser and the y column with that one
challenge <- read_csv(
  readr_example("challenge.csv"),
  col_types = cols(
    x = col_double(),
    y = col_character()
  )
)
tail(challenge) #last few values look like dates so change y to dates
challenge <- read_csv(
  readr_example("challenge.csv"),
  col_types = cols(
    x = col_double(),
    y = col_date()
  )
)
#if we had done challenge2 <- read_csv(readr_example("challenge.csv"), guess_max = 1001) we wouldn't have had to guess manually, because the type inference would have seen the trouble rows and chosen double and date itself
#######################################################################
# Section: sentinel / missing values ----
#Unknown and 99999999 (for age) and 0 (for lat and long coordinates) values
#"unknown" value in data
ca$PRIMARY_LAT_DMS <- parse_character(ca$PRIMARY_LAT_DMS, na="Unknown") #got a vector back; need to stuff it back into the original data, so stuff it in; take this guy and then replace itself
#what if we have something like 9999999 as a sentinel for an unknown age: do the same thing, just put 9999999 instead of "Unknown"
#successfully gets rid of unknowns and NA; NA dropped out because we were doing a comparison/sort
#can read it in and change unknowns to NAs at the same time
#######################################################################
# Section: writing data back out ----
#write data after cleaning it up
getwd()
write_csv(challenge,"challenge.csv") #CSV forgets that x is double (not integer) and that y is a date
read_csv("challenge.csv") #can then read it in as double and date
library(feather)
write_feather(challenge, "challenge.feather") #save tibble as a feather and then it can be read into python
read_feather("challenge.feather") #has metadata in it and will remember that this column is a date and this one is a double, etc.
###########################################################################################
# Section: tidying — table rearrangement ----
#tidying: table rearrangement
#got a tibble, types of data are correct, dealt with missing data; but it is not organized the right way for what you want to do with it
#example of badly arranged data; the content is the same but arranged differently in the following tables:
View(table1)
View(table2) #column describes what the next column means
View(table3) #two values smudged together in same cell
View(table4a) #variable values get converted into column names
View(table4b)
#want to reformat everything to the format of table1. Every variable has its own column. Every obs has its own row. Each cell has one value -> first normal form
#each row = a coherent observation, select rows
#aggregate by variable in columns
#every cell has a value that is calculable
#gathering######################################
#gathering for the table4a example: collapse the year columns into one column
tidy4a <- table4a %>%
  gather(`1999`, `2000`, key = "year", value = "cases")
#%>% takes what comes out of the previous step and makes it the first argument of the next function
#same thing; the two column names being gathered (1999 and 2000) require backtick quotes, because otherwise R will treat them as numbers
tidy4a <- gather(table4a, `1999`, `2000`, key = "year", value = "cases") #key is the new column that holds the old column names, and "cases" holds the values those columns contained
View(tidy4a)
tidy4b <- table4b %>%
  gather(`1999`, `2000`, key = "year", value = "population")
#join tables together###########################
left_join(tidy4a, tidy4b)
#spreading#######################################
#split rows apart: when a value in one column is dependent on the value in another column / can't interpret one column without another column. ex. 745 makes no sense until you see whether it is cases; want a column for cases and one for population
spread(table2, key = "type", value = "count")
#separating####################################
#when the data has multiple values in one cell (ex. cases of disease/population; ex. name is Frew,James and want first and last name)
#R looks for a character that doesn't belong and recognizes it as the delimiter
#note, cases and population come out as characters (they are left justified)
table3 %>%
  separate(rate, into = c("cases", "population"))
#add sep="/" at the end to force a specific delimiter
#add convert=TRUE at the end to make it convert population and cases to numbers
table3 %>%
  separate(rate, into = c("cases", "population"), sep="/", convert=TRUE)
#can do sep=2 so it starts separating after the second character
###foo %>% bar(buz,...) is the same as bar(foo, buz,...)
#unite############################################
#taking values and slamming them into one column
#specify the columns you want and then rename the result to year
table5 %>%
  unite(new, century, year)
table3 %>%
  separate(year, into=c("century","year"), sep=2) %>%
  unite(year, century, year, sep="")
#sep="" means no separator character in the united year
#unite does the direct opposite of separate
################################################################################
# Section: missing values ----
#missing values
#NA (no data here/missing value)
#explicitly say there is nothing here with an NA vs. the row just being absent (implicit)
#NA means that you say there is nothing there; if the row is absent it is implicitly missing
stocks <- tibble(
  year = c(2015, 2015, 2015, 2015, 2016, 2016, 2016),
  qtr = c( 1, 2, 3, 4, 2, 3, 4),
  return = c(1.88, 0.59, 0.35, NA, 0.92, 0.17, 2.66)
)
View(stocks)
#an explicit NA puts a value in the table that says "not there"
#vs. in 2016, we know there was a first quarter but the row is just missing (only three quarters present instead of four)
#forcing the table to populate missing combinations with NA (explicit missing data table)
stocks %>%
  spread(year, return)
#get rid of any spurious NA columns; drop any obs that have an NA for that combination (it goes back to being implicitly missing)
stocks %>%
  spread(year, return) %>%
  gather(year, return, `2015`:`2016`, na.rm = TRUE)
#another way to make missing data explicit (NA): all year x qtr combinations
stocks %>%
  complete(year, qtr)
#fill###########################################################
#prof teaching example:
#Frew,James 262
#          296
#          INT 33T
# the blank cells implicitly mean "Frew,James" carried down; order matters
treatment <- tribble(
  ~ person, ~ treatment, ~response,
  "Derrick Whitmore", 1, 7,
  NA, 2, 10,
  NA, 3, 9,
  "Katherine Burke", 1, 4
)
# fill() carries the last non-missing value of `person` downward.
treatment %>%
  fill(person)
|
c879c4c670c7aba5fed1b8d3ff0ffd57e89b41e0 | 5729b5786f86e793d0fe44cec251caef37e6ce78 | /man/MFA_ECM.Rd | 181e8360b2c3aab60744ce7e2924438f58b341ae | [] | no_license | cran/autoMFA | 8d3e4a62b8e2abdad81dd5806bd6c845dc418aea | 010e868f88a352d55743a5eac0d36b9d60d01206 | refs/heads/master | 2023-07-07T11:19:17.020869 | 2021-08-10T11:00:05 | 2021-08-10T11:00:05 | 394,796,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,928 | rd | MFA_ECM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ECM.fit.R
\name{MFA_ECM}
\alias{MFA_ECM}
\title{ECM-Based MFA Estimation}
\usage{
MFA_ECM(
Y,
gmin = 1,
gmax = 10,
qmin = 1,
qmax = NULL,
eta = 0.005,
itmax = 500,
nkmeans = 5,
nrandom = 5,
tol = 1e-05,
conv_measure = "diff",
varimax = FALSE
)
}
\arguments{
\item{Y}{An \emph{n} by \emph{p} data matrix, where \emph{n} is the number of observations and \emph{p} is the number of dimensions of the data.}
\item{gmin}{The smallest number of components for which an MFA model will be fitted.}
\item{gmax}{The largest number of components for which an MFA model will be fitted.}
\item{qmin}{The smallest number of factors with which an MFA model will be fitted.}
\item{qmax}{The largest number of factors with which an MFA model will be fitted. Must obey the Ledermann bound.}
\item{eta}{The smallest possible entry in any of the error matrices \emph{D_i} \insertCite{Jian-HuaZhao2008FMEf}{autoMFA}.}
\item{itmax}{The maximum number of ECM iterations allowed for the estimation of each MFA model.}
\item{nkmeans}{The number of times the \emph{k}-means algorithm will be used to initialise models for each combination of \emph{g} and \emph{q}.}
\item{nrandom}{The number of randomly initialised models that will be used for each combination of g and q.}
\item{tol}{The ECM algorithm terminates if the measure of convergence falls below this value.}
\item{conv_measure}{The convergence criterion of the ECM algorithm. The default \code{'diff'} stops the ECM iterations if |l^{(k+1)} - l^{(k)}| < \code{tol} where l^{(k)} is the log-likelihood at the \emph{k}th ECM iteration. If \code{'ratio'}, then the convergence of the ECM iterations is measured using |(l^{(k+1)} - l^{(k)})/l^{(k+1)}|.}
\item{varimax}{Boolean indicating whether the output factor loading matrices should be constrained
using varimax rotation or not.}
}
\value{
A list containing the following elements:
\itemize{
\item{\code{model}:}{ A list specifying the final MFA model. This contains: \itemize{
\item{\code{B}:}{ A \emph{p} by \emph{p} by \emph{q} array containing the factor loading matrices for each component.}
\item{\code{D}:}{ A \emph{p} by \emph{p} by \emph{g} array of error variance matrices.}
\item{\code{mu}:}{ A \emph{p} by \emph{g} array containing the mean of each cluster.}
\item{\code{pivec}:}{ A 1 by \emph{g} vector containing the mixing
proportions for each FA in the mixture.}
\item{\code{numFactors}:}{ A 1 by \emph{g} vector containing the number of factors for each FA.}}
}
\item{\code{clustering}:}{ A list specifying the clustering produced by the final model. This contains: \itemize{
\item{\code{responsibilities}:}{ A \emph{n} by \emph{g} matrix containing the probability
that each point belongs to each FA in the mixture.}
\item{\code{allocations}:}{ A \emph{n} by 1 matrix containing which
FA in the mixture each point is assigned to based on the responsibilities.}}}
\item{\code{diagnostics}:}{ A list containing various pieces of information related to the fitting process of the algorithm. This contains: \itemize{
\item{\code{bic}:}{ The BIC of the final model.}
\item{\code{logL}:}{ The log-likelihood of the final model.}
\item{\code{times}:}{ A data frame containing the amount of time taken to fit each MFA model.}
\item{\code{totalTime}:}{ The total time taken to fit the final model.}}}
}
}
\description{
An implementation of an ECM algorithm for the MFA model which does not condition on the factors being known \insertCite{Jian-HuaZhao2008FMEf}{autoMFA}.
Performs a grid search from \code{gmin} to \code{gmax}, and \code{qmin} to \code{qmax}, respectively. The best combination of \emph{g} and \emph{q} is chosen to be the model with the minimum BIC.
}
\examples{
RNGversion('4.0.3'); set.seed(3)
MFA.fit <- MFA_ECM(autoMFA::MFA_testdata,3,3)
}
\references{
\insertRef{Jian-HuaZhao2008FMEf}{autoMFA}
}
|
40214dee99ffeb9c9781fe999307b1b71d97ee7a | 9dcd3df4848d12afca77453d93c6dfaebf3ad57d | /R/mixsiar.r | 8b5358008657224c6469bb037f87971b399618fb | [] | no_license | apmonr/MixSIAR | dce776b0136d172230b66801600215944e1f6a92 | b4e4397a82cde7fb57d06133e21381cc2c8bddba | refs/heads/master | 2020-12-29T00:42:00.042523 | 2013-10-17T22:32:33 | 2013-10-17T22:32:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,390 | r | mixsiar.r | # Brian Stock
# Oct 15, 2013
#
# Previous version:
# MixSIAR GUI 0.2
# Major changes:
# Convert MixSIAR into a package-able form: function mixsiar() that builds GUI
# Made global all gWidgets that are used in other functions...quick fix for now
# Revised wording:
# - "Discrimination" instead of "Fractionation"
# - "Mixture" instead of "Consumer"
# Before running this script, a brand new user will need to install the latest
# versions of R and JAGS. The install.packages("gWidgetsRGtk2") command
# will prompt the user to install GTK+, which also needs to happen.
# Output file: output_JAGS.r
# Model file: write_JAGS_model.r
# Auxillary files:
# - build_mix_win.r
# - build_source_win.r
# - run_model.r
# - plot_data.r
# - plot_data_one_iso.r
# - plot_continuous_var.r
# Builds and launches the MixSIAR GUI: checks the R version, installs/loads
# dependencies, sources the helper scripts, then wires up the data-loading
# buttons, MCMC parameter fields, error-model checkboxes and output options
# before showing the window. Widgets needed by the sourced helper scripts
# are deliberately assigned into the global environment with <<-
# (see the file header comments).
mixsiar <- function(){
# Check for R Version 3.0 or 2.15
minor <- as.numeric(R.Version()$minor)
major <- as.numeric(R.Version()$major)
# NOTE(review): this condition only rejects versions below 2.15 (e.g. 2.14);
# any 3.x passes because major < 3 is FALSE -- verify the intended minimum.
if(major<3 && minor<15){
stop(paste("*** Error: You are running ", R.Version()$version.string, ", which is
out of date. Please update R to 3.0 or 2.15 and try again. ***",sep=""))
}
# Install any missing dependencies into the user's library.
if (!"ggplot2" %in% installed.packages()) install.packages("ggplot2")
if (!"gWidgetsRGtk2" %in% installed.packages()) install.packages("gWidgetsRGtk2")
if (!"runjags" %in% installed.packages()) install.packages("runjags")
if (!"R2jags" %in% installed.packages()) install.packages("R2jags")
if (!"matrixStats" %in% installed.packages()) install.packages("matrixStats")
if (!"MASS" %in% installed.packages()) install.packages("MASS")
if (!"RColorBrewer" %in% installed.packages()) install.packages("RColorBrewer")
if (!"reshape" %in% installed.packages()) install.packages("reshape")
if (!"gWidgetsRGtk2" %in% installed.packages()) stop("*** Error: GTK+ is not installed ***")
if (!"R2jags" %in% installed.packages()) stop("*** Error: JAGS is not installed ***")
# NOTE(review): require() only warns when a package fails to load; the
# explicit stop() checks above cover just gWidgetsRGtk2 and R2jags.
require(ggplot2)
require(gWidgetsRGtk2)
require(runjags)
require(R2jags)
require(matrixStats)
require(MASS)
require(RColorBrewer)
require(reshape)
# Presumably forces .Random.seed to be initialised before sampling -- confirm.
runif(1)
# NOTE(review): inside a function, ls() lists the function's own environment,
# so this clears the locals created above rather than the user's workspace --
# verify intent.
rm(list=ls())
options("guiToolkit"="RGtk2")
# Helper scripts are sourced from the current working directory.
source("output_JAGS.r")
source("write_JAGS_model.r")
source("build_mix_win.r")
source("build_source_win.r")
source("run_model.r")
source("plot_data.r")
source("plot_data_one_iso.r")
source("plot_continuous_var.r")
# Top-level window and layout containers.
win<-gwindow("MixSIAR GUI", visible=FALSE)
grp_all <- ggroup(cont=win, horizontal=FALSE)
grp_input <- ggroup(cont=grp_all, horizontal=TRUE)
############################################################################
# Read in Data
############################################################################
grp_readin_data <- gframe(text="Read in data", cont=grp_input, horizontal=F)
grp_cons <<- ggroup(horizontal=TRUE, cont=grp_readin_data)
btn_cons <- gbutton(
text = "Load mixture data",
cont = grp_cons,
expand = T,
handler = function(h, ...){
build_mix_win()
}
)
grp_source <<- ggroup(horizontal=TRUE, cont = grp_readin_data)
btn_source <- gbutton(
text = "Load source data",
cont = grp_source,
expand = T,
handler = function(h, ...){
build_source_win()
}
)
grp_frac <- ggroup(horizontal=TRUE, cont = grp_readin_data)
btn_frac <- gbutton(
text = "Load discrimination data",
cont = grp_frac,
expand = T,
handler = function(h, ...){
gfile(
text = "Load discrimination data file",
type = "open",
action = "read.csv",
handler = function(h, ...)
{
tryCatch( # reads discrimination/fractionation/enrichment means data into 'FRAC'
{
data_frame_name <- make.names("FRAC")
the_data <- do.call(h$action, list(h$file))
assign(data_frame_name, the_data, envir = globalenv())
addSpring(grp_frac)
svalue(status_bar) <- "Discrimination data successfully loaded"
},
error = function(e){
svalue(status_bar) <- "Could not load data"
add(grp_frac,gimage("red x.png"))
}
)
}
)
row.names(FRAC)<-FRAC[,1] # store the row names of FRAC (sources)
FRAC <- as.matrix(FRAC[-1]) # remove source names column of FRAC
FRAC <- FRAC[order(rownames(FRAC)),] # rearrange FRAC so sources are in alphabetical order
# Make sure the iso columns of frac_mu and frac_sig2 are in the same order as S_MU and S_SIG
frac_mu_cols <- match(MU_names,colnames(FRAC)) # get the column numbers of FRAC that correspond to the means
frac_sig_cols <- match(SIG_names,colnames(FRAC)) # get the column numbers of FRAC that correspond to the SDs
frac_mu <- FRAC[,frac_mu_cols] # FRAC means
frac_sig2 <- FRAC[,frac_sig_cols]*FRAC[,frac_sig_cols] # FRAC variances
assign("frac_mu", frac_mu, envir = .GlobalEnv) # Assign 'frac_mu' and 'frac_sig2' to the global environment
assign("frac_sig2", frac_sig2, envir = .GlobalEnv)
add(grp_frac,gimage("check.png"))
}
)
#############################################################################
# User-specified MCMC parameters
#############################################################################
grp_set_parameters <- gframe(text="Specify MCMC parameters",
cont=grp_input,
horizontal=FALSE)
# Number of chains value is 'txt_num_chains'
grp_num_chains <- ggroup(cont=grp_set_parameters, horizontal=TRUE)
lbl_num_chains <- glabel(
"# of Chains: ",
cont = grp_num_chains
)
addSpring(grp_num_chains)
txt_num_chains <<- gedit("3", width=10, cont = grp_num_chains)
# Chain length value is 'txt_chain_length'
grp_chain_length <- ggroup(cont=grp_set_parameters, horizontal=TRUE)
lbl_chain_length <- glabel(
"Chain Length: ",
cont = grp_chain_length
)
addSpring(grp_chain_length)
txt_chain_length <<- gedit("10000", width=10, cont = grp_chain_length)
# Burn-in value is 'txt_burnin'
grp_burnin <- ggroup(cont=grp_set_parameters, horizontal=TRUE)
lbl_burnin <- glabel(
"Burn-in: ",
cont = grp_burnin
)
addSpring(grp_burnin)
txt_burnin <<- gedit("5000", width=10, cont = grp_burnin)
# Thinning value is 'txt_thin'
grp_thin <- ggroup(cont=grp_set_parameters, horizontal=TRUE)
lbl_thin <- glabel(
"Thin: ",
cont = grp_thin
)
addSpring(grp_thin)
txt_thin <<- gedit("10", width=10, cont = grp_thin)
####################################################################
# Model Error Options
####################################################################
grp_error <- gframe(text="Model Error Options", cont=grp_input, horizontal=F)
resid_err_box <<- gcheckbox("Include residual error", cont=grp_error) # Does the user want to include residual/SIAR error in the model? resid_var = 1/resid_tau, resid_tau ~ dgamma(.001,.001)
proc_err_box <<- gcheckbox("Include process error", cont=grp_error) # Does the user want to include process/MixSIR error in the model? process_var = p2[iso,i]*sig2_source[iso,i] + p2[iso,i]*sig2_frac[iso,i]
svalue(resid_err_box) <- TRUE # Default is residual/SIAR error AND process error
svalue(proc_err_box) <- TRUE
status_bar <<- gstatusbar("", progress.bar="gui", cont=grp_all, expand=TRUE)
####################################################################
# Isospace Plot
####################################################################
# The 'Plot data' button calls the plot_data function to make an isospace plot
grp_plot <- ggroup(cont=grp_all, horizontal=T)
plot_button <- gbutton(
text = "Make isospace plot",
cont = grp_plot,
expand = TRUE,
handler = function(h, ...){
if(n.iso==1){
plot_data_one_iso()
} else {
# One 2-D plot per pair of isotopes.
for(iso1 in 1:(n.iso-1)){
for(iso2 in (iso1+1):n.iso){
plot_data(c(iso1,iso2))
}
}
}
}
)
grp_plot_name <- ggroup(cont=grp_plot, horizontal=T, expand=T)
plot_lbl <- glabel("Save plot as:",cont=grp_plot_name)
plot_filename <<- gedit("isospace_plot", width=15, cont = grp_plot_name)
plot_save_pdf <<- gcheckbox("pdf", cont = grp_plot_name)
plot_save_png <<- gcheckbox("png", cont = grp_plot_name)
svalue(plot_save_pdf) <- TRUE
####################################################################
# Output options
####################################################################
grp_output <- gframe(text="Output options", cont=grp_all, horizontal=F)
## Choose working directory
#grp_dir <- ggroup(cont=grp_output, horizontal=T)
#chooseWD <- function(h,...){
#  old_dir <- getwd()
#  setwd(tclvalue(tkchooseDirectory()))
#  new_dir <- getwd()
#  file.copy(from=file.path(paste(old_dir,"/MixSIAR.txt",sep="")),
#            to=file.path(paste(new_dir,"/MixSIAR.txt",sep="")))
#}
#choose_wd <- gbutton(
#  text = "Choose working directory to save output",
#  cont = grp_dir,
#  expand = F,
#  handler = chooseWD
#)
#save_work <- gcheckbox("Save R workspace", cont=grp_dir)
# Summary Statistics options
grp_summary <- ggroup(cont=grp_output, horizontal=T)
lbl_summary <- glabel("Summary Statistics", cont=grp_summary, expand=T)
grp_summary_right <- ggroup(cont=grp_summary, horizontal=F)
grp_summary_save <- ggroup(cont=grp_summary_right, horizontal=T)
summary_save <<- gcheckbox("Save summary statistics to file: ", cont=grp_summary_save)
summary_name <<- gedit("summary_statistics", width=20, cont=grp_summary_save)
svalue(summary_save) <- TRUE # Default is to save the summary statistics
# Posterior Density Plot options
grp_posterior <- ggroup(cont=grp_output, horizontal=T)
lbl_posterior <- glabel("Posterior Density Plot", cont=grp_posterior, expand=T)
grp_post_opt <- ggroup(cont = grp_posterior, horizontal=F)
sup_post <<- gcheckbox("Suppress plot output", cont = grp_post_opt)
grp_post_name <- ggroup(cont = grp_post_opt, horizontal=T, expand=T)
plot_post_lbl <- glabel("Save plot as:", cont = grp_post_name)
plot_post_name <<- gedit("posterior_density", width=20, cont = grp_post_name)
plot_post_save_pdf <<- gcheckbox("pdf", cont = grp_post_name)
svalue(plot_post_save_pdf) <- TRUE
plot_post_save_png <<- gcheckbox("png", cont = grp_post_name)
# Pairs Plot options
grp_pairs <- ggroup(cont=grp_output, horizontal=T)
lbl_pairs <- glabel("Pairs Plot", cont = grp_pairs, expand=T)
grp_pairs_opt <- ggroup(cont = grp_pairs, horizontal=F)
sup_pairs <<- gcheckbox("Suppress plot output", cont = grp_pairs_opt)
grp_pairs_name <- ggroup(cont = grp_pairs_opt, horizontal=T, expand=T)
plot_pairs_lbl <- glabel("Save plot as:", cont = grp_pairs_name)
plot_pairs_name <<- gedit("pairs_plot", width=20, cont = grp_pairs_name)
plot_pairs_save_pdf <<- gcheckbox("pdf", cont = grp_pairs_name)
svalue(plot_pairs_save_pdf) <- TRUE
plot_pairs_save_png <<- gcheckbox("png", cont = grp_pairs_name)
# XY Plot options
grp_xy <- ggroup(cont=grp_output, horizontal=T)
lbl_xy <- glabel("XY Plot", cont = grp_xy, expand=T)
grp_xy_opt <- ggroup(cont = grp_xy, horizontal=F)
sup_xy <<- gcheckbox("Suppress plot output", cont = grp_xy_opt)
grp_xy_name <- ggroup(cont = grp_xy_opt, horizontal=T, expand=T)
plot_xy_lbl <<- glabel("Save plot as:", cont = grp_xy_name)
plot_xy_name <<- gedit("xy_plot", width=20, cont = grp_xy_name)
plot_xy_save_pdf <<- gcheckbox("pdf", cont = grp_xy_name)
svalue(plot_xy_save_pdf) <- TRUE
plot_xy_save_png <<- gcheckbox("png", cont = grp_xy_name)
# Diagnostics options
grp_diag <- gframe(text="Diagnostics", cont=grp_output, horizontal=F)
grp_diag_opts <- ggroup(cont=grp_diag, horizontal=T)
gelman <<- gcheckbox("Gelman-Rubin (must have > 1 chain)", cont=grp_diag_opts)
svalue(gelman) <- TRUE
heidel <<- gcheckbox("Heidelberg-Welch", cont=grp_diag_opts)
svalue(heidel) <- TRUE
geweke <<- gcheckbox("Geweke", cont=grp_diag_opts)
svalue(geweke) <- TRUE
grp_diag_save <- ggroup(cont=grp_diag, horizontal=T)
diag_save <<- gcheckbox("Save diagnostics to file:", cont=grp_diag_save)
diag_name <<- gedit("diagnostics", width=20, cont=grp_diag_save)
lbl_diag <- glabel("Note: diagnostics will print in the R command line if you do not choose to save to file",cont=grp_diag)
svalue(diag_save) <- TRUE # Default is to save the diagnostics
# The 'RUN MODEL' button calls the main 'run_model' function, which writes the
# JAGS model file, calls JAGS, plots the JAGS output, and runs diagnostics
go_button <- gbutton(
text = "RUN MODEL",
cont = grp_all,
expand = TRUE,
handler = function(h, ...){
run_model()
}
)
# Show the GUI once all the code has run
visible(win) <- TRUE
}
|
12b8e7706e65740e2496caed4d7bdaae0e54fdfb | 043903e2b1d2e905c8b6bf10b4ae834743eb895d | /R/auth.R | d739946f8c2b56e74249b54dfb70d2bf88397324 | [
"MIT"
] | permissive | retina-dot-ai/rgoogleslides | 1653f91b197d8df59909bc86ac3e50a4013a3b7f | ec30e23aa52761595e49541007d420abc4304505 | refs/heads/master | 2021-01-19T20:48:31.232311 | 2017-04-17T22:26:52 | 2017-04-17T22:26:52 | 88,556,163 | 0 | 0 | null | 2017-04-17T22:26:03 | 2017-04-17T22:26:03 | null | UTF-8 | R | false | false | 1,278 | r | auth.R | .slidesEnv <- new.env(parent = emptyenv())
# Cached OAuth token slot; NULL means "not yet authorised".
.slidesEnv$Token <- NULL
# Cache an OAuth token in the package-local environment and hand it back.
set_token <- function(value) {
  assign("Token", value, envir = .slidesEnv)
  value
}
# Read back whatever token set_token() cached (NULL before authorisation).
get_token <- function() {
  .slidesEnv[["Token"]]
}
#' Authorize the package to access the Google Slides API
#'
#' Runs the OAuth 2.0 flow against the Google endpoint and caches the
#' resulting token via \code{set_token} for later API calls. When no client
#' id/secret is supplied, the package-level defaults stored in \code{options()}
#' are used.
#' @importFrom httr oauth_app oauth_endpoints oauth2.0_token
#' @param client.id OAuth client ID. This is obtained from Google API Credentials
#' @param client.secret OAuth client secret. This is obtained from Google API Credentials
#' @export
authorize <- function(client.id = getOption("slides.client.id"),
                      client.secret = getOption("slides.client.secret")){
  google_endpoint <- oauth_endpoints("google")
  slides_app <- oauth_app(appname = "googleslides",
                          key = client.id, secret = client.secret)
  token <- oauth2.0_token(endpoint = google_endpoint, app = slides_app,
                          scope = c("https://www.googleapis.com/auth/presentations",
                                    "https://www.googleapis.com/auth/drive.readonly"))
  set_token(token)
  invisible(token)
}
|
2fd40cc4180f90c1c5184ecbec36fb87203142f1 | 47248e6d22eb023dbfa7be13025e7b5697676d3b | /R/get_results.R | 6371b522a09a1df3509ee24edac977f0afcc07a7 | [] | no_license | markhwhiteii/stat-comp | 9b096676beac874146c81a8e0f7c4b37286a8cbe | a76ab0eb1a472f0f3dcc7e9a5e2c5d271626af2d | refs/heads/master | 2021-08-23T06:42:49.525748 | 2017-12-03T23:58:47 | 2017-12-03T23:58:47 | 105,834,980 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 670 | r | get_results.R | get_results <- function(predicted, actual) {
# Cross-tabulate predictions vs. truth as a long data frame of
# (predicted, actual, Freq) rows; predicted/actual become factors here, so
# the == 1 / == 0 comparisons below match against the factor labels "1"/"0".
conf_matrix <- as.data.frame(table(predicted, actual))
# Extract each confusion-matrix cell count. The piped
# ifelse(length(.) == 0, 0, .) substitutes 0 when a class combination is
# absent from the table.
# NOTE(review): relies on magrittr's %>% and '.' placeholder being attached
# by the caller -- confirm the pipe is loaded wherever this is used.
tp <- conf_matrix[(conf_matrix[1] == 1) & (conf_matrix[2] == 1), 3] %>%
ifelse(length(.) == 0, 0, .)
fp <- conf_matrix[(conf_matrix[1] == 1) & (conf_matrix[2] == 0), 3] %>%
ifelse(length(.) == 0, 0, .)
tn <- conf_matrix[(conf_matrix[1] == 0) & (conf_matrix[2] == 0), 3] %>%
ifelse(length(.) == 0, 0, .)
fn <- conf_matrix[(conf_matrix[1] == 0) & (conf_matrix[2] == 1), 3] %>%
ifelse(length(.) == 0, 0, .)
# Precision, recall and F1 score.
# NOTE(review): each division yields NaN when its denominator is 0 (e.g. no
# positive predictions at all); confirm that is acceptable downstream.
prec <- tp / (tp + fp)
rec <- tp / (tp + fn)
f1 <- 2 * ((prec * rec) / (prec + rec))
# Returns c(tp, fp, tn, fn, precision, recall, f1).
return(c(tp, fp, tn, fn, prec, rec, f1))
}
|
dbcd955fa3a3c0540a744a3c04c0d7360976bbe8 | a9ed2d55f7f585a25fdb2f548d82263ee3f567b6 | /man/numericorNULL-class.Rd | 62a520722a099003acb4846f56ad289c66e96f8b | [] | no_license | cran/distrTEst | 65a8ae1353d8be867f0fb2a7f33072c27e2222fe | 9dc34ea714bed295febf78605f2dd666d2f6d849 | refs/heads/master | 2022-11-19T16:02:11.686622 | 2022-11-12T21:30:05 | 2022-11-12T21:30:05 | 17,695,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 992 | rd | numericorNULL-class.Rd | \name{numericorNULL-class}
\docType{class}
\alias{numericorNULL-class}
\alias{DataframeorNULL-class}
\alias{CallorNULL-class}
\title{Classes "numericorNULL", "CallorNULL", and "DataframeorNULL"}
\description{ auxiliary classes; may contain either a numeric vector or NULL
[or a call / data.frame or NULL, respectively].}
\section{Objects from the Class}{A virtual Class: No objects may be created from it.}
\section{Methods}{
No methods defined with class "numericorNULL", "CallorNULL", and "DataframeorNULL" in the signature.
}
\author{
Thomas Stabla \email{statho3@web.de},\cr
Florian Camphausen \email{fcampi@gmx.de},\cr
Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de},\cr
Matthias Kohl \email{Matthias.Kohl@stamats.de}
}
\note{From version 1.8, the result slot of an object of class evaluation is of type "DataframeorNULL"}
\seealso{\code{\link{Evaluation-class}}}
\keyword{classes}
\concept{class union}
\concept{virtual}
|
1c2dd80808f15ab5c3d430438322a3860aacb5b6 | 63b4c2d6d75b451b0f66eb6c03162684e617334e | /tests/testthat.R | d21fba39dfa3967e0e052efffe7abee67dd446b6 | [] | no_license | nemochina2008/rangl | 8b1b50bc671999057c6842436e7c45d42a92da0d | 4f3027af452ea45cb1c24e16b4aadd48304dd873 | refs/heads/master | 2021-05-16T05:53:44.168584 | 2017-09-12T12:47:11 | 2017-09-12T12:47:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 54 | r | testthat.R | library(testthat)
library(rangl)
# Discover and run every testthat test under tests/testthat/ for 'rangl'.
test_check("rangl")
|
2af1a435a9050e9e5346406d38e9da773b30b0af | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RSEIS/examples/winmark.Rd.R | 99e1a8d2c12ee132bdcadf70b9deb4978ec20c24 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 914 | r | winmark.Rd.R | library(RSEIS)
### Name: winmark
### Title: Window Mark
### Aliases: winmark
### Keywords: aplot
### ** Examples
# Example code extracted from the winmark Rd page: draw an empty unit-square
# plot, then window markers on each side with varying colours, arrowheads,
# arrow lengths (alen) and leg lengths (leglen).
plot(c(0,1), c(0,1), type='n', xlab='', ylab='' )
winmark(.3, .7, side=3, col='brown', arrows=TRUE, leglen=.4)
winmark(.3, .7, side=1, col='blue', arrows=TRUE, leglen=.5)
winmark(.3, .7, side=2, col='green',
arrows=TRUE, alen=.05, leglen=.4)
winmark(.3, .7, leg=.65, bar=.6,
side=4, col='orange', arrows=TRUE, alen=.1, leglen=.125)
winmark(.3, .7, bar=.65, leg=.6,
side=4, col='seagreen', arrows=TRUE, alen=.1, leglen=.125)
############# examples with different legs showing
# Second figure: per the comment above, LEGON appears to select which legs
# are drawn -- see ?winmark for the exact meaning of 0/1/2.
plot(c(0,1), c(0,1), type='n', xlab='', ylab='' )
winmark(.3, .7, side=3, col='brown',
arrows=TRUE, leglen=.4, LEGON=1)
winmark(.3, .4, side=1, col='brown',
arrows=TRUE, leglen=.4, LEGON=2)
winmark(.7, .9, side=1, col='blue',
arrows=TRUE, leglen=.4, LEGON=0)
|
6634eeb39e8547ab86e300d4f9cc3edf35b3b781 | 7255f071174e76a76399ec3cca5cd674cf397afd | /R/gender_df.R | d83923661acbb85dd3ac8bdc686b5a7904df88ee | [
"MIT"
] | permissive | bmschmidt/gender | 628fa527b599aa24e826ac8ba240b996b7a6c09e | 3ca8f9eb721c1eb22c5f6506f1cf8e3e93f969fb | refs/heads/master | 2021-01-18T17:59:42.170732 | 2016-09-15T19:21:25 | 2016-09-15T19:21:53 | 20,813,775 | 1 | 0 | null | 2016-09-08T22:02:45 | 2014-06-13T18:28:21 | R | UTF-8 | R | false | false | 3,001 | r | gender_df.R | #' Use gender prediction with data frames
#'
#' In a common use case for gender prediction, you have a data frame with a
#' column for first names and a column for birth years (or, two columns
#' specifying a minimum and a maximum potential birth year). This function wraps
#' the \code{\link{gender}} function to efficiently apply it to such a data
#' frame. The result is a data frame with one prediction of the gender for each
#' unique combination of first name and birth year. The resulting data frame can
#' then be merged back into your original data frame.
#'
#' @param data A data frame containing first names and birth year or range of
#' potential birth years.
#' @param name_col A string specifying the name of the column containing the
#' first names.
#' @param year_col Either a single string specifying the birth year associated
#' with the first name, or character vector with two elements: the names of
#' the columns with the minimum and maximum years for the range of potential
#' birth years.
#' @param method One of the historical methods provided by this package:
#' \code{"ssa"}, \code{"ipums"}, \code{"napp"}, or \code{"demo"}. See
#' \code{\link{gender}} for details.
#' @seealso \code{\link{gender}}
#' @export
#' @return A data frame with columns from the output of the \code{gender}
#' function, and one row for each unique combination of first names and birth
#' years.
#' @examples
#' library(dplyr)
#' demo_df <- data_frame(names = c("Hillary", "Hillary", "Hillary",
#' "Madison", "Madison"),
#' birth_year = c(1930, 2000, 1930, 1930, 2000),
#' min_year = birth_year - 1,
#' max_year = birth_year + 1,
#' stringsAsFactors = FALSE)
#'
#' # Using the birth year for the predictions.
#' # Notice that the duplicate value for Hillary in 1930 is removed
#' gender_df(demo_df, method = "demo",
#' name_col = "names", year_col = "birth_year")
#'
#' # Using a range of years
#' gender_df(demo_df, method = "demo",
#' name_col = "names", year_col = c("min_year", "max_year"))
gender_df <- function(data, name_col = "name", year_col = "year",
method = c("ssa", "ipums", "napp", "demo")) {
# Resolve the requested historical method against the allowed choices.
method <- match.arg(method)
# Validate: data frame input, and the named columns actually exist.
stopifnot("data.frame" %in% class(data),
name_col %in% names(data),
length(year_col) >= 1,
length(year_col) <= 2,
year_col %in% names(data))
# A single year column is duplicated so it serves as both the minimum and
# the maximum of the birth-year range below.
if (length(year_col) == 1) year_col <- c(year_col, year_col)
name_year_grouping <- list(name_col, year_col[1], year_col[2])
year_grouping <- list(year_col[1], year_col[2])
# Deduplicate name/year combinations, then run gender() once per year group,
# and row-bind the per-group predictions back together.
# NOTE(review): distinct_(), group_by_() and do() are long-deprecated dplyr
# verbs; this presumably targets pre-0.7 dplyr -- confirm before modernising.
data %>%
distinct_(.dots = name_year_grouping) %>%
group_by_(.dots = year_grouping) %>%
do(results = gender(.[[name_col]],
years = c(.[[year_col[1]]][1], .[[year_col[2]]][1]),
method = method)) %>%
do(bind_rows(.$results)) %>%
ungroup()
}
|
043c791005a03e3ad5992827b614e6bf6f1e2050 | 4b0f440d75205d89c126a260d1da1b44a065deb0 | /3.R_basics/Estadística_en_R/Functions/Intervalo.Confianza.media.R | ac4d12ab16b8e04de8ea329539eb74c512dbcebe | [] | no_license | JoelDela/Master-Data-Science | 12802a2c41e54bc5babb00eefb7b5e06b9b70868 | 48d352661ceffbcf5d64dae5a1e5357e98cf3e40 | refs/heads/master | 2023-09-01T02:55:57.458119 | 2023-08-17T07:24:11 | 2023-08-17T07:24:11 | 216,080,851 | 1 | 0 | null | 2023-08-17T07:24:12 | 2019-10-18T18:03:20 | Jupyter Notebook | UTF-8 | R | false | false | 563 | r | Intervalo.Confianza.media.R | Intervalo.Confianza.media<-function(data,alpha,sigma.conocido=T,sigma){
# n: sample size; xbar: sample mean.
n<-length(data)
xbar<-mean(data)
# Known sigma: two-sided z interval at confidence level 1 - alpha.
if (sigma.conocido)
{
Z.alpha <-qnorm(1-alpha/2,0,1)
lim.inf<-xbar-Z.alpha*sigma/(n^(1/2))
lim.sup<-xbar+Z.alpha*sigma/(n^(1/2))
}
# Unknown sigma: two-sided t interval using the sample SD with n - 1 df.
else
{ s<-sd(data)
t.alpha<-qt(1-alpha/2,n-1)
lim.inf<-xbar-t.alpha*s/(n^(1/2))
lim.sup<-xbar+t.alpha*s/(n^(1/2))
}
# Return a 1 x 3 labelled matrix: point estimate and interval limits.
# NOTE(review): byrow=T (and the T default in the signature) rely on T not
# being reassigned; TRUE would be safer -- left unchanged here.
r<-c(xbar,lim.inf,lim.sup)
result<-matrix(r,nrow=1,byrow=T)
variables<-c("mu","lim.inf","lim.sup")
dimnames(result)<-list(NULL,variables)
return(result)
}
|
48a23d18e616aaf369567b789601e63d2e153501 | b020bb5900efddfaa849714a1758a2df8cd00a0b | /scripts/uncategorized/basic_maps.R | f0c5166786bb85f35c9a88b1f00b6488ed4136d0 | [
"MIT"
] | permissive | duttashi/visualizer | a8908157bebde07f47085a921e6e6ba5d0e065b9 | 33bf1d5e0ba9f9dfa5704958f49e55a2c3a87023 | refs/heads/master | 2023-04-16T14:04:50.536510 | 2021-04-27T07:13:37 | 2021-04-27T07:13:37 | 90,606,378 | 14 | 1 | MIT | 2021-04-27T07:13:38 | 2017-05-08T08:49:15 | R | UTF-8 | R | false | false | 642 | r | basic_maps.R |
# Basic maps with tidyverse
library(tidyverse)
# US mainland outline, one filled polygon per region group.
ggplot(map_data("usa"), aes(x = long, y = lat, group = group)) +
  geom_polygon()

# All lower-48 state boundaries.
ggplot(map_data("state"), aes(x = long, y = lat, group = group)) +
  geom_polygon()

# West-coast states only, at state resolution.
ggplot(filter(map_data("state"),
              region %in% c("california","nevada","oregon","washington")),
       aes(x = long, y = lat, group = group)) +
  geom_polygon()

# The same four states, at county resolution.
ggplot(filter(map_data("county"),
              region %in% c("california","nevada","oregon","washington")),
       aes(x = long, y = lat, group = group)) +
  geom_polygon()

# World map.
ggplot(map_data("world"), aes(x = long, y = lat, group = group)) +
  geom_polygon()
|
89c8b686f8581abc8869c37dad30bf6bd0985192 | caf69e070111ee22f47ea041cd4447ad39fb0b54 | /tmp/readClust.R | 4d02444639bc5d8c9abf1b00e8f204ea43793457 | [] | no_license | shdam/flowsom-stability | df19a256804b7fef895357e5bdbbdb9b0c77c74b | a51f80745d65f417c9abf74d1f3701a130b84e74 | refs/heads/master | 2022-04-17T06:25:51.981505 | 2020-02-05T10:21:18 | 2020-02-05T10:21:18 | 238,408,000 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,137 | r | readClust.R | #!/usr/bin/env Rscript
###########################
### Read cluster result ###
###########################
# Batch section: reads parameter-grid row indices from stdin, scores each
# saved flowSOM clustering run against the ground-truth subpopulation labels,
# and accumulates a results table.
# NOTE(review): 'Sys.time' is a function, not a package; its require() call
# fails silently under suppressWarnings -- verify the intended package name.
list.of.packages <- c('mclust', "optparse", "Sys.time", 'umap', 'ggplot2')
loading = suppressWarnings(suppressMessages(lapply(list.of.packages, require, character.only = TRUE)))
param_index <-readLines("stdin")
# param_index <- scan(file="stdin", quiet=TRUE)
# NOTE(review): hard-coded setwd()/paths tie this script to one machine.
setwd("~/Dropbox/DTU/SP/cluster-stability/flow/")
source('functions.R')
load('parameter_grid.RData')
datafile <- "~/Documents/FCS_data/data_transformed.Rdata"
load(datafile, envir = parent.frame(), verbose = FALSE)
n_stable <- 1000
#df <- data.frame('patient','seed', 'size', 'nclust','time_spent','meanARI', 'meanTrueARI')
results <- setNames(data.frame(matrix(ncol = 7, nrow = 0)), c('patient', 'sampleSize', 'seed', 'nclust', 'time_spent', 'meanARI', 'trueLabelARI'))
# Space-separated input; [c(TRUE, FALSE)] keeps every other token.
param_index <- strsplit(param_index, ' ')[[1]][c(TRUE, FALSE)]
for(par_ind in param_index){
par_ind <- as.integer(par_ind)
patient <- parameter_grid$patient[par_ind]
size <- parameter_grid$size[par_ind]
seed <- parameter_grid$seed[par_ind]
nclust <- parameter_grid$nclust[par_ind]
version <- 1
# Loads 'labels' and 'params' for this run (per the code below) -- confirm
# against the files written by the fitting script.
load(paste0('results/flowsom', version, '/flowsom', version, '_', patient, '_',seed, '_',
size, '_', nclust, '.RData'))
if(patient == "all"){data <- expr_trans
} else{data <- expr_trans[expr_trans$sample==patient, ]}
sample_1k <- getSample(data[, lineage_channels], n = n_stable, seed = seed)
sample_1k_ids <- getIDs(sample_1k)
true_labs <- expr_trans[sample_1k_ids, ]$subpopulation
# Average adjusted Rand index of each run's labels against the truth.
num_labs <- 0
sum_rand <- 0
for(lab in labels){
#print(length(unique(lab)))
#print(adjustedRandIndex(true_labs, lab))
sum_rand <- sum_rand + adjustedRandIndex(true_labs, lab)
num_labs <- num_labs + 1
}
meanTrueARI <- sum_rand/num_labs
# NOTE(review): c() coerces this whole row to character, so the numeric
# columns of 'results' end up stored as strings.
fileResult <- c(toString(patient),seed,size,nclust,toString(params$time_spent), mean(meanARI(labels)), meanTrueARI)
results[nrow(results)+1, ] <- fileResult
print(mean(meanARI(labels)))
}
#save(results, file='flow1Result.RData')
# stop() halts the scripted part; everything below is interactive analysis
# kept in the same file and only run by hand.
stop()
# Data analysis
load('flow1Result.RData')
# patient <- parameter_grid$patient[param_index]
# size <- parameter_grid$size[param_index]
# seed <- parameter_grid$seed[param_index]
# nclust <- parameter_grid$nclust[param_index]
patient <- '001'
size <- -1
seed <- 42
nclust <- 20
version <- 1
load(paste0('results/flowsom', version, '/flowsom', version, '_', patient, '_',seed, '_',
size, '_', nclust, '.RData'))
print(mean(meanARI(labels)))
# Load data
datafile <- "~/Documents/FCS_data/data_transformed.Rdata"
load(datafile, envir = parent.frame(), verbose = FALSE)
if(patient == "all"){data <- expr_trans
} else{data <- expr_trans[expr_trans$sample==patient, ]}
sample_1k <- getSample(data[, lineage_channels], n = n_stable, seed = seed)
sample_1k_ids <- getIDs(sample_1k)
true_labs <- expr_trans[sample_1k_ids, ]$subpopulation
num_labs <- 0
sum_rand <- 0
# NOTE(review): this loop uses ARI() where the batch loop above used
# adjustedRandIndex(); presumably both come from functions.R / mclust --
# confirm they agree.
for(lab in labels){
#print(length(unique(lab)))
print(ARI(true_labs, lab))
sum_rand <- sum_rand + ARI(true_labs, lab)
num_labs <- num_labs + 1
}
print(sum_rand/num_labs)
|
00d27383536583c7152bd0e5f0495773b8ac55d9 | 0471c748337a1027ee6458a701d1a4b988689d22 | /man/fill_NA_using_value.Rd | 3cd21386446f4a48c466c58db66c6fb92d20c40a | [] | no_license | stemangiola/nanny | 888dbb34703e47e6019f19ab6dc4f852928d20d0 | 4426035da5329cfce3d573421c41ca390bdb35b6 | refs/heads/master | 2023-08-03T12:02:58.039874 | 2023-07-25T07:11:15 | 2023-07-25T07:11:15 | 254,525,030 | 26 | 1 | null | 2020-12-15T02:01:45 | 2020-04-10T02:24:13 | R | UTF-8 | R | false | true | 839 | rd | fill_NA_using_value.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{fill_NA_using_value}
\alias{fill_NA_using_value}
\title{This function is needed for DE in case the matrix is not rectangular, but includes NA}
\usage{
fill_NA_using_value(
.data,
.element = NULL,
.feature = NULL,
.value = NULL,
fill_with
)
}
\arguments{
\item{.data}{A `tbl` formatted as | <element> | <feature> | <value> | <...> |}
\item{.element}{The name of the element column}
\item{.feature}{The name of the feature/gene column}
\item{.value}{The name of the feature/gene value column}
\item{fill_with}{A numerical value with which fill the mssing data points}
}
\value{
A tibble with adjusted counts
}
\description{
This function is needed for DE in case the matrix is not rectangular, but includes NA
}
\keyword{internal}
|
36080735cecf54916f402eef457f4a9488d88323 | 81f1247147de4712285ee008e7810a14a5bc3d28 | /Week3_RCoursework/code/Vectorize1.R | de2a2d8e2decc50495a93b8d8497d418ff38f99a | [] | no_license | yz2919/CMEECourseWork | fd60d4c975131dd933eafbccd6fd206d8313ed59 | 77739ab8f3f80140574606b2666d831ab49971d6 | refs/heads/master | 2020-08-11T05:29:14.996171 | 2020-04-21T22:05:51 | 2020-04-21T22:05:51 | 212,313,767 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 645 | r | Vectorize1.R | #!/bin/env Rscript
# Author: Yuqing Zhou yz2919@imperial.ac.uk
# Script: Vectorize1.R
# Desc: sums all elements of a matrix, compare operation time of code with loop and with inbuilt function
# Arguments: 0
# Date: Oct 2019
# Timing workload: a 1000 x 1000 matrix of Uniform(0,1) draws.
M <- matrix(runif(1000000),1000,1000)
# Sum every element of a matrix with an explicit double loop.
#
# Deliberately loop-based so its timing can be compared against the
# vectorized built-in sum(); not intended as the fast way to do this.
#
# Args:
#   M: a numeric matrix (any dimensions, including 0 rows/columns).
# Returns:
#   The scalar sum of all elements of M (0 for a zero-size matrix).
SumAllElements <- function(M) {
  Tot <- 0
  # seq_len() (rather than 1:n) iterates zero times when a dimension is 0,
  # so degenerate matrices no longer hit a subscript-out-of-bounds error.
  for (i in seq_len(nrow(M))) {
    for (j in seq_len(ncol(M))) {
      Tot <- Tot + M[i, j]
    }
  }
  Tot
}
# Compare wall-clock time of the explicit double loop against the
# vectorized built-in sum() on the same matrix.
print("Using loops, the time taken is:")
print(system.time(SumAllElements(M)))
print("Using the in-built vectorized function, the time taken is:")
print(system.time(sum(M)))
|
88bde567a367b153c5dc9e27c2a93c0fd0775364 | 88c6f16d3c734011e8d363c7537c98249534f6d8 | /Ejemplo03.R | 570261ac340f0fcd4ca5728a6d9c4294e7ec6fb4 | [] | no_license | soymelisa/Sesion07_RCloud | f3df20248f474ecf65f9c13ccfcb8479262c0fc0 | 743ecc7562541190cec910cf85328259c88d5ba0 | refs/heads/main | 2023-02-24T05:26:34.480467 | 2021-01-27T20:10:58 | 2021-01-27T20:10:58 | 333,529,704 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 254 | r | Ejemplo03.R | install.packages("pool")
# NOTE(review): unconditional install.packages() calls re-install on every
# run and need a writable library; consider guarding with requireNamespace().
install.packages("dbplyr")
library(dbplyr)
library(pool)
# Pooled connection to the public 'shinydemo' MySQL database used by the
# RStudio/Shiny examples (the guest/guest credentials are intentional there).
my_db <- dbPool(
RMySQL::MySQL(),
dbname = "shinydemo",
host = "shiny-demo.csa7qlmguqrf.us-east-1.rds.amazonaws.com",
username = "guest",
password = "guest"
)
|
f55d5c7504e609b72d5779b54c0ec1198c9c3878 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt21_181_218/stmt21_181_218.R | 1f320cd02f8815479a491611a7defe605bd049c5 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 713 | r | stmt21_181_218.R | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 8712
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 8711
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 8711
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt21_181_218.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2698
c no.of clauses 8712
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 8711
c
c QBFLIB/Basler/terminator/stmt21_181_218.qdimacs 2698 8712 E1 [1] 0 176 2521 8711 RED
|
d11d50cf633004bcbe706c1922cbee8d071f95fa | 28d89d7bf4835af548dd01e3dadd3a0ec1a30ed9 | /deber1.R | 4190b6fd5406f104672c0217c101c21de2c4b1e3 | [] | no_license | jorchimalve/deber1 | 2bf1c0e5f879a8dbea53a122ae380ec72e4ee192 | 2601eccd1fd96976318a43abe1f6c802eca381bb | refs/heads/main | 2023-02-12T19:59:04.514229 | 2021-01-17T02:07:21 | 2021-01-17T02:07:21 | 330,295,359 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,462 | r | deber1.R | # INTEGRANTES
# JORGE CHIMARRO
# DAYANARA PEÑAFIEL
# GUIDO OCHOA
# DIEGO CAJAMARCA
library(readr)
datos <- read_csv("C:/Users/JOCHA/Desktop/MAESTRIA/MODULO 8 - HERRAMIENTAS PARA CONTROL DE LA PRODUCCIÓN/deber 1/segmentation_data.csv")
#View(datos)
datos <- datos[,-c(1),drop=FALSE]
View(datos)
#ground = datos$frequency
######################################
######## MATRIZ DE DISTANCIAS ########
######################################
d=dist(datos,method = "euclidean")
######################################
########### CORRELACIONES ############
######################################
c=cor(datos)
#install.packages("corrplot")
library(corrplot)
corrplot(c)
######################################
### ESCALAMIENTO MULTIDIMENSIONAL ###
######################################
fit = cmdscale(d,eig=TRUE, k=2) #k es el numero de dimensiones
x = fit$points[,1]
y = fit$points[,2]
plot(x,y,col=c("red","green3", "blue", "black"), main = "grupos Original")
#text(x, y, labels = row.names(iris), cex=1)
## K-Means ##
grupos = kmeans(datos,4)
g1 = grupos$cluster
g2 = grupos$size
plot(x,y, col=c("red","green3","blue", "black")[g1], main = "grupos K-Means")
######################################
######## EXPORTACION DE DATOS ########
######################################
comparacion <-cbind(datos,g1)
View(comparacion)
setwd("C:/Users/JOCHA/Desktop")
#install.packages("xlsx")
library(xlsx)
write.xlsx(comparacion,"datosd1.xlsx")
######################################
############ MODELO DHC #############
######################################
library("dendextend")
hc = hclust(d, method = "complete" )
clus3 = cutree(hc, 4)
dend = as.dendrogram(hc)
dend = color_branches(dend, 4)
colors = c("red", "green3","blue", "black")
plot(dend, fill = colors[clus3], cex = 0.1 , main = "grupos DHC")
######################################
######### DIAGRAMA DE ELBOW ##########
######################################
wi = c()
for (i in 1:10)
{
g = kmeans(datos,i)
wi[i] = g$tot.withinss
}
plot((1:length(wi)),wi, xlab="Numero de Clusters", ylab="Suma Cuadrados Internos", pch=19, col="red", type = "b")
######################################
########### VALIDACIÓN ###############
######################################
library(cluster)
library(clValid)
du1 = dunn(d,g1)
du2 = dunn(d,clus3)
sil1 = silhouette(g1,d)
plot(sil1,col=1:4, border=NA)
sil2 = silhouette(clus3,d)
plot(sil2,col=5:8, border=NA)
library(aricode)
library(plyr)
ARI1= ARI(g1,g1)
ARI2= ARI(g1,clus3)
AMI1= AMI(g1,g1)
AMI2= AMI(g1,clus3)
NMI1= NMI(g1,g1,variant = c("joint"))
NMI2= NMI(g1,clus3,variant = c("joint"))
######################################
#### Identificación de las Clases ####
######################################
cliente = as.factor(comparacion$g1) #GROUND
plot(x,y,col=c("red","green3","blue", "black")[cliente], main = "clientes Dataset Original")
######################################
######### Validación Externa #########
######################################
# ARI, AMI, NMI
#install.packages("aricode")
library(aricode)
library(plyr)
ground = comparacion$g1
ARI1_C= ARI(ground,g1)
ARI2_C= ARI(ground,clus3)
AMI1_C= AMI(ground,g1)
AMI2_C= AMI(ground,clus3)
NMI1_C= NMI(ground,g1,variant = c("joint"))
NMI2_C= NMI(ground,clus3,variant = c("joint"))
ENT_C = entropy(ground,clus3)
|
d099411364c962b834611767eaeefb6c85a72ef0 | 49ca49c57d5781055798c02aa1ce3162b49ed607 | /Weighted and optimal importance sampling.R | 924ba736c68252b362c3a33bbb9b535f352f8ba4 | [] | no_license | sakib-malik/Statistical-Simulation-And-Data-Analysis | 2dc817e8bb5df2872dfede28a5e374fb770129d4 | 71ffa7712b151ecf15f68ad66a3929c57cb5540e | refs/heads/main | 2023-02-18T14:35:53.652095 | 2021-01-15T11:00:01 | 2021-01-15T11:00:01 | 329,884,284 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,616 | r | Weighted and optimal importance sampling.R |
#################################
### Plotting weights and
### Weighted average
##################################
# Demo: importance sampling for a Gamma(alpha, beta) target using a
# Gamma(alpha.p, lambda) proposal; visualizes both densities and the
# importance weight attached to each sample.
set.seed(1)
alpha <- 2
beta <- 5
alpha.p <- alpha + 1
lambda <- 4 #proposal
N <- 1e4
samp <- rgamma(N, shape = alpha.p, rate = lambda) # importance samples
# Importance weight w(z) = f(z) / g(z): target density over proposal density.
weights <- dgamma(samp, shape = alpha, rate = beta) /dgamma(samp, shape = alpha.p, rate = lambda)
## Visualizing the target and the importance densities
index <- 1:200
foo <- seq(0, 2, length = 1e3)
plot(foo, dgamma(foo, shape = alpha, rate = beta),
type = 'l', col = "black", ylab = "density", ylim = c(0, 3.5), xlab = "Z")
lines(foo, dgamma(foo, shape = alpha.p, rate = lambda), col = "red")
# Vertical segment at each of the first 200 samples, height = its weight.
segments(x0 = samp[index], x1 = samp[index], y0 = 0, y1 = weights[index], col = adjustcolor("blue", alpha.f = .2))
legend("topright", legend = c("Target", "Proposal", "Weights"), col = c(1,2,"blue"), lty = 1)
###########################################
### Optimal importance sampling from Gamma
###########################################
set.seed(1)
# Importance-sampling estimate of E[X^moment] for X ~ Gamma(alpha, beta),
# drawing N proposal samples from Gamma(imp.alpha, beta).
#
# Args:
#   N:         number of importance samples to draw.
#   alpha:     shape of the target Gamma(alpha, beta) distribution.
#   beta:      rate shared by the target and the proposal.
#   moment:    which raw moment of the target to estimate.
#   imp.alpha: shape of the Gamma proposal; the default alpha + moment is the
#              variance-optimal proposal for this integrand.
#
# Returns: a numeric vector of N values of h(X) * f(X) / g(X); its sample
#          mean estimates the moment and its sample variance the estimator
#          spread.
imp_gamma <- function(N = 1e3, alpha = 4, beta = 10, moment = 2, imp.alpha = alpha + moment)
{
  draw <- rgamma(N, shape = imp.alpha, rate = beta)  # proposal draws
  # Integrand times importance weight f/g, fully vectorized over `draw`.
  # (The original pre-allocated fn.value only to overwrite it immediately.)
  draw^moment * dgamma(draw, shape = alpha, rate = beta) /
    dgamma(draw, shape = imp.alpha, rate = beta)
}
N <- 1e4
# Estimate 2nd moment from Gamma(4, 10) using Gamma(4, 10)
# this is IID Monte Carlo
imp_samp <- imp_gamma(N = N, imp.alpha = 4)
mean(imp_samp)
var(imp_samp)
# Estimate 2nd moment from Gamma(4, 10) using Gamma(6, 10)
# this is the optimal proposal
imp_samp <- imp_gamma(N = N)
mean(imp_samp)
var(imp_samp)
# why is the estimate good
foo <- seq(0.001, 5, length = 1e3)
plot(foo, dgamma(foo, shape = 4, rate = 10), type= 'l', ylab = "Density")
lines(foo, dgamma(foo, shape = 6, rate = 10), col = "red")
legend("topright", col = 1:2, lty = 1, legend = c("Reference", "Optimal"))
# Choosing a horrible proposal
# Estimate 2nd moment from Gamma(4, 10) using Gamma(100, 10)
imp_samp <- imp_gamma(N = N, imp.alpha = 100)
mean(imp_samp) ## estimate is bad too
var(imp_samp)
# why is the estimate bad?
# The density plot below shows the proposal barely overlaps the target.
foo <- seq(0.001, 17, length = 1e3)
plot(foo, dgamma(foo, shape = 4, rate = 10), type= 'l', ylab = "Density")
lines(foo, dgamma(foo, shape = 100, rate = 10), col = "red")
legend("topright", col = 1:2, lty = 1, legend = c("Reference", "Importance"))
# This part is not in the notes
# Doing a simulation study for this
# Repeat the above many times to estimate the variability in the estimators
reps <- 1e3
N <- 1e4
# One column per proposal shape: 4 (IID), 6 (optimal), 7, 100.
var_ests <- matrix(0, nrow = reps, ncol = 4)
mean_ests <- matrix(0, nrow = reps, ncol = 4)
colnames(var_ests) <- c("4", "6", "7", "100")
colnames(mean_ests) <- c("4", "6", "7", "100")
for(i in 1:reps)
{
imp_samp <- imp_gamma(N = N, imp.alpha = 4)
mean_ests[i, 1] <- mean(imp_samp)
var_ests[i, 1] <- var(imp_samp)
imp_samp <- imp_gamma(N = N, imp.alpha = 6)
mean_ests[i, 2] <- mean(imp_samp)
var_ests[i, 2] <- var(imp_samp)
imp_samp <- imp_gamma(N = N, imp.alpha = 7)
mean_ests[i, 3] <- mean(imp_samp)
var_ests[i, 3] <- var(imp_samp)
imp_samp <- imp_gamma(N = N, imp.alpha = 100)
mean_ests[i, 4] <- mean(imp_samp)
var_ests[i, 4] <- var(imp_samp)
}
colMeans(mean_ests) # Last estimate is horrible
colMeans(var_ests) # Smallest for 6
|
fe85b5f5b413adfbe7b6713d9dc20eff33bcd874 | d15f12f29ed2f09d1b3af5d14e53c2d9f97cee44 | /tests/testthat/test-list.R | 938c87e476c9443edae0974251d0fe80a8fe2462 | [
"MIT"
] | permissive | r-lib/fs | ab3e5b896149b953de115d5a79b579b0e034c7b8 | 06c246909c65666058e9e401b1ed0ea73662c07f | refs/heads/main | 2023-09-04T05:54:57.448977 | 2023-07-20T12:16:29 | 2023-07-20T12:16:29 | 114,168,421 | 361 | 101 | NOASSERTION | 2023-07-06T05:54:28 | 2017-12-13T21:01:16 | C | UTF-8 | R | false | false | 7,618 | r | test-list.R |
# dir_ls(): directory listing.  Each it() builds a throwaway tree with
# with_dir_tree() and compares the returned fs_path vector against the
# expected paths.
describe("dir_ls", {
  it("Does not include '.' or double '/' in results", {
    with_dir_tree(list("foo" = "test"), {
      expect_equal(dir_ls(), new_fs_path(c(foo = "foo")))
      expect_equal(dir_ls("."), new_fs_path(c(foo = "foo")))
    })
    with_dir_tree(list("foo/bar" = "test"), {
      expect_equal(dir_ls(recurse = TRUE), named_fs_path(c("foo", "foo/bar")))
      expect_equal(dir_ls(recurse = TRUE, type = "file"), named_fs_path("foo/bar"))
      expect_equal(dir_ls("./", recurse = TRUE), named_fs_path(c("foo", "foo/bar")))
      expect_equal(dir_ls("foo"), named_fs_path("foo/bar"))
      expect_equal(dir_ls("foo/"), named_fs_path("foo/bar"))
    })
  })
  it("Does not follow symbolic links", {
    with_dir_tree(list("foo/bar/baz" = "test"), {
      # foo/bar/qux is a symlink back to foo; recursion must not descend
      # into it (that would loop forever).
      link_create(path_abs("foo"), named_fs_path("foo/bar/qux"))
      expect_equal(
        dir_ls(recurse = TRUE),
        named_fs_path(c("foo", "foo/bar", "foo/bar/baz", "foo/bar/qux"))
      )
      expect_equal(
        dir_ls(recurse = TRUE, type = "symlink"),
        named_fs_path("foo/bar/qux")
      )
    })
  })
  it("Uses grep to filter output", {
    with_dir_tree(list(
      "foo/bar/baz" = "test",
      "foo/bar/test2" = "",
      "foo/bar/test3" = ""), {
      expect_equal(
        dir_ls(recurse = TRUE, glob = "*baz"),
        named_fs_path("foo/bar/baz")
      )
      expect_equal(
        dir_ls(recurse = TRUE, regexp = "baz"),
        named_fs_path("foo/bar/baz")
      )
      expect_equal(
        dir_ls(recurse = TRUE, regexp = "[23]"),
        named_fs_path(c("foo/bar/test2", "foo/bar/test3"))
      )
      # perl = TRUE enables perl-style look-behind in the regexp.
      expect_equal(
        dir_ls(recurse = TRUE, regexp = "(?<=a)z", perl = TRUE),
        named_fs_path("foo/bar/baz")
      )
    })
  })
  it("Does not print hidden files by default", {
    with_dir_tree(list(
      ".foo" = "foo",
      "bar" = "bar"), {
      expect_equal(dir_ls(), named_fs_path("bar"))
      expect_equal(dir_ls(all = TRUE), named_fs_path(c(".foo", "bar")))
    })
  })
  it("can find multiple types", {
    with_dir_tree(list(
      "file" = "foo",
      "dir"), {
      link_create(path_abs("dir"), "link")
      expect_equal(dir_ls(type = "file"), named_fs_path("file"))
      expect_equal(dir_ls(type = "directory"), named_fs_path("dir"))
      expect_equal(dir_ls(type = "symlink"), named_fs_path("link"))
      expect_equal(
        dir_ls(type = c("directory", "symlink")),
        named_fs_path(c("dir", "link"))
      )
      expect_equal(
        dir_ls(type = c("file", "directory", "symlink")),
        named_fs_path(c("dir", "file", "link"))
      )
    })
  })
  it("works with UTF-8 encoded filenames", {
    skip_if_not_utf8()
    skip_on_os("solaris")
    with_dir_tree("\U7684\U6D4B\U8BD5\U6587\U4EF6", {
      file_create("fs\U7684\U6D4B\U8BD5\U6587\U4EF6.docx")
      link_create(path_abs("\U7684\U6D4B\U8BD5\U6587\U4EF6"), "\U7684\U6D4B")
      expect_equal(
        dir_ls(type = "file"),
        named_fs_path("fs\U7684\U6D4B\U8BD5\U6587\U4EF6.docx")
      )
      expect_equal(
        dir_ls(type = "directory"),
        named_fs_path("\U7684\U6D4B\U8BD5\U6587\U4EF6")
      )
      expect_equal(
        dir_ls(type = "symlink"),
        named_fs_path("\U7684\U6D4B")
      )
      expect_equal(path_file(link_path("\U7684\U6D4B")), "\U7684\U6D4B\U8BD5\U6587\U4EF6")
    })
  })
  it("errors on missing input", {
    expect_error(dir_ls(NA), class = "invalid_argument")
  })
  # Unreadable entries: the default fail = TRUE errors, fail = FALSE warns
  # instead.  Permission tricks don't apply on Windows or when running as
  # root, hence the skips.
  it("warns if fail == FALSE", {
    skip_on_os("windows")
    if (Sys.info()[["effective_user"]] == "root") skip("root user")
    with_dir_tree(list(
      "foo",
      "foo2/bar/baz"), {
      file_chmod("foo", "a-r")
      expect_error(dir_ls(".", recurse = TRUE), class = "EACCES")
      expect_warning(dir_ls(fail = FALSE, recurse = TRUE), class = "EACCES")
      file_chmod("foo", "a+r")
      file_chmod("foo2/bar", "a-r")
      expect_warning(dir_ls("foo2", fail = FALSE, recurse = TRUE), class = "EACCES")
      file_chmod("foo2/bar", "a+r")
    })
  })
})
# dir_map(): apply a function to each directory entry and collect the
# results in a list.
describe("dir_map", {
  it("can find multiple types", {
    with_dir_tree(list(
      "file" = "foo",
      "dir"), {
      # Map path length over the entries: "dir" -> 3, "file" -> 4
      # ("dir" sorts before "file").
      nc <- function(x) nchar(x, keepNA = FALSE)
      expect_equal(dir_map(type = "file", fun = nc), list(4))
      expect_equal(dir_map(type = "directory", fun = nc), list(3))
      expect_equal(dir_map(type = c("file", "directory"), fun = nc), list(3, 4))
    })
  })
  it("errors on missing input", {
    expect_error(dir_map(NA, fun = identity), class = "invalid_argument")
  })
  # Unreadable entries: the default fail = TRUE errors, fail = FALSE warns.
  it("warns if fail == FALSE", {
    skip_on_os("windows")
    if (Sys.info()[["effective_user"]] == "root") skip("root user")
    with_dir_tree(list(
      "foo",
      "foo2/bar/baz"), {
      file_chmod("foo", "a-r")
      expect_error(dir_map(".", fun = identity, recurse = TRUE), class = "EACCES")
      expect_warning(dir_map(fail = FALSE, fun = identity, recurse = TRUE), class = "EACCES")
      file_chmod("foo", "a+r")
      file_chmod("foo2/bar", "a-r")
      expect_warning(dir_map("foo2", fail = FALSE, fun = identity, recurse = TRUE), class = "EACCES")
      file_chmod("foo2/bar", "a+r")
    })
  })
})
# dir_walk(): call a function on each directory entry purely for its side
# effects.  These tests record the visited paths by assigning into `x`
# with `<<-`.
describe("dir_walk", {
  it("can find multiple types", {
    x <- character()
    f <- function(p) x <<- p
    with_dir_tree(list(
      "file" = "foo",
      "dir"), {
      link_create(path_abs("dir"), "link")
      dir_walk(type = "file", fun = f)
      expect_equal(x, "file")
      dir_walk(type = "directory", fun = f)
      expect_equal(x, "dir")
      dir_walk(type = "symlink", fun = f)
      expect_equal(x, "link")
      # For multi-type walks, accumulate all visited paths.
      x <- character()
      dir_walk(type = c("directory", "symlink"), fun = function(p) x <<- c(x, p))
      expect_equal(x, c("dir", "link"))
      x <- character()
      dir_walk(type = c("file", "directory", "symlink"), fun = function(p) x <<- c(x, p))
      expect_equal(x, c("dir", "file", "link"))
    })
  })
  it("errors on missing input", {
    expect_error(dir_walk(NA, fun = identity), class = "invalid_argument")
  })
  # Unreadable entries: the default fail = TRUE errors, fail = FALSE warns.
  it("warns if fail == FALSE", {
    skip_on_os("windows")
    if (Sys.info()[["effective_user"]] == "root") skip("root user")
    with_dir_tree(list(
      "foo",
      "foo2/bar/baz"), {
      file_chmod("foo", "a-r")
      expect_error(dir_walk(".", fun = identity, recurse = TRUE), class = "EACCES")
      expect_warning(dir_walk(fail = FALSE, fun = identity, recurse = TRUE), class = "EACCES")
      file_chmod("foo", "a+r")
      file_chmod("foo2/bar", "a-r")
      expect_warning(dir_walk("foo2", fail = FALSE, fun = identity, recurse = TRUE), class = "EACCES")
      file_chmod("foo2/bar", "a+r")
    })
  })
})
# dir_info(): tabular metadata for directory entries.
describe("dir_info", {
  it("is identical to file_info(dir_ls())", {
    with_dir_tree(list(
      "file" = "foo",
      "dir"), {
      link_create(path_abs("dir"), "link")
      expect_identical(dir_info(), file_info(dir_ls()))
    })
  })
  it("errors on missing input", {
    expect_error(dir_info(NA), class = "invalid_argument")
  })
  # Unreadable entries: the default fail = TRUE errors, fail = FALSE warns.
  it("warns if fail == FALSE", {
    skip_on_os("windows")
    if (Sys.info()[["effective_user"]] == "root") skip("root user")
    with_dir_tree(list(
      "foo",
      "foo2/bar/baz"), {
      file_chmod("foo", "a-r")
      # Fix: the `fun = identity` arguments were copy-pasted from the
      # dir_map()/dir_walk() tests above; dir_info() takes no `fun`
      # argument, so they were spurious and have been removed.
      expect_error(dir_info(".", recurse = TRUE), class = "EACCES")
      expect_warning(dir_info(fail = FALSE, recurse = TRUE), class = "EACCES")
      file_chmod("foo", "a+r")
      file_chmod("foo2/bar", "a-r")
      expect_warning(dir_info("foo2", fail = FALSE, recurse = TRUE), class = "EACCES")
      file_chmod("foo2/bar", "a+r")
    })
  })
})
|
d8a152d6cd03dee339ae65b5131f59a405855084 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/splus2R/examples/allTrue.Rd.R | 630b07352b38468a9fcefc7521b0f42dc54915bf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,353 | r | allTrue.Rd.R | library(splus2R)
### Name: allTrue
### Title: Test whether all expressions return TRUE
### Aliases: allTrue
### Keywords: utilities
### ** Examples
# This is the type of expression that may be found in a test file
# to be run by do.test -- inside {} are lines that create one or
# more objects, followed by multiple tests (inside allTrue) that
# check those objects.
{
y <- rnorm(30)
x <- matrix(rnorm(60), ncol=2)
fit <- lm(y~x)
allTrue(# are important components included?
all(is.element(c("coefficients", "residuals", "effects", "rank",
"fitted.values", "assign", "df.residual", "call"),
names(fit))),
{
# do coefficients match the algebraic form?
# The algebraic form is inaccurate, so allow greater tolerance
X <- cbind(1, x)
all.equal(unname(fit$coefficients),
drop(solve( t(X) %*% X, t(X) %*% y)),
tol = 1e-5)
},
# are residuals computed correctly?
all.equal(fit$residuals, y - X %*% fit$coefficients))
}
# The second test uses 'unname' to remove names and 'drop' to change a
# matrix to a vector, so the test should pass.
# The third test fails because fit$residuals is a vector with names
# while the %*% calculation returns a matrix.
|
0a327f3bf044d912df7e005306802979bfe28726 | 094f81c31a3cfd560b24280e476d5af4fb52b9e3 | /R/zipper.R | 27d051162c9124b1fa35f43be552a1820ddc2581 | [
"MIT"
] | permissive | PJOssenbruggen/Basic | 6c2343dcb135cb364d059160925ded5cb43b5455 | 1885fa40d3318cc554b4dd80154b263baef19ac4 | refs/heads/master | 2021-01-25T11:57:19.583401 | 2019-01-04T13:03:32 | 2019-01-04T13:03:32 | 123,449,454 | 0 | 0 | null | 2018-03-05T12:26:55 | 2018-03-01T14:56:48 | R | UTF-8 | R | false | false | 3,512 | r | zipper.R | #' The \code{zipper} function can be used to simulate interaction among three vehicles in car following
#' on a single lane or three vehicles merging on a two-lane highway.
#'
#' @param tstart start time, a number
#' @param tend end time, a number
#' @param ustart1 start speed (mph) for vehicle in lane 1, a number
#' @param uend1 end speed (mph) for vehicle in lane 1, a number
#' @param xstart1 start location for vehicle in lane 1 (feet), a number
#' @param xend1 end location for vehicle in lane 1 (feet), a number
#' @param ustart2 start speed (mph) for vehicle in lane 2, a number
#' @param uend2 end speed (mph) for vehicle in lane 2, a number
#' @param xstart2 start location for vehicle in lane 2 (feet), a number
#' @param xend2 end location for vehicle in lane 2 (feet), a number
#' @param ustart3 start speed (mph) for vehicle in lane 3, a number
#' @param uend3 end speed (mph) for vehicle in lane 3, a number
#' @param xstart3 start location for vehicle in lane 3 (feet), a number
#' @param xend3 end location for vehicle in lane 3 (feet), a number
#' @return \code{zipper} uses a deterministic model and animation to illustrate an ``idealistic'' situation,
#' a so-called a ``zipper merge.''
#' @usage zipper(tstart, tend,
#' ustart1, uend1, xstart1, xend1,
#' ustart2, uend2, xstart2, xend2,
#' ustart3, uend3, xstart3, xend3)
# #' @examples
# #' zipper(0,60,90,90,0,5000,90,90,-200,4500,90,90,-500, 4000)
# #' zipper(0,40,85,90,0,4000,90,99,0,4500,90,90,-500, 4200)
# #' zipper(0,5,60,20,0,500,65,20,-100,467,80,20,-350,433)
#' @export
zipper <- function(tstart, tend,
                   ustart1, uend1, xstart1, xend1,
                   ustart2, uend2, xstart2, xend2,
                   ustart3, uend3, xstart3, xend3
                   )
{
  # Positions and speeds are evaluated on a common 0.2-second grid.
  tseq <- seq(0, tend, by = 0.2)
  mph2fps <- 5280 / 3600  # mph -> feet/second conversion factor

  # Trajectory of a single vehicle on `tseq`.
  # xabparam() fits the (a, b) motion-model parameters that move the vehicle
  # from (xstart, ustart) to (xend, uend); uab()/xab() evaluate speed and
  # position at a single time point (all three are package helpers).
  trajectory <- function(ustart, uend, xstart, xend, label) {
    dfab <- xabparam(tstart, tend,
                     ustart = ustart * mph2fps, uend = uend * mph2fps,
                     xstart = xstart, xend = xend)
    a  <- dfab[1]
    b  <- dfab[2]
    u0 <- ustart * mph2fps
    t0 <- 0
    u  <- vapply(tseq, function(t) uab(u0, a = a, b = b, t, t0), numeric(1))
    x  <- vapply(tseq, function(t) xab(xstart, u0, a = a, b = b, t, t0), numeric(1))
    data.frame(t = tseq, u = u, x = x, Vehicle = label)
  }

  # Build all three trajectories with one helper instead of repeating the
  # block three times, and avoid growing data frames row-by-row with
  # rbind() inside a loop (the original also initialized several unused
  # xfseq*/ufseq* vectors, dropped here).
  df <- rbind(trajectory(ustart1, uend1, xstart1, xend1, "1"),
              trajectory(ustart2, uend2, xstart2, xend2, "2"),
              trajectory(ustart3, uend3, xstart3, xend3, "3"))

  # Animated time-space diagram: colour = vehicle, point size = speed.
  p <- ggplot2::ggplot(df, ggplot2::aes(t, x, colour = Vehicle, size = u, frame = t)) +
    ggplot2::geom_point()
  return(p)
}
|
658e37e34154324b8b477563816e1385ab3ec2c7 | 1688a2ff3643cb1872421708fb166bc9f87ecbdd | /gap_plot.R | 70c916558d60fbe2e376335f0b63f5b6e0f274df | [] | no_license | ccjolley/DECA-app | 71d7c765ce8c4f8d0c91d2f3d57a1ec486b7e684 | 15d31fbd4d9bcfa6d89481d74b24d2352d6a3a69 | refs/heads/master | 2020-12-04T06:29:38.124825 | 2020-03-25T17:00:25 | 2020-03-25T17:00:25 | 231,657,506 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,948 | r | gap_plot.R | source('plots.R')
library(stringr)
wb_findex <- read_csv('findex.csv') %>%
select(-starts_with('barrier_'))
# TODO: find another source on urban/rural population split so I can calculate urban levels
# A better plot for visualizing gender (and similar) gaps
# Instead of showing a singe "gap score", show the absolute numbers in a barbell plot
gap_list <- c('Male/Female','Rich/Poor','Educated/Uneducated','Old/Young','Employed/Unemployed','Urban/Rural')
gap_captions <- c('','"Rich" = richest 40% of population, "Poor" = poorest 60% of population',
'"Educated" = secondary or higher, "Uneducated" = primary or less',
'"Old" = 25+ years, "Young" = 15-24 years','',
'NOTE: WB Findex doesn\'t disaggregate urban populations; urban values inferred')
gap_vars <- c('Account ownership','Borrowing','Digital payments','Mobile money','Internet use')
get_gap_vars <- function(gap,meas) {
gap_types <- tibble(gap_label=gap_list,
suffix1=c('_m','_rich','_ed','_old','_labor','_urban'),
suffix2=c('_f','_poor','_uned','_young','_nolabor','_rural'))
measurements <- tibble(meas_label=gap_vars,
prefix=c('acct','borrow','dig_pay','mm','internet'))
var1 <- paste0(filter(measurements,meas_label==meas)$prefix,
filter(gap_types,gap_label==gap)$suffix1)
var2 <- paste0(filter(measurements,meas_label==meas)$prefix,
filter(gap_types,gap_label==gap)$suffix2)
c(var1,var2)
}
gap_plot <- function(gap,meas,country_list) {
if (meas == 'Internet use' && gap != 'Male/Female') {
stop('Only the Male/Female gap is available for the ITU Internet use variable. Please choose another plot.')
}
gv <- get_gap_vars(gap,meas)
var1 <- gv[1]
var2 <- gv[2]
lab1 <- str_extract(gap,"^[A-Za-z]+")
lab2 <- str_extract(gap,"[A-Za-z]+$")
segs <- wb_findex %>%
select(country,var1,var2) %>%
filter(country %in% country_list) %>%
rename(v1=2,v2=3) %>%
mutate(m = (v1+v2)/2,
country=fct_reorder(country,m)) %>%
select(-m) %>%
na.omit
dots <- segs %>%
melt %>%
mutate(variable=ifelse(variable=='v1',lab1,lab2),
variable=factor(variable,levels=c(lab1,lab2)))
if (meas=='Internet use') {
source <- 'ITU'
} else {
source <- 'WB Findex'
}
ggplot(segs) +
geom_segment(aes(x=v1,xend=v2,y=country,yend=country),color='#6C6463') +
geom_point(data=dots,aes(x=value,y=country,color=variable),size=5) +
theme_USAID + colors_USAID +
scale_x_continuous(labels=scales::percent_format(accuracy=1)) +
theme(axis.title.y=element_blank(),
legend.title=element_blank()) +
xlab(paste0(meas,' (',source,')')) +
labs(title=paste0(meas,' (',gap,')'),
caption=gap_captions[which(gap_list==gap)])
}
gap_plot('Male/Female','Mobile money',
c('Cyprus','Finland','Brazil','Argentina'))
|
7c82c3659147394ed2948ecaa7820e09b07028bb | 58f4a54e7d85fa0cc013d97f80c9a8507784c5c5 | /man/rinker_interviews_formality.Rd | 4ef3a857c3c7c7a81d7b24a4b68eeffd79bcaefc | [] | no_license | trinker/testing | ebec35af8f0e16cba7c34d2ca99f70d0d6117e50 | dc08f09d64d0a980614cd91447531736db94e72c | refs/heads/master | 2021-01-01T05:22:56.755359 | 2016-11-06T05:00:28 | 2016-11-06T05:00:28 | 56,280,598 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 870 | rd | rinker_interviews_formality.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rinker_interviews_formality.R
\docType{data}
\name{rinker_interviews_formality}
\alias{rinker_interviews_formality}
\title{Interview Formality}
\format{A data frame with 1 rows and 13 variables}
\usage{
data(rinker_interviews_formality)
}
\description{
A dataset containing a formality object for interview.
}
\details{
\itemize{
\item all. The grouping variable.
\item noun. Noun counts.
\item preposition. Preposition counts.
\item adjective. Adjective counts.
\item article. Article counts.
\item verb. Verb counts.
\item pronoun. Pronoun counts.
\item adverb. Adverb counts.
\item interjection. Interjection counts.
\item formal. Formal counts.
\item contextual. Contextual counts.
\item n. Total words analyzed.
\item F. Formality score.
}
}
\keyword{datasets}
|
f5a33b6ac51d80f91aaafd10f2e05231711c2959 | c45d72d7b9ab0496cd5461222ecebf75b070b67a | /modularity_conn/31.role_tables.R | fad90176096f4d761f4233725d352f408ab4a984 | [] | no_license | michaelandric/steadystate | 3ca33888effbcb56a4db602cfcf1529e57555031 | f6da02cbb7d481725cabc88bbb56b2a2749f0c8c | refs/heads/master | 2021-01-19T11:02:40.227087 | 2014-06-17T21:57:06 | 2014-06-17T21:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 654 | r | 31.role_tables.R | ## generate role tables
subjects <- c("ANGO","CLFR","MYTP","TRCO","PIGL","SNNW","LDMW","FLTM","EEPA","DNLN","CRFO","ANMS","MRZM","MRVV","MRMK","MRMC","MRAG","MNGO","LRVN")
conditions <- seq(4)
for (ss in subjects)
{
setwd(paste(Sys.getenv("state"),"/",ss,"/corrTRIM_BLUR/",sep=""))
print(getwd())
role_mat <- matrix(nrow=7,ncol=4)
for (cc in conditions)
{
dat <- as.matrix(read.table(paste(ss,".",cc,".node_roles",sep="")))
for (i in seq(7))
{
role_mat[i,cc] = length(which(dat == i))
}
}
write.table(role_mat, paste(ss,"_roletable.txt",sep=""),row.names=F,col.names=F,quote=F)
}
|
d0da6cd4f6d2078818cb6084e0939bd74292ae7e | 870e48cd13c3876cf580e1dd4701dd8ae4cf8dd8 | /MapUS.R | 33656443a1e5b048ae929d4ec34cacc918fb367e | [] | no_license | CUBigDataClass/Twitter-EmotiMap | 5fc6386580ead73c56fa5be8e360fe32afc783d9 | a87582953bf840bbc0da9aa444f7dc2ec970d72d | refs/heads/master | 2020-05-29T12:30:52.417144 | 2014-05-16T14:31:46 | 2014-05-16T14:31:46 | 16,750,294 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,927 | r | MapUS.R |
##############################################################################
# Plot tweet geolocations over a US map, colour/size-coded by the tweets'
# literacy score, and save the result as US_Literacy.png.
Sys.setenv(NOAWT = 1)
library(OpenStreetMap)
library(rgdal)
library(stringr)
library(ggplot2)
library(ggmap)
library(maps)
library(scales)
# Fix: R package names are case-sensitive; library(Rgooglemaps) fails to
# load — the package is RgoogleMaps.
library(RgoogleMaps)
#Set Working Directory
setwd("~/Documents/School/Computing/Twitter-EmotiMap/DataGen")
#Get geo location of tweets and the percent of literacy of the tweets
LatLong <- read.csv("LatLong.csv")
PercentLit <- read.csv("PercentLit.csv")
#Create data frame for all geo location of all tweets with literacy score
geoFrame <- data.frame(Lat = LatLong[2], Long = LatLong[1], PercLit = PercentLit[1])
#Label the Columns of the geoFrame for easy calling into that data frame
names(geoFrame)[1] <- "Lat"
names(geoFrame)[2] <- "Long"
names(geoFrame)[3] <- "Perclit"
# Quick base-graphics sanity check of the raw coordinates.
map("usa")
points(x = geoFrame$Long, y = geoFrame$Lat)
# Project lat/long into Mercator coordinates to match the tile basemap.
geoMerc <- as.data.frame(projectMercator(geoFrame$Lat, geoFrame$Long), drop = FALSE)
names(geoMerc)[1] <- "Lat"
names(geoMerc)[2] <- "Long"
geoFrame$Lat= geoMerc$Lat
geoFrame$Long = geoMerc$Long
test <- get_map(location = 'united states', zoom = 4)
ggmap(test)
# Earlier basemap experiments (kept for reference; only `test` is used below).
MapUS <- get_map(location = 'New York City', zoom = 9)
MapUS <- openmap(c(49.345786,-124.794409), c(24.7433195,-66.9513812), type = 'stamen-watercolor')
MapUS <- openmap(c(50.0,-124.794409), c(15.0,-66.9513812), type = 'stamen-watercolor')
map <- ggmap(test) + geom_point(data = geoFrame, aes(x = Long, y = Lat, size = Perclit, color = Perclit), alpha = .5) +
  scale_size_continuous(range = c(1, 2)) + theme(axis.line = element_blank(), axis.text.x = element_blank(),
  axis.text.y = element_blank(), axis.ticks = element_blank(),
  axis.title.x = element_blank(), axis.title.y = element_blank())+scale_colour_gradient(low="white", high="black")
map
# Fix: ggsave() opens and closes its own graphics device, so the previous
# png("US_Literacy.png", 811, 588) call only left an empty device open (a
# resource leak) and risked clobbering the saved file.  Save explicitly.
ggsave("US_Literacy.png", plot = map, dpi = 300)
##############################################################################
c29ba3aa7a3754a1ea9ed83611594a7ce2dca3bf | ef7ce2d16174e46b1db92333effa753603b33ec7 | /Exercise7_Liu_Yu.R | 93cbebc35250cfbb3c000d952602390768dd9f82 | [] | no_license | nationstrong/Biocomp_exercise07 | 5eef0ff46fcfbd33e52af711c38bc7995ddd7b76 | 23a1b3efcb8c515d81003dc712cfed06d05b7e6d | refs/heads/master | 2022-03-16T01:57:28.485337 | 2019-11-15T07:07:12 | 2019-11-15T07:07:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,359 | r | Exercise7_Liu_Yu.R | ## Excercise 7
# Question 1
# Write a function that returns the odd (1, 3, 5, etc.) rows of any dataframe passed as an argument.
# Return (and print) the odd-numbered rows (1, 3, 5, ...) of a data frame.
#
# df: any data frame with at least one row. seq() enumerates the odd row
# positions directly, which handles even and odd row counts with a single
# expression instead of the duplicated if/else arithmetic.
oddrows = function(df){
row.index = seq(from = 1, to = nrow(df), by = 2)
print(df[row.index,])
}
# Alternative solution to Question 1
# Alternative (loop-based) solution: collect the odd row indices, then print
# that subset of the data frame.
oddrows = function(df){
row.index = c()
# FIX: seq_len(nrow(df)) is empty for a 0-row data frame, whereas the
# original c(1:rownumber) yielded c(1, 0) and indexed a nonexistent row.
for (i in seq_len(nrow(df))){
if(i %% 2 == 1){
row.index = c(row.index,i)
}
}
print(df[row.index,])
}
# Question 2
# load dataset - expects iris.csv in the working directory; the result is the
# global `data` frame (columns include Species and Sepal.Width) used by the
# helper functions defined below
data = read.csv('iris.csv')
# return the number of observations for a given species included in the data set
# Print how many rows of the global `data` set belong to the given species.
number.of.obs = function(species){
matching.rows = which(data$Species == species)
print(length(matching.rows))
}
# return a dataframe for flowers with Sepal.Width greater than a value specified by the function user
# Return the rows of the global `data` set whose Sepal.Width is strictly
# greater than the user-supplied threshold n.
df.of.spl = function(n){
data[which(data$Sepal.Width > n), ]
}
# write the data for a given species to a comma-delimited file with the given species name as the file name; Hint: look at paste() to add the .csv extension to your file in the for-loop.
# Write the rows of the global `data` set for one species to "<species>.csv"
# in the current working directory.
write.species = function(species){
out.file = paste(species, '.csv', sep = '')
write.csv(data[which(data$Species == species), ], out.file)
}
|
8f4a14f32c800540a955ab1c1dca33d64ccfb3d5 | 4ad1c361c12dfcb71bb00b5e1baee6d3d3a3f4a5 | /R/MaraDownstream.R | 780982f843ebe80665dc16ffa8cc946fa481f58a | [] | no_license | jakeyeung/SleepDepAnalysis | 3e0c78c9dea123cedd6b13515c4230fcfb0d6541 | 92b941c91ece6a6e9349160583e7d96d9ec65714 | refs/heads/master | 2021-12-16T06:46:48.955058 | 2021-12-06T09:15:07 | 2021-12-06T09:15:07 | 168,015,963 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,010 | r | MaraDownstream.R | #source("R/functions/PlotFunctions.auxiliary.R")
# Load MARA motif-activity output from `act.dir`.
#
# Reads four fixed-name, tab-separated files (Activities, Zscores, Colnames,
# StandardError), reshapes activities and standard errors to long format
# (one row per motif x sample) via MatToLong(), joins them on the shared
# columns, and sorts motifs by decreasing z-score.
#
# Returns a list with:
#   act.long - long data frame: gene (motif), sample, exprs, sem, time, samp
#   zscores  - data frame (motif, zscore) sorted by decreasing zscore
#
# NOTE(review): relies on dplyr::inner_join() and MatToLong() defined below.
LoadMaraOutput <- function(act.dir="data/sleep_deprivation_gene_exprs_all"){
act.f <- file.path(act.dir, "Activities")
zscores.f <- file.path(act.dir, "Zscores")
cnames.f <- file.path(act.dir, "Colnames")
se.f <- file.path(act.dir, "StandardError")
act.mat <- read.table(act.f, header = FALSE, sep = "\t")
zscores <- read.table(zscores.f, header = FALSE, sep = "\t", col.names = c("motif", "zscore"))
se.mat <- read.table(se.f, header = FALSE, sep = "\t")
zscores <- zscores[order(zscores$zscore, decreasing = TRUE), ]
# get colnames (one sample label per line, e.g. "ZT_00_1")
cnames <- readLines(cnames.f)
# add "gene" to first cname: first matrix column holds the motif/gene id
cnames <- c("gene", cnames)
act.long <- MatToLong(act.mat, cnames = cnames, jid.vars = "gene", jvar.name = "sample", jval.name = "exprs")
se.long <- MatToLong(se.mat, cnames = cnames, jid.vars = "gene", jvar.name = "sample", jval.name = "sem")
act.long <- inner_join(act.long, se.long)
return(list(act.long=act.long, zscores=zscores))
}
# Reshape a wide matrix/data frame to long format: attach the supplied
# column names, melt to (id, sample, value) rows, then parse the time point
# and replicate number out of each sample label (e.g. "ZT_00_1").
MatToLong <- function(dat.mat, cnames, jid.vars = "gene", jvar.name = "sample", jval.name = "exprs"){
colnames(dat.mat) <- cnames
long <- melt(dat.mat, id.vars = jid.vars, variable.name = jvar.name, value.name = jval.name)
long$time <- sapply(long$sample, LongSampToTime)
long$samp <- sapply(long$sample, LongSampToRep)
return(long)
}
LongSampToTime <- function(s){
# "ZT_00_1" -> 0 : the time point is the second underscore-delimited field
fields <- strsplit(as.character(s), "_")[[1]]
as.numeric(fields[2])
}
LongSampToRep <- function(s){
# "ZT_00_1" -> 1 : the replicate is the third underscore-delimited field
fields <- strsplit(as.character(s), "_")[[1]]
as.numeric(fields[3])
}
# Plot one motif's TF-activity time course with +/- SEM error bars.
#
# jsub          - data summarised across samples; must have columns
#                 time, exprs and sem
# add.sd.period - if TRUE, shade the sleep-deprivation period via the
#                 project helper AddSDPeriod()
# Returns the ggplot object (dashed vertical lines mark day boundaries
# every 24 h up to 78 h; fmt_dcimals() is a project label formatter).
PlotMara <- function(jsub, add.sd.period = TRUE){
# jsub is summarised across samples (expect exprs and sem)
decimal.places <- 1 # NOTE(review): unused; y labels use fmt_dcimals(1) below
jxlim <- range(jsub$time)
m <- ggplot(data = jsub, aes(x = time, y = exprs, ymin = exprs - sem, ymax = exprs + sem)) +
geom_line() +
geom_errorbar(alpha = 0.25)
m <- m + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
# scale_x_continuous(limits = jxlim, breaks = seq(jxlim[1], jxlim[2], 6)) +
geom_vline(xintercept = seq(0, 78, 24), linetype = "dashed", colour = "black")
m <- m + scale_x_continuous(limits = jxlim, breaks = seq(jxlim[1], jxlim[2], 6), expand = c(0, 0)) +
scale_y_continuous(labels = fmt_dcimals(1))
if (add.sd.period){
m <- AddSDPeriod(m)
}
return(m)
}
# Stacked panel: TF-activity time course (top, 4/5 of the height) over an
# EEG trace (bottom, 1/5), aligned on the same x range.
#
# jsub         - summarised motif data (see PlotMara)
# dat.eeg.plot - EEG data passed through to the project helper PlotEeg()
# jtitle       - plot title; defaults to the motif/gene name in jsub
# labsize      - axis text/title size for both panels
# ysize        - y-axis text size for the EEG panel
# The number of decimals on the activity axis (ndecs) is measured so the
# EEG panel's y labels can be padded to the same width, keeping the two
# panels' axes visually aligned.
PlotMara.withEEG <- function(jsub, dat.eeg.plot, jtitle, labsize = 20, ysize=20){
if (missing(jtitle)){
jtitle <- unique(as.character(jsub$gene))
}
jxlim <- range(jsub$time)
m.gene <- PlotMara(jsub) +
ylab("TF activity [A.U.]") +
AddDarkPhases(alpha = 0.1) +
ggtitle(jtitle) +
theme(legend.position= "top")
# calculate ndecs required for PlotEeg
ylabs <- GetYMajorSource(m.gene)
# ylabs <- ggplot_build(m.gene)$layout$panel_params[[1]]$y.major_source
# if (is.null(ylabs)){
#   warning("Ylabs is NULL perhaps your hacky script (ggplot_build(m)$layout$panel_params[[1]]$y.major_source) has failed")
# }
ndecs <- max(sapply(ylabs, NDecimals), na.rm = TRUE) # PlotFunctions.auxiliary.R
if (min(ylabs) < 0){
ndecs <- ndecs + 1 # account for negative sign
}
m.eeg <- PlotEeg(dat.eeg.plot, jxlim = jxlim, n.decs = ndecs)
# Layout: rows 1-4 for the gene panel, row 5 for the EEG panel
jlay <- matrix(c(rep(1, 4), 2), ncol = 1)
# print("My labels")
# print(c(paste0("-0.", paste(rep(0, ndecs), collapse="")), paste0("-5.", paste(rep(0, ndecs), collapse=""))))
# NOTE(review): jticks is computed but never used below - confirm intended
jticks <- c(paste0("-0.", paste(rep(0, ndecs), collapse="")), paste0("-5.", paste(rep(0, ndecs), collapse="")))
multiplot(m.gene + xlab("") + scale_x_continuous(breaks = NULL) +
theme(axis.text=element_text(size=labsize), axis.title=element_text(size=labsize), title=element_text(size=0.4*labsize)),
m.eeg + theme(axis.text=element_text(size=labsize), axis.title=element_text(size=labsize), axis.text.y=element_text(size=ysize)), layout = jlay)
}
|
7bd228f593167368b03a86f53cf0af32f249c854 | d3756e689a59b564a7ba0aec7c6c7130cc811815 | /R/compare.bio.rep.r | c977b268ab940930cb5ad696413bc0bc54f2c755 | [] | no_license | PacificCommunity/ofp-sam-r4mfcl | 710824eb52bc02a604123ca6860d9271ddcd949b | 6b8f9e6f452b6d203da65a1d8902e28fd2f5a175 | refs/heads/master | 2023-02-21T12:52:16.632352 | 2023-02-07T02:28:02 | 2023-02-07T02:28:02 | 23,813,801 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,100 | r | compare.bio.rep.r | #' @export
# Compare up to 7 stock-assessment model runs side by side.
#
# Arguments (all parallel vectors, one entry per model):
#   vrep    - paths to .rep output files (read with read.rep)
#   vpar    - paths to .par files (read with read.par)
#   vcrep   - paths passed to get.outcomes.2014 together with vrep/vpar
#   modnms  - display names used in legends and table headers
#   plotfol - output folder for plots and tables
#   plotnm  - filename prefix for all outputs
#
# Side effects: saves two PNG comparison plots (total biomass, growth
# curves) via savePlot(), and writes three text tables (objective-function
# values, outcome quantities, and outcomes relative to the first model).
# NOTE(review): read.rep(), read.par() and get.outcomes.2014() come from
# elsewhere in this package.
compare.bio.rep <- function (vrep = c(""), vpar = c(""), vcrep = c(""), modnms = c(""),
    plotfol = c(""), plotnm = "")
{
    if (length(vrep) > 7) {
        stop("No. models to compare > 7!!")
    }
    # Internal labels a..g for up to 7 models
    rslt <- c("a_rslt", "b_rslt", "c_rslt", "d_rslt", "e_rslt",
        "f_rslt", "g_rslt")
    pars <- c("a_par", "b_par", "c_par", "d_par", "e_par", "f_par",
        "g_par")
    # Read every model's report and parameter files
    lst.rslt <- list()
    lst.pars <- list()
    for (i in 1:length(vrep)) {
        lst.rslt[[i]] <- read.rep(vrep[i])
        lst.pars[[i]] <- read.par(vpar[i])
    }
    names(lst.rslt) <- rslt[c(1:length(vrep))]
    names(lst.pars) <- pars[c(1:length(vrep))]
    mods <- c("a", "b", "c", "d", "e", "f", "g")
    mods <- mods[c(1:length(vrep))]
    modnames <- modnms
    plotfol <- plotfol
    # --- Plot 1: total biomass per time interval, one line per model ---
    # First pass finds the y-axis maximum and each model's series length
    ymax <- 0
    nmax <- 0
    nmodyrs <- vector(mode = "numeric", length = length(mods))
    for (i in 1:length(mods)) {
        yy <- rowSums(lst.rslt[[i]][["TotBiomass"]])
        ymax <- max(max(yy), ymax)
        nmax <- max(length(yy), nmax)
        nmodyrs[i] <- length(yy)
    }
    # Pad shorter series with NA so all models share one matrix
    ymat <- matrix(NA, nrow = nmax, ncol = length(mods))
    for (i in 1:length(mods)) {
        ymat[c(1:nmodyrs[i]), i] <- rowSums(lst.rslt[[i]][["TotBiomass"]])
    }
    yy <- ymat[, 1]
    plot(1:length(yy), yy, type = "n", lty = 1, lwd = 1, main = "Comparison total biomass",
        xlab = "Time interval", ylab = "Biomass (mt)", ylim = c(0,
            ymax))
    for (i in 1:length(mods)) {
        lines(1:length(ymat[, 1]), ymat[, i], type = "l", lty = i,
            lwd = 1, col = i)
    }
    legend(0, ymax, legend = modnames, lty = c(1:length(mods)),
        col = c(1:length(mods)), lwd = 3)
    filnm <- paste(plotfol, "/", plotnm, "_biom_comprsn", sep = "")
    savePlot(filnm, type = "png")
    # --- Plot 2: mean length-at-age +/- 1 sd, one colour per model ---
    ymax <- 0
    for (i in 1:length(mods)) {
        yy <- lst.rslt[[i]][["mean.LatAge"]]
        yy_sd <- lst.rslt[[i]][["sd.LatAge"]]
        ymax <- max(max(yy + yy_sd), ymax)
    }
    yy <- lst.rslt[[1]][["mean.LatAge"]]
    yy_sd <- lst.rslt[[1]][["sd.LatAge"]]
    plot(1:length(yy), yy, type = "n", lty = 1, lwd = 1, main = "Comparison growth curves (+/- 1sd)",
        xlab = "Age", ylab = "Length (cm)", ylim = c(0, ymax))
    for (i in 1:length(mods)) {
        yy <- lst.rslt[[i]][["mean.LatAge"]]
        yy_sd <- lst.rslt[[i]][["sd.LatAge"]]
        lines(1:length(yy), yy, type = "l", lty = 1, lwd = 3,
            col = i)
        lines(1:length(yy), yy + yy_sd, type = "l", lty = 3,
            lwd = 3, col = i)
        lines(1:length(yy), yy - yy_sd, type = "l", lty = 3,
            lwd = 3, col = i)
    }
    legend(0, ymax, legend = modnames, lty = 1, col = c(1:length(mods)),
        lwd = 3)
    filnm <- paste(plotfol, "/", plotnm, "_growth_comprsn", sep = "")
    savePlot(filnm, type = "png")
    # --- Table 1: objective-function ("likelihood") value per model ---
    like_tbl <- data.frame(Mods = mods, Like = rep(NA, length = length(mods)))
    for (i in 1:length(mods)) {
        like_tbl[i, "Like"] <- lst.pars[[i]][["obj"]]
    }
    filnm <- paste(plotfol, "/", plotnm, "_likl_comprsn.txt",
        sep = "")
    write.table(like_tbl, file = filnm, quote = FALSE, row.names = FALSE)
    # --- Tables 2 & 3: outcome quantities, absolute and relative ---
    mod_outs <- list()
    for (i in 1:length(mods)) {
        mod_outs[[i]] <- get.outcomes.2014(vrep[i], vpar[i],
            vcrep[i])
    }
    names(mod_outs) <- modnames
    outputs <- c(names(mod_outs[[1]]))
    comprsn_outs <- data.frame(Qnts = outputs, dummy = rep(NA,
        length = length(outputs)))
    for (i in 1:length(mods)) {
        comprsn_outs[, (i + 1)] <- unlist(mod_outs[[i]])
    }
    names(comprsn_outs)[2:(length(mods) + 1)] <- modnames
    # Express every model's outcomes as a fraction of the first model's
    comprsn_pcntg_outs <- comprsn_outs
    comprsn_pcntg_outs[, c(2:(length(mods) + 1))] <- comprsn_pcntg_outs[,
        c(2:(length(mods) + 1))]/comprsn_outs[, 2]
    filnm <- paste(plotfol, "/", plotnm, "_comprsn_outs.txt",
        sep = "")
    write.table(comprsn_outs, file = filnm, quote = FALSE, row.names = FALSE)
    filnm <- paste(plotfol, "/", plotnm, "_comprsn_pcntg_outs.txt",
        sep = "")
    write.table(comprsn_pcntg_outs, file = filnm, quote = FALSE,
        row.names = FALSE)
}
|
719d1cb4916ca99a7a75c13e8ee2e63cb655b0e6 | 824ddd92b46d8eea656919f2dfc03914f91e2807 | /100_adsl_cancer.R | 97a081adbef8ef0dc8819642b6e30dd9b98ade54 | [] | no_license | coursephd/PostgreSQL | beec040f412c97da8b8df0c77ca7daca1a2f6bc5 | 0bf90e3505c64e77204fb8a849918013c13d7601 | refs/heads/master | 2023-09-01T21:31:21.627358 | 2023-08-24T06:27:08 | 2023-08-24T06:27:08 | 127,494,908 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,076 | r | 100_adsl_cancer.R |
######################################################
# Program name: 100_adsl_cancer.R
# Output files: 01adsl_cancer.csv, 01adsl_cancer.rds
# Create calculations using base01_ip and base01_op
######################################################
# Legend for the service `conducted` status codes used below:
#C- Cancelled
#U - Condn. Unnecessary
#Y -Conducted
#N - Not Conducted
#P - Partially Conducted
library(data.table)
library(dplyr)
library(anytime)
# Get all the data IP, OP and Service
base01_ip <- fread("D:/Hospital_data/ProgresSQL/source/base01_ip.csv")
base01_op <- fread("D:/Hospital_data/ProgresSQL/source/base01_op.csv")
base01_ser <- fread("D:/Hospital_data/ProgresSQL/source/base01_ser.csv")
pat_diag_vis <- fread("D:/Hospital_data/ProgresSQL/source/pat_diag_vis.csv")
# Get the disease category list for MCSD and Metabolic
# This is replaced by cancer data with a subset for "Arbuda" below
#discat <- data.table( fread ("D:/Hospital_data/ProgresSQL/analysis/discategory.csv") )
# Get the medication and service list, stacked into one lookup table
med <- data.table( fread ("D:/Hospital_data/ProgresSQL/source/med.csv") )
ser <- data.table( fread ("D:/Hospital_data/ProgresSQL/source/services.csv") )
medall <- rbind(med, ser, fill = TRUE)
rm(med, ser)
########################################################
# Work on the services data
# get the date converted to numeric date
# get the minimum and maximum date for each visit
# get the frequency count for each type of service
########################################################
base01_ser0 <- base01_ser [,c("mr_no", "patient_id", "prescdate", "sercond_date", "cat_id", "conducted"), with =FALSE]
base01_ser0 <- base01_ser0 [, `:=` ( newdt = anydate(prescdate),
serdt = anydate(sercond_date) )] [order(mr_no, newdt, patient_id)]
# One row per patient x visit x service category x status: date range + count
base01_ser01 <- base01_ser0[, .(serstdt = min(newdt),
serendt = max(newdt),
freq = .N), by = .(mr_no, patient_id, cat_id, conducted)]
# Spread the status codes (C/U/Y/N/P) into count columns
base01_ser01t <- dcast(data = base01_ser01,
mr_no + patient_id + cat_id + serstdt + serendt ~ conducted,
value.var = c("freq"),
fill = "")
base01_ser01t <- merge (x = base01_ser01t,
y = medall,
by.x = "cat_id",
by.y = "medicine_id",
all.x = TRUE)
base01_ser01t <- base01_ser01t [order(mr_no, serstdt, patient_id)]
# Use the first service date as the visit date for services records
base01_ser01t <- base01_ser01t [, newdt := serstdt]
# Stack inpatient and outpatient records; `Type` records the source (IP/OP)
l = list(IP = base01_ip, OP = base01_op)
base01_all <- rbindlist(l, idcol = "Type", use.names = TRUE, fill = TRUE)
base01_all <- base01_all [, `:=` ( newdt = anydate(prescdate) )] [order(mr_no, newdt, patient_id)]
#################################################
# create visit numbers and total number of visits
# Individual visits: merge the data on base01_all
# IP visits
# OP visits
# Total number of visits IP + OP
#################################################
# Unique patient/visit-date combinations across clinical + services data
vis <- unique ( rbind(base01_all [, c("mr_no", "patient_id", "newdt"), with =FALSE],
base01_ser01t[, c("mr_no", "patient_id", "newdt"), with =FALSE], fill=TRUE ))
# First 2 chars of patient_id encode the visit type (e.g. IP/OP)
vis <- vis [, Type := substr(patient_id, 1, 2)] [order (mr_no, newdt, patient_id)]
# vis = sequential visit number per patient; all_vis = total visits
vis <- vis [, `:=` (vis =1:.N,
all_vis = max( seq_len(.N) ) ), by = .(mr_no)]
vis02 <- vis [, .(vistype =.N), by = .(mr_no, Type, all_vis)]
# Per-patient counts of each visit type as columns (all_ip, all_op, ...)
vis02t <- dcast(data = vis02,
mr_no +all_vis ~ paste("all_", tolower(Type), sep =""),
value.var =c("vistype"),
fill="")
vis03 <- merge (vis [, -c("all_vis")], vis02t, by = "mr_no")
#############################################
# Start and end date for each type OP and IP
# Start and end date for overall visit dates
#############################################
base01_all01 <- vis[, .(stdt = min(newdt),
endt = max(newdt),
dur = max(newdt) - min(newdt) + 1), by = .(mr_no, Type)]
base01_all01t <- dcast(data = base01_all01,
mr_no ~ Type,
value.var = c("stdt", "endt", "dur"),
fill = "")
#############################
# Start for the overall study
#############################
base01_all020 <- vis[, .(cstdt = min(newdt),
cendt = max(newdt),
cdur = max(newdt) - min(newdt) + 1), by = .(mr_no)]
#############################################
# Create one large dataset with all the dates
#############################################
dates_dur <- merge (x = base01_all020,
y = base01_all01t,
by = c("mr_no"),
all.x = TRUE)
vis03dates_dur <- merge (x = dates_dur,
y = vis03,
by = c("mr_no"),
all.x = TRUE)
# Study day = days since the patient's first visit (day 1 = first visit)
vis03dates_dur <- vis03dates_dur [, studyday := newdt - cstdt + 1]
##################################################
# Merge the Medication information
# Merge the visit information and day calculations
# Merge this information on SERVICEs data as well
##################################################
# NOTE(review): this overwrites the `base01_all01` table created above
base01_all01 <- merge (x = base01_all,
y = medall,
by.x = "cat_id",
by.y = "medicine_id",
all.x = TRUE)
base01_all011 <- merge (x = base01_all01,
y = vis03dates_dur [, -c("Type")],
by = c("mr_no", "patient_id", "newdt" ),
all.x = TRUE)
#################################################
# This should be moved after the VIS calculations
# Add the patient_info
#################################################
base01_ser02t <- merge (x = base01_ser01t,
y = vis03dates_dur,
by = c("mr_no", "patient_id", "newdt" ),
all.x = TRUE)
base01_ser02t <- merge (x = base01_ser02t,
y = pat_diag_vis,
by = c("mr_no", "patient_id"),
all.x = TRUE)
# Combined analysis dataset: clinical (IP/OP) plus services records,
# with id/admin columns dropped
all <- rbind(base01_all011, base01_ser02t, fill =TRUE, use.names = TRUE)
all02 <- all [, -c("ippatient_id", "consult_id", "consultation_id" ,"patient_presc_id",
"med_form_id", "op_medicine_pres_id", "doctor_id", "diagdate",
"prescdate")] [order(mr_no, studyday, patient_id, newdt, vis, cat_id)]
#######################################################
# Calculations for
# Get the disease category list for Cancer patients
#######################################################
# Find patients with cancer and create a subset
# ("arbu" matches "Arbuda" in the ICD description text)
discat0 <- unique(all02 [tolower(description) %like% "arbu", c("icd_code", "description", "mr_no"),] )
discat0 <- discat0 [, distype := "Cancer",]
discat0 <- discat0 [, Code := icd_code,]
discat <- unique( discat0 [, -c("mr_no"),])
cancer <- unique(discat0 [, c("mr_no", "distype"), ])
# Keep every record of patients who have at least one cancer diagnosis
tmpall <- merge (x = cancer,
y = all02,
by = c("mr_no"),
all.x = TRUE)
# create a dummy variable
tmpall <- tmpall[ ,val:=1]
subset2 <- tmpall [, c("mr_no", "distype", "val"), with =FALSE]
subset2 <- unique(subset2)
# 0/1 indicator column per disease type (here only "Cancer")
subset3 <- dcast (data = subset2,
fill =0,
mr_no ~ distype,
value.var="val")
all_met_rmsd <- merge (x = subset3,
y = all02,
by = "mr_no",
all.x = TRUE)
all_met_rmsd <- merge (x = discat,
y = all_met_rmsd,
all = TRUE,
by.x = c("Code", "description"),
by.y = c("icd_code", "description"))
# Records without a cancer code fall into the catch-all category
all_met_rmsd$distype[is.na(all_met_rmsd$distype)] <- "OTHER"
all_met_rmsd <- all_met_rmsd [order(mr_no, studyday, patient_id, newdt, vis, cat_id)]
# Calculation of first disease date: earliest study day on which each
# non-OTHER disease type was recorded, spread to mindayCancer etc.
minday <- all_met_rmsd[ distype != "OTHER",
.(minday = min(studyday)), by =.(mr_no, distype)]
mindayt <- dcast (data = minday,
mr_no ~ paste("minday", distype, sep=""),
value.var="minday")
all_met_rmsd <- merge (all_met_rmsd, mindayt, by = "mr_no")
# Calculate the age variable (years at visit date) for non-missing dates
all_met_rmsd <- all_met_rmsd [, `:=`( age = ifelse ( !is.na( anydate(dateofbirth)) ,
round( (anydate(newdt) - anydate(dateofbirth) + 1)/365.25, digits = 0 ), NA),
newdt0 = anydate(newdt)), ]
# Add Indian rutus (seasons) as new variables; season date ranges from:
# https://www.drikpanchang.com/seasons/season-tropical-timings.html?geoname-id=1277333&year=2010
rutus <- fread("D:/Hospital_data/ProgresSQL/analysis/rutus.csv")
rutus <- rutus [, `:=`(startdt = as.POSIXct( startdate, format="%d-%m-%Y"),
enddt = as.POSIXct( enddate, format="%d-%m-%Y")) ]
# Expand each season's date range to one row per calendar day for the join
rutus02 <- rutus[ , list(season = season, year = year,
newdt0 = anydate( seq(startdt, enddt, by = "day") )), by = 1:nrow(rutus)]
all_met_rmsd <- merge (x = all_met_rmsd,
y = rutus02 [, c("newdt0", "year", "season")],
by = c("newdt0"),
all.x = TRUE)
rm (base01_ip, base01_op, base01_ser, l)
# Baseline age per patient
# NOTE(review): min(age) without na.rm = TRUE yields NA when any age is
# missing for a patient - confirm this is intended
all_met_rmsd <- all_met_rmsd [, `:=` (baseage = min(age)), by =.(mr_no)]
#?????????????????????????????????????????
# Section after this has not been executed
# as the names of the medicines are unknown
#?????????????????????????????????????????
#############################################
# Update the data by re-coded Medicine names
#############################################
lookup_medicine <- fread("D:/Hospital_data/ProgresSQL/analysis/lookup_medicine.txt", sep="|")
all_met_rmsd <- merge(x = all_met_rmsd,
y = lookup_medicine,
all.x = TRUE,
by.x = c("medicine_name"),
by.y = c("medicine_name") )
# Persist the final analysis dataset
fwrite(all_met_rmsd, "D:/Hospital_data/ProgresSQL/analysis/01adsl_cancer.csv")
saveRDS (all_met_rmsd, "D:/Hospital_data/ProgresSQL/analysis/01adsl_cancer.rds")
# Unique-patient disease counts by season, and by year x season (wide)
dis_rutu <- all_met_rmsd [Code != "", .(cnt = uniqueN(mr_no)), by = .(season, Code, description)] [order(season, -cnt, Code)]
dis_rutu_yr <- all_met_rmsd [Code != "", .(cnt = uniqueN(mr_no)), by = .(year, season, Code, description)][order(year, season, -cnt, Code)]
dis_rutu_yr02 <- dcast(dis_rutu_yr,
season + Code + description ~ paste("yr", year, sep=""),
value.var = c("cnt"),
fill=" ")
#fwrite(dis_rutu, "D:/Hospital_data/ProgresSQL/analysis/dis_rutu.csv")
#fwrite(dis_rutu_yr02, "D:/Hospital_data/ProgresSQL/analysis/dis_rutu_yr.csv")
|
ecf0f751ec9707e68f712b11d8a88ef72171769b | 296f060dd7d51e6d0dd58cec150955e44fc8e1e2 | /ui.R | 893c5e8be71846e0e12387a152018384e1c9bf70 | [] | no_license | pinghu/courseraLearning | a1e48ffa38be280b2c8541fc801eeef966f5a1e6 | aa4df7a29bc1f5a5101597be8359b8a3220409ed | refs/heads/master | 2020-04-14T23:50:19.086495 | 2019-01-16T03:12:44 | 2019-01-16T03:12:44 | 164,218,368 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 607 | r | ui.R |
# Shiny UI for an iris k-means clustering demo: the user picks the x/y
# variables and the number of clusters; the server renders the cluster
# scatter plot as output 'plot1'.
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Final Project 1-12-2019 Ping Hu"),
# Sidebar with two variable selectors and a numeric cluster-count input
pageWithSidebar(
headerPanel('Iris k-means clustering, please choose parameter below to see the resulted clusters'),
sidebarPanel(
selectInput('xcol', 'X Variable', names(iris)),
selectInput('ycol', 'Y Variable', names(iris),
selected=names(iris)[[2]]),
numericInput('clusters', 'Cluster count', 3,
min = 1, max = 9)
),
mainPanel(
plotOutput('plot1')
)
)
))
|
7ebc4aaacb0fb703725d1b5cab54d9ac9bdfbdde | 6e8e04c50edd8f5c67d9d5f8ba7eea1b486e4bdc | /R/ds.exposome_pca.R | 1c56e5b9267880e92a150978683f6371e4902430 | [
"MIT"
] | permissive | FlorianSchw/dsExposomeClient | 1b0431b63ead3277f44ca11e37dc82fd754f7649 | b3d800b9cc72459b46f03cec4a3fffe17c09d541 | refs/heads/master | 2023-05-10T14:42:10.289311 | 2021-06-11T13:54:03 | 2021-06-11T13:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,774 | r | ds.exposome_pca.R | #' @title Principal components analysis of an Exposome Set
#'
#' @description Performs a non-disclosive PCA given an Exposome Set on the study server,
#' the Exposome Set can be subsetted by families to perform the PCA
#'
#' @param Set \code{character} Name of the exposome set on the study server
#' @param fam \code{character vector} (default \code{NULL}) Families to subset the exposome set
#' @param standar \code{bool} Whether the values will be normalized prior the analysis (\code{TRUE}) or not (\code{FALSE})
#' Default \code{TRUE}
#' @param method \code{character} (default \code{"normal"}) Method of standardization, only applies when \code{standar}
#' is set to \code{TRUE}. Options are \code{"normal"} which scales the exposures using the mean as the center
#' and the standard variation as dispersion, \code{"robust"} which uses the median and median absolute deviation respectively
#' and \code{"interquartile range"} which uses the median as the center and the coefficient between the
#' interquartile range of the exposure and the normal range between the percentile 75 and 25 as variance.
#' @param pca \code{bool} (default \code{TRUE}) If TRUE perform PCA (only numerical variables),
#' if FALSE FAMD (numerical and categorical)
#' @param npc \code{numeric} (default \code{10}) Number of PC to be kept
#' @param datasources a list of \code{\link{DSConnection-class}} objects obtained after login
#'
#' @return This function does not have an output. It creates a object on the study server named \code{ds.exposome_pca.Results},
#' this object can be passed to the \code{\link{ds.exposome_pca_plot}} to visualize the results of the PCA.
#'
#' @examples
#' \dontrun{Refer to the package Vignette for examples.}
#' @export
ds.exposome_pca <- function(Set, fam = NULL, standar = TRUE, method = "normal", pca = TRUE, npc = 10, datasources = NULL){
# Basic argument validation
if(is.null(Set) | class(Set) != "character"){
stop("Input variable 'Set' must have a value which is a character string")
}
# Default to all available DataSHIELD connections
if (is.null(datasources)) {
datasources <- DSI::datashield.connections_find()
}
# Optional family subset: creates "<Set>_subsetted" on the study server
if(!is.null(fam)){
ds.exposomeSubset(Set, fam, NULL, datasources)
Set <- paste0(Set, "_subsetted")
warning('The family subset of [', paste0(fam, collapse = ", "), '] yielded (',
unlist(ds.dim(Set)[1])[1], ') valid exposures.')
}
# Optional standardization into a temporary server-side object
if(standar){
ds.standardize(Set, name = "pca_std_exposomeSet", method = method, datasources = datasources)
Set <- "pca_std_exposomeSet"
}
checkForExposomeSet(Set, datasources)
# Build and run the server-side PCA call; the result is stored server-side
cally <- paste0("exposome_pcaDS(", Set, ", npc = ", npc, ", pca = ", pca, ")")
DSI::datashield.assign.expr(datasources, "ds.exposome_pca.Results", as.symbol(cally))
# Clean up the temporary standardized object
if(standar){
datashield.rm(datasources, Set)
}
}
a54d7f08c1323e8649718c8651a04d7a315982db | 68d2930eaa1716467565f9c1794fdfae6620367a | /Session 12 (upload).r | 0ac35d54f053214d35321649399ed23966710b77 | [] | no_license | dgopal2/data_science | 5342db598af9d07dc45a33a08239ec330d1979d7 | 0f8877c932edc02315b13569d6d93da9aa95f3e5 | refs/heads/master | 2020-12-05T03:10:04.533477 | 2020-01-06T02:23:59 | 2020-01-06T02:23:59 | 231,992,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,918 | r | Session 12 (upload).r | #=========================================================================
# IDS 462, Session 12
#========================================================================
# Copyright Zack Kertcher, PhD, 2018. All rights reserved.
# Do not distribute or use outside this class without explicit permission
# from the instructor.
#========================================================================
## LOAD DATA AND LIBRARIES
#=
library(car)
library(corrplot)
library(rms)# for pseudo-R-Squared
library(psych)
library(tidyverse)
load("session12data.Rdata")
## Common issues and tips
## Logit (cont.)
## Exploratory factor analysis
## Common issues and tips
## Common issues and tips
#=
# The problem: identify and remove outliers
# Outlier detection utility function
# Flag outlying positions in a numeric vector using Tukey's fences.
#
# Prints the indices of moderate outliers (beyond 1.5 * IQR from the
# quartiles) and of extreme outliers (beyond 3 * IQR), and invisibly
# returns the extreme-outlier indices as an integer vector so callers can
# subset with them directly (no as.numeric() round-trip needed).
outliers <- function(column) {
lowerq <- as.vector(quantile(column)[2]) # returns 1st quartile
upperq <- as.vector(quantile(column)[4]) # returns 3rd quartile (comment fixed)
iqr <- upperq-lowerq
# Moderate outliers
mod.outliers.upper <- (iqr * 1.5) + upperq
mod.outliers.lower <- lowerq - (iqr * 1.5)
mod.outliers <- which(column > mod.outliers.upper |
column < mod.outliers.lower)
print(paste(mod.outliers))
#Extreme outliers
extreme.outliers.upper <- (iqr * 3) + upperq
extreme.outliers.lower <- lowerq - (iqr * 3)
extreme.outliers<-which(column > extreme.outliers.upper
| column < extreme.outliers.lower)
print(paste(extreme.outliers))
# FIX: return the numeric indices invisibly instead of the character
# vector produced by print(paste(...))
invisible(extreme.outliers)
}
# Either compute +/-1.5*IQR, or use the utility function I provided (outliers)
# If you use the outlier function, you can find the outlier rows, but you can't directly use
# it to remove these rows. So,
# How many outliers are in the data?
num_outliers <- outliers(bank$balance) %>% length()
num_outliers/nrow(bank) # around 5%, so let's drop them
outliersvec <- outliers(bank$balance) %>% as.numeric # generate a numeric vector of outliers (the function's printed result is character, which cannot be used to subset, so convert to numbers)
bank1 <- bank[-outliersvec,] # bank1 is without outliers in balance
plot(density(bank$balance)) # bad
plot(density(bank1$balance)) # not too bad
# The problem: can you start with a regression model then perform EDA?
# Not recommended. Remember the debt variable from the bank model? What about poutcome?
bank_mod3 <- glm(y~balance+marital+loan+poutcome, data=bank1, family=binomial)
summary(bank_mod3)
# Wow! Look at the z-value for poutcome success. Unknown is significant as well. (z-value gives a sense of the magnitude)
#But wait, how many cases are there?
table(bank1$poutcome) # good for prediction but not for analysis (too few cases)
# So, how meaningful and useful is this variable?
# Useful for prediction, but practical application is likely limited.
# The problem: how to recode a variable with many levels? (options: ifelse, gsub, subsetting)
# Remember that you can recode a variable with an ifelse function. For example,
bank1$marital_binary <- ifelse(bank1$marital=="married", "married", "not married")
table(bank1$marital_binary)
# But if we have more levels it becomes complicated (a lot of nested ifelse statements), or clunky assignments.
# The car package has what I think is the best option, with the recode function. Here's how it works:
bank1$season <- car::recode(bank1$month, "c('sep', 'oct', 'nov')='fall';
c('dec', 'jan', 'feb')='winter';
c('mar', 'apr', 'may')='spring';
c('jun', 'jul', 'aug')='summer' ")
table(bank1$season) # a lot more contact in spring and summer
# The problem: Can't transform a variable. Getting an error message. For example,
log(bank$balance)
sqrt(bank$balance)
# The issue are the negatives and zeros. How many do we have?
bank$balance[bank$balance<=0] %>% length/nrow(bank) # Quite a few (~16% have negative or zero balance), and it makes sense, so keep rather than remove
bank$balance_std <- bank$balance+abs(min(bank$balance))+1 # simple shift: min balance was negative; adding |min| still gives 0, so add +1 to make every value strictly positive
head(bank$balance); head(bank$balance_std)
# Now try the transformation
log(bank$balance_std) # yes!
## Logistic regression (cont.)
# Stopped at exploring interactions with logit
#################
# Find a "good" model to predict quitting.
# Your model might include interactions
# Interpret the results.
#################
# jobsatisfaction is a Likert scale, not continuous; * adds main effects AND the interaction (: would add only the interaction)
quitmod <- glm(ynquit~jobsatisfaction*age, data=quit, family="binomial")
summary(quitmod)
#use exp() on the coefficients to interpret results as odds ratios
plot(effect(term="jobsatisfaction:age", mod=quitmod, default.levels=20), multiline=T) # interesting: plots only the interaction; the effect of job satisfaction on the odds of quitting decreases with age
## 4. Diagnostics
#=
# Logit does not have exactly the same diagnostics tools as OLS.
# But here is what we can do:
# a. Examine for possible multicollinearity
vif(quitmod)
sqrt(vif(quitmod))>2 # There is multicollinearity in this model!
# However, this is not data-related multicollinearity. This type of structured multicollinearity
# (introduced by the interaction term) is less concerning - acknowledge it and move on.
# Still, know that coefficients are not going to be precise.
# b. Check for overdispersion
# The logit model assumes a binomial distribution.
# Sometimes the variance of the DV is larger than
# specified for a binomial distribution.
# This is called *overdispersion* (we don't typically find underdispersion)
# We identify overdispersion by dividing the residual deviance
# with the residual degrees of freedom in the model.
# If the result is *much higher than 1*, there is overdispersion (can also test with a chi-square comparison, below).
# Returning to the bank model
bank_mod3 <- glm(y~balance+marital+loan+poutcome, data=bank1, family=binomial)
deviance(bank_mod3)/df.residual(bank_mod3) # Not bad: deviance/df well below the problem zone, so no overdispersion
# But how do we know for sure?
# We compare the model to a model that assumes overdispersion,
# and see if there is a difference
bank_mod3_od <- glm(y~balance+marital+loan+poutcome,
data=bank1, family=quasibinomial) # note the quasibinomial distribution
pchisq(summary(bank_mod3_od)$dispersion * bank_mod3$df.residual,
bank_mod3$df.residual, lower=F)
# p-value is much higher than 0.05: no evidence of overdispersion (a p-value below 0.05 would indicate overdispersion)
# There is no overdispersion.
# If there is overdispersion, fit a quasibinomial distribution intead of binomial.
#################
# Using the quit data, build a model
# Check for multicollinearity
# Check for overdispersion
# What did you find?
#################
rm(list=ls())
# Model fit
#=
# Instead of Adjusted-R-Squared we get AIC.
# AIC: Index of fit that penalizes the number of parameters.
# It is computed as:
# -2*(max log-likelihood)+2*(number of parameters)
# So, the smaller the AIC, the better.
# We can compare models using AIC
# but we don't get a good sense of our model's performance
# Instead, we use pseudo-R-Squared measures.
# There are a few of them. Here's a good one from the rms package:
mod_fit_3<- lrm(y ~ balance+marital+loan+poutcome,
data = bank1)
mod_fit_3$stats["R2"]
# What if we take out one IV, like poutcome?
mod_fit_3.1 <- lrm(y~balance+marital+loan,
data = bank1)
mod_fit_3.1$stats["R2"] #pseudo r sq dropped from 13 to 3% #but cant tel a story
# poutcome, despite having few values in sucess, is an important IV!
# We could have also figure this out but simply looking at the z-value
summary(bank_mod3)
#################
# Build three models and find the one with the best fit.
# What are the best AIC?
# What is the best pseudo-R-Squared?
#################
## BACKGROUND ON (EXPLORATORY) FACTOR ANALYSIS #common #other type is conformity factor analsis rare sorta lik tree
#=
# Aims to find underlying (latent) factors #obj is to find hidden structure in data
# that account for observed relationships
# among numeric variables
# Used to reduce variables, and build a scale/index (e.g., social status scale, work personality index)
# The process is:
#1. Examine the data
#2. Scale the data (if needed)
#3. Consider number of factors
#4. Extract factors
#5. (optional) use the factors discovered in analysis and modeling
#1. Examine the data
#=
View(brands)
str(brands)
summary(brands)
#2. Scale the data (if needed) #change value but not distribution #not ransformed
#=
# Data may have different scales/values. We want to center them (xi-mean(x)).
# Better yet, standardize them. This is how we standardize a variable:
#(xi-mean(x))/sd(x) #most common
# Or use R's scale function #all values seems to be likert and looks lik ll be simiar n can be grouped # diff scale lik 1 to mil n others in diff scale
# This not needed when the variables have the same scale, as in the brands data.
# But, this is the general procedure:
brands_s <- data.frame(scale(brands[,-1])) # omit the brand variable (factor)
describe(brands_s) # as expected #with just describe mght get diff results clashes with other package so change code as pstch::describe
corrplot(cor(brands_s), method="circle", addCoef.col="grey", type="upper") #find var which correlate with one another but not one which is super correlated(lik both same) n dont want with no correlation we want .5 .7 etc
# We are looking for variables with relatively high correlation with one or a few others
# brands_s$brand <- brands$brand # add the factor variable
#3. Consider number of factors #tell how many factors to extract
#=
# We'll use the fa.parallel function from the psych package
# To use the current data against simulated data
# The procedure in FA is to "rotate" the data to maximize variation for each factor.
# Default rotation is varimax. Oblique rotations allow correlation among factors, whereas orthogonal rotations do not.
fa.parallel(brands_s, fa="both", n.iter=100, show.legend=F) #its gonna rotate columns together to c if there is correlation w usualy do varimax. we here trying to max the dist betn them #diff rotation alogorithms exst
#our obj here is least no.of factors which gives most variance
#eigen value to look at distance
# Results suggest the optimal number of factors based on the data
# Look for an "elbow", especially around eigenvalue = 1. #it ll giv a reco itslf
## Perform FA
#=
# Many methods to extract factors (unlike PCA)
brandsfa1 <- factanal(brands_s, 3) #factor analysis #rem it has to be scaled data # here we ve same scale but still we use this
brandsfa1$loadings
# loading values are essentially correlation coefficient of variable with factor
# The higher the loading, the higher the "correlation" with the factor
# Look for loading of .5 or higher (conservatively, .7 or higher)
# When developing a scale, you might include the negative with the positive loadings
# But typically use either all the positive, or negative that have a high loading
# So,
# Factor1 (bargain, value) - value proposition etc
# Factor2 (perform, leader, serious) - quality proposition #notice the fun variable
# Factor (latest, trendy) -hip factor
#if i get -.8 #include either all + or all -
#create separate with value proposition, , etc n also other left out var n then the story i can tel ll be much easier
#loadings which is above .7 can be conidered factor those tat didnt come are indepfrom the rest #prop var --.206 factor 1 explains 20% of variance
##############################################
# Perform factor analysis on the decathlon data.
# How many factors were discovered?
# Which events loaded the best on these factors?
# Can you come up with names for these underlying factors?
##############################################
## ADDITIONAL RESOURCES
#=
# Books:
#-
# Multiple Factor Analysis by Example Using R. 2014. Chapman and Hall/CRC
# Exploratory Multivariate Analysis by Example, 2nd. Edition. 2017. CRC Press.
# The above procedure is for exploratory factor analysis. If you want confirmatory analysis, use structural equation modeling (from the sem package)
# A good tutorial of structural equation modeling using R is found at: http://socserv.socsci.mcmaster.ca/jfox/Misc/sem/SEM-paper.pdf
|
751f2676ce5e29684060b0a9b62d5a20f56ec7f1 | 2f9e4caf2dea903fa1398263c5a16890039321de | /DesarrolloPreguntas.R | 085ece11e624e06ec7badbb95a1c43cd3f894298 | [] | no_license | barbarapablaza/Tarea_3 | 7eeead6fbb25bcca3d4e4e9dda527642f2ae0ee8 | b7b133ff703494b1de3dab3818e10651013e6355 | refs/heads/master | 2020-08-02T20:18:12.014413 | 2019-09-30T13:03:15 | 2019-09-30T13:03:15 | 211,494,615 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,738 | r | DesarrolloPreguntas.R | # Lista de numeros
# Exercise answers working through R lists, indexing, loops and functions.
# FIX: the original file contained bare label lines ("10.a)", "Figura 1.",
# ...) that are not valid R syntax -- the file could not even be parsed --
# plus demo lines that stop execution with an error.  Those lines are now
# commented out so the script can be sourced end to end; the failing
# examples are kept (commented) because they are the point of the exercise.
listaDeNumeros <- list(2,5,6,2,1,5,6,10,11,20,15)
# Ejercicio 1: zero, out-of-range and negative indexing on a list
listaDeNumeros[0]    # empty list
listaDeNumeros[17]   # list(NULL): index past the end
listaDeNumeros[-5]   # every element except the fifth
# Ejercicio 2: `[` returns a sub-list; unlist()/`[[` return the element
listaDeNumeros[5]
unlist(listaDeNumeros[5])
# The next comparison errors ("non-numeric argument to binary operator")
# because listaDeNumeros[5] is still a list:
# if(listaDeNumeros[5]+1>0){
#   print("se cumple")
# }
if(unlist(listaDeNumeros[5])+1>0){
  print("se cumple")
}
# Ejercicio 3: replacement assignment into a list
listaDeNumeros[5]<-12
# Ejercicio 4
# The next call errors: `nombre_variable` was never defined.
# length(nombre_variable)
length(listaDeNumeros)
# Ejercicio 5: sequences with `:`
# The next line errors because neither variable exists yet.
# valorInicial : valorFinal
valorInicial <- 5
valorFinal <- 20
valorInicial <- 20
valorFinal <- 5
valorInicial:length(listaDeNumeros)   # descending sequence 20:11
# Ejercicio 6
for(i in 1 : 100){
  print(paste("cuento ",i," misisipis"))
}
# Ejercicio 7: iterating directly over the list elements
for(i in listaDeNumeros){
  print(paste("cuento ",i," misisipis"))
}
# Ejercicio 8: uses the last value of i left over from the loop above
if(i%%2==0){print("Par")} else {print("impar")}
# Ejercicio 9
# Returns "votaciones alteradas" when more votes were cast than voters,
# "SI" when SI wins with at least 30% of the electorate, "NO" otherwise.
determinar_ganador <- function(total,votosSI,votosNO){
  if(votosSI+votosNO>total){
    "votaciones alteradas"
  }else if (votosSI>=votosNO && votosSI>=0.3*total){
    print("SI")
  }else {
    print("NO")
  }
}
determinar_ganador(100,100,50)
determinar_ganador(100,55,45)
determinar_ganador(100,19,7)
determinar_ganador(100,45,55)
#ejercicio 10
Suma<-function(a,b){
  a+b
}
Suma(3,4)
# 10.a)  (exercise label; not valid R code)
Multiplicacion<-function(a,b){
  a*b
}
Multiplicacion(3,4)
# 10.b)
Division<-function(a,b){
  a/b
}
Division(3,4)
# 10.c)
Resta<-function(a,b){
  a-b
}
Resta(3,4)
#ejercicio 11
# Figura 1.  (exercise label; not valid R code)
# Area of the region between two rectangles (e.g. a picture frame).
area_rectangulo<-function(base1,altura1,base2,altura2){
  abs((base1*altura1)-(base2*altura2))
}
area_rectangulo(20,6,6,7)
area_rectangulo(450,300,300,250)
# Figura 2.
# Area of the region between two concentric circles (an annulus).
area_circulo<-function(radio1,radio2){
  abs((pi*radio1^2)-(pi*radio2^2))
}
area_circulo(6,9)
area_circulo(4,3)
#ejercicio opcional
for(i in listaDeNumeros){
  print(i)
}
|
53b9cdac19fb8eded65f52e9b856b3545a1469d6 | 69f4b59b4fa537e356d18898f8b01eb667e600c9 | /CSAW_differential_binding_phipps.R | 95732ce6818cc701ed81da8e210ccd26d156ac31 | [] | no_license | ajphipps/EpiAD | 7a6fb49ecfa3d931c161ef7e45bc7e0e062b3b22 | 593aca0cde82efb832ffaaf19f2b5c16abf82e4a | refs/heads/master | 2021-10-19T04:18:22.692503 | 2019-02-18T01:07:14 | 2019-02-18T01:07:14 | 105,228,275 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,678 | r | CSAW_differential_binding_phipps.R | #Title: Differential binding analysis with CSAW
#Author: Andrew Phipps
#Date: 06032018
#notes: summary of csaw experiments
# ChIP-seq differential-binding pipeline (csaw/edgeR): count reads in
# windows, filter by global enrichment, normalise (composition + TMM),
# test with quasi-likelihood GLMs, and merge windows for FDR control.
# NOTE(review): paths are absolute to this analysis machine; the script is
# meant to be run interactively section by section.
###############################################################################
#download and install required packages - note you need R 3.3.3 or later
#source("https://bioconductor.org/biocLite.R")
#biocLite("edgeR")
#biocLite("csaw")
#biocLite("DiffBind")
#load required packages into R
#tutorial link https://www.bioconductor.org/help/course-materials/2015/BioC2015/csaw_lab.html
library(DiffBind)
library(csaw)
library(edgeR)
library(rtracklayer)
library(ggplot2)
#import required bam files - for now stick to single timepoint WT vs TG for K4me3 - but not sure if it would be better to load all data
#for multiple marks at a single time point or import all data from the experiment
#load samples in from marked duplicated files
K4me3_bam.files <- file.path("/NGS_Data/Andrew/Bowtie2_MarkDuplicates/K4me3" , c("154467_TG12_K4me3_CC0TAANXX_CGTCCATT_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "155542_TG12_K4me3_CC0TAANXX_GTCCTGTT_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "155668_WT12_K4me3_CC0TAANXX_AGCTAGTG_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "155669_TG12_K4me3_CC0TAANXX_AGCCGTAA_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "155688_WT12_K4me3_CC0TAANXX_CACGTCTA_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "155691_TG12_K4me3_CC0TAANXX_GAGTAGAG_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "156508_WT12_K4me3_CC0TAANXX_ACTATCGC_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "156509_WT12_K4me3_CC0TAANXX_GCGTATCA_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "157306_WT12_K4me3_CC0TAANXX_ACTCTCCA_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam",
                                                                                 "157307_TG12_K4me3_CC0TAANXX_ACGTCGTT_all_trimmed.fq.sam.bam.sorted.bam_markdup.bam"))
# merged total-input control; NOTE(review): `input` is not used again below
# in this script -- TODO confirm it is needed.
input <- file.path("/NGS_Data/Andrew/Bowtie2_MarkDuplicates/TI", c("TI_merged.sorted.bam_markdup.bam"))
#design matrix
# genotype order below mirrors the order of K4me3_bam.files above
design <- model.matrix(~factor(c('TG12', 'TG12', 'WT12', 'TG12', 'WT12', 'TG12', 'WT12','WT12','WT12','TG12')))
colnames(design) <- c("intercept", "genotype")
#making blacklist granges with Rtracklayer
library(rtracklayer)
gr_blacklist = import("/NGS_Data/Andrew/bigwig/mm10.blacklist.bed")
###############################################################################
#setting up parameters
frag.length <- 250 #this is the fragment length from sonication
window.width <- 150 #will partition into windows of 150bp - for TF set small window, but for histone marks set window relevent \
##to mark - around 150bp windows - min size of a nucleosome? see 2.5 - Choosing appropriate window size
# NOTE(review): `spacing` is defined but never passed to windowCounts below.
spacing <- 50
#discard mm10 blacklist regions - remember to include encode blacklist regions #probability mapped incorrectly: 0.01 (mapq 20)
parameters <- readParam(minq=30, discard = gr_blacklist)
#parallelisation of
# NOTE(review): `data` masks the base function utils::data() from here on.
data <- windowCounts(K4me3_bam.files, ext=frag.length, width =window.width, param = parameters)
#can visualise the data with
rowRanges(data)
#visualise counts with
assay(data)
###############################################################################
#can visualise to pick fragment size with the following plot - however you need to have marked (not removed) duplicates with Picard
#Sharp spike = fragment length. Low curve = potentially poor IP efficiency (low input from my samples)
max.delay <- 500
dedup.on <- reform(parameters, dedup=TRUE)
plot1 <- correlateReads(K4me3_bam.files, max.delay, param = dedup.on)
plot(0:max.delay, plot1, type='l', ylab="CCF", xlab="Delay (bp)")
#can quantitate the fragment size with the following:
maximizeCcf(plot1)
#you can also perform library specific fragmentation - see manual
###############################################################################
###############################################################################
#Filtering steps:
#filtering out low quality reads
#Independent filtering of count data - see Chapter 3
#filtering from the 'negative binomial' - log transform of NB is referred to as abundance
##to remove uninteresting regions and lower computational complexity
#a better option for our analysis is to filter via global enrichement
bin.size <- 2000L
binned1 <- windowCounts(K4me3_bam.files, bin=TRUE, width=bin.size, param=parameters)
filtered.stat <- filterWindows(data, background=binned1, type="global")
#keep samples with a fold change of greater than 4 (3 by default in csaw guide)
filtered.keep <- filtered.stat$filter >log2(3)
#sum of filtered samples
sum(filtered.keep)
#make filtered.data array for downstream analysis
filtered.data <- data[filtered.keep,]
#visualise fold change to confirm that the bulk of background sites are removed by filtering
par(mfrow=c(1,1))
hist(filtered.stat$back.abundances, xlab="adjusted bin log-CPM", breaks=100,
     main="", col="grey80", xlim=c(min(filtered.stat$back.abundances), 0))
global.bg <- filtered.stat$abundances -filtered.stat$filter
abline(v=global.bg[1], col="red", lwd=2)
abline(v=global.bg[1]+log2(3), col="blue", lwd=2)
legend("topright", lwd=2, col=c('red', 'blue'), legend=c("Background", "Threshold"))
###############################################################################
###############################################################################
#elimination of composition bias with global enrichment
binned <- windowCounts(K4me3_bam.files, bin=TRUE, width=10000, param=parameters)
filtered.data <- normOffsets(binned, se.out=filtered.data)
filtered.data$norm.factors
#You can test multiple normalisation windows here: too small = low counts and loss of DB, too large and DB will
##be in the same window as background
#demo <- windowCounts(K4me3_bam.files, bin=TRUE, width=5000, param=parameters)
#normOffsets(demo, se.out=FALSE)
# [1] 0.9748893 1.0295585 0.8987019 1.0386579 1.0815877 0.8709669 0.9466737 1.0718893 1.0981895 1.0167509
#demo <- windowCounts(K4me3_bam.files, bin=TRUE, width=15000, param=parameters)
#normOffsets(demo, se.out=FALSE)
#[1] 0.9847623 1.0302603 0.9183524 1.0549877 1.0909148 0.8883423 0.9719159 1.0686444 1.1166129 0.9051679
#visualisation of normalisation with MA plots - generate for each sample
#vertical shift in the bars might indicate composition bias - ideally want comp factors (line) to pass through
##centre of the cloud
par(mfrow=c(3,3), mar=c(5,4,2,1.5))
adj.counts <- cpm(asDGEList(binned), log=TRUE)
normfacs <- filtered.data$norm.factors
# MA plot of sample 1 against every other sample
for (i in seq_len(length(K4me3_bam.files)-1)) {
  cur.x <- adj.counts[,1]
  cur.y <- adj.counts[,1+i]
  smoothScatter(x=(cur.x+cur.y)/2+6*log2(10), y=cur.x-cur.y,
                xlab="A", ylab="M", main=paste("1 vs",i+1))
  all.dist <-diff(log2(normfacs[c(i+1, 1)]))
  abline(h=all.dist, col="red")
}
###############################################################################
#Eliminating efficiency bias using TMM on high abundance regions
filtered.data.TMM <- normOffsets(filtered.data, se.out=TRUE)
filtered.data.TMM.efficiency <- filtered.data.TMM$norm.factors
data.comp<- normOffsets(binned, se.out=FALSE)
#visualisation post normalisation
##Low A-value = background, high A-value = bound. Normalisation factors from removal of comp bias(dashed line), pass
###through low A-value, removal of efficiency bias pass through (full)
par(mfrow=c(1,2))
bins <- binned
comp <- data.comp
eff <- filtered.data.TMM.efficiency
adjc <-cpm(asDGEList(bins), log=TRUE)
smoothScatter(x=rowMeans(adjc), y=adjc[,1]-adjc[,2], xlab="A-value (background vs whole)", ylab="M", main= "TMM normalisation K4me3 12m")
abline(h=log2(eff[1]/eff[2]), col="red")
abline(h=log2(comp[1]/comp[2]), col="red", lty=2)
###############################################################################
###############################################################################
#testing for Diffbinding
#need: filtered.data.TMM and filtered.data, original data: K4me3_bam.files, and design matrix
#setting up the data
y<- asDGEList(filtered.data.TMM)
#experimental design and setting up data
design
#stabilising estimates with empyrical bayes
y<- estimateDisp(y, design)
summary(y$trended.dispersion)
fit <-glmQLFit(y, design, robust=TRUE)
summary(fit$var.post)
#visualisation of EB stabilisation biological coefficient of variation for NB dispersion - see pg 42
par(mfrow=c(1,2))
o<-order(y$AveLogCPM)
plot(y$AveLogCPM[o],sqrt(y$trended.dispersion[o]), type="l", lwd=2,
     ylim=c(0,1), xlab=expression("Ave."~Log[2]~"CPM"),
     ylab=("biological coefficient of variation"))
plotQLDisp(fit)
#my filtering may not have been strong enough - might need greater fold change to reduce the crap I am seeing here
#alternatively it might be due to increased variation I see between my samples here?
summary(fit$df.prior)
#visualise with MDS plots:
# NOTE(review): plotMDS() is called with no arguments and will error as
# written -- the argument-bearing call appears in the loop further below;
# TODO confirm this line should be removed or given `adj.counts`.
plotMDS()
#Testing for differential binding
# contrast c(0,1): test the genotype coefficient (TG vs WT)
results <- glmQLFTest(fit, contrast=c(0,1))
head(results$table)
#assign p-values to co-ordinates
rowData(filtered.data.TMM) <-cbind(rowData(filtered.data.TMM),results$table)
#examine replicate similarity with MDS plots
par(mfrow=c(2,2), mar=c(5,4,2,2))
adj.counts<-cpm(y,log=TRUE)
# blue = TG samples, red = WT samples (same order as K4me3_bam.files)
for(top in c(100,500,1000,5000)) {
  out <- plotMDS(adj.counts, main=top, col=c("blue","blue", "red", "blue", "red", "blue", "red", "red", "red", "blue" ),
                 labels=c("154467", "155542", "155668","155669", "155688", "155691", "156508", "156509", "157306", "157307"), top=top)
}
###############################################################################
###############################################################################
#Correction for multiple testing
#need - filtered.data.TMM and results
#uses Benamini-Hochbeg method to p-values, which is less conservative than Bonferroni correction, but still provides
##some form of error control. Can correct with regions (wider combinations of windows) - best for broader marks,
### or use a single window to represent region/cluster - sensible for sharp binding sites
#cluster windows into regions without external information/cues - note max.width to limit size (6.2.2)
#tolerance is the min distance for two binding sites to be treated as separate events
mergedwindowsK4me3 <- mergeWindows(rowRanges(filtered.data.TMM), tol = 100L) #max.width = 8000L)
mergedwindowsK4me3$region
#assigning combined P value for merged windows
p.mergedwindowsK4me3 <- combineTests(mergedwindowsK4me3$id, results$table)
#check to see if most clusters are an acceptable size, if there are huge clusters we need to improve our filtering or limit
summary(width(mergedwindowsK4me3$region))
#now assign direction of fold change to the p value
direction.p.mergedwindowsK4me3 <- p.mergedwindowsK4me3$FDR <= 0.1
table(mergedwindowsK4me3$direction[direction.p.mergedwindowsK4me3])
#option to select only a particular file type from a working directory
##sys.glob("*.bam")
|
89626d3b039ff38e3688333bbc7f87bd19767186 | 0935642ce9675185a442d299e7ddf95ab74bab6c | /Neural Networks/NN_Concrete.R | e8f9c9188f4bbc5406835dd371a1619952258358 | [] | no_license | PraveenAithapaga/Datascience-Assignments | 96388f6a5fecce528ab2e95173dae001fb819d21 | 3a463821841d94ad799d6a615ed510635e9587d5 | refs/heads/master | 2022-07-29T01:18:29.664200 | 2020-05-20T15:14:59 | 2020-05-20T15:14:59 | 265,233,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,187 | r | NN_Concrete.R |
# Load the concrete compressive-strength data set, chosen interactively.
concrete <- read.csv(file.choose(),header = T)
# quick look at the raw data
View(concrete)
str(concrete)
# NOTE(review): attach() is discouraged -- it puts the columns on the search
# path and can silently mask other objects; the rest of the script accesses
# columns via concrete$... anyway, so this line may be unnecessary.
attach(concrete)
# Min-max scale a numeric vector onto [0, 1].
#
# @param x Numeric vector.
# @return `x` linearly rescaled so its minimum maps to 0 and maximum to 1.
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Min-max scale every column so the network trains on comparable ranges.
concrete_norm<-as.data.frame(lapply(concrete,FUN=normalize))
# scaled strength lies in [0, 1]; compare with the raw summary below
summary(concrete_norm$strength)
summary(concrete$strength)
# train/test split: first 773 rows train, remaining 257 rows test
concrete_train<-concrete_norm[1:773,]
concrete_test<-concrete_norm[774:1030,]
# NOTE(review): installing packages inside the script re-runs on every
# execution; better done once outside the script.
install.packages("neuralnet")
install.packages("nnet")
library(neuralnet)
library(nnet)
# default (single hidden node) network predicting strength from the 8 inputs
concrete_model <- neuralnet(strength~cement+slag+ash+water+superplastic+coarseagg+fineagg+age,data = concrete_train)
str(concrete_model)
plot(concrete_model)
# predict on the held-out rows (columns 1:8 are the predictors)
model_results <- compute(concrete_model,concrete_test[1:8])
predicted_strength <- model_results$net.result
predicted_strength
model_results$neurons
# correlation between predicted and observed strength on the test set
cor(predicted_strength,concrete_test$strength)
plot(predicted_strength,concrete_test$strength)
# 5-hidden-node network.
# NOTE(review): this model is fit on the FULL normalised data
# (concrete_norm), so evaluating it on concrete_test re-uses training rows
# and the correlation is optimistic -- TODO confirm this is intended.
model_5<-neuralnet(strength~cement+slag+ash+water+superplastic+coarseagg+fineagg+age,data= concrete_norm,hidden = 5)
plot(model_5)
model_5_res<-compute(model_5,concrete_test[1:8])
pred_strn_5<-model_5_res$net.result
cor(pred_strn_5,concrete_test$strength)
plot(pred_strn_5,concrete_test$strength)
|
f0fe31a9eb40bd8ba33b85da2c6639262db785c2 | f44a214a714ce68e1cbb8fd8cd01fd608fe073f1 | /Subject_Statistics/distro1.R | 2bb3cb77f85022e4c4e164f80188c7538830a733 | [
"MIT"
] | permissive | mahnooranjum/R_Programming | cdc4cffb3decabb07a6ed2e37515cdd055eb2bde | 3227d95323d7c33644edeb6d21264d50f18a2725 | refs/heads/master | 2023-02-17T13:35:11.697968 | 2021-01-18T12:22:12 | 2021-01-18T12:22:12 | 257,075,397 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 194 | r | distro1.R | # the data frame
# Discrete probability distribution over the outcomes 0..5.
# FIX: renamed the variable from `data` (which masks utils::data()) and
# spelled out `names.arg` instead of relying on partial argument matching.
prob_dist <- data.frame(outcome = 0:5, probs = c(0.1, 0.2, 0.3, 0.2, 0.1, 0.1))
# make a bar chart of the probability distribution (a barplot, not a
# histogram, because the distribution is discrete)
barplot(names.arg = prob_dist$outcome, height = prob_dist$probs)
5d4e7f507e2f95f181af706cf3cc57b5654c7b3e | e012063d891c2102adb9dce8a62387abd0170c78 | /COVID-trends-shiny/function-pt.R | 702a1e797cf0c79a41f38d0258c6b32ebfcfae75 | [] | no_license | Wang-526/Trinity-project-G2 | fb7482b78b5fecaa671f5e8a632cc697c34b7073 | a131c8103fa9c25a76bbed8972e2fed172c8f42a | refs/heads/main | 2023-01-27T21:27:48.616674 | 2020-12-15T05:01:32 | 2020-12-15T05:01:32 | 314,225,680 | 0 | 1 | null | 2020-12-01T22:42:35 | 2020-11-19T11:21:59 | null | UTF-8 | R | false | false | 11,279 | r | function-pt.R | # function to obtain legend_name according to keywords
# Build a legend label from a vector of keywords.
#
# One keyword     -> "kw1"
# Two keywords    -> "kw1, kw2"
# Three or more   -> "kw1, kw2, ..."
#
# @param keywords Character vector of search keywords (may be empty).
# @return A single character string suitable for a plotly legend entry.
legendName=function(keywords){
  # show at most the first two keywords, comma-separated
  legend_name=paste(keywords[seq_len(min(length(keywords), 2))], collapse=", ")
  # signal that further keywords were truncated
  if (length(keywords) > 2) {
    legend_name=paste0(legend_name, ", ...")
  }
  # FIX: the original returned "NA, NA, ..." for an empty input;
  # an empty vector now yields an empty label.
  return(legend_name)
}
# one group of keywords plot function
# Plot daily COVID-19 case increases (left axis) together with the daily
# tweet frequency for one group of keywords (right axis).  Each day-to-day
# trend segment is coloured by the mean sentiment of its two endpoints:
# green for positive, red for negative.
#
# @param covid    Data frame whose first column is the date and second the
#                 daily case increase.
# @param keywords Character vector of keywords; used only for the legend.
# @param trend    Data frame with columns date, number (tweet frequency)
#                 and sentiment_score.
# @return A plotly object with a secondary y axis for keyword frequency.
trendPlot=function(covid,keywords,trend){
  # select covid trend data: align both inputs on a gap-free date range
  trend=trend%>%mutate(date=ymd(date))
  date=trend$date%>%{ymd('1970-01-01')+max(.):min(.)}
  covid=covid%>%mutate(date=ymd(date))
  covid=data.frame(date)%>%
    left_join(covid,'date')
  trend=data.frame(date)%>%
    left_join(trend,'date')
  # days without tweets count as zero frequency / neutral sentiment
  trend$number[is.na(trend$number)]=0
  trend$sentiment_score[is.na(trend$sentiment_score)]=0
  # plot for daily covid trend
  hover=paste('Date: ',covid[,1],' <br>Daily Increase: ',covid[,2])
  pic=plot_ly(x=covid[,1],y=covid[,2],color=I('black'),text=hover,
              hoverinfo='text',name='Daily case increase',type ='scatter',
              mode='lines+markers')
  # plot for trend
  ## trend legend plot
  # FIX: reuse the shared legendName() helper instead of duplicating its
  # if/else logic inline
  legend_name=legendName(keywords)
  pic=pic%>%
    add_trace(pic,x=trend$date,y=trend$number,color=I('blue'),
              name=legend_name,mode='lines+markers',yaxis="y2",
              marker=list(symbol=2,size=10),visible='legendonly')
  ## plot for sentiment legend (dummy markers shown only in the legend;
  ## "Possitive" typo fixed to match geoTrendMap)
  pic=pic%>%
    add_trace(x=trend$date[1],y=trend$number[1],color=I('green'),name="Positive",
              mode='markers',marker=list(size=15),yaxis="y2",visible='legendonly')
  pic=pic%>%
    add_trace(x=trend$date[1],y=trend$number[1],color=I('red'),name="Negative",
              mode='markers',marker=list(size=15),yaxis="y2",visible='legendonly')
  ## trend sentiment & frequency plot: one segment per consecutive day pair
  n=nrow(trend)
  color=trend$sentiment_score%>%
    {(.[1:(n-1)]+.[2:n])/2}%>%
    {ifelse(.>0,'green','red')}
  hover=paste('Date: ',trend$date,' <br>Sentiment Score: ',round(trend$sentiment_score,3))
  for(i in 1:(n-1)){
    pic=pic%>%add_trace(x=trend$date[i:(i+1)],y=trend$number[i:(i+1)],color=I(color[i]),
                        text=hover[i],hoverinfo='text',marker=list(symbol=2,size=10),
                        mode='lines+markers',yaxis="y2",showlegend=F)
  }
  # set entire layout
  pic%>%layout(title="Trends between the COVID-19 and sentiment",
               yaxis=list(title="Number of infections on each day"),
               yaxis2=list(tickfont=list(color="blue"),overlaying="y",
                           side="right",title="The frequency of keywords"),
               xaxis=list(title="Date"))
}
# two groups of keywords plot function
# Plot daily COVID-19 case increases together with the tweet-frequency
# trends of TWO keyword groups (triangle markers for group 1, asterisk
# markers for group 2), colouring segments by sentiment as in trendPlot().
#
# @param covid    Data frame whose first column is the date and second the
#                 daily case increase.
# @param keywords List of two character vectors, one per keyword group.
# @param trends   List of two data frames, each with columns date, number
#                 and sentiment_score.
# @return A plotly object with a secondary y axis for keyword frequency.
trendsPlot=function(covid,keywords,trends){
  # select covid trend data: align everything on a gap-free date range
  # spanning both trend series
  trends[[1]]=trends[[1]]%>%mutate(date=ymd(date))
  trends[[2]]=trends[[2]]%>%mutate(date=ymd(date))
  date=c(trends[[1]]$date,trends[[2]]$date)%>%{ymd('1970-01-01')+max(.):min(.)}
  covid=covid%>%mutate(date=ymd(date))
  covid=data.frame(date)%>%
    left_join(covid,'date')
  trends[[1]]=data.frame(date)%>%
    left_join(trends[[1]],'date')
  trends[[2]]=data.frame(date)%>%
    left_join(trends[[2]],'date')
  # days without tweets count as zero frequency / neutral sentiment
  trends[[1]]$number[is.na(trends[[1]]$number)]=0
  trends[[2]]$number[is.na(trends[[2]]$number)]=0
  trends[[1]]$sentiment_score[is.na(trends[[1]]$sentiment_score)]=0
  trends[[2]]$sentiment_score[is.na(trends[[2]]$sentiment_score)]=0
  # plot for daily trend
  hover=paste('Date: ',covid[,1],' <br>Daily Increase: ',covid[,2])
  pic=plot_ly(x=covid[,1],y=covid[,2],color=I('black'),text=hover,
              hoverinfo='text',name='Daily case increase',type ='scatter',
              mode='lines+markers')
  # plot for legend
  ## trend1 legend plot
  # FIX: reuse the shared legendName() helper instead of duplicating its
  # if/else logic inline (twice)
  legend_name=legendName(keywords[[1]])
  pic=pic%>%add_trace(x=trends[[1]]$date,y=trends[[1]]$number,color=I('blue'),
                      name=legend_name,mode='lines+markers',yaxis="y2",
                      marker=list(symbol=2,size=10),visible='legendonly')
  ## trend2 legend plot (marker symbol 8 distinguishes group 2)
  legend_name=legendName(keywords[[2]])
  pic=pic%>%add_trace(x=trends[[2]]$date,y=trends[[2]]$number,color=I('blue'),
                      name=legend_name,mode='lines+markers',yaxis="y2",
                      marker=list(symbol=8,size=10),visible='legendonly')
  ## plot for sentiment legend (dummy markers shown only in the legend;
  ## "Possitive" typo fixed to match geoTrendMap)
  pic=pic%>%
    add_trace(x=trends[[1]]$date[1],y=trends[[1]]$number[1],color=I('green'),name="Positive",mode='markers',
              marker=list(size=15), yaxis="y2",visible='legendonly')
  pic=pic%>%
    add_trace(x=trends[[1]]$date[1],y=trends[[1]]$number[1],color=I('red'),name="Negative",mode='markers',
              marker=list(size=15), yaxis="y2",visible='legendonly')
  # plot for trend1: one segment per consecutive day pair, coloured by the
  # mean sentiment of its endpoints
  n=nrow(trends[[1]])
  color=trends[[1]]$sentiment_score%>%
    {(.[1:(n-1)]+.[2:n])/2}%>%
    {ifelse(.>0,'green','red')}
  hover=paste('Date: ',trends[[1]]$date,' <br>Sentiment Score: ',round(trends[[1]]$sentiment_score,3))
  for(i in 1:(n-1)){
    pic=pic%>%add_trace(x=trends[[1]]$date[i:(i+1)],y=trends[[1]]$number[i:(i+1)],color=I(color[i]),
                        text=hover[i],hoverinfo='text',marker=list(symbol=2,size=10),
                        mode='lines+markers',yaxis="y2",showlegend=F)
  }
  # plot for trend2 (same construction, asterisk markers)
  n=nrow(trends[[2]])
  color=trends[[2]]$sentiment_score%>%
    {(.[1:(n-1)]+.[2:n])/2}%>%
    {ifelse(.>0,'green','red')}
  hover=paste('Date: ',trends[[2]]$date,' <br>Sentiment Score: ',round(trends[[2]]$sentiment_score,3))
  for(i in 1:(n-1)){
    pic=pic%>%add_trace(x=trends[[2]]$date[i:(i+1)],y=trends[[2]]$number[i:(i+1)],color=I(color[i]),
                        text=hover[i],hoverinfo='text',marker=list(symbol=8,size=10),
                        mode='lines+markers',yaxis="y2",showlegend=F)
  }
  # set entire layout
  pic%>%layout(title="Trends between the COVID-19 and Twitter sentiment",
               yaxis=list(title="Number of infections on each day"),
               yaxis2=list(tickfont=list(color="blue"),overlaying="y",
                           side="right",title="The frequency of keywords"),
               xaxis=list(title="Date"))
}
# geo map function
# Draw a US-state choropleth of monthly Twitter sentiment, with a slider
# to step through the months.
#
# @param covid Data frame keyed by state and month, containing
#              positiveIncrease (case increase per state per month).
# @param trend Data frame keyed by state and month, containing number
#              (tweet count) and sentiment_score.
# @return A plotly choropleth with one trace per month plus a month slider.
geoTrendMap=function(covid,trend){
  # merge data
  # NOTE(review): the local name `data` masks utils::data() inside this
  # function; harmless here but worth renaming.
  data=left_join(trend,covid,c('state','month'))%>%
    select(month,number,sentiment_score,state,positiveIncrease)
  # states/months with no tweets count as zero frequency / neutral sentiment
  data$number[is.na(data$number)]=0
  data$sentiment_score[is.na(data$sentiment_score)]=0
  # pre-build the hover tooltip text for each state/month row
  data=mutate(data,hover=with(data,paste(state,"<br> <br> Positive:",positiveIncrease,
                                         "<br> Number of Tweets",number,
                                         "<br> Sentiment Score",round(sentiment_score,3))))
  # background map
  pic=plot_geo(locationmode='USA-states')
  # monthly maps: one choropleth trace per month; only the first month is
  # visible initially, plus the two legend marker traces appended below
  n=data$month%>%unique()%>%length()
  visible=c(T,rep(F,n-1),T,T)
  steps=list()
  for (i in 1:n) {
    pic=data[data$month==unique(data$month)[i],]%>%
      {add_trace(pic,locations=.$state,z=.$sentiment_score,text=.$hover,
                 hoverinfo='text',visible=visible[i],type='choropleth',colors="RdBu")}
    # each slider step toggles exactly one month trace on while keeping the
    # two trailing legend traces visible
    steps[[i]]=list(args=list('visible',c(rep(F,i-1),T,rep(F,n-i),T,T)),
                    label=month(unique(data$month)[i],T),method='restyle')
  }
  # add slider control & modify entire layout
  pic%>%
    add_trace(x=0,y=0,color=I('blue'),name="Positive",mode='markers',
              marker=list(size=15),visible='legendonly')%>%
    add_trace(x=0,y=0,color=I('red'),name="Negative",mode='markers',
              marker=list(size=15),visible='legendonly')%>%
    layout(title="Sentiment Score of States",
           geo=list(scope='usa',projection=list(type='albers usa'),
                    showlakes=T,lakecolor=toRGB('white')),
           sliders=list(list(active=1,currentvalue=list(prefix="Month: "),
                             steps=steps)))%>%
    hide_colorbar()
}
# geo trend function
# Plot monthly COVID-19 case increases (left axis) together with the
# monthly tweet frequency for one keyword group (right axis), colouring
# month-to-month segments by sentiment (green positive, red negative).
#
# @param covid    Data frame whose first column is the month number and
#                 second the monthly case increase.
# @param keywords Character vector of keywords; used only for the legend.
# @param trend    Data frame with columns month, number (tweet frequency)
#                 and sentiment_score.
# @return A plotly object with a secondary y axis for keyword frequency.
geoTrendPlot=function(covid,keywords,trend){
  # select covid trend data: restrict covid to the trend's month range
  covid=trend$month%>%
    {data.frame(month=min(.):max(.))}%>%
    left_join(covid,'month')
  covid$month=month(covid$month)
  # plot for daily covid trend
  hover=paste('Month: ',covid[,1],' <br>Monthly Increase: ',covid[,2])
  pic=plot_ly(x=covid[,1],y=covid[,2],color=I('black'),text=hover,
              hoverinfo='text',name='Monthly case increase',type ='scatter',
              mode='lines+markers')
  # plot for trend
  ## trend legend plot
  # FIX: reuse the shared legendName() helper instead of duplicating its
  # if/else logic inline
  legend_name=legendName(keywords)
  pic=pic%>%
    add_trace(x=trend$month,y=trend$number,color=I('blue'),
              name=legend_name,mode='lines+markers',yaxis="y2",
              marker=list(symbol=2,size=10),visible='legendonly')
  ## plot for sentiment legend (dummy markers shown only in the legend;
  ## "Possitive" typo fixed to match geoTrendMap)
  pic=pic%>%
    add_trace(x=trend$month[1],y=trend$number[1],color=I('green'),name="Positive",
              mode='markers',marker=list(size=15),yaxis="y2",visible='legendonly')
  pic=pic%>%
    add_trace(x=trend$month[1],y=trend$number[1],color=I('red'),name="Negative",
              mode='markers',marker=list(size=15),yaxis="y2",visible='legendonly')
  ## trend sentiment & frequency plot: one segment per consecutive month
  ## pair, coloured by the mean sentiment of its endpoints
  n=nrow(trend)
  color=trend$sentiment_score%>%
    {(.[1:(n-1)]+.[2:n])/2}%>%
    {ifelse(.>0,'green','red')}
  hover=paste('Month: ',trend$month,' <br>Sentiment Score: ',round(trend$sentiment_score,3))
  for(i in 1:(n-1)){
    pic=pic%>%add_trace(x=trend$month[i:(i+1)],y=trend$number[i:(i+1)],color=I(color[i]),
                        text=hover[i],hoverinfo='text',marker=list(symbol=2,size=10),
                        mode='lines+markers',yaxis="y2",showlegend=F)
  }
  # set entire layout
  pic%>%layout(title="Trends between the COVID-19 and sentiment",
               yaxis=list(title="Number of infections in each month"),
               yaxis2=list(tickfont=list(color="blue"),overlaying="y",
                           side="right",title="The frequency of keywords"),
               xaxis=list(title="Month"))
}
21028848c764e4982885cb30674cf9a347d9d75c | 446056e4c3fe1f6ef7fcfda4e0ee3f1e71b93e16 | /src/urban_polygons/compute_population.R | f9bb8187ec64282b3e7e393c996796d4cc87567f | [] | no_license | gonlairo/geomcity | d90b3871d374bbcce48420a0f8d7378ccbad374f | dd5a9882c6b3a78fa00cd910d33df87e0becb94d | refs/heads/master | 2022-12-16T22:26:05.732975 | 2020-09-22T19:39:30 | 2020-09-22T19:39:30 | 226,718,600 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,898 | r | compute_population.R | # compute population
# Sum GHSL population raster cells inside each urban polygon and write one
# GeoPackage per epoch.  GHSL raster epochs (1990/2000/2015) are paired with
# the polygon survey years present in the data (1992/2000/2011).
setwd("/Users/rodrigo/Documents/tfg/")  # NOTE(review): machine-specific path; all paths below are relative to it
library(raster)
library(sf)
library(dplyr)

path_pols = 'data/ups/track_other/ups_4326.gpkg'
pols = st_read(path_pols)

# Attach a zonal population sum to each polygon of one survey year.
#   year_label  - survey year to keep (character, matched against pols$year)
#   raster_path - GHSL population GeoTIFF for the corresponding epoch
#   out_col     - name of the population column to create
# Returns the filtered sf object with out_col filled.  Replaces the three
# copy-pasted crop/mask/sum loops of the original script.
population_by_year = function(year_label, raster_path, out_col) {
  rpop = raster(raster_path)
  pols_sel = pols %>%
    select(id, year, geom) %>%
    filter(year %in% c(year_label))
  pols_sel[[out_col]] = NA
  # seq_len() keeps the loop empty if no polygon matches year_label
  # (1:nrow() would wrongly iterate over c(1, 0))
  for (i in seq_len(nrow(pols_sel))) {
    pop_clip1 = raster::crop(rpop, pols_sel[i,])
    pop_clip2 = raster::mask(pop_clip1, pols_sel[i,])
    pols_sel[[out_col]][i] = raster::cellStats(pop_clip2, sum)
  }
  pols_sel
}

# YEAR 1990 # (polygons surveyed in 1992)
pols1990 = population_by_year("1992",
  'data/data-raw/GHSL/population/GHS_POP_E1990_GLOBE_R2019A_4326_9ss_V1_0/GHS_POP_E1990_GLOBE_R2019A_4326_9ss_V1_0.tif',
  'pop1990')
st_write(pols1990, 'data/ups/track_other/ups_pop90_4326.gpkg')

# 2000
pols2000 = population_by_year("2000",
  'data/data-raw/GHSL/population/GHS_POP_E2000_GLOBE_R2019A_4326_9ss_V1_0/GHS_POP_E2000_GLOBE_R2019A_4326_9ss_V1_0.tif',
  'pop2000')
st_write(pols2000, 'data/ups/track_other/ups_pop00_4326.gpkg')

# 2015 (polygons surveyed in 2011)
pols2015 = population_by_year("2011",
  'data/data-raw/GHSL/population/GHS_POP_E2015_GLOBE_R2019A_4326_9ss_V1_0/GHS_POP_E2015_GLOBE_R2019A_4326_9ss_V1_0.tif',
  'pop2015')
# NOTE(review): this output goes to 'data/ups/' while the other two epochs go
# to 'data/ups/track_other/' -- confirm the asymmetry is intentional.
st_write(pols2015, 'data/ups/ups_pop15_4326.gpkg')

# add to ups_3395.gpckg
|
737929901095b3ef0296c0226f031c611132a294 | 6c5afeeb3eefbfcf5ba6cfc2e7895d8c9dbf83c5 | /R/man/construct_holiday_dataframe.Rd | b37f3406d05033a15c39a95cba5e08e5e2e7f104 | [
"MIT"
] | permissive | facebook/prophet | 59a74aa92d27bdc673ceaede02016a9218556cc4 | 2ac9e8fa760e587371e1d1260f3e9f1fac9d76cb | refs/heads/main | 2023-09-01T12:43:34.236541 | 2023-08-21T22:27:06 | 2023-08-21T22:27:06 | 73,872,834 | 13,093 | 4,448 | MIT | 2023-08-24T21:49:59 | 2016-11-16T01:50:08 | Python | UTF-8 | R | false | true | 528 | rd | construct_holiday_dataframe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prophet.R
\name{construct_holiday_dataframe}
\alias{construct_holiday_dataframe}
\title{Construct a dataframe of holiday dates.}
\usage{
construct_holiday_dataframe(m, dates)
}
\arguments{
\item{m}{Prophet object.}
\item{dates}{Vector with dates used for computing seasonality.}
}
\value{
A dataframe of holiday dates, in holiday dataframe format used in
initialization.
}
\description{
Construct a dataframe of holiday dates.
}
\keyword{internal}
|
09e34426b140b5b8c78687ae43778898e024d885 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/HistData/examples/Virginis.Rd.R | 2bac081da36375af5d86c4b09fa735678e94562d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 626 | r | Virginis.Rd.R | library(HistData)
### Name: Virginis
### Title: John F. W. Herschel's Data on the Orbit of the Twin Stars gamma
### _Virginis_
### Aliases: Virginis Virginis.interp
### Keywords: datasets
### ** Examples
# Load Herschel's measured position angles and his interpolated curve.
data(Virginis)
data(Virginis.interp)
# Herschel's interpolated curve
plot(posangle ~ year, data=Virginis.interp,
	pch=15, type="b", col="red", cex=0.8, lwd=2,
	xlim=c(1710,1840), ylim=c(80, 170),
	ylab="Position angle (deg.)", xlab="Year",
	cex.lab=1.5)
# The data points, and indication of their uncertainty
# (the second points() call scales symbol size by the weight column)
points(posangle ~ year, data=Virginis, pch=16)
points(posangle ~ year, data=Virginis, cex=weight/2)
|
110aebc5dd687239b945464dfcd3a4f9d5787c8d | 1112756730a0bb4c5259d7011da1709e7ae18b49 | /cachematrix.R | cc25fcf0a98bc4534dd394b0093dc3ac1dfba43e | [] | no_license | probA-K/ProgrammingAssignment2 | 22bb5fa3195ac47116197982ac7f38edbc255c94 | 590251678d4a475c0572743788d608b65136cc3f | refs/heads/master | 2023-06-01T10:39:43.621498 | 2021-06-11T16:54:59 | 2021-06-11T16:54:59 | 375,817,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 936 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## My Comment: this creates a cache for the matrix and its inverse
## to reduce runtime if needed in another environment
## default x returns a 4 by 4 matrix of 16 random numbers
makeCacheMatrix <- function(x = matrix(rnorm(16), 4,4)) {
myCachedM<<-x
y<-solve(myCachedM)
myCahcedMSolve<<-y
}
## Write a short comment describing this function
## This returns an inverse of the matrix from the cache
cacheSolve <- function(x = myCachedM, ...) {
## Return a matrix that is the inverse of 'x'
mxSolve<-myCahcedMSolve
if(!is.null(mxSolve)){
message("getting cached data")
return(mxSolve)
}
else{
mxSolve<-solve(x)
ytwo<<-mxSolve
return(ytwo)
}
}
|
2ae2cd041f9fb2e4023348b6a2079509e31e5dae | f96af69ed2cd74a7fcf70f0f63c40f7725fe5090 | /MonteShaffer/humanVerseWSU/humanVerseWSU/man/standardizeDollarsInDataFrame.Rd | e8b6e50448363e2d13bc4dbea84b0b1cb63cc5a8 | [
"MIT"
] | permissive | sronchet/WSU_STATS419_2021 | 80aa40978698305123af917ed68b90f0ed5fff18 | e1def6982879596a93b2a88f8ddd319357aeee3e | refs/heads/main | 2023-03-25T09:20:26.697560 | 2021-03-15T17:28:06 | 2021-03-15T17:28:06 | 333,239,117 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 981 | rd | standardizeDollarsInDataFrame.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions-inflation.R
\name{standardizeDollarsInDataFrame}
\alias{standardizeDollarsInDataFrame}
\title{standardizeDollarsInDataFrame}
\usage{
standardizeDollarsInDataFrame(
df,
anchor.year,
dollar.source,
year.source,
dollar.out,
idf = inflation.df
)
}
\arguments{
\item{df}{dataframe containing dollar.source and year.source}
\item{anchor.year}{base year to convert all dollars to}
\item{dollar.source}{column name (in df) with raw dollars}
\item{year.source}{column name (in df) with 4-digit years (1920 - 2020)}
\item{dollar.out}{new column name (in df) to be created}
\item{idf}{inflation data frame}
}
\value{
dataframe, updated
}
\description{
standardizeDollarsInDataFrame
}
\examples{
loadInflationData();
# todo once I get Will/Denzel data ...
}
\seealso{
Other Inflation:
\code{\link{adjustDollarForInflation}()},
\code{\link{lookupInflationDollar}()}
}
\concept{Inflation}
|
96c014cf47648fafa2a2c1c1c234fa28cd2425ae | cb3ca9f0958f816c3c64664a8d10e4d768897fc3 | /2_analyses/2_exploring_effect_max_segment_length_and_startup_value.R | 4cfb884af8956504aa6cda9b346e1bdf34aa33a6 | [] | no_license | tamarlok/spoonacc | 71ca8ed6d612fa8b4d4f772a378562bd43cab14c | d50b30590d2f89813a8515de2c8c981fc0db07d3 | refs/heads/main | 2023-04-09T10:01:09.525103 | 2023-01-16T13:48:57 | 2023-01-16T13:48:57 | 470,207,329 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,463 | r | 2_exploring_effect_max_segment_length_and_startup_value.R | # check performance for the flexible segmentation method with different sampling durations
### Run the model with flexible segmentation, including GPS speed, but first splitting the 10 sec segments into smaller segments (to reflect the length of the segments at which the data is collected on the majority of transmitters, which for spoonbills is 0.8 or 1.6 sec)
ARL.values <- c(500, 5000) # can be extended to compare for multiple ARL values
max.seg.lengths <- c(0.4,0.8,1.6,2,5,10)
# arrays to save results using old segmentation script
sensitivity.flexible.ARL.maxlength.old <- array(NA, c(length(ARL.values),length(behaviour.pooled),length(max.seg.lengths)), dimnames = list(ARL.values, behaviour.pooled, max.seg.lengths))
precision.flexible.ARL.maxlength.old <-array(NA, c(length(ARL.values),length(behaviour.pooled),length(max.seg.lengths)), dimnames = list(ARL.values, behaviour.pooled, max.seg.lengths))
# arrays to save results using new segmentation script
sensitivity.flexible.ARL.maxlength <- array(NA, c(length(ARL.values),length(behaviour.pooled),length(max.seg.lengths)), dimnames = list(ARL.values, behaviour.pooled, max.seg.lengths))
precision.flexible.ARL.maxlength <-array(NA, c(length(ARL.values),length(behaviour.pooled),length(max.seg.lengths)), dimnames = list(ARL.values, behaviour.pooled, max.seg.lengths))
# make a loop to run for the different max.seg.lengths and ARL-values #
for (i in 1:length(max.seg.lengths)) {
for (j in 1:length(ARL.values)) {
dfs <- create.flexible.segments(ARL.values[j], max.segment.length=max.seg.lengths[i], startup=20)
RF.model.output <- RF.model(dfs[[1]], dfs[[2]])
performance.stats <- calculate.performance(RF.model.output[[2]])
sensitivity.flexible.ARL.maxlength[j,,i] <- performance.stats[[1]]
precision.flexible.ARL.maxlength[j,,i] <- performance.stats[[3]]
print(round(sensitivity.flexible.ARL.maxlength[j,"ingest",],2))
}
}
# Exploring effect of startup values
# Investigating effect of start up value using max segment length of 1.6
startup.values <- c(1, 10, 20, 30, 40) # can be extended to compare for multiple ARL values
# matrices to save results
sensitivity.effect.startup.value <- matrix(nrow=length(startup.values), ncol=length(behaviour.pooled), dimnames = list(startup.values, behaviour.pooled))
precision.effect.startup.value <- matrix(nrow=length(startup.values), ncol=length(behaviour.pooled), dimnames = list(startup.values, behaviour.pooled))
for (i in 1:length(startup.values)) {
dfs <- create.flexible.segments(startup=startup.values[i], max.segment.length=1.6)
RF.model.output <- RF.model(dfs[[1]], dfs[[2]])
performance.stats <- calculate.performance(RF.model.output[[2]])
sensitivity.effect.startup.value[i,] <- performance.stats[[1]]
precision.effect.startup.value[i,] <- performance.stats[[3]]
print(round(sensitivity.effect.startup.value,2))
}
# explore how the flexible segmentation works for different startup values
# from what I read in the manual (Ross 2015, J Stat Software), the startup value determines the number of observations at the start of the sequence during which no break point is estimated. As I understand it, this means that the first part of the sequence should consist of at least "the value of startup" observations.
max.segment.length <- 10
index.min.obs.id <- aggregate(Index~obs.id, acc.annotated, min)
names(index.min.obs.id)[2]<-"Index.start"
acc.test <- merge(acc.annotated, index.min.obs.id)
acc.test$Index <- acc.test$Index-acc.test$Index.start
### cut the segments to the length set by max.segment.length to allow the analysed data to better reflect how the data is collected in the long term on the majority of transmitters (in case of the spoonbills, mostly during bouts of 1.6 sec); by default it is set to 10 sec, which is the longest sampling duration in the data (10 sec), used for birds that were video-recorded:
acc.test$obs.id.cut <- paste(acc.test$obs.id, formatC(format="d", ceiling((acc.test$Index+1)/(max.segment.length*20)),flag="0",width=ceiling(log10(max(ceiling((acc.test$Index+1)/(max.segment.length*20)))))), sep = ".")
acc.test$segment.id.cut <- acc.test$obs.id.cut
acc.test$duration <- 1/20
un.obs.cut <- aggregate(duration~obs.id.cut, acc.test, sum)
un.obs.cut <- un.obs.cut[round(un.obs.cut$duration,1)==max.segment.length,] # only select segments equal to the max segment length (as this will be the segment length at which the data is collected)
# it could also be set to include smaller segment lengths though... to not loose any data.
un.obs.cut <- un.obs.cut$obs.id.cut
acc.test <- acc.test[acc.test$obs.id.cut %in% un.obs.cut, ]
obs.id.cut.with.handling <- unique(acc.test$obs.id.cut[acc.test$behaviour.index==12])
temp.acc <- acc.test[acc.test$obs.id.cut==obs.id.cut.with.handling[1],]
processStream(temp.acc$x, "GLR", ARL0=500, startup=20) # first changepoint at 32 (32 67 85 117 129 145 166 192)
processStream(temp.acc$x, "GLR", ARL0=500, startup=1) # first changepoint at 32; exactly the same
processStream(temp.acc$x[20:length(temp.acc$x)], "GLR", ARL0=500, startup=1) # first changepoint at 14 (14 48 66 98 110 126 147 173)
processStream(temp.acc$x[20:length(temp.acc$x)], "GLR", ARL0=500, startup=20) # exactly the same as with startup = 1; I had expected that a changepoint at 14 would not be estimated with startup set at 20.
# from this, I conclude that the startup value is not used in the function at all.
rm(acc.test, dfs, RF.model.output) |
445357657112cadf02c0f7e8f2c70907ccd7cd82 | 1eb3ba68e2a3e395b0d5438d4d4b2c28ae389736 | /Deaths.R | 6152f49ac6e39b66cdcc9c399f2293dd33c263ba | [] | no_license | davidjhawkins/COVIDGIT | 1741459e398180a0ecc091f0a8eb9e33baa50be5 | d9a7d5b26c2ed718656e106562331da74e9e57a4 | refs/heads/master | 2023-06-01T07:49:31.505197 | 2021-06-16T09:25:15 | 2021-06-16T09:25:15 | 377,123,452 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,275 | r | Deaths.R | ### Deaths from gov.uk
library(data.table)
library(readxl)
library(tidyverse)
library(ggplot2)
library(plotly)
library(scales)
library(fasttime)
library(lubridate)
###
ho <- fread("https://api.coronavirus.data.gov.uk/v2/data?areaType=region&metric=newDailyNsoDeathsByDeathDate&metric=newDeaths28DaysByDeathDate&metric=newDeaths28DaysByPublishDate&format=csv")
ho <- ho[,c(2,4,5,6,7)]
hopp <- ggplot(ho) +
geom_point(aes(x=date, y=newDeaths28DaysByPublishDate, colour=areaName), size=0.8, alpha=0.8)+
geom_smooth(aes(x=date, y=newDeaths28DaysByPublishDate, colour=areaName), span=0.2,se=F, fullrange=T, alpha=0.8)+
ggtitle("New deaths by region (daily with trend). Source of data - coronavirus.gov.uk") +
ylim(0,NA) +
facet_wrap(vars(areaName), scales="free")
ggplotly(hopp, dynamicTicks = T) %>% layout(showlegend=F, legend= list(x=0, y =1))
## log y-scale
hopp <- ggplot(ho) +
geom_point(aes(x=date, y=newDeaths28DaysByPublishDate, colour=areaName), size=0.8, alpha=0.8)+
geom_smooth(aes(x=date, y=newDeaths28DaysByPublishDate, colour=areaName), span=0.2,se=F, fullrange=T, alpha=0.8)+
ggtitle("New deaths by region (daily with trend). Source of data - coronavirus.gov.uk") +
ylim(0,NA) +
scale_y_log10() +
facet_wrap(vars(areaName), scales="fixed")
|
a7a017283afc2b2cedd289a481c11bea8b5a6f9e | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.security.identity/man/waf_create_size_constraint_set.Rd | dd9f5c663b04bbc3dc7eabee8e2d7c9a4561a278 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 3,627 | rd | waf_create_size_constraint_set.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waf_operations.R
\name{waf_create_size_constraint_set}
\alias{waf_create_size_constraint_set}
\title{This is AWS WAF Classic documentation}
\usage{
waf_create_size_constraint_set(Name, ChangeToken)
}
\arguments{
\item{Name}{[required] A friendly name or description of the SizeConstraintSet. You can't
change \code{Name} after you create a \code{SizeConstraintSet}.}
\item{ChangeToken}{[required] The value returned by the most recent call to
\code{\link[=waf_get_change_token]{get_change_token}}.}
}
\value{
A list with the following syntax:\preformatted{list(
SizeConstraintSet = list(
SizeConstraintSetId = "string",
Name = "string",
SizeConstraints = list(
list(
FieldToMatch = list(
Type = "URI"|"QUERY_STRING"|"HEADER"|"METHOD"|"BODY"|"SINGLE_QUERY_ARG"|"ALL_QUERY_ARGS",
Data = "string"
),
TextTransformation = "NONE"|"COMPRESS_WHITE_SPACE"|"HTML_ENTITY_DECODE"|"LOWERCASE"|"CMD_LINE"|"URL_DECODE",
ComparisonOperator = "EQ"|"NE"|"LE"|"LT"|"GE"|"GT",
Size = 123
)
)
),
ChangeToken = "string"
)
}
}
\description{
This is \strong{AWS WAF Classic} documentation. For more information, see
\href{https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html}{AWS WAF Classic}
in the developer guide.
\strong{For the latest version of AWS WAF}, use the AWS WAFV2 API and see the
\href{https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html}{AWS WAF Developer Guide}.
With the latest version, AWS WAF has a single set of endpoints for
regional and global use.
Creates a \code{SizeConstraintSet}. You then use
\code{\link[=waf_update_size_constraint_set]{update_size_constraint_set}} to
identify the part of a web request that you want AWS WAF to check for
length, such as the length of the \code{User-Agent} header or the length of
the query string. For example, you can create a \code{SizeConstraintSet} that
matches any requests that have a query string that is longer than 100
bytes. You can then configure AWS WAF to reject those requests.
To create and configure a \code{SizeConstraintSet}, perform the following
steps:
\enumerate{
\item Use \code{\link[=waf_get_change_token]{get_change_token}} to get the change
token that you provide in the \code{ChangeToken} parameter of a
\code{\link[=waf_create_size_constraint_set]{create_size_constraint_set}}
request.
\item Submit a
\code{\link[=waf_create_size_constraint_set]{create_size_constraint_set}}
request.
\item Use \code{\link[=waf_get_change_token]{get_change_token}} to get the change
token that you provide in the \code{ChangeToken} parameter of an
\code{\link[=waf_update_size_constraint_set]{update_size_constraint_set}}
request.
\item Submit an
\code{\link[=waf_update_size_constraint_set]{update_size_constraint_set}}
request to specify the part of the request that you want AWS WAF to
inspect (for example, the header or the URI) and the value that you
want AWS WAF to watch for.
}
For more information about how to use the AWS WAF API to allow or block
HTTP requests, see the \href{https://docs.aws.amazon.com/waf/latest/developerguide/}{AWS WAF Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_size_constraint_set(
Name = "string",
ChangeToken = "string"
)
}
}
\examples{
\dontrun{
# The following example creates size constraint set named
# MySampleSizeConstraintSet.
svc$create_size_constraint_set(
ChangeToken = "abcd12f2-46da-4fdb-b8d5-fbd4c466928f",
Name = "MySampleSizeConstraintSet"
)
}
}
\keyword{internal}
|
409eccaf1e7ab81483ccce4266662be468d8916e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ROCit/examples/cartesian_2D.Rd.R | 80c2be638de5110baffd80ef7521ba545aff325b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 180 | r | cartesian_2D.Rd.R | library(ROCit)
### Name: cartesian_2D
### Title: Cartesian Product of Two Vectors
### Aliases: cartesian_2D
### ** Examples
# Build the cartesian product of 1:3 with c(10, 20, 30):
# cartesian_2D() returns every (x, y) pairing of the two vectors.
x <- seq(3)
y <- c(10,20,30)
cartesian_2D(x,y)
|
4ec669570a767004d333f1d64e1b2752b6bbb047 | ec3fde959d6e1a46f2bc01f312a48e6bff71e089 | /LinguagemDeProgramacaoR/Exercicio_Temperaturas/Temperaturas.R | af701ddb463388a7697f21e59db2278d115ecbbb | [
"MIT"
] | permissive | LuisFernandoBenatto/Programming-Language-R | f67d2bc50a69708d8f000897a98954be7accc5e1 | 29ee4dc3431c18d129b5f179a2299302dcccb7e5 | refs/heads/main | 2023-07-12T10:25:44.055044 | 2021-08-21T00:20:14 | 2021-08-21T00:20:14 | 392,460,559 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,485 | r | Temperaturas.R | # install.packages("readr")
# install.packages("data.table")
# install.packages("dplyr")
# install.packages("ggplot2", dependencies = TRUE)
# install.packages("scales")
# install.packages("magrittr")
library(readr)
library(dplyr)
library(ggplot2)
library(scales)
library(data.table)
library(magrittr)
# Have temperatures in the city of Curitiba increased over the years?
# Load a global land-temperature dataset, keep Curitiba (Brazil), and compare
# the monthly mean temperature across a few years spanning ~220 years.

# load the dataset
df <- fread("TemperaturasGlobais.csv")

# keep only Brazilian cities and drop rows with missing values
# (the original recomputed this subset three times with the same final
# result; a single pipeline is kept here)
cidadesBrasil <- df %>% subset(df$Country == 'Brazil') %>% na.omit()

# build a dataset with Curitiba only and parse the date column
curitiba <- cidadesBrasil %>% subset(cidadesBrasil$City == 'Curitiba')
curitiba$dt <- curitiba$dt %>% as.POSIXct(format='%Y-%m-%d')

# add month (Mes) and year (Ano) columns
curitiba$Mes <- month(curitiba$dt)
curitiba$Ano <- year(curitiba$dt)

# keep a handful of sample years for the plot
curitiba <- subset(curitiba, Ano %in% c(1796,1846,1896,1946,1996,2013))

# smoothed monthly temperature curve, one colour per selected year
# (axis labels and title intentionally left in Portuguese)
p_crt <- ggplot(curitiba, aes(x = (Mes), y = AverageTemperature, color = as.factor(Ano))) +
  geom_smooth(se = FALSE, fill = NA, size = 2) +
  theme_light(base_size = 20) +
  xlab("Mes") +
  ylab("Temperatura") +
  scale_color_discrete("") +
  ggtitle("Temperatura Media ao longo dos anos em Curitiba") +
  theme(plot.title = element_text(size = 18))

# render the plot
p_crt
|
e880b5a1646aa000935881ce64fdcefce6a5f48d | 87fd5524793058ac328e97cab69877d93242dd70 | /CombineMarkerEffectEstimation.R | be866a586f66cb1e7cee731b313fecc8bd05027f | [] | no_license | aiminy/IBD-GCA-model | 2bf2c4b80d19ead96a153350b9d1d61c88246295 | 49f2636063fc2a6d4222f14b594f6548b77ca2b7 | refs/heads/master | 2016-09-06T01:05:36.973865 | 2015-02-14T17:11:31 | 2015-02-14T17:11:31 | 30,801,081 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 837 | r | CombineMarkerEffectEstimation.R | load("1008_marker_effect_estimation.RData")
load("1015_marker_effect_estimation.RData")
load("1016_marker_effect_estimation.RData")
load("1017_marker_effect_estimation.RData")
load("1018_marker_effect_estimation.RData")
load("1019_marker_effect_estimation.RData")
load("1020_marker_effect_estimation.RData")
load("1021_marker_effect_estimation.RData")
load("1023_marker_effect_estimation.RData")
load("1028_marker_effect_estimation.RData")
load("1114_marker_effect_estimation.RData")
load("1115_marker_effect_estimation.RData")
load("1116_marker_effect_estimation.RData")
load("1117_marker_effect_estimation.RData")
load("1118_marker_effect_estimation.RData")
load("1119_marker_effect_estimation.RData")
load("1120_marker_effect_estimation.RData")
load("1121_marker_effect_estimation.RData")
load("1122_marker_effect_estimation.RData")
|
e60be7db68eb6953ab23e47fee97d054b5e8a522 | 8a49c48aa8a4969d38dfdc5f756c69958c627da0 | /R/AllGenerics.R | 5122a2a05437978f2c7d7d4e6c780d8db85430bb | [
"MIT"
] | permissive | mal2017/clique-signal | 033d9262ac85b426a4859239e91b1194743109ad | 6b3b8f65a28b7e60f11c380865a3cf97d1afa2dd | refs/heads/master | 2020-03-12T03:17:36.516649 | 2018-04-26T22:59:11 | 2018-04-26T22:59:11 | 130,420,779 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,651 | r | AllGenerics.R | # -----------------------------------------------------------------------------
# generics --------------------------------------------------------------------
# -----------------------------------------------------------------------------
#' @rdname name
#' @export
setGeneric("name", function(x) standardGeneric("name"))
#' @rdname name
#' @export
setGeneric("name<-", function(x, value) standardGeneric("name<-"))
#' @rdname pwms
#' @export
setGeneric("pwms", function(x) standardGeneric("pwms"))
#' @rdname combine
#' @export
setGeneric("combine", function(x, ..., name = name(x)) {
if (length(list(...)) > 1) {
combine(x, do.call(combine, list(...)))
} else {
standardGeneric("combine")
}
})
#' @rdname extract_cliques
#' @export
setGeneric("extract_cliques", function(object) standardGeneric("extract_cliques"))
#' @rdname extract_cliques
#' @export
setGeneric("unique_cliques", function(object) standardGeneric("unique_cliques"))
#' @rdname tfbs
#' @export
setGeneric("tfbs", function(object) standardGeneric("tfbs"))
#' @rdname hash
#' @export
setGeneric("hash", function(clique) standardGeneric("hash"))
#' @rdname quantsites
#' @export
setGeneric("quantsites", function(object) standardGeneric("quantsites"))
#' @rdname name
#' @export
setGeneric("members", function(clique) standardGeneric("members"))
#' @rdname hash
#' @export
setGeneric("hashes", function(cliquelist) standardGeneric("hashes"))
#' @rdname bam
#' @export
setGeneric("bam", function(object) standardGeneric("bam"))
#' @rdname show
#' @export
setGeneric("show", function(object) standardGeneric("show"))
#' @rdname remove_subset_cliques
#' @export
setGeneric("remove_subset_cliques", function(object) standardGeneric("remove_subset_cliques"))
#' @rdname combine_similar_cliques
#' @export
setGeneric("combine_similar_cliques", function(object, ...) standardGeneric("combine_similar_cliques"))
# set methods -----------------------------------------------------------------
#' Combine cliques that are similar
#' @rdname combine_similar_cliques
#' @param object CliqueList object.
#' @param ... Other args. See other methods.
#' @param combine_when_at_least_as_similar Return cliquelist with <N differences combined.
#' @export
setMethod("combine_similar_cliques", signature(object = "CliqueList"),
          function(object, combine_when_at_least_as_similar = 1) {
            # Merge member vectors that differ by fewer than the threshold,
            # then rebuild a CliqueList from the merged sets.
            member_sets <- lapply(object, members)
            merged_sets <- combine_similar_vectors(member_sets,
                                                   max_diff_to_combine = combine_when_at_least_as_similar)
            list_to_cliquelist(merged_sets)
          })
# -----------------------------------------------------------------------------
#' Remove cliques that are subsets of other cliques
#' @rdname remove_subset_cliques
#' @param object CliqueList object.
#' @export
setMethod("remove_subset_cliques", signature(object = "CliqueList"),
          function(object) {
            # Keep only the cliques whose member sets are not subsets of
            # another clique's members.
            member_sets <- lapply(object, members)
            keep <- names(remove_subset_vectors(member_sets))
            object[keep]
          })
#' @rdname remove_subset_cliques
#' @export
setMethod("remove_subset_cliques", signature(object = "CRCExperiment"),
          function(object) {
            # NOTE(review): identical body to the CliqueList method above;
            # this relies on lapply()/members()/`[` behaving list-like on a
            # CRCExperiment -- confirm that is intended and not a copy-paste.
            not_subsets <- lapply(object, members) %>%
              remove_subset_vectors() %>% names
            object[not_subsets]
          })
# -----------------------------------------------------------------------------
#' Get bam
#' @rdname bam
#' @param object Object holding bam paths.
#' @export
setMethod("bam", signature(object = "CRCViewList"), function(object) {
  # One bam path per contained view.
  lapply(object, bam)
})

#' @rdname bam
#' @export
setMethod("bam", signature(object = "CRCView"), function(object) {
  # Path stored on the view's @bam slot.
  object@bam
})
# -----------------------------------------------------------------------------
#' Get quantsites.
#' @rdname quantsites
#' @param object Object holding quantsites.
#' @export
setMethod("quantsites", signature(object = "CRCView"), function(object) {
  # The quantification sites are the view's own ranges.
  GRanges(object)
})

#' @rdname quantsites
#' @export
setMethod("quantsites", signature(object = "CRCViewList"), function(object) {
  # One GRanges per contained view, wrapped into a GRangesList.
  GRangesList(lapply(object@listData, GRanges))
})
# -----------------------------------------------------------------------------
#' Get tfbs.
#' @rdname tfbs
#' @param object Object holding tfbs as a GRanges object.
#' @export
setMethod("tfbs", signature(object = "CRCView"), function(object) {
  # TFBS are stored directly on the view's @tfbs slot.
  object@tfbs
})
#' @rdname tfbs
#' @export
setMethod("tfbs", signature(object = "CRCViewList"), function(object) {
  # Merge the per-view TFBS lists into one GRangesList holding a single
  # reduced GRanges per motif name seen in any view.
  nested_tfbs <- lapply(object@listData, tfbs)
  all_tf_names <- unique(unlist(lapply(nested_tfbs, names)))
  # lapply() + names<- replaces the original grow-a-list-in-a-loop pattern
  # and is safe when no motif names exist (1:length(x) would iterate over
  # c(1, 0) in that case).
  top_level_tfbs_by_motif <- lapply(all_tf_names, function(nm) {
    # Views lacking this motif contribute NULL, which unlist() drops.
    nested_tfbs %>%
      lapply(`[[`, nm) %>%
      unlist %>%
      GRangesList() %>%
      unlist %>%
      GenomicRanges::reduce()
  })
  names(top_level_tfbs_by_motif) <- all_tf_names
  GRangesList(top_level_tfbs_by_motif)
})
#' @rdname tfbs
#' @export
setMethod("tfbs", signature(object = "CRCExperiment"), function(object) {
  # Delegate to the experiment's @crcs slot (its list of views).
  tfbs(object@crcs)
})
# -----------------------------------------------------------------------------
#' Clique extractors.
#' @rdname extract_cliques
#' @param object Object holding cliques.
#' @export
setMethod("unique_cliques", signature(object = "CliqueList"), function(object) {
  # Subset down to the first occurrence of every clique name.
  object[unique(names(object))]
})

#' @rdname extract_cliques
#' @export
setMethod("extract_cliques", signature(object = "CliqueList"), function(object) {
  # Underlying plain list of cliques.
  object@listData
})

#' @rdname extract_cliques
#' @export
setMethod("extract_cliques", signature(object = "CRCView"), function(object) {
  # Cliques stored on the view itself.
  object@cliques
})

#' @rdname extract_cliques
#' @export
setMethod("extract_cliques", signature(object = "CRCViewList"), function(object) {
  # Two-step unwrap per view (CRCView -> its clique container -> plain
  # list), then wrap the per-view results in a single CliqueList.
  per_view <- lapply(object@listData, extract_cliques)
  CliqueList(lapply(per_view, extract_cliques))
})

#' @rdname extract_cliques
#' @export
setMethod("extract_cliques", signature(object = "CRCExperiment"), function(object) {
  # Delegate to the experiment's @crcs slot.
  extract_cliques(object@crcs)
})
# -----------------------------------------------------------------------------
#' Name getters.
#' @rdname name
#' @param x A name-able object.
#' @param value New name.
#' @export
setMethod("name", signature(x = "TranscriptionFactor"), function(x) {
  # Read the @name slot.
  return(x@name)
})

#' @rdname name
#' @export
setMethod("name", signature(x = "CliqueList"), function(x) {
  # Read the @name slot.
  return(x@name)
})

#' @rdname name
#' @export
setMethod("name", signature(x = "CRCView"), function(x) {
  # Read the @name slot.
  return(x@name)
})

#' @rdname name
#' @exportMethod 'name<-'
setReplaceMethod("name", signature(x = "TranscriptionFactor", value = "character"), function(x, value) {
  # Replacement form: name(x) <- value returns a modified copy of x.
  x@name <- value
  x
})

#' @rdname name
#' @exportMethod 'name<-'
setReplaceMethod("name", signature(x = "CliqueList", value = "character"), function(x, value) {
  # Replacement form: name(x) <- value returns a modified copy of x.
  x@name <- value
  x
})

#' @rdname name
#' @param clique A clique object.
#' @export
setMethod("members", signature(clique = "Clique"), function(clique) {
  # Member identifiers stored on the @members slot.
  return(clique@members)
})
# -----------------------------------------------------------------------------
#' Get pwms.
#' @rdname pwms
#' @param x A TranscriptionFactor object.
#' @export
setMethod("pwms", signature(x = "TranscriptionFactor"), function(x) {
  # Position weight matrices stored on the @pwms slot.
  return(x@pwms)
})
# -----------------------------------------------------------------------------
#' Combine TranscriptionFactor instances.
#' @rdname combine
#' @param x A TranscriptionFactor object or list of TranscriptionFactor objects.
#' @param ... TranscriptionFactor objects.
#' @param name A new name for the combined TF. Optional. Defaults to name of first TF.
#' @export
setMethod("combine", signature(x = "list"), function(x, ..., name = NULL) {
  # Flatten the leading list together with any extra TFs passed via `...`.
  tfs <- unlist(list(x, ...))
  # Without an explicit name, the merged TF inherits the first TF's name.
  merged_name <- if (is.null(name)) name(tfs[[1]]) else name
  # Concatenate the PWM collections of every input TF into one list.
  merged_pwms <- do.call("c", unlist(lapply(tfs, pwms)))
  TranscriptionFactor(name = merged_name, pwms = merged_pwms)
})
#' @rdname combine
#' @export
setMethod("combine", signature(x = "TranscriptionFactor"), function(x, ..., name = NULL) {
  # Same merge logic as the list method, dispatched on a single TF first
  # argument followed by further TFs in `...`.
  inputs <- unlist(list(x, ...))
  out_name <- name
  if (is.null(out_name)) {
    # Default to the first TF's name when none was supplied.
    out_name <- name(inputs[[1]])
  }
  # Pool every input's PWMs into a single collection.
  all_pwms <- do.call("c", unlist(lapply(inputs, function(tf) pwms(tf))))
  TranscriptionFactor(name = out_name, pwms = all_pwms)
})
# -----------------------------------------------------------------------------
#' Get unique hash.
#' @rdname hash
#' @param clique A Clique.
#' @param cliquelist A CliqueList.
#' @export
setMethod(
  "hash",
  signature(clique = "Clique"),
  # Accessor for the clique's stored identity hash.
  function(clique) clique@hash
)
#' @rdname hash
#' @export
setMethod(
  "hashes",
  signature(cliquelist = "CliqueList"),
  # Accessor for the per-clique hash vector of the list.
  function(cliquelist) cliquelist@clique_hashes
)
# -----------------------------------------------------------------------------
#' Print object representation.
#' @rdname show
#' @param object Object with printable representation.
#' @export
setMethod("show", "CRCView", function(object) {
  # One-screen, human-readable summary of a CRCView.
  cat("Class: ", class(object), "\n")
  cat("Cliques: ", length(object@cliques), "\n")
  cat("Accessible Sites: ", length(object), "\n")
  # Show up to the first six TFBS names. The previous code printed
  # length(head(names(...))) -- i.e. a count no greater than 6, not the
  # names -- and "length - 6 more" went negative when there were fewer
  # than six entries.
  shown <- head(names(object@tfbs))
  remaining <- max(0L, length(object@tfbs) - length(shown))
  cat("TFBS: ", paste(shown, collapse = ", "), " and ", remaining, " more", "\n")
  cat("Bam: ", object@bam, "\n")
})
#' @rdname show
#' @export
setMethod("show", "CRCExperiment", function(object) {
  # Compact textual summary of the experiment.
  cat("Class: ", class(object), "\n")
  cat("Samples: ", length(object@crcs), "\n")
  cat("Accessible Sites: ", length(object), "\n")
  # Report the distinct condition levels stored in the experiment metadata.
  cat("Conditions: ", levels(object@metadata$CONDITION), "\n")
})
|
deb8a473d98b66ea740455a52efbdb88a16b28b0 | 6e2ebf0cb3142a146319497232386025f39669cc | /run_analysis.R | 94835c7de47101b2d6010dcbe7e4b4f059b13e05 | [] | no_license | thefatalist/Getting-and-Cleaning-Data | cd354f954dbe5249483c5ca0c86ea40878fe1440 | 23a91a3b6a9e22b4597934511f383f4e69d1eaf4 | refs/heads/master | 2021-01-10T02:30:12.358094 | 2015-05-23T17:49:39 | 2015-05-23T17:49:39 | 36,135,034 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,383 | r | run_analysis.R | # R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# library() stops with an error when a dependency is missing; require() only
# warns and returns FALSE, which would let the script continue and fail later
# with a harder-to-diagnose message.
library(reshape2)
library(data.table)
get_data <- function(fileX, fileY, fileSubject, labelsX, labelsX_selection, labelsY) {
  # Read the three parallel files of one split (test or train):
  # measurements (X), activity codes (y) and subject ids.
  measurements <- read.table(fileX)
  activities <- read.table(fileY)
  subjects <- read.table(fileSubject)
  # Name the measurement columns, then keep only the requested subset
  # (here: the mean/std features).
  names(measurements) <- labelsX
  measurements <- measurements[, labelsX_selection]
  # Add a second column translating numeric activity codes into labels.
  activities[, 2] <- labelsY[activities[, 1]]
  names(activities) <- c("activity_id", "activity_label")
  names(subjects) <- "subject"
  # One wide table: subject id, selected measurements, activity id + label.
  cbind(as.data.table(subjects), measurements, activities)
}
# Activity names (column 2 of activity_labels.txt), indexed by activity id.
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
# Feature (column) names for the X_* measurement files.
feature_labels <- read.table("./UCI HAR Dataset/features.txt")[,2]
# Logical mask selecting only mean/std features (step 2 of the assignment).
feature_mean_std <- grepl("mean|std", feature_labels)
# Collect test data
# Load the test split, keeping only the mean/std features and attaching
# readable activity labels plus the subject id for each row.
dataTest <- get_data(
fileX = "./UCI HAR Dataset/test/X_test.txt",
fileY = "./UCI HAR Dataset/test/y_test.txt",
fileSubject = "./UCI HAR Dataset/test/subject_test.txt",
labelsX = feature_labels,
labelsX_selection = feature_mean_std,
labelsY = activity_labels
)
# Collect train data
# Same loading procedure as the test split, pointed at the train files.
dataTrain <- get_data(
fileX = "./UCI HAR Dataset/train/X_train.txt",
fileY = "./UCI HAR Dataset/train/y_train.txt",
fileSubject = "./UCI HAR Dataset/train/subject_train.txt",
labelsX = feature_labels,
labelsX_selection = feature_mean_std,
labelsY = activity_labels
)
# Combine them both
# Step 1 of the assignment: stack test and train rows into one data set.
data <- rbind(dataTest, dataTrain)
# Identifier columns; every remaining column is a measurement to average.
id_labels <- c("subject", "activity_id", "activity_label")
measure_labels <- setdiff( colnames(data), id_labels)
# Long format: one row per (subject, activity, variable) observation.
melt_data <- melt(data, id=id_labels, measure.vars = measure_labels)
# Apply mean function to dataset using dcast function
# (step 5: average of each variable per subject/activity pair).
# Use <- for assignment, consistent with the rest of the script.
tidy_data <- dcast(melt_data, subject + activity_label ~ variable, fun.aggregate = mean)
# Persist the tidy data set for submission.
write.table(tidy_data, file = "./tidy_data.txt")
|
8d8b1b6dbee45ea4e542f35318dbfa2ee0ff0246 | 84cf92d31063ba128d38b7ab68bff47ab554fe36 | /tests/testthat/dummy-test.R | dc10c6d6d07659e8fb37fbe92fbd5373b03f4c80 | [] | no_license | minutestatistique/R-template-project | c375a0c4accd992ef19926f9c00c349a0e8d64b0 | a641be7f70cfc1aad3423dbe9139924406ba0cb7 | refs/heads/master | 2021-06-13T09:25:11.962429 | 2017-03-21T21:15:32 | 2017-03-21T21:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 188 | r | dummy-test.R | library(testthat)
context("My test")
test_that("Dummy test works", {
expect_true(1 + 1 == 2)
m1 <- matrix(1:4, 2, 2)
m2 <- m1 * 2
expect_equivalent(matrix(1:4 * 2, 2, 2), m2)
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.