blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6cc2758698ccb057afc20d73c6c633365524b1cd
|
af5841763d8f0fdd5ca28114ff78324b5dbaa36b
|
/man/extract_date.Rd
|
1d255f3dd97be6509092a21abc3d58c677a2b77b
|
[] |
no_license
|
RJHKnight/TCALoader
|
fe8a56973c3303ef880450952bc9677b2074a49e
|
be8ea6a376c0730c6e9047df85c4ef0893a5fa48
|
refs/heads/master
| 2021-03-28T15:56:16.465896
| 2020-10-13T05:31:00
| 2020-10-13T05:31:00
| 247,876,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 359
|
rd
|
extract_date.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Java Date Format.R
\name{extract_date}
\alias{extract_date}
\title{Convert java date / time into a Date}
\usage{
extract_date(java_date_strings)
}
\arguments{
\item{java_date_strings}{list of datetimes}
}
\value{
parsed Dates
}
\description{
Convert java date / time into a Date
}
|
044c5a03fa246e8cecac107c7e6c089f29def3b2
|
72050e80c34429148728bc27e93a474045c3a387
|
/R/trainNN.R
|
124b55a37eb4e5776922daefa06853198d348f61
|
[] |
no_license
|
cran/foster
|
f19407721792cb12f51851dd83939396a69007f4
|
9f78bb6370d6904d890eed2d3a8be729e7cb0dc6
|
refs/heads/master
| 2023-03-28T10:24:23.302542
| 2021-03-30T10:40:05
| 2021-03-30T10:40:05
| 310,511,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,026
|
r
|
trainNN.R
|
#' Train and assess accuracy of a k-NN model
#'
#' This function trains a k-NN model from response variables (Y) and predictors
#' (X) at reference observations using the package yaImpute (see
#' \code{\link[yaImpute]{yai}}). By default, the distance between observations
#' is obtained from the proximity matrix of random forest regression or
#' classification trees. Optionally, training and testing sets can be provided
#' to return the accuracy of the trained k-NN model.
#'
#' If performing model validation, the function trains a kNN model from the
#' training set, finds the k NN of the validation set and imputes the response
#' variables from the k NN. If \code{k = 1}, only the closest NN value is
#' imputed. If k > 1, the imputed value can be either the closest NN value, the
#' mean, median or distance weighted mean of the k NN values. This is controlled
#' by the arguments \code{impute.cont} or \code{impute.fac}.
#'
#' If inTest = NULL, all rows that are not in inTrain will be used for model
#' testing. If inTrain = NULL, all rows that are not in inTest will be used for
#' model training. If both inTrain and inTest are NULL, all rows of x and y will
#' be used for training and no testing is performed.
#'
#' The final model returned by \code{findNN} is trained from all observations of
#' \code{x} and \code{y}.
#'
#'@param x A dataframe or SpatialPointsDataFrame of predictors variables X for
#'  reference observations. Row names of X are used as identification of
#'  reference observations.
#'@param y A dataframe or SpatialPointsDataFrame of response variables Y for the
#'  reference observations. Row names of Y are used as identification of
#'  reference observations.
#'@param inTrain Optional. A list obtained from
#'  \code{\link[foster]{partition}} indicating which rows of x and y go to
#'  training.
#'@param inTest Optional list indicating which rows of x and y go to validation.
#'  If left NULL, all rows that are not in \code{inTrain} are used for
#'  validation.
#'@param k Integer. Number of nearest neighbors
#'@param method Character. Which nearness metrics is used to compute the nearest
#'  neighbors. Default is \code{"randomForest"}. Other methods are listed in
#'  \code{\link[yaImpute]{yai}}
#'@param impute.cont Character. The method used to compute the imputed
#'  continuous variables. Can be \code{"closest"}, \code{"mean"},
#'  \code{"median"} or \code{"dstWeighted"}. Default is \code{"closest"} if
#'  \code{k = 1} and \code{"dstWeighted"} if \code{k > 1}. See
#'  \code{\link[yaImpute]{impute.yai}} for more details.
#'@param impute.fac Character. The method used to compute the imputed values for
#'  factors. Default value is the same as \code{impute.cont}. See
#'  \code{\link[yaImpute]{impute.yai}} for more details.
#'@param ntree Number of classification or regression trees drawn for each
#'  response variable. Default is 500
#'@param mtry Number of X variables picked randomly to split each node. Default
#'  is sqrt(number of X variables)
#'@param rfMode By default, \code{rfMode} is set to \code{""} which forces
#'  \code{\link[yaImpute]{yai}} to create random forest regression trees instead
#'  of classification trees for continuous variables. Can be set to
#'  \code{"buildClasses"} if wanting continuous variables to be converted to
#'  classes and forcing random forest to build classification trees. (See
#'  \code{\link[yaImpute]{yai}})
#'@param ... Other arguments passed to \code{\link[yaImpute]{yai}} (e.g.
#'  \code{"rfXsubsets"})
#'
#'@return A list containing the following objects: \describe{
#'  \item{\code{model}}{A \code{yai} object, the trained k-NN model}
#'  \item{\code{preds}}{A data.frame with observed and predicted values of the
#'  testing set for each response variables} }
#'
#'@seealso \code{\link[yaImpute]{yai}}, \code{\link[yaImpute]{newtargets}},
#'  \code{\link[yaImpute]{impute.yai}}, \code{\link[foster]{accuracy}}
#'
#' @examples
#' # Load data in memory
#' # X_vars_sample: Predictor variables at sample (from getSample)
#' # Y_vars_sample: Response variables at sample (from getSample)
#' # train_idx: Rows of X_vars_sample and Y_vars_sample that are used for
#' # training (from (partition))
#' load(system.file("extdata/examples/example_trainNN.RData",package="foster"))
#'
#' set.seed(1234) #for example reproducibility
#' kNN <- trainNN(x = X_vars_sample,
#'                y = Y_vars_sample,
#'                inTrain = train_idx,
#'                k = 1,
#'                method = "randomForest",
#'                ntree = 200)
#' @export
trainNN <- function(x,
                    y,
                    inTrain = NULL,
                    inTest = NULL,
                    k = 1,
                    method = "randomForest",
                    impute.cont = NULL,
                    impute.fac = NULL,
                    ntree = 500,
                    mtry = NULL,
                    rfMode = "",
                    ...) {
  if (length(k) > 1) stop("Support only a single k value")
  if (dim(x)[1] != dim(y)[1]) stop("x and y must have the same number of rows")
  # inherits() is the robust way to test class membership (objects may carry
  # several classes; class(x) %in% "..." only works by accident there).
  if (inherits(x, "SpatialPointsDataFrame")) {
    x <- spdf2df(x)
  }
  if (inherits(y, "SpatialPointsDataFrame")) {
    y <- spdf2df(y)
  }
  # Work out the testing setup: no folds, single fold or cross-validation.
  # && (scalar, short-circuiting) is the correct operator in if() conditions.
  if (is.null(inTrain) && is.null(inTest)) {
    message("No training or validation set provided.")
    isTest <- FALSE
  }
  if (!is.null(inTrain)) {
    isTest <- TRUE
    if (is.list(inTrain)) {
      nfolds_train <- length(inTrain)
    } else {
      # single vector of indices -> wrap in a one-fold list
      nfolds_train <- 1
      inTrain <- list(inTrain)
    }
  }
  if (!is.null(inTest)) {
    isTest <- TRUE
    if (is.list(inTest)) {
      nfolds_test <- length(inTest)
    } else {
      nfolds_test <- 1
      inTest <- list(inTest)
    }
  }
  if (is.null(inTrain)) {
    nfolds_train <- 0
  }
  if (is.null(inTest)) {
    nfolds_test <- 0
  }
  # If fold counts disagree, the argument with more folds wins and the other
  # is derived as its complement.
  if (nfolds_train > nfolds_test && !is.null(inTest)) {
    inTest <- NULL
    message("Performing cross-validation: inTest argument ignored. Test samples are determined from the supplied training folds.")
  }
  if (nfolds_train < nfolds_test && !is.null(inTrain)) {
    inTrain <- NULL
    message("Performing cross-validation: inTrain argument ignored. Validation samples are determined from the supplied testing folds.")
  }
  # Number of folds is either 1 or the max number of folds in either test or training
  nfolds <- max(c(nfolds_train, nfolds_test))
  # Set rules for imputation
  if (k == 1) {
    impute.cont <- "closest"
    impute.fac <- "closest"
  } else {
    if (is.null(impute.cont)) impute.cont <- "dstWeighted"
    if (is.null(impute.fac)) impute.fac <- impute.cont
  }
  if (isTest) {
    # List storing the observed/predicted pairs of every fold
    preds_out <- list()
    for (n in seq_len(nfolds)) {
      # Derive the missing partition as the complement of the supplied one
      if (is.null(inTest)) {
        train_fold <- inTrain[[n]]
        test_fold <- setdiff(seq_len(dim(x)[1]), train_fold)
      } else if (is.null(inTrain)) {
        test_fold <- inTest[[n]]
        train_fold <- setdiff(seq_len(dim(x)[1]), test_fold)
      } else {
        test_fold <- inTest[[n]]
        train_fold <- inTrain[[n]]
      }
      # Make sure that row indices of train_fold and test_fold are valid
      if (any(!c(test_fold, train_fold) %in% seq_len(dim(x)[1]))) {
        stop("Invalid training or validation row indices in inTrain or inTest")
      }
      if (anyDuplicated(c(test_fold, train_fold)) != 0) {
        warning(sprintf("Fold %d: duplicated row indices in training and validation", n))
      }
      X_tr <- x[train_fold, ]
      Y_tr <- y[train_fold, ]
      X_val <- x[test_fold, ]
      Y_val <- y[test_fold, ]
      # Train yai on the training partition only
      yai_object <- yaImpute::yai(x = X_tr,
                                  y = Y_tr,
                                  method = method,
                                  k = k,
                                  mtry = mtry,
                                  ntree = ntree * ncol(y),
                                  rfMode = rfMode,
                                  ...)
      # Find NN and impute at validation
      yai_newtrgs <- yaImpute::newtargets(yai_object, X_val)
      Y_val_predicted <- yaImpute::impute(yai_newtrgs,
                                          method = impute.cont,
                                          method.factor = impute.fac,
                                          observed = FALSE)
      Y_val_predicted <- Y_val_predicted[, colnames(Y_tr)]
      Y_val_predicted <- data.frame(ID = rownames(Y_val_predicted), Y_val_predicted)
      Y_val <- data.frame(ID = rownames(Y_val), Y_val)
      # melt()'s argument is 'measure.vars'; the original code passed the
      # misspelled 'measure_vars', which was silently swallowed by '...'
      Y_pred <- reshape2::melt(Y_val_predicted, measure.vars = colnames(Y_tr),
                               value.name = "preds", id.vars = "ID")
      Y_val <- reshape2::melt(Y_val, measure.vars = colnames(Y_tr),
                              value.name = "obs", id.vars = "ID")
      preds <- merge(Y_val, Y_pred, by = c("ID", "variable"))
      # Keep track of fold
      preds$Fold <- n
      preds_out[[n]] <- preds
    }
    preds_out <- do.call(rbind, preds_out)
  } else { # No testing/validation
    preds_out <- NULL
  }
  # Once validation is done, the final model is trained with all observations
  yai_object_final <- yaImpute::yai(x = x,
                                    y = y,
                                    method = method,
                                    k = k,
                                    mtry = mtry,
                                    ntree = ntree * ncol(y),
                                    rfMode = rfMode,
                                    ...)
  # Add imputation mode to be used with predictTrgs
  yai_object_final$impute.cont <- impute.cont
  yai_object_final$impute.fac <- impute.fac
  out <- list(
    model = yai_object_final,
    preds = preds_out
  )
  return(out)
}
|
f5d8669235e7dbc07773d513e0a1db056d84b754
|
f3594d8476ccdf1c89f378fe35bb5869a6683b7d
|
/sqlWorkbenchAddin/R/sqlWorkbenchAddin.R
|
db03218ec43212e0acc979c733fe1cbfd5cde48f
|
[] |
no_license
|
micrain/barug-november-2016
|
e78d14fcc8b83c13f8a2632240777a14dd6213ab
|
3abe1346e23afae37e4642a5a01617ddc87c901d
|
refs/heads/master
| 2020-08-01T05:02:53.591124
| 2016-11-11T18:34:35
| 2016-11-11T18:34:35
| 73,584,126
| 0
| 0
| null | 2016-11-12T23:17:26
| 2016-11-12T23:17:26
| null |
UTF-8
|
R
| false
| false
| 3,190
|
r
|
sqlWorkbenchAddin.R
|
#' SQL Workbench RStudio addin
#'
#' Launches a Shiny gadget that loads an .rds data frame, lets the user filter
#' it with an arbitrary R expression, browse it as a table and plot chosen
#' columns against each other. When "Done" is clicked the (possibly filtered)
#' table is returned to the caller.
#'@export
sqlWorkbenchAddin <- function() {
  library(shiny)
  library(miniUI)
  library(ggplot2)
  # get verbose output from shiny for debugging; restore the user's previous
  # setting when the gadget exits instead of leaking it into the session
  old_opts <- options(shiny.trace = TRUE)
  on.exit(options(old_opts), add = TRUE)
  ui <- miniPage(
    gadgetTitleBar("SQL Workbench"),
    miniTabstripPanel(
      miniTabPanel("Connect", icon = icon("database"),
        miniContentPanel(
          fileInput("load_file", "Load File")
        )
      ),
      miniTabPanel("Table", icon = icon("table"),
        fillRow(textInput("table_filter", "Table Filter"),
                actionButton("filter_btn", "Filter!"), height = "75px"),
        fillRow(dataTableOutput("sql_table"), height = "300px")
      ),
      miniTabPanel("Chart", icon = icon("line-chart"),
        fillPage(fillRow(
          fillCol(selectInput("x_select", "Select X Value", choices = c("None")),
                  selectInput("y_select", "Select Y Value", choices = c("None")),
                  selectInput("plot_type", "Plot Type", choices = c("line" = "geom_line()",
                                                                    "point" = "geom_point()"))),
          plotOutput("sql_plot"), flex = c(1, 4)
        ))
      )
    )
  )
  server <- function(input, output, session) {
    # reactive container holding the currently loaded / filtered table
    df = reactiveValues(table = NULL)
    # read the file when it is uploaded
    observeEvent(input$load_file, {
      df$table = readRDS(input$load_file$datapath)
    })
    # apply the user-typed filter expression; on error keep the table as-is
    observeEvent(input$filter_btn, {
      df$table = tryCatch({
        # NOTE: eval(parse()) executes arbitrary user input -- acceptable for
        # a local interactive gadget, but never for a deployed application
        subset(df$table, eval(parse(text = input$table_filter)))
      }, error = function(e) {
        df$table
      })
    })
    # Output a DataTable (js package, not data.table R package)
    # set options https://datatables.net/reference/option/
    output$sql_table = renderDataTable(
      df$table,
      options = list(pageLength = 5, scrollY = 300)
    )
    # keep the plot column selectors in sync with the loaded table
    observe({
      updateSelectInput(session, "x_select", choices = names(df$table))
      updateSelectInput(session, "y_select", choices = names(df$table))
    })
    output$sql_plot = renderPlot({
      # eval used to parse select input text as columns of data frame
      p = ggplot(df$table, aes(x = eval(parse(text = input$x_select)),
                               y = eval(parse(text = input$y_select)))) +
        ylab(input$y_select) + xlab(input$x_select)
      # based on select input set the type of plot
      # the whole renderPlot() is re-evaluated every time the input changes
      if (input$plot_type == "geom_line()") {
        p = p + geom_line()
      }
      if (input$plot_type == "geom_point()") {
        p = p + geom_point()
      }
      p
    })
    # when "Done" is clicked return the table to the caller
    observeEvent(input$done, {
      stopApp(df$table)
    })
  }
  # create a dialog window instead of having it return in the viewer pane
  viewer <- dialogViewer("sqlWorkbench", width = 800, height = 800)
  runGadget(ui, server, viewer = viewer)
}
|
2a568706ff9223009bdf0c29fe5ceb3b7207033e
|
873da174f5474eb0941bba27cefc06cb1ad559a3
|
/pcoa with PERMANOVA.R
|
302203a4c183014820e7c912a78f1d33ed808316
|
[] |
no_license
|
ZhenyanZhang/Plant-disease
|
e5bee7030767d54baf0acfdeb512ba629955cd44
|
5bbdacd4ee05ba0e370c7e0bc88a5202837828d7
|
refs/heads/main
| 2023-07-31T14:11:09.680275
| 2021-09-13T15:10:48
| 2021-09-13T15:10:48
| 351,673,675
| 0
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,965
|
r
|
pcoa with PERMANOVA.R
|
# PCoA ordination of an OTU table with PERMANOVA group test
wd<- "D:/OneDrive/disease"
setwd(wd)
# OTU table: taxa x samples on disk; transposed below so rows become samples
otu <- read.delim('disease.txt', row.names = 1, sep = '\t', stringsAsFactors = FALSE, check.names = FALSE,na.strings="na")
otu[is.na(otu)] <- 0
otu <- data.frame(t(otu))
# group table: maps sample names to experimental groups
group <- read.delim('group.txt', sep = '\t', stringsAsFactors = FALSE)
library(vegan)
# calculation of the Bray-Curtis dissimilarity
distance <- vegdist(otu, method = 'bray')
pcoa <- cmdscale(distance, k = (nrow(otu) - 1), eig = TRUE)
write.csv(as.matrix(distance),'distance.csv')# output the distance matrix
# extract the first two coordinates and their proportion of explained variance
pcoa_eig <- (pcoa$eig)[1:2] / sum(pcoa$eig)
# NOTE(review): 'pcoa$point' relies on R's partial matching of the 'points'
# element returned by cmdscale() -- confirm, prefer pcoa$points
sample_site <- data.frame({pcoa$point})[1:2]
sample_site$names <- rownames(sample_site)
names(sample_site)[1:2] <- c('PCoA1', 'PCoA2')
# add the group information to each sample
sample_site <- merge(sample_site, group,by ='names', all.x = TRUE)
library(ggplot2)
pcoa_plot <- ggplot(sample_site, aes(PCoA1, PCoA2, group = group)) +
theme(panel.grid = element_blank(), panel.background = element_rect(color = 'black', fill = 'transparent'), legend.key = element_rect(fill = 'transparent')) +
geom_vline(xintercept = 0, color = 'gray', size = 0.3) + # reference line PCoA1 = 0
geom_hline(yintercept = 0, color = 'gray', size = 0.3) + # reference line PCoA2 = 0
geom_point(aes(color =group), size = 3, alpha = 0.8) + # set the size, color and transparency of points
stat_ellipse(level = 0.90, show.legend = F,aes(color = group),linetype="longdash")+ # 90% confidence ellipse per group
scale_color_manual(values = c('red', 'skyblue',"green")) +
labs(x = paste('PCoA axis1: ', round(100 * pcoa_eig[1], 2), '%'), y = paste('PCoA axis2: ', round(100 * pcoa_eig[2], 2), '%')) +
annotate('text', label = 'R2=0.3684,P=0.151', x = 0, y = 0, size = 5, colour = '#73D5FF') # hard-coded PERMANOVA result (calculated below)
pcoa_plot
# PERMANOVA: does community composition differ between groups?
adonis_result_otu <- adonis(otu~group, group, permutations = 999, distance = 'bray')
adonis_result_otu
|
b8cefca0eda1b29d852bdd2bdcb96350b9cd37c3
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/hyper2/R/keep.R
|
7b0b47f0ea178b62876f2db84d831342e1659a5b
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,619
|
r
|
keep.R
|
# Renumber the players of a hyper2 object so that only players that actually
# appear in at least one bracket are retained, in increasing order. Brackets
# are rewritten in terms of the positions of their players within the sorted
# vector of used players, and pnames (if set) is subset accordingly.
`tidy` <- function(H){
  wanted <- sort(unique(c(brackets(H), recursive=TRUE)))  # numeric, sorted
  bracketout <- list()
  powerout <- NULL
  for(i in seq_along(H)){
    b <- brackets(H)[[i]]  # numeric (not necessarily sorted)
    if(any(b %in% wanted)){
      # positions (within 'wanted') of the players present in this bracket;
      # equivalent to the former which(apply(outer(b, wanted, "=="), 2, any))
      bracketout <- c(bracketout, list(which(wanted %in% b)))
      powerout <- c(powerout, powers(H)[i])
    }
  }
  if(identical(pnames(H), NA)){
    pout <- NA
  } else {
    pout <- pnames(H)[wanted]
  }
  return(hyper2(bracketout, powerout, pout))
}
# Restrict a hyper2 object to a subset of players. 'keep' may be a character
# vector of player names (requires pnames) or a numeric vector of player
# indices. Brackets are intersected with the kept players; empty brackets are
# dropped. When tidy=TRUE the result is renumbered via tidy().
`keep` <- function(H, keep, tidy=TRUE){
  pn <- pnames(H) # might be NA
  if(is.character(keep)){
    stopifnot(all(keep %in% pn))
    keep <- which(pn %in% keep) # 'keep' now numeric
  } else {
    all_players <- seq_len(size(H))
    stopifnot(all(keep %in% all_players))
    keep <- which(all_players %in% keep) # 'keep' now numeric
  }
  new_brackets <- list()
  new_powers <- NULL
  for(term in seq_along(H)){
    bracket_i <- brackets(H)[[term]]
    retained <- bracket_i[bracket_i %in% keep] # the meat
    if(length(retained) > 0){
      new_brackets <- c(new_brackets, list(retained))
      new_powers <- c(new_powers, powers(H)[term])
    }
  }
  out <- hyper2(L=new_brackets, d=new_powers, pnames=pn)
  if(tidy){
    out <- tidy(out)
  }
  return(out)
}
# Complement of keep(): drop the named (or numbered) players from a hyper2
# object and retain everything else. Delegates the actual work to keep().
`discard` <- function(H, discard, tidy=TRUE){
  pn <- pnames(H)
  if(is.character(discard)){
    stopifnot(all(discard %in% pn))
    retained <- which(!(pn %in% discard))
  } else {
    all_players <- seq_len(size(H))
    stopifnot(all(discard %in% all_players))
    retained <- which(!(all_players %in% discard))
  }
  return(keep(H, retained, tidy=tidy)) # the meat
}
|
0e7231e6dfdd479477a7256761cefdbb5940f47f
|
59144bb519bd522b6084129a05a9e25d79fcd312
|
/EM in R/EM_bnlearn/EM_algorithm_with_bnlearn.R
|
563ab1280715ef8479fe24aa4cbca2b6ec9b4fca
|
[
"MIT"
] |
permissive
|
madlabunimib/Expectation-Maximisation
|
73da3d218616bf64d7a1ba95f8e2511f10b4b461
|
e15eefb18853d4800da49c4449e0c8eadf385c44
|
refs/heads/master
| 2023-01-01T19:08:57.212091
| 2020-10-22T14:52:19
| 2020-10-22T14:52:19
| 265,578,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,142
|
r
|
EM_algorithm_with_bnlearn.R
|
# Worked examples of the EM algorithm for incomplete data using bnlearn.
library(bnlearn)
# https://www.bnlearn.com/documentation/man/impute.html
# https://www.bnlearn.com/documentation/man/structural.em.html
# Incomplete data: two binary variables A and B with some missing entries
lv = c("0", "1")
x <- data.frame("A" = factor(c(0, 0, 0, NA, NA, NA, 1, 1, 1, 1), levels = lv),
"B" = factor(c(0, 1, 1, 1, 0, 0, 0, 0, 1, NA), levels = lv))
# FIRST CASE
# a simple BN with two nodes A and B linked by a single arc, with
# hand-specified starting parameters; one manual E step and one M step
em.dag = empty.graph(nodes = c("A", "B"))
em.dag = set.arc(em.dag, from = "A", to = "B")
path(em.dag, from = "A", to = "B")
em.dag
# parameters of the local distribution of A
A.prob = array(c(0.5, 0.5), dim = 2, dimnames = list(A = lv))
# parameters of the local distribution of B (conditional on A)
B.prob = array(c(0.333, 0.667, 0.667, 0.333), dim = c(2, 2),
dimnames = list(B = lv, A = lv))
cpt = list(A = A.prob, B = B.prob)
cpt
bn = custom.fit(em.dag, cpt)
bn
# expectation step: fill in missing values from the current parameters
imputed = impute(bn, x, method = "bayes-lw")
imputed
# maximisation step (forcing A to be connected to B via the whitelist)
em.dag = tabu(imputed, whitelist = data.frame(from = "A", to = "B"))
bn = bn.fit(em.dag, imputed, method = "bayes")
bn
# SECOND CASE
# initialise a BN with no arcs and iterate the E/M steps manually
imputed = x
bn = bn.fit(empty.graph(names(x)), imputed)
bn
bn$A = array(c(0.5, 0.5), dim = 2, dimnames = list(lv))
# The parameters of the local distribution of B cannot be initialised here
# because there is no arc from A to B yet in this empty graph.
for (i in 1:4) {
# expectation step
imputed = impute(bn, x, method = "bayes-lw")
imputed
# maximisation step (the whitelist forces the arc A -> B to be present;
# original note: "not to the other nodes because A create a self-loop")
dag = tabu(imputed, whitelist = data.frame(from = "A", to = "B"))
dag
#graphviz.plot(dag)
bn = bn.fit(dag, imputed, method = "bayes")
bn
# same results as the first case
}
imputed
dag
bn
# THIRD CASE
# structural.em wraps the whole E/M loop in a single call
r = structural.em(x, fit = "bayes", fit.args = list(), maximize = "tabu",
maximize.args = list(whitelist = data.frame(from = "A", to = "B")),
"parents", impute.args = list(), return.all = TRUE,
start = NULL, max.iter = 4, debug = FALSE)
# return.all = TRUE: r contains the learned structure, fitted parameters
# and the final imputed data set
r[2]
r[1]
r[3]
|
ef5824537526cf169ae076344f441ba59be12984
|
ccd38c90def4db59203f5337516194837047e642
|
/R/genBM.R
|
3ba2471b73ae826af849e8708fccc30880f21c55
|
[] |
no_license
|
atuldeshpande/simBM
|
9a8d0c63631ade9ba0a1cdc8f68772062d423ea7
|
3eb06c7528272a0334f6e8221305b0f4a9aef88c
|
refs/heads/master
| 2021-01-18T00:16:33.809542
| 2016-05-17T04:28:23
| 2016-05-17T04:28:23
| 58,980,269
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,377
|
r
|
genBM.R
|
#' Generate Brownian Motion
#'
#' This function generates 1-D and 2-D Brownian motion
#'
#' @param vector_size a positive integer value for the number of brownian motion points
#' @param dim in {1,2} for the dimension of Brownian Motion; defaults to 1 when missing
#' @param seed an optional integer value to seed the brownian motion
#' @return a numeric matrix of stages of Brownian motion, dimensions vector_size x dim
#' @author Atul Deshpande
#' @export
#' @importFrom stats rnorm
#' @examples
#' genBM(3,1,1)
#' genBM(300,2)
genBM<-
function(vector_size,dim,seed)
{
    # Apply the default dimension before any validation: the original code
    # evaluated testInteger(dim) before checking missing(dim), so a call such
    # as genBM(300) failed with an "argument missing" error instead of
    # defaulting to a 1-D Brownian motion.
    if (missing(dim)) dim <- 1
    # Validate the integer inputs (seed only if it was supplied).
    if (missing(seed))
    {
        if (!testInteger(vector_size) || !testInteger(dim))
            stop('Error: All inputs expected to be integers');
    }
    else
    {
        if (!testInteger(vector_size) || !testInteger(dim) || !testInteger(seed))
            stop('Error: All inputs expected to be integers');
        set.seed(seed, kind = NULL, normal.kind = NULL);
    }
    # Enforce the documented limit regardless of whether a seed was given
    # (previously this was only checked when `seed` was missing).
    if (dim>2)
        stop('Error: This code only creates up to 2D BMs');
    # Independent N(0, 1) increments; cumulative sums give the BM path.
    updates <- array(rnorm(vector_size*dim, mean = 0, sd = 1),c(vector_size,dim))
    simBM<-matrix(0,vector_size,dim);
    for (i in seq_len(dim))
        simBM[,i] <- cumsum(updates[,i])
    return(simBM)
}
|
6137303873bdff91593facf9046e7eec715f8311
|
c97370ef57cb15d1faea83de281589fde0485361
|
/02-wrangle-tweets.R
|
220a44f13ae56d0e986f6c8d8a42c2a1e93224cd
|
[] |
no_license
|
carlislerainey/decision-desk-hq-returns
|
112e15277047bd523753ff97164403eebf8ae2b6
|
9999144ee531df063e146b97ae6a7ee72d61eb53
|
refs/heads/master
| 2023-01-06T16:18:26.152092
| 2020-11-10T15:36:27
| 2020-11-10T15:36:27
| 311,688,625
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
02-wrangle-tweets.R
|
# Wrangle Decision Desk HQ tweets into a tidy table of state-level
# presidential vote returns: one row per (state, time, candidate).
# load packages
library(tidyverse)
library(rtweet)
# get raw tweets (an rtweet timeline previously saved to disk)
raw_tw <- read_rds("protected/decision-desk-timeline.rds")
# separate tweets into data
r <- raw_tw %>%
select(text, time = created_at) %>%
# keep only the tweets that report presidential election results
mutate(pres_results = str_detect(text, "Presidential Election Results")) %>%
filter(pres_results) %>%
# the first two characters of the tweet are the state abbreviation
mutate(state = str_sub(text, start = 1L, end = 2L)) %>%
# split the tweet into lines; lines C and D carry the two candidates'
# result lines (assumes a fixed tweet layout -- verify against raw data)
separate(text, sep = "\n", into = LETTERS, remove = FALSE, fill = "right") %>%
select(text, time, state, C, D) %>%
pivot_longer(cols = C:D) %>%
select(-name) %>%
# split each result line on ':', '(' and ')' into fields A, B, ...
separate(value, sep = ":|\\(|\\)", into = LETTERS, remove = FALSE, fill = "right") %>%
# field A = candidate, B = party, E = vote count (per the observed format)
select(text, time, state, candidate = A, party = B, votes = E) %>%
# "1,234 votes" -> numeric 1234
mutate(votes = str_remove(votes, " votes"),
votes = str_remove_all(votes, ","),
votes = as.numeric(votes)) %>%
mutate(candidate = str_trim(candidate)) %>%
mutate(time = lubridate::ymd_hms(time)) %>%
select(state, time, candidate, votes, text) %>%
glimpse()
# write both rds and csv copies; write_rds() returns its input invisibly,
# so the pipe into write_csv() passes the data through unchanged
r %>%
write_rds("returns.rds") %>%
write_csv("returns.csv")
|
da47169dd183302efd02f3f5ebc7ef534d7740e3
|
58c2005e36def8c378d0db8e69a648d266ad18c6
|
/Practice/DIME_leaflet/server.R
|
0a078136cb8b994f4b8618c75b001e686c83c40e
|
[] |
no_license
|
ShyGuyPy/workflow
|
b324c498374a12624b97ea2ab11776a511de644f
|
53b1f05d4705b1284b21d6d99d0597d8ea5e9f7f
|
refs/heads/master
| 2023-03-05T16:13:45.935934
| 2023-02-23T20:02:50
| 2023-02-23T20:02:50
| 146,636,191
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,222
|
r
|
server.R
|
# Shiny server: leaflet map of watershed polygons plus circle markers for the
# monitoring parameter currently selected in input$data. Relies on objects
# loaded elsewhere (global.R): parameter_data, HUC8, PRB, ches and pal.
shinyServer(function(input, output, session) {
# subset the (globally loaded) parameter_data to the selected parameter
select_data <- reactive({
parameter_data <- parameter_data %>%
filter(parameter == as.character(input$data))
})
# static base map: tiles, initial view and the watershed overlay polygons
output$map <- renderLeaflet({
leaflet() %>%
addTiles() %>%
setView(-78.193593, 38.917359, zoom = 7.5) %>%
addPolygons(data = HUC8, fill = FALSE, weight = 2, color = "steelblue", group = "HUC8") %>%
addPolygons(data = PRB, fill = FALSE, weight = 3, color = "navy", group = "Potomac River Watershed") %>%
addPolygons(data = ches, fill = FALSE, weight = 3, color = "navy", group = "Chesapeake Bay Watershed") %>%
addLayersControl(overlayGroups = c("Chesapeake Bay Watershed", "Potomac River Watershed", "HUC8"), position = "bottomleft",
options = layersControlOptions(collapsed = FALSE)) %>%
hideGroup("Chesapeake Bay Watershed") #%>%
# addLegend("bottomleft", pal = pal, values = parameter_data$measurevalue, title = as.character(input$data), opacity = 1)
})
# redraw the circle markers whenever the selected parameter changes
observe({
parameter_data <- select_data()
# NOTE(review): 'pal' is not defined in this file; it must come from the
# global environment -- confirm, otherwise this observer errors.
# NOTE(review): the final popup tag below is "<br" (missing '>') -- confirm.
proxy <- leafletProxy("map", data = parameter_data) %>%
clearMarkers() %>%
addCircleMarkers(data = parameter_data,
lng = ~longitude,
lat = ~latitude,
radius = 2,
color = ~ pal(parameter_data$measurevalue),
stroke = TRUE,
fillOpacity = 0.5,
popup=paste('<b>Date:</b>', parameter_data$sampledate, "<br>",
'<b>Value:</b>', parameter_data$measurevalue, "<br>",
'<b>Unit:</b>', parameter_data$unit, "<br>",
'<b>Station:</b>', parameter_data$station, "<br"),
options = popupOptions(maxHeight = 50))
})
# rebuild the legend with a palette fit to the selected parameter's range
observe({
pal <- colorNumeric(palette = c("yellow","darkgreen"), domain = select_data()$measurevalue)
# NOTE(review): 'data = parameter_data' here refers to the unfiltered global
# table, not select_data(); harmless for clearControls()/addLegend() since
# values are passed explicitly, but presumably intended to be select_data().
proxy <- leafletProxy("map", data = parameter_data) %>%
clearControls() %>%
addLegend("bottomleft", pal = pal, values =select_data()$measurevalue, title = as.character(input$data), opacity = 1)
})
}) # end shinyServer
|
692dcc6edeb08bf575b3e2d57963193476422876
|
7a647ab7cad0f4a077d58adacca9f28a2286954d
|
/R/musselbed.r
|
0fb679af8b1247136711aa0368009feea5edc835
|
[
"MIT"
] |
permissive
|
yangxhcaf/SpatialStress
|
2cca692a19075be230bca012b72d44c0256791ea
|
86727b5081dbbe64d0434baf6724e5955125ce59
|
refs/heads/master
| 2021-06-01T04:31:45.031987
| 2015-06-15T12:11:03
| 2015-06-15T12:11:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,744
|
r
|
musselbed.r
|
########################################################
# The MIT License (MIT)
#
# Copyright (c) 2014 Florian D. Schneider & Sonia Kéfi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
########################################################
### mussel bed model (Guichard et al 2003)
# Cellular-automaton model definition (class "ca_model") consumed by the
# package's generic simulation functions. Cell states (see update rules
# below): "+" occupied, "0" empty, "-" disturbed.
musselbed <- list()
class(musselbed) <- "ca_model"
musselbed$name <- "Mussel Disturbance Model"
musselbed$ref <- "Guichard et al 2003, American Naturalist, Vol. 161, pp. 889–904"
musselbed$states <- c("+", "0", "-")
musselbed$cols <- grayscale(3) # NOTE(review): grayscale() is defined elsewhere in the package
# default model parameters
musselbed$parms <- list(
r = 0.4, # recolonisation of empty sites dependent on local density
d = 0.9, # probability of disturbance of occupied sites if at least one disturbed site
delta = 0.01 # intrinsic disturbance rate
)
# One simulation step of the mussel bed CA, performed as `subs` stochastic
# sub-steps with transition probabilities scaled by 1/subs.
#
# x_old:      landscape object with $cells (state vector) and $dim (grid size)
# parms_temp: parameter list; must provide r, d and delta (see musselbed$parms)
# delta:      NOTE(review): appears unused -- inside with(parms_temp, ...)
#             below, parms_temp$delta shadows this argument; confirm intended
# subs:       number of sub-steps per call
# timestep:   current time step, used only in warning messages
musselbed$update <- function(x_old, parms_temp, delta = 0.2, subs = 10, timestep = NA) {
x_new <- x_old
for(s in 1:subs) {
# 1 - global cover and per-cell neighbourhood statistics
parms_temp$rho_plus <- sum(x_old$cells == "+")/(x_old$dim[1]*x_old$dim[2]) # get initial vegetation cover
parms_temp$localdisturbance <- count(x_old, "-") > 0 # any disturbance in neighborhood?
parms_temp$localcover <- count(x_old, "+")/4 # fraction of occupied neighbors (of 4)
# 2 - drawing random numbers
rnum <- runif(x_old$dim[1]*x_old$dim[2]) # one random number between 0 and 1 for each cell
# 3 - setting transition probabilities
if(parms_temp$rho_plus > 0) {
recolonisation <- with(parms_temp, (r*localcover)*1/subs)
disturbance <- with(parms_temp, (delta+d*localdisturbance)*1/subs)
disturbance[disturbance > 1] <- 1
} else {
# extinct landscape: nothing can recolonise; all occupied cells are disturbed
recolonisation <- 0
disturbance <- 1
}
regeneration <- 1*1/subs
# check for sum of probabilities to be inferior 1 and superior 0
if(any(c(recolonisation, disturbance, regeneration) > 1 )) warning(paste("a set probability is exceeding 1 in time step", timestep, "! decrease number of substeps!!!"))
if(any(recolonisation < 0)) warning(paste("recolonisation falls below 0 in time step",timestep, "! balance parameters!!!"))
if(any( disturbance < 0)) warning(paste("disturbance falls below 0 in time step",timestep, "! balance parameters!!!"))
if(any(regeneration < 0)) warning(paste("regeneration falls below 0 in time step",timestep, "! balance parameters!!!"))
# 4 - apply transition probabilities: 0 -> +, + -> -, - -> 0
x_new$cells[which(x_old$cells == "0" & rnum <= recolonisation)] <- "+"
x_new$cells[which(x_old$cells == "+" & rnum <= disturbance)] <- "-"
x_new$cells[which(x_old$cells == "-" & rnum <= regeneration)] <- "0"
# 5- store x_new as next x_old
x_old <- x_new
}
## end of single update call
return(x_new)
}
|
f4c6845753163190bcb17defe4d41c83f7ce7759
|
c981caf103a3540f7964e6c41a56ca34d67732c4
|
/R/mice_impute_catpmm_create_dummies_y.R
|
f7790a6d67605efa17f7f00a97fb10be0ce060cd
|
[] |
no_license
|
alexanderrobitzsch/miceadds
|
8285b8c98c2563c2c04209d74af6432ce94340ee
|
faab4efffa36230335bfb1603078da2253d29566
|
refs/heads/master
| 2023-03-07T02:53:26.480028
| 2023-03-01T16:26:31
| 2023-03-01T16:26:31
| 95,305,394
| 17
| 2
| null | 2018-05-31T11:41:51
| 2017-06-24T15:16:57
|
R
|
UTF-8
|
R
| false
| false
| 464
|
r
|
mice_impute_catpmm_create_dummies_y.R
|
## File Name: mice_impute_catpmm_create_dummies_y.R
## File Version: 0.02

# Expand a categorical outcome y into 0/1 indicator (dummy) columns named
# y1, y2, ..., optionally perturbed by normal noise with sd = ridge, and
# append them to the data frame dfr.
# Returns a list with y1 (the dummy matrix), ny (number of dummy columns)
# and dfr (the input data frame with the dummies appended).
mice_impute_catpmm_create_dummies_y <- function(y, dfr, ridge=0)
{
    nobs <- nrow(dfr)
    # one indicator column per level of y (no intercept term)
    dummies <- stats::model.matrix(object=~0+as.factor(y), data=dfr)
    ncat <- ncol(dummies)
    colnames(dummies) <- paste0("y", seq_len(ncat))
    # jitter the indicators; with ridge = 0 the draw is degenerate at zero
    noise <- matrix(stats::rnorm(nobs*ncat, mean=0, sd=ridge), nrow=nobs, ncol=ncat)
    dummies <- dummies + noise
    dfr <- data.frame(dfr, dummies)
    #-- outcome
    res <- list(y1=dummies, ny=ncat, dfr=dfr)
    return(res)
}
|
4e6afdf305e02242ba804219d06c9d01963088ea
|
d5d5c96b2efabed5b1e789a6b46a99670064ef25
|
/R/gamllik.R
|
818432af5325792f419096aed48f2a3504b691aa
|
[] |
no_license
|
baihongguo/RPEXE.RPEXT-1
|
f69c9c8e139ce50e9113d25e34f4e0ce8b528236
|
c860e2ebbefd2e1d330a3156f62e42ae67264cc8
|
refs/heads/master
| 2022-07-29T00:37:21.474495
| 2020-05-22T22:29:17
| 2020-05-22T22:29:17
| 266,162,760
| 0
| 0
| null | 2020-05-22T16:54:47
| 2020-05-22T16:54:46
| null |
UTF-8
|
R
| false
| false
| 2,192
|
r
|
gamllik.R
|
#' @title Log likelihood from the gamma distribution
#'
#' @description A function computing the log likelihood from the gamma distribution under an order restriction reduction
#'
#' @param structtime change-point times to be used to compute the likelihood value
#' @param structttot total time on test (ttot) between each time point and the previous time point (or 0) corresponding to structtime
#' @param structdeaths number of deaths corresponding to structttot
#' @param time_die all event and censoring times from small to large
#' @param ttot total time on test corresponding to time_die
#' @param deaths the number of deaths corresponding to "ttot"
#'
#' @importFrom stats dgamma
#'
#' @usage gamllik(structtime,structttot,structdeaths,time_die,ttot,deaths)
#'
#' @return
#' log of the likelihood
#'
#' @export
#'
#' @examples
#' time_die <- c(0.05,0.08,0.38,0.41,0.64)
#' ttot <- c(9.2,5.8,52.1,5.8,40.0)
#' deaths <- c(1,1,1,1,1)
#' structtime <- c(0.05,0.64)
#' structttot <- c(9.2, 40.0)
#' structdeaths = c(1, 5)
#' gamllik(structtime,structttot,structdeaths,time_die,ttot,deaths)
gamllik <- function(structtime, structttot, structdeaths, time_die, ttot, deaths)
{
    # Gamma scale parameter of each segment: total time on test per death.
    # (Vectorised; replaces the original element-by-element loop.)
    structgamindi <- structttot / structdeaths
    # Index within time_die of each change-point time (0 when absent; when a
    # time occurs more than once the last matching index wins, as before).
    structindi <- array(0, c(length(structtime), 1))
    for (j in seq_along(structtime))
        for (jj in seq_along(time_die))
            if (time_die[jj] == structtime[j])
                structindi[j] <- jj
    # Assign to every observation the scale parameter of the segment
    # (change-point interval) it falls into.
    structgampar <- array(0, c(length(time_die), 1))
    for (ii in seq_along(time_die))
        for (j in seq_along(structtime))
            if (ii <= structindi[j])
            {
                if (j == 1)
                    structgampar[ii] <- structgamindi[j]
                else
                    if (ii > structindi[j-1])
                        structgampar[ii] <- structgamindi[j]
            }
    # Sum of per-observation gamma log-densities. dgamma(log = TRUE) is used
    # instead of log(dgamma(..., log = FALSE)): mathematically identical but
    # numerically stable (no -Inf from underflow of the raw density).
    loglik <- sum(dgamma(ttot/deaths, shape = deaths, scale = structgampar, log = TRUE))
    return(loglik)
}
|
ed654e4b8d384d582a3b5604455e150dc2ed0f75
|
b01da0776510ff28170e0fe2cf16bca2295c40b4
|
/2021/cleaning_flux.R
|
67feb8e6a45a2e11d248a8664b1a917665be8ed1
|
[] |
no_license
|
jogaudard/BIO102
|
38f7cddd453ab8aec8bc423dc8aec77137c97761
|
71844c0085ffc574803ce66ce6685f78683c2c82
|
refs/heads/master
| 2023-08-14T18:13:11.742302
| 2021-10-13T08:20:16
| 2021-10-13T08:20:16
| 298,063,570
| 0
| 2
| null | 2021-08-06T13:05:39
| 2020-09-23T18:44:54
|
R
|
UTF-8
|
R
| false
| false
| 10,537
|
r
|
cleaning_flux.R
|
# This script is to clean the flux data from logger format to a dataset of fluxes with all the sites and environmental parameters
# We need some packages to make it work
library(tidyverse)
library("dataDownloader") #details here https://github.com/Between-the-Fjords/dataDownloader
library(broom)
library(fs)
library(lubridate)
# We also need to write our own functions:
# to match the fluxes with the correct site
# Match raw CO2 logger readings to field-record metadata.
# Full-joins the logger stream onto the field record at each measurement's
# start time, carries the record metadata down over the measurement's rows,
# assigns a unique ID per flux, and crops each flux to [start, end].
# Relies on the logger rows being time-ordered so fill() propagates each
# record to exactly the rows that follow its start time.
match.flux <- function(raw_flux, field_record){
co2conc <- full_join(raw_flux, field_record, by = c("datetime" = "start"), keep = TRUE) %>% #joining both dataset in one
fill(PAR, temp_air, temp_soil, site, type, plot, campaign, start, date, end, start_window, end_window) %>% #filling all rows (except comments) with data from above
group_by(date, site, plot, type) %>% #this part is to fill comments while keeping the NA (some fluxes have no remark)
fill(comments) %>%
ungroup() %>%
# NOTE(review): group_indices() with data/dots is deprecated in recent dplyr;
# cur_group_id() inside group_by() is the modern equivalent — verify before upgrading.
mutate(ID = group_indices(., date, site, type, plot)) %>% #assigning a unique ID to each flux, useful for plotting etc.
filter(
datetime <= end
& datetime >= start) #%>% #cropping the part of the flux that is after the end and before the start
return(co2conc)
}
# to calculate fluxes
# Compute one CO2 flux per measurement ID from the matched concentration data.
# Fits a linear model CO2 ~ time per flux, converts the slope (ppm/s) to
# mmol m^-2 h^-1 with the ideal gas law, and attaches per-flux PAR and
# temperature averages.
# NOTE(review): nest(-ID) and the two-argument unnest(results, slope) are
# deprecated in tidyr >= 1.0; glance()/tidy() come from broom.
flux.calc <- function(co2conc, # dataset of CO2 concentration versus time (output of match.flux)
chamber_volume = 24.5, # volume of the flux chamber in L, default for Three-D chamber (25x24.5x40cm)
tube_volume = 0.075, # volume of the tubing in L, default for summer 2020 setup
atm_pressure = 1, # atmospheric pressure, assumed 1 atm
plot_area = 0.0625 # area of the plot in m^2, default for Three-D
)
{
R = 0.082057 #gas constant, in L*atm*K^(-1)*mol^(-1)
vol = chamber_volume + tube_volume
fluxes_final <- co2conc %>%
# group_by(ID) %>%
nest(-ID) %>%
mutate(
data = map(data, ~.x %>%
mutate(time = difftime(datetime[1:length(datetime)],datetime[1] , units = "secs"), #add a column with the time difference between each measurements and the beginning of the measurement. Useful to calculate the slope.
PARavg = mean(PAR, na.rm = TRUE), #mean value of PAR for each flux
temp_airavg = mean(temp_air, na.rm = TRUE) #mean value of Temp_air for each flux
+ 273.15, #transforming in kelvin for calculation
temp_soilavg = mean(temp_soil, na.rm = TRUE) #mean value of temp_soil for each flux
)),
fit = map(data, ~lm(CO2 ~ time, data = .)), #fit is a new column in the tibble with the slope of the CO2 concentration vs time (in secs^(-1))
# slope = map_dbl(fit, "time")
results = map(fit, glance), #to see the coefficients of the model
slope = map(fit, tidy) #creates a tidy df with the coefficients of fit
) %>%
unnest(results, slope) %>%
unnest(data) %>%
filter(term == 'time' #filter the estimate of time only. That is the slope of the CO2 concentration. We need that to calculate the flux.
# & r.squared >= 0.7 #keeping only trendline with an r.squared above or equal to 0.7. Below that it means that the data are not good quality enough
# & p.value < 0.05 #keeping only the significant fluxes
) %>%
# select(ID, Plot_ID, Type, Replicate, Remarks, Date, PARavg, Temp_airavg, r.squared, p.value, estimate, Campaign) %>% #select the column we need, dump the rest
distinct(ID, site, plot, type, comments, date, PARavg, temp_airavg, temp_soilavg, r.squared, p.value, estimate, campaign, .keep_all = TRUE) %>% #remove duplicate. Because of the nesting, we get one row per Datetime entry. We only need one row per flux. Select() gets rid of Datetime and then distinct() is cleaning those extra rows.
#calculate fluxes using the trendline and the air temperature (ideal gas law: n = PV/RT)
mutate(flux = (estimate * atm_pressure * vol)/(R * temp_airavg * plot_area) #gives flux in micromol/s/m^2
*3600 #secs to hours
/1000 #micromol to mmol
) %>% #flux is now in mmol/m^2/h, which is more common
select(datetime, ID, site, plot, type, comments, date, PARavg, temp_airavg, temp_soilavg, r.squared, p.value, nobs, flux, campaign)
return(fluxes_final)
}
# Here we define the length of the fluxes
measurement <- 210 #the length of the measurement taken on the field in seconds
startcrop <- 10 #how much to crop at the beginning of the measurement in seconds
endcrop <- 40 #how much to crop at the end of the measurement in seconds
# Getting data
# the raw data will be on OSF
# co2 concentration
get_file(node = "3qhdj",
file = "BIO102_cflux_2021.zip",
path = "2021/data",
remote_path = "raw_data/2021")
# field record
get_file(node = "3qhdj",
file = "BIO102_field-record_2021.csv",
path = "2021/data",
remote_path = "raw_data/2021")
# NDVI
get_file(node = "3qhdj",
file = "BIO102_NDVI_2021.csv",
path = "2021/data",
remote_path = "raw_data/2021")
# Soil moisture
get_file(node = "3qhdj",
file = "BIO102_soil-moisture_2021.csv",
path = "2021/data",
remote_path = "raw_data/2021")
# cutting file (manual per-flux window adjustments)
get_file(node = "3qhdj",
file = "BIO102_cutting_2021.csv",
path = "2021/data",
remote_path = "raw_data/2021")
# Unzip files
zipFile <- "2021/data/BIO102_cflux_2021.zip"
if(file.exists(zipFile)){
outDir <- "2021/data"
unzip(zipFile, exdir = outDir)
}
#importing fluxes data
location <- "2021/data/BIO102_cflux_2021" #location of datafiles
fluxes <-
dir_ls(location, regexp = "CO2.") %>%
map_dfr(read_csv, na = c("#N/A", "Over")) %>%
rename( #rename the column to get something more practical without space
CO2 = "CO2 (ppm)",
temp_air = "Temp_air ('C)",
temp_soil = "Temp_soil ('C)",
PAR = "PAR (umolsm2)",
datetime = "Date/Time"
) %>%
mutate(
datetime = dmy_hms(datetime)
) %>%
select(datetime, CO2, PAR, temp_air, temp_soil)
#import the record file from the field
record <- read_csv("2021/data/BIO102_field-record_2021.csv", na = c(""), col_types = "ffftDfc") %>%
drop_na(starting_time) %>% #delete row without starting time (meaning no measurement was done)
mutate(
start = ymd_hms(paste(date, starting_time)), #converting the date as posixct, pasting date and starting time together
end = start + measurement, #creating column end
start_window = start + startcrop, #cropping the start
end_window = end - endcrop #cropping the end of the measurement
)
#matching the CO2 concentration data with the turfs using the field record
co2_fluxes <- match.flux(fluxes,record)
# import cutting (manual start/end overrides per flux ID)
cutting <- read_csv("2021/data/BIO102_cutting_2021.csv", na = "", col_types = "dtt")
co2_cut <- co2_fluxes %>%
left_join(cutting, by = "ID") %>%
# NOTE(review): `.$start_cut` inside mutate() refers to the whole piped data
# frame rather than the usual tidy-eval column reference; plain `start_cut`
# would be the idiomatic form — verify intent before changing.
mutate(
start_cut = ymd_hms(paste(date, .$start_cut)),
end_cut = ymd_hms(paste(date, .$end_cut))
)
# adjusting the time window with manual cuts: a manual start/end overrides
# the default cropped window; rows outside the window are flagged "cut"
co2_cut <- co2_cut %>% mutate(
start_window = case_when(
is.na(start_cut) == FALSE ~ start_cut,
TRUE ~ start_window
),
end_window = case_when(
is.na(end_cut) == FALSE ~ end_cut,
TRUE ~ end_window
),
cut = case_when(
datetime <= start_window | datetime >= end_window ~ "cut",
# ID == & datetime %in% ~ "cut",
# ID == & datetime %in% ~ "cut",
TRUE ~ "keep"
),
cut = as_factor(cut)
)
theme_set(theme_grey(base_size = 5))
#plot each flux to look into details what to cut off
# NOTE(review): chaining ggsave() onto the plot with `+` (here and in the
# three plots below) is a deprecated pattern; the conventional form is to
# build the plot, then call ggsave() as a separate statement — verify the
# saved files before refactoring.
ggplot(co2_cut, aes(x = datetime, y = CO2, color = cut)) +
geom_line(size = 0.2, aes(group = ID)) +
scale_x_datetime(date_breaks = "1 min", minor_breaks = "10 sec", date_labels = "%e/%m \n %H:%M") +
# scale_x_date(date_labels = "%H:%M:%S") +
facet_wrap(vars(ID), ncol = 5, scales = "free") +
ggsave("2021/BIO102_2021_detailb.png", height = 20, width = 30, units = "cm")
# graph PAR, soil temp and air temp to check
ggplot(co2_cut, aes(x = datetime, y = PAR, color = cut)) +
geom_line(size = 0.2, aes(group = ID)) +
scale_x_datetime(date_breaks = "1 min", minor_breaks = "10 sec", date_labels = "%e/%m \n %H:%M") +
# scale_x_date(date_labels = "%H:%M:%S") +
facet_wrap(vars(ID), ncol = 5, scales = "free") +
ggsave("2021/BIO102_2021_detailb_PAR.png", height = 20, width = 30, units = "cm")
ggplot(co2_cut, aes(x = datetime, y = temp_air, color = cut)) +
geom_line(size = 0.2, aes(group = ID)) +
scale_x_datetime(date_breaks = "1 min", minor_breaks = "10 sec", date_labels = "%e/%m \n %H:%M") +
# scale_x_date(date_labels = "%H:%M:%S") +
facet_wrap(vars(ID), ncol = 5, scales = "free") +
ggsave("2021/BIO102_2021_detailb_air.png", height = 20, width = 30, units = "cm")
ggplot(co2_cut, aes(x = datetime, y = temp_soil, color = cut)) +
geom_line(size = 0.2, aes(group = ID)) +
scale_x_datetime(date_breaks = "1 min", minor_breaks = "10 sec", date_labels = "%e/%m \n %H:%M") +
# scale_x_date(date_labels = "%H:%M:%S") +
facet_wrap(vars(ID), ncol = 5, scales = "free") +
ggsave("2021/BIO102_2021_detailb_soil.png", height = 20, width = 30, units = "cm")
#Some corrections are needed with the PAR (I knew the sensor has a faulty contact: we will remove the dip when it is suddenly going close to 0 while it was in the light.)
# co2_cut <- co2_cut %>%
# mutate(
# PAR =
# case_when(
# # type == "ER" & PAR <= 0 ~ 0, #when light is very low it can give a negative value. Obviously this is a 0
# # ID == 1 & datetime %in% c(ymd_hms("2021-08-23T15:01:10"):ymd_hms("2021-08-23T15:01:29")) ~ NA_real_,
# # ID == 3 & datetime %in% c(ymd_hms("2021-08-23T15:17:"):ymd_hms("2021-08-23T15:01:29")),
# # ID == 1 & PAR <= 500 ~ NA_real_,
# # ID == 3 & PAR <= 100 ~ NA_real_,
# # ID == 6 & PAR <= 100 ~ NA_real_,
# # type == "NEE" & PAR <= 50 ~ NA_real_,#cannot do that because some NEE (forest) have very low PAR and this is normal
# TRUE ~ as.numeric(PAR)
# )
# )
# second thought: will not do corrections as it is too difficult to know if it was a sensor faulty contact or someone shading the chamber
# cutting the part we want to keep. To do only after cleaning everything
co2_cut <- filter(co2_cut, cut == "keep") #to keep only the part we want to keep
# calculating the fluxes
fluxes2021 <- flux.calc(co2_cut)
# needs to add avg NDVI and soil moisture
write_csv(fluxes2021, "2021/data/BIO102_c-flux_2021.csv")
|
61c6215f50cb05aa5816e2bd064b449eeb35cdd1
|
b27f67e1cb5f7962b35803e8d455fbce8268f50f
|
/dates.R
|
80a9761130874080f9e6c5bee03eecd7eab2c823
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
pablobernabeu/date-converter
|
6d9ab23fa0899ef82d755544d4e650b55676f608
|
4cdd8c3e1d5fd9c9b86a2e8dc98ba691179b3aa4
|
refs/heads/master
| 2020-08-28T09:02:28.135423
| 2020-01-26T16:57:05
| 2020-01-26T16:57:05
| 217,656,300
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,799
|
r
|
dates.R
|
# R function for converting variously formatted dates to computer format (YYYY-MM-DD).
# This version of the function changes years '19' and '20' to full forms '2019' and '2020'.
# Convert variously formatted date strings to computer format (year-month-day).
#
# Args:
#   dat: data frame with columns
#        DATE        - date strings (character or factor)
#        date_format - per-row ordering code: "ymd", "dmy", "ydm" or "mdy"
# Returns:
#   data frame with columns DATE, date_format and sorted_date, where
#   sorted_date is "YYYY-M-D" (month and day are not zero-padded).
# Notes:
#   - Only the two-digit years 19 and 20 are expanded (to 2019 and 2020),
#     matching this converter's original design.
#   - FIX: the month lookup now also recognises the standard abbreviations
#     'Jun', 'Jul' and 'Sep' (previously only 'June', 'July', 'Sept').
format_date <- function(dat){
  # Character format
  dat$DATE <- as.character(dat$DATE)
  dat$date_format <- as.character(dat$date_format)
  # Replace any special characters (dots, commas, slashes, quotes, whitespace) with hyphens
  dat$re_date <- gsub("\\.|,|, |/|\\-|\"|\\s", "-", dat$DATE)
  # Extract year from dates formatted ymd, dmy, ydm, and mdy, respectively
  dat[dat$date_format=='ymd', 'year'] <- sub('(^\\d+|\\w+|\\W+)-.*', replacement = '\\1', dat[dat$date_format=='ymd', 're_date'])
  dat[dat$date_format=='dmy', 'year'] <- sub('.*-', '', dat[dat$date_format=='dmy', 're_date'])
  dat[dat$date_format=='ydm', 'year'] <- sub('(^\\d+|\\w+|\\W+)-.*', replacement = '\\1', dat[dat$date_format=='ydm', 're_date'])
  dat[dat$date_format=='mdy', 'year'] <- sub('.*-', '', dat[dat$date_format=='mdy', 're_date'])
  # Change abbreviated years to full form -- only for 19 and 20
  dat$year <- gsub('^19$', '2019', dat$year)
  dat$year <- gsub('^20$', '2020', dat$year)
  # Extract month from dates formatted ymd, dmy, ydm, and mdy, respectively
  dat[dat$date_format=='ymd', 'month'] <- gsub('^[^-]*-([^-]+).*', '\\1', dat[dat$date_format=='ymd', 're_date'])
  dat[dat$date_format=='dmy', 'month'] <- gsub('^[^-]*-([^-]+).*', '\\1', dat[dat$date_format=='dmy', 're_date'])
  dat[dat$date_format=='ydm', 'month'] <- sub('.*-', '', dat[dat$date_format=='ydm', 're_date'])
  dat[dat$date_format=='mdy', 'month'] <- sub('(^\\d+|\\w+|\\W+)-.*', replacement = '\\1', dat[dat$date_format=='mdy', 're_date'])
  # Change written months to numbers (full names and standard abbreviations)
  dat$month <- gsub('^Jan$', '1', dat$month)
  dat$month <- gsub('^January$', '1', dat$month)
  dat$month <- gsub('^Feb$', '2', dat$month)
  dat$month <- gsub('^February$', '2', dat$month)
  dat$month <- gsub('^Mar$', '3', dat$month)
  dat$month <- gsub('^March$', '3', dat$month)
  dat$month <- gsub('^Apr$', '4', dat$month)
  dat$month <- gsub('^April$', '4', dat$month)
  dat$month <- gsub('^May$', '5', dat$month)
  dat$month <- gsub('^Jun$', '6', dat$month)   # added: standard abbreviation
  dat$month <- gsub('^June$', '6', dat$month)
  dat$month <- gsub('^Jul$', '7', dat$month)   # added: standard abbreviation
  dat$month <- gsub('^July$', '7', dat$month)
  dat$month <- gsub('^Aug$', '8', dat$month)
  dat$month <- gsub('^August$', '8', dat$month)
  dat$month <- gsub('^Sep$', '9', dat$month)   # added: standard abbreviation
  dat$month <- gsub('^Sept$', '9', dat$month)
  dat$month <- gsub('^September$', '9', dat$month)
  dat$month <- gsub('^Oct$', '10', dat$month)
  dat$month <- gsub('^October$', '10', dat$month)
  dat$month <- gsub('^Nov$', '11', dat$month)
  dat$month <- gsub('^November$', '11', dat$month)
  dat$month <- gsub('^Dec$', '12', dat$month)
  dat$month <- gsub('^December$', '12', dat$month)
  # Extract day from dates formatted ymd, dmy, ydm, and mdy, respectively
  dat[dat$date_format=='ymd', 'day'] <- sub('.*-', '', dat[dat$date_format=='ymd', 're_date'])
  dat[dat$date_format=='dmy', 'day'] <- sub('(^\\d+|\\w+|\\W+)-.*', replacement = '\\1', dat[dat$date_format=='dmy', 're_date'])
  dat[dat$date_format=='ydm', 'day'] <- gsub('^[^-]*-([^-]+).*', '\\1', dat[dat$date_format=='ydm', 're_date'])
  dat[dat$date_format=='mdy', 'day'] <- gsub('^[^-]*-([^-]+).*', '\\1', dat[dat$date_format=='mdy', 're_date'])
  # Order parts as YYYY-MM-DD
  dat$sorted_date <- paste0(dat$year, '-', dat$month, '-', dat$day)
  # Return
  return(dat[,c('DATE', 'date_format', 'sorted_date')])
}
# Test example: one string per supported input style
DATE = c('17.10.2019', '18.9.19', '2019-10.18', '19.10.17', '23/9/2019', '2019/23/9', '7 Nov 19',
'December 9, 2019', '2019 December 9', '10 Dec 2019')
test_data = data.frame(DATE)
# Manually enter each row's date format (this is the only part that must be
# entered by hand, in a spreadsheet or in R)
test_data$date_format = c('dmy','dmy','ymd','ymd','dmy','ydm','dmy','mdy','ymd','dmy')
# Run the converter on the examples
format_date(test_data)
|
52ec4058c842456ddafc249d4d6d22da82f62663
|
8ef3e422e2f5618fdb0bcb99574cc771962ee99e
|
/R/oscar.note.R
|
3a4c0536cceae82876b44197f6ec075e9301d2b3
|
[] |
no_license
|
cran/tdm
|
f09134e94f9fb4d60133486d72dc84e1c80f99d2
|
293cc0a7e6d9641de189077cb5f46f6c146b5689
|
refs/heads/master
| 2016-08-03T21:51:21.507848
| 2014-12-16T00:00:00
| 2014-12-16T00:00:00
| 17,700,401
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,839
|
r
|
oscar.note.R
|
# Print a console banner describing the expected input fields for
# oxycodone immediate-release (ocIR) calculations. Side effect only.
note_for_ocIR_input <- function(){
cat("-----------------------------------------------------\n")
cat(" --Oxycodone input data information-- \n\n")
cat(" D = dose for the 1st-dose (mg) \n")
cat(" phenotype: 1=UM, 2=EM, 3=PM \n")
cat("-----------------------------------------------------\n")
}
# Print a console banner describing the opioid concentration inputs
# (sampling time and measured concentration). Side effect only.
note_for_OO_conc_input <- function(){
cat("-----------------------------------------------------\n")
cat(" --Opioid input data information-- \n\n")
cat(" ts = sampling time after the 1st dose (hr) \n")
cat(" conc = measured opioids conc.(ng/mL) \n")
cat("-----------------------------------------------------\n")
}
# Print a console banner describing the opioid output quantities
# (estimated CL/F, V/F and steady-state peak/trough). Side effect only.
note_for_OO_output <- function(){
cat("-----------------------------------------------------------\n")
cat(" --Opioid output data information-- \n\n")
cat(" cl_F = estimated clearance (L/hr) \n")
cat(" v_F = estimated volume of distribution (L) \n")
cat(" Cmax_ss = calculated peak conc.(ng/mL) at SS \n")
cat(" Cmin_ss = calculated trough conc.(ng/mL) at SS \n")
cat(" SS = steady-state \n")
cat("-----------------------------------------------------------\n")
}
# Print a console banner describing the opioid dose input
# (mg, except fentanyl in mcg). Side effect only.
note_for_OO_input <- function(){
cat("-----------------------------------------------------\n")
cat(" --Opioid input data information-- \n\n")
cat(" D = dose for the 1st-dose (mg) \n\n")
cat(" Note: for fentanyl, the dose unit is mcg. \n")
cat("-----------------------------------------------------\n")
}
# Print a console banner for the concentration-to-dose direction:
# inputs are desired trough and dosing interval, output is the dose.
note_for_c_to_d_OO <- function(){
cat("--------------------------------------------\n")
cat(" --input data-- \n")
cat(" Cmin_ss = desired trough conc (ng/L) \n")
cat(" tau = desired dosing interval (hr) \n")
cat(" \n")
cat(" --output data-- \n")
cat(" Dose = calculated dose (mg) \n")
cat(" for fentanyl, the dose is mcg \n")
cat("--------------------------------------------\n\n")
}
# Print a console banner for the dose-to-concentration direction:
# inputs are dose and dosing interval, output is the predicted trough.
note_for_d_to_c_OO <- function(){
cat("--------------------------------------------\n")
cat(" --input data-- \n")
cat(" D = desired dose (mg); fentanyl - mcg \n")
cat(" tau = desired dosing interval (hr) \n")
cat(" \n")
cat(" --output data-- \n")
cat(" Cmin_ss = calculated trough conc (ng/L) \n")
cat("--------------------------------------------\n\n")
}
|
900df7aeccac181d27b741578c958d0cf8d1769b
|
74f67b0be8bcd4fa66fd62e5268dd999d1183554
|
/Hosei/filtering/eliminate misbehaving lib.R
|
533431b99a7d0b259cd398832eefa24153287643
|
[] |
no_license
|
vyrms/sravandevanathan
|
b37a9a5aca0c1cf0780d663cd5a252431ece7889
|
f3bc2d497beb7f89d6eec0eb38c0555a4ec40ddc
|
refs/heads/master
| 2022-02-24T16:01:11.435047
| 2019-07-29T20:48:10
| 2019-07-29T20:48:10
| 170,383,541
| 1
| 0
| null | 2019-02-12T20:07:35
| 2019-02-12T20:07:35
| null |
UTF-8
|
R
| false
| false
| 85
|
r
|
eliminate misbehaving lib.R
|
# Drop columns 2 and 24 from both objects (per the file name, presumably the
# misbehaving libraries — TODO confirm which libraries these indices map to).
# Uses `<-` rather than `=` for top-level assignment, per R style convention.
filteredgenes2 <- filteredgenes[, -c(2, 24)]
filteredtdata2 <- filteredtdata[, -c(2, 24)]
|
3903e7da4a9eb6e285ad19d6a5aa174fe6aa60ac
|
4edd01a0d66f6f3336f7396cec13f5da671a8dcb
|
/test/data/metric_bad_2.data.R
|
7020d6978ad132a56a4d09c6de4c6cedc49fb8f1
|
[
"BSD-3-Clause"
] |
permissive
|
stan-dev/cmdstanpy
|
c0a33ca6d3a6f5e1486b797e5a402df6cc66c92c
|
107a347f12b1abb4f3d7b8a380d1138f6633d26b
|
refs/heads/develop
| 2023-09-03T19:03:09.568378
| 2023-08-28T19:19:40
| 2023-08-28T19:19:40
| 163,899,458
| 135
| 75
|
BSD-3-Clause
| 2023-09-11T20:33:23
| 2019-01-02T23:18:04
|
Python
|
UTF-8
|
R
| false
| false
| 23
|
r
|
metric_bad_2.data.R
|
inv_metric <- 0.787405
|
fb0ba0b82cbc4e9129c9d86832ed3f8102dd3dbc
|
74fcdcd73701f8f69d666f8056bdaf16b94c0474
|
/R scripts/5_line_fig4.R
|
e7a0b3046f6f0fad07c40d0899d56efe01a4cf97
|
[] |
no_license
|
USEPA/NATTS-HAP
|
6e014bf5569e0a976da3353d5665dd74f0b327fa
|
d18332c06fe27f4ce9b1448d7308810e7ce53a49
|
refs/heads/master
| 2023-03-07T02:31:27.339357
| 2021-02-16T13:51:38
| 2021-02-16T13:51:38
| 272,454,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,014
|
r
|
5_line_fig4.R
|
library(rlist); library(dplyr); library(ggplot2)
########################################################################
# This script creates a line graph for each site that shows how cancer
# risk has changed relative to 2013 cancer risk for that site. Note:
# only chemicals that were measured for all 5 years at a site were
# included.
########################################################################
### Read in data set: filtered NATTS HAPs
df <- read.csv("../Data/FilteredNattsHaps.csv") # Change path if necessary
### Use only pollutants measured all five years at a given site
complete <- df %>% group_by(LOCATION, AQS_PARAMETER_NAME) %>% filter(length(YEAR) > 4)
# NOTE(review): d1 and d2 are computed but never used below — candidates for removal
d1 <- as.data.frame(unique(df$LOCATION))
d2 <- as.data.frame(unique(complete$LOCATION))
### Sum cancer risk (CRinAmil) per site and year, then spread years to columns
y1 <- complete %>% group_by(LOCATION, YEAR) %>%
dplyr::summarize("CRinAmil" = sum(CRinAmil))
y1 <- reshape2::dcast(y1, LOCATION ~ YEAR, value.var = "CRinAmil")
### Calculate standardized change in CR, using 2013 as baseline
y1$std13 <- 1
y1$std14 <- y1$`2014` / y1$`2013`
y1$std15 <- y1$`2015` / y1$`2013`
y1$std16 <- y1$`2016` / y1$`2013`
y1$std17 <- y1$`2017` / y1$`2013`
y2 <- reshape2::melt(y1, id= "LOCATION", measure.vars = c("std13", "std14", "std15",
"std16", "std17"), value.name = "CR")
### Group locations and label, for graphing so that there are ~5 lines per graph
y2 <- y2 %>% mutate("ID" = ifelse(LOCATION %in% c("Los Angeles, CA", "Washington, DC", "Phoenix, AZ", "Bountiful, UT"), 1,
ifelse(LOCATION %in% c("Grand Junction, CO", "Detroit, MI", "Rubidoux, CA", "St. Louis, MO"), 2,
ifelse(LOCATION %in% c("Pinellas County, FL", "Bronx, NY", "San Jose, CA", "Houston, TX"), 3,
ifelse(LOCATION %in% c("Roxbury, MA", "Tampa, FL", "Richmond, VA", "Chicago, IL"), 4,
ifelse(LOCATION %in% c("Chesterfield, SC", "Karnack, TX", "La Grande, OR", "Grayson Lake, KY", "Horicon, WI", "Underhill, VT"), 5,
6 ))))))
### Rename year variable to full year
y3 <- y2 %>% mutate("YEAR" = ifelse(variable == "std13", 2013,
ifelse(variable == "std14", 2014,
ifelse(variable == "std15", 2015,
ifelse(variable == "std16", 2016, 2017)))))
#### Create line graphs, one panel per location group (ID)
p <- ggplot(y3, aes(x = YEAR, colour = LOCATION, group = LOCATION)) +
labs(x = "YEAR", y = "Change in CR") +
geom_point(aes(y = CR), size = 1.2) +
geom_line(aes(y = CR), size = 0.8) +
theme(axis.text = element_text(size = 16)) +
scale_x_continuous(expand = c(0,0)) +
facet_wrap(~ID)
### Write to pdf (US-letter landscape)
pdf("../Figures/Fig4_timeSeries.pdf", paper = "USr", width = 11, height = 7)
print(p)
dev.off()
### Data frame for graph
#write.csv(y1, "./Data/df_fig4.csv", row.names = F)
|
15864abad896060bb424144a4909891f70539712
|
c0bcd0b5f2d1abd72de5e775aedebfd407a1a4a3
|
/exercise4/starting.R
|
987f677a2326f47f995079bc3ee677e7b6ee3655
|
[] |
no_license
|
zembrzuski/machine-learning-andrewng
|
707421b12aef715ae8121666b68d1c6f459f0b29
|
520fc38e83468865ec158026a10c73bdfb015dc7
|
refs/heads/master
| 2021-01-10T01:42:46.852296
| 2015-12-08T18:36:41
| 2015-12-08T18:36:41
| 45,475,686
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,841
|
r
|
starting.R
|
rm(list = ls())  # NOTE(review): clearing the global environment inside a script is discouraged
### reading data
setwd('/home/nozes/github-machine-learning-coursera/exercise4')  # NOTE(review): hard-coded absolute path; not portable
x = read.csv2('input/data1.txt', sep=',', stringsAsFactors = FALSE, header = FALSE)
x <- as.matrix(sapply(x, as.numeric))
y = read.csv2('input/y.txt', sep=',', stringsAsFactors = FALSE, header = FALSE)$V1
y <- as.matrix(sapply(y, as.numeric))
theta1 = read.csv2('input/theta1.txt', sep=',', stringsAsFactors = FALSE, header = FALSE)
theta2 = read.csv2('input/theta2.txt', sep=',', stringsAsFactors = FALSE, header = FALSE)
theta1 <- as.matrix(sapply(theta1, as.numeric))
theta2 <- as.matrix(sapply(theta2, as.numeric))
### visualizing data: render one 20x20 pixel digit image
indexNumber <- 1
digit <- matrix(as.numeric(matrix(x[indexNumber,], ncol = 20)), ncol=20)
rotate <- function(x) t(apply(x, 2, rev))
image(rotate(digit))
### implementing forward propagation
x <- cbind(rep(1, nrow(x)), x)  # prepend the bias column of ones
dim(x)
dim(t(theta1))
# NOTE(review): hypothesis() is not defined in this file — presumably the
# sigmoid of theta %*% input; confirm where it is sourced from.
outputLayerOne <- hypothesis(theta1, t(x))
dim(outputLayerOne)
inputLayerTwo <- cbind(1, outputLayerOne)  # bias column for the hidden layer
dim(inputLayerTwo)
dim(t(theta2))
m <- nrow(inputLayerTwo)
hipo <- hypothesis(theta2, inputLayerTwo)
dim(hipo)
# build a 5000x10 one-hot label matrix (one column per digit class)
yMatrix <- matrix(NA, nrow = 5000, ncol=10)
for(i in 1:5000) {
yMatrix[i, ] <- createVectorForSinglePoint(y[i])  # NOTE(review): createVectorForSinglePoint() is not defined in this file
}
### check the dimensions line up, then compute the elementwise cost
dim(hipo)
dim(yMatrix)
# NOTE(review): cross-entropy normally uses log(1 - hipo) in the second term;
# here both terms use log(hipo) — verify before trusting the cost value.
vectorCostForEachPoint <- -yMatrix*log(hipo) - (1-yMatrix)*log(hipo)
sum(vectorCostForEachPoint)/5000
acertou <- 0
for(nnn in 1:5000) {
if(which.max(yMatrix[nnn,]) == which.min(vectorCostForEachPoint[nnn,])) {
acertou <- acertou+1
}
}
acertou  # count of correct predictions ("acertou" = "got it right")
4876/5000
############################################################
############################################################
############################################################
## next step: attempt the backpropagation derivative (incomplete below)
sigma3 <-
|
a37c207332fc5e58760729fd23085606b34a1005
|
e85887c76341d45a3829fc552d6c53536f32b719
|
/man/weights_at_dose.Rd
|
e1c237469f7d73b14ea16ce4984d68099aac116d
|
[] |
no_license
|
brockk/trialr
|
fa8fd43ca43dc79911677ba42c8e50d88a2fc03d
|
15fd90d3a779a61454baedcd517e2ce8bb301f92
|
refs/heads/master
| 2023-03-16T21:41:49.580277
| 2023-03-11T07:36:58
| 2023-03-11T07:36:58
| 69,753,350
| 39
| 13
| null | 2023-09-10T19:11:18
| 2016-10-01T17:39:28
|
TeX
|
UTF-8
|
R
| false
| true
| 1,266
|
rd
|
weights_at_dose.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weights_at_dose.R
\name{weights_at_dose}
\alias{weights_at_dose}
\alias{weights_at_dose.default}
\alias{weights_at_dose.crm_fit}
\title{Get the weights of patient outcomes at the doses under investigation.}
\usage{
weights_at_dose(x, dose, ...)
\method{weights_at_dose}{default}(x, dose = NULL, ...)
\method{weights_at_dose}{crm_fit}(x, dose = NULL, ...)
}
\arguments{
\item{x}{An R object of class \code{"dose_finding_fit"}}
\item{dose}{Optional integer, at which dose-level? Omit to get data on all doses.}
\item{...}{arguments passed to other methods}
}
\value{
list if \code{dose} omitted, numerical vector if \code{dose} provided.
}
\description{
Get the weights of patient outcomes at the doses under investigation.
}
\examples{
\dontrun{
# CRM example
fit <- stan_crm(skeleton = c(0.1, 0.2, 0.35, 0.6), target = 0.2,
model = 'empiric', beta_sd = sqrt(1.34), seed = 123,
doses = c(1, 1, 2, 2, 2),
tox = c(0, 0, 0, 0, 0),
weights = c(1, 1, 0.9, 0.1, 0.1))
l <- weights_at_dose(fit)
length(l) # 4
l[[1]] # c(1, 1)
l[[2]] # c(0.9, 0.1, 0.1)
l[[3]] # c()
weights_at_dose(fit, dose = 2) # c(0.9, 0.1, 0.1)
}
}
|
0671761f3cf595a15430d254a8cd8513a1d7a483
|
1f31b1e6cf2aafdf2a341ba7570b1806b6fa90b9
|
/R/plotConvergence.R
|
228ffeca26c3b3516c694db6ed65638e78643686
|
[] |
no_license
|
crsh/TreeBUGS
|
3faf2095ff67a99536fb49a9b381ed8ed767a1a9
|
02ada2dff50ed076d653e869bf93c8cb66687fe7
|
refs/heads/master
| 2021-01-24T00:44:09.553630
| 2016-05-30T13:11:16
| 2016-05-30T13:11:16
| 60,010,766
| 0
| 0
| null | 2016-05-30T13:07:09
| 2016-05-30T13:07:09
| null |
UTF-8
|
R
| false
| false
| 2,491
|
r
|
plotConvergence.R
|
#### wrappers for convenient convergence plots
#' @export
#' @describeIn plot Plot convergence for beta MPT
# Beta-MPT fits expose the same runjags MCMC structure as latent-trait fits,
# so this S3 method simply delegates to plot.traitMPT().
plot.betaMPT <- function(x, parameter="mean", type="default", ...){
plot.traitMPT(x,parameter=parameter, type=type,...)
}
#' Plot Convergence for Hierarchical MPT Models
#'
#' @param x fitted hierarchical MPT model (\code{\link{traitMPT}}, \code{\link{betaMPT}})
#' @param parameter which parameter to plot (e.g., \code{"theta"}, \code{"rho"}, \code{"slope"}). Parameters are matched partially, in order to plot all entries of vector valued parameters (see \code{\link{getParam}} to get a list of parameters)
#' @param type type of convergence plot. Can be one of \code{"default"} (trace+density), \code{"acf"} (auto-correlation function), \code{"trace"}, \code{"autocorr"}, \code{"crosscorr"},\code{"density"}, \code{"gelman"}. See, e.g., \code{\link[coda]{plot.mcmc.list}}
#' @param ... further arguments passed to the plotting functions in coda
#' @export
#' @describeIn plot Plot convergence for latent-trait MPT
#' @importFrom coda traceplot acfplot gelman.plot as.mcmc.list varnames crosscorr.plot autocorr.plot densplot
plot.traitMPT <- function(x, parameter="mean", type="default", ...){
  allnam <- varnames(x$runjags$mcmc)
  thetaUnique <- x$mptInfo$thetaUnique
  # Columns partially matching `parameter`, excluding posterior-predictive nodes.
  idx <- setdiff(grep(parameter, allnam), grep(".pred", allnam))
  if(length(idx) <= 0){
    stop("Parameter not found in MCMC object.")
  }
  # Attach the MPT parameter labels to the matched MCMC column names.
  if(length(idx) == length(thetaUnique)){
    allnam[idx] <- paste0(parameter, "_", thetaUnique)
  }else if(parameter == "theta"){
    # BUGFIX: the separator "_" was previously passed as the `times` argument
    # of rep() — rep(thetaUnique, "_", length(idx)/2) — which errors at run
    # time. Recycle the theta labels across the matched columns instead
    # (assumes columns cycle through the theta labels — verify ordering).
    allnam[idx] <- paste0(allnam[idx], "_", rep(thetaUnique, length.out = length(idx)))
  }
  coda::varnames(x$runjags$mcmc) <- allnam
  # Dispatch to the requested coda diagnostic plot.
  switch(type,
         "trace" = traceplot(x$runjags$mcmc[,idx],...),
         "acf" = acfplot(x$runjags$mcmc[,idx],...),
         "gelman" = gelman.plot(x$runjags$mcmc[,idx],...),
         "crosscorr" = crosscorr.plot(x$runjags$mcmc[,idx],...),
         "autocorr" = autocorr.plot(x$runjags$mcmc[,idx],...),
         "density" = densplot(x$runjags$mcmc[,idx],...),
         "default" = plot(x$runjags$mcmc[,idx],...),
         stop("Check 'type' for possible plots." )
  )
}
|
6f0afd057aac717318a970c33f9dd5e23be4f9a1
|
4014719bdbff8b7771ee42a6dae89a4e7b97167f
|
/validation.R
|
18a1f2e8260d4a48d4262da4f192ccead9c49ece
|
[] |
no_license
|
macjankowski/pub_lda
|
1d7decf690429241c97847faec6554abf73c3b32
|
c718a1bdbd1ab647271ac75f6d85aecc5c8b644d
|
refs/heads/master
| 2021-01-21T14:29:00.859831
| 2019-04-29T10:05:41
| 2019-04-29T10:05:41
| 95,290,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 681
|
r
|
validation.R
|
# Draw a 1x3 panel of diagnostic line plots:
#   1) error rate vs. confidence threshold,
#   2) classification rate vs. confidence threshold,
#   3) error rate vs. classification rate.
# The Polish axis/title strings are kept verbatim.
plotResults <- function(threshold, bridgeRatio, errorRatio) {
  # shared line-plot style for all three panels
  draw_panel <- function(xs, ys, title, x_label, y_label) {
    plot(xs, ys, type = "l", main = title, xlab = x_label, ylab = y_label,
         col = "red", col.axis = "dimgray", col.lab = "blueviolet")
  }
  par(mfrow = c(1, 3))
  draw_panel(threshold, errorRatio, "Poziom błędu (%)", "Próg pewności", "Poziom błędu (%)")
  draw_panel(threshold, bridgeRatio, "Poziom klasyfikacji (%)", "Próg pewności", "Poziom klasyfikacji (%)")
  draw_panel(bridgeRatio, errorRatio, "Poziom klasyfikacji vs. Poziom błędu", "Poziom klasyfikacji (%)", "Poziom błędu (%)")
}
|
84a9ded7bde6a7177a1717064666bc59321f16c2
|
dd1f71d17af9a470065687f0ec9c9874445ae5f8
|
/binomial/man/bin_cumulative.Rd
|
e3f83605536c3085f486d38da006a653d9b115b4
|
[] |
no_license
|
stat133-sp19/hw-stat133-chauj96
|
dbdc2785f0a564cddd0cf7d5c5a02d79cc485afd
|
b82c4199f5d2671d6ec45022d4d5bf9bd550db96
|
refs/heads/master
| 2020-04-28T09:12:59.666564
| 2019-05-02T18:52:01
| 2019-05-02T18:52:01
| 175,159,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 409
|
rd
|
bin_cumulative.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{bin_cumulative()}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{the number of trials}
\item{prob}{the probability}
}
\value{
a data frame with successes and probability and cumulative
}
\description{
Calculate the binomial cumulative distribution.
}
|
cfba160b2598d242ae24b0bd4d01ec2d51fcff7d
|
12c5fe203a70038f7dbb724cd101239d689fc4bf
|
/HW/SeoyoonChoi_20191218_day16HW.R
|
b73a75c88d721e5cb5ba2348709a1d63eb42b9f2
|
[] |
no_license
|
choisawyou/learnR
|
d5479280b5fa5011d37df487a12647cab6da310e
|
1fea4f39f18a3878ba0dee12f29a31fc5586000e
|
refs/heads/master
| 2020-09-21T11:47:44.983163
| 2020-04-21T14:48:03
| 2020-04-21T14:48:03
| 224,779,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,718
|
r
|
SeoyoonChoi_20191218_day16HW.R
|
# Author: Seoyoon Choi
# Submitted: 2019-12-18
# Written: 2019-12-18
# Problem 1)
# Perform k-means clustering on R's built-in state.x77 dataset and plot the result.
# - Use 5 clusters.
# - Because the columns of state.x77 are on very different scales,
#   apply 0-1 standardization before clustering.
head(state.x77)
# standardization helper
# Min-max (0-1) standardization of a numeric vector.
# BUGFIX: the original body was (x - min(x))/max(x) - min(x) — operator
# precedence divided by max(x) alone and then subtracted min(x); the intended
# formula is (x - min(x)) / (max(x) - min(x)).
standard <- function(x){
  (x - min(x)) / (max(x) - min(x))
}
st <- apply(state.x77,2,standard)  # column-wise 0-1 standardization
st
fit <- kmeans(x = st,center = 5)  # k-means with 5 clusters
fit
# NOTE(review): clusplot() requires library(cluster), which is only loaded
# further below — move the library() call before this line.
clusplot( st, fit$cluster, color = TRUE, shade = TRUE, labels = 2, lines = 0 )
# Problem 2)
# Perform k-means clustering on the Sonar dataset from the mlbench package
# and plot the result.
# - Use 2 clusters.
# - Exclude the final Class column before clustering.
library( mlbench )
data( "Sonar" ) # load the dataset
dim(Sonar)
sn <- Sonar[,-61]  # drop the Class column (column 61)
group_sn <- kmeans( x= sn , center = 2)
group_sn
library(cluster)
clusplot(sn,
group_sn$cluster,
color = T,
shade = T,
labels= 2,
lines = 1)
# Problem 3)
# Build a k-nearest-neighbour model on the Sonar dataset and measure
# prediction accuracy.
# - The final Class column holds the group labels.
# - Odd-numbered rows form the training set, even-numbered rows the test set.
# - Compare prediction accuracy for k = 3, 5 and 7.
library(class)
dim(Sonar)
str(Sonar)
odd.n <- seq(1,nrow(Sonar),2) # training-set row indices
doub.n <- seq(2,nrow(Sonar),2) # test-set row indices
sn.tr <- Sonar[odd.n,] ; sn.tr # training data
sn.ts <- Sonar[doub.n,] ; sn.ts # test data
tr <- factor(Sonar[odd.n,61]) # training labels as factor
ts <- factor(Sonar[-odd.n,61]) # test labels as factor
# NOTE(review): sn.tr/sn.ts still include the Class column (61); knn()
# expects predictors only — verify whether it should be dropped here.
pred <- knn(sn.tr,sn.ts,tr,k = 3, prob = TRUE) ; pred
acc <- mean(pred == ts)
# k = 3
# prediction accuracy 0.9333333
# k = 5
# prediction accuracy 0.9333333
acc
# k = 7
# prediction accuracy 0.9333333
# NOTE(review): pred was only computed once with k = 3; re-run knn() with
# k = 5 and k = 7 to obtain the accuracies claimed above.
# Problem 4)
# Build a k-nearest-neighbour model (k = 3) on the Sonar dataset and measure
# prediction accuracy using 5-fold cross-validation.
# - The final Class column holds the group labels.
|
e10885f550cca0a1c7ad45a7284ed9d936b635af
|
0ae29405c310b9835ea68ffe3d091a8d8156fd8d
|
/src/02_build_model.R
|
42549bdfa5aa8c3a569999e0c854a59617800148
|
[] |
no_license
|
SamEdwardes/nfl-big-data-bowl
|
c18220804de1ac386953e68c66dd929e16a4f214
|
f1a08634d178c18c2b4f9440e0d7e57b16706d30
|
refs/heads/master
| 2020-08-22T16:10:29.507294
| 2019-10-26T00:58:04
| 2019-10-26T00:58:04
| 215,148,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,164
|
r
|
02_build_model.R
|
# Build and assess a simple baseline linear model predicting NFL rushing yards.
library(caret)
library(janitor)
library(tidyverse)
library(infer)
options(scipen = 999)   # suppress scientific notation in printed output
# NOTE(review): 2019-10-25 is evaluated as arithmetic (2019 - 10 - 25 = 1984),
# not as a date; the effective seed is 1984.
set.seed(2019-10-25)

# READ DATA ----
train_features <- read_csv("data/train_features.csv", col_types = cols())
test_features <- read_csv("data/test_features.csv", col_types = cols())

# BUILD MODELS ----

## Linear model ----
# One-predictor baseline: rushing yards modelled on down alone.
model_lm <- train(data = train_features,
                  rushing_yards ~ down,
                  method = "lm")

# ASSESS MODEL ----

## linear model ----
# Round predictions to whole yards before comparing against the truth.
predictions_lm <- predict(model_lm, test_features) %>% round(0)

results_lm <- tibble(
  prediction = predictions_lm,
  truth = test_features$rushing_yards
) %>%
  mutate(delta_amount = prediction - truth,
         delta_percent = prediction / truth)

# Empirical 95% interval of the prediction error (in yards).
results_lm_ci <- quantile(results_lm$delta_amount, c(0.025, 0.975))

results_lm %>%
  ggplot(aes(x = delta_amount)) +
  geom_histogram(binwidth = 1) +
  labs(title = "Linear Model Results",
       x = "Prediction yards delta",
       caption = "Positive number represents prediction was greater than actual
Negative number represents prediction was less than actual") +
  # NOTE(review): shade_ci() is designed for infer::visualize() plots; adding it
  # to a plain ggplot may not render as intended -- confirm.
  shade_ci(results_lm_ci)
|
8588534862dc44bccb53621921331e52dd584763
|
e2e6fe9504c3e5fc6af558c45234bbb9eabfd9d1
|
/man/createParentFindrReport.Rd
|
db9fc8620e6b13861a1d1f1323e1e9e47ebbe06b
|
[
"MIT"
] |
permissive
|
rmsharp/parentfindr
|
2ebca196d8ff9627874738cb469d4f93807415e2
|
052c42a50f835fe1321dd93dc4cd21b844218e73
|
refs/heads/master
| 2023-01-13T23:04:28.613212
| 2020-11-23T02:44:23
| 2020-11-23T02:44:23
| 266,241,084
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 309
|
rd
|
createParentFindrReport.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeReport.R
\name{createParentFindrReport}
\alias{createParentFindrReport}
\title{Create results report for parentfindr}
\usage{
createParentFindrReport()
}
\description{
Creates the parentfindr results report (an Exclusion Parentage Report).
}
|
4d84af33089ca791aa1bb345a9a7c498b08ec563
|
14244b1b47d457ce1d5969f3b2c0c548f4ef3180
|
/tests/testthat/test-4likelihood.R
|
0ae1f794458abbdcbde88e716fa96f9ce630e841
|
[] |
no_license
|
jpritikin/pcFactorStan
|
962b1b34122b3d16cfc714ac88b81eddb4ac23bc
|
6de1fcd968ad199c82e1908f75f6efe916640e40
|
refs/heads/master
| 2021-09-29T03:56:38.236151
| 2021-09-25T02:05:36
| 2021-09-25T02:05:36
| 189,459,768
| 2
| 3
| null | 2023-09-10T15:33:29
| 2019-05-30T18:03:39
|
R
|
UTF-8
|
R
| false
| false
| 4,023
|
r
|
test-4likelihood.R
|
# Regression tests for pcFactorStan likelihoods. Each test runs a single
# sampling iteration (iter=1, warmup=0) at a fixed seed and pins the resulting
# log posterior and the five-number summary of the per-observation
# log-likelihoods against values recorded from a known-good run.
library(testthat)
library(pcFactorStan)
context("test-4likelihood")

skip_on_cran()
options(mc.cores=4)
# Keep the pre-3.6 RNG so the recorded reference values still match.
suppressWarnings(RNGversion("3.5"))
library(rstan) # for get_logposterior

test_that("unidim", {
  # As of rstan 2.19, cores=0 suppresses the warnings about chain convergence.
  # Malformed input must be rejected before any sampling happens.
  expect_error(pcStan('unidim', data=phyActFlowPropensity[,1:3]),
               "Data must be processed by prepData")
  expect_error(pcStan('unidim', data=matrix(0, 3, 3)),
               "Is data an object returned by prepData")

  dl <- prepData(phyActFlowPropensity[,c(1,2,3)])
  dl$varCorrection <- 2.0
  m1 <- findModel("unidim_adapt")
  # iter=1, warmup=0: evaluate the posterior once rather than actually sample.
  f1 <- sampling(m1, dl, chains=1, cores=0, iter=1, seed=1, warmup=0, refresh=0)
  expect_equal(get_logposterior(f1)[[1]], -3612.551, tolerance=1e-2, scale=1)

  dl$scale <- 1.0
  m2 <- findModel("unidim_ll")
  f2 <- sampling(m2, dl, chains=1, cores=0, iter=1, seed=1, warmup=0, refresh=0)
  expect_equal(get_logposterior(f2)[[1]], -8044.32, tolerance=1e-2, scale=1)
  #cat(deparse(round(fivenum(extract(f2)$log_lik[1,]), 3)))
  expect_equal(fivenum(extract(f2)$log_lik[1,]),
               c(-21.461, -13.534, -8.544, -0.193, -0.001), tolerance=1e-2, scale=1)
})

test_that("correlation", {
  set.seed(1)
  dl <- prepData(phyActFlowPropensity)
  # Random per-item scales exercise the scale-handling code path.
  dl$scale <- rnorm(dl$NITEMS, sd=.2)
  m2 <- findModel("correlation_ll")
  f2 <- sampling(m2, dl, chains=1, cores=0, iter=1, seed=1, warmup=0, refresh=0)
  expect_equal(get_logposterior(f2)[[1]], -64918.59, tolerance=1e-2, scale=1)
  #cat(deparse(round(fivenum(extract(f2)$log_lik[1,]), 3)))
  expect_equal(fivenum(extract(f2)$log_lik[1,]),
               c(-29.637, -3.781, -2.027, -0.762, 0), tolerance=1e-2, scale=1)
})

test_that("factor", {
  set.seed(1)
  dl <- prepData(phyActFlowPropensity)
  dl$scale <- rep(1.5, dl$NITEMS)
  dl <- prepSingleFactorModel(dl)
  m2 <- findModel("factor1_ll")
  f2 <- sampling(m2, dl, chains=1, cores=0, iter=1, seed=1, warmup=0, refresh=0)
  # Looser tolerance than the other fixtures (1e-1 rather than 1e-2).
  expect_equal(get_logposterior(f2)[[1]], -60859.06, tolerance=1e-1, scale=1)
  #cat(deparse(round(fivenum(extract(f2)$log_lik[1,]), 3)))
  expect_equal(fivenum(extract(f2)$log_lik[1,]),
               c(-33.319, -3.754, -1.989, -0.963, 0), tolerance=1e-2, scale=1)
})

test_that("mixed thresholds", {
  library(mvtnorm)
  set.seed(1)
  # Synthetic data: 10 participants on a two-level comparison graph, 5 items
  # sharing a latent correlation structure, with increasing threshold counts.
  palist <- letters[1:10]
  df <- twoLevelGraph(palist, 300)
  for (k in paste0('pa',1:2)) df[[k]] <- factor(df[[k]], levels=palist)
  numItems <- 5
  trueCor <- cov2cor(rWishart(1, numItems, diag(numItems))[,,1])
  theta <- rmvnorm(length(palist), sigma=trueCor)
  dimnames(theta) <- list(palist, paste0('i', 1:numItems))
  for (ix in 1:numItems) {
    df <- generateItem(df, theta[,ix], th=rep(0.5, ix))#
  }

  df <- filterGraph(df)
  dl <- prepCleanData(df)
  scaleSave <- rnorm(numItems, .9, .2)
  dl$scale <- scaleSave
  m2 <- findModel("correlation_ll")
  f2 <- sampling(m2, dl, chains=1, cores=0, iter=1, seed=1, warmup=0, refresh=0)
  expect_equal(get_logposterior(f2)[[1]], -5792.789, tolerance=1e-2, scale=1)
  #cat(deparse(round(fivenum(extract(f2)$log_lik[1,]), 3)))
  expect_equal(fivenum(extract(f2)$log_lik[1,]),
               c(-26.73, -3.609, -1.794, -1.238, -0.035), tolerance=1e-2, scale=1)

  # Renormalising with a permuted participant order must leave the likelihood
  # unchanged (invariance check).
  df <- normalizeData(df, .palist=sample(palist, 10))
  dl <- prepCleanData(df)
  dl$scale <- scaleSave
  f3 <- sampling(m2, dl, chains=1, cores=0, iter=1, seed=1, warmup=0, refresh=0)
  expect_equal(get_logposterior(f3)[[1]],
               get_logposterior(f2)[[1]], tolerance=1e-2, scale=1)
  expect_equal(fivenum(extract(f2)$log_lik[1,]),
               fivenum(extract(f3)$log_lik[1,]), tolerance=1e-2, scale=1)
})

test_that("calibrateItems", {
  pafp <- phyActFlowPropensity[,c(1,2,6:8)]
  result <- calibrateItems(pafp, iter=1000L, chains=2, seed=1)
  expect_equal(nrow(result), 3)
  # Convergence diagnostics for the first two items.
  expect_true(all(result[1:2,'n_eff'] > 200))
  expect_true(all(result[1:2,'Rhat'] < 1.015))
  # cat(deparse(round(result[,'scale'],3)))
  expect_equal(result[,'scale'], c(0.566, 0.646, 0.081),
               tolerance=.01, scale=1)
})
|
7a7cd738fae83bcf9b56f34293f6ec6aa937ce9d
|
ecb1d037e50203f2e5e79da0d845598c5b6cdd99
|
/R/optimization_lambda.R
|
e0af5c91c69110ce020553a604b645c0dc48cbff
|
[] |
no_license
|
Z1chenZhao/bis557
|
59669f343f9764ba6aa4c132dfefe1d4eba0b8ab
|
b9c1f1d2fc60ec23d2ec18f274fe743769477ab3
|
refs/heads/master
| 2023-02-01T00:36:18.885873
| 2020-12-18T19:45:21
| 2020-12-18T19:45:21
| 296,169,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,708
|
r
|
optimization_lambda.R
|
#' @title optimization_lambda
#' @description Optimizing the ridge parameter lambda by cross validation
#'
#' @param form A formula with the format of "Y ~ .".
#' @param dat A dataframe.
#' @param folds The number of folds to cross validate
#' @param lambdas A list of the ridge penalty term lambda.
#' @param contrasts A list of contrasts.
#'
#' @return The ridge regression parameter lambda that minimizes mse.
#' @importFrom stats formula qnorm sd model.matrix predict
#' @importFrom doParallel registerDoParallel
#' @importFrom rsample vfold_cv testing training
#' @importFrom foreach foreach %do% %dopar%
#' @import dplyr foreach rsample
#' @export
#'
optimization_lambda <- function (form, dat, folds = 10, lambdas = seq(0, 1, 0.1), contrasts= NULL) {
  # NOTE(review): hard-codes 10 parallel workers regardless of the host machine.
  registerDoParallel(cores = 10)
  # `folds` is rebound here: from the fold count to the rsample CV object.
  folds <- vfold_cv(dat, v = folds)

  # Mean squared error helper.
  mse <- function(x, x_hat){
    return(mean((x - x_hat)^2))
  }

  # Dummy bindings, presumably to silence R CMD check NOTEs about variables
  # used non-standardly inside foreach/dplyr -- confirm.
  i <- NULL
  `.`<- NULL
  lambda <- NULL

  # For every candidate lambda (outer loop, parallel), compute the held-out
  # MSE of the ridge fit on each cross-validation split (inner loop, serial).
  # Result: one row per lambda, one column per fold.
  mse.l <- foreach(lambda = lambdas, .combine = rbind) %dopar% {
    foreach(i = seq_len(nrow(folds)), .combine = c) %do% {
      mse(
        testing(folds$splits[[i]])[[as.character(form[2])]],
        predict(ridge_regression(form, training(folds$splits[[i]]),
                                 lambda = lambda, contrasts = contrasts),
                testing(folds$splits[[i]]))
      )
    }
  }

  # Summarise per-lambda mean MSE with an approximate 95% normal interval.
  # NOTE(review): inside mutate(), `.` is the magrittr dot (the piped tibble),
  # so nrow(.) is the number of lambdas -- confirm that is the intended n.
  mse.t <- tibble(mean = apply(mse.l, 1, mean), sd = apply(mse.l, 1, sd), lambda = lambdas) %>%
    mutate(upper = mean + qnorm(0.975) * sd / nrow(.), lower = mean - qnorm(0.975) * sd / nrow(.))

  # Return the lambda with the smallest mean cross-validated MSE.
  lambda_min <- mse.t$lambda[which.min(mse.t$mean)]
  lambda_min
}
|
2d0f395680b2f3c74211f9230fd1c149f5334cb0
|
dfbae56bb8b3aab05e4b347afa8ccf173fcc8c3a
|
/app1/app.R
|
6e8ffc8bfc984775f5bacfeeaa9f5bbaf3981ff7
|
[] |
no_license
|
daslu/r-playground
|
fef5f34cc435107d7c225267f5f44ef75e4884f0
|
0f3fa247f630bae31c56351dc768a2514f169e75
|
refs/heads/master
| 2022-10-14T01:52:12.298089
| 2020-06-08T13:10:02
| 2020-06-08T13:10:02
| 269,067,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
app.R
|
library(shiny)

# Minimal admin app: a single button intended to pull the latest apps from GitHub.
ui <- fluidPage(
  tags$head(tags$script(src = "message-handler.js")),
  titlePanel("~~~~"),
  actionButton("Pull", "Pull apps from github"))

server = function(input, output, session) {
  # Bug fix: the observer previously listened on input$do, but the button's
  # inputId is "Pull", so the handler never fired.
  observeEvent(input$Pull, {
    system("touch /tmp/a")
    #system("(cd /home/shiny/apps; git pull)")
  })
}

shinyApp(ui=ui, server=server)
|
17f2dfad1e7d611d2d1deb96fa8a48461f5ee12a
|
1f10d551dd9e9518cc2ec42eb4ada46a2d714b5e
|
/master/man/panel.hist.Rd
|
d4953cd87a503c5a31ac8bdc28ec945c2516d648
|
[] |
no_license
|
Ibasam/MetaIBASAM
|
1f3e1e6488de7631b18ad80ad2e02e49f57d04d8
|
74019a03f08ad8c96fa30146dab65790d856db52
|
refs/heads/master
| 2023-08-24T14:13:01.152797
| 2021-10-25T13:25:13
| 2021-10-25T13:25:13
| 84,918,679
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,529
|
rd
|
panel.hist.Rd
|
\name{panel.hist}
\alias{panel.hist}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
panel.hist
}
\description{
Panel function intended for the diagonal panels of \code{pairs()}: draws a
histogram of \code{x} in the current panel, with counts rescaled to fit a
0--1.5 user-coordinate range and bars filled in cyan.
}
\usage{
panel.hist(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Cyril Piou
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, ...)
{
usr <- par("usr")
on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5))
h <- hist(x, plot = FALSE)
breaks <- h$breaks
nB <- length(breaks)
y <- h$counts
y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col = "cyan", ...)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ misc }
\keyword{ utilities }
\keyword{ programming }
|
179d967a5ecf15de213bd129f94f5a3ebf2a76df
|
21cba64d52d243e9f10727a963854970ad38c9b0
|
/R/count.R
|
4a3539309199946c6215a5cf1796c253493562b8
|
[
"MIT"
] |
permissive
|
tidyverse/forcats
|
8685f788b06056864243c0ceb802769a11295333
|
4a8525abba44dbd4f9a03efff09c85922039ffa3
|
refs/heads/main
| 2023-09-05T14:29:32.174074
| 2023-01-30T15:21:30
| 2023-01-30T15:21:30
| 65,227,405
| 447
| 151
|
NOASSERTION
| 2023-08-26T03:57:47
| 2016-08-08T18:07:47
|
R
|
UTF-8
|
R
| false
| false
| 860
|
r
|
count.R
|
#' Count entries in a factor
#'
#' @param f A factor (or character vector).
#' @param sort If `TRUE`, sort the result so that the most common values
#'   float to the top.
#' @param prop If `TRUE`, compute the fraction of marginal table.
#' @return A tibble with columns `f`, `n` and `p`, if prop is `TRUE`.
#' @export
#' @examples
#' f <- factor(sample(letters)[rpois(1000, 10)])
#' table(f)
#' fct_count(f)
#' fct_count(f, sort = TRUE)
#' fct_count(f, sort = TRUE, prop = TRUE)
fct_count <- function(f, sort = FALSE, prop = FALSE) {
  f <- check_factor(f)
  check_bool(sort)
  check_bool(prop)

  # Per-level counts; append one extra count for missing values when any exist.
  n_missing <- sum(is.na(f))
  counts <- tabulate(f, nlevels(f))
  if (n_missing > 0) {
    counts <- c(counts, n_missing)
  }

  out <- tibble::tibble(
    f = fct_unique(f),
    n = counts
  )

  if (sort) {
    out <- out[order(out$n, decreasing = TRUE), ]
  }
  if (prop) {
    out$p <- prop.table(out$n)
  }

  out
}
|
d99f654eb46e798cb2974d0651cd261132956b66
|
9d068471c68078d3a26d83929158dd3b51e96887
|
/man/calc_stream_voronoi_weights.Rd
|
580df352842c542e8c109bb772218ee30710783a
|
[
"MIT"
] |
permissive
|
scantle/pbjr
|
b4077d5c19c58f6e7f5db978f6a82205cf9b2394
|
365106683e21ef9358695aceecc265346d6c4ea5
|
refs/heads/master
| 2023-04-13T05:33:18.764597
| 2021-04-15T19:28:37
| 2021-04-15T19:28:37
| 279,678,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,833
|
rd
|
calc_stream_voronoi_weights.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_stream_voronoi_weights.R
\name{calc_stream_voronoi_weights}
\alias{calc_stream_voronoi_weights}
\title{Calculate polyline (e.g. stream) barycentric coordinates}
\usage{
calc_stream_voronoi_weights(
stream,
voronoi,
triangles,
addTo = NULL,
geometry = T,
correct_seg_order = T,
cutoff_value = 1e-07,
seg_min_length = 1e-07,
keep_stream_cols = NULL
)
}
\arguments{
\item{stream}{sf polyline, "exploded" into segments (see \code{\link{line_explode}})}
\item{voronoi}{sf polygon of voronoi tesselation (unstructured model grid). Shapefile ID field
will be used to determine node ID.}
\item{triangles}{sf polygon of delaunay triangulation corresponding to voronoi grid.}
\item{addTo}{(optional) existing calc_stream_voronoi_weights() output new output should be added
to (False by default)}
\item{geometry}{(optional) T/F whether to include sf geometry in output dataframe (default: True)}
\item{correct_seg_order}{(optional) T/F to re-order the line segments after finding overlaps with
the triangle grid. Will crash if you have multiple separate lines (e.g. two streams). (default:
True)}
\item{cutoff_value}{numeric, minimum barycentric coordinate value. Values below will be forced to
zero (1e-7 by default)}
\item{seg_min_length}{numeric, minimum length of segment to include in calculation (default 1e-7).
Generally just to weed out numerical errors.}
\item{keep_stream_cols}{character array, columns in stream segment dataframe to add to returned
dataframe.}
}
\value{
DataFrame or sf object, if geometry = True. Each row is one segment-triangle overlap,
with six barycentric weights (three for segment end), the three voronoi shape IDs (model nodes)
connected by the triangle, and the segment length in the triangle.
This the expected input of \code{\link{stream_elev_from_slope}} and
the \code{calc_conductance*} functions (e.g. \code{\link{calc_conductance_modflow}})
}
\description{
These coordinates are used as "weights" in the PBJ MODFLOW-USG package to interpolate heads and
distribute flows.
}
\details{
The function can take a while to run
}
\examples{
#-- Read in shapefiles
str <- read_sf(system.file("extdata", "MehlandHill2010_stream.shp", package = "pbjr"))
tri <- read_sf(system.file("extdata", "720_triangles.shp", package = "pbjr"))
vor <- read_sf(system.file("extdata", "720_voronoi.shp", package = "pbjr"))
#-- Explode polyline
str <- line_explode(str)
#-- Run the function
swdf <- calc_stream_voronoi_weights(stream = str, voronoi = vor, triangles = tri)
#-- Example of addTo use (more likely run with new stream shapefile)
more_swdf <- calc_stream_voronoi_weights(stream = str, voronoi = vor, triangles = tri,
addTo = swdf)
}
\author{
Leland Scantlebury
}
|
1246e523aaca3f4f0997170574466af1498bd7c4
|
4c1647c062685ee55e42e686c30c506b3d7200bb
|
/PenalizedRegression.R
|
fb7cff30684716e41599f713c347a79cddf5c399
|
[
"MIT"
] |
permissive
|
zointblackbriar/ML4P
|
a0d8f3469c0ec95194916c1a3c0750296ff70de3
|
8e256dc2c33edf8d1cbb1ea0f3103e779de94dd0
|
refs/heads/master
| 2020-03-24T21:00:34.441313
| 2019-01-17T08:09:18
| 2019-01-17T08:09:18
| 143,008,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,056
|
r
|
PenalizedRegression.R
|
# Test penalties of regression
# High Correlation between predictors
if(!require(glmnet)) {
  install.packages("glmnet"); require(glmnet)
}
if(!require(MASS)) {
  install.packages("MASS"); require(MASS)
}

# Generate Data
set.seed(20000)
# NOTE(review): `my_data` and the %>% pipe are not defined/loaded in this file;
# this assumes my_data and magrittr/dplyr were set up elsewhere.
dependentValue.train <- my_data$HARDNESSP1
independentValues.train <- my_data %>% data.matrix()
# NOTE(review): dependentValue.train is a vector, so 1:dependentValue.train
# uses only its first element (with a warning in recent R); this was probably
# meant to be 1:length(dependentValue.train).
CovMatrix <- outer(1:dependentValue.train, 1:dependentValue.train, function(x, y) {.7^abs(x-y)})
# NOTE(review): MASS::mvrnorm() expects a scalar sample size as its first
# argument and rep() a scalar length -- these calls look transposed/buggy;
# confirm the intended n and dimension.
x <- mvrnorm (independentValues.train, rep(0,dependentValue.train), CovMatrix)
# Response: strong weights on predictors 1-4, unit weights on 5-14, plus noise.
y <- 10 * apply(x[, 1:2], 1, sum) +
  5 * apply(x[, 3:4], 1, sum) +
  apply(x[, 5:14], 1, sum) +
  rnorm(independentValues.train)
# Split data into train and test sets
# train_rows <- sample(1:n, 0.66*n)
# x.train <- x[train_rows, ]
# x.test <- x[-train_rows, ]
#
# y.train <- y[train_rows]
# y.test <- y[-train_rows]
#
# fit.lasso <- glmnet(x.train, y.train, family="gaussian", alpha=1)
# fit.ridge <- glmnet(x.train, y.train, family="gaussian", alpha=0)
# fit.elastic <- glmnet(x.train, y.train, family="gaussian", alpha=0.5)
#
# # 10 fold cross validation for each alpha
# # MSE calculation
# fit.lasso.cv <- cv.glmnet(x.train, y.train, type.measure = "mse", alpha = 1, family="gaussian")
# fit.ridge.cv <- cv.glmnet(x.train, y.train, type.measure = "mse", alpha = 0, family="gaussian")
# fit.elastic.cv <- cv.glmnet(x.train, y.train, type.measure = "mse", alpha = 0.5, family="gaussian")
#
# for(i in 0:10)
# {
# assign(paste("fit", i, sep=""), cv.glmnet(x.train, y.train, type.measure = "mse", alpha=i/10, family="gaussian"))
# }
# # graphics.off()
# # par(mfrow = c(3, 2))
# # plot(fit.lasso, xvar="lambda")
# # plot(fit10, main="LASSO")
# #
# # plot(fit.ridge, xvar="lambda")
# # plot(fit0, main="Ridge")
# #
# # plot(fit.elnet, xvar="lambda")
# # plot(fit5, main="Elastic Net")
#
# yhat0 <- predict(fit0, s=fit0$lambda.1se, newx=x.test)
# yhat1 <- predict(fit1, s=fit1$lambda.1se, newx=x.test)
# yhat2 <- predict(fit2, s=fit2$lambda.1se, newx=x.test)
# yhat3 <- predict(fit3, s=fit3$lambda.1se, newx=x.test)
# yhat4 <- predict(fit4, s=fit4$lambda.1se, newx=x.test)
# yhat5 <- predict(fit5, s=fit5$lambda.1se, newx=x.test)
# yhat6 <- predict(fit6, s=fit6$lambda.1se, newx=x.test)
# yhat7 <- predict(fit7, s=fit7$lambda.1se, newx=x.test)
# yhat8 <- predict(fit8, s=fit8$lambda.1se, newx=x.test)
# yhat9 <- predict(fit9, s=fit9$lambda.1se, newx=x.test)
# yhat10 <- predict(fit10, s=fit10$lambda.1se, newx=x.test)
#
# print(mse0 <- mean((y.test - yhat0)^2))
# print(mse1 <- mean((y.test - yhat1)^2))
# print(mse2 <- mean((y.test - yhat2)^2))
# print(mse3 <- mean((y.test - yhat3)^2))
# print(mse4 <- mean((y.test - yhat4)^2))
# print(mse5 <- mean((y.test - yhat5)^2))
# print(mse6 <- mean((y.test - yhat6)^2))
# print(mse7 <- mean((y.test - yhat7)^2))
# print(mse8 <- mean((y.test - yhat8)^2))
# print(mse9 <- mean((y.test - yhat9)^2))
# print(mse10 <- mean((y.test - yhat10)^2))
#
# sst <- sum((y.test - mean(y.test))^2)
# sse <- sum((yhat3 - y.test)^2)
#
# rsq <- 1 - sse / sst
# print(paste0("R squared error: ", rsq))
|
2ced7618bd83d60935a652ad83e1dd303c9f54fe
|
bae0af3bec95ee9123dd74a3cd42a3792f65e25d
|
/Chapter01/12/gganimate.R
|
8fcb41a03d920f5deed6c72f44f20ee1a0ee0ddb
|
[
"MIT"
] |
permissive
|
PacktPublishing/R-Statistics-Cookbook
|
f521ead1a05104b68663521374861dfced4c1bab
|
74eb6057e47df5d43a981c44a52148bd3930c7e1
|
refs/heads/master
| 2023-02-04T14:18:10.374693
| 2023-01-30T09:26:43
| 2023-01-30T09:26:43
| 179,272,388
| 9
| 18
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
gganimate.R
|
# Animated bubble chart of company Sales vs. Profit, one frame per Year.
# Load required package
library(gapminder)
library(gganimate)

# Basic scatter plot
data = read.csv("./companies.csv", stringsAsFactors = FALSE)
# Fixed point colours keyed by company letter A-F.
colors = c("A"="#AB5406","B"="#EC9936","C"="#BE1826","D"="#9B4A06","E"="#FDD6A2","F"="#9ACD62")

p = ggplot(data, aes(Sales, Profit, size = Profit, colour=Company)) +
  geom_point(alpha = 0.7, show.legend = FALSE) +
  scale_colour_manual(values = colors) +
  scale_size(range = c(2, 12)) +
  # Here comes the gganimate specific bits
  labs(title = 'Year: {frame_time}', x = 'Sales', y = 'Profit') +
  transition_time(Year) +
  ease_aes('linear')

# NOTE(review): absolute output path is machine-specific; a relative path
# would make the script portable.
animate(p, nframes = 48, renderer = gifski_renderer("/Users/admin/Documents/R_book/gganim.gif"))
|
0b4ef3622566479d92f24e7d08b8dffded84fb20
|
366ec1e0e81f9d8c40e2fde01efa44d640c67daa
|
/R/rackauskas_zuokas.R
|
46e985701fbeba433cdd6996feecdab9fde36e74
|
[] |
no_license
|
tjfarrar/skedastic
|
20194324833b8f2f20e5666b642cff617159588c
|
050e6a177a28fb0cc2054b506a53b09d6859e3c7
|
refs/heads/master
| 2022-11-17T22:41:43.930246
| 2022-11-06T06:39:13
| 2022-11-06T06:39:13
| 219,455,416
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,874
|
r
|
rackauskas_zuokas.R
|
#' Rackauskas-Zuokas Test for Heteroskedasticity in a Linear Regression Model
#'
#' This function implements the two methods of
#' \insertCite{Rackauskas07;textual}{skedastic} for testing for heteroskedasticity
#' in a linear regression model.
#'
#' @details Rackauskas and Zuokas propose a class of tests that entails
#' determining the largest weighted difference in variance of estimated
#' error. The asymptotic behaviour of their test statistic
#' \eqn{T_{n,\alpha}} is studied using the empirical polygonal process
#' constructed from partial sums of the squared residuals. The test is
#' right-tailed.
#' @param alpha A double such that \eqn{0 \le \alpha < 1/2}; a hyperparameter
#' of the test. Defaults to 0.
#' @param pvalmethod A character, either \code{"data"} or \code{"sim"},
#' determining which method to use to compute the empirical
#' \eqn{p}-value. If \code{"data"}, the dataset \code{\link{T_alpha}}
#' consisting of pre-generated Monte Carlo replicates from the
#' asymptotic null distribution of the test statistic is loaded and used to
#' compute empirical \eqn{p}-value. This is only available for certain
#' values of \code{alpha}, namely \eqn{i/32} where \eqn{i=0,1,\ldots,15}.
#' If \code{"sim"}, Monte Carlo replicates are generated from the
#' asymptotic null distribution. Partial matching is used.
#' @param R An integer representing the number of Monte Carlo replicates to
#' generate, if \code{pvalmethod == "sim"}. Ignored if
#' \code{pvalmethod == "data"}.
#' @param m An integer representing the number of standard normal variates to
#' use when generating the Brownian Bridge for each replicate, if
#' \code{pvalmethod == "sim"}. Ignored if \code{pvalmethod == "data"}. If
#' number of observations is small,
#' \insertCite{Rackauskas07;textual}{skedastic} recommends using \eqn{m=n}.
#' The dataset \code{\link{T_alpha}} used \eqn{m=2^17} which is
#' computationally intensive.
#' @param sqZ A logical. If \code{TRUE}, the standard normal variates used
#' in the Brownian Bridge when generating from the asymptotic null
#' distribution are first squared, i.e. transformed to \eqn{\chi^2(1)}
#' variates. This is recommended by
#' \insertCite{Rackauskas07;textual}{skedastic} when the number of
#' observations is small. Ignored if \code{pvalmethod == "data"}.
#' @param seed An integer representing the seed to be used for pseudorandom
#' number generation when simulating values from the asymptotic null
#' distribution. This is to provide reproducibility of test results.
#' Ignored if \code{pvalmethod == "data"}. If user does not wish to set
#' the seed, pass \code{NA}.
#'
#' @inheritParams breusch_pagan
#'
#' @return An object of \code{\link[base]{class}} \code{"htest"}. If object is
#' not assigned, its attributes are displayed in the console as a
#' \code{\link[tibble]{tibble}} using \code{\link[broom]{tidy}}.
#' @references{\insertAllCited{}}
#' @importFrom Rdpack reprompt
#' @export
#'
#' @examples
#' mtcars_lm <- lm(mpg ~ wt + qsec + am, data = mtcars)
#' rackauskas_zuokas(mtcars_lm)
#' rackauskas_zuokas(mtcars_lm, alpha = 7 / 16)
#' \donttest{
#' n <- length(mtcars_lm$residuals)
#' rackauskas_zuokas(mtcars_lm, pvalmethod = "sim", m = n, sqZ = TRUE)
#' }
#'
rackauskas_zuokas <- function(mainlm, alpha = 0, pvalmethod = c("data", "sim"),
                              R = 2 ^ 14, m = 2 ^ 17, sqZ = FALSE, seed = 1234,
                              statonly = FALSE) {

  # The test is only defined for 0 <= alpha < 1/2.
  if (alpha < 0 || alpha >= 1 / 2) stop("Invalid `alpha` argument. `alpha` must be >= 0
and < 1/2")

  # Project helper: populates `e` (the model residuals) in this environment;
  # the response and parameter count are not needed here.
  processmainlm(m = mainlm, needy = FALSE, needp = FALSE)

  n <- length(e)
  # T_{n,alpha}: maximum over window lengths ell and offsets k of the
  # (ell/n)^(-alpha)-weighted absolute deviation between a window's sum of
  # squared residuals and its proportional share of the total
  # (per Rackauskas & Zuokas 2007).
  Tnalpha <- max(vapply(1:(n - 1), function(ell) max((ell / n) ^ (-alpha) *
               vapply(0:(n - ell), function(k)
                 abs(sum(e[(k + 1):(k + ell)] ^ 2 - 1 / n * sum(e ^ 2))), NA_real_)), NA_real_))
  # Normalising estimate: sample variance of the squared residuals.
  deltahat <- mean((e ^ 2 - mean(e ^ 2)) ^ 2)
  teststat <- Tnalpha / sqrt(deltahat * n)
  if (statonly) return(teststat)

  pvalmethod <- match.arg(pvalmethod, c("data", "sim"))
  if (pvalmethod == "data") {
    # Use pre-generated Monte Carlo replicates shipped with the package;
    # only available for alpha = i/32, i = 0, ..., 15.
    utils::data(T_alpha)
    if (min(abs(alpha - (0:15 / 32))) > 1e-6) stop("Values from the
null distribution have not been pre-generated for this
value of alpha")
    whichcol <- alpha * 32 + 1
    # Empirical right-tail p-value against the stored null replicates.
    pval <- sum(teststat < T_alpha[, whichcol]) / nrow(T_alpha)
  } else if (pvalmethod == "sim") {
    # Simulate R replicates from the asymptotic null distribution on the fly.
    Talphavals <- rksim(R. = R, m. = m, sqZ. = sqZ, seed. = seed,
                        alpha. = alpha)
    pval <- sum(teststat < Talphavals) / R
  }

  rval <- structure(list(statistic = teststat, p.value = pval,
                         null.value = "Homoskedasticity", alternative = "greater",
                         method = pvalmethod, parameter = alpha), class = "htest")
  broom::tidy(rval)
}
|
faa1416296dab08bc95244f5d2d7c9156a000549
|
37c9adc4ab2de4f3b60a22dcfb9499780e31e1c3
|
/_03_CreateModelingDataset.R
|
4cb2c1208b2b7f38e8a6ba7431b3d0cac05fa020
|
[] |
no_license
|
dnegrey/DSCI-352
|
262c55e7a44641574fe4497d1d9c0eed650a9c53
|
76d5592a637b3b5519e5626b8efc23e3d328c911
|
refs/heads/master
| 2020-06-21T00:23:49.452933
| 2019-07-17T02:15:25
| 2019-07-17T02:15:25
| 197,298,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,682
|
r
|
_03_CreateModelingDataset.R
|
# Build the modeling dataset: create a binary IPA flag, dummy-code the
# categorical fields, and keep only numeric columns for modeling.

# load clean beer RData
load("_01_ImportBeerData.RData")

# goal: predict likelihood that a beer is an IPA based on other beer attributes

# create binary target ("dependent variable")
# flag beers as IPA (based on Style)
b$FlagIPA <- grepl(
  pattern = "IPA",
  x = b$Style,
  fixed = TRUE
)

# print a table summarizing style and flag just to double check
table(b$Style, b$FlagIPA)

# convert character fields to integer binary flags for each level
summary(factor(b$SugarScale))
b$SugarScale_Plato <- as.integer(b$SugarScale == "Plato")
b$SugarScale_SpecificGravity <- as.integer(b$SugarScale == "Specific Gravity")
table(b$SugarScale_Plato, b$SugarScale_SpecificGravity)

summary(factor(b$BrewMethod))
b$BrewMethod_AllGrain <- as.integer(b$BrewMethod == "All Grain")
b$BrewMethod_BIAB <- as.integer(b$BrewMethod == "BIAB")
b$BrewMethod_Extract <- as.integer(b$BrewMethod == "extract")
b$BrewMethod_PartialMash <- as.integer(b$BrewMethod == "Partial Mash")
# cross-tabs confirm each dummy matches its source level
table(b$BrewMethod, b$BrewMethod_AllGrain)
table(b$BrewMethod, b$BrewMethod_BIAB)
table(b$BrewMethod, b$BrewMethod_Extract)
table(b$BrewMethod, b$BrewMethod_PartialMash)

# drop all character fields for modeling
# can get descriptors back by joining to earlier dataset on BeerID
library(dplyr)
bm <- b %>%
  select(
    BeerID,
    OG,
    FG,
    ABV,
    IBU,
    Color,
    BoilSize,
    BoilTime,
    BoilGravity,
    Efficiency,
    MashThickness,
    SugarScale_Plato,
    SugarScale_SpecificGravity,
    BrewMethod_AllGrain,
    BrewMethod_BIAB,
    BrewMethod_Extract,
    BrewMethod_PartialMash,
    PitchRate,
    PrimaryTemp,
    FlagIPA
  )

# we need to randomly split our dataset into 2 sets:
# 1 for building our model and 1 for validating it
# what we need is a uniform random number
# create a function for this so everyone in the class has the same sets
# (this has to do with setting the seed and how R works)
# Deterministic uniform draws: the fixed default seed guarantees that every
# run (and every student) produces the same random numbers, hence the same split.
foo <- function(num, seed = 20190717) {
  set.seed(seed)
  draws <- runif(n = num)
  draws
}
# quick sanity check of the generator
head(foo(nrow(bm)))

# put random number on data set
bm$RandomNumber <- foo(nrow(bm))

# for skeptics, check a basic histogram (should look uniform)
hist(bm$RandomNumber)

# now, we have about 74K observations
mean(bm$FlagIPA)
# and a 23% success rate of our DV

# we could probably get by modeling on ~20% of our data but lets use 30%
bm$BuildValidate <- ifelse(
  bm$RandomNumber <= 0.30,
  "Build",
  "Validate"
)
summary(factor(bm$BuildValidate))

# save dataset in RData file
save(list = c("bm"), file = "_03_CreateModelingDataset.RData")
|
6486d074dd6826802a6fb0350b50eeb7bf9e2d61
|
21dec0ef47fc2d0412bb95f6f7fd19d5a723cd02
|
/fxn_RMIntense.R
|
b521b554e88a28489c06914d88849be9bb18a8b5
|
[] |
no_license
|
srcorsi-USGS/Rainmaker
|
f420b34480cb96b7b459dd4fe2f0dfa710a41620
|
a2f39d07c557046ef8f5ae281da0941a2f4c4084
|
refs/heads/master
| 2021-01-23T03:59:35.642932
| 2012-03-01T23:53:45
| 2012-03-01T23:53:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,102
|
r
|
fxn_RMIntense.R
|
# Function to compute maximum x-minute rainfall intensities in units of depth/hr
# This funtion requires package TTR to be installed
# Input: unit value rain file
# Input: Storms file
# Output: X-hour maximum rainfall intensities
#Usage: RMIntense(df,
# date, Date column in df as POSIX
# rain, Column in df with instantaneous rain values
# df.events, Date frame with start and end dates/times for events
# sdate, Start date column in df.events rain file as POSIX
# edate, End date column in df.events rain file as POSIX
# xmin) Vector of values representing X-minute max
# rainfall requested
# Compute the overall event intensity plus the maximum x-minute running
# rainfall intensity (units/hr) for every event in df.events.
#
# df        unit-value rain data frame
# date      name of the POSIXct date/time column in df
# rain      name of the instantaneous rain column in df
# df.events events data frame; must contain the sdate/edate columns and a
#           literal 'rain' column holding the event rain totals
# sdate     name of the event start date/time column in df.events (POSIXct)
# edate     name of the event end date/time column in df.events (POSIXct)
# xmin      vector of window lengths, in minutes, for the running maxima
#
# Returns df.events with added columns: duration (hours), Ievent, and one
# I<xmin> column per requested window.
RMIntense <- function(df, date = "r.date", rain = "rain",
                      df.events, sdate = "sdate", edate = "edate",
                      xmin = c(60, 180, 360)) {
  # Overall event intensity: total rain divided by event duration in hours.
  # The +1 keeps a single-timestep event from having zero duration.
  df.events$duration <- 1 + as.numeric(difftime(df.events[, edate],
                                                df.events[, sdate],
                                                units = "hours"))
  # NOTE(review): event totals are read from the literal 'rain' column of
  # df.events, not from the column named by the `rain` argument.
  df.events$Ievent <- df.events$rain / df.events$duration
  # Determine x-minute intensities for each of the window lengths specified.
  for (i in seq_along(xmin)) {
    x <- xmin[i] * 60                         # window length in seconds
    intensity.var <- paste("I", xmin[i], sep = "")
    df.events[, intensity.var] <- NA
    # Isolate individual events and compute the max x-min intensity for each.
    for (j in seq_len(nrow(df.events))) {
      # Use standard evaluation (df[[date]]) instead of subset(): inside
      # subset() the bare name `date` resolved to the *argument string*
      # (e.g. "r.date"), not the date column, breaking the filter whenever
      # the column was not literally named "date".
      in.event <- df[[date]] >= df.events[j, sdate] &
        df[[date]] <= df.events[j, edate]
      subdf <- df[in.event, , drop = FALSE]
      # For every observation in the event, sum the rain falling in
      # [t, t + x) and convert to units/hr by dividing by x in hours.
      intensity <- numeric(length = nrow(subdf))
      for (k in seq_len(nrow(subdf))) {
        bdate <- subdf[k, date]
        enddate <- bdate + x
        in.window <- subdf[[date]] >= bdate & subdf[[date]] < enddate
        intensity[k] <- sum(subdf[in.window, rain]) / (x / 60 / 60)
      }
      # Guard against events with no matching unit-value observations
      # (max() of an empty vector would return -Inf with a warning).
      df.events[j, intensity.var] <- if (length(intensity) > 0) {
        max(intensity, na.rm = TRUE)
      } else {
        NA
      }
    }
  }
  return(df.events)
}
|
1ba9224cebc18407582bf6e8b6b85474192068c9
|
4fe9e46b985c6e1677418d98c6ca37b6db13c57c
|
/astrazeneca-atrasada.R
|
c695490310b32aa72980b9fe1671cc242c85fba9
|
[] |
no_license
|
jfemdados/vacinas
|
15a8b9b52c389f6cf819a27091ba2dfff342686e
|
ea61b5f01282b36397e51f282719afa6ef9fb9b6
|
refs/heads/main
| 2023-07-13T10:00:30.199165
| 2021-08-16T11:45:43
| 2021-08-16T11:45:43
| 349,569,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,838
|
r
|
astrazeneca-atrasada.R
|
### SCRIPT ONLY FOR THE ASTRAZENECA DELAYS IN JF
# Objective: this script builds analyses of COVID-19 vaccination in
# Juiz de Fora, Minas Gerais
# Author: Matheus Valentim
# Data import -------------------
# Query the public vaccination microdata (basedosdados BigQuery mirror),
# restricted to establishments in Juiz de Fora (IBGE code 3136702).
basedosdados::set_billing_id('double-voice-305816')
base_completa<- basedosdados::read_sql("SELECT id_paciente, idade_paciente,
referencia_fabricante_vacina, id_municipio_estabelecimento, data_aplicacao_vacina,
nome_fabricante_vacina,
nome_fantasia_estabelecimento,
dose_vacina, lote_vacina, categoria_vacina,
sexo_paciente, raca_cor_paciente
FROM `basedosdados.br_ms_vacinacao_covid19.microdados`
WHERE id_municipio_estabelecimento = '3136702'", 'double-voice-305816')
# 5) Delayed AstraZeneca vaccines
# Normalize the manufacturer labels (several names map to the same brand),
# keep only AstraZeneca doses, and keep one record per patient/dose.
astrazeneca <- base_completa %>%
  select(id_paciente, data_aplicacao_vacina, nome_fabricante_vacina, dose_vacina,
         raca_cor_paciente, sexo_paciente, idade_paciente, referencia_fabricante_vacina) %>%
  # there was more than one name for the same vaccine brand
  mutate(nome_fabricante_vacina_novo =
           case_when(nome_fabricante_vacina == "SINOVAC LIFE SCIENCE CO LTD" ~ "FUNDACAO BUTANTAN",
                     nome_fabricante_vacina == "MINISTERIO DA SAUDE" ~ "Sem identificação",
                     nome_fabricante_vacina == "SERUM INSTITUTE OF INDIA LTD" ~ "Astrazeneca",
                     nome_fabricante_vacina == "FUNDACAO OSWALDO CRUZ" ~ "Astrazeneca",
                     TRUE ~ nome_fabricante_vacina)) %>%
  filter(nome_fabricante_vacina_novo == 'Astrazeneca') %>%
  distinct(id_paciente, dose_vacina, .keep_all = TRUE)
# Filtering the base to create one first-dose and one second-dose dataset
# (the "\xa0" in the dose labels is a non-breaking space in the raw data).
primeira_dose_astrazeneca<-astrazeneca %>%
  select(id_paciente, data_aplicacao_vacina, dose_vacina, sexo_paciente, raca_cor_paciente,
         idade_paciente) %>%
  filter(dose_vacina =="1ª\xa0Dose") %>%
  mutate(primeira_dose = 1) %>%
  rename(data_primeira_dose = data_aplicacao_vacina) %>%
  select(-dose_vacina)
segunda_dose_astrazeneca<-astrazeneca %>%
  select(id_paciente, data_aplicacao_vacina,dose_vacina) %>%
  filter(dose_vacina =="2ª\xa0Dose") %>%
  mutate(segunda_dose = 1) %>%
  rename(data_segunda_dose = data_aplicacao_vacina) %>%
  select(-dose_vacina)
# Join first and second doses per patient and flag late second doses.
# A dose is late (atrasada == 1) when the second dose came more than 90 days
# after the first, or when no second dose was recorded (NA after the left
# join) and the 90-day due date had already passed by 2021-07-31.
nova_astra <- primeira_dose_astrazeneca %>%
  left_join(segunda_dose_astrazeneca, by = 'id_paciente') %>%
  mutate(data_primeira_dose = lubridate::ymd(data_primeira_dose),
         data_segunda_dose = lubridate::ymd(data_segunda_dose),
         data_adequada_segunda = data_primeira_dose + 90,
         # Fixed two bugs in the original condition:
         #  * `2021-07-31` was bare arithmetic (2021 - 7 - 31 = 1983), so the
         #    Date comparison could never trigger; compare against a real
         #    Date instead.
         #  * `segunda_dose == is_null(segunda_dose)` never detected missing
         #    second doses; the left join marks them as NA, so test is.na().
         atrasada = case_when(
           data_segunda_dose > data_adequada_segunda ~ 1,
           is.na(data_segunda_dose) &
             data_adequada_segunda < as.Date("2021-07-31") ~ 1
         ))
|
2c032a0074a7189df51670df9a5414074005c990
|
f578929e6e33cb9b7f918afa010f89b09a5ffa90
|
/R/id463315.R
|
9124fb3e5eac71d5b05dd853a0332d61880e7221
|
[] |
no_license
|
goranbrostrom/cedar16
|
604b1ca378f0762ffc148605938d2b9da27701b3
|
cacfebd31121949689e5cd0cf1cc7488b245502a
|
refs/heads/master
| 2021-01-10T10:53:56.294573
| 2016-04-20T16:38:33
| 2016-04-20T16:38:33
| 55,224,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 906
|
r
|
id463315.R
|
# Plot the observation intervals (age at entry to age at exit) for person
# id 463315, one horizontal line per interval, sorted by entry age. The
# intervals listed in `incl` are highlighted in red as 'included'.
#
# dat       data frame with columns id, startdat, slutdat
# birthdate decimal birth year used to convert dates to ages
id463315 <- function(dat = observations, birthdate = 1795.385){
  # tillTid() converts the raw date codes to decimal years (defined elsewhere).
  enter <- tillTid(dat$startdat[dat$id == 463315]) - birthdate
  exit <- tillTid(dat$slutdat[dat$id == 463315]) - birthdate
  n <- length(exit)
  # Sort by entry age; on ties, longest interval first.
  ord <- order(enter, -exit)
  enter <- enter[ord]
  exit <- exit[ord]
  # Draw the first interval and set up the axes/limits for the rest.
  plot(c(enter[1], exit[1]), c(1, 1), type = "l", xlab = "Age",
       ylim = c(0, 36), xlim = c(min(enter), max(exit)),
       col = "blue", axes = FALSE, ylab = "interval No.", lwd = 1.2)
  axis(1)
  wid <- c(1, 5, 10, 15, 20, 25, 30, 34)
  axis(2, at = wid)
  box()
  ## abline(h = wid, col = "darkgreen", lty = 3)
  # The colour assignment is loop-invariant, so do it once up front
  # (the original re-assigned clo[incl] on every loop iteration).
  clo <- rep("blue", n)
  incl <- c(4:9, 13, 16, 20, 22, 23)
  clo[incl] <- "red"
  for (i in 2:n){
    lines(c(enter[i], exit[i]), c(i, i), type = "l", col = clo[i],
          lwd = 1.2)
  }
  text(13, 18, "red = 'included' intervals", col = "red")
}
|
097af32b2b6ca3306a4c917434880418be697a7d
|
29c0a57cc284c57ab4701717d46b83f4227ed75c
|
/man/restez_path_set.Rd
|
2ffacc8cbe885c9dbb24c354c45c11d62f767aa4
|
[
"MIT"
] |
permissive
|
ropensci/restez
|
753d476ee87f7c9623656643129e3c0e94229f22
|
d7136fb3c71b96f761d10f4ba9fe4787165f14dd
|
refs/heads/main
| 2023-08-03T16:38:58.503846
| 2023-07-23T22:04:55
| 2023-07-23T22:48:46
| 129,107,980
| 23
| 6
|
NOASSERTION
| 2023-07-23T22:48:47
| 2018-04-11T14:38:21
|
R
|
UTF-8
|
R
| false
| true
| 815
|
rd
|
restez_path_set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filepath-tools.R
\name{restez_path_set}
\alias{restez_path_set}
\title{Set restez path}
\usage{
restez_path_set(filepath)
}
\arguments{
\item{filepath}{character, valid filepath to the folder where the
database should be stored.}
}
\description{
Specify the filepath for the local GenBank database.
}
\details{
Adds 'restez_path' to options(). In this path
the folder 'restez' will be created and all downloaded and
database files will be stored there.
}
\examples{
\dontrun{
library(restez)
restez_path_set(filepath = 'path/to/where/you/want/files/to/download')
}
}
\seealso{
Other setup:
\code{\link{restez_path_get}()},
\code{\link{restez_path_unset}()},
\code{\link{restez_ready}()},
\code{\link{restez_status}()}
}
\concept{setup}
|
fd4ac697383ab4b40a5e75179097fb0592a442e3
|
a3f9b39352ae4409dab117b1a1c129a8778585fb
|
/HLEIneqScotxSex.R
|
0ba1a77e4c46a99224f33e14f9a4bf295165ed62
|
[] |
no_license
|
VictimOfMaths/Routine-Data
|
01a7a416b4f0bde909a0e15518c6cf767739f362
|
466ed22342dcb8ec941806497385f2b7f7e1d8ca
|
refs/heads/master
| 2023-07-20T10:29:15.453387
| 2023-07-17T11:52:15
| 2023-07-17T11:52:15
| 245,402,797
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,372
|
r
|
HLEIneqScotxSex.R
|
# NOTE(review): rm(list = ls()) wipes the interactive workspace; acceptable
# for a standalone script, but avoid sourcing this from another session.
rm(list=ls())
library(tidyverse)
library(curl)
library(readxl)
library(extrafont)
library(ragg)
# Download the NRS healthy-life-expectancy workbook (2017-19 edition).
temp <- tempfile()
url <- "https://www.nrscotland.gov.uk/files//statistics/healthy-life-expectancy/17-19/healthy-life-expectancy-17-19-data.xlsx"
temp <- curl_download(url=url, destfile=temp, quiet=FALSE, mode="wb")
# The Figure 6 range holds 10 male rows, a blank spacer row, then 10 female
# rows (one per SIMD decile, most to least deprived). Column 1 of the range
# is Healthy Life Expectancy and column 5 is Life Expectancy; years in poor
# health (ULE) are derived as LE - HLE, then the two measures are stacked
# long for the stacked-bar plot.
rawdata <- read_excel(temp, sheet="Figure6 Data", range="C7:G27", col_names=FALSE) %>%
  mutate(Sex=c(rep("Male", times=10), NA, rep("Female", times=10))) %>%
  filter(!is.na(Sex)) %>%
  mutate(SIMD=rep(c("Most deprived", rep("", times=8), "Least deprived"), times=2),
         index=rep(1:10, times=2)) %>%
  rename(HLE=`...1`, LE=`...5`) %>%
  mutate(ULE=LE-HLE) %>%
  gather(Measure, Years, c(HLE, ULE)) %>%
  mutate(Measure=factor(Measure, levels=c("ULE", "HLE")))
# Stacked horizontal bars of healthy vs unhealthy years per SIMD decile,
# faceted by sex, written straight to a tiff via the ragg device.
agg_tiff("Outputs/HLEIneqScotlandxSex.tiff", units="in", width=8, height=6, res=500)
ggplot(rawdata, aes(x=Years, y=as.factor(index), fill=Measure, label=round(Years, 1)))+
  geom_col(position="stack")+
  geom_text(aes(colour=Measure),position=position_stack(vjust=0.5), show.legend=FALSE, size=3)+
  scale_x_continuous(name="Years of life", breaks=seq(0,90, by=10))+
  scale_y_discrete(labels=c("Most deprived", rep("", times=8), "Least deprived"), name="SIMD decile")+
  scale_fill_manual(name="", values=c("#009f92", "#03312e"), labels=c("Years lived in poor health",
                                                                      "Years lived in good health"))+
  scale_colour_manual(values=c("Black", "White"))+
  facet_grid(Sex~., switch="y")+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        axis.ticks.y=element_blank(), text=element_text(family="Lato"), legend.position="top",
        axis.line.y=element_blank(), plot.title=element_text(face="bold", size=rel(1.6)),
        plot.title.position="plot", panel.grid.major.x=element_line())+
  guides(fill=guide_legend(reverse=TRUE))+
  labs(title="Inequalities in healthy lifespan are larger than in overall lifespan",
       subtitle="Average years lived in self-rated 'good' or 'very good' health compared to overall Life Expectancy in Scotland\nby sex and decile of the Scottish Index of Multiple Deprivation",
       caption="Data from National Records of Scotland | Plot by @VictimOfMaths")
dev.off()
|
b6b4edd2ee5a982aa33f018ce606db076084cf6f
|
5dfab2c2519bd2545ecefc5b4f0a63aa9d7370f6
|
/Simulation_and_Risk/HW1_finalcode.R
|
3ad56f45d089a17d9d0d3eaf63663418f85fd358
|
[] |
no_license
|
sopheeli/F3-Blueteam12
|
1698adcf678684c6ac32792dfc0dfc03812f75a1
|
b6456227f0812ba598870c2e32df091b5d923346
|
refs/heads/master
| 2020-04-03T18:36:27.946559
| 2018-11-30T23:47:06
| 2018-11-30T23:47:06
| 155,490,476
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,581
|
r
|
HW1_finalcode.R
|
#--------------------------#
#        JCP HW 1          #
#        Simulation        #
#                          #
#--------------------------#
# Needed Libraries for Analysis #
library(graphics)
#install.packages("ks")
library(ks)        # kernel density estimation (kde / rkde)
# install.packages("xlsx")
# library("xlsx")
# install.packages("readxl") # CRAN version
library(readxl)
library(dplyr)
#install.packages("EnvStats") # CRAN version
library(EnvStats)  # triangular distribution (rtri)
library(ggplot2)
# set up working directory
# NOTE(review): hard-coded absolute path; edit (or drop setwd entirely)
# before running on another machine.
setwd("C:/Users/Sophe/Desktop/FALL/Fall3/SimulationandRiskAnalysis/Project/HW1/")
# import data from the XLSX
drill = read_excel("Analysis_Data.xlsx", sheet = 2, skip = 2)
# shortens data set to relevant years (June 1991 through July 2006)
drill1 <- drill %>% filter(Date >= "1991-06-01") %>% filter(Date <= "2006-07-01")
# change character to a numeric
drill1$Return.Crude.Oil = (as.numeric(drill1$`Arithmetic Return - Crude Oil`))
drill1$Return.Natural.Gas = (as.numeric(drill1$`Arithmetic Return - Natural Gas`))
drill1$Return.Dry.Well = (as.numeric(drill1$`Arithmetic Return - Dry Well`))
# creates average for the cost (mean of the three well types)
drill1$Average.Cost = ((drill1$`U.S. Nominal Cost per Crude Oil Well Drilled (Thousand Dollars per Well)`+
                          drill1$`U.S. Nominal Cost per Natural Gas Well Drilled (Thousand Dollars per Well)` +
                          drill1$`U.S. Nominal Cost per Dry Well Drilled (Thousand Dollars per Well)`)/3)
# histogram for average cost and average return (base graphics version)
hist(c(drill1$Return.Crude.Oil, drill1$Return.Natural.Gas, drill1$Return.Dry.Well),
     main = "Arithmetic Change in Cost Distribution",xlab="Arithmetic Return", col = "#6699FF", breaks = 24)
# Basic histogram (ggplot version of the same pooled distribution)
x = as.data.frame(c(drill1$Return.Crude.Oil, drill1$Return.Natural.Gas, drill1$Return.Dry.Well))
ggplot(x, aes(x=c(drill1$Return.Crude.Oil, drill1$Return.Natural.Gas, drill1$Return.Dry.Well))) +
  geom_histogram(fill="#6699FF", color="black", binwidth=.075)+
  # geom_histogram(fill="#6699FF", color="black")+
  labs(title="Arithmetic Change in Cost Distribution",x="Arithmetic Return", y = "Frequency")+
  theme_minimal() + theme(title = element_text(size = 18), axis.title.x = element_text(size =14), axis.title.y = element_text(size =14))
#-------------------------Kernel Density---------------------------------#
# Kernel Estimation for 2006 to 2012 based on historical data from 1991 to 2006
set.seed(8888)
# Sheather-Jones bandwidth for the pooled returns (all three well types).
Density.R <- density(c(drill1$Return.Crude.Oil,drill1$Return.Natural.Gas,drill1$Return.Dry.Well),bw="SJ-ste")
Density.R
# Draw 10,000 simulated returns from the fitted kernel density
# (h = 0.07935 — presumably the SJ bandwidth printed above; confirm).
Est.R <- rkde(fhat=kde(c(drill1$Return.Crude.Oil,drill1$Return.Natural.Gas,drill1$Return.Dry.Well), h=0.07935), n=10000)
hist(Est.R, breaks=50, main='Estimated 2006 to 2012 Return Value Distribution', xlab='Return')
Est.Hist = as.data.frame(Est.R)
ggplot(Est.Hist, aes(x=Est.R)) +
  #geom_histogram(fill="#6699FF", color="black", binwidth=.075)+
  geom_histogram(fill="#6699FF", color="black")+
  labs(title="Estimated 2006 to 2012 Return Value Distribution",x="Return", y = "Frequency")+
  theme_minimal() + theme(title = element_text(size = 18), axis.title.x = element_text(size =14), axis.title.y = element_text(size =14))
# qq-plot -- not sure which one to look at
qqnorm(c(drill1$Return.Crude.Oil,drill1$Return.Natural.Gas,drill1$Return.Dry.Well), col = "#6699FF", main="QQ Plot Historical Data")
qqline(c(drill1$Return.Crude.Oil,drill1$Return.Natural.Gas,drill1$Return.Dry.Well), col = "2")
# Shapiro-Wilk normality test p-value = 0.8041
shapiro.test(c(drill1$Return.Crude.Oil,drill1$Return.Natural.Gas,drill1$Return.Dry.Well))
summary(c(drill1$Return.Crude.Oil,drill1$Return.Natural.Gas,drill1$Return.Dry.Well))
kurtosis(c(drill1$Return.Crude.Oil,drill1$Return.Natural.Gas,drill1$Return.Dry.Well))
#-------------------------Simulation---------------------------------#
##############Simulation1: Kernel Density #######################################
#should I use triangular dist. for r2 and r3?
#seems wrong, cannot assume mean is mode
#but cannot use normal dist. as well, b/c don't know std
#need discussion
n = 500000
cost_k <- rep(0,n)
# Fit the kernel density estimate ONCE, outside the loop: the fit is
# deterministic, so the original code refitting it 500,000 times only
# wasted time without changing the simulated draws.
fhat.returns <- kde(c(drill1$Return.Crude.Oil, drill1$Return.Natural.Gas,
                      drill1$Return.Dry.Well), h = 0.07935)
cost.2006 <- drill1$Average.Cost[16]   # 2006 baseline drilling cost
for (i in 1:n){
  # Years 2006-2012: six draws from the kernel density of historical returns.
  r1 <- rkde(fhat = fhat.returns, n = 6)
  # Years 2012-2015: three triangular draws for the assumed downturn.
  r2 <- rtri(n=3, mode = -0.0917, min = -0.22, max = -0.07)
  # Years 2015-2019: four triangular draws for the assumed recovery.
  r3 <- rtri(n=4, mode = 0.05, min = 0.02, max = 0.06)
  P <- cost.2006
  r <- append(r1,append(r2,r3)) # give the rate of return from years 2006 to 2019, 13 years
  for (j in 1:13){
    P <- P*(1+r[j]) # compound forward to the 2019 cost prediction
  }
  cost_k[i] <- P # outputs cost prediction for 2019 to vector "cost_k"
}
summary(cost_k) #summary statistics for cost_k
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#367.4 2577.8 3476.5 3769.7 4634.9 27030.9
quantile(cost_k, probs = c(0.05, 0.95))
# 5% 95%
# 1643.848 6893.007
quantile(cost_k, probs = c(0.01, 0.99))
# 1% 99%
# 1187.543 9006.787
hist(cost_k, breaks=100, main='2019 Cost Prediction Distribution KD', xlab='Cost', col = "#6699FF")
abline(v = drill1$Average.Cost[16], col="red", lwd=2)
mtext("2006 Cost", at=drill1$Average.Cost[16], col="red")
#-------------------------Simulation---------------------------------#
########Simulation2: normal distribution###############################
n = 500000
cost_n <- rep(0,n)
# Compute the normal-distribution parameters ONCE, outside the loop: the
# historical mean/sd are constants, so the original code recomputing them
# 500,000 times only wasted time without changing the simulated draws.
return.mean <- mean(c(drill1$Return.Crude.Oil, drill1$Return.Natural.Gas,
                      drill1$Return.Dry.Well))
return.sd <- sd(c(drill1$Return.Crude.Oil, drill1$Return.Natural.Gas,
                  drill1$Return.Dry.Well))
cost.2006 <- drill1$Average.Cost[16]   # 2006 baseline drilling cost
for (i in 1:n){
  # Years 2006-2012: six draws from a normal fit to the historical returns.
  r1 <- rnorm(n=6, mean = return.mean, sd = return.sd)
  # Years 2012-2015: three triangular draws for the assumed downturn.
  r2 <- rtri(n=3, mode = -0.0917, min = -0.22, max = -0.07)
  # Years 2015-2019: four triangular draws for the assumed recovery.
  r3 <- rtri(n=4, mode = 0.05, min = 0.02, max = 0.06)
  P <- cost.2006
  r <- append(r1,append(r2,r3))
  for (j in 1:13){
    P <- P*(1+r[j])   # compound the 13 annual returns forward to 2019
  }
  cost_n[i] <- P
}
summary(cost_n) #summary statistics for cost_n
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#292.2 2670.8 3528.6 3770.5 4600.2 16280.9
quantile(cost_n, probs = c(0.05, 0.95))
# 5% 95%
# 1748.740 6618.248
quantile(cost_n, probs = c(0.01, 0.99))
# 1% 99%
# 1270.605 8434.950
hist(cost_n, breaks=100, main='2019 Cost Prediction Distribution N', xlab='Cost', col = "#6699FF")
abline(v = drill1$Average.Cost[16], col="red", lwd=2)
mtext("2006 Cost", at=drill1$Average.Cost[16], col="red")
|
f1e3d1a50d186625a9ca11f384b3f7717e3d8ab4
|
a3c9c7338b361c5258771860e1bee5134622323f
|
/cog_plot.R
|
19470a294e32d896e0ead18f799afc80e5b5d4d6
|
[] |
no_license
|
yogesh1612/Text-Analytics
|
5a77c40984cff71e7d3c080ed55927500c64479a
|
aa54477588fd7a08f286f30bbb37cd1d3d82f4cd
|
refs/heads/master
| 2021-05-08T18:54:47.792772
| 2018-01-30T15:05:37
| 2018-01-30T15:05:37
| 119,132,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,885
|
r
|
cog_plot.R
|
# Load the Amazon Nokia Lumia reviews corpus (one review per line).
nokia <- readLines('https://github.com/sudhir-voleti/sample-data-sets/raw/master/text%20analysis%20data/amazon%20nokia%20lumia%20reviews.txt')
# Alias kept for interactive experimentation; the functions below take the
# corpus as an argument.
text <- nokia
# Function 1
# Clean a character vector of raw reviews and tokenize it into a tidy
# one-row-per-word tibble (columns: document, word), dropping standard
# English stop words plus a few corpus-specific ones.
clean_text_tokenizer <- function(text){
  if (!require(tidytext)) {install.packages("tidytext"); library(tidytext)}
  if (!require(dplyr)) {install.packages("dplyr"); library(dplyr)}  # fixed: was misspelled "dypyr"
  if (!require(tm)) {install.packages("tm"); library(tm)}
  # Strip markup and non-alphanumerics, normalize encoding and case, drop
  # digits, and collapse/trim whitespace.
  temp = gsub("(<.*>)|([^[:alnum:]])", " ", text)
  temp = iconv(temp, "latin1", "ASCII", sub="")
  temp = tolower(temp)
  temp = gsub("\\d", "", temp)
  temp = stripWhitespace(temp)
  temp = gsub("^\\s+|\\s+$", "", temp)
  # One row per document; seq_along() generalizes the original hard-coded
  # 1:120, which only worked for the 120-review nokia corpus.
  temp <- data_frame(document = seq_along(temp), text = temp)
  temp <- temp %>% unnest_tokens(word, text)
  my_stop_words <- c("phone","samsung", "phones") # my stop words defined
  custom_stop_words <- bind_rows(data_frame(word = my_stop_words,
                                            lexicon = c("custom")), stop_words)
  custom_stop_words = unique(custom_stop_words) # de-duplicating
  temp <- temp %>%
    anti_join(custom_stop_words)
  return(temp) }
# Function 2 A - foor creating DTM matrix
# Build a sparse document-term matrix (documents x words, raw counts) from
# the tidy one-word-per-row output of clean_text_tokenizer().
dtm_creator <- function(cleaned_text){
  if (!require(tidytext)) {install.packages("tidytext"); library(tidytext)}
  if (!require(dplyr)) {install.packages("dplyr"); library(dplyr)}  # fixed: was misspelled "dypyr"
  if (!require(tm)) {install.packages("tm"); library(tm)}
  # Per-document word counts.
  temp <- cleaned_text %>% count(document, word, sort = TRUE) %>%
    ungroup()
  # creating DTM
  dtm_temp <- temp %>%
    cast_sparse(document, word, n)
  return(dtm_temp) }
# Function 2 B - for TF-IDF Matrix
# Build a sparse document-term matrix weighted by TF-IDF instead of raw
# counts, from the tidy output of clean_text_tokenizer().
tf_idf_creator <- function(cleaned_text){
  if (!require(tidytext)) {install.packages("tidytext"); library(tidytext)}
  if (!require(dplyr)) {install.packages("dplyr"); library(dplyr)}  # fixed: was misspelled "dypyr"
  if (!require(tm)) {install.packages("tm"); library(tm)}
  # Per-document word counts plus each document's total word count.
  temp <- cleaned_text %>% count(document, word, sort = TRUE) %>%
    ungroup()
  total_temp <- temp %>% group_by(document) %>%
    summarize(total = sum(n))
  temp <- left_join(temp, total_temp)
  # creating TF-IDF matrix
  temp <- temp %>%
    bind_tf_idf(word, document, n)
  tf_tdf_matrix <- temp %>%
    cast_sparse(document, word, tf_idf)
  return(tf_tdf_matrix) }
# Function 3
# Draw a word cloud of (up to) the first 50 terms of a document-term matrix,
# sized by each term's column total relative to the mean column total.
plot_wrd_cloud = function(dtm_temp){
  if (!require(wordcloud)) {install.packages("wordcloud"); library(wordcloud)}
  dtm = as.matrix(dtm_temp)
  dtm_colsum = apply(dtm, 2, sum)
  min_word = min(50, length(dtm_colsum))
  words = colnames(dtm)[1:min_word]
  # Scale frequencies so the average term gets weight 10, then keep only the
  # entries for the selected words: the original passed the full-length freq
  # vector alongside a truncated words vector, so wordcloud() received
  # arguments of mismatched length whenever the DTM had more than 50 terms.
  freq = 10 * dtm_colsum/mean(dtm_colsum)
  freq = freq[1:min_word]
  wordcloud(words,
            freq,
            scale = c(8, 0.3),
            colors=1:10)
}
# Running all three functions together through piping
#nokia %>% clean_text_tokenizer() %>% dtm_creator() %>% plot_wrd_cloud
#nokia %>% clean_text_tokenizer() %>% tf_idf_creator() %>% plot_wrd_cloud
# Step-by-step version with intermediate inspection of each result.
cleaned_text <- clean_text_tokenizer(nokia)
head(cleaned_text)
dim(cleaned_text)
cleaned_dtm <- dtm_creator(cleaned_text)
head(cleaned_dtm)
class(cleaned_dtm)
cleaned_tfidf <- tf_idf_creator(cleaned_text)
head(cleaned_tfidf)
class(cleaned_tfidf)
# Word cloud of the count-based DTM.
plot_wrd_cloud(cleaned_dtm)
library(wordcloud)
# Co-occurrence graph (COG) plot: tokenize the corpus, count how often each
# pair of words appears in the same document, and draw a force-directed
# graph of the pairs that co-occur in at least `min_n` documents.
cog_plot <- function(text, min_n) {
  for (pkg in c("widyr", "ggplot2", "ggraph", "igraph")) {
    if (!require(pkg, character.only = TRUE)) {
      install.packages(pkg)
    }
  }
  tokens <- clean_text_tokenizer(text)
  word_pairs <- pairwise_count(tokens, word, document, sort = TRUE, upper = FALSE)
  frequent_pairs <- filter(word_pairs, n >= min_n)
  pair_graph <- graph_from_data_frame(frequent_pairs)
  ggraph(pair_graph, layout = "fr") +
    geom_edge_link(aes(edge_alpha = n, edge_width = n), edge_colour = "royalblue") +
    geom_node_point(size = 5) +
    geom_node_text(aes(label = name), repel = TRUE,
                   point.padding = unit(0.2, "lines")) +
    theme_void()
}
cog_plot(nokia,5)
|
b7a1ad0fb96f82a1ad793009f7da9a21bb5ee678
|
6cbb51fe996e65a51a8d9f2f35e3159721933f25
|
/R/scds_doubletdetection.R
|
21099e4ff17430945ac8085a1e24793990d5b194
|
[
"MIT"
] |
permissive
|
compbiomed/singleCellTK
|
927fb97e257ba89cddee9a90f9cb7cb375a5c6fb
|
990e89e7ccfbf663f23c793454f72fb8c6878a32
|
refs/heads/master
| 2023-08-11T09:17:41.232437
| 2023-07-26T20:43:47
| 2023-07-26T20:43:47
| 68,756,293
| 144
| 89
|
NOASSERTION
| 2023-09-06T18:22:08
| 2016-09-20T21:50:24
|
R
|
UTF-8
|
R
| false
| false
| 15,532
|
r
|
scds_doubletdetection.R
|
#' @title Find doublets/multiplets using \link[scds]{cxds}.
#' @description A wrapper function for \link[scds]{cxds}. Annotate
#' doublets/multiplets using co-expression based approach. Generate a doublet
#' score for each cell. Infer doublets if \code{estNdbl} is \code{TRUE}.
#' @param inSCE A \linkS4class{SingleCellExperiment} object.
#' @param sample Character vector or colData variable name. Indicates which
#' sample each cell belongs to. Default \code{NULL}.
#' @param seed Seed for the random number generator, can be \code{NULL}. Default
#' \code{12345}.
#' @param ntop See \link[scds]{cxds} for more information. Default \code{500}.
#' @param binThresh See \link[scds]{cxds} for more information. Default
#' \code{0}.
#' @param verb See \link[scds]{cxds} for more information. Default \code{FALSE}.
#' @param retRes See \link[scds]{cxds} for more information. Default
#' \code{FALSE}.
#' @param estNdbl See \link[scds]{cxds} for more information. Default
#' \code{FALSE}.
#' @param useAssay A string specifying which assay in the SCE to use. Default
#' \code{"counts"}
#' @details When the argument \code{sample} is specified, \link[scds]{cxds} will
#' be run on cells from each sample separately. If \code{sample = NULL}, then
#' all cells will be processed together.
#' @return A \linkS4class{SingleCellExperiment} object with \link[scds]{cxds}
#' output appended to the \link{colData} slot. The columns include
#' \emph{cxds_score} and optionally \emph{cxds_call}.
#' @seealso \code{\link[scds]{cxds}}, \code{\link{plotCxdsResults}},
#' \code{\link{runCellQC}}
#' @examples
#' data(scExample, package = "singleCellTK")
#' sce <- subsetSCECols(sce, colData = "type != 'EmptyDroplet'")
#' sce <- runCxds(sce)
#' @export
#' @importFrom SummarizedExperiment colData colData<- assay
#' @importFrom SingleCellExperiment counts<-
#' @importFrom S4Vectors metadata<-
runCxds <- function(
    inSCE,
    sample = NULL,
    seed = 12345,
    ntop = 500,
    binThresh = 0,
    verb = FALSE,
    retRes = FALSE,
    estNdbl = FALSE,
    useAssay = "counts")
{
  message(date(), " ... Running 'cxds'")
  ## Getting current arguments
  # Snapshot the call's arguments (minus the SCE itself); stored in metadata
  # below so the run is reproducible.
  argsList <- mget(names(formals()),sys.frame(sys.nframe()))
  argsList <- argsList[!names(argsList) %in% c("inSCE")]
  argsList$packageVersion <- utils::packageDescription("scds")$Version
  sample <- .manageCellVar(inSCE, var = sample)
  if (is.null(sample)) {
    # No sample annotation: treat all cells as a single sample.
    sample <- rep(1, ncol(inSCE))
  }
  ## Define result matrix for all samples
  if (isTRUE(estNdbl)) {
    output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
                                   cxds_score = numeric(ncol(inSCE)),
                                   cxds_call = logical(ncol(inSCE)))
  } else {
    output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
                                   cxds_score = numeric(ncol(inSCE)))
  }
  ## Loop through each sample and run cxds
  samples <- unique(sample)
  for (s in samples) {
    sceSampleInd <- sample == s
    sceSample <- inSCE[, sceSampleInd]
    mat <- assay(sceSample, i = useAssay)
    counts(sceSample) <- .convertToMatrix(mat)
    # Retry with 100 fewer top genes after each failure until cxds succeeds
    # or the gene budget is exhausted.
    result <- NULL
    nGene <- ntop
    while (!inherits(result, "SingleCellExperiment") & nGene > 0) {
      try({
        result <- .withSeed(seed, {
          scds::cxds(sce = sceSample,
                     ntop = nGene,
                     binThresh = binThresh,
                     verb = verb,
                     retRes = retRes,
                     estNdbl = estNdbl)
        })
      }, silent = TRUE)
      nGene <- nGene - 100
    }
    if (!inherits(result, "try-error") & !is.null(result)) {
      if ("cxds_call" %in% colnames(colData(result))) {
        output[sceSampleInd, ] <- colData(result)[, c("cxds_score",
                                                      "cxds_call")]
      } else {
        output[sceSampleInd, ] <- colData(result)[, c("cxds_score")]
      }
    } else {
      # Keep going for the remaining samples; mark this one as missing.
      output[sceSampleInd, ] <- NA
      warning("'cxds' from package 'scds' did not complete successfully ",
              "for sample: ", s)
    }
    if (!identical(samples, 1)) {
      metadata(inSCE)$sctk$runCxds[[s]] <- argsList
    }
  }
  if (identical(samples, 1)) {
    metadata(inSCE)$sctk$runCxds$all_cells <- argsList
  }
  # Drop any columns left over from a previous run, then append the results.
  colData(inSCE)[, paste0("scds_", colnames(output))] <- NULL
  if (isTRUE(estNdbl)) {
    # Recode the logical doublet call as a Singlet/Doublet factor.
    output$cxds_call <- as.factor(output$cxds_call)
    levels(output$cxds_call) <- list(Singlet = "FALSE", Doublet = "TRUE")
  }
  colnames(output) <- paste0("scds_", colnames(output))
  # Consistency fix: use `<-` like the rest of the file (was `=`).
  colData(inSCE) <- cbind(colData(inSCE), output)
  return(inSCE)
}
#' @title Find doublets/multiplets using \link[scds]{bcds}.
#' @description A wrapper function for \link[scds]{bcds}. Annotate
#' doublets/multiplets using a binary classification approach to discriminate
#' artificial doublets from original data. Generate a doublet
#' score for each cell. Infer doublets if \code{estNdbl} is \code{TRUE}.
#' @param inSCE A \linkS4class{SingleCellExperiment} object.
#' @param sample Character vector or colData variable name. Indicates which
#' sample each cell belongs to. Default \code{NULL}.
#' @param seed Seed for the random number generator, can be \code{NULL}. Default
#' \code{12345}.
#' @param ntop See \link[scds]{bcds} for more information. Default \code{500}.
#' @param srat See \link[scds]{bcds} for more information. Default \code{1}.
#' @param verb See \link[scds]{bcds} for more information. Default \code{FALSE}.
#' @param retRes See \link[scds]{bcds} for more information. Default
#' \code{FALSE}.
#' @param nmax See \link[scds]{bcds} for more information. Default
#' \code{"tune"}.
#' @param varImp See \link[scds]{bcds} for more information. Default
#' \code{FALSE}.
#' @param estNdbl See \link[scds]{bcds} for more information. Default
#' \code{FALSE}.
#' @param useAssay A string specifying which assay in \code{inSCE} to use.
#' Default \code{"counts"}
#' @return A \linkS4class{SingleCellExperiment} object with \link[scds]{bcds}
#' output appended to the \link{colData} slot. The columns include
#' \emph{bcds_score} and optionally \emph{bcds_call}. Please refer to the
#' documentation of \link[scds]{bcds} for details.
#' @details When the argument \code{sample} is specified, \link[scds]{bcds} will
#' be run on cells from each sample separately. If \code{sample = NULL}, then
#' all cells will be processed together.
#' @seealso \code{\link[scds]{bcds}}, \code{\link{plotBcdsResults}},
#' \code{\link{runCellQC}}
#' @examples
#' data(scExample, package = "singleCellTK")
#' sce <- subsetSCECols(sce, colData = "type != 'EmptyDroplet'")
#' sce <- runBcds(sce)
#' @export
#' @importFrom SummarizedExperiment colData colData<-
#' @importFrom SingleCellExperiment counts<-
#' @importFrom S4Vectors metadata<-
runBcds <- function(
    inSCE,
    sample = NULL,
    seed = 12345,
    ntop = 500,
    srat = 1,
    verb = FALSE,
    retRes = FALSE,
    nmax = "tune",
    varImp = FALSE,
    estNdbl = FALSE,
    useAssay = "counts"
) {
  message(date(), " ... Running 'bcds'")
  ## Getting current arguments
  # Snapshot the call's arguments (minus the SCE itself); stored in metadata
  # below so the run is reproducible.
  argsList <- mget(names(formals()),sys.frame(sys.nframe()))
  argsList <- argsList[!names(argsList) %in% c("inSCE")]
  argsList$packageVersion <- utils::packageDescription("scds")$Version
  sample <- .manageCellVar(inSCE, var = sample)
  if (is.null(sample)) {
    # No sample annotation: treat all cells as a single sample.
    sample <- rep(1, ncol(inSCE))
  }
  ## Define result matrix for all samples
  if (isTRUE(estNdbl)) {
    output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
                                   bcds_score = numeric(ncol(inSCE)),
                                   bcds_call = logical(ncol(inSCE)))
  } else {
    output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
                                   bcds_score = numeric(ncol(inSCE)))
  }
  ## Loop through each sample and run bcds
  samples <- unique(sample)
  for (s in samples) {
    sceSampleInd <- sample == s
    sceSample <- inSCE[, sceSampleInd]
    mat <- SummarizedExperiment::assay(sceSample, i = useAssay)
    counts(sceSample) <- .convertToMatrix(mat)
    # Retry with 100 fewer top genes after each failure until bcds succeeds
    # or the gene budget is exhausted.
    result <- NULL
    nGene <- ntop
    while (!inherits(result, "SingleCellExperiment") & nGene > 0) {
      try({
        result <- .withSeed(seed, {
          scds::bcds(sce = sceSample,
                     ntop = nGene,
                     srat = srat,
                     verb = verb,
                     retRes = retRes,
                     nmax = nmax,
                     varImp = varImp,
                     estNdbl = estNdbl
          )
        })
      }, silent = TRUE)
      nGene <- nGene - 100
    }
    if (!inherits(result, "try-error") & !is.null(result)) {
      if ("bcds_call" %in% colnames(colData(result))) {
        output[sceSampleInd, ] <- colData(result)[,c("bcds_score", "bcds_call")]
      } else {
        output[sceSampleInd, ] <- colData(result)[,c("bcds_score")]
      }
    } else {
      # Keep going for the remaining samples; mark this one as missing.
      output[sceSampleInd, ] <- NA
      warning("'bcds' from package 'scds' did not complete successfully for ",
              "sample: ", s)
    }
    if (!identical(samples, 1)) {
      metadata(inSCE)$sctk$runBcds[[s]] <- argsList
    }
  }
  if (identical(samples, 1)) {
    metadata(inSCE)$sctk$runBcds$all_cells <- argsList
  }
  # Drop any columns left over from a previous run, then append the results.
  colData(inSCE)[, paste0("scds_", colnames(output))] <- NULL
  if (isTRUE(estNdbl)) {
    # Recode the logical doublet call as a Singlet/Doublet factor.
    output$bcds_call <- as.factor(output$bcds_call)
    levels(output$bcds_call) <- list(Singlet = "FALSE", Doublet = "TRUE")
  }
  colnames(output) <- paste0("scds_", colnames(output))
  # Consistency fix: use `<-` like the rest of the file (was `=`).
  colData(inSCE) <- cbind(colData(inSCE), output)
  return(inSCE)
}
#' @title Find doublets/multiplets using \link[scds]{cxds_bcds_hybrid}.
#' @description A wrapper function for \link[scds]{cxds_bcds_hybrid}. Annotate
#' doublets/multiplets using a binary classification approach to discriminate
#' artificial doublets from original data. Generate a doublet
#' score for each cell. Infer doublets if \code{estNdbl} is \code{TRUE}.
#' @param inSCE A \link[SingleCellExperiment]{SingleCellExperiment} object.
#' Needs \code{counts} in assays slot.
#' @param sample Character vector. Indicates which sample each cell belongs to.
#' \link[scds]{cxds_bcds_hybrid} will be run on cells from each sample
#' separately. If NULL, then all cells will be processed together.
#' Default NULL.
#' @param seed Seed for the random number generator. Default 12345.
#' @param nTop The number of top varialbe genes to consider. Used in both \code{csds}
#' and \code{bcds}. Default \code{500}.
#' @param cxdsArgs See \link[scds]{cxds_bcds_hybrid} for more information. Default \code{NULL}.
#' @param bcdsArgs See \link[scds]{cxds_bcds_hybrid} for more information. Default \code{NULL}.
#' @param verb See \link[scds]{cxds_bcds_hybrid} for more information. Default \code{FALSE}.
#' @param estNdbl See \link[scds]{cxds_bcds_hybrid} for more information. Default \code{FALSE}.
#' @param force See \link[scds]{cxds_bcds_hybrid} for more information. Default \code{FALSE}.
#' @param useAssay A string specifying which assay in the SCE to use.
#' @return A \link[SingleCellExperiment]{SingleCellExperiment} object with
#' \link[scds]{cxds_bcds_hybrid} output appended to the
#' \link{colData} slot. The columns include
#' \emph{hybrid_score} and optionally \emph{hybrid_call}.
#' Please refer to the documentation of \link[scds]{cxds_bcds_hybrid} for
#' details.
#' @examples
#' data(scExample, package = "singleCellTK")
#' sce <- subsetSCECols(sce, colData = "type != 'EmptyDroplet'")
#' sce <- runCxdsBcdsHybrid(sce)
#' @export
#' @importFrom SummarizedExperiment colData colData<-
#' @importFrom SingleCellExperiment counts counts<-
runCxdsBcdsHybrid <- function(inSCE,
                              sample = NULL,
                              seed = 12345,
                              nTop = 500,
                              cxdsArgs = list(),
                              bcdsArgs = list(),
                              verb = FALSE,
                              estNdbl = FALSE,
                              force = FALSE,
                              useAssay = "counts")
{
  message(date(), " ... Running 'cxds_bcds_hybrid'")
  ## Getting current arguments
  # Snapshot the call's arguments (minus the SCE itself); stored in metadata
  # below so the run is reproducible.
  argsList <- mget(names(formals()),sys.frame(sys.nframe()))
  argsList <- argsList[!names(argsList) %in% c("inSCE")]
  argsList$packageVersion <- utils::packageDescription("scds")$Version
  sample <- .manageCellVar(inSCE, var = sample)
  if (is.null(sample)) {
    # No sample annotation: treat all cells as a single sample.
    sample <- rep(1, ncol(inSCE))
  }
  ## Define result matrix for all samples
  if (isTRUE(estNdbl)) {
    output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
                                   hybrid_score = numeric(ncol(inSCE)),
                                   hybrid_call = logical(ncol(inSCE)))
  } else {
    output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
                                   hybrid_score = numeric(ncol(inSCE)))
  }
  ## Loop through each sample and run cxds_bcds_hybrid
  samples <- unique(sample)
  for (s in samples) {
    sceSampleInd <- sample == s
    sceSample <- inSCE[, sceSampleInd]
    mat <- SummarizedExperiment::assay(sceSample, i = useAssay)
    counts(sceSample) <- .convertToMatrix(mat)
    # Get ntop from Args list if they are available, otherwise use
    # the 'ntop' parameter
    result <- NULL
    nGene.cxds <- nTop
    if (!is.null(cxdsArgs[["ntop"]])) {
      nGene.cxds <- cxdsArgs[["ntop"]]
      cxdsArgs[["ntop"]] <- NULL
    }
    nGene.bcds <- nTop
    if (!is.null(bcdsArgs[["ntop"]])) {
      nGene.bcds <- bcdsArgs[["ntop"]]
      bcdsArgs[["ntop"]] <- NULL
    }
    nGene <- min(nGene.cxds, nGene.bcds)
    # Retry with 100 fewer top genes (for both sub-methods) after each
    # failure until the call succeeds or the gene budget is exhausted.
    while (!inherits(result, "SingleCellExperiment") & nGene > 0) {
      try({
        result <- .withSeed(seed, {
          scds::cxds_bcds_hybrid(sce = sceSample,
                                 cxdsArgs = c(list(ntop = nGene.cxds),
                                              cxdsArgs),
                                 bcdsArgs = c(list(ntop = nGene.bcds),
                                              bcdsArgs),
                                 verb = verb,
                                 estNdbl = estNdbl,
                                 force = force)
        })
      }, silent = TRUE)
      nGene <- nGene - 100
      nGene.bcds <- nGene.bcds - 100
      nGene.cxds <- nGene.cxds - 100
    }
    if (!inherits(result, "try-error") & !is.null(result)) {
      if ("hybrid_call" %in% colnames(colData(result))) {
        output[sceSampleInd, ] <- colData(result)[, c("hybrid_score",
                                                      "hybrid_call")]
      } else {
        output[sceSampleInd, ] <- colData(result)[, c("hybrid_score")]
      }
    } else {
      # Keep going for the remaining samples; mark this one as missing.
      output[sceSampleInd, ] <- NA
      warning("'cxds_bcds_hybrid' from package 'scds' did not complete ",
              "successfully for sample: ", s)
    }
    if (!identical(samples, 1)) {
      metadata(inSCE)$sctk$runCxdsBcdsHybrid[[s]] <- argsList
    }
  }
  if (identical(samples, 1)) {
    metadata(inSCE)$sctk$runCxdsBcdsHybrid$all_cells <- argsList
  }
  # Drop any columns left over from a previous run, then append the results.
  colData(inSCE)[, paste0("scds_", colnames(output))] <- NULL
  if (isTRUE(estNdbl)) {
    # Recode the logical doublet call as a Singlet/Doublet factor.
    output$hybrid_call <- as.factor(output$hybrid_call)
    levels(output$hybrid_call) <- list(Singlet = "FALSE", Doublet = "TRUE")
  }
  colnames(output) <- paste0("scds_", colnames(output))
  colData(inSCE) <- cbind(colData(inSCE), output)
  return(inSCE)
}
|
ac1bea0a252d88c1972dc6812323464fb4506f70
|
2391fe78619ca01968bb6d093e2e1aecde28702e
|
/R/generate_trade_frequency.R
|
700fc8a4b18ff01b1440c3aed59097808ce2f46b
|
[] |
no_license
|
cran/TwitterAutomatedTrading
|
d08fbfa62721fb7a9689aec8a5d0f04c4b318908
|
fd6c8eb87f54eca08c8beb589a193155b462569c
|
refs/heads/master
| 2022-08-03T21:30:47.772296
| 2020-05-31T08:50:13
| 2020-05-31T08:50:13
| 268,360,556
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,207
|
r
|
generate_trade_frequency.R
|
#' generate_trade_frequency function
#'
#'
#'
#'
#' @param initial_time The time the algorithm starts trading.
#' @param final_time The time the algorithm ends trading.
#' @param freq_trade The frequency which the algorithm recalculates the sentiment index.
#'
#' @return A vector containing the hours of operation.
#' @export
#'
#' @examples
#' hours_candle_10 <- generate_trade_frequency(9,17,10)
#' #For example, for 17:30, you should use minutes/60, i.e. 17.5
#' hours_candle_20 <- generate_trade_frequency(9,17.5,10)
# Build the grid of trading times between initial_time and final_time,
# stepping every freq_trade minutes. Times are expressed as fractional
# hours (e.g. 17:30 -> 17.5). Returns a numeric vector of trading hours.
generate_trade_frequency <- function(initial_time, final_time, freq_trade){
  # Whole hours spanned by the trading window.
  hours_candlestick <- seq(from = initial_time, to = final_time, by = 1)
  # Minute offsets within an hour, as fractions of an hour. 60 is
  # included on purpose so each hour also yields the start of the next
  # hour; the duplicates are removed below.
  minutes_candlestick <- seq(from = 0, to = 60, by = freq_trade)/60
  # Every hour + every offset, in hour-major order (vectorised form of
  # the original accumulation loop; same values, same order).
  hours_candles_new <- unique(as.vector(vapply(
    hours_candlestick,
    function(h) h + minutes_candlestick,
    numeric(length(minutes_candlestick))
  )))
  # Locate the endpoints with a numeric tolerance instead of exact ==
  # (floating point: e.g. 9 + 20/60 is not exactly representable, so an
  # exact comparison could return integer(0) and corrupt the slice).
  idx_ini <- which(abs(hours_candles_new - initial_time) < 1e-9)[1]
  idx_fin <- which(abs(hours_candles_new - final_time) < 1e-9)[1]
  if (is.na(idx_ini) || is.na(idx_fin)) {
    stop("'initial_time' and 'final_time' must fall on the time grid ",
         "implied by 'freq_trade'", call. = FALSE)
  }
  hours_candles_new[idx_ini:idx_fin]
}
|
d7af6c6a5eed894938305ba8396c20e582f8d117
|
44d57d96e83b891b53f80118c4ae038a18d811ef
|
/LoadPackages_PA.R
|
e6ee36671b98b570b153644a30d2ababe90d262a
|
[] |
no_license
|
elhazen/PA-paper
|
fa85a4894d6d70e1269bf09682dcc7bcbc8ac118
|
9d3f9683097f74242dd884e0fdbf6f7abb4ee835
|
refs/heads/main
| 2023-04-12T14:55:20.456007
| 2023-03-31T19:24:30
| 2023-03-31T19:24:30
| 306,073,556
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
r
|
LoadPackages_PA.R
|
## Package loading for the PA analysis scripts.
## library() replaces require(): require() only warns and returns FALSE
## when a package is missing, letting the scripts continue and fail
## later with confusing errors, whereas library() stops immediately.
## Duplicate load calls from the original (spdep, dismo, gbm) were
## removed -- attaching a package twice is a no-op, so behaviour is
## unchanged and the relative order of first attachments is preserved.
library(adehabitatLT) # help(package='adehabitat') # help.search('angle',package='adehabitat')
library(maps)     # for map.where
library(mapdata)  # for worldHires
library(sp)
library(maptools)
library(mgcv)
library(ape)
library(ncf)
library(ncdf4)
library(spdep)
library(ROCR)
library(dismo)
library(gbm)
library(tidyverse)
library(stringr)
library(caret)
library(mlbench)
#library(lubridate)  # attached further down, as in the original
library(qmap)
#library(dplyr)      # kept commented as in the original (tidyverse attaches dplyr)
library(Hmisc)
library(ggplot2)
library(ggmap)
library(lubridate)
library(RColorBrewer)
library(viridis)
library(gridExtra)
library(scales)
library(rnaturalearth)
library(rgeos)
library(glue)
library(reshape2)
library(mgcViz)
|
5550a761da5892d750879c3444e0a37eb6b24b7b
|
d4d02954f1567a20eb43fd7acd8b56e2c5423119
|
/finance/stockAnalytics/tests/testthat/test.downloadExchangeCompanyList.R
|
956ddc4fda9cad7a224fb2819bc49a1b6d3d76fa
|
[] |
no_license
|
shanaka-desoysa/datascience
|
68f6a940dbb8f4ae34ed6133ed02af26ff3772d9
|
e3316e5db76ee8c2efcd7848fb7812e5bce9b56b
|
refs/heads/master
| 2020-09-21T19:33:20.953759
| 2016-10-07T21:01:51
| 2016-10-07T21:01:51
| 66,153,460
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,301
|
r
|
test.downloadExchangeCompanyList.R
|
context("downloadExchangeCompanyList")

# Remove the scratch data folder (and everything in it) if it exists.
# Extracted because every test repeated the same cleanup; also fixes
# the T -> TRUE anti-pattern of the original.
remove_data_folder <- function(folder) {
  if (file.exists(folder)) {
    unlink(folder, recursive = TRUE, force = TRUE)
  }
}

# Assert that `actual` (the value returned by downloadExchangeCompanyList)
# names a well-formed company-list CSV for `exchange` inside `folder`:
# correct "<EXCHANGE>-companylist.csv" name, file present, and the
# Symbol and Name columns available. Extracted from the two output-file
# tests, which duplicated these expectations verbatim.
expect_company_list_file <- function(actual, exchange, folder) {
  expected <- file.path(folder, paste0(exchange, "-companylist.csv"))
  expect_equal(actual, expected, info = "File name is not in correct format.")
  expect_true(file.exists(expected), info = "File does not exist.")
  csvFile <- read.csv(expected)
  expect_true("Symbol" %in% colnames(csvFile),
              info = "Column Symbol does not exist")
  expect_true("Name" %in% colnames(csvFile),
              info = "Column Name does not exist")
}

test_that("downloadExchangeCompanyList: Data folder was created.", {
  # Unit Test 1: Check if data folder was created.
  exchange <- "NASDAQ"
  dataFolder <- "./datatest"
  # Start from a clean slate so the creation is attributable to the call.
  remove_data_folder(dataFolder)
  downloadExchangeCompanyList(exchange, dataFolder)
  # (typo "creaed" in the original message fixed)
  expect_true(file.exists(dataFolder),
              "Unit Test 1: DataFolder was not created.")
  remove_data_folder(dataFolder)
})

test_that("downloadExchangeCompanyList: Single output file.", {
  exchange <- "NASDAQ"
  dataFolder <- "./datatest"
  actual <- unlist(downloadExchangeCompanyList(exchange, dataFolder))
  expect_company_list_file(actual, exchange, dataFolder)
  remove_data_folder(dataFolder)
})

test_that("downloadExchangeCompanyList: Multiple output files.", {
  exchanges <- c("NASDAQ", "NYSE")
  dataFolder <- "./datatest"
  lapply(exchanges, function(exchange) {
    actual <- unlist(downloadExchangeCompanyList(exchange, dataFolder))
    expect_company_list_file(actual, exchange, dataFolder)
  })
  remove_data_folder(dataFolder)
})
|
89b11348c729975812dc0877542a0e2bc2d6c3d3
|
5111b402fec416a6cf3c4517d097eb4e600921e2
|
/tests/testthat/test.R
|
9f03c63a834a969bd68c07b960c74663bc2f9d11
|
[] |
no_license
|
Pratheek-Dev/circles
|
884e4385411e7b5a8e69af82ee6e8542a5aeb50a
|
158faf674cbbd02fdca7965b3f685578edc77994
|
refs/heads/master
| 2021-01-13T15:52:05.838499
| 2016-12-19T20:46:51
| 2016-12-19T20:46:51
| 76,830,569
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
r
|
test.R
|
# Tests written by Pratheek Devaraj
library(testthat)
library(circles)

context("testing circle methods")

# Smoke checks: construct a circle and call both generics so that an
# error in any of them fails the file immediately (no expectations).
circle1 <- circle(3)
area(circle1)
circumference(circle1)

test_that(" creating object", {
  # expect_s3_class() replaces expect_is(), which is deprecated in
  # testthat 3e.
  expect_s3_class(circle(3), "circle")
})

test_that(" testing empty inputs", {
  # No radius supplied must be an error.
  expect_error(circle())
})

test_that(" testing negative arguments", {
  # A negative radius must be rejected.
  expect_error(circle(-3))
})

# Kept (disabled) from the original file: the hard-coded expectations
# use pi ~ 3.14 and would need an explicit tolerance to pass reliably.
# test_that(" testing positive arguments of area", {
#   expect_equal(area(circle(3)), 28.26)
# })
#
# test_that(" testing positive arguments of circumference", {
#   expect_equal(circumference(circle(3)), 18.84, tolerance = 0.5)
# })
|
2e6e89c888b004890dde39b783e01bbed793ee3b
|
0834f80c8feaf23b376331c246f2c75f5897e8e5
|
/man/most_frequent_service.Rd
|
69d98b124372276a66ec80e9977ff85607f51278
|
[] |
no_license
|
r-transit/bustt
|
d5ef553bc06e996de879d30f252b2eee3b069451
|
fa4312e3d0c4632402f94517d0c92dc45c51bab7
|
refs/heads/master
| 2020-03-22T02:17:18.128569
| 2018-08-23T04:18:15
| 2018-08-23T04:18:15
| 139,361,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 471
|
rd
|
most_frequent_service.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/service.R
\name{most_frequent_service}
\alias{most_frequent_service}
\title{Get the most frequent service for a set of trips.}
\usage{
most_frequent_service(trips)
}
\arguments{
\item{trips}{gtfs dataframe with a service_trips count column}
}
\value{
the trips that are in the most frequent service window
}
\description{
Get the most frequent service for a set of trips.
}
\keyword{internal}
|
370e2e59c2fc57777eea89517faa74b3be99f1b5
|
f03f07b05c4e0e2e86eb9ca2de1c9e8880dc5152
|
/data_visualization/exp1/exp1_plant_biomass_vs_respiration.r
|
c236033ee7eec6c20db3a206a7fb663e4a4caf98
|
[] |
no_license
|
colinaverill/suillus_ecosystem
|
3decb69dc90d149016e298f2f521e3f6b272ec0f
|
9e22c2e28c1f653bec2ac3f6c64654939bc38aa2
|
refs/heads/master
| 2021-06-12T04:26:44.960751
| 2021-03-30T07:34:41
| 2021-03-30T07:34:41
| 151,416,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,755
|
r
|
exp1_plant_biomass_vs_respiration.r
|
#check aboveground biomass vs. belowground biomass vs. plant-derived soil respiration values Experiment 1.
rm(list=ls())  # NOTE(review): clearing the global env kept for parity with the original script
source('paths.r')

#load data.----
d <- readRDS(Duke_2017_exp1_to_analyze.path)

# plot_loglog_panel(): draw one log10-log10 scatter panel with an OLS
# fit line and an R^2 annotation in the lower-right corner.
# Replaces the three copy-pasted plotting sections of the original.
#   y, x : numeric vectors (response, predictor); must be positive.
#   labs : character(2), c(y-axis label, x-axis label).
# Returns the fitted lm object invisibly.
plot_loglog_panel <- function(y, x, labs) {
  dat  <- data.frame(y, x)
  mod1 <- lm(log10(y) ~ log10(x), data = dat)
  plot(log10(y) ~ log10(x), bty = 'l', ylab = NA, xlab = NA, data = dat)
  abline(mod1, lwd = 2)
  rsq <- round(summary(mod1)$r.squared, 2)
  mtext(paste0('R2=', rsq), side = 1, line = -2, adj = 0.95)
  mtext(labs[1], side = 2, line = 3, cex = 1.2)
  mtext(labs[2], side = 1, line = 3, cex = 1.2)
  invisible(mod1)
}

#make plot.---- three panels side by side.
par(mfrow = c(1,3), mar = c(5,5,1,1))

#plot 1. Belowground respiration ~ aboveground biomass.
plot_loglog_panel(d$plant_ugC_h, d$above_mass,
                  c('plant-derived soil respiration', 'aboveground biomass'))

#plot 2. Belowground respiration ~ belowground biomass.
plot_loglog_panel(d$plant_ugC_h, d$below_mass,
                  c('plant-derived soil respiration', 'belowground biomass'))

#plot 3. aboveground biomass ~ belowground biomass.
plot_loglog_panel(d$above_mass, d$below_mass,
                  c('aboveground biomass', 'belowground biomass'))
|
0725ff42f982fe769d20d64f8c02f8ed4c91a808
|
1c535fae4dee5e1f577cbf67933ed33298d0c4a4
|
/sesion_6/retos/producto_1/reto_final_1.R
|
af289e467df21c202845e1dc93b7f1d6a0819597
|
[] |
no_license
|
manuelrojo89/ecoinformatica_2014_2015
|
5d943deeab37c56250420ac96f45513a96556a9a
|
c4484539189349f191c1b933f2bbb578196f7a20
|
refs/heads/master
| 2021-01-18T16:48:49.709432
| 2015-02-09T01:32:59
| 2015-02-09T01:32:59
| 30,033,509
| 0
| 0
| null | 2015-01-29T18:01:26
| 2015-01-29T18:01:24
|
JavaScript
|
UTF-8
|
R
| false
| false
| 6,258
|
r
|
reto_final_1.R
|
#Caracterización del funcionamiento del ecosistema y de las variables ambientales que
#lo condicionan. Gracias a la gran cantidad de información procedente de sensores
#remotos, es posible conocer la evolución temporal de variables como la cobertura de
#nieve, índice de vegetación o temperatura del suelo en los distintos tipos de
#ecosistemas diana. En concreto evaluaremos la tendencia en la cobertura de nieve
#para los robledales de Sierra Nevada. Se trata de caracterizar la tendencia en la
#duración de la nieve para cada píxel ocupado por robledal y año hidrológico. El
#producto final será un mapa que muestre las tendencias en la duración de la nieve
#para cada píxel en toda la serie temporal (2000-2012).
#He calculado la tendencias de NDVI y de NIEVE, ya que son los mismo, aunque en el
#sólo se pedía el de nieve, pero quería asegurarme de saber hacerlo bien y haber
#entendido correctamente el proceso
#PARA EL NDVI
#Vamos a calcular tendencias, cargamos kendall
library('Kendall')
#Primero cargamos la capa del ndvi y le damos un nombre
datos <- read.csv("ndvi_robledal.csv", sep=";")
str(datos)
head(datos)
#Creamos un data.frame donde se van a acumular los datos que vayamos obteniendo
tabla <- data.frame()
#Creamos una tabla auxiliar donde guardamos los datos de cada pixel (iv_malla_modi_id)
#el valor de tau que nos da kendal y el pvalue
tabla_aux <- data.frame(iv_malla_modi_id=NA,
tau=NA,
pvalue=NA)
#Definimos una variable unica para el pixel
pixeles <- unique(datos$iv_malla_modi_id)
#Creamos el bucle que nos analice cada pixel en cada año
for (j in pixeles){
#Creamos variable de pixeles para que el bucle lea cada pixel de j
pixel <-datos[datos$iv_malla_modi_id==j,]
#haces el mannkendall para obtener los valores de tau y p para el ndvi
kendal <- MannKendall(pixel$ndvi_i)
#Asignas a cada uno de los valores que queremos para nuestra tabla (j que es el
#pixel unico, tau y pvalue) un nombre
tabla_aux$iv_malla_modi_id <- j
tabla_aux$tau <- kendal$tau[1]
tabla_aux$pvalue <- kendal$sl[1]
#el rbind nos pasa los valores de tabla_aux a tabla
tabla <- rbind(tabla,tabla_aux)
}
# Selecciono las columnas que me interesan: "ndvi_i", "lat" y "lng" para pintarlas
#en el mapa final.
datospintar <- datos[,c(1,4:5)]
coordenadas<- unique(datospintar)
#Unimos los datos de la tabla y las coordenadas por el campo iv_malla_modi-id con el
#mismo nombre que le dais en el script a la tabla que se debe pintar (tendencias)
tendencias <- join(tabla, coordenadas, "iv_malla_modi_id")
#Representamos el mapa con el script que nos habéis proporcionado
library(sp)
library(rgdal)
library(classInt)
library(RColorBrewer)
## definimos las coordenadas de los puntos
coordinates(tendencias) =~lng+lat
## definimos el sistema de coordenadas WGS84
proj4string(tendencias)=CRS("+init=epsg:4326")
## partimos los valores de tau en 5 clases
clases <- classIntervals(tendencias$tau, n = 5)
## obtenemos cinco colores para una paleta de colores que se llama "Spectral"
plotclr <- rev(brewer.pal(5, "Spectral"))
## Asociamos los valores de tau a su valor correspondiente
colcode <- findColours(clases, plotclr)
## plot sin tener en cuenta
plot(tendencias, col=colcode, pch=19, cex = .6, main = "Mapa de tendencias")
## mostramos la leyenda
legend("topright", legend=names(attr(colcode, "table")), fill=attr(colcode, "palette"), bty="n")
# --- Snow-cover trend analysis ------------------------------------------
# Mann-Kendall trend of the snow-cover duration ("scd") per oak-woodland
# pixel, then a map of the per-pixel tau values.
# NOTE(review): the original contained unresolved git merge-conflict
# markers (<<<<<<< / ======= / >>>>>>>) which made the script
# unparseable; they are resolved here in favour of the HEAD version,
# whose comments match the "scd" column actually used.
datosnieve <- read.csv("nieve_robledal.csv", sep=";")
str(datosnieve)
head(datosnieve)

# One Mann-Kendall test per pixel, rows collected in a pre-allocated
# list and bound once at the end (avoids rbind() growth in a loop).
pixeles <- unique(datosnieve$nie_malla_modi_id)
resultados <- vector("list", length(pixeles))
for (k in seq_along(pixeles)) {
  j <- pixeles[k]
  # All snow-cover-duration observations for this pixel.
  pixel <- datosnieve[datosnieve$nie_malla_modi_id==j,]
  # Mann-Kendall test on the snow-cover-duration series.
  kendal <- MannKendall(pixel$scd)
  resultados[[k]] <- data.frame(nie_malla_modi_id = j,
                                tau = kendal$tau[1],
                                pvalue = kendal$sl[1])
}
tabla <- do.call(rbind, resultados)

# Keep pixel id plus its coordinates ("lat", "lng") for plotting.
datospintar <- datosnieve[,c(2,10:11)]
coordenadas <- unique(datospintar)

# Attach coordinates to the per-pixel trend results.
library(plyr)  # provides join(); the original script never loaded it
tendencias <- join(tabla, coordenadas, "nie_malla_modi_id")

# --- Map of the snow-cover trends --------------------------------------
library(sp)
library(rgdal)
library(classInt)
library(RColorBrewer)
## define the point coordinates
coordinates(tendencias) =~lng+lat
## WGS84 coordinate system
proj4string(tendencias)=CRS("+init=epsg:4326")
## split the tau values into 5 classes
clases <- classIntervals(tendencias$tau, n = 5)
## five colours from the (reversed) "Spectral" palette
plotclr <- rev(brewer.pal(5, "Spectral"))
## map each tau value to its class colour
colcode <- findColours(clases, plotclr)
## draw the points
plot(tendencias, col=colcode, pch=19, cex = .6, main = "Mapa de tendencias")
## show the legend
legend("topright", legend=names(attr(colcode, "table")), fill=attr(colcode, "palette"), bty="n")
|
f97e31ec243c626425c167cf6bb47663edcdce5b
|
5b0fe4dac9cc2af98c5656d5af61257650e85cd5
|
/man/rst_muffle.Rd
|
d04bab8abf56e603dc9bb280e2c6d51b4dc645de
|
[] |
no_license
|
EdwinTh/rlang
|
015ba2043483615978afb6b52db7721a85ebf802
|
01156007ca44023e14335d0f7191a6e60a478499
|
refs/heads/master
| 2021-06-29T01:25:13.103152
| 2017-09-18T15:37:40
| 2017-09-18T15:37:40
| 103,949,007
| 1
| 0
| null | 2017-09-18T14:22:50
| 2017-09-18T14:22:50
| null |
UTF-8
|
R
| false
| true
| 2,433
|
rd
|
rst_muffle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnd-restarts.R
\name{rst_muffle}
\alias{rst_muffle}
\title{Jump to a muffling restart}
\usage{
rst_muffle(c)
}
\arguments{
\item{c}{A condition to muffle.}
}
\description{
Muffle restarts are established at the same location as where a
condition is signalled. They are useful for two non-exclusive
purposes: muffling signalling functions and muffling conditions. In
the first case, \code{rst_muffle()} prevents any further side effects of
a signalling function (a warning or message from being displayed,
an aborting jump to top level, etc). In the second case, the
muffling jump prevents a condition from being passed on to other
handlers. In both cases, execution resumes normally from the point
where the condition was signalled.
}
\examples{
side_effect <- function() cat("side effect!\\n")
handler <- inplace(function(c) side_effect())
# A muffling handler is an inplace handler that jumps to a muffle
# restart:
muffling_handler <- inplace(function(c) {
side_effect()
rst_muffle(c)
})
# You can also create a muffling handler simply by setting
# muffle = TRUE:
muffling_handler <- inplace(function(c) side_effect(), muffle = TRUE)
# You can then muffle the signalling function:
fn <- function(signal, msg) {
signal(msg)
"normal return value"
}
with_handlers(fn(message, "some message"), message = handler)
with_handlers(fn(message, "some message"), message = muffling_handler)
with_handlers(fn(warning, "some warning"), warning = muffling_handler)
# Note that exiting handlers are thrown to the establishing point
# before being executed. At that point, the restart (established
# within the signalling function) does not exist anymore:
\dontrun{
with_handlers(fn(warning, "some warning"),
warning = exiting(function(c) rst_muffle(c)))
}
# Another use case for muffle restarts is to muffle conditions
# themselves. That is, to prevent other condition handlers from
# being called:
undesirable_handler <- inplace(function(c) cat("please don't call me\\n"))
with_handlers(foo = undesirable_handler,
with_handlers(foo = muffling_handler, {
cnd_signal("foo", mufflable = TRUE)
"return value"
}))
# See the `mufflable` argument of cnd_signal() for more on this point
}
\seealso{
The \code{muffle} argument of \code{\link[=inplace]{inplace()}}, and the \code{mufflable}
argument of \code{\link[=cnd_signal]{cnd_signal()}}.
}
|
2b45373aa8d289918359f3deea924777b4181ff8
|
11365780c328f089d72facb5f88d182f5eb3f4dc
|
/UpdateSubset.R
|
3592ee710c68b9b91ec913ff98b28e6223ab7781
|
[] |
no_license
|
helenphillips/UpdateSubsets
|
5ad0cc9a38ef3113bddc6232f4a432c0d3996c07
|
4b10e6b880b4fba73aad49af63f6196c53b3192f
|
refs/heads/master
| 2020-05-17T12:15:00.853367
| 2013-08-15T15:37:43
| 2013-08-15T15:37:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
UpdateSubset.R
|
## 15th August 2013
## Helen Phillips
## Working direction is where original pixels were downloaded
## LoadDat would need a lat, long, end.date column.
# UpdateSubsets(): given a table of requested MODIS subsets (columns
# `lat`, `long`, `end.date`) and a directory of already-downloaded pixel
# files (*.asc), return the rows of LoadDat that have NOT yet been
# downloaded.
#
# Args:
#   LoadDat: data.frame with `lat`, `long` and `end.date` columns;
#            `end.date` may be POSIXlt or a plain numeric year.
#   Dir:     directory holding the previously downloaded .asc files.
#
# Returns: the subset of LoadDat still to be downloaded.
UpdateSubsets <- function(LoadDat, Dir = ".")
{
  ## Normalise end.date to a calendar year (inherits() replaces the
  ## fragile class(x) == "POSIXlt" comparison).
  end.date <- LoadDat$end.date
  if (inherits(end.date, "POSIXlt")) {
    year <- end.date$year + 1900
  } else {
    year <- end.date
  }
  ## One "lat long year" key per row of LoadDat. (The original wrapped
  ## this in unique(), which mis-aligned keys with rows whenever LoadDat
  ## contained duplicate subsets and then indexed the wrong rows.)
  all.subsets <- paste(LoadDat$lat, LoadDat$long, year)

  ## Previously downloaded MODIS pixel files. full.names = TRUE fixes a
  ## bug: the original listed files in Dir but read them relative to the
  ## working directory, so any Dir != "." failed. The anchored pattern
  ## also stops ".asc" matching arbitrary characters before "asc".
  filelist <- list.files(path = Dir, pattern = "\\.asc$", full.names = TRUE)

  ## Build the "lat long endyear" key of each downloaded file. vapply()
  ## over the file list also handles an empty directory cleanly (the
  ## original's 1:length(filelist) loop errored on zero files).
  downloaded <- vapply(filelist, function(f) {
    ds <- read.csv(f, header = FALSE)
    names(ds) <- c("row.id", "land.product.code", "MODIS.acq.date",
                   "where", "MODIS.proc.date", 1:(ncol(ds) - 5))
    ## The `where` field looks like "Lat<lat>Lon<long>Samp...": parse
    ## the coordinates out of it by the marker positions.
    wS <- regexpr("Samp", ds$where[1])
    wlong <- regexpr("Lon", ds$where[1])
    lat <- as.numeric(substr(ds$where[1], 4, wlong - 1))
    long <- as.numeric(substr(ds$where[1], wlong + 3, wS - 1))
    ## Acquisition dates look like "A2005123"; chars 2-5 are the year.
    dsyear <- as.numeric(substr(ds$MODIS.acq.date, 2, 5))
    paste(lat, long, max(dsyear))
  }, character(1), USE.NAMES = FALSE)

  ## Keep only the requested subsets not already on disk.
  revised.subsets <- LoadDat[!(all.subsets %in% downloaded), ]
  return(revised.subsets)
}
|
be387ade6f8616c0f6466608287e4b0c43a0e9aa
|
3563523e5a07de4d2f459dcecc388d5110831941
|
/docs/airquality_app.R
|
e4674553cfb267c7ca5d2a4ae83f648e6b6b035c
|
[] |
no_license
|
corytu/r-language-presentations
|
df8e37105f797ec93bfe162f4ed64e75ab7c83bb
|
fe234c79597e57f421308a3b33aeeb79ad62804e
|
refs/heads/master
| 2022-02-13T02:16:11.570881
| 2019-07-27T09:53:28
| 2019-07-27T09:53:28
| 81,834,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,099
|
r
|
airquality_app.R
|
library(shiny)
library(ggplot2)

# UI: one sidebar with a month selector, a variable selector and a
# submit button; the main panel shows the resulting line chart.
ui <- fluidPage(
  # Application title
  titlePanel("New York Air Quality Measurements, May to September 1973"),
  sidebarLayout(
    sidebarPanel(
      # May..September only -- the months present in the airquality data.
      selectInput(
        "month", "Select the month",
        choices = month.name[5:9]
      ),
      # The four measurement columns of airquality (Ozone, Solar.R,
      # Wind, Temp).
      selectInput(
        "obs", "Select one variable",
        choices = names(airquality)[1:4]
      ),
      # actionButton can be a better choice than submitButton
      submitButton("Confirm"),
      helpText("1. Select one month and one variable which interest you"),
      helpText("2. Click \"Confirm\""),
      helpText("3. The line graph (with a blue trend curve) of the chosen variable versus days of the month will be showed"),
      tags$i(helpText("Data from R built-in airquality data set")),
      HTML("<div><b>Ozone:</b><br>
           Mean ozone in parts per billion from 1300 to 1500 hours at Roosevelt Island<br>
           <b>Solar.R:</b><br>
           Solar radiation in Langleys in the frequency band 4000-7700 Angstroms from 0800 to 1200 hours at Central Park<br>
           <b>Wind:</b><br>
           Average wind speed in miles per hour at 0700 and 1000 hours at LaGuardia Airport<br>
           <b>Temp:</b><br>
           Maximum daily temperature in degrees Fahrenheit at La Guardia Airport</div>")
    ),
    # Show a plot of the selected condition
    mainPanel(
      plotOutput("airplot")
    )
  )
)

# Per-variable y-axis labels, keyed by the column names offered in the
# "obs" selector. NOTE(review): "Maximun" is a typo in the displayed
# label; left unchanged here because it is runtime output.
ylab_list <- list(
  Ozone = "Mean Ozone (ppb)",
  Solar.R = "Solar Radiation (Langleys)",
  Wind = "Average Wind Speed (miles/hour)",
  Temp = expression(paste("Maximun Daily Temperature (", degree ~ F, ")", sep = ""))
)

# Define server logic required to draw a line chart
server <- function(input, output) {
  # Month name -> month number (5..9), recomputed on input change.
  month_selected <- reactive({match(input$month, month.name)})
  output$airplot <- renderPlot({
    # NOTE(review): aes_string() is deprecated in current ggplot2;
    # consider .data[[input$obs]] when modernising.
    ggplot(aes_string(x = "Day", y = input$obs),
           data = subset(airquality, Month == month_selected())) +
      geom_line(size = 1) + geom_smooth() +
      labs(list(x = "Day of Month", y = ylab_list[[input$obs]]))
  })
}

shinyApp(ui = ui, server = server)
|
c175cc70972fa56590426423c7334b2ea5f34149
|
42a40d1f9c44007bd0a37b3daa4b975d799e35de
|
/man/ThreePlusThreeDesign.Rd
|
adbaf2949e569c384bd8776ec8ee532d72f8a772
|
[] |
no_license
|
cran/crmPack
|
cb11c2a9a49c7662206ad2e7a576f854010949a2
|
2325e3fef8dbfd4d68f0fd918bf377d27dfff573
|
refs/heads/master
| 2022-09-27T13:51:37.214576
| 2022-09-02T22:00:11
| 2022-09-02T22:00:11
| 48,078,571
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 650
|
rd
|
ThreePlusThreeDesign.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Design-class.R
\name{ThreePlusThreeDesign}
\alias{ThreePlusThreeDesign}
\title{Creates a new 3+3 design object from a dose grid}
\usage{
ThreePlusThreeDesign(doseGrid)
}
\arguments{
\item{doseGrid}{the dose grid to be used}
}
\value{
the object of class \code{\linkS4class{RuleDesign}} with the
3+3 design
}
\description{
Creates a new 3+3 design object from a dose grid
}
\examples{
# initializing a 3+3 design
myDesign <- ThreePlusThreeDesign(doseGrid=c(5, 10, 15, 25, 35, 50, 80))
}
\author{
Daniel Sabanes Bove \email{sabanesd@roche.com}
}
\keyword{programming}
|
46ddbda68cb244e4e72b3fdea9d8c0c3e73ffd5d
|
2764167b5743be62adadc491ec7dfde210e0703d
|
/R/OverTurned.R
|
183787512e400565c8d43b78f1f3596dd7c0b1cb
|
[] |
no_license
|
cran/GEOmap
|
528a4cbe293211d324405037eb280b415e65f62e
|
0149894022496cee8237868b0bb693d00ef01e41
|
refs/heads/master
| 2023-08-18T14:47:52.021469
| 2023-08-13T12:40:21
| 2023-08-13T13:30:31
| 17,713,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,072
|
r
|
OverTurned.R
|
# OverTurned(): draw an overturned-fold style map symbol along the
# polyline (x, y): the line itself plus N horseshoe markers (produced by
# horseshoe()) with arrows on their limbs; the arrow direction flips
# with `syn`.
#
# Args:
#   x, y    polyline coordinates.
#   syn     TRUE/FALSE selects which way the limb arrows point.
#   spacing unused; kept so the signature stays backward compatible.
#   N       number of symbols placed along the line (via PointsAlong()).
#   r1, r2  inner/outer radii passed to horseshoe().
#   h1, h2  horseshoe leg parameters; h1 also scales the arrow heads.
#   endtol  end tolerance passed to PointsAlong().
#   REV     reverse the digitising direction of the line first.
#   col     colour for line, symbols and arrows.
#   ...     further graphical parameters passed on to lines().
`OverTurned` <-
function(x,y, syn=TRUE, spacing=NULL, N=1, r1= 1, r2= 1.2, h1= .5, h2= .5, endtol=.1,
                       REV=FALSE, col='black', ...)
  {
    ## (The original re-assigned every defaulted argument inside
    ## missing() guards; those were no-ops and have been removed.)
    if (REV) { x <- rev(x); y <- rev(y) }

    ## Anchor points (and local rotations) for the N symbols.
    g <- PointsAlong(x, y, N = N, endtol = endtol)

    ## Base line of the fold axis.
    lines(x, y, col = col, ...)

    ## Horseshoe symbols at the anchor points.
    HOR <- horseshoe(g$x, g$y, r1 = r1, r2 = r2, h2 = h2, h1 = h1,
                     rot = g$rot, col = col)

    ## Scale h1 into user units via the device geometry; the result is
    ## handed to arrows()'s `length` argument (as in the original --
    ## note arrows() documents `length` in inches, so this is an
    ## approximate scaling inherited from the original code).
    pin <- par("pin")
    u <- par("usr")
    umm <- (u[4] - u[3]) / pin[2] / 25.4
    lenarr <- umm * h1

    if (syn == FALSE) {
      ## Arrows drawn outward from the first/last points of each limb.
      for (i in seq_along(HOR)) {
        hx <- HOR[[i]]$x
        hy <- HOR[[i]]$y
        m <- length(hx)
        arrows(hx[1], hy[1], hx[2], hy[2], col = col, length = lenarr)
        arrows(hx[m], hy[m], hx[m - 1], hy[m - 1], col = col, length = lenarr)
      }
    } else {
      ## Arrows drawn in the opposite direction (towards the endpoints).
      for (i in seq_along(HOR)) {
        hx <- HOR[[i]]$x
        hy <- HOR[[i]]$y
        m <- length(hx)
        arrows(hx[2], hy[2], hx[1], hy[1], col = col, length = lenarr)
        arrows(hx[m - 1], hy[m - 1], hx[m], hy[m], col = col, length = lenarr)
      }
    }
  }
|
b1cd1dbbc3c4c54ff330adc3a44140651daf50f3
|
ce72162d2546955b5f65edb587436928481692e5
|
/shiny展示/shiny_timeseries/ui.R
|
d7d39371ed2ca36218293af974832a559f95ef31
|
[] |
no_license
|
zackery65/ml-learn-r
|
0f493b0e56586a638341c9e35692b8bb42c32262
|
a6cf7045e3af0118945c320b6163dff83e437621
|
refs/heads/master
| 2021-01-23T02:54:56.202411
| 2017-05-08T06:16:04
| 2017-05-08T06:16:04
| 86,035,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
ui.R
|
library(shiny)
library(rCharts)

# UI definition for the operations-forecast dashboard: a navbar with a
# chart tab (Morris.js charts via rCharts, driven by a forecast-horizon
# slider) and a raw-data table tab. All user-visible strings are kept
# exactly as in the original (Chinese labels).
navbarPage("运营数据预测",
            theme = shinythemes::shinytheme("yeti"),
            tabPanel("预测图表",
              sidebarPanel(
                # Forecast horizon in days: 1..31, default 1.
                sliderInput("slider","预测天数:",1,31,1)
              ),
              mainPanel(
                # Summary text rendered by the server as an h4 heading.
                textOutput('tx', container=h4),
                # One Morris chart per metric (per-city, per-vehicle,
                # users/drivers, GMV, average order value).
                h3('分城市数据'),
                showOutput('citys',lib="morris"),
                h3('分车型数据'),
                showOutput('car',lib="morris"),
                h3('用户和司机'),
                showOutput('user',lib="morris"),
                h3('GMV'),
                showOutput('gmv',lib="morris"),
                h3('客单价'),
                showOutput('price',lib="morris")
              )
            ),
            tabPanel("预测数据",
              # Raw forecast table backing the charts.
              tableOutput("view")
            )
)
|
6a688672f24657337f87785ef16c1bd75a5ce1c9
|
ea9c06b25ee8b3fcee3e5ba315d9e021cbbd0b1a
|
/R/test.R
|
b60c4603023f31dd6f2e0122c369d171db241b16
|
[] |
no_license
|
kmaheshkulkarni/widgetdown
|
48c3841b7e299c906c1331ff6c8a9c9a050a1d66
|
20f189c0d7da9d44851d78281284862812312c0c
|
refs/heads/master
| 2020-09-09T13:03:15.484869
| 2018-06-29T12:56:06
| 2018-06-29T12:56:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,606
|
r
|
test.R
|
# Test-case documentation topics. Each roxygen block below is attached
# to NULL so that roxygen2 generates a help topic (used to exercise
# rendering of widgets, lists, links, figures, \dontrun and params)
# without creating any R object.

#' Test case: widgets
#'
#' @name test-widgets
#' @family tests
#' @examples
#' if (requireNamespace("htmltools", quietly = TRUE)) {
#'   htmlwidgets::setWidgetIdSeed(42)
#' }
#'
#' library(leaflet)
#' leaflet() %>%
#'   addTiles() %>%
#'   addMarkers(
#'     lng=174.768,
#'     lat=-36.852,
#'     popup="The birthplace of R"
#'   )
#'
#' library(heatmaply)
#' heatmaply(mtcars, k_row = 3, k_col = 2)
#'
#' library(plotly)
#' plot_ly(midwest,
#'   x = ~percollege,
#'   color = ~state,
#'   type = "box"
#' )
#'
#' library(DiagrammeR)
#' grViz("
#'   digraph {
#'     layout = twopi
#'     node [shape = circle]
#'     A -> {B C D}
#'   }")
NULL

#' Test case: lists
#'
#' @noMd
#' @description
#' \itemize{\item a}
#'
#' \itemize{
#'   \item a
#'   \item This is an item...
#'
#'     That spans multiple paragraphs.
#' }
#'
#' \enumerate{
#'   \item a
#'   \item b
#' }
#'
#' \describe{
#'   \item{a}{1}
#'   \item{b}{2}
#' }
#' @family tests
#' @name test-lists
NULL

#' Test case: links
#'
#' @name test-links
#' @family tests
#' @examples
#' magrittr::subtract(10, 1)
#'
#' library(magrittr, warn.conflicts = FALSE)
#' subtract(10, 1)
NULL

#' Test case: figures
#'
#' \figure{bacon.jpg}
#'
#' @name test-figures
#' @family tests
#' @examples
#' x <- seq(0, 2 * pi, length = 25)
#' plot(x, sin(x))
NULL

#' Test case: don't
#'
#' @name test-dont
#' @family tests
#' @examples
#' \dontrun{
#' 1 + 3
#' }
#'
#' \donttest{
#' 1 + 3
#' }
#'
#' answer <- 1
#' \dontshow{
#' answer <- 42
#' }
#' answer # should be 42
NULL

#' Test case: params
#'
#' @name test-params
#' @param ... ellipsis
#' @family tests
NULL
|
f3765db233ddf75b1225752e15d44b0c37fa9fa9
|
f694134656ed20ab912a473bb31e3819aa5c878e
|
/SetTheory/set-relation-level-1.R
|
112e9caed69294ed0ba9747967f2134d4e86109a
|
[] |
no_license
|
ISPEL-ECU/question_generation
|
764bdcf286c7e537d9c28c79e6faa606e5e0e678
|
07f2e2e210ab57d5ba8c5848f990fa7ba2a461c1
|
refs/heads/main
| 2023-07-23T20:28:00.184742
| 2021-08-30T16:23:59
| 2021-08-30T16:23:59
| 393,118,341
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,902
|
r
|
set-relation-level-1.R
|
library(set)
library(nsprcomp)
library(sets)
library(rje)
library(ggm)
source("utils/format.R") #set string formatting
source("utils/questions.R")
source("utils/distractors.R")
source("utils/set-generation.R") #set generation
# getSetUnionMC() generates and prepares a number
# of sets as well as 3 "distractors" and 1
# correct "answer" when considering the union of
# said sets.
#
# param numSets The number of sets to consider
# param setSize The number of elements in each set.
# param dType The desired data type for set elements
# (1: Ints, 2: Real, 3: Complex,
# 4: Char, 5: String, 6: Mixed)
#
# return toSend A json-like object containing the
# sets, correct, and incorrect
# answers.
# get_set_union_level_1(): generate a multiple-choice question about the
# union of sets. Produces the source sets, the correct union, and three
# perturbed distractors.
#
# Args:
#   numSets  number of sets to generate (default 2).
#   setSize  number of elements per set.
#   dType    element data type code passed to getSets()/getValue()
#            (1: Ints, 2: Real, 3: Complex, 4: Char, 5: String, 6: Mixed).
#
# Returns: list(content = question text + formatted sets,
#               correct = formatted correct answer,
#               distractors = list of 3 formatted wrong answers).
get_set_union_level_1 <- function(numSets=2, setSize=5, dType = 1) {
  # Question stem (LaTeX, escaped for downstream rendering). Note it
  # names "A and B"; labelling of extra sets is left to
  # insertSetQStrings() below.
  questionText <-('Let A and B be two sets. What is \\$A\\cup B\\$?')

  # Generate the source sets with the requested size and element type.
  sourceSets <- getSets(n = numSets, m = setSize, x = dType)

  # Correct answer: the union of ALL generated sets. (The original only
  # unioned the first two, silently ignoring numSets > 2; for the
  # default numSets = 2 the result is identical.)
  correct <- Reduce(union, sourceSets)

  # Build three distractors by perturbing the correct answer.
  distractors <- vector(mode="list", length = 3)
  for (i in (1:3)) {
    currentDist <- correct
    if (i == 1) {
      # Drop one element. The index is clamped to the union's length so
      # the removal always takes effect (the original could silently
      # remove nothing when the union was shorter than setSize - 1,
      # making the distractor identical to the correct answer).
      currentDist <- list(currentDist[-min(setSize - 1, length(currentDist))])
    } else if (i == 2) {
      # Add one plausible extra element of the same data type.
      # NOTE(review): getValue() may, in principle, return an element
      # already present -- confirm against its implementation.
      currentDist <- list(c(currentDist, getValue(x = dType)))
    } else if (i == 3) {
      # Drop a different element (clamped as above).
      currentDist <- list(currentDist[-min(setSize - 2, length(currentDist))])
    }
    # [[1]] unwraps the list() layer added above before formatting.
    distractors[i] <- formatListAsSet(currentDist[[1]])
  }

  # Format the correct answer and every source set for display.
  correct <- formatListAsSet(correct)
  counter <- 1
  for (s in sourceSets) {
    sourceSets[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }

  # Prefix each formatted set with its label, e.g. "A = {...}".
  sourceSets <- insertSetQStrings(sourceSets)

  # Assemble the question payload.
  questionContents <- c(questionText, sourceSets)
  toSend <- list(content = questionContents, correct = correct,
                 distractors = distractors)
  return(toSend)
}
# get_set_intersect_level_1() generates and prepares n sets
# of m elements, 3 "distractors" and 1 correct answer
# when considering the intersections of said sets.
#
# param numSets The number of sets to consider
# param setSize The number of elements in each set.
# param dType The desired data type for set elements
# (1: Ints, 2: Real, 3: Complex,
# 4: Char, 5: String, 6: Mixed)
#
# return toSend A json-like object containing the
# sets, correct, and incorrect
# answers.
get_set_intersect_level_1 <- function(numSets=2, setSize=5, dType = 1) {
  # Question stem (LaTeX-escaped).
  questionText <- 'Let A and B be two sets. What is \\$A\\cap B\\$?'
  # Generate and fill the source sets.
  sourceSets <- getSets(n = numSets, m = setSize, x = dType)
  # Correct answer: the intersection, rendered as the empty-set symbol when
  # the generated sets happen to be disjoint.
  correct <- intersect(sourceSets[[1]], sourceSets[[2]])
  if (length(correct) > 0) {
    correct <- formatListAsSet(correct)
  } else {
    correct <- "\\$\\emptyset\\$"
  }
  # Distractors:
  #   1. the empty set (or, when the answer IS empty, B - A)
  #   2. the difference A - B
  #   3. the union of A and B
  distractors <- vector(mode = "list", length = 3)
  for (i in (1:3)) {
    if (i == 1) {
      if (correct == "\\$\\emptyset\\$") {
        # BUG FIX: the old branch formatted only the FIRST element of the
        # difference (the list() wrapper used by the sibling branches was
        # missing) and it duplicated the i == 2 distractor. Use B - A so
        # the option is both well-formed and distinct.
        currentDist <- formatListAsSet(not(sourceSets[[2]], sourceSets[[1]]))
      } else {
        currentDist <- "\\$\\emptyset\\$"
      }
    } else if (i == 2) {
      currentDist <- formatListAsSet(not(sourceSets[[1]], sourceSets[[2]]))
    } else {
      currentDist <- formatListAsSet(union(sourceSets[[1]], sourceSets[[2]]))
    }
    distractors[i] <- currentDist
  }
  # Format the source sets for display; done after distractor generation so
  # the raw sets were still available above.
  counter <- 1
  for (s in sourceSets){
    sourceSets[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }
  # Label the sets "A = {...}", "B = {...}".
  sourceSets <- insertSetQStrings(sourceSets)
  questionContents <- c(questionText, sourceSets)
  # Bundle question, answer and distractors for the caller.
  toSend <- list(content= questionContents, correct= correct, distractors= distractors)
  return(toSend)
}
# get_asym_diff_level_1() generates and prepares n sets
# of m integers, 3 "distractors" and 1 correct answer
# when considering the difference of the generated
# sets.
#
# NOTE: the results reflect A-B where A is the first set in
# 'source' and B is the second set in 'source'
# param numSets The number of sets to consider
# param setSize The number of elements in each set.
# param dType The desired data type for set elements
# (1: Ints, 2: Real, 3: Complex,
# 4: Char, 5: String, 6: Mixed)
#
# return toSend A json-like object containing the
# sets, correct, and incorrect
# answers.
get_asym_diff_level_1 <- function(numSets=2, setSize=5, dType = 1) {
  # Question stem. NOTE: the answer reflects A - B, where A is the first
  # generated set and B the second.
  questionStr <- "Let A and B be two sets. What is A - B?"
  # Generate and fill the source sets.
  sourceSets <- getSets(n = numSets, m = setSize, x = dType)
  # Correct answer: the set difference A - B, kept unformatted so the
  # distractors can be derived from it.
  correct <- not(sourceSets[[1]], sourceSets[[2]])
  distractors <- vector(mode = "list", length = 3)
  if (length(correct) > 0) {
    # Distractors: drop one element, append a spurious value of the same
    # data type, drop a different element.
    for (i in (1:3)) {
      currentDist <- correct
      if (i == 1) {
        currentDist <- list(currentDist[-(setSize - 1)])
      } else if (i == 2) {
        currentDist <- list(c(currentDist, getValue(x = dType)))
      } else {
        currentDist <- list(currentDist[-(setSize - 2)])
      }
      # [[1]] strips the list() wrapper before formatting.
      distractors[i] <- formatListAsSet(currentDist[[1]])
    }
    correct <- formatListAsSet(correct)
  } else {
    # BUG FIX: A - B is empty. The old code stored the LaTeX empty-set
    # placeholder string and then element-dropped it and re-ran it through
    # formatListAsSet(), producing malformed answers. Use plausible
    # non-empty set expressions as distractors instead.
    distractors[1] <- formatListAsSet(intersect(sourceSets[[1]], sourceSets[[2]]))
    distractors[2] <- formatListAsSet(union(sourceSets[[1]], sourceSets[[2]]))
    distractors[3] <- formatListAsSet(not(sourceSets[[2]], sourceSets[[1]]))
    correct <- "\\$\\emptyset\\$"
  }
  # Format the source sets for display; done after distractor generation so
  # the raw sets were still available above.
  counter <- 1
  for (s in sourceSets){
    sourceSets[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }
  # Label the sets "A = {...}", "B = {...}".
  sourceSets <- insertSetQStrings(sourceSets)
  questionContents <- c(questionStr, sourceSets)
  # Bundle question, answer and distractors for the caller.
  toSend <- list(content = questionContents, correct = correct, distractors = distractors)
  return(toSend)
}
# get_set_complement_level_1() generates and prepares n sets of m members
# as well as 3 "distractors" and 1 correct answer.
# The correct answer reflects the complement of the first set against the
# universal set.
#
#
#
# @param numSets The number of sets to consider
# @param setSize The number of elements in each set.
# @response json A json object containing the
# sets, correct, and incorrect
# answers.
#
get_set_complement_level_1 <- function(numSets = 2, setSize = 9, dType = 1) {
  # Question stem: B plays the role of the universal set.
  prompt <- "Let A be a set and B be the universal set. What is the complement of set A?"
  source_sets <- getSets(n = numSets, m = setSize, x = dType)
  # Make A a 5-element subset of the universal set B: copy B into A,
  # shuffle B, then truncate A to its first 5 elements.
  source_sets[[1]] <- source_sets[[2]]
  source_sets[[2]] <- sample(source_sets[[2]], length(source_sets[[2]]), replace = FALSE)
  length(source_sets[[1]]) <- 5
  # The complement of A relative to the universal set B.
  complement_set <- not(source_sets[[2]], source_sets[[1]])
  near_miss_a <- complement_set
  near_miss_b <- complement_set
  answer <- formatListAsSet(complement_set)
  # Distractors 1 and 2: the complement with one member swapped for a
  # random value; distractor 3: the original set A itself.
  near_miss_a <- replace(near_miss_a, length(near_miss_a) - 2, getValue(x = dType, min = 1, max = 20))
  near_miss_b <- replace(near_miss_b, length(near_miss_b), getValue(x = dType, min = 1, max = 20))
  distractors <- list(
    formatListAsSet(near_miss_a),
    formatListAsSet(near_miss_b),
    formatListAsSet(source_sets[[1]])
  )
  # Render each source set as a "{...}" string, in place.
  for (idx in seq_along(source_sets)) {
    source_sets[idx] <- formatListAsSet(source_sets[[idx]])
  }
  # Prefix with "A = {...}", "B = {...}" labels for display.
  source_sets <- insertSetQStrings(source_sets)
  # Bundle question text, correct answer and distractors for the caller.
  list(
    content = c(prompt, source_sets),
    correct = answer,
    distractors = distractors
  )
}
# get_set_equality_level_1() generates and prepares 2 sets of m members
# as well as 1 "distractor" and 1 correct answer.
# The correct answer is a string which states whether the sets are equal
# or not.
#
#
# @param numSets The number of sets to consider
# @param setSize The number of elements in each set.
# @response json A json object containing the
# sets, correct, and incorrect
# answers.
#
get_set_equality_level_1 <- function(numSets = 2, setSize = 5, dType = 1) {
  # Coin flip drawn up front: 1 -> generate equal sets, 2 -> unequal sets,
  # giving a 50/50 split between the two question variants.
  coin <- sample(1:2, 1, replace = FALSE)
  prompt <- "Let A and B be two sets. Are A and B equal?"
  # Only the two-set case is supported by this generator.
  numSets <- 2
  source_sets <- getSets(n = numSets, m = setSize, x = dType)
  if (coin == 1) {
    # Equal: B is an exact copy of A.
    source_sets[[2]] <- source_sets[[1]]
    answer <- "Equal"
    distractors <- "Not Equal"
  } else {
    # Unequal: B is a copy of A with one randomly chosen member replaced
    # by a freshly drawn value.
    source_sets[[2]] <- source_sets[[1]]
    source_sets[[2]] <- replace(source_sets[[2]],
                                length(source_sets[[2]]) - sample(1:4, 1, replace = FALSE),
                                getValue(x = dType, min = 1, max = 20, cat = 6))
    answer <- "Not Equal"
    distractors <- "Equal"
  }
  # Render each source set as a "{...}" string, in place.
  for (idx in seq_along(source_sets)) {
    source_sets[idx] <- formatListAsSet(source_sets[[idx]])
  }
  # Prefix with "A = {...}", "B = {...}" labels for display.
  source_sets <- insertSetQStrings(source_sets)
  # Bundle question text, correct answer and distractor for the caller.
  list(
    content = c(prompt, source_sets),
    correct = answer,
    distractors = distractors
  )
}
# get_set_cardinality_level_1() generates and prepares 1 set of a random number of
# members between 1 and 9, as well as 3 "distractors" and 1 correct answer.
# The correct answer is a string which states the correct cardinality
# of the generated set.
#
#
# @param numSets The number of sets to consider
# @param setSize The number of elements in each set.
# @response json A json object containing the
# sets, correct, and incorrect
# answers.
#
get_set_cardinality_level_1 <- function(numSets = 1, setSize = sample(1:9, 1, replace = FALSE), dType = 1) {
  # Question stem.
  questionText <- 'Let A be a set. What is the cardinality of set A?'
  # Generate one set whose size is random (1..9) by default.
  sourceSet <- getSets(n = numSets, m = setSize, x = dType)
  # Correct answer: the number of elements in the generated set.
  correct <- lengths(sourceSet)
  # BUG FIX: `probability` used to be read before it was assigned, and the
  # entire distractor loop sat inside that broken `if`, so the function
  # always errored (and `distractors` could be undefined at return time).
  # Decide once whether the distractors sit below or above the correct
  # count, then offset by 1..3 WITHOUT replacement so the three options
  # are distinct. Offsets are only subtracted when that cannot produce a
  # negative cardinality.
  probability <- sample(1:2, 1, replace = FALSE)
  offsets <- sample(1:3, 3, replace = FALSE)
  distractors <- vector(mode = "list", length = 3)
  for (i in 1:3) {
    if (probability == 1 && correct > 3) {
      distractors[[i]] <- correct - offsets[i]
    } else {
      distractors[[i]] <- correct + offsets[i]
    }
  }
  # Format the source set for display.
  counter <- 1
  for (s in sourceSet){
    sourceSet[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }
  # Label the set "A = {...}".
  sourceSets <- insertSetRStrings(sourceSet)
  questionContents <- c(questionText, sourceSets)
  # Bundle question, answer and distractors for the caller.
  toSend <- list(content = questionContents, correct = correct, distractors = distractors)
  return(toSend)
}
# get_symm_diff_level_1() generates and prepares 2 sets of length 5,
# as well as 3 "distractors" and 1 correct answer.
# The correct answer is a string which states the unique members of each set
# which constitute the symmetric difference between the two sets.
#
#
# @param numSets The number of sets to consider
# @param setSize The number of elements in each set.
# @response json A json object containing the
# sets, correct, and incorrect
# answers.
#
get_symm_diff_level_1 <- function(numSets = 2, setSize = 5, dType = 1, difficulty = 1){
  # NOTE(review): the `difficulty` argument is accepted but never used in
  # this body -- confirm whether it was meant to gate the variants below.
  #define the text of the question
  questionText <-('Let A and B be two sets. What is the symmetric difference of A and B?')
  #generate and fill sets
  sourceSets <- getSets(n = numSets, m = setSize, x = dType)
  #set sourceSet 2 equal to sourceSet 1 and scramble the set, then replace three members.
  # NOTE(review): each replace() below targets one of the LAST TWO positions
  # (length - 0 or length - 1), so the three calls can overwrite the same
  # slot and fewer than three members may actually differ between A and B
  # -- confirm this is intended.
  sourceSets[[2]] <- sourceSets[[1]]
  sourceSets[[2]] <- sample(sourceSets[[2]], length(sourceSets[[2]]), replace = FALSE)
  # Replacement values are drawn from 21..30, outside the source range used
  # elsewhere (1..20), presumably so they cannot collide with existing
  # members -- TODO confirm getValue() honors min/max for all dTypes.
  sourceSets[[2]] <- replace(sourceSets[[2]], length(sourceSets[[2]]) - sample(0:1, 1, replace = FALSE),
                             getValue(x = dType, min = 21, max = 30, cat = 6))
  sourceSets[[2]] <- replace(sourceSets[[2]], length(sourceSets[[2]]) - sample(0:1, 1, replace = FALSE),
                             getValue(x = dType, min = 21, max = 30, cat = 6))
  sourceSets[[2]] <- replace(sourceSets[[2]], length(sourceSets[[2]]) - sample(0:1, 1, replace = FALSE),
                             getValue(x = dType, min = 21, max = 30, cat = 6))
  #set correct answer as a list of values unique to both sets
  # Symmetric difference = (A - B) followed by (B - A).
  correct <- list()
  correct <- not(sourceSets[[1]], sourceSets[[2]])
  correct <- append(correct, not(sourceSets[[2]], sourceSets[[1]]), after = length(correct))
  #Iterate through the sourceSets. format list as Set and insert at the index.
  counter <- 1
  for (s in sourceSets){
    sourceSets[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }
  #Creating distractors based on correct answer.
  # Each distractor is the correct answer with one of its last two members
  # replaced by a random value.
  # NOTE(review): distractors can duplicate one another (or even equal the
  # correct answer if the replacement value coincides with the original)
  # -- verify whether de-duplication happens upstream.
  distractors <- vector(mode="list", length = 3)
  for (i in (1:3)) {
    currentDist <- list()
    currentDist[[1]] <- correct
    wrong <- currentDist[[1]]
    wrong <- replace(wrong, length(wrong) - sample(0:1, 1, replace = FALSE), getValue(x = dType, min = 1, max = 30, cat = 6))
    currentDist[[1]] <- wrong
    currentDist <- formatListAsSet(currentDist[[1]])
    distractors[i] <- currentDist
  }
  correct <- formatListAsSet(correct)
  #format the the sourceSet as Question Strings
  # "A = {...}"
  # "B = {...}"
  sourceSets <- insertSetQStrings(sourceSets)
  # now we concatenate the question contents together
  questionContents <- c(questionText, sourceSets)
  #add all items to a list for return
  toSend <- list(content = questionContents, correct = correct, distractors = distractors)
  #return question info
  return(toSend)
}
# get_set_partitions_level_1() generates and prepares 1 set of length 5,
# as well as 3 "distractors" and 1 correct answer.
# The correct answer is the set which represents a correct partition
# of the sourceSet; the distractors are invalid partitions.
#
#
# @param numSets The number of sets to consider
# @param setSize The number of elements in each set.
# @response json A json object containing the
# sets, correct, and incorrect
# answers.
#
get_set_partitions_level_1 <- function(numSets = 1, setSize = 5, dType = 1) {
  #define the text of the question
  questionText <-('Let A be a set. Which answer represents a correct set partition of set A?')
  #generate and fill sets
  sourceSets <- getSets(n = numSets, m = setSize, x = dType)
  #scrambling the sets to be used for both the correct and distractor partitions.
  initial <- sample(sourceSets[[1]], length(sourceSets[[1]]), replace = FALSE)
  correct <- list()
  # Creating a probability variable which will cause the correct answer to be
  # generated with one of three different partitions for question variety.
  # Setting length of initial variable creates the first partition.
  # (Truncating to 0 yields the {empty set, A} partition variant.)
  probability <- sample(1:3, 1, replace = FALSE)
  if (probability == 1) {
    length(initial) <- 3
  }
  else if (probability == 2) {
    length(initial) <- 2
  }
  else {
    length(initial) <- 0
  }
  # Then creates the second partition with the remaining members from the source.
  secondSet <- not(sourceSets[[1]], initial)
  # Formats each inner partition as a set and concatenates each set
  # within the larger list. Then formats the larger list as a set.
  initial <- formatPartitionAsSet(initial)
  secondSet <- formatPartitionAsSet(secondSet)
  correct <- c(correct, initial)
  correct <- c(correct, secondSet)
  correct <- formatListAsSet(correct)
  distractors <- vector(mode="list", length = 3)
  for(i in (1:3)) {
    #generate and partition distractor sets
    # Each distractor splits A into two cells, then corrupts the first cell
    # by swapping in a random value so the pair is NOT a valid partition.
    currentDist <- (getSets(n = 1, m = 5, x = dType))
    firstSet <- sample(sourceSets[[1]], length(sourceSets[[1]]), replace = FALSE)
    length(firstSet) <- sample(2:4, 1, replace = FALSE)
    secondSet <- not(sourceSets[[1]], firstSet)
    # NOTE(review): when firstSet has length 2 and the sampled offset is 2,
    # the replacement index is 0 and replace() is a no-op -- the distractor
    # then IS a valid partition of A. TODO confirm and guard.
    firstSet <- replace(firstSet, length(firstSet) - sample(0:2, 1, replace = FALSE),
                        getValue(x = dType, min = 1, max = 20))
    firstSet <- formatPartitionAsSet(firstSet)
    secondSet <- formatPartitionAsSet(secondSet)
    wrong <- list()
    # and concatenating both sets inside larger empty list.
    # Wrong variable is created to deal with weird out of bounds issue in R.
    wrong <- c(wrong, firstSet)
    wrong <- c(wrong, secondSet)
    currentDist[[1]] <- wrong
    currentDist <- formatListAsSet(currentDist[[1]]) #The [[1]] is important here as it removes a layer of abstraction imposed by R
    #Note the single brackets '[1]' here
    distractors[i] <- currentDist
  }
  #Iterate through the sourceSets. format list as Set and insert at the index.
  counter <- 1
  for (s in sourceSets){
    sourceSets[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }
  #format the the sourceSet as Question Strings
  # "A = {...}"
  sourceSets <- insertSetRStrings(sourceSets)
  # now we concatenate the question contents together
  questionContents <- c(questionText, sourceSets)
  #add all items to a list for return
  toSend <- list(content = questionContents, correct = correct, distractors = distractors)
  #return question info
  return(toSend)
}
# get_power_set_level_1() generates and prepares 1 source set, as well as
# 3 "distractors" and 1 correct answer.
# The correct answer is a string representing the power set of the
# generated set.
# @param numSets The number of sets to consider in the question
# @param setSize The length of the source sets.
# @param dType The desired data type for set elements
# (1: Ints, 2: Real, 3: Complex,
# 4: Char, 5: String, 6: Mixed)
#
# @return toSend A json-like object containing the
# source sets, correct, and
# distractors (incorrect answers)
#
get_power_set_level_1 <- function(numSets = 1, setSize = 3, dType = 1) {
  # Level 1 difficulty: random source set of the requested data type.
  # Question stem.
  questionText <- 'What is the power set of A?'
  # Generate and fill the source set.
  sourceSets <- getSets(n = numSets, m = setSize, x = dType)
  # powerset() expects an atomic vector, so flatten the list first.
  sourceSetsVector <- unlist(sourceSets)
  correct <- powerset(sourceSetsVector)
  # Render the power set as a display string.
  correct <- formatPowerSetListAsSet(correct)
  # Distractors: power sets of three freshly generated (hence almost surely
  # different) sets of the same size and type.
  # NOTE: declaring the list with vector() fixes its length up front so R
  # does not copy it on every assignment.
  distractors <- vector(mode = "list", length = 3)
  for (i in (1:3)) {
    distractorSourceSet <- getSets(n = numSets, m = setSize, x = dType)
    distractorVector <- unlist(distractorSourceSet)
    distractors[i] <- formatPowerSetListAsSet(powerset(distractorVector))
  }
  # Format the source sets for display; done after distractor generation.
  counter <- 1
  for (s in sourceSets) {
    sourceSets[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }
  # Label the set "A = {...}".
  sourceSets <- insertSetRStrings(sourceSets)
  # Concatenate the question contents together.
  questionContents <- c(questionText, sourceSets)
  # BUG FIX: previously this function print()-ed the answer and the result
  # instead of returning it; return the question object like every sibling
  # generator does.
  toSend <-
    list(content = questionContents,
         correct = correct,
         distractors = distractors)
  return(toSend)
}
# @param numSets The number of sets to consider in the question
# @param setSize The length of the source sets.
# @param dType The desired data type for set elements
# (1: Ints, 2: Real, 3: Complex,
# 4: Char, 5: String, 6: Mixed)
# @param difficulty The level of difficulty of the question generated.
#
# @return toSend A json-like object containing the
# source sets, correct, and
# distractors (incorrect answers)
#
get_cartesian_product_level_1 <- function(numSets = 2, setSize = 3, dType = 1) {
  # Difficulty level, question type and style forwarded to the question bank.
  qa_level <- 1
  question_type <- 1
  question_style <- 1
  # Generate and fill the source sets.
  sourceSets <- getSets(n = numSets, m = setSize, x = dType)
  # Convenience aliases for the two operands of A x B.
  a <- sourceSets[[1]]
  b <- sourceSets[[2]]
  # NOTE: the helper names below keep the (misspelled) spellings defined in
  # the sourced question/distractor banks -- do not "fix" them here alone.
  question <- cartseian_product_question_bank(qa_level = 1, question_type = question_type, question_style = question_style)
  correct <- cartesian2sets(a, b)
  # Distractors: three alternative set pairs drawn from the distractor bank
  # (reversed sets, flipped product B x A, or random sets), each expanded
  # into its cartesian product.
  distractors <- vector(mode = "list", length = 3)
  for (i in (1:3)) {
    current_distractor_list <- cartseian_product_distractor_bank(qa_level = 2, distractor_type = 3, source_set_1 = a, source_set_2 = b, setSize = setSize, dType = dType)
    currentDist <- cartesian2sets(current_distractor_list[[1]], current_distractor_list[[2]])
    distractors[i] <- formatListAsSet(format_cart_set_list(list_length = length(currentDist), currentDist))
  }
  # Format the correct answer as a display string.
  correct <- formatListAsSet(format_cart_set_list(list_length = length(correct), correct))
  # Format the source sets for display; done after distractor generation.
  counter <- 1
  for (s in sourceSets){
    sourceSets[counter] <- formatListAsSet(s)
    counter <- counter + 1
  }
  # Label the sets "A = {...}", "B = {...}".
  sourceSets <- insertSetQStrings(sourceSets)
  # Concatenate the question contents together.
  questionContents <- c(question, sourceSets)
  # BUG FIX: previously this function print()-ed the result instead of
  # returning it; return the question object like the sibling generators.
  toSend <- list(content= questionContents, correct= correct, distractors= distractors)
  return(toSend)
}
|
4a1b5dfaef49dc610d1aa0137cb58724adf52ce4
|
9bd311ddca0e60ee4007a806c19e5e764c2c8558
|
/man/plotDFT.Rd
|
9d4ee51cd304f264041b276c28b89cb8259bb542
|
[] |
no_license
|
raim/segmenTools
|
8b34fc62c4e10d2ffa4423cc972367b9def2c689
|
7d356916b09a0cc019baf152de5dbf778130c0a1
|
refs/heads/master
| 2023-08-18T15:50:03.200348
| 2023-08-09T14:27:16
| 2023-08-09T14:27:16
| 77,826,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,592
|
rd
|
plotDFT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusterTools.R
\name{plotDFT}
\alias{plotDFT}
\title{plot polar coordinates}
\usage{
plotDFT(dft, col, cycles = 3, radius = 0.9, lambda = 1, bc = "component", ...)
}
\arguments{
\item{dft}{the Fourier transform of a time series as returned
by \code{t(mvfft(t(timeseries)))}, or alternatively, a `timeseries' object
from segmenTier's
\code{\link[segmenTier:processTimeseries]{processTimeseries}} when
run with (\code{use.fft=TRUE})}
\item{col}{a color vector for the rows in argument \code{dft} or
alternatively, `clustering' object as returned by
segmenTier's \code{\link[segmenTier:clusterTimeseries]{clusterTimeseries}}
with coloring information}
\item{cycles}{the number of cycles (index of non-DC DFT component)
to be plotted}
\item{radius}{radius of the polar plot circle as a fraction
of data to be contained within the radius (smaller amplitude)}
\item{lambda}{parameter for Box-Cox transformation of DFT data; has no
effect for \code{lambda==1}}
\item{bc}{type of Box-Cox transformation (\code{if lambda!=1});
"component": separate transformation of real and imaginary parts of
the DFT; "amplitude": Box-Cox transformation of the amplitude}
\item{...}{arguments to the base \code{\link[graphics:plot]{plot}}
and/or \code{\link[graphics:points]{points}} functions}
}
\description{
Plots the components of a Discrete Fourier Transform (DFT)
as polar coordinates (Re and Im of the complex numbers in the DFT).
Arguments \code{dft} and \code{col} can be segmenTier timeseries
and clustering objects.
}
|
e31d19b965f49418613fabab6706d030dd051fac
|
ab65a2c1eeed334ea8fddc402dc89c46141ce24a
|
/plot3.R
|
729fb532595af2691cb4fe315a1b02ef02bdd464
|
[] |
no_license
|
mjfarooq/ExData_Plotting1
|
0c3d0796ebda738abd58c691649d5be838040ae3
|
d862cc5bee604d1842c2d3775a56dddda8174d0c
|
refs/heads/master
| 2021-01-17T06:44:43.922428
| 2015-09-12T10:14:26
| 2015-09-12T10:14:26
| 42,335,209
| 0
| 0
| null | 2015-09-11T22:29:54
| 2015-09-11T22:29:53
| null |
UTF-8
|
R
| false
| false
| 1,150
|
r
|
plot3.R
|
# Course project plot 3: household energy sub-metering over the two days
# 2007-02-01 and 2007-02-02, written to plot3.png.

# Load the raw data; '?' marks missing values in this dataset.
power <- read.table("household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?")

# Logical index for the two target days (dates are stored as dd/mm/yyyy).
obs_dates <- as.Date(power$Date, "%d/%m/%Y")
keep <- obs_dates == as.Date("01/02/2007", "%d/%m/%Y") |
  obs_dates == as.Date("02/02/2007", "%d/%m/%Y")

# Combine date and time into POSIXct timestamps for the x axis.
timestamp <- as.POSIXct(paste(as.character(power$Date[keep]),
                              as.character(power$Time[keep]), sep = " "),
                        format = "%d/%m/%Y %H:%M:%S")

# The three sub-metering series restricted to the selected window.
sub1 <- power$Sub_metering_1[keep]
sub2 <- power$Sub_metering_2[keep]
sub3 <- power$Sub_metering_3[keep]

# Draw the three series as overlaid lines and close the PNG device.
png("plot3.png")
plot(timestamp, sub1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(timestamp, sub2, col = "red")
lines(timestamp, sub3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))
dev.off()
|
498e54681b4b1fadaeb9af36f556f086b618637c
|
a6059cf32319ed6b92624bdf590f5fb23711629e
|
/R/form.R
|
1bd0b718e6463f7d6cf8ef071db93062afeb5f7e
|
[
"MIT"
] |
permissive
|
tidyverse/rvest
|
e26f23e5c9ae0deb3d1548f5d3ab7a8a4d0ba948
|
8e19075c54fdd8391873c5df454c8e6deb0a2230
|
refs/heads/main
| 2023-04-06T09:41:47.964837
| 2023-01-11T17:59:22
| 2023-01-11T17:59:22
| 22,178,685
| 467
| 108
|
NOASSERTION
| 2023-08-25T13:59:23
| 2014-07-23T21:22:27
|
R
|
UTF-8
|
R
| false
| false
| 8,890
|
r
|
form.R
|
#' Parse forms and set values
#'
#' Use `html_form()` to extract a form, set values with `html_form_set()`,
#' and submit it with `html_form_submit()`.
#'
#' @export
#' @inheritParams html_name
#' @param base_url Base url of underlying HTML document. The default, `NULL`,
#' uses the url of the HTML document underlying `x`.
#' @seealso HTML 4.01 form specification:
#' <https://www.w3.org/TR/html401/interact/forms.html>
#' @return
#' * `html_form()` returns as S3 object with class `rvest_form` when applied
#' to a single element. It returns a list of `rvest_form` objects when
#' applied to multiple elements or a document.
#'
#' * `html_form_set()` returns an `rvest_form` object.
#'
#' * `html_form_submit()` submits the form, returning an httr response which
#' can be parsed with [read_html()].
#' @examples
#' html <- read_html("http://www.google.com")
#' search <- html_form(html)[[1]]
#'
#' search <- search %>% html_form_set(q = "My little pony", hl = "fr")
#'
#' # Or if you have a list of values, use !!!
#' vals <- list(q = "web scraping", hl = "en")
#' search <- search %>% html_form_set(!!!vals)
#'
#' # To submit and get result:
#' \dontrun{
#' resp <- html_form_submit(search)
#' read_html(resp)
#' }
html_form <- function(x, base_url = NULL) {
  UseMethod("html_form")
}
#' @export
html_form.xml_document <- function(x, base_url = NULL) {
  # Find every <form> in the document and parse each one.
  forms <- xml2::xml_find_all(x, ".//form")
  html_form(forms, base_url = base_url)
}
#' @export
html_form.xml_nodeset <- function(x, base_url = NULL) {
  # Parse each <form> element in the node set individually.
  lapply(x, function(node) html_form(node, base_url = base_url))
}
#' @export
html_form.xml_node <- function(x, base_url = NULL) {
  # Parse a single <form> element into an `rvest_form` object.
  stopifnot(xml2::xml_name(x) == "form")
  attr <- as.list(xml2::xml_attrs(x))
  # Prefer id, then name, as a label; purely informational.
  name <- attr$id %||% attr$name %||% "<unnamed>" # for human readers
  # HTTP method defaults to GET when the form omits it.
  method <- toupper(attr$method %||% "GET")
  enctype <- convert_enctype(attr$enctype)
  # Collect every control in the form and dispatch on its tag name;
  # unknown tags yield NULL (switch() with no default).
  nodes <- html_elements(x, "input, select, textarea, button")
  fields <- lapply(nodes, function(x) {
    switch(xml2::xml_name(x),
      textarea = parse_textarea(x),
      input = parse_input(x),
      select = parse_select(x),
      button = parse_button(x)
    )
  })
  # Fields are keyed by their name attribute (empty string when absent).
  names(fields) <- map_chr(fields, function(x) x$name %||% "")
  structure(
    list(
      name = name,
      method = method,
      # Resolve the action URL against the supplied (or document) base URL.
      action = xml2::url_absolute(attr$action, base_url %||% xml2::xml_url(x)),
      enctype = enctype,
      fields = fields
    ),
    class = "rvest_form")
}
#' @export
print.rvest_form <- function(x, ...) {
  # One-line summary of the form, then its fields indented beneath it.
  cat("<form> '", x$name, "' (", x$method, " ", x$action, ")\n", sep = "")
  cat(format_list(x$fields, indent = 1), "\n", sep = "")
}
# set ----------------------------------------------------------------
#' @rdname html_form
#' @param form A form
#' @param ... <[`dynamic-dots`][rlang::dyn-dots]> Name-value pairs giving
#' fields to modify.
#'
#' Provide a character vector to set multiple checkboxes in a set or
#' select multiple values from a multi-select.
#' @export
html_form_set <- function(form, ...) {
  # Validate that `form` is an rvest_form before touching it.
  check_form(form)
  new_values <- list2(...)
  # Every supplied name must correspond to an existing form field.
  check_fields(form, new_values)
  for (field in names(new_values)) {
    type <- form$fields[[field]]$type %||% "non-input"
    if (type == "hidden") {
      # Hidden fields are normally server-controlled; warn but allow.
      warn(paste0("Setting value of hidden field '", field, "'."))
    } else if (type == "submit") {
      # Submit buttons are chosen via html_form_submit(), never set directly.
      abort(paste0("Can't change value of input with type submit: '", field, "'."))
    }
    form$fields[[field]]$value <- new_values[[field]]
  }
  # Return the modified copy (forms are plain lists, so this is pure).
  form
}
# submit ------------------------------------------------------------------
#' @rdname html_form
#' @param submit Which button should be used to submit the form?
#' * `NULL`, the default, uses the first button.
#' * A string selects a button by its name.
#' * A number selects a button using its relative position.
#' @export
html_form_submit <- function(form, submit = NULL) {
  # Validate, build the request description, then perform the HTTP call.
  check_form(form)
  submission_submit(submission_build(form, submit))
}
# Turn a parsed form plus a chosen submit button into a request description:
# list(method, enctype, action, values).
submission_build <- function(form, submit) {
  verb <- form$method
  # Anything other than GET/POST is invalid HTML; fall back to GET.
  if (!(verb %in% c("POST", "GET"))) {
    warn(paste0("Invalid method (", verb, "), defaulting to GET"))
    verb <- "GET"
  }
  if (length(form$action) == 0) {
    abort("`form` doesn't contain a `action` attribute")
  }
  list(
    method = verb,
    enctype = form$enctype,
    action = form$action,
    values = submission_build_values(form, submit)
  )
}
# Execute a request description produced by submission_build().
submission_submit <- function(x, ...) {
  if (identical(x$method, "POST")) {
    # POST sends the values in the body, encoded per the form's enctype.
    return(httr::POST(url = x$action, body = x$values, encode = x$enctype, ...))
  }
  # GET sends the values as the query string.
  httr::GET(url = x$action, query = x$values, ...)
}
# Flatten a form's fields into the named list of values to submit:
# all non-button fields plus the one chosen submit button.
submission_build_values <- function(form, submit = NULL) {
  flds <- form$fields
  chosen <- submission_find_submit(flds, submit)
  entries <- c(Filter(function(f) !is_button(f), flds), list(chosen))
  # Unnamed controls contribute nothing to the submission.
  entries <- Filter(function(f) !is.null(f$name), entries)
  if (length(entries) == 0) {
    return(list())
  }
  vals <- lapply(entries, function(f) as.character(f$value))
  nms <- map_chr(entries, "[[", "name")
  # Repeat each name once per value (multi-selects yield several pairs).
  flat <- set_names(unlist(vals, use.names = FALSE), rep(nms, lengths(vals)))
  as.list(flat)
}
# Select which submit button participates in the submission.
# `idx` may be NULL (first button; a message is shown if there are several),
# a single 1-based position among the buttons, or a single button name.
submission_find_submit <- function(fields, idx) {
  buttons <- Filter(is_button, fields)
  if (is.null(idx)) {
    if (length(buttons) == 0) {
      # No button at all: contribute nothing to the submission.
      list()
    } else {
      if (length(buttons) > 1) {
        inform(paste0("Submitting with '", buttons[[1]]$name, "'"))
      }
      buttons[[1]]
    }
  } else if (is.numeric(idx) && length(idx) == 1) {
    if (idx < 1 || idx > length(buttons)) {
      abort("Numeric `submit` out of range")
    }
    buttons[[idx]]
  } else if (is.character(idx) && length(idx) == 1) {
    if (!idx %in% names(buttons)) {
      abort(c(
        paste0("No <input> found with name '", idx, "'."),
        i = paste0("Possible values: ", paste0(names(buttons), collapse = ", "))
      ))
    }
    buttons[[idx]]
  } else {
    abort("`submit` must be NULL, a string, or a number.")
  }
}
# A field behaves as a submit button when its type (case-insensitively)
# is "submit", "image", or "button".
is_button <- function(x) {
  button_types <- c("submit", "image", "button")
  tolower(x$type) %in% button_types
}
# Field parsing -----------------------------------------------------------
# Constructor for a single form field; extra components (e.g. `options`
# for selects) can be supplied through `...`.
rvest_field <- function(type, name, value, attr, ...) {
  field <- list(type = type, name = name, value = value, attr = attr, ...)
  structure(field, class = "rvest_field")
}
#' @export
format.rvest_field <- function(x, ...) {
  if (x$type == "password") {
    # Mask passwords: one star per character, never the real value.
    shown <- paste0(rep("*", nchar(x$value %||% "")), collapse = "")
  } else {
    shown <- paste(x$value, collapse = ", ")
    shown <- str_trunc(encodeString(shown), 20)
  }
  paste0("<field> (", x$type, ") ", x$name, ": ", shown)
}
#' @export
print.rvest_field <- function(x, ...) {
  # Print the formatted one-line summary; return the object invisibly
  # so the field pipes/chains cleanly.
  line <- format(x, ...)
  cat(line, "\n", sep = "")
  invisible(x)
}
# Parse an <input> element into an rvest_field.
parse_input <- function(x) {
  a <- as.list(xml2::xml_attrs(x))
  rvest_field(
    # Per the HTML spec, a missing type attribute means "text".
    type = a$type %||% "text",
    name = a$name,
    value = a$value,
    attr = a
  )
}
# Parse a <select> element: current value(s) come from the selected
# <option> children; all options are kept for later inspection.
parse_select <- function(x) {
  a <- as.list(xml2::xml_attrs(x))
  opts <- parse_options(html_elements(x, "option"))
  rvest_field(
    type = "select",
    name = a$name,
    value = opts$value,
    attr = a,
    options = opts$options
  )
}
# Extract value/name/selected from a set of <option> nodes.
# Returns list(value = selected values, options = named vector of all
# values, named by their display text).
parse_options <- function(options) {
  parsed <- lapply(options, function(opt) {
    label <- xml2::xml_text(opt)
    list(
      # An option without a value attribute submits its text content.
      value = xml2::xml_attr(opt, "value", default = label),
      name = label,
      selected = xml2::xml_has_attr(opt, "selected")
    )
  })
  vals <- map_chr(parsed, "[[", "value")
  labels <- map_chr(parsed, "[[", "name")
  chosen <- map_lgl(parsed, "[[", "selected")
  list(
    value = vals[chosen],
    options = stats::setNames(vals, labels)
  )
}
# Parse a <textarea> element; its value is the element's text content.
parse_textarea <- function(x) {
  a <- as.list(xml2::xml_attrs(x))
  rvest_field(
    type = "textarea",
    name = a$name,
    value = xml2::xml_text(x),
    attr = a
  )
}
# Parse a <button> element into an rvest_field of type "button".
parse_button <- function(x) {
  a <- as.list(xml2::xml_attrs(x))
  rvest_field(
    type = "button",
    name = a$name,
    value = a$value,
    attr = a
  )
}
# Helpers -----------------------------------------------------------------
# Map an HTML enctype attribute onto the encoding names httr understands
# ("form" or "multipart"); unknown values warn and fall back to "form".
convert_enctype <- function(x) {
  if (is.null(x) || x == "application/x-www-form-urlencoded") {
    return("form")
  }
  if (x == "multipart/form-data") {
    return("multipart")
  }
  warn(paste0("Unknown enctype (", x, "). Defaulting to form encoded."))
  "form"
}
# Format each element of `x` on its own line, prefixed by `indent` spaces.
format_list <- function(x, indent = 0) {
  pad <- strrep(" ", indent)
  lines <- vapply(x, format, character(1))
  paste0(pad, lines, collapse = "\n")
}
# Abort with a helpful message if `values` names any field that the
# form does not contain; otherwise return NULL invisibly.
check_fields <- function(form, values) {
  unknown <- setdiff(names(values), names(form$fields))
  if (length(unknown) > 0) {
    listing <- paste("'", unknown, "'", collapse = ", ")
    abort(paste0("Can't set value of fields that don't exist: ", listing))
  }
}
|
ec678c47bb8428beaa2e9d472ce550b8c823a67e
|
5d895c875c67a0695b6b91ee6145ce960ec4b716
|
/R/g.brownian.motion.R
|
e60e9eb0c1a37f8ad9334d92ded62c948b176a7d
|
[] |
no_license
|
talgalili/animation
|
80866d3f83526a68f0b401161fe2f8a831ba140f
|
76d495c7aa79d0a088ab81ed691c74e61dac3604
|
refs/heads/master
| 2020-12-30T18:30:18.158860
| 2013-03-01T17:11:49
| 2013-03-01T17:11:49
| 8,755,335
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,658
|
r
|
g.brownian.motion.R
|
#' Brownian Motion using Google Visualization API
#'
#' We can use R to generate random numbers from the Normal
#' distribution and write them into an HTML document, then the Google
#' Visualization gadget ``motionchart'' will prepare the animation
#' for us (a Flash animation with several buttons).
#' @param p number of points
#' @param start start ``year''; it has no practical meaning in this
#' animation but it's the required by the Google gadget
#' @param digits the precision to round the numbers
#' @param file the file name
#' @param width,height width and height of the animation
#' @return \code{NULL}. An HTML page will be opened as the side
#' effect.
#' @note The number of frames is controlled by
#' \code{ani.options("nmax")} as usual.
#'
#' Due to the ``security settings'' of Adobe Flash player, you might
#' not be able to view the generated Flash animation locally,
#' i.e. using an address like \url{file:///C:/Temp/index.html}. In
#' this case, you can upload the HTML file to a webserver and use the
#' http address to view the Flash file.
#' @author Yihui Xie <\url{http://yihui.name}>
#' @seealso \code{\link{brownian.motion}}, \code{\link{BM.circle}},
#' \code{\link[stats]{rnorm}}
#' @references \url{http://code.google.com/apis/visualization/}
#'
#' \url{http://yihui.name/en/2008/11/brownian-motion-using-google-visualization-api-and-r/}
#' @keywords dynamic IO
#' @examples
#'
#' oopt = ani.options(htmlfile = 'BM-motion-chart.html')
#'
#' g.brownian.motion(15, digits = 2, width = 600, height = 500)
#'
#' unlink(file.path(ani.options('outdir'), ani.options('htmlfile')))
#'
#' ani.options(oopt)
#'
# Render a Brownian motion of `p` points as a Google Motion Chart: R only
# generates the random walks and writes a self-contained HTML page; the
# animation itself is produced by the Google Visualization API (Flash).
g.brownian.motion = function(p = 20, start = 1900,
    digits = 14, file = file.path(ani.options('outdir'),
        ani.options('htmlfile')), width = 800,
    height = 600) {
    # Number of animation frames, from the animation package options.
    n = ani.options("nmax")
    # One random walk per point: cumulative sums of N(0,1) steps, row-wise,
    # then flattened frame-by-frame via t().
    x = round(c(t(apply(matrix(rnorm(p * n), p, n), 1, cumsum))),
        digits)
    y = round(c(t(apply(matrix(rnorm(p * n), p, n), 1, cumsum))),
        digits)
    # Interleave (point id, "year", x, y) quadruples; these become the
    # arguments of the data.setValue(...) JavaScript calls below.
    tmp = character(p * n * 4)
    tmp[seq(1, p * n * 4, 4)] = shQuote(formatC(rep(1:p, n),
        width = nchar(p), flag = 0))
    tmp[seq(2, p * n * 4, 4)] = rep(start + (1:n), each = p)
    tmp[seq(3, p * n * 4, 4)] = x
    tmp[seq(4, p * n * 4, 4)] = y
    # Emit the complete HTML document (API loader, data table, chart).
    cat(c("<html>", " <head>", " <script type=\"text/javascript\" src=\"http://www.google.com/jsapi\"></script>",
        " <script type=\"text/javascript\">", " google.load(\"visualization\", \"1\", {packages:[\"motionchart\"]});",
        " google.setOnLoadCallback(drawChart);", " function drawChart() {",
        " var data = new google.visualization.DataTable();"),
        paste(" data.addRows(", p * n, ");", sep = ""),
        c(" data.addColumn('string', 'point');", " data.addColumn('number', 'year');",
        " data.addColumn('number', 'X');", " data.addColumn('number', 'Y');"),
        paste(" data.setValue(", rep(0:(p * n - 1), each = 4),
        ", ", rep(0:3, p * n), ", ", tmp, ");", sep = "",
        collapse = "\n"), c(" var chart = new google.visualization.MotionChart(document.getElementById('chart_div'));"),
        paste(" chart.draw(data, {width: ", width, ", height: ",
        height, "});\n }", sep = ""), c(" </script>",
        " </head>", "", " <body>"), paste(" <div id=\"chart_div\" style=\"width: ",
        width, "px; height: ", height, "px;\"></div>", sep = ""),
        c(" </body>", "</html>"), file = file, sep = "\n")
    # Optionally open the generated page in the default browser.
    if (ani.options('autobrowse'))
        browseURL(paste('file:///', normalizePath(file), sep = ''))
}
|
d4ceadbfdb2f153cc1d8d8aea58b6b38ffae42ba
|
01f9dd6a6257459a306eecc25a8991403837a363
|
/R/coef_handling.R
|
0942fe7afe00215795c1f4ce2bedefc274498f7f
|
[] |
no_license
|
areyesq89/DEXSeq
|
a05a6858604ca1a3be06fd1bda66210de1cac25c
|
1e42f0c322ae753f3d322eea562203826b2d9dc9
|
refs/heads/master
| 2023-09-02T13:38:59.587957
| 2023-02-24T10:45:24
| 2023-02-24T10:45:24
| 127,189,778
| 8
| 8
| null | 2023-02-24T10:45:25
| 2018-03-28T19:42:32
|
R
|
UTF-8
|
R
| false
| false
| 13,520
|
r
|
coef_handling.R
|
# Arrange a fitted model's coefficients into one named array per formula
# term.  For each term of `frm` (e.g. "condition", "condition:exon") an
# array is built with one dimension per variable in the term, dimnames set
# to the factor levels, and cells holding either the matching coefficient
# name (insertValues = FALSE) or its fitted value, with 0 for baseline
# levels absorbed by the treatment contrasts (insertValues = TRUE).
arrangeCoefs <- function( frm, mf, mm = model.matrix( frm, mf ), fit = NULL, insertValues = TRUE ) {
  # Coefficient names are matched against level names below, which only
  # works for standard treatment contrasts.
  if( any( attr( mm, "contrasts" ) != "contr.treatment" ) )
    stop( "Can only deal with standard 'treatment' contrasts." ) # Do I need this?
  if( is.null(fit) & insertValues )
    stop( "If fit==NULL, returnCoefValues must be FALSE" )
  if( !is.null(fit) )
    stopifnot( all( colnames(mm) == names(coefficients(fit)) ) )
  # Factor table: which variables participate in which term.
  fctTbl <- attr( terms(frm), "factors" )
  coefIndicesList <-
    lapply( seq_len(ncol(fctTbl)), function( fctTblCol ) {
      termName <- colnames(fctTbl)[ fctTblCol ]
      varsInTerm <- stringr::str_split( termName, stringr::fixed(":") )[[1]]
      stopifnot( all( fctTbl[ varsInTerm, fctTblCol ] == 1 ) )
      stopifnot( sum( fctTbl[ , fctTblCol ] ) == length( varsInTerm ) )
      # Coefficient names belonging to this term (via the 'assign' attribute).
      coefNames <- colnames(mm)[ attr( mm, "assign" ) == fctTblCol ]
      # Recover the factor-level combination encoded in each coefficient name.
      lvlTbl <- stringr::str_match( coefNames,
        stringr::str_c( "^", stringr::str_c( varsInTerm, "([^:]*)", collapse=":" ), "$" ) )[ , -1, drop=FALSE ]
      stopifnot( ncol(lvlTbl) == length( varsInTerm ) )
      stopifnot( nrow(lvlTbl) == length( coefNames ) )
      if( !all( sapply( varsInTerm, function(v) is.factor(mf[[v]]) | is.character(mf[[v]]) ) ) )
        stop( "Non-factor in model frame" )
      varLevels <- lapply( varsInTerm, function(v) levels( factor( mf[[v]] ) ) )
      # One array cell per level combination; NA marks baseline levels that
      # have no coefficient of their own.
      coefIndices <- array( NA_character_, dim = sapply( varLevels, length ), dimnames = varLevels )
      names( dimnames( coefIndices ) ) <- varsInTerm
      for( i in seq_len( nrow(lvlTbl) ) )
        coefIndices <- do.call( `[[<-`, c( quote(coefIndices), as.list( lvlTbl[ i, ] ), coefNames[i] ) )
      coefIndices
    } )
  names( coefIndicesList ) <- colnames( fctTbl )
  # Prepend the intercept as a one-cell array if the model has one.
  if( attr( terms(frm), "intercept" ) ) {
    a <- array( c( `(Intercept)` = "(Intercept)" ) )
    dimnames(a) <- list( `(Intercept)` = c( "(Intercept)" ) )
    coefIndicesList <- c( list( `(Intercept)` = a ), coefIndicesList )
  }
  if( !insertValues )
    ans <- coefIndicesList
  else
    # Replace coefficient names by fitted values; baseline (NA) cells -> 0.
    ans <- lapply( coefIndicesList, function(coefIndices) {
      a <- ifelse( is.na(coefIndices), 0, coefficients(fit)[ coefIndices ] )
      attr( a, "variables" ) <- attr( coefIndices, "variables" )
      a } )
  # Ensure every element is an array (1-d terms can come back as vectors).
  lapply( ans, function(x)
    if( is.array(x) )
      x
    else {
      y <- array( x, dim=length(x) )
      attr( y, "variables" ) <- attr( x, "variables" )
      dimnames(y) <- list( names(x) )
      y } )
}
# Like apply(), but an empty MARGIN applies FUN to X as a whole instead
# of erroring (useful when the set of margin dimensions is computed).
apply2 <- function( X, MARGIN, FUN, ... ) {
  if ( length(MARGIN) == 0 ) {
    return( FUN( X, ... ) )
  }
  apply( X, MARGIN, FUN, ... )
}
# Re-centre exon-involving coefficient arrays so that, within each term,
# the weighted mean over exons (weights ~ 1 / (dispersion + 1/mu), i.e.
# NB precision at the mean fitted value) is zero, and compensate by adding
# the same shift to the matching exon-free term.  Net fitted values are
# unchanged; exon coefficients become comparable across conditions.
balanceExons <- function( coefs, dispersions ) {
  stopifnot( any( sapply( coefs, function(x)
    identical( names(dimnames(x)), "(Intercept)" ) ) ) )
  termsWithExon <- sapply( coefs, function(x) "exon" %in% names(dimnames(x)) )
  # Overall mean effect of all exon-free terms.
  meanMainEffect <- sum( sapply( coefs[!termsWithExon], mean, na.rm=TRUE ) )
  # Per-exon mean effect, summed over all exon-involving terms.
  meanExonEffects <- rowSums( sapply( coefs[termsWithExon], function(x)
    apply2( x, "exon", mean, na.rm=TRUE ) ) )
  meanExonFittedValue <- exp( meanMainEffect + meanExonEffects )
  # Precision weight per exon.
  exonWeights <- 1 / ( dispersions + 1 / meanExonFittedValue )
  # For each exon-involving term: its weighted mean over exons, per
  # combination of the remaining variables.
  shifts <- lapply( coefs[termsWithExon], function(x) {
    nonExonDims <- which( names(dimnames(x)) != "exon" )
    list(
      vars = names(dimnames(x))[ nonExonDims ],
      wmeans = apply2( x, nonExonDims, weighted.mean, exonWeights) ) } )
  lapply( coefs, function(x) {
    nonExonVars <- names(dimnames(x))[ names(dimnames(x)) != "exon" ]
    # Match each term to the shift whose non-exon variables agree with it.
    if( identical( nonExonVars, "(Intercept)" ) )
      whichShift <- which( sapply( shifts, function(xx) length( xx$vars ) == 0 ) )
    else
      whichShift <- which( sapply( shifts, function(xx) identical( xx$vars, nonExonVars ) ) )
    if( length( whichShift ) == 0 )
      return( x )
    if( length( whichShift ) > 1 )
      stop( "Confused about selecting shift." )
    # Subtract the shift from the exon term, add it to the matching
    # exon-free term, so their sum is preserved.
    if( "exon" %in% names(dimnames(x)) )
      x - shifts[[ whichShift ]]$wmeans
    else
      x + shifts[[ whichShift ]]$wmeans
  } )
}
# Fit a negative-binomial GLM for one gene's counts and return its
# coefficients -- arranged per term (see arrangeCoefs) and optionally
# exon-balanced -- or NULL when the gene has fewer than two exon levels
# or the fit fails.
fitAndArrangeCoefs <- function( frm = count ~ condition * exon, balanceExons = TRUE, mf, fitExpToVar, geneID)
{
  if( length(levels(mf$exon)) <= 1 )
    return( NULL )
  mm <- model.matrix( frm, mf )
  fit <- tryCatch(
    {
      glmnb.fit(mm, mf$count, dispersion=mf$dispersion,
        offset=log(mf$sizeFactor), tol=0.01)
    },
    error=function(cond){
      message( sprintf("Fit for gene/exon %s failed, coefficients for this gene won't show up.", geneID ) )
      return(NULL)
    },
    warning=function(cond){
      # NOTE(review): this handler returns the (NULL) result of message(),
      # so ANY warning during fitting discards the fit entirely -- confirm
      # that dropping warned fits is intended rather than an oversight.
      message( sprintf("Fit for gene/exon %s threw the next warning(s): %s", geneID, unique(cond$message) ) )
    } )
  if( is.null( fit ) ){
    return( NULL )
  }
  # Numeric covariates keep the raw coefficient vector; factor covariates
  # get the arranged (and optionally exon-balanced) representation.
  if( is( mf[[fitExpToVar]], "numeric" ) ){
    coefs <- fit$coefficients
    attributes(coefs)$fitType <- "numeric"
  }else{
    coefs <- arrangeCoefs( frm, mf, mm, fit )
    if( balanceExons ) {
      # One dispersion per exon (first occurrence in the model frame).
      coefs <- balanceExons( coefs, tapply( mf$dispersion, mf$exon, `[`, 1 ) )
    }
    attributes(coefs)$fitType <- "factor"
  }
  coefs
}
# Combine arranged coefficients into a fitted-value matrix for plotting:
# rows indexed by the grouping variable's levels (factor fits) or by the
# observed predictor values (numeric fits), columns by exon.  With
# averageOutExpression = TRUE the overall expression level is replaced by
# its mean, so only splicing differences remain.
getEffectsForPlotting <- function( coefs, groupingVar = "condition", averageOutExpression=FALSE, frm, mf )
{
  if( attributes(coefs)$fitType == "factor" ){
    # Start from the groupingVar:exon interaction matrix ...
    groupingExonInteraction <- which( sapply( coefs, function(x)
      all( c( groupingVar, "exon") %in% names(dimnames(x)) ) & length(dim(x)) == 2 ) )
    fittedValues <- coefs[[ groupingExonInteraction ]]
    # ... oriented as (groupingVar, exon).
    if( names(dimnames(fittedValues))[1] == "exon" )
      fittedValues <- t( fittedValues )
    stopifnot( identical( names(dimnames(fittedValues)), c( groupingVar, "exon" ) ) )
    # ... then fold in every remaining term, broadcasting appropriately.
    for( x in coefs[ -groupingExonInteraction ] ) {
      if( all( c( groupingVar, "exon") %in% names(dimnames(x)) ) )
        stop( "Cannot yet deal with third-order terms." )
      if( !any( c( groupingVar, "exon") %in% names(dimnames(x)) ) ) {
        # Term involves neither groupingVar nor exon: add its scalar mean.
        fittedValues <- fittedValues + mean( x )
      } else if( averageOutExpression & identical( names(dimnames(x)), groupingVar ) ) {
        # Averaging out expression: flatten the groupingVar main effect.
        fittedValues <- fittedValues + mean( x )
      } else if( groupingVar %in% names(dimnames(x)) ) {
        # Add the per-group means row-wise.
        groupMeans <- apply2( x, groupingVar, mean )
        stopifnot( identical( names(groupMeans), dimnames(fittedValues)[[1]] ) )
        fittedValues <- fittedValues + groupMeans
      } else if( "exon" %in% names(dimnames(x)) ) {
        # Add the per-exon means column-wise.
        exonMeans <- apply2( x, "exon", mean )
        fittedValues <- t( t(fittedValues) + exonMeans )
      } else {
        print( x )
        stop( "Unexpected term encountered." )
      }
    }
    return( fittedValues )
  }else{
    # Numeric fit: named coefficient vector from count ~ var * exon with
    # a two-level "others"/"this" exon factor ("exonthis" coefficient).
    stopifnot( "(Intercept)" %in% names(coefs) )
    stopifnot( "exonthis" %in% names(coefs) )
    allVars <- all.vars(frm)
    continuousVar <- allVars[!allVars %in% c("count", "exon")]
    interactionCoefName <- paste0( continuousVar, ":exonthis" )
    stopifnot( interactionCoefName %in% names(coefs) )
    # NOTE(review): the next line is a no-op (value unused).
    mf[[continuousVar]]
    predictors <- unique( mf[[continuousVar]] )
    if( averageOutExpression ){
      # Keep only the exon offset and its slope along the covariate.
      fittedValues <- coefs["exonthis"] + coefs[interactionCoefName]*predictors
    }else{
      fittedValues <- coefs["(Intercept)"] + coefs["exonthis"] +
        (coefs[continuousVar] + coefs[interactionCoefName])*predictors
    }
    fittedValues <- matrix(fittedValues, ncol=1)
    colnames(fittedValues) <- "this"
    # rownames(fittedValues) <- sprintf("%s=%s", continuousVar, predictors)
    rownames(fittedValues) <- as.character(predictors)
    return( fittedValues )
  }
}
# Build a minimal "this exon vs. all others" model frame template from the
# object's column data, with placeholder dispersion/count columns to be
# filled in per exon later.
modelFrameSM <- function(object)
{
  mf <- as.data.frame( colData(object) )
  # "others" first so it is the baseline level under treatment contrasts.
  mf$exon <- factor( mf$exon, levels=c("others", "this") )
  mf$dispersion <- NA
  mf$count <- NA
  mf
}
# Fit the full per-gene model (all exons jointly) and return the fitted
# effects matrix with "geneID:row" rownames, or NULL when the gene has
# fewer than two usable exons or the fit fails.
getEffectsForGeneBM <- function(geneID, groups, notNAs, countsAll,
                                disps, features, mf, frm, numsamples,
                                fitExpToVar, averageOutExpression=TRUE)
{
  # Rows of this gene that passed the NA filter.
  rt <- groups %in% geneID & notNAs
  if( sum(rt) < 2 ){ return(NULL) }
  countsThis <- countsAll[rt,]
  # Strip the "gene:" prefix so rownames are bare exon ids.
  rownames(countsThis) <- gsub("\\S+:", "", rownames(countsThis))
  dispsThis <- disps[rt]
  names(dispsThis) <- features[rt]
  numexons <- sum(rt)
  # Take the first `numexons` template rows of each sample's block of the
  # big model frame, then relabel them with this gene's exon ids.
  newMf <- mf[as.vector( sapply( split( seq_len(nrow(mf)), mf$sample ), "[", seq_len( numexons ) ) ),]
  newMf$exon <- factor( rep( features[rt], numsamples ) )
  # Fill in the per-row dispersion and count by (exon, sample) lookup.
  for (i in seq_len(nrow(newMf))) {
    newMf[i, "dispersion"] <- dispsThis[as.character(newMf[i, "exon"])]
    newMf[i, "count"] <- countsThis[as.character(newMf[i, "exon"]), as.character(newMf[i, "sample"])]
  }
  newMf <- droplevels(newMf)
  coefficients <- fitAndArrangeCoefs( frm, balanceExons = TRUE, mf=newMf, fitExpToVar=fitExpToVar, geneID=geneID)
  if (is.null(coefficients)){
    return(coefficients)
  }
  # Transpose so rows are exons, columns the grouping variable's levels.
  ret <- t( getEffectsForPlotting(coefficients, averageOutExpression = averageOutExpression,
                                  groupingVar = fitExpToVar, frm=frm, mf=newMf))
  rownames(ret) <- paste(geneID, rownames(ret), sep = ":")
  return(ret)
}
# Fit the "this exon vs. others" model for a single exon (row `index` of
# countsAll) and return the fitted effects for the "this" column, or NULL
# when the fit fails.
getEffectsForExonsSM <- function(index, frm, countsAll, disps,
                                 mfSmall, averageOutExpression=TRUE,
                                 fitExpToVar)
{
  mfSmall$count <- countsAll[index,]
  mfSmall$dispersion <- disps[index]
  coefs <- fitAndArrangeCoefs( frm, mf=mfSmall, balanceExons=FALSE,
                               fitExpToVar=fitExpToVar, rownames(countsAll)[index])
  if( is.null(coefs) ){
    return(NULL)
  }
  effects <- getEffectsForPlotting(coefs,
                                   averageOutExpression=averageOutExpression,
                                   groupingVar=fitExpToVar, frm, mfSmall)
  effects[,"this"]
}
# Compute per-exon expression and splicing effect matrices for one gene.
# Small genes (numsamples * numexons < maxRowsMF) are fitted jointly over
# all exons; large genes fall back to one "this exon vs. others" fit per
# exon.  Returns list(expression=, splicing=) of matrices, or NULL when
# the joint fit fails.
getEffectsForGene <- function( geneID, object, maxRowsMF, fitExpToVar)
{
  rt <- object$groupID %in% geneID
  sampleData <- object@sampleData
  # A numeric covariate forces the per-exon path (maxRowsMF <- 0).
  if( is(object@sampleData[[fitExpToVar]], "numeric") ){
    maxRowsMF <- 0
  }
  numsamples <- nrow(object@sampleData)
  numexons <- sum(rt)
  featuresInGene <- object$featureID[rt]
  dispersions <- object$dispersion[rt]
  # Missing dispersions get a tiny positive placeholder.
  dispersions[is.na(dispersions)] <- 1e-08
  frm <- as.formula(paste("count ~", fitExpToVar, "* exon"))
  bigFlag <- numsamples*numexons < maxRowsMF
  if( bigFlag ){
    # Joint fit over all exons of the gene.
    mf <- object@modelFrameBM
    # Keep the first `numexons` template rows per sample, relabelled with
    # this gene's exon ids.
    mf <- mf[as.vector(sapply(split(seq_len(nrow(mf)), mf$sample),
                              "[", seq_len(numexons))), ]
    mf$exon <- factor(rep(featuresInGene, nrow(sampleData)))
    counts <- object$countData[rt,]
    rownames(counts) <- gsub("\\S+:", "", rownames(counts))
    names(dispersions) <- object$featureID[rt]
    # Fill per-row dispersion and count by (exon, sample) lookup.
    for (i in seq_len(nrow(mf))) {
      mf[i, "dispersion"] <-
        dispersions[as.character(mf[i, "exon"])]
      mf[i, "count"] <-
        counts[as.character(mf[i, "exon"]), as.character(mf[i, "sample"])]
    }
    mf <- droplevels(mf)
    coefs <- fitAndArrangeCoefs(frm, balanceExons=TRUE, mf=mf, fitExpToVar=fitExpToVar, geneID)
    if( is.null(coefs ) ){
      return(NULL)
    }
    # Same coefficients, with and without averaging out expression.
    splicing <- t(getEffectsForPlotting( coefs, groupingVar=fitExpToVar, averageOutExpression=TRUE, frm=frm, mf=mf))
    expression <- t(getEffectsForPlotting( coefs, groupingVar=fitExpToVar, averageOutExpression=FALSE, frm=frm, mf=mf))
    rownames(splicing) <- sprintf("%s:%s", geneID, rownames(splicing))
    rownames(expression) <- rownames(splicing)
    list( expression=expression, splicing=splicing )
  }else{
    # Per-exon path: fit each exon against the pooled counts of all its
    # sibling exons ("others").
    mf <- object@sampleData
    mf <- rbind( data.frame(mf, exon="this"), data.frame(mf, exon="others"))
    # "others" first so it is the baseline level.
    mf$exon <- factor( mf$exon, levels=c("others", "this") )
    countsThis <- object$countData[rt,]
    # Column sums of all sibling exons, one row per exon.
    countsOthers <- sapply( rownames( countsThis ),
                            function(x){
                              colSums(countsThis[!rownames(countsThis) %in% x,,drop=FALSE])
                            })
    countsOthers <- t(countsOthers)
    stopifnot(all(rownames(countsThis) == rownames(countsOthers)))
    effects <- lapply( seq_len(numexons), function(x){
      mf$count <- c( countsThis[x,], countsOthers[x,])
      mf$dispersion <- dispersions[x]
      coefs <- fitAndArrangeCoefs(frm, balanceExons=FALSE, mf=mf, fitExpToVar=fitExpToVar, geneID)
      if( is.null(coefs) ){
        return(NULL)
      }
      splicing <- getEffectsForPlotting( coefs, groupingVar=fitExpToVar, averageOutExpression=TRUE, frm=frm, mf=mf)[,"this"]
      expression <- getEffectsForPlotting( coefs, groupingVar=fitExpToVar, averageOutExpression=FALSE, frm=frm, mf=mf)[,"this"]
      list(splicing=splicing, expression=expression)
    })
    names(effects) <- rownames(object)[rt]
    splicing <- t(sapply(effects, "[[", "splicing"))
    expression <- t( sapply(effects, "[[", "expression" ))
    list( expression=expression, splicing=splicing )
  }
}
|
2f53cfbf217ee04c8c6b2ede85f97b40da10f1e6
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/1446_0/rinput.R
|
cbc7fa922c299508f507bb60592e8cc436900864
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read the Newick file, remove the root,
# and write the unrooted tree back out (for downstream codeml input).
library(ape)
testtree <- read.tree("1446_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1446_0_unrooted.txt")
|
e9e221b42433ab947dd925bdbbb09b5ebe5152cb
|
412dcb4e4dde561595e24271514324f368399222
|
/man/MAX.Rd
|
14f77c3ddd049a1dbe5a43d853c759e5c71cdad2
|
[] |
no_license
|
cran/MoLE
|
fed7494ccac0bce2a2e4b336700422b445c57e14
|
7834749b4a4395b05c06d7a32baae78a1842240e
|
refs/heads/master
| 2020-12-02T22:16:12.599204
| 2017-10-24T06:21:35
| 2017-10-24T06:21:35
| 96,105,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,623
|
rd
|
MAX.Rd
|
\name{MAX}
\alias{MAX}
\alias{MIN}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Find maximum or minimum value
}
\description{
Extension of the standard \code{max} and \code{min} functions that allows the rank position(s) of interest to be specified and returns either the value(s) or the rank(s).
}
\usage{
MAX(vector, rank = 1, value = FALSE, rank.adjust = TRUE, forceChoice = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{vector}{
Vector in which maximum/minimum element needs to be identified
}
\item{rank}{
value(s) or rank(s) of maximum values.
}
\item{value}{
Should value or rank be returned?
}
\item{rank.adjust}{
If maximum value of range of ranks exceeds vector length, should this be adjusted?
}
\item{forceChoice}{
In case of ties, should all results be returned or only one?
}
}
\value{
numeric vector (either value or rank)
}
\author{
Sander Lestrade
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\section{Warning}{
If minimum value (of a range of) rank(s) exceeds vector length, results are meaningless.
}
\seealso{
\code{MIN}, \code{NOUNS}, \code{SITUATION}, \code{SELECTVERB}, \code{SELECTACTOR}, \code{SELECTUNDERGOER}, \code{REFCHECK}, \code{TOPICCOPY}, \code{GENERALIZE}, \code{CHECKSUCCESS}, \code{ANALYZE}, \code{TYPEMATCH}, \code{NOUNMORPHOLOGY}, \code{VERBMORPHOLOGY}, \code{INTERPRET.INT}, \code{INTERPRET}, \code{NOUNDESEMANTICIZATION}, \code{VERBDESEMANTICIZATION}, \code{SEMUPDATE}, \code{DIE}
}
\examples{
a=rep(1:10, 2)
MAX(a, rank=1:3, value=TRUE, forceChoice=TRUE)
MIN(a, rank=1:3, value=TRUE, forceChoice=TRUE)
}
\keyword{misc}
|
49ce0f38b9e59b883a89cf9551220580bbde01e2
|
a85a7455b8df60364908da1d9838a1ffc93c8145
|
/R/bwdnrd.R
|
980df5cd17bdde9ad4308aa635e72e57132f2e30
|
[] |
no_license
|
cran/decon
|
2ce4105fd7432e22fb072406700f2b0f4d3beecc
|
8ec7e23c0e2c51bd93e78ff85596485c0b68b8ae
|
refs/heads/master
| 2021-10-28T08:53:28.754901
| 2021-10-20T19:50:05
| 2021-10-20T19:50:05
| 17,695,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
bwdnrd.R
|
# Rule-of-thumb bandwidth selector for deconvolution density estimation.
#
# Args:
#   y:     observed (contaminated) data; length >= 3 after NA removal.
#   sig:   standard deviation(s) of the measurement error -- a scalar for
#          homoscedastic errors, or one value per observation for
#          heteroscedastic errors.
#   error: error distribution, matched on its first three letters:
#          "normal" (default) or "laplacian".
#
# Returns: a single numeric bandwidth.
bw.dnrd <- function(y, sig, error = 'normal')
{
  # Fix: the original tested is.na(sig) BEFORE is.null(sig) inside `||`,
  # so a NULL `sig` produced a length-zero operand (an error in modern R)
  # instead of the intended message.  Check emptiness first.
  if (is.null(sig) || length(sig) == 0) stop("SD(s) can not be empty!")
  sig2 <- sig^2                # work with variances internally
  homo <- length(sig2) == 1    # homoscedastic iff a single SD was given
  if (!homo) {
    if (length(y) != length(sig2))
      stop("Different length of 'y' and the variances.")
    if (any(is.na(y)) || any(is.na(sig2))) {
      # Drop observations where either the value or its variance is NA.
      keep <- !is.na(y) & !is.na(sig2)
      y <- y[keep]
      sig2 <- sig2[keep]
    }
    s2bar <- mean(sig2)
  } else {
    if (is.na(sig2)) stop("SD(s) can not be empty!")
    s2bar <- sig2
  }
  sbar <- sqrt(s2bar)          # pooled error standard deviation
  if (length(y) < 3) stop("Data set is too small!")
  n <- length(y)
  # Plug-in rules depending on the error distribution.
  switch(substr(tolower(error), 1, 3),
    lap = (5 * sbar^4 / n)^(1/9),
    nor = sbar * (log(n) / 2)^(-0.5),
    stop("This error type is not supported yet!")
  )
}
|
ce78692dd70b7d3d189bd3c74ee462c285552c18
|
55ee84107d5ba9417f80b380a0eacbb45dff90c6
|
/pipeline/lin-cycif/parse-lin-cycif.R
|
7287c3a2be37e2577ba98bf2fc3a2bda182028c8
|
[] |
no_license
|
camlab-bioml/astir-manuscript
|
09cce196c4e58d08f79d67e4d9703bb6a0279661
|
045a2a5e52c5db4d3614f33c1572708b6a58adff
|
refs/heads/master
| 2023-04-14T00:26:10.384924
| 2021-08-03T14:31:27
| 2021-08-03T14:31:27
| 257,439,838
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,432
|
r
|
parse-lin-cycif.R
|
# Pipeline script: read a Lin CyCIF single-cell CSV, normalise intensities,
# and write the result as a SingleCellExperiment (RDS) plus a flat CSV.
.libPaths(c("/home/ltri/campbell/kcampbel/R/x86_64-redhat-linux-gnu-library/3.6", .libPaths()))
suppressPackageStartupMessages({
  library(tidyverse)
  library(SingleCellExperiment)
  library(argparser)
  library(devtools)
  library(matrixStats)
})
# Pin the dplyr verbs explicitly so other attached packages can't mask them.
select <- dplyr::select
mutate <- dplyr::mutate
arrange <- dplyr::arrange
rename <- dplyr::rename
filter <- dplyr::filter
devtools::load_all("../taproom")
# Command-line interface: input CSV, sample id, and two output paths.
p <- arg_parser("Read lin cycif data")
p <- add_argument(p, "--input_sc", "Input CSV")
p <- add_argument(p, "--id", "Sample id")
p <- add_argument(p, "--output_csv", "Output CSV location")
p <- add_argument(p, "--output_rds", "Output RDS location")
argv <- parse_args(p)
df <- read_csv(argv$input_sc)
# Split morphology/position columns (Area..Y) from the marker intensities.
cd <- select(df, Area:Y)
df <- select(df, -(Area:Y))
# Markers as rows, cells as columns.
expr_mat_raw <- t(as.matrix(df))
# expr_mat_raw <- t(apply(expr_mat_raw, 1, winsorize_one, c(0.01, 0.99)))
# Scale each marker by its mean across cells.
expr_mat_raw <- t(apply(expr_mat_raw, 1, function(x) {
  x / mean(x)
}))
# rm <- rowMins(expr_mat_raw)
# expr_mat_raw <- expr_mat_raw - rm # make minimum as zero
# Variance-stabilising arcsinh transform (cofactor 5).
expr_mat <- asinh(expr_mat_raw / 5)
# expr_mat <- t( scale( t (expr_mat ), center = FALSE))
rownames(expr_mat) <- rownames(expr_mat_raw)
sce <- SingleCellExperiment(
  assays = list(raw = expr_mat_raw, logcounts = expr_mat),
  colData = cd
)
# Cell ids: "<sample id>_<cell index>".
colnames(sce) <- paste0(argv$id, "_", seq_len(ncol(sce)))
to_csv(sce, argv$output_csv, include_xy=FALSE)
saveRDS(sce, argv$output_rds)
|
07633bebf03639a289fe0eb590f3d674a1859473
|
05ebb4d386cb2604bb7642bd79d09fa3ca76dc72
|
/man/tbk_hdrs.Rd
|
6efdb8477b8fea2b4e142b70c8b5fd4cca580481
|
[] |
no_license
|
trichelab/tbmater
|
a322d5b3c558c4b45474e3ed1e394754543cc5d5
|
dafbf46ca7a021849a0e5b86c1669fe7d2ad3447
|
refs/heads/master
| 2023-01-08T04:53:43.023578
| 2020-11-12T02:59:17
| 2020-11-12T02:59:17
| 312,152,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 307
|
rd
|
tbk_hdrs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tbk_hdrs.R
\name{tbk_hdrs}
\alias{tbk_hdrs}
\title{retrieve tbk headers}
\usage{
tbk_hdrs(tbk_fnames)
}
\arguments{
\item{tbk_fnames}{the tbk file names}
}
\value{
an S3 object of class tbk
}
\description{
retrieve tbk headers
}
|
87e9182a36f8bd9e5666aadd0e39ce223655fbe3
|
381c0c5080ca97ffa2ef2fafea236c723869e1cd
|
/0_R_scripts/2.2_Relative_usage.R
|
36c1a2d0fb8053a355f64ce8f7ec4bc6cec1c40a
|
[] |
no_license
|
ath32/ASCs
|
051d60510c4ed09d986f5cd30eddad66a573b5d3
|
33b0ee4df0cab2f0e2a6d06a32fd7231fb5799a2
|
refs/heads/master
| 2020-04-22T15:31:53.858680
| 2019-08-12T11:55:37
| 2019-08-12T11:55:37
| 170,480,341
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,412
|
r
|
2.2_Relative_usage.R
|
### SOURCE ###
# Load the per-genome stop codon frequency table: for each downstream
# position +0..+6 the total stop count and TAA/TAG/TGA counts, plus GC
# and GC3 content.
source <- "/Users/atho/Documents/Project_ASCs/4_Outputs/CSVs/3.1_Frequencies_all.csv"
data = read.csv(source, header = TRUE)
#Loading columns...
Acc <- unlist(data[1])
stop.0 <- unlist(data[2])
taa.0 <- unlist(data[3])
tag.0 <- unlist(data[4])
tga.0 <- unlist(data[5])
stop.1 <- unlist(data[6])
taa.1 <- unlist(data[7])
tag.1 <- unlist(data[8])
tga.1 <- unlist(data[9])
stop.2 <- unlist(data[10])
taa.2 <- unlist(data[11])
tag.2 <- unlist(data[12])
tga.2 <- unlist(data[13])
stop.3 <- unlist(data[14])
taa.3 <- unlist(data[15])
tag.3 <- unlist(data[16])
tga.3 <- unlist(data[17])
stop.4 <- unlist(data[18])
taa.4 <- unlist(data[19])
tag.4 <- unlist(data[20])
tga.4 <- unlist(data[21])
stop.5 <- unlist(data[22])
taa.5 <- unlist(data[23])
tag.5 <- unlist(data[24])
tga.5 <- unlist(data[25])
stop.6 <- unlist(data[26])
taa.6<- unlist(data[27])
tag.6 <- unlist(data[28])
tga.6 <- unlist(data[29])
gc <- unlist(data[30])
gc3 <- unlist(data[31])
#Calculate relative codon usage
# (each codon's share of all stop codons at that position, in percent)
r.taa.1 <- taa.1 / stop.1 * 100
r.tga.1 <- tga.1 / stop.1 * 100
r.tag.1 <- tag.1 / stop.1 * 100
r.taa.2 <- taa.2 / stop.2 * 100
r.tga.2 <- tga.2 / stop.2 * 100
r.tag.2 <- tag.2 / stop.2 * 100
r.taa.3 <- taa.3 / stop.3 * 100
r.tga.3 <- tga.3 / stop.3 * 100
r.tag.3 <- tag.3 / stop.3 * 100
r.taa.4 <- taa.4 / stop.4 * 100
r.tga.4 <- tga.4 / stop.4 * 100
r.tag.4 <- tag.4 / stop.4 * 100
r.taa.5 <- taa.5 / stop.5 * 100
r.tga.5 <- tga.5 / stop.5 * 100
r.tag.5 <- tag.5 / stop.5 * 100
r.taa.6 <- taa.6 / stop.6 * 100
r.tga.6 <- tga.6 / stop.6 * 100
r.tag.6 <- tag.6 / stop.6 * 100
r.taa.0 <- taa.0 / stop.0 * 100
r.tga.0 <- tga.0 / stop.0 * 100
r.tag.0 <- tag.0 / stop.0 * 100
#Plotting stop codon usage at each position
# One panel per position (+0..+6): relative TAA/TAG/TGA usage vs GC3.
par(mfrow=c(2,4), cex=1)
plot(gc3, r.taa.0, pch=16, col="#B3F5FC", xlab = "GC3 content (%)", ylab = "Stop codon usage (%)", ylim=c(0,100))
points(gc3, r.tag.0, col="#01579B", pch=16)
points(gc3, r.tga.0, pch=16, col="#29B6F6")
legend("topright", pch=16, col = c("#B3F5FC", "#01579B", "#29B6F6"), legend = c("TAA", "TAG", "TGA"), cex=1)
title("Primary Stop (Position +0)")
plot(gc3, r.taa.1, pch=16, col="#B3F5FC", xlab = "GC3 content (%)", ylab = "Stop codon usage (%)", ylim=c(0,100))
points(gc3, r.tag.1, col="#01579B", pch=16)
points(gc3, r.tga.1, pch=16, col="#29B6F6")
legend("topright", pch=16, col = c("#B3F5FC", "#01579B", "#29B6F6"), legend = c("TAA", "TAG", "TGA"))
title("Position +1")
plot(gc3, r.taa.2, pch=16, col="#B3F5FC", xlab = "GC3 content (%)", ylab = "Stop codon usage (%)", ylim=c(0,100))
points(gc3, r.tag.2, col="#01579B", pch=16)
points(gc3, r.tga.2, pch=16, col="#29B6F6")
legend("topright", pch=16, col = c("#B3F5FC", "#01579B", "#29B6F6"), legend = c("TAA", "TAG", "TGA"))
title("Position +2")
plot(gc3, r.taa.3, pch=16, col="#B3F5FC", xlab = "GC3 content (%)", ylab = "Stop codon usage (%)", ylim=c(0,100))
points(gc3, r.tag.3, col="#01579B", pch=16)
points(gc3, r.tga.3, pch=16, col="#29B6F6")
legend("topright", pch=16, col = c("#B3F5FC", "#01579B", "#29B6F6"), legend = c("TAA", "TAG", "TGA"))
title("Position +3")
plot(gc3, r.taa.4, pch=16, col="#B3F5FC", xlab = "GC3 content (%)", ylab = "Stop codon usage (%)", ylim=c(0,100))
points(gc3, r.tag.4, col="#01579B", pch=16)
points(gc3, r.tga.4, pch=16, col="#29B6F6")
legend("topright", pch=16, col = c("#B3F5FC", "#01579B", "#29B6F6"), legend = c("TAA", "TAG", "TGA"))
title("Position +4")
plot(gc3, r.taa.5, pch=16, col="#B3F5FC", xlab = "GC3 content (%)", ylab = "Stop codon usage (%)", ylim=c(0,100))
points(gc3, r.tag.5, col="#01579B", pch=16)
points(gc3, r.tga.5, pch=16, col="#29B6F6")
legend("topright", pch=16, col = c("#B3F5FC", "#01579B", "#29B6F6"), legend = c("TAA", "TAG", "TGA"))
title("Position +5")
plot(gc3, r.taa.6, pch=16, col="#B3F5FC", xlab = "GC3 content (%)", ylab = "Stop codon usage (%)", ylim=c(0,100))
points(gc3, r.tag.6, col="#01579B", pch=16)
points(gc3, r.tga.6, pch=16, col="#29B6F6")
legend("topright", pch=16, col = c("#B3F5FC", "#01579B", "#29B6F6"), legend = c("TAA", "TAG", "TGA"))
title("Position +6")
#Correlation testing
# Shapiro-Wilk tests check normality of GC/GC3; rank-based (Spearman)
# correlations are then computed for each codon/position against GC3.
shapiro.test(gc)
shapiro.test(gc3)
spr.r.taa.0 <- cor.test( ~ r.taa.0 + gc3, method="spearman")
spr.r.tag.0 <- cor.test( ~ r.tag.0 + gc3, method="spearman")
spr.r.tga.0 <- cor.test( ~ r.tga.0 + gc3, method="spearman")
spr.r.taa.1 <- cor.test( ~ r.taa.1 + gc3, method="spearman")
spr.r.tag.1 <- cor.test( ~ r.tag.1 + gc3, method="spearman")
spr.r.tga.1 <- cor.test( ~ r.tga.1 + gc3, method="spearman")
spr.r.taa.2 <- cor.test( ~ r.taa.2 + gc3, method="spearman")
spr.r.tag.2 <- cor.test( ~ r.tag.2 + gc3, method="spearman")
spr.r.tga.2 <- cor.test( ~ r.tga.2 + gc3, method="spearman")
spr.r.taa.3 <- cor.test( ~ r.taa.3 + gc3, method="spearman")
spr.r.tag.3 <- cor.test( ~ r.tag.3 + gc3, method="spearman")
spr.r.tga.3 <- cor.test( ~ r.tga.3 + gc3, method="spearman")
spr.r.taa.4 <- cor.test( ~ r.taa.4 + gc3, method="spearman")
spr.r.tag.4 <- cor.test( ~ r.tag.4 + gc3, method="spearman")
spr.r.tga.4 <- cor.test( ~ r.tga.4 + gc3, method="spearman")
spr.r.taa.5 <- cor.test( ~ r.taa.5 + gc3, method="spearman")
spr.r.tag.5 <- cor.test( ~ r.tag.5 + gc3, method="spearman")
spr.r.tga.5 <- cor.test( ~ r.tga.5 + gc3, method="spearman")
spr.r.taa.6 <- cor.test( ~ r.taa.6 + gc3, method="spearman")
spr.r.tag.6 <- cor.test( ~ r.tag.6 + gc3, method="spearman")
spr.r.tga.6 <- cor.test( ~ r.tga.6 + gc3, method="spearman")
|
17a86d1ae3548101979ce2b58f5757a561c171ef
|
430e757a1e6dae14ddd80ec37268adc410ba5793
|
/man/fp_average_replicates.Rd
|
fce63d6483ddb6eb3d80ee9dcc5fc1a1417fd968
|
[] |
no_license
|
alb202/rfret
|
bf6496460320886f445f40f4c18e27e77a399ac8
|
9b444e238cd89f829e278e6d9505c11d7e4d450c
|
refs/heads/master
| 2021-01-21T18:53:37.742723
| 2017-11-30T22:05:20
| 2017-11-30T22:05:20
| 92,093,777
| 0
| 0
| null | 2017-05-22T19:54:13
| 2017-05-22T19:54:13
| null |
UTF-8
|
R
| false
| true
| 1,774
|
rd
|
fp_average_replicates.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fp_average_replicates.R
\name{fp_average_replicates}
\alias{fp_average_replicates}
\title{Average technical replicates from a fluorescence polarization or anisotropy experiment}
\usage{
fp_average_replicates(raw_data)
}
\arguments{
\item{raw_data}{A dataframe containing the fluorescence polarization or
anisotropy data. It must contain at least 8 columns named:
\describe{
\item{Experiment}{A unique name identifying each experiment.}
\item{Type}{For example, "titration".}
\item{Replicate}{A number identifying the technical replicate (1, 2, etc.).}
\item{Observation}{A number identifying each observation in a titration
series (corresponds to the plate column numbers, if experiments are set
up as rows in a 384-well plate). The number of observations for an
experiment and its blanks must match, and a given observation number must
associate data points at the same concentration in the titration series.}
\item{concentration}{The ligand concentration in the titration series.}
\item{polarization}{Fluorescence polarization.}
\item{anisotropy}{Fluorescence anisotropy.}
\item{intensity}{Fluorescence intensity.}
}
The output of \code{\link{fp_format_data}} can be used directly as input
for this function.}
}
\value{
A dataframe containing the reduced dataset after averaging across
replicates. It contains all of the above columns \emph{except}
\code{Replicate}, because it returns the average values over replicates.
}
\description{
This function calculates averages of fluorescence polarization
or anisotropy values from an arbitrary number of technical replicates.
}
\seealso{
\code{\link{fp_format_data}} to prepare datasets for use with
\code{fp_average_replicates}.
}
|
2bd7e0347f71adb29c140ed390276f10da0eb7c6
|
f46108919f134e6f37cc9390a1064878697a6fb5
|
/run_analysis.R
|
a7101f5a8cb69be1d5ca81a801ed9e05d44dfd08
|
[] |
no_license
|
smithraphael/Getting_Cleaning_Data
|
b4fdcdff5ce42e2a2767375402f8dd34084f2d38
|
7b08fb5a5b91b9b0873b93b597874966b2d4434a
|
refs/heads/master
| 2016-09-06T19:11:02.648490
| 2015-02-13T12:27:05
| 2015-02-13T12:27:05
| 30,621,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,183
|
r
|
run_analysis.R
|
# Course project: tidy the UCI HAR smartphone dataset.
# Reads the raw train/test files, merges them, keeps mean/std measurements,
# attaches descriptive activity names, cleans up variable names and writes
# the per-subject / per-activity averages to "tidyData.txt".
library("reshape2")
library("data.table")

# NOTE(review): hard-coded Windows working directory - adjust per machine.
setwd("C:\\Raphael\\estudos\\datasciencecoursera\\Clearing Data\\Week3")
dir_train <- ".\\UCI HAR Dataset\\train"
dir_test <- ".\\UCI HAR Dataset\\test"
dir_feature <- ".\\UCI HAR Dataset"

#1 Merges the training and the test sets to create one data set
x_train <- read.table(paste(dir_train, "\\", "X_train.txt", sep=""), sep="", header=FALSE)
subject_train <- read.table(paste(dir_train, "\\", "subject_train.txt", sep=""), col.names=c("subject"))
y_train <- read.table(paste(dir_train, "\\", "y_train.txt", sep="") , col.names=c("activity"))
data_train <- cbind(x_train, subject_train, y_train)

x_test <- read.table(paste(dir_test, "\\", "X_test.txt", sep=""), comment.char="")
subject_test <- read.table(paste(dir_test, "\\", "subject_test.txt", sep=""), col.names=c("subject"))
y_test <- read.table(paste(dir_test, "\\", "y_test.txt", sep=""), col.names=c("activity"))
data_test <- cbind(x_test, subject_test, y_test)

# Stack train on top of test; columns line up because both were built the
# same way (561 features + subject + activity).
data_set <- rbind(data_train, data_test)

#2 Extracts only the measurements on the mean and standard deviation for each measurement
# include column names at data_set using "features.txt"
# values are in the "filtered_data_set"
feature_list <- read.table(paste(dir_feature, "\\", "features.txt", sep=""), col.names = c("id", "name"))
feature_v <- c(as.vector(feature_list[, "name"]), "subject", "activity")

# Vectorized rename of the 561 feature columns (replaces the original
# element-by-element for loop); "subject"/"activity" keep their cbind names.
names(data_set)[1:561] <- feature_v[1:561]

# search for columns wich names contains "mean", "Mean" and "std"
# plus Activity and Subject colimns
# NOTE(review): plain substring matching deliberately keeps meanFreq()/...Mean
# columns as well - confirm that is the intended selection.
filter_col_mean <- data_set[ , grepl( "mean" , names( data_set ) ) ]
filter_col_Mean <- data_set[ ,grepl ("Mean", names(data_set))]
filter_col_std <- data_set[ , grepl( "std" , names( data_set ) ) ]
filter_col_subject <- data_set[ , grepl( "subject" , names( data_set ) ) ]
filter_col_activity <- data_set[ , grepl( "activity" , names( data_set ) ) ]
filtered_data_set <- cbind(filter_col_mean, filter_col_Mean, filter_col_std,
                           filter_col_subject, filter_col_activity)

#3 Uses descriptive activity names to name the activities in the data set
activities <- read.table(paste(dir_feature, "\\", "activity_labels.txt", sep=""), col.names=c("id", "name"))
m <- merge(filtered_data_set, activities, by.x = "filter_col_activity", by.y = "id")

#4 Appropriately labels the data set with descriptive variable names
names(m) <- gsub("filter_col_activity", "Activity_id", names(m))
names(m) <- gsub("mean()", "Mean", names(m), fixed = TRUE)
names(m) <- gsub("std()", "Standard Deviation", names(m), fixed = TRUE)
names(m) <- gsub("-", "_", names(m), fixed = TRUE)
names(m) <- gsub("name", "Activity Name", names(m))
names(m) <- gsub("filter_col_subject", "Subject", names(m))
names(m) <- gsub(" ", "_", names(m), fixed = TRUE)

#5 From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
dt <- melt(m, id.vars = c("Subject","Activity_Name"))
tidym <- dcast(dt, Subject + Activity_Name ~ variable, mean)
write.table(tidym, "tidyData.txt", quote = FALSE, row.name=FALSE)
|
827eb7a0cd97612ac3738ba77d70fc32add97824
|
81509106d36b18992d00a2652bd2cb6b4c104add
|
/man/organize_data.Rd
|
627c964c13b47d1d4de47594ab3439f22928fb56
|
[
"MIT"
] |
permissive
|
cpsievert/shinymodels
|
2da4fd33ea9de707603f8b45efa7d70d0d35186a
|
2cc0dd418bb9291791306a16be6e2dd66d3ba91e
|
refs/heads/main
| 2023-07-15T00:19:57.530026
| 2021-08-05T18:09:40
| 2021-08-05T18:09:40
| 393,146,906
| 0
| 1
|
NOASSERTION
| 2021-08-05T19:14:52
| 2021-08-05T19:09:24
| null |
UTF-8
|
R
| false
| true
| 1,236
|
rd
|
organize_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organize_data.R
\name{organize_data}
\alias{organize_data}
\alias{organize_data.default}
\alias{organize_data.tune_results}
\title{Extract data from objects to use in a shiny app}
\usage{
organize_data(x, hover_cols = NULL, ...)
\method{organize_data}{default}(x, hover_cols = NULL, ...)
\method{organize_data}{tune_results}(x, hover_cols = NULL, ...)
}
\arguments{
\item{x}{The \code{\link[tune:fit_resamples]{tune::fit_resamples()}} result.}
\item{hover_cols}{The columns to display while hovering.}
\item{...}{Other parameters not currently used.}
}
\value{
A list with elements data frame and character vectors. The data frame includes
an outcome variable \code{.outcome}, a prediction variable \code{.pred}, model
configuration variable \code{.config}, and hovering columns \code{.hover}. The default
configuration is based on the optimal value of the first metric.
}
\description{
This function joins the result of \code{\link[tune:fit_resamples]{tune::fit_resamples()}} to the original
dataset to give a list that can be an input for the Shiny app.
}
\keyword{classes}
\keyword{classif}
\keyword{graphs}
\keyword{models}
\keyword{regression}
|
5e1a4bbeb2620e23e910dceec9d982e1dc308856
|
2c0e460a0d1a229640da96cd4a1bb2f1ba2ab8b7
|
/man/prepare_model.Rd
|
5121422904a4d53d0b0770410efdb9de984b1ba5
|
[
"MIT"
] |
permissive
|
ensley/gpcovr
|
d24aa981d485e06e54d9e8de1776c388e1a02af3
|
8d96197b965e9807f7b9c17fc4fed7c34163617a
|
refs/heads/master
| 2021-05-08T17:25:39.116358
| 2018-01-30T02:41:46
| 2018-01-30T02:41:46
| 119,471,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,195
|
rd
|
prepare_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_funcs.R
\name{prepare_model}
\alias{prepare_model}
\title{Prepare the covariance model}
\usage{
prepare_model(family, params)
}
\arguments{
\item{family}{The parametric family the covariance function will belong to}
\item{params}{A vector of parameters for the covariance function. They must be
\itemize{
\item \code{matern}: (nu, rho, sigma, nugget)
\item \code{dampedcos}: (lambda, theta, sigma, nugget)
}}
}
\value{
A \code{GPmodel} object containing the following:
\itemize{
\item \code{family}: The family
\item \code{model}: The model, from the \code{RandomFields} package
\item \code{params}: The parameter vector
\item \code{specdens}: The spectral density function
\item \code{knotlocs}: Range of knots
\item \code{logx}: Range of log frequency values over which
\code{specdens} seems to be well-defined
}
}
\description{
Creates the model (from the RandomFields package) and spectral density
function for the specified covariance family.
}
\details{
This gets called internally by \code{\link{simulate_gp}}.
}
\examples{
model <- prepare_model('matern', c(1.5, 0.2, 1, 0.01))
}
|
54c48bcc4d7600cc496d338c70d3c3bcb2d31992
|
25fd9e2057ffbe7bd0b48ed73dc58d3bd45f29d8
|
/supplementary/figures-codes/FigureS1.R
|
33163dc3720ca27130da8b3d251f4a262a3e2520
|
[] |
no_license
|
benhvt/PostClusteringDifferenceTesting
|
981ba13939e3f09d619a4b6da8f7167ad31df7d9
|
bcd692d26ce7d9e799df1a8769defa19fe8574f3
|
refs/heads/main
| 2023-04-07T03:53:37.924294
| 2023-02-07T13:46:18
| 2023-02-07T13:46:18
| 463,532,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,606
|
r
|
FigureS1.R
|
#Figure S1
# Builds supplementary Figure S1: behaviour of four unimodality tests
# (Silverman, DipTest, Cheng & Hall, Ameijeiras) in terms of type-I error
# (p-values under H0), computation time, and statistical power, assembled
# from pre-computed simulation result CSVs.
library(ggplot2)
library(RColorBrewer)
library(wesanderson)
library(viridis)
library(paletteer)
library(patchwork)
library(cowplot)
library(dplyr)
library(ggcorrplot)
library(latex2exp)
library(multimode)
source(file = "utils.R")
# Global theme default; individual panels below override with theme_classic().
theme_set(theme_bw())

# Results of the simulation study
# NOTE(review): the original header said "2000 simulations" but nsimu below
# is 500 - confirm which count matches the CSVs.
## Under the null
nsimu <- 500
pval_null_U <- as.matrix(read.csv(file = "supplementary/simulations-results/results_figureS1/multimode_test_unimodale_uniform.csv"))
pval_null_N <- as.matrix(read.csv(file = "supplementary/simulations-results/results_figureS1/multimode_test_unimodale_gaussian.csv"))

# The CSV holds one column per test; flatten column-wise and label each run
# with its test name (order must match the CSV column order - confirm).
pval_null_U.df <- data.frame(pvalues = as.numeric(pval_null_U),
                             Test = c(rep("Silverman", nsimu),
                                      rep("DipTest", nsimu),
                                      rep("Cheng and Hall", nsimu),
                                      rep("Ameijeiras", nsimu)))
pval_null_N.df <- data.frame(pvalues = as.numeric(pval_null_N),
                             Test = pval_null_U.df$Test)

pval_null_U.df$Distribution <- "Uniform"
pval_null_N.df$Distribution <- "Gaussian"

pval_null <- rbind(pval_null_U.df, pval_null_N.df)

# Panel A: distribution of p-values under H0 for each test and distribution.
p_unimod <- ggplot(pval_null)+ aes(x=Distribution, y = pvalues, colour = Test) +
  geom_boxplot() +
  scale_colour_brewer(palette = "Dark2") +
  ylab("p-values (under H0)") +
  ylim(c(0, 1)) + theme_classic(base_size=17) +
  theme(legend.position = "none",
        axis.title = element_text(size = 14),
        strip.text = element_text(size=12))

## Under the alternative
# Power as a function of the mean difference delta between the two modes.
delta <- seq(0,8, 0.5)
power_multimod_delta <- read.csv(file = "supplementary/simulations-results/results_figureS1/multimode_test_power.csv")
power_multimod_delta <- power_multimod_delta[,-1]

power_delta <- data.frame(Power = as.numeric(as.matrix(power_multimod_delta)),
                          Test = c(rep("Silverman", length(delta)),
                                   rep("DipTest", length(delta)),
                                   rep("Cheng and Hall", length(delta)),
                                   rep("Ameijeiras", length(delta))),
                          delta = rep(delta, 4))

# Panel C: power curve vs delta.
p_power_delta <- ggplot(power_delta) + aes(x=delta, y = Power, colour = Test) +
  geom_point() +
  geom_line() +
  scale_colour_brewer(palette = "Dark2") +
  xlab(TeX(r'(Value of the mean difference $\delta$)'))+
  ylab("Statistical power (5% levels)") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        strip.text = element_text(size=12))

# Power and timing as a function of the sample size n.
nn <- c(10,15, 25, 50, 75, 100, 200, 1000)
power_multimod_n <- as.matrix(read.csv(file="supplementary/simulations-results/results_figureS1/multimode_test_power_n.csv",
                                       row.names = NULL))
time_multimod_n <- as.matrix(read.csv(file="supplementary/simulations-results/results_figureS1/time_multimode.csv"))
# Drop the first column (row index written by write.csv).
power_multimod_n <- power_multimod_n[,-1]
time_multimod_n <- time_multimod_n[,-1]

multimod_n <- data.frame(N=rep(nn, 4),
                         Power = as.numeric(power_multimod_n),
                         Time = as.numeric(time_multimod_n),
                         Test = c(rep("Silverman", length(nn)),
                                  rep("DipTest", length(nn)),
                                  rep("Cheng and Hall", length(nn)),
                                  rep("Ameijeiras", length(nn))))

# Panel D: power vs sample size.
p_multimod_power_n <- ggplot(multimod_n) + aes(x=N, y=Power, colour = Test) +
  geom_point() +
  geom_line() +
  scale_colour_brewer(palette = "Dark2") +
  xlab("Number of observations (n)") +
  ylab("Statistical power (5% levels)") +
  theme_classic() +
  theme(legend.position = "none",
        axis.title = element_text(size = 14),
        strip.text = element_text(size=12))

# Panel B: mean computation time vs sample size (log10 y-axis).
p_multimod_time_n <- ggplot(multimod_n) + aes(x=N, y = Time, colour = Test) +
  geom_point() +
  geom_line() +
  scale_colour_brewer(palette = "Dark2") +
  xlab("Number of observations (n)") +
  ylab("Mean computation time (sec)
log10 scale") +
  theme_classic() +
  theme(legend.position = "none",
        axis.title = element_text(size = 14),
        strip.text = element_text(size=12)) +
  scale_y_continuous(trans = 'log10')

# Make figure
# 2x2 patchwork layout with a shared legend and bold A-D panel tags.
p_multimod<- (p_unimod+p_multimod_time_n )/(p_power_delta + p_multimod_power_n) +
  plot_layout(guides = "collect") +
  plot_annotation(tag_levels = "A") & theme(plot.tag = element_text(face = "bold", size = 14))
p_multimod

ggsave(p_multimod, filename = "supplementary/figures/FigureS1.pdf",
       dpi = 600,
       width = 225,
       height = 150,
       units = "mm")
|
704f0e31e414d37dba54929d8acde74898a7f7bb
|
6f257dfac5625c2bc5cd0fa418c94b432bac472d
|
/inst/examples/datetimeExample.R
|
a8c3b19276a7c32ee8ecfec226a202459cc754bf
|
[] |
no_license
|
GastonMauroDiaz/caiman
|
591ac8fa2d46a291ff2692cd825021ec3970b650
|
c37d0e4a0af4774b67c30dc3c22c1b55cbe3f153
|
refs/heads/master
| 2022-01-25T21:34:30.218488
| 2022-01-21T18:52:43
| 2022-01-21T18:52:43
| 61,065,044
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
datetimeExample.R
|
# Example for datetime(): read and set the acquisition timestamp of a photo
# object created with loadPhoto().
x <- loadPhoto()
datetime(x)
# Assign a new timestamp as a "YYYY-MM-DD HH:MM:SS" string.
datetime(x) <- "1980-11-20 14:00:00"
datetime(x)
\dontrun{
# An invalid string is rejected (wrapped in \dontrun so automated example
# checks do not execute it).
datetime(x) <- "an error"
}
|
c8d951643c331ba2518dce62e9e0d76d3a277efd
|
a48797beca55474d7b39676389f77f8f1af76875
|
/man/satisfies_numadmix.Rd
|
7b3fc368ffb7f2ae81bce572e560d9441c07c43b
|
[] |
no_license
|
uqrmaie1/admixtools
|
1efd48d8ad431f4a325a4ac5b160b2eea9411829
|
26759d87349a3b14495a7ef4ef3a593ee4d0e670
|
refs/heads/master
| 2023-09-04T02:56:48.052802
| 2023-08-21T21:15:27
| 2023-08-21T21:15:27
| 229,330,187
| 62
| 11
| null | 2023-01-23T12:19:57
| 2019-12-20T20:15:32
|
R
|
UTF-8
|
R
| false
| true
| 870
|
rd
|
satisfies_numadmix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toposearch.R
\name{satisfies_numadmix}
\alias{satisfies_numadmix}
\title{Test admixture constraints on a graph}
\usage{
satisfies_numadmix(graph, admix_constraints)
}
\arguments{
\item{graph}{An admixture graph}
\item{admix_constraints}{A data frame with columns \code{pop}, \code{min}, \code{max}}
}
\value{
\code{TRUE} if all admixture constraints are satisfied, else \code{FALSE}
}
\description{
This function returns \code{TRUE} if and only if the admixture graph satisfies all constraints on
the number of admixture events for the populations in \code{admix_constraints}
}
\examples{
\dontrun{
# At least one admixture event for C, and none for D:
constrain_cd = tribble(
~pop, ~min, ~max,
'C', 1, NA,
'D', NA, 0)
satisfies_numadmix(random_admixturegraph(5, 2), constrain_cd)
}
}
|
8f86ded60b3990ac89e64384aaeef4649298fc38
|
694f56aa8fe0a0d03f142aae4c1d8686f50a7dbc
|
/tests/manual/manual_test_large_images.R
|
7e9309be43af38dd8504362c9d4d0834de0d99a5
|
[
"ISC",
"Apache-2.0"
] |
permissive
|
brianwdavis/quadrangle
|
f2e4dbd888eb23c31a75de2be916d3f35c4b5961
|
cb012c0934360065bd608aa24a5485ca125ed362
|
refs/heads/master
| 2023-02-28T07:36:29.262282
| 2023-02-22T19:05:58
| 2023-02-22T19:05:58
| 209,638,942
| 7
| 2
|
NOASSERTION
| 2023-02-23T09:11:10
| 2019-09-19T20:02:28
|
JavaScript
|
UTF-8
|
R
| false
| false
| 612
|
r
|
manual_test_large_images.R
|
# Manual (slow / network-dependent) test: verify that a large photograph
# (~8 MB, ~20 Mpx) can be scanned without crashing the embedded JS engine.
library(testthat)
library(quadrangle)

context("Slow/network tests to be run manually")

test_that(
  "Large images (~8MB, 20Mpx) don't crash JS engine",
  {
    skip_if_offline()
    skip_on_cran()

    # Remote fixture image hosted in the QRdemo repository.
    large_path <- "https://github.com/brianwdavis/QRdemo/raw/master/inst/extdata/DSC_0003.jpg"
    large_image <- magick::image_read(large_path)

    # Mirror the image horizontally, then run a full scan with no seed
    # corner points supplied.
    flipped <- magick::image_flop(large_image)
    x <- qr_scan_js_from_corners(flipped, code_pts = data.frame())

    # Four corner markers, two points each, and the decoded payload.
    expect_equal(nrow(x$location), 8)
    expect_equal(x$data, "W ETO C1 T1")
  }
)

# testthat::test_file("./tests/manual/manual_test_large_images.R")
|
23a854fad48fd98a3968cc469ee03d1bd76c9cb9
|
968c93b1f961e4ddb5c1e24d9c76779897952ed8
|
/man/OneRow.Rd
|
0a07c2cfb9e6cd3d4413ded52fe09d67a3d372c0
|
[] |
no_license
|
cran/ImaginR
|
5c4ebee995d79bbe0f281b3186c53fc181a91dc0
|
64ff9c7dbe59f39b001c93ceb69703421bdea2c8
|
refs/heads/master
| 2021-01-23T02:11:01.010826
| 2017-05-31T05:23:29
| 2017-05-31T05:23:29
| 92,912,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 946
|
rd
|
OneRow.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImaginR.R
\name{OneRow}
\alias{OneRow}
\title{Get phenotype, HEX and HSV color code for one picture}
\usage{
OneRow(picture)
}
\arguments{
\item{picture}{The picture uploaded by load.image()}
}
\value{
The HEX and HSV color code and the color phenotype of the pearl oyster's inner shell for one image in one row
}
\description{
Get results in one row
}
\details{
In header:
\itemize{
\item{id : the name of your pictures}
\item{h : the hue of the hsv color code}
\item{s : the saturation of the hsv color code}
\item{v : the value of the hsv color code}
\item{hex : the hexadecimal color code}
\item{phenotype : returns the color phenotype of the pearl oyster's inner shell (\emph{Pinctada margaritifera})}
}
}
\examples{
fpath <- system.file('extdata/image.jpg',package='ImaginR')
picture <- load.image(fpath)
OneRow(picture)
}
|
0240e315daad27b280748731f7dc778737966516
|
e756bcd2b578b74c238dbca53ef88e2d026bd121
|
/man/h_rainsnow.Rd
|
0086ba0774ae5e83ab82b54d1ede21392e734a62
|
[] |
no_license
|
HYDFKI7/htsr
|
d95abbdbafde547b726733524176bd08e55ccdee
|
d56e8ea98c99f0158b0465d279ae805728ec5adb
|
refs/heads/master
| 2023-02-02T23:56:09.548157
| 2020-12-16T07:50:02
| 2020-12-16T07:50:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,040
|
rd
|
h_rainsnow.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h_rainsnow.R
\name{h_rainsnow}
\alias{h_rainsnow}
\title{Share the solid and liquid precipitations with a temperature criteria}
\usage{
h_rainsnow(fpr, fta, ta0, ta1, sta = NA)
}
\arguments{
\item{fpr}{Precipitation file name}
\item{fta}{Temperature file name}
\item{ta0}{Low temperature threshold}
\item{ta1}{High temperature threshold}
\item{sta}{Station id. (default = NA)}
}
\value{
2 hts files, one with the liquid precipitation (prefix rn_) and one with
the solid precipitation (prefix sn_).
}
\description{
The precipitations are shared with a linear bevel between two temperature
values
}
\details{
The two time-series must be previously restricted to the same interval of time.
The two temperature thresholds can be equal.
The temperature time-series must be complete with no gap. Gaps are allowed
in the precipitation time-series.
If the station id is NA, the station id of the file fta is used.
}
\author{
P. Chevallier - Oct 2017- Feb 2019
}
|
a8f7da55d6d67acd24a71a0abeaf022508adfb5a
|
606bb9c31d5c2badba82e4e5c664ae912cb95d19
|
/run_analysis.R
|
e1edccf588220965dfa5705835ae360c3a50bc1c
|
[] |
no_license
|
vmatutea/DataScience
|
7a534539ded838fb2531e6ac5d6516bd3d44751e
|
5f3885b1621fe1882a96cab07608467ce5cdbc63
|
refs/heads/master
| 2021-07-24T04:27:29.063006
| 2017-11-05T13:14:11
| 2017-11-05T13:14:11
| 79,155,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,114
|
r
|
run_analysis.R
|
# Course project: tidy the UCI HAR smartphone dataset and write the
# per-subject / per-activity averages to "tidy_data.txt".
# BUGFIX: the original called installed.packages("reshape2"), which only
# *lists* installed packages (a no-op here); install only when missing.
if (!requireNamespace("reshape2", quietly = TRUE)) {
  install.packages("reshape2")
}
library(reshape2)

# 1.Merges the training and the test sets to create one data set.
filename <- "get_dataset.zip"

if (!file.exists(filename)){
  # BUGFIX: the original URL string ended with a trailing space, which
  # breaks download.file().
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method="curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}

activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
train_subject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
train_X <- read.table("./UCI HAR Dataset/train/X_train.txt")
test_X <- read.table("./UCI HAR Dataset/test/X_test.txt")
train_y <- read.table("./UCI HAR Dataset/train/y_train.txt")
test_y <- read.table("./UCI HAR Dataset/test/y_test.txt")

# Stack training rows above test rows (same 561 feature columns).
data_x <- rbind(train_X,test_X)

# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("./UCI HAR Dataset/features.txt")
# NOTE(review): "mean()|std()" contains empty regex groups, so this matches
# the substrings "mean"/"std" and also keeps meanFreq() columns - confirm
# that is the intended selection.
col_meanStd <- features$V1[grep("mean()|std()",features$V2)]
data_x_meanStd <- data_x [,col_meanStd]

# 3.Uses descriptive activity names to name the activities in the data set
data_y <- rbind(train_y,test_y)
fdata_y<- factor(unlist(data_y))
# Map activity ids (1..6) to their labels from activity_labels.txt.
Actdata <- factor(fdata_y, levels = activity_labels[,1], labels = activity_labels[,2])
data_all <- cbind(data_x_meanStd,Actdata)

# 4.Appropriately labels the data set with descriptive variable names.
col_names <- features$V2[grep("mean()|std()",features$V2)]
col_names <- as.character(col_names)
colnames(data_all)<-c(col_names,"activity")

# 5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
data_subject <- rbind(train_subject,test_subject)
data_sub <- cbind(data_all,data_subject)
colnames(data_sub)<-c(colnames(data_all),"subject")
# Melt to long form, then average each variable per activity/subject pair.
mdata <- melt(data_sub, id=c("activity","subject"))
mdata_mean <- dcast (mdata, activity + subject ~ variable, mean)
write.table(mdata_mean, "tidy_data.txt", row.names = FALSE, quote = FALSE)
|
1acd7fa733838685173162210379c438d1c300eb
|
1d4a28f95725fc50dbc6b0ad0912372b5254094c
|
/run_analysis.r
|
3daf16e619761eb44792af70ccf1f619eb666b75
|
[] |
no_license
|
keithabailey/keithWD
|
f2cd2e219b606f8a08b25cb6fe0254fa05cfcda6
|
8c924bdaf001ef27edd760187a4b6d1f592ed624
|
refs/heads/master
| 2021-01-17T19:23:14.701626
| 2016-07-10T19:33:33
| 2016-07-10T19:33:33
| 63,014,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,486
|
r
|
run_analysis.r
|
###### Purpose
# Instructions
#
# The purpose of this project is to demonstrate your ability to collect, work with, and clean a data set.
# Review criterialess
# The submitted data set is tidy.
# The Github repo contains the required scripts.
# GitHub contains a code book that modifies and updates the available codebooks with the data to indicate all the variables and summaries calculated, along with units, and any other relevant information.
# The README that explains the analysis files is clear and understandable.
# The work submitted for this project is the work of the student who submitted it.
# Getting and Cleaning Data Course Projectless
# The purpose of this project is to demonstrate your ability to collect, work with, and clean a data set. The goal is to prepare tidy data that can be used for later analysis. You will be graded by your peers on a series of yes/no questions related to the project. You will be required to submit: 1) a tidy data set as described below, 2) a link to a Github repository with your script for performing the analysis, and 3) a code book that describes the variables, the data, and any transformations or work that you performed to clean up the data called CodeBook.md. You should also include a README.md in the repo with your scripts. This repo explains how all of the scripts work and how they are connected.
#
# One of the most exciting areas in all of data science right now is wearable computing - see for example this article . Companies like Fitbit, Nike, and Jawbone Up are racing to develop the most advanced algorithms to attract new users. The data linked to from the course website represent data collected from the accelerometers from the Samsung Galaxy S smartphone. A full description is available at the site where the data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# You should create one R script called run_analysis.R that does the following.
#
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Good luck!
#
######
###### Author: Keith Bailey
###### Date: 2016-07-09
#Download dataset
# Fetch and unzip the UCI HAR dataset into ./data/dataset (skips nothing:
# download runs every time the script is sourced).
if(!file.exists("./data")){dir.create("./data")}
fileURL1<- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL1, destfile="./data/dataset.zip",method="curl")
unzip("./data/dataset.zip", exdir="./data/dataset")

#######################################################################################################
######  1. Merges the training and the test sets to create one data set.                        #######
#######################################################################################################
#create a vector to hold the names of the two X_ files we want to import;
X_<-c("./data/dataset/UCI HAR Dataset/test/X_test.txt","./data/dataset/UCI HAR Dataset/train/X_train.txt")
bindData_tempX1<-read.table(file=X_[1])
bindData_tempX2<-read.table(file=X_[2])
bindDataX<-rbind(bindData_tempX1, bindData_tempX2)

#rename the columns of the consolidated dataset to those specified in the features file
#also rename the training labels column we added to our dataset to training labels
features<-read.table("./data/dataset/UCI HAR Dataset/features.txt")
names(bindDataX)<-c(as.vector(features$V2))

#######################################################################################################
######  2. Extracts only the measurements on the mean and standard deviation for each measurement#######
#######################################################################################################
#We will need to establish which variables names have mean and standard deviation (std) in them
#In order to do this we will have to use grepl and regular expressions to search for matches
# Escaped parentheses keep only the exact mean()/std() measurements (this
# intentionally excludes meanFreq() and angle(...Mean) variables).
bindDataX.MeanStd <- bindDataX[,grepl("mean\\(\\)|std\\(\\)",as.vector(features$V2))]

#######################################################################################################
######  3. Uses descriptive activity names to name the activities in the data set               #######
#######################################################################################################
Y_<-c("./data/dataset/UCI HAR Dataset/test/y_test.txt","./data/dataset/UCI HAR Dataset/train/y_train.txt")
bindData_tempY1<-read.table(file=Y_[1])
bindData_tempY2<-read.table(file=Y_[2])
bindDataY<-rbind(bindData_tempY1, bindData_tempY2)
#rename the columns of bindData Y
names(bindDataY)<-c("activityLabelNo")

#Not explicitly stated in the project requirements yet, but logical to bring in the subjects before producing the final dataset
Subjects<-c("./data/dataset/UCI HAR Dataset/test/subject_test.txt","./data/dataset/UCI HAR Dataset/train/subject_train.txt")
bindData_tempSubjects1<-read.table(file=Subjects[1])
bindData_tempSubjects2<-read.table(file=Subjects[2])
bindDataSubjects<-rbind(bindData_tempSubjects1, bindData_tempSubjects2)
#rename the columns of bindData Subjects
names(bindDataSubjects)<-c("subjects")

#add the activitylabel numbers and subjects to the dataset
bindDataX.MeanStd.Y<-cbind(bindDataX.MeanStd,bindDataY,bindDataSubjects)

#Next we need to transform our activityLabels into meaningful names
#as described in the activity_labels file
#read in the activity labels
activityLabels<-read.table(file="./data/dataset/UCI HAR Dataset/activity_labels.txt")
#rename activity label columns
names(activityLabels)<-c("activityLabelNo","activityLabelName")

#It is not absolutely necessary to name both the x and y table columns names twice,
#but I do so as it is a good habit to get into as often the names will not be the same
finalDataSet<-merge(bindDataX.MeanStd.Y, activityLabels, by=c("activityLabelNo","activityLabelNo"))

#######################################################################################################
######  4. Appropriately labels the data set with descriptive variable names.                   #######
#######################################################################################################
#Rename the final dataset to make the variables more meaningful
#Note that I have resisted temptation to rename Acc to Acceration, Mag to Magnitude etc as this is clearly
#laid out in the codebook. I believe that the names already convey meaning and a thorough understanding of
#this dataset can only truely be obtained from reading the accompanying codebook
names(finalDataSet)<-gsub('[-()]', '', names(finalDataSet))
#change to factors to remove ambiguity
finalDataSet$subjects <- as.factor(finalDataSet$subjects)

#######################################################################################################
######  5. From the data set in step 4, creates a second, independent tidy data set with the    ######
######     average of each variable for each activity and each subject.                         ######
#######################################################################################################
# BUGFIX/idiom: use library() for mandatory dependencies; require() only
# returns FALSE on failure and would let the script continue and fail later.
library(tidyr)
library(dplyr)

#I have interpretted the tidy dataset requirement to be a tall narrow dataset with one observation per
#row as all data herein are sensor signals. I understand that might not be the way you, my reviewer have
#implemented this, arguing instead that each row are one observation each of which has many signals.

#Tidy dataset narrow
finalDataset.tidynarrow <- finalDataSet %>% gather(measurementType, measurement, -subjects, -activityLabelNo, -activityLabelName)

#Create a summarised dataset showing the average for each activity and subject.
#In this case there are no NA data, but again leaving for good practice
finalDataset.tidynarrow.averages <- finalDataset.tidynarrow %>% group_by(subjects, activityLabelName, measurementType) %>% summarise(average = mean(measurement, na.rm=TRUE))

write.table(finalDataset.tidynarrow.averages, file="tidyDataset.txt", sep=" ", row.names=FALSE)
|
b692cda29510dc28a34a6d77a862f997143f79e2
|
7ab1674370ca36d36aae47a5ca26badab021f28b
|
/Logistic Regression on Bank Churn Data/bank with reduced catvar.R
|
8025c896abca20e2f6e077d7f8cc4ba54c1cb81c
|
[] |
no_license
|
maheshnandd/Data-Science-with-R
|
784d8b1c801953767eec018695e6ac569ad87b9c
|
f765430bb56f68e8f2abde2633683ba0f01d8dcd
|
refs/heads/master
| 2022-11-20T10:10:40.179997
| 2020-07-22T12:29:38
| 2020-07-22T12:29:38
| 281,641,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,808
|
r
|
bank with reduced catvar.R
|
#Build a classification model using logistic regression to predict the credibility of the customer, in order to minimize the risk and maximize the profit of German Credit Bank.
#Algorithm: logistic regression
#Dataset:  bank credit card fraud detection
library(corrplot)
library(caret)

# -- Load and inspect the data ------------------------------------------------
path <- "D:/R/Logistic/Dataset/BankCreditCard.csv"
bank <- read.csv(path, header = TRUE)
head(bank)
ncol(bank)
nrow(bank)
str(bank)
View(bank)
summary(bank)

# Drop the identifier column - it carries no predictive information.
bank$Customer.ID <- NULL

# Inspect the distinct levels of every feature.
for (c in colnames(bank)) {
  print(paste("levels of", c))
  # FIX: bank[c] is a one-column data.frame; factor() expects an atomic
  # vector, so unlist() it first (matching the categorical check further down).
  print(levels(factor(unlist(bank[c]))))
  print("----------------------")
}

# Convert the categorical features to factors.
bank$Gender <- as.factor(bank$Gender)
bank$Academic_Qualification <- as.factor(bank$Academic_Qualification)
bank$Marital <- as.factor(bank$Marital)
bank$Age_Years <- as.numeric(bank$Age_Years)
str(bank)

# The dependent variable is categorical as well.
bank$Default_Payment <- as.factor(bank$Default_Payment)
str(bank)

# Separate out the numerical and categorical column names.
cat_cols <- colnames(bank)[unlist(lapply(bank, is.factor))]
num_cols <- colnames(bank)[unlist(lapply(bank, is.numeric))]

# Check the data for NAs.
checknulls <- function(x) return(any(is.na(x)))
c <- colnames(bank)[unlist(lapply(bank, checknulls))]
print(c)

# Check the numeric columns for zeros.
checkzeros <- function(x) return(any(x == 0))
# FIX: run the check on the data columns themselves. The original applied
# checkzeros to the *vector of column names* (and took colnames() of a
# character vector, which is NULL), so no zero column was ever reported.
z <- num_cols[unlist(lapply(bank[num_cols], checkzeros))]
print(z)

# Check the levels of the categorical columns.
for (c in cat_cols) {
  print(paste("levels of", c))
  print(levels(factor(unlist(bank[c]))))
  print("-------------------")
}

# Recode Marital category '0' (undocumented) as '3' ("prefer not to say").
# NOTE(review): this relies on "3" already existing as a factor level;
# otherwise the assignment would introduce NAs - confirm against the codebook.
bank$Marital[bank$Marital == 0] <- 3
levels(factor(unlist(bank$Marital)))

# Check for multicollinearity among the numeric predictors.
corr <- cor(bank[num_cols])
corrplot(corr, method = "number", type = "lower")
# Strong pairwise correlations observed (kept from the exploration):
#Feb_Bill_Amount x Jan_Bill_Amount =0.84
#march_Bill_Amount x Jan_Bill_Amount= 0.86
#march_Bill_Amount x Feb_Bill_Amount = 0.85
#April_bill_amount x Jan_Bill_Amoun = 0.82
#April_bill_amount x Feb_Bill_Amount = 0.83
#April_bill_amount x march_Bill_Amount = 0.91
#May_bill_amount x Jan_Bill_Amount = 0.76
#May_bill_amount x Feb_Bill_Amount =0.84
#May_bill_amount x march_Bill_Amount =0.84
#May_bill_amount x April_bill_amount =0.91
#June_bill_amount x Jan_Bill_Amount =0.76
#June_bill_amount x Feb_Bill_Amount =0.80
#June_bill_amount x march_Bill_Amount =0.82
#June_bill_amount x April_bill_amount =0.86
#June_bill_amount x May_bill_amount =0.95

# Class balance of the dependent variable.
prop.table(table(bank$Default_Payment))

## Split the data 70/30; random sampling ensures no accidental grouping,
## which would otherwise bias the model.
rows <- nrow(bank)
s <- sample(seq_len(rows), 0.7 * rows)
train <- bank[s, ]
test <- bank[-s, ]
print(paste("train", nrow(train), "test", nrow(test)))

## Validate that the class proportions survive the split.
prop.table(table(bank$Default_Payment))
prop.table(table(train$Default_Payment))
prop.table(table(test$Default_Payment))
lvl_tr <- length(levels(factor(train$Default_Payment)))
lvl_te <- length(levels(factor(test$Default_Payment)))
# FIX: brace the if/else - the original's newline-separated top-level
# `if (...)\nprint(...)else` layout is fragile when sourced line by line.
if (lvl_tr >= lvl_te) {
  print("levels are OK")
} else {
  print("levels are more in Testing")
}

# Base model: logistic regression on all predictors.
m1 <- glm(Default_Payment ~ ., data = train, binomial(link = "logit"))
summary(m1)
p1 <- predict(m1, test, type = "response")

# Distribution of predicted probabilities (low values = customer pays next month's bill).
prop.table(table(p1))

# Apply a 0.5 cut-off.
length(p1[p1 <= 0.5])
length(p1[p1 > 0.5])

# Convert likelihoods to classes 0/1.
# (Removed a stray bare `print` statement that only echoed the print function itself.)
pred1 <- ifelse(p1 <= 0.5, 0, 1)
print(pred1[1:10])
cbind(p1[1:10], pred1[1:10])
confusionMatrix(test$Default_Payment, factor(pred1), positive = "1")
|
c0987c8500fc9009a9fd94c5da620a729c4b3d89
|
fcaed3894375c80a84199fae031277b2a0e66812
|
/Plot1.R
|
6e7234cfbe400290178e6b9f860e7c0dba165e36
|
[] |
no_license
|
jzazueta/ExData_Plotting1
|
2f886cf333dbe8f6105cf9f78914a81caa744644
|
545bea205e7afa60b7910eb3a11b82f0e9e9bb25
|
refs/heads/master
| 2020-12-24T09:15:37.969550
| 2014-06-07T02:01:59
| 2014-06-07T02:01:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 930
|
r
|
Plot1.R
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.

## Unzip the data source into the working directory.
dir <- getwd()
unzip("exdata-data-household_power_consumption.zip", exdir = dir)

## Read the file. Declare "?" as the missing-value marker up front instead of
## relying on as.numeric() coercion (which emits "NAs introduced" warnings).
power <- read.csv("household_power_consumption.txt",
                  sep = ";", header = TRUE, stringsAsFactors = FALSE,
                  na.strings = "?")

## Convert the dates, then subset to the two target days *before* any further
## per-row work so later conversions only touch ~2,880 rows instead of ~2M.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- power[power$Date == "2007-02-02" | power$Date == "2007-02-01", ]

## Ensure the plotted column is numeric (a no-op if read.csv already parsed it).
## NOTE(review): the original also ran strptime() on Time, producing a POSIXlt
## list-column that this plot never uses; that conversion is dropped here.
power$Global_active_power <- as.numeric(power$Global_active_power)

## Create the plot file.
png(file = "plot1.png", bg = "white")
# FIX: "orange red" (with a space) is not a valid R color name and makes
# hist() error with "invalid color name"; the correct name is "orangered".
hist(power$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "orangered")
dev.off()
|
6d271dd6feb0bcdb255f769983fc55f699f348bc
|
f6187d6a6734884c02a449d8eb44f29db14819b5
|
/bananas_data_play.R
|
9b9efe7364caf1c5ee43c7792bbcb8b9679b18e9
|
[] |
no_license
|
dlato/Stat744
|
b6a2ed9a45ac69af5c37f9d4cea078868662816d
|
def4a4a4d239cdf502ee7e9cec318b8d8d2cc5a4
|
refs/heads/master
| 2020-07-23T00:30:58.580569
| 2019-11-20T00:14:23
| 2019-11-20T00:14:23
| 207,383,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,249
|
r
|
bananas_data_play.R
|
#making a really terrible graph better
#based on this:
# https://github.com/thomasdebeus/colourful-facts/blob/develop/projects/Redesign-of-worst-chart-ever/redesign-of-worst-chart-ever.md
#######################
library(tidyverse)
library(ggthemes)
library(ggplot2)
library(directlabels)
#######################
#read in data after downloading
df <- read.csv("FAOSTAT_data_1_7_2018.csv")
#plotting just to see what we have
#recognizing that we really only have 3 categories, Country, Year and Value
g1 <- ggplot(df, aes(x = Year, y = Value, color = Country))+
geom_line()+
theme_bw()
#same plot, but with direct labels at the line ends instead of a legend
plot <- ggplot(df, aes(x = Year, y = Value, color = Country))+
geom_line()+
theme_bw() +
theme(legend.position = "none") +
geom_dl(aes(label = Country), method = list("last.points"))
plot + xlim(1995, 2010)
g1 + facet_wrap(~Country)
#now ordering the data so that in facet it looks nicer and makes more sense instead of alphbetical
#reodering levels, not values
df$Country <- with(df, reorder(Country, Value, FUN = mean))
# FIX: the original used `FUN = -x`, which is not a function (`x` is undefined
# here) and errors with "object 'x' not found". To reorder in the opposite
# (descending) direction, negate inside an anonymous function instead.
df$Country <- with(df, reorder(Country, Value, FUN = function(v) -mean(v)))
g1 <- ggplot(df, aes(x = Year, y = Value, color = Country))+
geom_line()+
theme_classic() +
theme(legend.position = "none")
g1 + facet_wrap(~Country)
#####################################################
#Sep 27: playing with bananas again
#####################################################
#trying to make this same bananas data into a heat map
#below are colours scales, viridis is a new one
library(colorspace)
library(viridis)
#sometimes ggplot does not define colour as you would think, sometimes its fill instead
#presumably you could make log breaks in the colour when you make a heat map, which in this case woud help with this visualization
#the year has decimals bc the year is a continuous var, so you should add in breaks to make it better
#coord flip is good so that way you can easily flip the axis at the last min
# FIX: this was a stray top-level call (it just auto-printed a coord object);
# kept as a lecture-note example only.
# coord_flip()
#so anything that is inhearitantly vertical, you need to use coord_filp() to switch it (like box plots)
#but, this does not play nice with facet, so you have to use ggstance
#where you can then use geom_boxh where the h means horizontal
#if doing anything in tidyverse then you are working in a dataframe, so if you use the fct_reorder()
#that is working on a variable and therefore you need to mutate the tidyverse dataframe to
#then become a variable
#remember if you flip the axis then sometimes your x in the ggplot becomes your y
#wilkie says that you should not have white spaces, so you should fill in your box plots with some sort of
#neutral colour so it is distinguished from the others
#aes() means asthetic, which is a way to map data to a graph
#so if the log scale squishes things too much, you can try to use square root scale
# but this is confusing to know how to interpret it
#can define your theme as my_theme so that you dont have to keep re-defining it for each graph you make
#once your graph looks roughly the way you want you should go back and clean up your code and remove redundancy
#when using jitter, it will jitter in the x and y axis, which is not what we want sometimes
#so as always, it is best to specify
#dodge basically is the same but says, only move something if you have to
#expand() can tell you how much space to have around the data
#so talking about white space btwn the xaxis and data and how we usually dont want it
#usually goes into the the scale_x_continuous()
#so you should never have data hard coded in your code, try to have it read in as a separate csv
# if you do have to hard code it in, you should put all that at the top of you file as a variable
# where it is really clear that you are hard coding these things
# the best way to do this is to actually add it to the CSV so that it is part of the datafile
#
#if you want to do facets with different sizes its really not that hard apparently
#some aspects of graphs do not scale properly, so you should decide what width you want
#it to be (below)
# FIX: another stray executable note - dev.new() would open a fresh graphics
# device every time the file is sourced; kept as an example only.
# dev.new(width=5, height=7)
#so you should make a separte graph window of that size and export so that you get the exact size of
#things that you want
#openGL is a general thing that oppens 3D graphics
|
f3152d6521937381b12e292d663dc0f828cb4e89
|
78cd6c3c511f95d8b8a57b36c5fb71da0632e989
|
/slurm_variances.R
|
13bed67899ad3626106a558f0ee4965c5a097402
|
[] |
no_license
|
NTomasetti/OHF
|
57c3103f7b6c85bc00a3c53d7e179b89a993c48e
|
9743f4b007a807c881142c8e2173c97cc45251ce
|
refs/heads/master
| 2021-04-03T07:46:18.189602
| 2018-08-13T04:06:38
| 2018-08-13T04:06:38
| 125,163,024
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,930
|
r
|
slurm_variances.R
|
# Per-SLURM-array-task script: for a slice of 100 test ids, fits two MCMC
# models - individual heterogeneity (IH) and clustered heterogeneity (CH) -
# and records the posterior-mean variances of the 'a' and 'delta' parameters.
# NOTE(review): rm(list = ls()) in a script is normally an anti-pattern; it is
# harmless here only because each array task runs in a fresh R session.
rm(list=ls())
# Task index from the scheduler; selects which 100-id slice this task handles.
repenv <- Sys.getenv("SLURM_ARRAY_TASK_ID")
j <- as.numeric(repenv)
# Distinct but reproducible seed per task.
set.seed(1000 + j)
library(Rcpp, lib.loc = 'packages')
library(RcppArmadillo, lib.loc = 'packages')
library(RcppEigen, lib.loc = 'packages')
library(rstan, lib.loc = 'packages')
source('slurm_RFuns.R')
sourceCpp('slurm_cppFuns.cpp')
ids <- readRDS('idTest.RDS')
datatest <- readRDS('dataTest.RDS')
results <- data.frame()
# 500 evenly spaced post-burn-in draws from the homogeneous-model chain;
# draw 1 is reused below as the MH starting point.
homogDraws <- readRDS('homogMCMCN2000.RDS')$draws[floor(seq(5001, 7500, length.out = 500)),]
CHprior <- readRDS('CHFit.RDS')
# NOTE(review): T here is a data-length cap (first T rows per id) and shadows
# the T shorthand for TRUE - safe only because bare T is never used below.
T <- 400
lagsA <- 4
lagsD <- 4
# K/mix: number of mixture components; dim shadows base::dim - neither appears
# to be used in this visible code, presumably consumed by the sourced helpers.
K <- mix <- 6
dim <- 2 + lagsA + lagsD
model <- c('IH', 'CH')
# VB Prior Distributions: 1) IH, 2) CH
prior <- list()
prior[[1]] <- c(-5, -5, rep(0, 8), c(chol(diag(10, 10))))
prior[[2]] <- CHprior
# MCMC Hyper parameters
hyper <- list()
hyper[[1]] <- list()
hyper[[1]]$mean <- c(-5, -5, rep(0, 8))
hyper[[1]]$varInv <- solve(diag(10, 10))
hyper[[2]] <- list(mean = prior[[2]]$mean, varInv = prior[[2]]$varInv, weights = prior[[2]]$weights)
# NOTE(review): growing `results` via rbind in a loop is O(n^2); acceptable at
# 100 ids x 2 models, but preallocating or bind_rows would scale better.
for(i in 1:100){
data <- datatest[[(j-1)*100 + i]]
id <- ids[(j-1)*100 + i]
# Get MCMC posteriors
MCMCDraw <- list()
for(k in 1:2){
# 15,000 MH iterations; keep 500 evenly spaced draws after 10,000 burn-in.
# mix = TRUE only for the CH (k == 2) model.
MCMCDraw[[k]] <- singleMCMCallMH(data = data[1:min(nrow(data), T), 1:2],
reps = 15000,
draw = homogDraws[1, ],
hyper = hyper[[k]],
stepsize = 0.05,
mix = (k == 2))$draws[floor(seq(10001, 15000, length.out = 500)),]
# First two columns hold log-variances; exponentiate before averaging.
var <- colMeans(exp(MCMCDraw[[k]][,1:2]))
results <- rbind(results,
data.frame(id = id,
model = model[k],
variance = var,
parameter = c('a', 'delta')))
}
}
write.csv(results, paste0('variances/group', j, '.csv'), row.names=FALSE)
|
1356d077b05cdeff4ead32c6b404292504ded6f3
|
409490d9da29446f5fb1672eab7e774731554785
|
/man/list.reverse.Rd
|
1f6d1995ae9c8caf47df03c0cff54bc59ad9b36e
|
[
"MIT"
] |
permissive
|
timelyportfolio/rlist
|
8004c472fb6835182773d4458c9d604cb03795a3
|
d3299cec59c36f9295493feea3d53d21278a8a2a
|
refs/heads/master
| 2020-11-30T23:33:33.408653
| 2014-08-07T16:28:24
| 2014-08-07T16:28:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
rd
|
list.reverse.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{list.reverse}
\alias{list.reverse}
\title{Reverse a list}
\usage{
list.reverse(.data)
}
\arguments{
\item{.data}{\code{list}}
}
\description{
Reverse a list
}
\examples{
\dontrun{
x <- list(a=1,b=2,c=3)
list.reverse(x)
}
}
|
ca4310e605ffa502426850f4ad61331a4732a186
|
5113b035b5e5022e71fb6d065f7033a2ba2c1a96
|
/R/setup_lambda.R
|
5816f8cb7bfc707bd37d714ee62e6f3a4c2ed8e6
|
[] |
no_license
|
cran/glmtlp
|
2f9a636f47d3f35c0852608b5d8244aa58030956
|
ec957cac73e6c7e82731a5ab6f8665392cf1b4bf
|
refs/heads/master
| 2021-12-25T07:43:50.859511
| 2021-12-17T22:00:02
| 2021-12-17T22:00:02
| 120,621,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
setup_lambda.R
|
#' Generate lambda sequence.
#'
#' @param X Input matrix, of dimension \code{nobs} x \code{nvars};
#' each row is an observation vector.
#' @param y Response variable, of length \code{nobs}. For \code{family="gaussian"},
#' it should be quantitative; for \code{family="binomial"}, it should be either
#' a factor with two levels or a binary vector.
#' @param weights Observation weights.
#' @param lambda.min.ratio The smallest value for \code{lambda}, as a fraction of
#' \code{lambda.max}, the smallest value for which all coefficients are zero.
#' The default depends on the sample size \code{nobs} relative to the number
#' of variables \code{nvars}.
#' @param nlambda The number of \code{lambda} values.
#'
#' @importFrom stats weighted.mean
#'
setup_lambda <- function(X, y, weights, lambda.min.ratio, nlambda) {
  # Largest penalty: the smallest lambda at which all coefficients are zero.
  lam_hi <- get_lambda_max(X, y, weights)
  lam_lo <- lambda.min.ratio * lam_hi
  # Decreasing, log-equispaced grid from lam_hi down to lam_lo.
  log_grid <- seq(from = log(lam_hi), to = log(lam_lo), length.out = nlambda)
  exp(log_grid)
}
# Smallest penalty that shrinks every coefficient to zero: the largest
# absolute (weighted) gradient of the null model, scaled by the sample size.
get_lambda_max <- function(X, y, weights) {
  # Weighted residuals of the intercept-only (null) fit.
  centered <- y - weighted.mean(y, weights)
  score <- crossprod(X, centered * weights)
  max(abs(score), na.rm = TRUE) / nrow(X)
}
|
ba2e3cef086d6b6a82403cb451568ffd0fba0206
|
8e35e074ce3973078c4cf50eb46533f250f66a78
|
/examples/example5_script.R
|
79b119c85044150f7c9ba69b322d1e539c3a74c7
|
[] |
no_license
|
Moeneeb/NLP_workshop
|
1f9afdf8748d5eba6483f4c5a4c2a30dc3b3f9c8
|
be98ebc590fd45ad6e278b5909238df17382aabb
|
refs/heads/master
| 2021-05-23T08:06:41.673965
| 2020-04-01T09:21:43
| 2020-04-01T09:21:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 774
|
r
|
example5_script.R
|
library(dplyr)
library(magrittr)
library(tidytext)
library(stringr)
library(ggplot2)
# Example 5
# TF-IDF of single-word tokens in the Star Wars scripts: top 10 words per movie.

# Load the star_wars_scripts.rds dataset.
df <- readRDS("data/star_wars_scripts.rds")

# Tokenize into a one-token-per-row data frame (dropping the per-line summary
# columns first), count word frequency per movie, apply TF-IDF, and keep the
# 10 highest-scoring words per movie, sorted within movie by descending tf_idf.
tf_idf_script <- df %>%
  select(-length, -ncap, -nexcl, -nquest, -nword) %>%
  unnest_tokens(output = word, input = dialogue) %>%
  count(movie, word, sort = TRUE) %>%
  bind_tf_idf(word, movie, n) %>%
  # (removed a no-op ungroup(): count() on ungrouped input is already ungrouped)
  group_by(movie) %>%
  # FIX: name the ranking column explicitly - a bare top_n(10) relied on the
  # implicit "last column" default and printed a "Selecting by tf_idf" message.
  top_n(10, tf_idf) %>%
  arrange(movie, desc(tf_idf))
|
64f3bc6610553db0c7d673241e9d75a736bcab4b
|
64bd1f5b0928dc4218282d8dd88b876a3dec877a
|
/sigmoid_unsmoothed_left.R
|
672ff63cfbf7f54d4285a908bc609897bad53d6f
|
[] |
no_license
|
CoBrALab/BSC
|
fdb1ae344b9264c8ec18a91e20849635e1837459
|
93ec7ed15803d3922da2c36d01311bee222e2f22
|
refs/heads/main
| 2023-07-04T21:16:27.954367
| 2021-08-09T15:59:10
| 2021-08-09T15:59:10
| 311,529,692
| 1
| 1
| null | 2020-12-17T20:22:32
| 2020-11-10T03:03:44
|
Shell
|
UTF-8
|
R
| false
| false
| 3,435
|
r
|
sigmoid_unsmoothed_left.R
|
# May 5th 2019
# Can the cortical intensity profile be approximated with a sigmoid function?
# Fits a bounded-growth sigmoid (via nls) to the intensity-vs-depth profile at
# each surface vertex (left hemisphere) and writes per-vertex steepness (c),
# WM/GM intensity ratio, and log-steepness to text files.
### Load required libraries
##
library(plyr)
library(RMINC)
library(metafor)
library(mni.cortical.statistics)
# NOTE(review): this shebang is mid-file (after the library calls); R parses
# it as a comment so it is harmless, but it belongs on line 1 to take effect.
#!/usr/bin/env Rscript
# Single command-line argument: the subject/sample id used as a file prefix.
args = commandArgs(trailingOnly=TRUE)
# NOTE(review): setwd(WD) is a no-op (we are already in getwd()); the script
# then navigates to the sibling "samples" directory of the launch directory.
WD <- getwd()
setwd(WD)
setwd("..")
setwd("./samples/")
print(args)
# Holder for the per-depth input file names (columns are appended below; the
# initial ncol = 9 placeholder is immediately superseded).
gf <- data.frame(matrix(NA, nrow =1, ncol = 9))
# Gray-matter sampling depths: 50%..6.25% above the white surface.
gf$left_50 <- paste(args,"_GM_50_left.txt", sep = "")
gf$left_25 <- paste(args,"_GM_25_left.txt", sep = "")
gf$left_18_75 <- paste(args,"_GM_18_75_left.txt", sep = "")
gf$left_12_5 <- paste(args,"_GM_12_5_left.txt", sep = "")
gf$left_6_25 <- paste(args,"_GM_6_25_left.txt", sep = "")
gf$left_whiteboundary <- paste(args,"_WM_0_left.txt", sep = "")
#gf$left_wm50 <- paste(args,"_mid_left_WM.txt", sep = "")
# White-matter sampling depths below the boundary.
gf$left_wm25 <- paste(args,"_WM_25_left.txt", sep = "")
gf$left_wm18_75 <- paste(args,"_WM_18_75_left.txt", sep = "")
gf$left_wm12_5 <- paste(args,"_WM_12_5_left.txt", sep = "")
gf$left_wm6_25 <- paste(args,"_WM_6_25_left.txt", sep = "")
# Load the per-vertex intensity samples at each depth.
gray_left50 <- vertexTable(gf$left_50)
gray_left25 <- vertexTable(gf$left_25)
gray_left18_75 <- vertexTable(gf$left_18_75)
gray_left12_5 <- vertexTable(gf$left_12_5)
gray_left6_25 <- vertexTable(gf$left_6_25)
left_whiteboundary <- vertexTable(gf$left_whiteboundary)
white_left25 <- vertexTable(gf$left_wm25)
white_left18_75 <- vertexTable(gf$left_wm18_75)
white_left12_5 <- vertexTable(gf$left_wm12_5)
white_left6_25 <- vertexTable(gf$left_wm6_25)
# Stack depths as rows (WM deepest first, GM shallowest last): one row per
# depth, one column per vertex.
sf <- rbind(t(white_left25), t(white_left18_75), t(white_left12_5), t(white_left6_25), t(left_whiteboundary), t(gray_left6_25), t(gray_left12_5), t(gray_left18_75), t(gray_left25), t(gray_left50))
# Append the depth coordinate (percent, negative = inside WM) as the last column.
df <- cbind(sf, c(-25, -18.75, -12.5, -6.25, 0, 6.25, 12.5, 18.75, 25, 50))
dff <- as.data.frame(df)
# NOTE(review): model_a/model_k/model_d/model_max/model_min are allocated but
# never filled, and model_c_adj is filled but never written out.
model_a <- list()
model_k <- list()
model_c<- list()
model_d <- list()
model_c_adj <- list()
model_ratio <- list()
model_max <-list()
model_min <- list()
# NOTE(review): column 40963 is the depth column appended above, so iterating
# i in 1:40963 also "fits" depth against itself on the final pass - this looks
# like an off-by-one (1:40962 if the surface has 40962 vertices); TODO confirm
# the vertex count before changing, since downstream files expect this length.
for (i in 1:40963){
xvalues <- dff[,40963]
yvalues <- dff[,i]
# Min-max normalised profile for the scale-free fit.
adj_vals <- (yvalues - min(yvalues))/(max(yvalues)-min(yvalues))
# Sigmoid fit on raw intensities. warnOnly = TRUE means non-converged fits
# are kept silently rather than aborting the whole vertex loop.
nls_fit_rest <- nls(formula = yvalues ~ a+exp(k)+(-exp(k))/(1 + exp(-c*(xvalues-d))), start = list(a =min(yvalues), k = 5, c = 0.1, d = 0), algorithm="port", lower=c(min(yvalues), 0, 0, -50), upper=c(2000,100, 1, 50),control = nls.control(maxiter = 50, tol = 1e-05, minFactor = 1/5096, printEval = FALSE, warnOnly = TRUE))
# Same model on the normalised profile.
nls_fit_adj <- nls(formula = adj_vals ~ a+exp(k)+(-exp(k))/(1 + exp(-c*(xvalues-d))), start = list(a =min(adj_vals), k = 5, c = 0.1, d = 0), algorithm="port", lower=c(min(adj_vals), 0, 0, -50), upper=c(2000,100, 1, 50),control = nls.control(maxiter = 50, tol = 1e-05, minFactor = 1/5096, printEval = FALSE, warnOnly = TRUE))
coeffs <- coef(nls_fit_rest)
coeffs_adj <- coef(nls_fit_adj)
# Parameter 3 is c, the sigmoid steepness at the boundary.
model_c_adj[i] <- coeffs_adj[3]
model_c[i] <- coeffs[3]
# Ratio of deepest-WM sample (row 1) to the 25% GM sample (row 9).
model_ratio[i] <- yvalues[1]/yvalues[9]
}
model_c[1:10]
model_c_adj[1:10]
# Log-steepness; +0.1 offset avoids log(0) for flat profiles.
model_c_log<- log(as.numeric(model_c)+0.1)
setwd("..")
setwd("./sigmoid_fit/unsmoothed/")
#write lists to .txt files
write.table(as.numeric(model_c), paste(args,"_model_c_left.txt", sep=""), sep = "", col.names = FALSE, row.names = FALSE)
write.table(as.numeric(model_ratio), paste(args,"_model_ratio_left.txt", sep=""), sep = "", col.names = FALSE, row.names = FALSE)
write.table(as.numeric(model_c_log), paste(args,"_model_c_left_log.txt", sep=""), sep = "", col.names = FALSE, row.names = FALSE)
|
22c25ddcbb8d49c531468828501cd500e83ce277
|
846559394848a21cf4859096455b2e2b31c8c77e
|
/man/ComonGAP.Rd
|
dc461c6aa11f89cbbebbeab4b97acac613086381
|
[] |
no_license
|
ivotebexreni/JADE
|
e8d2c2087b35a7236a607e04317904da96ab97d3
|
c9efee1d940dc49d30739f88ce6855caa4f00284
|
refs/heads/master
| 2023-03-18T20:39:48.617218
| 2020-03-25T10:30:02
| 2020-03-25T10:30:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,483
|
rd
|
ComonGAP.Rd
|
\name{ComonGAP}
\alias{ComonGAP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Comon's Gap}
\description{
Comon's GAP criterion to evaluate the performance of an ICA algorithm.
}
\usage{
ComonGAP(A, A.hat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{A}{The true square mixing matrix.}
\item{A.hat}{The estimated square mixing matrix.}
}
\details{
Comon's GAP criterion is permutation and scale invariant. It can take any non-negative value, and 0 corresponds to an optimal separation.
If \code{A} is nearly singular, however, the values of the criterion can be huge.
Note that this function assumes the ICA model is \eqn{X = S A'}, as is assumed by \code{\link{JADE}} and \code{ics}. However \code{fastICA} and
\code{PearsonICA} assume \eqn{X = S A}. Therefore matrices from those functions have to be transposed first.
}
\value{
The value of the Comon's GAP.
}
\references{
\cite{Comon, P., (1994), Independent Component Analysis, A new concept?, \emph{Signal Processing}, \bold{36}, 287--314. }
}
\author{Klaus Nordhausen}
\seealso{\code{\link{amari.error}}, \code{\link{SIR}}}
\examples{
S <- cbind(rt(1000, 4), rnorm(1000), runif(1000))
A <- matrix(rnorm(9), ncol = 3)
X <- S \%*\% t(A)
A.hat <- JADE(X, 3)$A
ComonGAP(A, A.hat)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ multivariate }
|
2ae152cfc43671273529936c0aa178efe2dbfd5f
|
26dea210be60fafab93c89e4bb11d5ff9edeba72
|
/01Basic/26Array__Dimension_name.R
|
0010ca05c3036e5b9aaecf5ef6c78419a5f9dd4f
|
[] |
no_license
|
MomusChao/R
|
a71df4f7430d644c18f853ad4f06b0838d5545c9
|
014c8e5ec43dc5d02b9faa41b49032ed5c340439
|
refs/heads/master
| 2021-06-19T02:21:11.297723
| 2020-12-09T22:28:18
| 2020-12-09T22:28:18
| 83,297,248
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 100
|
r
|
26Array__Dimension_name.R
|
# Name the second and third dimensions of a 2x2x2 array.
x <- array(1:8, dim = c(2, 2, 2))
# FIX: the original assigned into dimnames piecewise (dimnames(x)[[2]] <- ...),
# but dimnames(x) starts as NULL, so the intermediate list has length 2 and
# `dimnames<-` rejects it for a 3-d array ("length of 'dimnames' must match
# number of dims"). Assign a full-length list instead; NULL leaves the first
# dimension unnamed, as the original intended.
dimnames(x) <- list(NULL, c("Momus", "Eva"), c("3-1", "3-2"))
x
|
9e76c1cbc9a3a1a65e75a9dd6d564d3c9e048c0d
|
cf9500bad69d602c2e14562ae82e871617eb33cd
|
/Team 1/text_mining_Wei_Wang/12_question_#9.R
|
a53ae59ee35403da9e5bc631b5e64d6efa796862
|
[] |
no_license
|
PHP2560-Statistical-Programming-R/text-mining-review-all-join-this-team
|
9b03c8df59424091383e461f45bd41999d3560cf
|
013ccf9ecc9bac96f0a3b5be5c925862af1ff226
|
refs/heads/master
| 2021-08-19T22:38:08.195634
| 2017-11-27T16:24:00
| 2017-11-27T16:24:00
| 110,900,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
r
|
12_question_#9.R
|
# Analyze word counts that contribute to each sentiment. Can we view this visually?
# NOTE(review): `series` is assumed to be a one-token-per-row data frame with a
# `word` column, built earlier in the analysis - confirm it is in scope here.
# Tag each word with its Bing lexicon sentiment and count occurrences.
counting_bing_word <- series %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
ungroup()
counting_bing_word
# We can view this visually: top 10 contributing words per sentiment.
# NOTE(review): the bare top_n(10) selects by the last column (`n`) implicitly
# and prints "Selecting by n" - top_n(10, n) would make the intent explicit.
counting_bing_word %>%
group_by(sentiment) %>%
top_n(10) %>%
ggplot(aes(reorder(word, n), n, fill = sentiment)) +
geom_bar(alpha = 0.8, stat = "identity", show.legend = FALSE, col = "yellow") +
facet_wrap(~sentiment, scales = "free_y") +
labs(y = "Contribution to sentiment", x = NULL) +
coord_flip()
|
83172aa3d1113285d1a0b605a4e149ef691bbaa4
|
c69bb8c12eb205627783c8ae7a10280235873724
|
/man/tnirp.Rd
|
48dfcb0612f0ed027476c2ca91e8d1285162a271
|
[] |
no_license
|
cran/HelpersMG
|
384b45838d5fa110fe31c3eaca5b545774136797
|
c3bd166e7d24bf4d51414819539262db9e30495a
|
refs/heads/master
| 2023-06-21T20:05:01.243339
| 2023-06-14T19:02:05
| 2023-06-14T19:02:05
| 33,549,168
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,497
|
rd
|
tnirp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tnirp.R
\name{tnirp}
\alias{tnirp}
\title{Read an ASCII text representation of a named or not vector object}
\usage{
tnirp(x, named = TRUE)
}
\arguments{
\item{x}{A string or a vector of strings with value and possibly names.}
\item{named}{TRUE if names are included.}
}
\value{
A vector
}
\description{
Read an ASCII text representation of a named or not vector object.\cr
Note that paste0(rev(c("p", "r", "i", "n", "t")), collapse="") = "tnirp"
}
\details{
tnirp reads an ASCII text representation of a named or not vector object
}
\examples{
A <- structure(runif(26), .Names=letters)
text <- capture.output(A)
tnirp(text)
tnirp(" mu mu_season OTN p1.09 p1.10 p1.11
4.63215947 10.78627511 0.36108497 0.08292101 -0.52558196 -0.76430859
p1.12 p1.13 p1.14 p1.15 p1.16 p1.17
-0.75186542 -0.57632291 -0.58017174 -0.57048696 -0.56234135 -0.80645122
p1.18 p1.19 p1.20 p1.21 p1.22 p1.23
-0.77752524 -0.80909494 -0.56920540 -0.55317302 0.45757298 -0.64155368
p1.24 p1.25 p1.26 p1.27 p1.28 p1.29
-0.59119637 -0.66006794 -0.66582399 -0.66772684 -0.67351412 -0.66941992
p1.30 p1.31 p1.32 p1.33 p1.34 p1.35
-0.67038245 -0.68938726 -0.68889078 -0.68779016 -0.68604629 -0.68361820
p1.36 p1.37 p2.09 p2.10 p2.11 p2.12
-0.67045238 -0.66115613 2.55403149 2.31060620 2.31348160 2.20958757
p2.13 p2.14 p2.15 p2.16 p2.17 p2.18
2.14304918 2.19699719 2.30705457 2.18740019 2.32305811 2.31668302
p2.19 p2.20 p2.21 p2.22 p2.23 p2.24
1.99424288 2.06613445 2.38092301 2.40551276 2.31987342 2.30344402
p2.25 p2.26 p2.27 p2.28 p2.29 p2.30
2.26869058 2.25008836 2.23385204 2.22768782 2.25341904 1.77043360
p2.31 p2.32 p2.33 p2.34 p2.35 p2.36
2.21606813 2.21581431 2.21153872 2.21118013 2.21375660 2.21182196
p2.37
1.86137833 ")
tnirp(" 27.89 289.99
90.56", named=FALSE)
}
\seealso{
Other Characters:
\code{\link{asc}()},
\code{\link{char}()},
\code{\link{d}()}
}
\author{
Marc Girondot \email{marc.girondot@gmail.com}
}
\concept{Characters}
|
01711174f1d691cfd171a198510238184e5db76e
|
cd3252d6bb33b1254219eef160781dc68654d4a9
|
/R_W2_Project1-Boost revenue by reactivating dormant one-time buyers.R
|
3dfa5c461ea3f4a86cf7a9ab29e4dc93bdd8cbfc
|
[] |
no_license
|
gracesiny12/Business_Analysis
|
3af10fcbbef2e0d8e31e1dc67f5b314cd5d3496a
|
505e9ab3003684a75ef17de8b82788115498c370
|
refs/heads/master
| 2021-09-10T17:30:44.786984
| 2018-03-30T03:39:05
| 2018-03-30T03:39:05
| 123,096,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,294
|
r
|
R_W2_Project1-Boost revenue by reactivating dormant one-time buyers.R
|
# R version 3.4.0 (2017-04-21)
# Project: boost revenue by reactivating dormant one-time buyers.
# Labels one-time buyers who stayed dormant for three months by whether they
# converted in the following quarter, then fits a cross-validated LASSO
# logistic regression on their customer features.
# NOTE(review): install.packages() at the top of a script re-installs on every
# run; kept for fidelity, but consider guarding with requireNamespace().
install.packages("data.table")
install.packages("sqldf")
install.packages('doMC')
install.packages('glmnet')
path <- '~/Downloads'
setwd(path)
library(data.table)
library(sqldf)
library(doMC)
library(glmnet)
options(scipen = 999)

# -- Load raw tables ----------------------------------------------------------
customer_table <- fread("customer_table.csv")
order_table <- fread("order_table.csv")
product_table <- fread("product_table.csv")
category_table <- fread("category_table.csv")

#### Store ids as characters so large ids are not rendered in scientific notation.
customer_table[, customer_id := as.character(customer_table$customer_id)]
order_table[, customer_id := as.character(order_table$customer_id)]
order_table[, order_id := as.character(order_table$order_id)]
order_table[, product_id := as.character(order_table$product_id)]
product_table[, product_id := as.character(product_table$product_id)]

## Customers who made exactly one purchase before 2016/12/22.
base <- subset(
order_table[
order_date < '20161222' & order_amount > 0,
.(count = .N
, order_date = max(order_date)
, order_amount = max(order_amount)
, product_id = max(product_id)), by = customer_id],
count == 1)

## Customers who made a purchase between 2016/12/22 and 2017/02/22.
purchase_again <- sqldf("SELECT customer_id
, MAX(order_date) AS latest_orderdate
FROM order_table
WHERE order_date BETWEEN '20161222' and '20170222'
AND order_amount > 0
GROUP BY 1");

## One-time buyers who were dormant between 2016/12/22 and 2017/02/22.
dormant_3month <- sqldf("SELECT *
FROM base
WHERE customer_id NOT IN
(SELECT customer_id FROM purchase_again);")

## Customers who purchased again between 2017/02/23 and 2017/05/22.
purchase_flag <- sqldf("SELECT customer_id
, MAX(order_date) AS latest_orderdate
FROM order_table
WHERE order_date BETWEEN '20170223' and '20170522'
AND order_amount > 0
GROUP BY 1");

## Conversion label: 1 if the dormant customer purchased in the next quarter.
is_converted <- sqldf("SELECT d.customer_id,
CASE WHEN pf.customer_id IS NOT NULL THEN 1
ELSE 0 END AS is_converted
FROM dormant_3month d
LEFT JOIN purchase_flag pf
ON d.customer_id = pf.customer_id
;")

## Join the label onto the customer features.
user_features <- sqldf("SELECT ct.*
,ic.is_converted
FROM customer_table ct
INNER JOIN is_converted ic
ON ct.customer_id = ic.customer_id");

### Fill all NA's with 0.
user_features[is.na(user_features)] <- 0

### Remove all categorical variables, for now.
user_features$customer_id <- NULL
user_features$country <- NULL
user_features$gender <- NULL
user_features$first_visit_date <- NULL
user_features$last_visit_date <- NULL

### Feature matrix and label vector.
train_x <- subset(user_features, select = -c(is_converted))
# FIX: train_y was referenced below but never defined, so the script failed
# with "object 'train_y' not found"; the label is the is_converted column
# that was just excluded from the feature matrix.
train_y <- user_features$is_converted
train_y_categorical <- ifelse(train_y == 1, 'YES', 'NO')

### Let's train the model! (6-core cross-validated LASSO logistic regression,
### selecting lambda by AUC.)
registerDoMC(cores = 6)
model_lasso <- cv.glmnet(as.matrix(train_x), train_y_categorical, alpha = 1
, family = "binomial", type.measure = "auc", parallel = TRUE)
|
43289a70004984320e87d82fa8ae8c7ff41eff82
|
cb84593a51f0fe91e2071402582d4c61bd2c39e3
|
/Run_Analysis.R
|
7ed5038c706e260b061b2676117b465e6578ae49
|
[] |
no_license
|
TMagodi/Getting_and_Cleaning_Data_Week_4_Project
|
f32f17c8a47f95e1157c0272f38cd712a41c7662
|
1363d8901df47e4f29dfdfa95bdd297ce9a29eef
|
refs/heads/main
| 2023-01-23T10:08:01.207554
| 2020-11-24T12:21:31
| 2020-11-24T12:21:31
| 315,565,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,657
|
r
|
Run_Analysis.R
|
# Preparation Stage
# NOTE(review): a hard-coded absolute setwd() makes the script machine-specific;
# kept as-is, but a relative path would be more portable.
setwd("C:/Users/vkjh6510/Desktop/RPROGRAMMING COURSERA/FUCI HAR Dataset/FUCI HAR Dataset/UCI HAR Dataset/")
#Loading required packages
library(dplyr)
#Reading data: feature names, activity labels, then the test and train splits.
features <- read.table("./features.txt", col.names = c("n","functions"))
activities <- read.table("./activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("./test/subject_test.txt", col.names = "subject")
xtest <- read.table("./test/X_test.txt", col.names = features$functions)
ytest <- read.table("./test/y_test.txt", col.names = "code")
subject_train <- read.table("./train/subject_train.txt", col.names = "subject")
xtrain <- read.table("./train/X_train.txt", col.names = features$functions)
ytrain <- read.table("./train/y_train.txt", col.names = "code")
#Step 1: Merges the training and the test sets to create one data set.
#Rows: train then test; columns: subject id, activity code, measurements.
X <- rbind(xtrain, xtest)
Y <- rbind(ytrain, ytest)
Subject <- rbind(subject_train, subject_test)
Combined_Data <- cbind(Subject, Y, X)
#Step 2: Extracts only the measurements on the mean and standard deviation for each measurement.
CleanData <- Combined_Data %>% select(subject, code, contains("mean"), contains("std"))
#Step 3: Uses descriptive activity names to name the activities in the data set.
#The numeric code indexes directly into the activity label table.
CleanData$code <- activities[CleanData$code, 2]
#Step 4: Appropriately labels the data set with descriptive variable names.
names(CleanData)[2] = "activity"
names(CleanData)<-gsub("Acc", "Accelerometer", names(CleanData))
names(CleanData)<-gsub("Gyro", "Gyroscope", names(CleanData))
names(CleanData)<-gsub("BodyBody", "Body", names(CleanData))
names(CleanData)<-gsub("Mag", "Magnitude", names(CleanData))
names(CleanData)<-gsub("^t", "Time", names(CleanData))
names(CleanData)<-gsub("^f", "Frequency", names(CleanData))
names(CleanData)<-gsub("tBody", "TimeBody", names(CleanData))
# NOTE(review): these patterns are regexes in which "()" is an empty group, so
# "-mean()" effectively matches "-mean" anywhere in a name (including inside
# "-meanFreq()"); fixed = TRUE would match the literal parentheses instead.
names(CleanData)<-gsub("-mean()", "Mean", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("-std()", "STD", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("-freq()", "Frequency", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("angle", "Angle", names(CleanData))
names(CleanData)<-gsub("gravity", "Gravity", names(CleanData))
#Step 5: From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
TidyData <- CleanData %>%
group_by(subject, activity) %>%
summarise_all(tibble::lst(mean))
write.table(TidyData, "TidyData.txt", row.name=FALSE)
#Final Check Stage
#Checking variable names
str(TidyData)
View(TidyData)
|
298fe459c3c0c2d3a96637dfa4d70112d10dde45
|
9719c43e784f48e79c81c7151ada584c105bbe11
|
/tests/testthat/test-biocbox.R
|
8f35221a1ba72b4f7abeef107cb537e9a30921a4
|
[
"Apache-2.0"
] |
permissive
|
edavidaja/FacileAnalysis
|
ade3de9b07fb4d614a04dce7783843dfc57d5ce4
|
8f96cdf41904d606f81294f4ff169c658113dd86
|
refs/heads/main
| 2023-09-04T21:15:56.014307
| 2021-11-17T20:35:35
| 2021-11-17T20:35:35
| 430,765,832
| 0
| 0
|
NOASSERTION
| 2021-11-22T15:38:31
| 2021-11-22T15:38:30
| null |
UTF-8
|
R
| false
| false
| 4,029
|
r
|
test-biocbox.R
|
# Tests for biocbox(): building Bioconductor assay containers (DGEList /
# EList) from a FacileAnalysis linear-model definition.
context("Building bioconductor assay containers with biocbox (biocbox)")
# Reuse an already-loaded example FacileDataSet when iterating interactively.
if (!exists("FDS")) FDS <- FacileData::exampleFacileDataSet()
test_that("Correct bioc container built from model def and method spec", {
  # Tumor-vs-normal model restricted to bladder cancer samples.
  model <- FDS %>%
    filter_samples(indication == "BLCA") %>%
    flm_def(covariate = "sample_type",
            numer = "tumor",
            denom = "normal")

  # edgeR quasi-likelihood: expect a DGEList with dispersions estimated.
  qlf <- biocbox(model, "rnaseq", method = "edgeR-qlf")
  expect_class(qlf, "DGEList")
  expect_subset("sample_type", colnames(qlf$samples))
  expect_number(qlf$common.dispersion)
  expect_numeric(qlf$trended.dispersion, len = nrow(qlf))
  expect_numeric(qlf$tagwise.dispersion, len = nrow(qlf))

  # voom: an EList carrying a per-observation $weights matrix.
  voomed <- biocbox(model, "rnaseq", method = "voom")
  expect_class(voomed, "EList")
  expect_matrix(voomed$weights, nrows = nrow(voomed), ncols = ncol(voomed))
  expect_equal(qlf$design, voomed$design)
  expect_set_equal(rownames(voomed), rownames(qlf))

  # limma-trend: an EList with no weights.
  trended <- biocbox(model, "rnaseq", method = "limma-trend")
  expect_class(trended, "EList")
  expect_null(trended$weights)
  expect_matrix(trended$E, nrows = nrow(trended), ncols = ncol(trended))
  expect_set_equal(rownames(trended), rownames(qlf))

  # voom and limma-trend should carry essentially the same expression matrix.
  cors <- cor(trended$E, voomed$E, method = "spearman")
  expect_true(all(diag(cors) > 0.999))
})
test_that("Various filter* combinations to biocbox work", {
  model <- FDS %>%
    filter_samples(indication == "BLCA") %>%
    flm_def(covariate = "sample_type",
            numer = "tumor",
            denom = "normal")
  universe <- features(FDS, assay_name = "rnaseq")

  # ELists generated for limma-trend (skips the voom step).
  # With filter = FALSE, every feature of the assay survives.
  unfiltered <- biocbox(model, "rnaseq", method = "limma-trend", filter = FALSE)
  expect_equal(nrow(unfiltered), nrow(universe))
  expect_set_equal(rownames(unfiltered), universe$feature_id)

  # Omitting filter applies the default low-expression filter.
  default_box <- biocbox(model, "rnaseq", method = "limma-trend")
  expect_true(nrow(default_box) < nrow(unfiltered))
  expect_subset(rownames(default_box), rownames(unfiltered))

  # Filtering also works within a restricted universe.  Here the universe
  # is genes with 5-letter names; a more common use case would restrict to
  # meta == "protein_coding".
  five_letter <- filter(universe, nchar(name) == 5)
  # Low-expression filtering then happens inside that restricted universe.
  five_box <- biocbox(model, "rnaseq", method = "limma-trend",
                      filter_universe = five_letter)
  expect_true(all(nchar(five_box$genes$symbol) == 5))
  expect_subset(five_box$genes$feature_id, five_letter$feature_id)

  # filter_require rescues features the expression filter would drop.
  rescue_ids <- setdiff(five_letter$feature_id, rownames(five_box))
  rescue_ids <- sample(rescue_ids, 10)
  expect_false(any(rescue_ids %in% rownames(five_box)))
  rescued_box <- biocbox(model, "rnaseq", method = "limma-trend",
                         filter_universe = five_letter,
                         filter_require = rescue_ids)
  expect_true(nrow(rescued_box) == nrow(five_box) + length(rescue_ids))
  expect_set_equal(rownames(rescued_box), c(rownames(five_box), rescue_ids))
})
test_that("biocbox(..., features = something) only ever returns `something`", {
  model <- FDS %>%
    filter_samples(indication == "BLCA") %>%
    flm_def(covariate = "sample_type",
            numer = "tumor",
            denom = "normal")
  universe <- features(FDS, assay_name = "rnaseq")
  chosen <- sample_n(universe, 10)

  # Even with low-expression filtering enabled, only the requested
  # features come back.
  with_filter <- biocbox(model, "rnaseq", method = "limma-trend",
                         filter = TRUE, features = chosen)
  expect_set_equal(rownames(with_filter), chosen$feature_id)

  # Same contract with the "default" filter setting.
  with_default <- biocbox(model, "rnaseq", method = "limma-trend",
                          filter = "default", features = chosen)
  expect_set_equal(rownames(with_default), chosen$feature_id)
})
|
953de19701df28f4939756e65574f6e6eb5fdbf4
|
081576efbe245e5c437d4c45a30c10453cd2cac8
|
/man/vis_pancan_anatomy.Rd
|
f098a4364e2bbc4bd61f524c0103d9ddb6256d24
|
[
"curl",
"MIT"
] |
permissive
|
fei0810/UCSCXenaShiny
|
93ecad178e560cbfe6668978bb77dac31e5b169a
|
398605a5da859886313649036da739feee0edec3
|
refs/heads/master
| 2023-04-18T00:58:07.866434
| 2021-04-28T12:39:02
| 2021-04-28T12:39:02
| 274,140,433
| 1
| 0
|
NOASSERTION
| 2021-04-28T12:39:02
| 2020-06-22T13:11:24
|
R
|
UTF-8
|
R
| false
| true
| 978
|
rd
|
vis_pancan_anatomy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis_pancan_value.R
\name{vis_pancan_anatomy}
\alias{vis_pancan_anatomy}
\title{Visualize Single Gene Expression in Anatomy Location}
\usage{
vis_pancan_anatomy(
Gene = "TP53",
Gender = c("Female", "Male"),
data_type = "mRNA",
option = "D"
)
}
\arguments{
\item{Gene}{a molecular identifier (e.g., "TP53") or a formula specifying
genomic signature (\code{"TP53 + 2 * KRAS - 1.3 * PTEN"}).}
\item{Gender}{a string, "Female" (default) or "Male".}
\item{data_type}{choose gene profile type, including "mRNA","transcript","methylation","miRNA","protein","cnv_gistic2"}
\item{option}{A character string indicating the colormap option to use. Five
options are available: "magma" (or "A"), "inferno" (or "B"), "plasma" (or "C"),
"viridis" (or "D", the default option) and "cividis" (or "E").}
}
\value{
a \code{ggplot} object
}
\description{
Visualize Single Gene Expression in Anatomy Location
}
|
af60fd16407c9b0de0f30de0f8bacaf27773fe87
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkLabelGetLineWrapMode.Rd
|
812ad930ad8e9ec78c099fcc410ae579d4b6ec60
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 488
|
rd
|
gtkLabelGetLineWrapMode.Rd
|
\alias{gtkLabelGetLineWrapMode}
\name{gtkLabelGetLineWrapMode}
\title{gtkLabelGetLineWrapMode}
\description{Returns line wrap mode used by the label. See \code{\link{gtkLabelSetLineWrapMode}}.}
\usage{gtkLabelGetLineWrapMode(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkLabel}}}}
\details{Since 2.10}
\value{[\code{\link{PangoWrapMode}}] the line wrap mode used by the label.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
2e93ad37d70b6cd7a3fe111522f9add50a3e2521
|
c85b1424f959ec964861d3cc0b396d75cceb84ae
|
/sec3.R
|
eb90b491e0eecffdffbb514ff0d2b3fc7b20a583
|
[] |
no_license
|
happyplatypus/investableUniverse
|
efed2b3251b4efa5211459ed9f87affd7e02d16d
|
7395a3808c2f415cc36da2c95561eb56f742c17f
|
refs/heads/master
| 2021-03-27T14:03:12.054011
| 2020-07-14T12:07:57
| 2020-07-14T12:07:57
| 65,375,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,839
|
r
|
sec3.R
|
#!/usr/bin/Rscript
# Screen US-listed stocks (NASDAQ/NYSE/NYSEMKT) for liquid names trading
# near their year-to-date highs, score them on short-term momentum and
# abnormal volume, and write the ranked list to CSV.
#
# Usage: sec3.R <min-marketcap-$M> <min-ADV-$M> <max-distance-from-high>
# e.g.:  sec3.R 800 10 0.98
library(TTR)
library(quantmod)
library(data.table)
library(lubridate)
# commandArgs() returns character strings; each cutoff must be coerced
# with as.numeric() at its point of use.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 3) {
  stop('<> marketcapmillion adv-million howmuchaway (cutoffs) 800 10 0.98 ')
}
# Ranking is done as of today.
date_ <- Sys.Date()
print('ticlist is small case')
print('<> mcmillion advmillion howmuchawayfromMAX(0.98) (cutoffs) ')
### date_ is the date_ on which you want the return and abnormal volume ranking
st <- stockSymbols(exchange = c("NASDAQ", "NYSE", "NYSEMKT"),
                   sort.by = c("Exchange", "Symbol"), quiet = FALSE)
# Drop rows missing BOTH Sector and Industry.  BUG FIX: the original used
# a negative numeric index, st[-c(intersect(...)), ], which selects ZERO
# rows when no row matches (negating an empty index yields an empty
# index); a logical mask is safe in the empty case.
st <- st[!(is.na(st$Sector) & is.na(st$Industry)), ]
st <- st[!is.na(st$LastSale), ]
st <- st[!is.na(st$MarketCap), ]
colnames(st)[1] <- 'tic'
head(st)
# Unknown IPO years default to 2000 so later vintage filters keep the row.
st$IPOyear[is.na(st$IPOyear)] <- 2000
nrow(st)
# Parse a market-cap string such as "$800M" or "$1.2B" into millions of
# dollars.  The leading character (the "$") is always dropped; strings
# whose final character is neither "M" nor "B" yield 0.
convertBM <- function(in_) {
  s <- as.character(in_)
  n <- nchar(s)
  suffix <- substr(s, n, n)
  if (suffix == "M") {
    # "$800M" -> 800
    as.numeric(substr(s, 2, n - 1))
  } else if (suffix == "B") {
    # "$1.2B" -> 1200 (billions expressed in millions)
    1000 * as.numeric(substr(s, 2, n - 1))
  } else {
    0
  }
}
# Normalize tickers and filter by market capitalization (args[1], in $M).
st$tic<-tolower(st$tic)
# MarketCap strings like "$800M"/"$1.2B" parsed into $M via convertBM().
st$MarketCap1<-sapply(st$MarketCap,convertBM)
st<-st[st$MarketCap1>=as.numeric(args[1]),]
#st<-st[st$MarketCap1>=800,]
print(args[1])
#'tivo' %in% st$tic
head(st)
nrow(st)
## capture short term momentum
### 1 5 10 day returns
## capture short term momentum
### 1 3 5 day adv compared to 20 day adv
#tic<-'tivo'
#date_<-Sys.Date()
##set standard
# Build the year-to-date reference series from AAPL; GG is the number of
# daily log returns (trading days minus one) every other ticker must match
# so feature vectors line up across the universe.
year_start<-as.Date(paste0(year(Sys.Date()),'01','01'),format="%Y%m%d")
# NOTE(review): auto.assign=F uses the reassignable F; prefer FALSE.
res<-try(x<-getSymbols('aapl',auto.assign=F,from=year_start,to=date_))
#x<-tail(x,20)
start_<-index(x)[1]
# Column 6 is the adjusted close; GG = length of the daily log-return series.
GG<-length(lastret<-diff(log(as.numeric(x[,6]))))
#dummy<-rep(0,GG)
##set standard
# Leftover debug assignment; adv('cy', GG) below is the actual smoke test.
tic='cy'
# Score one ticker: download its year-to-date prices, compute short-term
# return and abnormal-volume features, and keep it only when it is liquid
# and trading near its YTD high.
#
# tic: ticker symbol (lower case).
# GG:  expected number of daily returns (trading days minus one), taken
#      from the AAPL reference series; tickers with a different history
#      length are rejected so every feature vector has the same shape.
#
# Returns a numeric vector of 6 rounded features
#   c(ret1, (ret5-ret1)/3, (ret9-ret1-ret5)/5, advratio1, advratio5, advratio9)
# or NA when the ticker fails any gate (download error, mismatched history
# length, 20-day ADV below args[2] $M, or price too far below its YTD max).
adv <- function(tic, GG) {
  print(tic)
  res <- try(x <- getSymbols(as.character(tic), auto.assign = FALSE,
                             from = year_start, to = date_))
  dummy <- NA
  # BUG FIX: the error path used to return list(dummy) while every other
  # path returns a plain vector/NA, making the sapply() result ragged.
  if (inherits(res, "try-error")) return(dummy)

  # 20-day average daily dollar volume in $M (volume * adjusted close).
  x_20 <- tail(x, 20)
  adv <- floor(mean(as.numeric(x_20[, 5] * x_20[, 6]) / 1e6, na.rm = TRUE))

  # Daily dollar volume relative to the 20-day ADV.  BUG FIX: this line
  # had been commented out while tail(x, n)$adv.ratio was still read
  # below, which silently returned NULL and zeroed features e, f, g.
  x$adv.ratio <- floor(as.numeric(x[, 5] * x[, 6]) / 1e6) / adv

  # Open-to-adjusted-close returns plus rounded daily log returns.
  lastret <- (as.numeric(x[, 6]) - as.numeric(x[, 1])) / as.numeric(x[, 1])
  prices <- as.numeric(x[, 6])
  x$returns <- c(0, round(diff(log(prices)), 2))

  # Require the same history length as the AAPL reference series.
  if (length(lastret) != (GG + 1)) return(dummy)

  # Cumulative returns over the last 1 / 5 / 9 sessions ...
  a <- sum(as.numeric(tail(x, 1)$returns))
  b <- sum(as.numeric(tail(x, 5)$returns))
  c <- sum(as.numeric(tail(x, 9)$returns))
  # ... and average abnormal-volume ratios over the same windows.
  e <- sum(as.numeric(tail(x, 1)$adv.ratio)) / 1
  f <- sum(as.numeric(tail(x, 5)$adv.ratio)) / 5
  g <- sum(as.numeric(tail(x, 9)$adv.ratio)) / 9
  obj <- c(a, (b - a) / 3, (c - (a + b)) / 5, e, f, g)
  obj <- as.numeric(Map(function(v) round(v, 2), obj))

  # Distance from the YTD high (1.0 == at the high).
  N <- length(prices)
  ratio <- prices[N] / max(prices)
  # Gate on liquidity and proximity to the high.  BUG FIX: args[3] comes
  # from commandArgs() as a character string; the original compared
  # numeric >= character, which R resolves by string comparison.  Also
  # replaced the scalar ifelse(return(...), return(...)) with if/else.
  if (adv >= as.numeric(args[2]) && ratio >= as.numeric(args[3])) {
    obj
  } else {
    dummy
  }
}
# Smoke-test the scorer on one liquid name before scanning the universe.
adv('cy', GG)

st <- st[order(st$tic), ]
print("Running ADV check $10M $ per day cutoff and last return")

# Score every surviving ticker; each element is either a 6-number feature
# vector or NA (filtered out / failed download).
x <- sapply(st$tic, adv, GG = GG)

# One row per ticker with feature columns X1..X6; NA rows dropped below.
x1 <- data.frame(t(data.frame(x)))
x1$tic <- rownames(x1)
x1 <- data.table(x1)

# Join the features back onto the symbol table by ticker.
st <- data.table(st)
setkey(st, tic)
setkey(x1, tic)
st1 <- merge(st, x1)
head(st1)

# Keep only tickers with a complete feature vector.
st2 <- st1[complete.cases(st1), ]

# Z-score each feature, winsorize at +/-2, and sum into a single score.
zscores <- data.table(scale(st2[, c('X1', 'X2', 'X3', 'X4', 'X5', 'X6'), with = FALSE]))
zscores[zscores > 2] <- 2
# BUG FIX: the original read `rank[rank<-2]<- -2`, which R parses as the
# assignment `rank <- 2` inside `[`, clobbering the table and breaking the
# apply() below.  The intended comparison is `< -2`.
zscores[zscores < -2] <- -2
st2$score <- rowSums(zscores)
head(st2)
length(unique(st2$tic))
nrow(st2)

# De-duplicate tickers, keeping the first occurrence of each.
st2$idx <- seq_len(nrow(st2))
first_idx <- st2[, list(idx = min(idx)), by = 'tic']
setkey(st2, idx)
setkey(first_idx, idx)
st3 <- merge(st2, first_idx)
st3$idx <- seq_len(nrow(st3))
colnames(st3)[2] <- 'tic'
if (length(unique(st3$tic)) != nrow(st3)) {
  print('Does not pass uniqueness test')
} else {
  print('Passes uniqueness test')
}

# Report and persist, best scores first.
st3 <- st3[order(-st3$score), ]
print(head(st3, 20))
print(tail(st3, 20))
home <- Sys.getenv("HOME")
write.csv(st3, paste0(home, '/code/r_projects/investableUniverse/investableUniverse_highs.csv'), row.names = FALSE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.