blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23f68145fe47ccee164e9c94308f33ece419f7f3
|
1d7b8d97be6d3b3aed26bc19ea6855bbdb2d21bc
|
/man/simulate-mixedDiffusion-method.Rd
|
4c6eaaf94798578cc4d79de96d3ae7e464f1770d
|
[] |
no_license
|
cran/BaPreStoPro
|
18633df8e18b518225e7c01147473684d9369a46
|
f3e8f06b07ec4b4ca0be9de3d481734d5e154c31
|
refs/heads/master
| 2021-01-19T01:11:31.691784
| 2016-06-07T14:28:11
| 2016-06-07T14:28:11
| 60,611,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,267
|
rd
|
simulate-mixedDiffusion-method.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/simulate.R
\docType{methods}
\name{simulate,mixedDiffusion-method}
\alias{simulate,mixedDiffusion-method}
\title{Simulation of hierarchical (mixed) diffusion model}
\usage{
\S4method{simulate}{mixedDiffusion}(object, nsim = 1, seed = NULL, t,
mw = 1, plot.series = TRUE)
}
\arguments{
\item{object}{class object of parameters: "mixedDiffusion"}
\item{nsim}{number of data sets to simulate. Default is 1.}
\item{seed}{optional: seed number for random number generator}
\item{t}{vector of time points}
\item{mw}{mesh width for finer Euler approximation to simulate time-continuity}
\item{plot.series}{logical(1), if TRUE, simulated series are depicted graphically}
}
\description{
Simulation of the stochastic process model
\eqn{dY_t = b(\phi_j,t,Y_t)dt + \gamma \widetilde{s}(t,Y_t)dW_t, \phi_j~N(\mu, \Omega)}.
}
\examples{
mu <- 2; Omega <- 0.4; phi <- matrix(rnorm(21, mu, sqrt(Omega)))
model <- set.to.class("mixedDiffusion", y0.fun = function(phi, t) 0.5,
parameter = list(phi = phi, mu = mu, Omega = Omega, gamma2 = 0.1),
b.fun = function(phi, t, x) phi*x, sT.fun = function(t, x) x)
t <- seq(0, 1, by = 0.01)
data <- simulate(model, t = t, plot.series = TRUE)
}
|
dbd40c9350d8072fe7ab17aaeeda34cba6622e66
|
91121200f32d304f59d64a4da8e96caf2429338a
|
/splineABS/R/gen_simu_data.R
|
2f9047caf545999be1e0223036c7379d3c8689a9
|
[] |
no_license
|
pqrXTN/Spline-Package-R
|
efeb10b12e3f2aa341b429b48f2fd143557ae075
|
13321e0a631f51f413a57b64a95a65aff08a9222
|
refs/heads/master
| 2021-06-15T21:57:45.858974
| 2021-01-25T02:22:07
| 2021-01-25T02:22:07
| 102,338,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,579
|
r
|
gen_simu_data.R
|
#' @title Generate 1D Fan-Gijbels simulation data
#'
#' @description Return a list of x and y values generated by the 1D
#' Fan-Gijbels function or one of the Donoho-Johnstone test functions.
#'
#' @details
#' In \code{genMethod}: "DJ" stands for functions in Donoho and
#' Johnstone (1994).
#'
#' The return value of the function is a list of \code{x, y, v}.
#' \itemize{
#'   \item x: independent variable.
#'   \item y: response variable with noise.
#'   \item v: the true value of the response variable without noise.
#' }
#'
#' @param nobs A number of observations in simulation.
#' @param genMethod A string of data generating method among "1dfan",
#'   "DJ.blocks", "DJ.bumps", "DJ.heavi", "DJ.doppler".
#' @param SNR A number of "signal to noise" ratio: the ratio of standard
#'   deviation of signal to noise.
#' @param signal A number about amplitude in response value of "DJ" method.
#' @param seed A number of base of random seed.
#'
#'
#' @return A list contains generated \code{x, y, v}.
#'
#' @examples
#' # generate data of "DJ.blocks".
#' ## numbers of observation: 1024, signal to noise ratio is 7.
#' data.xyv <- gen.simu.data(nobs = 1024, genMethod = "DJ.blocks", SNR = 7)
#'
#' @seealso
#' 4 kinds of functions of Donoho and Johnstone (1994) in function:
#' \code{\link{DJ.EX.rand}}.
#' Add Gaussian noise to the response variable in function:
#' \code{\link{add.noise}}.
#' @import stats
#' @export
#'
gen.simu.data <- function(nobs = 1024, genMethod = "1dfan", SNR = 7, signal = 7,
                          seed = NULL){
  # Fail fast on an unknown method: previously an unrecognised `genMethod`
  # fell through every branch, left `v` undefined, and failed later with a
  # cryptic "object 'v' not found" error.
  genMethod <- match.arg(genMethod, c("1dfan", "DJ.blocks", "DJ.bumps",
                                      "DJ.heavi", "DJ.doppler"))
  # if not test, set random seed.
  if(!is.null(seed)){
    set.seed(seed)
  }
  # generate independent variable x: a sorted random design on [0, 1]
  # ("DJ.doppler" uses a Beta(2, 2) design; all other methods use uniform).
  if(genMethod == "DJ.doppler"){
    x <- c(0, sort(rbeta(nobs-2, 2, 2)), 1)
  }else{
    x <- c(0, sort(runif(nobs-2)), 1)
  }
  # generate original dependent variable v (true signal, without noise)
  v <- switch(genMethod,
              "1dfan"      = sin(2*(4*x-2)) + 2*exp(-16*(4*x-2)^2),
              "DJ.blocks"  = DJ.EX.rand(x = x, signal = signal)$blocks,
              "DJ.bumps"   = DJ.EX.rand(x = x, signal = signal)$bumps,
              "DJ.heavi"   = DJ.EX.rand(x = x, signal = signal)$heavi,
              "DJ.doppler" = DJ.EX.rand(x = x, signal = signal)$doppler)
  # generate y: add Gaussian noise to the signal, y = v + err.
  # A fixed seed (123) is used when none is supplied so that repeated calls
  # produce reproducible noise (original behaviour, kept for compatibility).
  if(is.null(seed)){
    y <- add.noise(v, SNR = SNR, seed = 123)
  }else{
    y <- add.noise(v, SNR = SNR, seed = seed)
  }
  # return x, y (with noise), v (without noise)
  return(list(x=x, y=y, v=v))
}
|
fac910625aaf36f879defebb7f0d76c0e16ae248
|
80d7a0c4180f0078c1c444883503667005c1b8e3
|
/tests/testthat.R
|
593575e9d733a17722d1e83f6db25dd9d98a2958
|
[] |
no_license
|
cran/trip
|
e9dfeb5f7b6a9d8cacb5ccd60e0a9aa79c13d428
|
944c8d31a48bfb9dbf5d1ed70eac4363b80be9f5
|
refs/heads/master
| 2023-07-13T08:02:22.139701
| 2023-06-29T13:30:02
| 2023-06-29T13:30:02
| 17,700,603
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52
|
r
|
testthat.R
|
# Standard testthat bootstrap: executed by R CMD check to run every test file
# under tests/testthat/ for the 'trip' package.
library(testthat)
library(trip)
test_check("trip")
|
1994c8f9d5cee5b0c2436272d3d76dbac71a08f8
|
b55a15dba181a1396f5c308dc4f34838b4281062
|
/man/k_fold.Rd
|
6928fd52e5e781622338ee4bd81fec8ca9af467c
|
[] |
no_license
|
aballou16/geneticriskR
|
154223918022163847867f44585924564b982c50
|
7a2913302caaa0380c6630bb71184c65f2381754
|
refs/heads/master
| 2020-09-14T02:24:50.697643
| 2019-12-19T18:37:26
| 2019-12-19T18:37:26
| 222,984,917
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 442
|
rd
|
k_fold.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyses.R
\name{k_fold}
\alias{k_fold}
\title{performs 5-fold cross validation}
\usage{
k_fold(phenotype, score_col, full_table)
}
\arguments{
\item{phenotype}{character vector for phenotype}
\item{score_col}{character vector for score}
\item{full_table}{data.frame containing phenotype and score information}
}
\description{
performs 5-fold cross validation
}
|
d2c1643f39766aba9e94147d97e6319212d7a8f3
|
ed640b2eab34ddbde1435b83aa29d49d2c01422d
|
/man/wilcoxonR.Rd
|
4181cdf2950024dc66612db2e2e5421656acac9e
|
[] |
no_license
|
cran/rcompanion
|
4cf285cf6d43197e55df85de86d23904f9418c37
|
dea4b790b5d78fe350ff303e5c04603c7e672ae1
|
refs/heads/master
| 2023-05-12T14:48:28.937161
| 2023-05-05T07:20:05
| 2023-05-05T07:20:05
| 67,362,460
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,975
|
rd
|
wilcoxonR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wilcoxonR.r
\name{wilcoxonR}
\alias{wilcoxonR}
\title{r effect size for Wilcoxon two-sample rank-sum test}
\usage{
wilcoxonR(
x,
g = NULL,
group = "row",
coin = FALSE,
ci = FALSE,
conf = 0.95,
type = "perc",
R = 1000,
histogram = FALSE,
digits = 3,
reportIncomplete = FALSE,
...
)
}
\arguments{
\item{x}{Either a two-way table or a two-way matrix.
Can also be a vector of observations.}
\item{g}{If \code{x} is a vector, \code{g} is the vector of observations for
the grouping, nominal variable.
Only the first two levels of the nominal variable are used.}
\item{group}{If \code{x} is a table or matrix, \code{group} indicates whether
the \code{"row"} or the \code{"column"} variable is
the nominal, grouping variable.}
\item{coin}{If \code{FALSE}, the default, the Z value
is extracted from a function similar to the
\code{wilcox.test} function in the stats package.
If \code{TRUE}, the Z value
is extracted from the \code{wilcox_test} function in the
coin package. This method may be much slower, especially
if a confidence interval is produced.}
\item{ci}{If \code{TRUE}, returns confidence intervals by bootstrap.
May be slow.}
\item{conf}{The level for the confidence interval.}
\item{type}{The type of confidence interval to use.
Can be any of "\code{norm}", "\code{basic}",
"\code{perc}", or "\code{bca}".
Passed to \code{boot.ci}.}
\item{R}{The number of replications to use for bootstrap.}
\item{histogram}{If \code{TRUE}, produces a histogram of bootstrapped values.}
\item{digits}{The number of significant digits in the output.}
\item{reportIncomplete}{If \code{FALSE} (the default),
\code{NA} will be reported in cases where there
are instances of the calculation of the statistic
failing during the bootstrap procedure.}
\item{...}{Additional arguments passed to the \code{wilcox_test} function.}
}
\value{
A single statistic, r.
Or a small data frame consisting of r,
and the lower and upper confidence limits.
}
\description{
Calculates r effect size
for Mann-Whitney two-sample rank-sum test,
or a table with an ordinal variable and a
nominal variable with two levels; confidence intervals
by bootstrap.
}
\details{
r is calculated as Z divided by
square root of the total observations.
This statistic reports a smaller effect size than does
Glass rank biserial correlation coefficient
(\code{wilcoxonRG}), and cannot reach
-1 or 1. This effect is exacerbated when sample sizes
are not equal.
Currently, the function makes no provisions for \code{NA}
values in the data. It is recommended that \code{NA}s be removed
beforehand.
When the data in the first group are greater than
in the second group, r is positive.
When the data in the second group are greater than
in the first group, r is negative.
Be cautious with this interpretation, as R will alphabetize
groups if \code{g} is not already a factor.
When r is close to extremes,
or with small counts in some cells,
the confidence intervals
determined by this
method may not be reliable, or the procedure may fail.
}
\examples{
data(Breakfast)
Table = Breakfast[1:2,]
library(coin)
chisq_test(Table, scores = list("Breakfast" = c(-2, -1, 0, 1, 2)))
wilcoxonR(Table)
data(Catbus)
wilcox.test(Steps ~ Gender, data = Catbus)
wilcoxonR(x = Catbus$Steps, g = Catbus$Gender)
}
\references{
\url{http://rcompanion.org/handbook/F_04.html}
}
\seealso{
\code{\link{freemanTheta}},
\code{\link{wilcoxonRG}}
}
\author{
Salvatore Mangiafico, \email{mangiafico@njaes.rutgers.edu}
}
\concept{Wilcoxon-Mann-Whitney}
\concept{confidence interval}
\concept{effect size}
|
06bb6284bfb0344ea348c11e6cb4407851af4155
|
3d4136fe7640a691ecc191fe9dd55be8cd1c9091
|
/Archives/MLShiny_Lucas/ui.R
|
b2b9d222fe9c46a9fa298c1a364980584ad4e95a
|
[] |
no_license
|
m2-rshiny/ProjetTut
|
f551d46aa4fb93a0971fae57578a5715294e4ebc
|
d8d620010ffed1acb50926e4b78d9321f308546d
|
refs/heads/master
| 2020-09-12T02:05:41.881964
| 2020-02-20T17:00:45
| 2020-02-20T17:00:45
| 222,264,933
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,433
|
r
|
ui.R
|
# Load the packages required by this script
source("scripts/packages.R")
# Import the csv file listing the available algorithms
train_conf <- read.csv("train_conf.csv", sep=";", header=TRUE)
##### User interface (predefined fluidPage layout) ----------------------------
shinyUI(
  fluidPage(
    theme = shinytheme("simplex"),
    tags$link(rel = "stylesheet", type = "text/css", href = "style.css"),
    # A single page with several tabs
    navbarPage(
      title = "Machine Learning BD",
      position = "static-top", responsive = TRUE,
      ### Tab 1: data import and preparation
      tabPanel(
        "Préparation données",
        # Tab layout: side bar (upload + parsing options) and main panel
        sidebarLayout(
          sidebarPanel(
            # Upload an external file
            fileInput(
              inputId = 'dataSet',
              label = 'Chemin du fichier (csv ou tsv)',
              # Accepted file types
              accept = c('text/csv', '.csv')
            ),
            # Small separator bar
            tags$hr(),
            # Does the file have a header row?
            checkboxInput(
              inputId = 'header',
              label = 'Header',
              value = TRUE
            ),
            # Field separator
            radioButtons(
              inputId = 'sep',
              label = 'Separator',
              choices = c('Comma' = ',', 'Semicolon' = ';', 'Tab' = '\t'),
              selected = '\t'
            ),
            # Quoting character
            radioButtons(
              inputId = 'quote',
              label = 'Quote',
              choices = c('None' = '', '"' = '"', "'" = "'"),
              selected = "'"
            ),
            # Button to load the data
            actionButton("load_data", "Charger les données"),
            # Small separator bar
            tags$hr()
          ),
          # Main panel: preview table of the imported data
          mainPanel(
            tableOutput(outputId = 'contents')
          )
        )
      ),
      ### Tab 2: algorithm configuration
      tabPanel(
        "ML Algorithme",
        class = "pages",
        fluidRow(
          column(12,
            # Choice of algorithms to run.
            # BUG FIX: `choices` was left empty (`choices =`), which raises an
            # "argument is missing" error at runtime. Start with an empty set;
            # the server can populate it (e.g. from `train_conf`).
            selectizeInput(
              inputId = 'algorithme',
              label = 'Algorithmes',
              choices = c()
            ),
            # BUG FIX: this input reused the id 'algorithme', duplicating the
            # selectize input above (Shiny input ids must be unique). Renamed.
            # NOTE(review): update server.R if it referenced the old id.
            checkboxGroupInput(
              inputId = 'algorithme_check',
              label = 'Algorithme',
              choices = c()
            ),
            # Training-set proportion (%)
            sliderInput(
              inputId = "prop_learn",
              label = "Proportion de données d'apprentissage",
              min = 5,
              max = 95,
              value = 70
            ),
            # Button launching the execution of the algorithms
            actionButton("exec", "Executer")
          )
        )
      ),
      # Placeholder tab for descriptive statistics (currently empty).
      # BUG FIX: removed trailing commas (`column(4,)`, `fluidRow(...),`),
      # which create empty arguments and error when the tags are built.
      tabPanel(
        "Stat Desc",
        class = "pages",
        fluidRow(
          column(4),
          column(4),
          column(4)
        )
      ),
      # Tab showing the data used
      tabPanel("Data", htmlOutput(outputId = "Donnees"))
    )
  )
)
|
ea1b7c926d663a650fedad0858953d3df037d3dc
|
d8a5e3b9eef3c76bb7ca64d29ef2746cebd4c542
|
/R/isLetter.R
|
60e64736ef9a70f9321df5fa9dd839a45c4bbbab
|
[] |
no_license
|
cran/qmrparser
|
0539ad4bf5b97039e50b2bffa16c3012899c6134
|
bb1bb2b50b358d79f6400d521f995e1d2a55a784
|
refs/heads/master
| 2022-05-09T03:49:13.511049
| 2022-04-23T23:00:05
| 2022-04-23T23:00:05
| 17,698,845
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
isLetter.R
|
# Generated from noweb/qmrparser.nw -- do not edit this file directly.
# Predicate: TRUE when `ch` is an alphabetic character, i.e. either an
# upper-case or a lower-case letter according to isUppercase()/isLowercase().
isLetter <- function(ch) {
  isUppercase(ch) || isLowercase(ch)
}
|
0174546c7f825d31c72a027807215c039ddca02e
|
01b1302af51d339f7c8827a620c4a5fb26c890f1
|
/resource_tracking/archive/create_fr_focus_topic_mapping_list.R
|
50eba81fc8c2bcea1a8489fd25512a8cb7a891af
|
[] |
no_license
|
ihmeuw/gf
|
64ab90fb5a5c49694bde1596f4b20fcf107a76e3
|
29e0c530b86867d5edd85104f4fe7dcb1ed0f1ee
|
refs/heads/develop
| 2021-08-15T02:16:59.086173
| 2021-08-03T19:52:31
| 2021-08-03T19:52:31
| 109,062,373
| 3
| 6
| null | 2019-03-21T01:48:02
| 2017-10-31T23:17:16
|
R
|
UTF-8
|
R
| false
| false
| 1,076
|
r
|
create_fr_focus_topic_mapping_list.R
|
# Create a codebook using unique values of the funding request interventions
# that are not already in the old identifyTopicAreas_PCE2020_forSubsetting
# sheet.
library(data.table)
# NOTE(review): hard-coded absolute paths (J:/ network drive and a
# user-specific Box Sync folder) -- this script only runs on the original
# analyst's machine.
old <- fread("J:/Project/Evaluation/GF/resource_tracking/modular_framework_mapping/archive/identifyTopicAreas_PCE2020_forSubsetting.csv")
new <- fread("C:/Users/frc2/Box Sync/Global Fund Files/tableau_data/fr_budgets_all.csv")
# keep certain columns from new, then de-duplicate rows
new <- new[,.(loc_name, disease, gf_module, gf_intervention)]
new <- unique(new)
# add "_new" suffix to column names so they do not clash with `old`
setnames(new,
         old=c("loc_name", "disease", "gf_module", "gf_intervention"),
         new=c("loc_name_new", "disease_new", "gf_module_new", "gf_intervention_new"))
# keep only rows whose module OR intervention is not already in the old sheet
new <- new[!new$gf_module_new %in% old$gf_module|!new$gf_intervention_new %in% old$gf_intervention,]
# output file: blank codebook to be filled in by hand
write.csv(new, "J:/Project/Evaluation/GF/resource_tracking/modular_framework_mapping/identifyTopicAreas_PCE2020_forSubsettingFRs_blank.csv")
|
dc6113348f68bfeffd9ddfca135e3fda44a29853
|
e5840be944e8cbb6474de233c8be8cae9a45b2e9
|
/man/list_gtex_tissues.Rd
|
1637034b8fb260c858c74dc0f446db36d3b73cab
|
[] |
no_license
|
machiela-lab/LDlinkR
|
304f37c414893d6f2158f249d29efad9bf786bfd
|
83f439f98e398dc8498807283055d975e056a854
|
refs/heads/master
| 2023-06-21T12:11:49.230327
| 2023-06-13T20:18:59
| 2023-06-13T20:18:59
| 251,702,603
| 1
| 0
| null | 2020-07-21T20:39:46
| 2020-03-31T18:57:09
| null |
UTF-8
|
R
| false
| true
| 827
|
rd
|
list_gtex_tissues.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_gtex_tissues.R
\name{list_gtex_tissues}
\alias{list_gtex_tissues}
\title{Provides a data frame listing the GTEx full names, `LDexpress` full names
(without spaces) and acceptable abbreviation codes of the 54 non-diseased
tissue sites collected for the GTEx Portal and used as input for the
`LDexpress` function.}
\usage{
list_gtex_tissues()
}
\value{
a data frame listing the GTEx tissues, their names and abbreviation codes
used as input for LDexpress.
}
\description{
Provides a data frame listing the GTEx full names, `LDexpress` full names
(without spaces) and acceptable abbreviation codes of the 54 non-diseased
tissue sites collected for the GTEx Portal and used as input for the
`LDexpress` function.
}
\examples{
list_gtex_tissues()
}
|
1ed7a52d13d09cbf95f0d383b14b0f9d1c8077db
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RSEIS/examples/saveWPX.Rd.R
|
ba34fca398588f76feab1d3223348993f802a1d3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
saveWPX.Rd.R
|
library(RSEIS)
### Name: saveWPX
### Title: Save WPX
### Aliases: saveWPX
### Keywords: misc
### ** Examples
## Not run:
##D tdir = tempdir()
##D s1 <- setWPX(name="HI", yr=2011, jd=231, hr=4, mi=3, sec = runif(5))
##D hh <- saveWPX(s1, destdir = tdir )
##D
##D ### read in the data
##D
##D
##D load(hh)
##D
##D data.frame(twpx)
##D
##D
##D
## End(Not run)
|
bb991192c7a0441ce172eb7f6e400b61f6130ca4
|
7cb1187afb7de7fc966545a164557054a01d25d5
|
/code/271-cellphonedb-pseudobulk-WT-KO.r
|
6b33b13f6c5b9c9cfc6ec9fadb20743410d39065
|
[] |
no_license
|
anders-biostat/hepatocytes-zonation
|
5d818cf3b65e239ce58a574c509ef27ab269c2ae
|
58d5fe9865b6740f6e2394d7aa89163d6a4a317d
|
refs/heads/master
| 2023-08-11T03:20:19.232585
| 2021-10-07T10:24:51
| 2021-10-07T10:24:51
| 279,832,652
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,775
|
r
|
271-cellphonedb-pseudobulk-WT-KO.r
|
## here we use the results of the DE from the pseudobulks for tests
## between genotypes and filter the genes which are located in the cellphonedb.
library(purrr)
library(ggplot2)
library(dplyr)
library(tidyr)
library(RSQLite)
library(DBI)
library(Matrix)
# Project-local helpers; assets.r presumably defines `rdsDir` used below --
# TODO confirm.
source("code/func.r")
source("code/assets.r")
resdir <- "results/tables/"
# Pseudobulk DESeq results: a list whose elements carry a $result table with
# genes as rownames (inferred from the map(..., "result") calls below).
pbulkres <- readRDS("./results/rds/pbulks-deseq.rds")
## first get the mapping from biomaRt
library("biomaRt")
# Query Ensembl biomaRt for mouse (MGI) -> human (HGNC) orthologue symbols.
# Returns the two-column data frame produced by getLDS(); network-bound.
createMouse2HumanMapping <- function(mouseGenes) {
  mart <- list(
    human = useMart("ensembl", dataset = "hsapiens_gene_ensembl"),
    mouse = useMart("ensembl", dataset = "mmusculus_gene_ensembl"))
  mouse2human <- getLDS(
    filters = "mgi_symbol",
    values = mouseGenes,
    mart = mart$mouse,
    martL = mart$human,
    attributes = c("mgi_symbol"),
    attributesL = c("hgnc_symbol"),
    uniqueRows=T)
  mouse2human
}
# Cache the (slow) biomaRt lookup on disk, then reload it from the cache.
mouse2humanFile <- file.path(rdsDir, "mouse2human-biomart.rds")
mouseGenes <- map(pbulkres, "result") %>% map(rownames) %>% unlist %>% unique
mouse2human <- createMouse2HumanMapping(mouseGenes)
saveRDS(mouse2human, mouse2humanFile)
## add mapping information to the cellphonedb database
mouse2human <- readRDS(mouse2humanFile)
# Read every table of the cellphonedb SQLite database into a named list.
con <- dbConnect(RSQLite::SQLite(), "ext/cellphone.db")
dbtables <- dbListTables(con)
cptables <- map(set_names(dbtables), dbReadTable, conn = con)
# Join proteins to their gene symbols, then attach the mouse orthologues.
annotation <- cptables$protein_table %>%
  inner_join(cptables$gene_table, by = c("id_protein" = "protein_id"))
annotation <- annotation %>%
  inner_join(mouse2human, by = c("gene_name" = "HGNC.symbol"))
inter <- cptables$interaction_table
# Map one cellphonedb protein id to its (unique) mouse MGI gene symbols.
# Relies on the global `annotation` table built earlier in this script;
# returns character(0) when the id has no annotated mouse gene.
id2gene <- function(id) {
  hits <- annotation$id_protein == id
  unique(annotation$MGI.symbol[hits])
}
# Build, for every mouse gene participating in a cellphonedb interaction, the
# character vector of its interacting partner genes (MGI symbols).
# Depends on the global `annotation` table through id2gene().
createMouseGeneLists <- function(inter) {
  # BUG FIX: the original called unique(a, b), which silently passes the
  # second id column as `incomparables` (so column-2-only ids were dropped
  # and shared ids were not de-duplicated) instead of taking the union.
  allids <- unique(c(inter$multidata_1_id, inter$multidata_2_id))
  ## not all mouse genes were found; we exclude the not-found ones here
  withprotein <- map(allids, id2gene) %>% map_lgl(~ length(.x) > 0)
  allids <- allids[withprotein]
  # For each id, collect its partner ids from either side of the interaction.
  x <- map(set_names(allids), function(id) {
    c(inter$multidata_2_id[inter$multidata_1_id == id],
      inter$multidata_1_id[inter$multidata_2_id == id])
  })
  x <- map(x, unlist)
  # Translate partner ids to mouse gene symbols, dropping ids with no genes.
  x <- map(x, ~ map(.x, id2gene))
  x <- map(x, unlist) %>% Filter(function(x) length(x) > 0, x = .)
  ## some human genes are mapped to several mouse genes:
  ## we copy the partner lists for those genes as well
  interactingMGI <- map(names(x), id2gene) %>%
    imap(function(mousegenes, i) {
      set_names(rep(x[i], length(mousegenes)), mousegenes)
    }) %>% do.call(what = c)
  interactingMGI
}
# Build the per-gene interacting-partner lists and add multidata annotation.
interactingMGI <- createMouseGeneLists(inter)
annotation <- annotation %>% inner_join(cptables$multidata_table,
                                        by = c("protein_multidata_id" = "id_multidata"))
# Flatten the partner lists into a two-column (first, second) data frame of
# interacting mouse gene pairs, and cache it on disk.
genePairs <- map(interactingMGI, function(x) data.frame(second = x)) %>%
  bind_rows(.id = "first") %>% unique
saveRDS(genePairs, file.path(rdsDir, "gene-pairs-cellphone-mouse-biomart.rds"))
## and now finally intersect the DE results and the cellphonedb orthologues
genePairs <- readRDS(file.path(rdsDir, "gene-pairs-cellphone-mouse-biomart.rds"))
# Keep only DE genes that occur in any cellphonedb pair.
# NOTE(review): capitalize() comes from code/func.r or a loaded package --
# not visible here.
depairs <- pbulkres %>%
  map(function(x) {
    x$result %>% tibble::rownames_to_column(var = "gene") %>%
      mutate(gene = capitalize(gene)) %>%
      filter(gene %in% unlist(genePairs))
  })
## add info about the pairs
# Make the pair table symmetric so pairs can be joined from either side.
g <- genePairs
g$first <- genePairs$second
g$second <- genePairs$first
g <- unique(rbind(genePairs, g))
# Join hepatocyte DE genes to their LSEC partner genes and export.
# Assumes pbulkres has elements named "heps" and "lsec" -- TODO confirm.
decellphone <- depairs$heps %>%
  inner_join(g, by = c(gene = "first")) %>%
  inner_join(depairs$lsec, by = c(second = "gene"),
             suffix = c(".hep", ".lsec")) %>%
  rename(lsec.gene = "second", hep.gene = "gene")
write.csv(decellphone,
          file = file.path(resdir, "pbulks-wt-ko-cellphonedb-pairs.csv"),
          row.names = FALSE)
|
1aac330057ff5fb10434d11a08d2b67b2cfbadc8
|
c61b367db07762465c749e85ed0c933a0d2e9f5d
|
/Code/Test_raw_immune_clinical.R
|
943a0dca7776cc3c3fc3d49072663e00e147ed43
|
[] |
no_license
|
lizhu06/TILsComparison_PBTvsMET
|
08880f46b9d1a42e3f9b8e841a0edc35fd00386e
|
5adec0fc526a3025ccbcd99ea34d40b3c56055ba
|
refs/heads/master
| 2020-06-02T05:54:49.924270
| 2019-06-09T22:45:08
| 2019-06-09T22:45:08
| 191,060,883
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,719
|
r
|
Test_raw_immune_clinical.R
|
# Exploratory script: test associations between raw immune scores and
# clinical variables in paired primary / metastatic tumour samples.
rm(list=ls())
options(stringsAsFactors = FALSE)
library(survival)
library(rms)
# NOTE(review): hard-coded working directory; runs only on the original host.
setwd("/net/wong05/home/liz86/Steffi/primary_vs_mets/")
library(ggplot2)
library(gridExtra)
#### load data
# The .RData files provide sample_annot, sample_pair_info, immune_res_aveDup
# and all_clin (object names inferred from their uses below -- TODO confirm).
load("Data_v2/sample_annot.RData")
load("Data_v2/sample_pair_info.RData")
load("Results_v2/immune_res_aveDup.RData")
load("Data_v2/all_clinical_info.RData")
# Normalise capitalisation of metastasis locations
all_clin[, "Met.Location"] <- gsub("bone", "Bone", all_clin[, "Met.Location"])
all_clin[, "Met.Location"] <- gsub("ovary", "Ovary", all_clin[, "Met.Location"])
# Dichotomise age at diagnosis at the cohort median
age_cut_median <- all_clin[,"Dx.Age"] > median(all_clin[, "Dx.Age"])
age_cut_median2 <- rep(NA, length(age_cut_median))
age_cut_median2[age_cut_median==1] <- "age>=median"
age_cut_median2[age_cut_median==0] <- "age<median"
all_clin[,"Dx.Age"] <- age_cut_median2
# Same dichotomisation for age at metastasis (column may contain NAs)
age_cut_median <- all_clin[,"mets.age"] > median(all_clin[, "mets.age"],
                                                 na.rm=TRUE)
age_cut_median2 <- rep(NA, length(age_cut_median))
age_cut_median2[age_cut_median==1] <- "mets.age>=median"
age_cut_median2[age_cut_median==0] <- "mets.age<median"
all_clin[,"mets.age"] <- age_cut_median2
# Tissue site for each sample that has immune scores
tissue <- sample_annot[match(rownames(immune_res_aveDup$ciber_rel),
                             sample_annot[,"ID"]), "site"]
## variables to test
var_for_test <- c("Dx.Age", "mets.age",
                  "Met.Location", "Race",
                  "Primary.Histology.ILCIDC", "Menopausal.Status",
                  "Pathological.Stage.Num",
                  "ER.Prim", "ER.MET", "PR.Prim", "PR.MET", "HER2.Prim", "HER2.MET",
                  "HER2.Prim.PosNeg", "HER2.MET.PosNeg",
                  "HR.Prim", "HR.MET", "HR.HER2.Prim", "HR.HER2.MET",
                  "PreEndocrine", "PreHER2", "PreChemo")
# Sanity check: every variable must exist in the clinical table
all(var_for_test %in% colnames(all_clin))
#all_clin[] <- lapply(all_clin, factor)
#for(i in 1:length(var_for_test)){
# print(var_for_test[i])
# print(levels(all_clin[,i]))
#}
## function to test clinical var with immune
# For one clinical variable and one immune-score column, compute the median
# and quartiles of the score per clinical category plus a group-comparison
# p-value. The score is taken from the primary sample, the matched
# metastasis, or the paired difference (mets - primary).
# Returns a 1-row matrix: (median, 25perc, 75perc) per category, then "pval".
# NOTE(review): `sample_pair_info` is read from the global environment.
test_with_immune_per_var <- function(clin_var_name, all_clin,
                                     immune_var_name, score_matrix,
                                     primary=TRUE, mets=FALSE, delta=FALSE){
  # Dead interactive-debugging block (never executed).
  if(FALSE){
    clin_var_name <- "Met.Location"
    score_matrix <- immune_res_aveDup$estimate
    score_matrix_name <- "estimate"
    if(score_matrix_name == "estimate"){
      score_matrix <- score_matrix[, -c(1, 3,4), drop=FALSE]
    }
    primary <- TRUE
    continuous <- FALSE
    immune_var_name <- colnames(score_matrix)[1]
  }
  # Keep only samples with a non-missing value for the clinical variable
  id <- rownames(all_clin)[!is.na(all_clin[,clin_var_name])]
  var <- all_clin[id, clin_var_name]
  # Select the immune score. Note `delta` is never inspected directly: the
  # final else branch is taken whenever primary and mets are both FALSE.
  if(primary==TRUE){
    immune <- score_matrix[id, immune_var_name]
  }else if(mets==TRUE){
    # Map primary sample ids to their paired metastasis ids
    id <- sample_pair_info[match(id,
                                 sample_pair_info[,"primary_id"]), "mets_id"]
    immune <- score_matrix[id, immune_var_name]
  }else{
    # Paired difference: metastasis score minus primary score
    primary_id <- id
    mets_id <- sample_pair_info[match(id,
                                      sample_pair_info[,"primary_id"]), "mets_id"]
    immune <- (score_matrix[mets_id, immune_var_name] -
               score_matrix[primary_id, immune_var_name])
  }
  # Median and quartiles of the immune score within each clinical category
  uni_var_cat <- unique(var)
  res <- matrix(NA, 1, length(uni_var_cat)*3+1)
  for(i in 1:length(uni_var_cat)){
    immune_cat <- immune[var==uni_var_cat[i]]
    res[1, (i-1)*3+1] <- median(immune_cat)
    res[1, ((i-1)*3+2):((i-1)*3+3)] <- quantile(immune_cat, prob=c(0.25, 0.75))
  }
  # Global test: Kruskal-Wallis for >2 groups, Wilcoxon rank-sum for 2 groups
  if(length(uni_var_cat)>2){
    res[1, ncol(res)] <- kruskal.test(immune, factor(var))$p.value
  }else{
    res[1, ncol(res)] <- wilcox.test(immune[var==uni_var_cat[1]],
                                     immune[var==uni_var_cat[2]])$p.value
  }
  colnames(res) <- c(sapply(1:length(uni_var_cat), function(x) paste(uni_var_cat[x],
                                                                     c("median", "25perc", "75perc"), sep="_")), "pval")
  return(res)
}
#res <- test_with_immune_per_var("Dx.Age", all_clin,
# "ImmuneScore", immune_res_aveDup$estimate,
# primary=TRUE, mets=FALSE, delta=FALSE, posNeg=FALSE)
#res <- test_with_immune_per_var("Race", all_clin,
# "ImmuneScore", immune_res_aveDup$estimate,
# primary=TRUE, mets=FALSE, delta=FALSE, posNeg=FALSE)
# Run test_with_immune_per_var() for every clinical variable against one
# immune-score column, in three modes: primary samples, metastasis samples,
# and the paired difference (mets - primary).
# Returns a named list (primary/mets/delta), each a per-variable list of the
# 1-row result matrices produced by test_with_immune_per_var().
# Cleanup: removed a dead if(FALSE) debug stub and the unused locals
# `table_name_vec`, `note`, `res_matrix_all` from the original.
test_with_immune <- function(var_for_test,
                             score_matrix, immune_var_name) {
  # Flag combination passed through for each of the three comparison modes
  modes <- list(primary = c(primary = TRUE,  mets = FALSE, delta = FALSE),
                mets    = c(primary = FALSE, mets = TRUE,  delta = FALSE),
                delta   = c(primary = FALSE, mets = FALSE, delta = TRUE))
  res <- lapply(modes, function(mode) {
    per_var <- lapply(seq_along(var_for_test), function(x) {
      # NOTE(review): `all_clin` is taken from the global environment here,
      # matching the original script's behaviour.
      test_with_immune_per_var(clin_var_name = var_for_test[x],
                               all_clin, immune_var_name,
                               score_matrix,
                               primary = mode[["primary"]],
                               mets = mode[["mets"]],
                               delta = mode[["delta"]])
    })
    names(per_var) <- var_for_test
    per_var
  })
  res
}
foo <- test_with_immune(var_for_test, immune_res_aveDup[[1]], "ImmuneScore")
|
6eea108168d98ae999be4be5020225aa0d84e67e
|
d27c969b5db11a353bd05afc6dc94e84fc44f4ba
|
/man/ices_catch_plot.Rd
|
8054f49ea6f2e94809767a0751cb1a40db827086
|
[] |
no_license
|
ices-tools-prod/fisheryO
|
d2b98f04de69707e139777022ce1d86be9ed1d8e
|
289159dd4e738178e7a597f08f56a0eb180eea93
|
refs/heads/master
| 2021-10-08T14:49:26.854955
| 2018-02-08T14:12:20
| 2018-02-08T14:12:20
| 65,477,686
| 4
| 3
| null | 2017-11-10T14:25:57
| 2016-08-11T14:51:32
|
R
|
UTF-8
|
R
| false
| true
| 2,037
|
rd
|
ices_catch_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_functions.R
\name{ices_catch_plot}
\alias{ices_catch_plot}
\title{Landings over time by country, guild, or species}
\usage{
ices_catch_plot(ecoregion, type = c("COMMON_NAME", "COUNTRY", "GUILD")[1],
line_count = 4, plot_type = c("line", "area")[1], data_caption = TRUE,
output_path = NULL, file_name = "figure2", save_plot = FALSE,
return_plot = TRUE, return_data = FALSE, fig.width = 174,
fig.height = 68, text.size = 9)
}
\arguments{
\item{ecoregion}{ecoregion name, e.g. Greater North Sea Ecoregion}
\item{type}{the variable that will be used to group and display data: COMMON_NAME, GUILD, or COUNTRY}
\item{line_count}{number of lines to display}
\item{plot_type}{area or line plot}
\item{data_caption}{print the data source as a caption, boolean.}
\item{output_path}{path for output to live.}
\item{file_name}{name for the output.}
\item{save_plot}{logical to save plot.}
\item{return_plot}{logical to return plot to current environment.}
\item{return_data}{logical on returning a .csv of plotted data}
\item{fig.width}{width of combined set of plots}
\item{fig.height}{height of combined set of plots}
\item{text.size}{size of text in plots}
}
\value{
A ggplot2 object when \code{return_plot} is \code{TRUE} or .png when \code{save_plot} is \code{TRUE}.
Output is saved as \code{file_name} in \code{output_path}.
When \code{file_name} is \code{NULL}, the file name is the ecoregion.
When \code{output_path} is \code{NULL}, the file is saved to "~/".
}
\description{
The \code{ices_catch_plot} function returns an area or line plot of landings (historic and official catch) for an ecoregion by country,
guild, or species.
}
\note{
Historic and official nominal catch are actually only the landings and do not account for discards, misreporting, or other
potential issues.
}
\examples{
\dontrun{
ices_catch_plot("Greater North Sea Ecoregion", type = "COMMON_NAME", return_plot = TRUE, line_count = 4)
}
}
\author{
Scott Large
}
|
454ca74e05b8dec26485bd742673b25cd919998f
|
860b2ea580931dde774426eb97106dd8b42eb57e
|
/R/biomass_estimates-class.R
|
3c24b9c48d46d273ccbdb2046b66fa81a7388071
|
[
"Apache-2.0"
] |
permissive
|
jdyen/trophic
|
07c045e8cabf0f2b2a498c0303212cc3faa02db8
|
6474e24b811bfca8e298fd2601bb7600e3445201
|
refs/heads/master
| 2020-03-19T07:12:39.447731
| 2018-06-07T23:37:28
| 2018-06-07T23:37:28
| 136,096,422
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,636
|
r
|
biomass_estimates-class.R
|
#' Estimate biomass for each node in a food web
#'
#' @description Estimation converts production estimates to biomass estimates
#'
#' @rdname biomass_estimates
#'
#' @param production_estimates a production_estimates object calculated with \link[trophic]{production_estimates}
#' @param pb_ratio a production:biomass ratio object created with \link[trophic]{pb_ratio}
#' @param x a biomass_estimates object
#' @param nodes integer, character, integer vector, or character vector of nodes to plot (indexed by name or food_web row)
#' @param settings plot settings passed directly to \link[graphics]{plot}
#' @param FUN function used to summarise information extracted from a biomass_estimates object
#' @param ... further arguments passed to or from other methods
#'
#' @return An object of class \code{biomass_estimates}
#'
#' @export
#'
#' @importFrom stats quantile
#' @importFrom graphics lines plot points axis mtext par
#'
#' @examples
#'
#' library(trophic)
#' library(future)
#' plan(multiprocess)
#'
#' # Construct the component objects
#' test_fw <- build_food_web(interaction_matrix = food_web)
#' test_efficiency_matrix <- build_efficiency_matrix(efficiency_mean = efficiency_mean,
#' efficiency_sd = 0.01)
#' test_dominance <- build_dominance_matrix(dominance = dominance_matrix)
#' test_primary_producers <- build_primary_producers(production_mean = c(1, 2),
#' production_sd = c(0.5, 0.5))
#'
#' # Construct the trophic_dynamics object
#' test_trophic_dynamics <- build_trophic_dynamics(food_web = test_fw,
#' efficiency_matrix = test_efficiency_matrix,
#' dominance_matrix = test_dominance)
#'
#' # Estimate production values from constructed trophic_dynamics object
#' production_estimates <- estimate_production(test_trophic_dynamics,
#' test_primary_producers)
#'
#' # Create a pb_ratio object
#' test_pb_ratio <- build_pb_ratio(range = c(0.25, 5.75),
#' probs = c(5, 20, 10, 3, 1, 1, 1))
#'
#' # Convert production to biomass estimates
#' biomass_estimates <- estimate_biomass(production_estimates, test_pb_ratio)
# Convert production estimates into biomass estimates by applying the
# production:biomass ratio. The result shape depends on pb_ratio$type:
#   "fixed"      -- a single ratio applied to every production matrix
#   "gradient"   -- one scaled copy of the production list per ratio value
#   "stochastic" -- a ratio drawn per column, weighted by pb_ratio$probs
# Returns an object of class "biomass_estimates".
estimate_biomass <- function(production_estimates, pb_ratio) {
  production <- production_estimates$production
  # Helper: scale every production matrix by 10 / ratio
  scale_production <- function(ratio) {
    lapply(production, function(p) p * (10 / ratio))
  }
  if (pb_ratio$type == "fixed") {
    biomass <- scale_production(pb_ratio$values)
  }
  if (pb_ratio$type == "gradient") {
    biomass <- lapply(seq_along(pb_ratio$values),
                      function(i) scale_production(pb_ratio$values[i]))
  }
  if (pb_ratio$type == "stochastic") {
    # Draw one ratio per column (simulation draw), with the given weights
    drawn_ratios <- sample(pb_ratio$values,
                           size = ncol(production[[1]]),
                           replace = TRUE,
                           prob = pb_ratio$probs)
    biomass <- lapply(production,
                      function(p) sweep(p, 2, 10 / drawn_ratios, "*"))
  }
  # Carry the inputs along so downstream methods can reconstruct context
  result <- list(biomass = biomass,
                 replicates = production_estimates$replicates,
                 trophic_dynamics = production_estimates$trophic_dynamics,
                 primary_producers = production_estimates$primary_producers,
                 pb_ratio = pb_ratio,
                 stochastic = production_estimates$stochastic,
                 nsim = production_estimates$nsim)
  as.biomass_estimates(result)
}
#' @rdname biomass_estimates
#'
#' @export
#'
#' @examples
#'
#' # Test if object is of the type 'biomass_estimates'
#'
#' \dontrun{
#' is.biomass_estimates(x)
#' }
# TRUE when `x` carries the 'biomass_estimates' S3 class, FALSE otherwise.
is.biomass_estimates <- function (x) {
  inherits(x, what = "biomass_estimates")
}
#' @rdname biomass_estimates
#'
#' @export
#'
#' @examples
#'
#' # Print information about the 'biomass_estimates' object
#'
#' \dontrun{
#' print(x)
#' }
print.biomass_estimates <- function (x, ...) {
  # One-line summary. BUG FIX: end with a newline so the console prompt is
  # not left dangling on the same line (the original omitted "\n").
  cat(paste0("This is a biomass_estimates object with ", x$replicates,
             " replicates"), "\n", sep = "")
  # Print methods conventionally return their argument invisibly.
  invisible(x)
}
#' @rdname biomass_estimates
#'
#' @export
#'
#' @examples
#'
#' # Plot a 'biomass_estimates' object
#'
#' \dontrun{
#' plot(x)
#' }
plot.biomass_estimates <- function (x, nodes = NULL, settings = list(), ...) {
  # One plot per replicate
  nplot <- x$replicates
  # Default graphical settings; any element can be overridden via `settings`
  plot_set <- list(pch = 16,
                   las = 1,
                   bty = "l",
                   col = "black",
                   barwidth = c(1, 1.5, 2.6),
                   mar = c(5.1, 10.1, 2.1, 1.1))
  plot_set[names(settings)] <- settings
  for (i in seq_len(nplot)) {
    # Resolve the requested nodes to row indices of the biomass matrix
    if (is.null(nodes)) {
      node_set <- seq_len(nrow(x$biomass[[i]]))
    } else {
      if (is.character(nodes)) {
        node_set <- match(nodes, rownames(x$biomass[[i]]))
        if (any(is.na(node_set))) {
          # BUG FIX: report the unmatched node *names*; the original pasted
          # node_set[is.na(node_set)], which is just a run of NAs
          warning(paste0("some nodes were not found in node names and have been removed: ",
                         paste(nodes[is.na(node_set)], collapse = ", ")))
          node_set <- node_set[!is.na(node_set)]
        }
        if (!length(node_set)) {
          stop("there are no matches between nodes and node names")
        }
      } else {
        if (is.numeric(nodes)) {
          # Generalised: accept any numeric index vector (the original's
          # is.integer() check rejected plain doubles such as `nodes = 5`)
          node_set <- as.integer(nodes)
        } else {
          stop("nodes must be a character or integer vector")
        }
      }
    }
    # Quantiles (2.5% .. 97.5%) of biomass per node; one column per node
    if (length(node_set) == 1) {
      # BUG FIX: quantiles were computed over the *whole* biomass matrix
      # instead of the single selected node's row
      to_plot <- quantile(x$biomass[[i]][node_set, ],
                          p = c(0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975))
      to_plot <- matrix(to_plot, ncol = 1)
      colnames(to_plot) <- rownames(x$biomass[[i]])[node_set]
    } else {
      to_plot <- apply(x$biomass[[i]][node_set, ], 1, quantile,
                       p = c(0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975))
    }
    # Wide left margin for node labels; restore the old margins afterwards
    old_mar <- par()$mar
    par(mar = plot_set$mar)
    # Empty canvas: one horizontal interval bar per node
    plot(to_plot[4, ], seq_along(node_set),
         type = "n",
         xaxt = "n", yaxt = "n",
         xlab = "", ylab = "",
         xlim = range(to_plot),
         las = plot_set$las,
         bty = plot_set$bty,
         ...)
    axis(1,
         las = plot_set$las)
    mtext("Estimated biomass",
          side = 1, adj = 0.5, line = 3.1)
    axis(2, at = seq_along(node_set), labels = colnames(to_plot),
         las = plot_set$las)
    mtext("Node",
          side = 2, adj = 0.5, line = 9.2)
    # Nested credible-interval bars: 95%, 80%, 50% (increasing line width)
    for (k in seq_len(3)) {
      for (j in seq_along(node_set)) {
        lines(c(to_plot[k, j], to_plot[(8 - k), j]),
              c(j, j),
              lwd = plot_set$barwidth[k],
              col = plot_set$col)
      }
    }
    # Median point for each node
    points(to_plot[4, ], seq_along(node_set),
           pch = plot_set$pch,
           col = plot_set$col)
    par(mar = old_mar)
  }
}
#' @rdname biomass_estimates
#'
#' @export
#'
#' @examples
#'
#' # Extract information on one or several nodes from a 'biomass_estimates' object
#'
#' \dontrun{
#' extract_nodes(x, nodes = c(5:7), FUN = mean)
#' }
extract_nodes <- function (x, nodes = NULL, FUN = summary, ...) {
  # Apply a summary function FUN to the posterior biomass samples of
  # selected nodes, separately for every replicate.
  #
  # Args:
  #   x:     a 'biomass_estimates' object: a list with $replicates (count)
  #          and $biomass (a list of node-by-sample matrices with node names
  #          as rownames).
  #   nodes: optional character vector of node names or numeric vector of
  #          row indices; default uses every node.
  #   FUN:   function applied to each node's sample vector (default summary).
  #   ...:   further arguments passed on to FUN.
  #
  # Returns: a named list ("replicate1", "replicate2", ...) of FUN results,
  # one column per selected node.
  out <- vector("list", length = x$replicates)
  for (i in seq_len(x$replicates)) {
    # Resolve the requested nodes to row indices of this replicate's matrix.
    if (is.null(nodes)) {
      node_set <- seq_len(nrow(x$biomass[[i]]))
    } else {
      if (is.character(nodes)) {
        node_set <- match(nodes, rownames(x$biomass[[i]]))
        if (any(is.na(node_set))) {
          # BUG FIX: report the node *names* that failed to match
          # (previously this printed the NA index values).
          warning(paste0("some nodes were not found in node names and have been removed: ",
                         paste(nodes[is.na(node_set)], collapse = ", ")))
          node_set <- node_set[!is.na(node_set)]
        }
        if (!length(node_set)) {
          stop("there are no matches between nodes and node names")
        }
      } else {
        if (is.numeric(nodes)) {
          # Generalized: accept integer or double indices (5L and 5 both work).
          node_set <- as.integer(nodes)
        } else {
          stop("nodes must be a character or integer vector")
        }
      }
    }
    if (length(node_set) == 1) {
      # BUG FIX: subset to the selected row; previously FUN was applied to
      # the WHOLE matrix, pooling samples across all nodes.
      out[[i]] <- FUN(x$biomass[[i]][node_set, ],
                      ...)
      out[[i]] <- matrix(out[[i]], ncol = 1)
      colnames(out[[i]]) <- rownames(x$biomass[[i]])[node_set]
    } else {
      out[[i]] <- apply(x$biomass[[i]][node_set, ], 1, FUN, ...)
    }
  }
  names(out) <- paste0("replicate", seq_len(x$replicates))
  out
}
# Internal constructor: wrap a raw results list as a 'biomass_estimates' object.
as.biomass_estimates <- function (biomass_estimates) {
  # Delegates to the package-internal as_class() helper, which tags the list
  # with the "biomass_estimates" class; type = "list" suggests it also checks
  # the underlying type -- not visible here, confirm in as_class().
  as_class(biomass_estimates, name = "biomass_estimates", type = "list")
}
|
28a6b1de2797787abf333960e64d2fbadb9c5e41
|
a0296de45161a795fd7199c25bbfe54d66a96392
|
/run_analysis.R
|
4ccf57c750dc2684d479513ef721d1e16e996a5a
|
[] |
no_license
|
josefair/HARtidydataset
|
a44d80b22848d4335099f2a2b9b4715b341d280b
|
8cedfb03b7a5fc23bcc63dcd6b03e44edc9337a8
|
refs/heads/master
| 2021-01-17T09:39:06.715969
| 2014-10-25T22:07:26
| 2014-10-25T22:07:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,883
|
r
|
run_analysis.R
|
##################################################################################################################
## Reads Human Activity Recognition data (test and train splits) created from wearable hardware and writes a   ##
## tidy data set with the average of each mean/std variable for each activity and each subject.                ##
##                                                                                                             ##
## By: Joseph Sefair                                                                                           ##
##################################################################################################################
## Setting working directory
## setwd("C:/Users/Joseph/Documents/DataScience/UCI HAR Dataset")

##########################################################
## Descriptive activity names                           ##
##########################################################
## Map the numeric activity codes used in y_test.txt / y_train.txt (1..6)
## to descriptive names.
## BUG FIX: codes 4-6 are stationary activities; the previous version
## mislabelled them as "walking_sitting"/"walking_standing"/"walking_laying".
activity_labels <- c("walking", "walking_upstairs", "walking_downstairs",
                     "sitting", "standing", "laying")

## Measurement (feature) column names are shared by both splits.
feature_names <- read.table("features.txt", sep = "")

##########################################################
## Read one data split ("test" or "train")              ##
##########################################################
## Returns a single data frame with the subject id (User), the descriptive
## activity name, and all measurement columns. Replaces the previous
## copy/pasted test and train reading code.
read_split <- function(split) {
  user <- read.table(file.path(".", split, paste0("subject_", split, ".txt")),
                     sep = "\t", col.names = c("User"))
  activity <- read.table(file.path(".", split, paste0("y_", split, ".txt")),
                         sep = "\t", col.names = c("activity"))
  ## Translate numeric codes to descriptive labels in one vectorized lookup.
  activity$activity <- activity_labels[activity$activity]
  measurements <- read.table(file.path(".", split, paste0("x_", split, ".txt")),
                             sep = "", col.names = feature_names[, 2])
  cbind(user, activity, measurements)
}

##########################################################
## Merge training and test datasets                     ##
## Extract only measurements on mean and std            ##
##########################################################
merged_dataset <- rbind(read_split("test"), read_split("train"))

## Keep only mean()/std() measurements. read.table sanitises "mean()" to
## "mean..", so a punctuation character right after "mean"/"std" identifies
## exactly those columns (and excludes e.g. "meanFreq").
keep <- grepl("mean[[:punct:]]|std[[:punct:]]", colnames(merged_dataset))
merged_dataset_mean_std <- merged_dataset[, c("activity", "User",
                                              colnames(merged_dataset)[keep])]

##########################################################
## Appropriately label the data set with descriptive    ##
## variable names                                       ##
##########################################################
column_names <- c("Activity", "User", "Accelerometer_Body_Mean_X", "Accelerometer_Body_Mean_Y", "Accelerometer_Body_Mean_Z",
                  "Accelerometer_Body_Std_X", "Accelerometer_Body_Std_Y", "Accelerometer_Body_Std_Z", "Accelerometer_Gravity_Mean_X",
                  "Accelerometer_Gravity_Mean_Y", "Accelerometer_Gravity_Mean_Z", "Accelerometer_Gravity_Std_X", "Accelerometer_Gravity_Std_Y",
                  "Accelerometer_Gravity_Std_Z", "Jerk_Accelerometer_Body_Mean_X", "Jerk_Accelerometer_Body_Mean_Y", "Jerk_Accelerometer_Body_Mean_Z",
                  "Jerk_Accelerometer_Body_Std_X", "Jerk_Accelerometer_Body_Std_Y", "Jerk_Accelerometer_Body_Std_Z", "Gyroscope_Body_Mean_X",
                  "Gyroscope_Body_Mean_Y", "Gyroscope_Body_Mean_Z", "Gyroscope_Body_Std_X", "Gyroscope_Body_Std_Y", "Gyroscope_Body_Std_Z",
                  "Jerk_Gyroscope_Body_Mean_X", "Jerk_Gyroscope_Body_Mean_Y", "Jerk_Gyroscope_Body_Mean_Z", "Jerk_Gyroscope_Body_Std_X",
                  "Jerk_Gyroscope_Body_Std_Y", "Jerk_Gyroscope_Body_Std_Z", "Mag_Accelerometer_Body_Mean", "Mag_Accelerometer_Body_Std", "Mag_Accelerometer_Gravity_Mean",
                  "Mag_Accelerometer_Gravity_Std", "Jerk_Mag_Accelometer_Body_Mean", "Jerk_Mag_Accelometer_Body_Std", "Mag_Gyroscope_Body_Mean",
                  "Mag_Gyroscope_Body_Std", "Jerk_Mag_Gyroscope_Body_Mean", "Jerk_Mag_Gyroscope_Body_Std", "FFT_Accelometer_Body_Mean_X",
                  "FFT_Accelometer_Body_Mean_Y", "FFT_Accelometer_Body_Mean_Z", "FFT_Accelometer_Body_Std_X", "FFT_Accelometer_Body_Std_Y",
                  "FFT_Accelometer_Body_Std_Z", "FFT_Jerk_Accelometer_Body_Mean_X", "FFT_Jerk_Accelometer_Body_Mean_Y", "FFT_Jerk_Accelometer_Body_Mean_Z",
                  "FFT_Jerk_Accelometer_Body_Std_X", "FFT_Jerk_Accelometer_Body_Std_Y", "FFT_Jerk_Accelometer_Body_Std_Z", "FFT_Gyroscope_Body_Mean_X",
                  "FFT_Gyroscope_Body_Mean_Y", "FFT_Gyroscope_Body_Mean_Z", "FFT_Gyroscope_Body_Std_X", "FFT_Gyroscope_Body_Std_Y", "FFT_Gyroscope_Body_Std_Z",
                  "FFT_Mag_Accelometer_Body_Mean", "FFT_Mag_Accelometer_Body_Std", "FFT_Jerk_Mag_Accelometer_Body_Body_Mean", "FFT_Jerk_Mag_Accelometer_Body_Body_Std",
                  "FFT_Mag_Gyroscope_Body_Body_Mean", "FFT_Mag_Gyroscope_Body_Body_Std", "FFT_Jerk_Mag_Gyroscope_Body_Body_Mean", "FFT_Jerk_Mag_Gyroscope_Body_Body_Std")
colnames(merged_dataset_mean_std) <- column_names

##########################################################
## Create a second, independent tidy data set with the  ##
## average of each variable for each activity and each  ##
## subject.                                             ##
##########################################################
## The formula "." expands to every non-grouping column, replacing the
## previous 66-line hand-written cbind() of individual columns.
tidy_data_avg_activity_user <- aggregate(. ~ Activity + User,
                                         data = merged_dataset_mean_std,
                                         FUN = mean)
## Prefix each measurement name with "Avg_" (this also fixes the
## "Avg_FT_Jerk_Mag_Accelometer_Body_Body_Std" typo in the previous
## hand-written name list).
colnames(tidy_data_avg_activity_user) <- c("Activity", "User",
                                           paste0("Avg_", column_names[-(1:2)]))

##########################################################
## Write table to file tidy_data_avg_activity_user.txt  ##
## No row names                                         ##
##########################################################
write.table(tidy_data_avg_activity_user, "tidy_data_avg_activity_user.txt",
            row.names = FALSE)
|
730c7c49f1e4cc28e8c56ced1333716cc2165c92
|
5a3067e041fae461e386bd53fac413d545db8a52
|
/preprocessing_scripts/osm_fish.R
|
c49ef0b27b908ee8b17055bb94a28e0e23f19b0b
|
[] |
no_license
|
kvshams/Integration2019
|
851a6a1a1a9586d2c0826a808ae894f99373dd8c
|
e5821bd242fa0a46eb6fd37764275737512032a4
|
refs/heads/master
| 2023-03-20T06:36:27.884732
| 2019-06-05T18:01:50
| 2019-06-05T18:01:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
osm_fish.R
|
# Preprocess the osmFISH spatial transcriptomics dataset: read the loom
# (HDF5) file, build a Seurat object with region/cluster metadata and spatial
# coordinates, filter and normalise it, embed with UMAP, then save the object
# and export the gene panel.
library(Seurat)
library(hdf5r)
library(methods)
# args[2] is the output path for the saved Seurat object; args[1] appears
# unused here -- presumably consumed by a calling pipeline (TODO confirm).
args <- commandArgs(trailingOnly = TRUE)
# Open the loom file and read the full expression matrix into memory.
osm <- H5File$new(paste0(getwd(), "/raw_data/spatial/osm_fish.loom"))
mat <- osm[['matrix']][,]
# Label the matrix: gene names come from row_attrs, cell ids from col_attrs
# (the [,] read apparently yields cells x genes here -- confirm with hdf5r).
colnames(mat) <- osm[['row_attrs']][['Gene']][]
rownames(mat) <- paste0('osm_', osm[['col_attrs']][['CellID']][])
# Per-cell spatial coordinates and annotations.
x_dim <- osm[['col_attrs']][['X']][]
y_dim <- osm[['col_attrs']][['Y']][]
region <- osm[['col_attrs']][['Region']][]
cluster <- osm[['col_attrs']][['ClusterName']][]
# Release the HDF5 handle as soon as all reading is done.
osm$close_all()
# Coordinate matrix keyed by the same cell ids as the counts.
spatial <- data.frame(spatial1 = x_dim, spatial2 = y_dim)
rownames(spatial) <- rownames(mat)
spatial <- as.matrix(spatial)
# Transpose so rows are genes and columns are cells before handing to Seurat.
mat <- t(mat)
# min.cells/min.features = -1: thresholds below any possible count,
# presumably to disable Seurat's default filtering -- TODO confirm.
osm_seurat <- CreateSeuratObject(counts = mat, project = 'osmFISH', assay = 'RNA', min.cells = -1, min.features = -1)
# Attach region and cluster annotations, named by cell id.
names(region) <- colnames(osm_seurat)
names(cluster) <- colnames(osm_seurat)
osm_seurat <- AddMetaData(osm_seurat, region, col.name = 'region')
osm_seurat <- AddMetaData(osm_seurat, cluster, col.name = 'cluster')
# Store the physical coordinates as a DimReduc named 'spatial' so Seurat's
# embedding-based tooling can use them.
osm_seurat[['spatial']] <- CreateDimReducObject(embeddings = spatial, key = 'spatial', assay = 'RNA')
# Drop cells annotated with the 'Excluded' region before normalisation.
Idents(osm_seurat) <- 'region'
osm_seurat <- SubsetData(osm_seurat, ident.remove = 'Excluded')
# CLR normalisation; UMAP is run on every gene in the (small) panel.
osm_seurat <- NormalizeData(osm_seurat, normalization.method = 'CLR')
osm_seurat <- RunUMAP(osm_seurat, features = rownames(osm_seurat))
saveRDS(object = osm_seurat, file = args[2])
# Export the gene panel for downstream integration steps.
dir.create("analysis_data/spatial/")
write.table(rownames(mat), paste0(getwd(), "/analysis_data/spatial/spatial_genes.txt"), quote = FALSE, row.names = FALSE, col.names = FALSE)
|
438002cd76b37d80a5237a4743ab7ffd50a0ff61
|
2758eef8d85ae79b6df52a0787b9e0ffdd245bf3
|
/R/HootenMM.r
|
ba0bfde2bdea2b99be2606b04c2ce4e97e922b95
|
[] |
no_license
|
pkuhnert/HSTMM
|
1b2950630c7acb27292740714f07bd4476b5d70e
|
4e38b24562def34c993241e84b2922c81bccbfb4
|
refs/heads/master
| 2020-03-18T23:22:51.170605
| 2018-12-13T04:55:34
| 2018-12-13T04:55:34
| 135,398,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,601
|
r
|
HootenMM.r
|
#' Hooten Space-Time Matrix Model
#'
#' @param niter number of iterations
#' @param m number of spatial locations
#' @param nT number of time points
#' @param r growth rate (scalar)
#' @param lambda intensity matrix (m x nT)
#' @param K carrying capacity (scalar)
#' @param coords matrix of spatial co-ordinates of locations (longitude, latitude)
#' @param tau dispersal parameter (vector of length m)
#' @param N true number of organisms (m x nT)
#' @param acceptR acceptance rates of parameters for M-H step
#' @param Hparams hyper-parameters for prior probability distributions
#' @param prior_lambda prior for lambda (mu_lambda, Sigma_lambda)
#' @param prior_tau prior for dispersal parameter, tau
#' @param prior_K prior for carrying capacity, K
#' @param prior_r prior for growth rate, r
#' @param prior_theta prior for theta
#' @param sample_n sample_n
#'
#' @description Uses the M-H algorithm to implement a BHM S-T model for invasions
#'
#' @details This implements the M-H algorithm outlined in Hooten et al. (2007).
#'
#' @references Hooten, M.B., Wikle, C.K., Dorazio, R.M. and Royle, J.A. (2007) Hierarchical Spatiotemporal Matrix
#' Models for Characterizing Invasions, Biometrics, 63, 558-567.
#'
#' @import stats
#' @importFrom svMisc progress
#'
#' @export
HootenMM <- function(niter, m, nT, r, lambda, K, coords, tau, N, acceptR, Hparams,
                     prior_lambda, prior_tau, prior_K, prior_r, prior_theta, sample_n){
  # Metropolis-Hastings sampler for the hierarchical spatio-temporal matrix
  # model of Hooten et al. (2007, Biometrics 63:558-567). It first initialises
  # the intensity process by running the growth (G) / dispersal (M) model
  # forward once, then alternates M-H updates of lambda, N, tau, K and r for
  # niter iterations. Relies on package helpers Create_G, Create_M and the
  # Sample_* functions defined elsewhere in this package.
  #----------------------- Extraction ------------------------#
  # spatial co-ordinates
  longitude <- coords$longitude
  latitude <- coords$latitude
  # acceptance rates
  accept_lambda <- acceptR$lambda
  accept_tau <- acceptR$tau
  accept_K <- acceptR$K
  accept_r <- acceptR$r
  # Hyper-parameters
  h_lambda <- Hparams$h_lambda
  h_tau <- Hparams$h_tau
  h_K <- Hparams$h_K
  h_r <- Hparams$h_r
  h_n <- Hparams$h_n
  # NOTE(review): this line discards the acceptance-rate values extracted
  # from acceptR just above, so the acceptR argument is effectively ignored
  # -- confirm whether that is intended.
  accept_lambda <- accept_tau <- accept_K <- accept_r <- accept_N <- NULL
  #----------------------- Initialisation ------------------------#
  # initial values for the process at j=1
  j <- 1
  progress(j, progress.bar = TRUE)
  G <- vector("list", length = nT)
  G[[1]] <- Create_G(m = m, r = r[j], lambda = lambda[[j]], K = K[j], t = 1, plot = FALSE)
  M <- Create_M(m = m, longitude = longitude, latitude = latitude, tau = tau[[j]], plot = FALSE)
  # run forward process model to obtain initial values of the process (#4 in Biometrics paper)
  for(t in 2:(nT+1)){
    # Propagate intensity: dispersal (M) applied to the grown state
    # (G %*% lambda) of the previous time step; then draw Poisson counts.
    lambda[[j]][,t] <- M %*% G[[t-1]] %*% lambda[[j]][,t-1]
    N[[j]][,t] <- rpois(m, lambda[[j]][,t])
    G[[t]] <- Create_G(m = m, r = r[j], lambda = lambda[[j]], K = K[j], t = t, plot = FALSE)
  }
  #----------------------- Sampling ------------------------#
  cat("MCMC Run \n")
  for(j in 2:niter){
    progress(j, progress.bar = TRUE)
    # Sample lambda (intensity surface).
    val_lambda <- Sample_lambda(m = m, nT = nT, r = r, j = j, lambda = lambda, K = K, coords = coords, tau = tau, N = N,
                                mu_lambda = prior_lambda$mu_lambda, sigma_lambda = prior_lambda$Sigma_lambda, h_lambda = h_lambda)
    # NOTE(review): the N returned by Sample_lambda is deliberately (?) not
    # used -- the next commented-out line suggests it once was; confirm.
    # N <- val_lambda$N
    lambda <- val_lambda$lambda
    accept_lambda[j-1] <- val_lambda$accept_lambda
    # Sample N (true counts).
    val_N <- Sample_N(n = sample_n, N = N, lambda = lambda, h_n = h_n,
                      alpha_theta = prior_theta$alpha_theta,
                      beta_theta = prior_theta$beta_theta, j = j, m = m, nT = nT)
    N <- val_N$N
    accept_N[j-1] <- val_N$accept_N
    # sample tau (dispersal parameter)
    val_tau <- Sample_tau(tau = tau, j = j, m = m, nT = nT, r = r, lambda = lambda, K = K, coords = coords,
                          N = N, h_tau = h_tau, h_lambda = h_lambda, mu_tau = prior_tau$mu_tau, sigma_tau = prior_tau$Sigma_tau)
    tau <- val_tau$tau
    accept_tau[j-1] <- val_tau$accept_tau
    # Sample K (carrying capacity).
    val_K <- Sample_K(tau = tau, j = j, m = m, nT = nT, r = r, lambda = lambda, K = K, coords = coords, N = N, h_K = h_K,
                      h_lambda = h_lambda, alpha_K = prior_K$alpha, beta_K = prior_K$beta)
    K <- val_K$K
    accept_K[j-1] <- val_K$accept_K
    # Sample r (growth rate).
    val_r <- Sample_r(tau = tau, j = j, m = m, nT = nT, r = r, lambda = lambda, K = K, coords = coords, N = N, h_r = h_r,
                      h_lambda = h_lambda, mu_r = prior_r$mu, sig2_r = prior_r$sig2)
    r <- val_r$r
    accept_r[j-1] <- val_r$accept_r
    if(j == niter) cat("Done!\n")
  }
  # Collect the per-iteration acceptance indicators (vectors grown one
  # element per iteration above) and return the final states / chains.
  AR <- list(accept_lambda = accept_lambda, accept_tau = accept_tau,
             accept_K = accept_K, accept_r = accept_r, accept_N = accept_N)
  list(N = N, lambda = lambda, tau = tau, K = K, r = r, AR = AR)
}
|
4f75c1390c8ffe243e26f795f3c6573c4be3de5e
|
93ee786dbdc303baef3bde1c8c0436f9c160f5e9
|
/man/as_survey_rep.Rd
|
00682f07973069b05ba0d7ae8c391b1cc4591d23
|
[] |
no_license
|
gergness/srvyr
|
88c4bf8ba09c4ed0435566ddce00a0899638762c
|
554210568b700f5a4aab896d15a92c0dc019960b
|
refs/heads/main
| 2023-06-25T03:15:51.359512
| 2023-02-21T03:03:11
| 2023-02-21T03:03:11
| 43,221,630
| 216
| 41
| null | 2023-09-10T16:08:24
| 2015-09-26T20:24:01
|
R
|
UTF-8
|
R
| false
| true
| 4,461
|
rd
|
as_survey_rep.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_survey_rep.r
\name{as_survey_rep}
\alias{as_survey_rep}
\alias{as_survey_rep.data.frame}
\alias{as_survey_rep.tbl_lazy}
\alias{as_survey_rep.svyrep.design}
\alias{as_survey_rep.survey.design2}
\alias{as_survey_rep.tbl_svy}
\title{Create a tbl_svy survey object using replicate weights}
\usage{
as_survey_rep(.data, ...)
\method{as_survey_rep}{data.frame}(
.data,
variables = NULL,
repweights = NULL,
weights = NULL,
type = c("BRR", "Fay", "JK1", "JKn", "bootstrap", "successive-difference", "ACS",
"other"),
combined_weights = TRUE,
rho = NULL,
bootstrap_average = NULL,
scale = NULL,
rscales = NULL,
fpc = NULL,
fpctype = c("fraction", "correction"),
mse = getOption("survey.replicates.mse"),
...
)
\method{as_survey_rep}{tbl_lazy}(
.data,
variables = NULL,
repweights = NULL,
weights = NULL,
type = c("BRR", "Fay", "JK1", "JKn", "bootstrap", "successive-difference", "ACS",
"other"),
combined_weights = TRUE,
rho = NULL,
bootstrap_average = NULL,
scale = NULL,
rscales = NULL,
fpc = NULL,
fpctype = c("fraction", "correction"),
mse = getOption("survey.replicates.mse"),
...
)
\method{as_survey_rep}{svyrep.design}(.data, ...)
\method{as_survey_rep}{survey.design2}(
.data,
type = c("auto", "JK1", "JKn", "BRR", "bootstrap", "subbootstrap", "mrbbootstrap",
"Fay"),
rho = 0,
fpc = NULL,
fpctype = NULL,
...,
compress = TRUE,
mse = getOption("survey.replicates.mse")
)
\method{as_survey_rep}{tbl_svy}(
.data,
type = c("auto", "JK1", "JKn", "BRR", "bootstrap", "subbootstrap", "mrbbootstrap",
"Fay"),
rho = 0,
fpc = NULL,
fpctype = NULL,
...,
compress = TRUE,
mse = getOption("survey.replicates.mse")
)
}
\arguments{
\item{.data}{A data frame (which contains the variables specified below)}
\item{...}{ignored}
\item{variables}{Variables to include in the design (default is all)}
\item{repweights}{Variables specifying the replication weight variables}
\item{weights}{Variables specifying sampling weights}
\item{type}{Type of replication weights}
\item{combined_weights}{\code{TRUE} if the \code{repweights} already
include the sampling weights. This is usually the case.}
\item{rho}{Shrinkage factor for weights in Fay's method}
\item{bootstrap_average}{For \code{type = "bootstrap"}, if the bootstrap
weights have been averaged, gives the number of iterations averaged over.}
\item{scale, rscales}{Scaling constant for variance, see
\code{\link[survey]{svrepdesign}} for more information.}
\item{fpc}{Variables specifying a finite population correction, see
\code{\link[survey]{svrepdesign}} for more details.}
\item{fpctype}{Finite population correction information}
\item{mse}{if \code{TRUE}, compute variances based on sum of squares
around the point estimate, rather than the mean of the replicates}
\item{compress}{if \code{TRUE}, store replicate weights in compressed form
(if converting from design)}
}
\value{
An object of class \code{tbl_svy}
}
\description{
Create a survey object with replicate weights.
}
\details{
If provided a data.frame, it is a wrapper around \code{\link[survey]{svrepdesign}}.
All survey variables must be included in the data.frame itself. Variables are
selected by using bare column names, or convenience functions described in
\code{\link[dplyr]{select}}.
If provided a \code{svyrep.design} object from the survey package,
it will turn it into a srvyr object, so that srvyr functions will work with it.
If provided a survey design (\code{survey.design2} or \code{tbl_svy}), it is a wrapper
around \code{\link[survey]{as.svrepdesign}}, and will convert from a survey design to
replicate weights.
}
\examples{
# Examples from ?survey::svrepdesign()
library(survey)
library(dplyr)
data(scd)
# use BRR replicate weights from Levy and Lemeshow
scd <- scd \%>\%
mutate(rep1 = 2 * c(1, 0, 1, 0, 1, 0),
rep2 = 2 * c(1, 0, 0, 1, 0, 1),
rep3 = 2 * c(0, 1, 1, 0, 0, 1),
rep4 = 2 * c(0, 1, 0, 1, 1, 0))
scdrep <- scd \%>\%
as_survey_rep(type = "BRR", repweights = starts_with("rep"),
combined_weights = FALSE)
# dplyr 0.7 introduced new style of NSE called quosures
# See `vignette("programming", package = "dplyr")` for details
repwts <- quo(starts_with("rep"))
scdrep <- scd \%>\%
as_survey_rep(type = "BRR", repweights = !!repwts,
combined_weights = FALSE)
}
|
6f586718463f161b2ead4631d0cf00dde07d99e9
|
3a7a0e04c468e1cbbe3cfd38107b30b8d41dc8af
|
/create_data/coastline/create_coastlineWorld.R
|
0faeca8539f3a92b8bd4b127fe122ba3593496de
|
[] |
no_license
|
AnneMTreasure/oce
|
d3158253eac5bd3029d8b4312bf32271a916aec4
|
ae68d2753485b150d292f27d4cc4681608d470a1
|
refs/heads/develop
| 2021-01-18T18:28:07.909255
| 2016-06-21T21:00:06
| 2016-06-21T21:00:06
| 61,804,091
| 1
| 0
| null | 2016-06-23T12:39:34
| 2016-06-23T12:39:34
| null |
UTF-8
|
R
| false
| false
| 448
|
r
|
create_coastlineWorld.R
|
# Build the coastlineWorld dataset from the Natural Earth 1:110m admin-0
# country polygons shapefile and save it as a compressed .rda file.
library(oce)
coastlineWorld <- read.oce("ne_110m_admin_0_countries/ne_110m_admin_0_countries.shp")
## We happen to know the units; read.oce() does not try to infer them
# Record degrees East / North in the object's metadata slot.
coastlineWorld@metadata$units$longitude <- list(unit=expression(degree*E), scale="")
coastlineWorld@metadata$units$latitude <- list(unit=expression(degree*N), scale="")
# Save, then recompress with the best available method to minimise file size.
save(coastlineWorld, file="coastlineWorld.rda")
tools::resaveRdaFiles("coastlineWorld.rda", compress="auto")
|
faa977eeffc9c25fc731a5a08a754cc7cc9f0dae
|
97252268b8e5a0de5cbd0631a7d39e9bbc7e06ac
|
/app.R
|
1d7b7c6c21c4e1bda0a14424ea9d4b873cd8fc2c
|
[] |
no_license
|
angelbyte01/shiny.trabajofinal
|
24ae8df2ccc2490543e5bccd3ca123a9b8e5fc71
|
084a93d9a71e71559f2b9fbd7358d2d11ca4d5e7
|
refs/heads/main
| 2023-06-28T06:57:16.764104
| 2021-07-27T23:28:50
| 2021-07-27T23:28:50
| 389,470,789
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,812
|
r
|
app.R
|
#########################################################
#LIBRERias #
#########################################################
library(readxl)
library(tibble)
library(caret)
library(lattice)
library(base)
library(stringr)
library(writexl)
library(tm)
library(readr)
library(stats)
library(graphics)
library(grDevices)
library(utils)
library(datasets)
library(methods)
library(base)
library(dplyr)
library(ggplot2)
library(janitor)
library(tidytext)
library(tidyr)
library(arules)
library(Matrix)
library(arulesViz)
library(grid)
library(readxl)
library(reshape)
library(rfm)
library(tm)
library(NLP)
library(stats)
library(datetime)
library(lubridate)
library(shiny)
library(shinythemes)
library(quantmod)
library(tidyverse)
library(RPostgreSQL)
library(DBI)
library(jsonlite)
library(googleCloudStorageR)
library(xgboost)
#-----------------------------------------------------------
###########################################################################
# EXTIENDE EL TAMAÑO DEL ARCHIVO A CARGAR EN SHINY: #
###########################################################################
options(shiny.maxRequestSize=100*1024^2)
shinyApp(
#########################################################################################################################################################
# #
# INICIO PARTE: ui #
# #
#########################################################################################################################################################
ui <- tagList(
##################################################
# TITULO Y TEMA DE LA APP: #
##################################################
#fluidPage(
navbarPage("SEGMENTACIÓN DE CLIENTES",
# color app
theme = shinytheme("flatly"),
###############################################################################################################################################
# INICIO PRIMERA PESTANA PRINCIPAL: INGRESO DE DATOS #
###############################################################################################################################################
##################################################
# TITULO PRIMERA PESTANA: #
##################################################
tabPanel("INGRESO DE DATOS",
#sidebarLayout(
##################################################
# TITULO CONTENIDO PRIMERA PESTANA: #
##################################################
titlePanel("1. Revisión general de los datos"),
sidebarPanel(
#########################################
# CARGA DE ARCHIVO #
#########################################
fileInput("file1", "Seleccionar el archivo a cargar:",
multiple = TRUE,
#accept = c("text/csv",
# "text/comma-separated-values,text/plain",
# ".csv")),
accept = c(".xlsx")),
# Horizontal line ----
tags$hr(),
#######################################################
# CARGA DE MODELO DE EFECTIVIDAD #
#######################################################
fileInput("file2", "Seleccionar el archivo de efectividad:",
multiple = TRUE),
# Horizontal line ----
tags$hr(),
#######################################################
# BOTÓN PARA CARGAR A AWS #
#######################################################
actionButton(inputId = "ejecutar01", label = "Cargar a AWS"),
# Horizontal line ----
tags$hr()
),
#fin de sidebarPanel
##################################################
# TITULOS DE PESTANA Y DE SUS CONTENIDOS #
##################################################
mainPanel(
#tableOutput("contents"),
tabsetPanel(type = "tabs",
tabPanel("Tabla de datos",
h4("Visualización de datos cargados:"),DT::dataTableOutput("tabledata1")),
tabPanel("Resumen de datos",
h4("Estadísticos principales:"),verbatimTextOutput("tabledata2"))
)
#dataTableOutput('horario'),
# verbatimTextOutput("summary"),
#tableOutput("view")
)
#fin de mainPanel
#)
#fin de sidebarLayout
),
#fin de tabPanel
###############################################################################################################################################
# FIN PRIMERA PESTANA PRINCIPAL: INGRESO DE DATOS #
###############################################################################################################################################
###############################################################################################################################################
# INICIO SEGUNDA PESTANA PRINCIPAL: EFECTIVIDAD #
###############################################################################################################################################
##################################################
# TITULO SEGUNDA PESTANA: #
##################################################
tabPanel("EFECTIVIDAD",
#sidebarLayout(
##################################################
# TITULO CONTENIDO SEGUNDA PESTANA: #
##################################################
# titulo app
titlePanel("2. Modelo de efectividad"),
sidebarPanel(
#########################################
# SELECCION DE VARX_30 #
#########################################
selectInput(inputId = "VARX_30",
label = "Variables para el modelo:",
choices = c("CÁLCULO TOTAL DE UNIDADES","FECHA DE ACTIVACIÓN DE CONTRATO","PLAN DESTINO","CLUSTER MODELO","PROMEDIO DE RECARGA","RB_IG","PLAN DESTINO","RECICLADOS","REGIÓN"),
selected = "TOTAL",
multiple = FALSE)
#"GAP"
),
#fin de sidebarPanel
##################################################
# TITULOS DE PESTANA Y DE SUS CONTENIDOS #
##################################################
mainPanel(
#tableOutput("contents"),
tabsetPanel(type = "tabs",
tabPanel("Base a segmentar",
h4("Tabla con variables agrupadas:"),DT::dataTableOutput("tabledata30")),
tabPanel("Gráficos por variables agrupadas",
h4("Diagrama de frecuencias de variables agrupadas:"),plotOutput("plot30")),
tabPanel("Base segmentada",
h4("Tabla con variables de efectividad:"),DT::dataTableOutput("tabledata31")),
tabPanel("Gráficos por variables de efectividad",
h4("Diagrama de frecuencias de variables de efectividad:"),plotOutput("plot31"))
)
#dataTableOutput('horario'),
# verbatimTextOutput("summary"),
#tableOutput("view")
)
#fin de mainPanel
#)
#fin de sidebarLayout
),
#fin de tabPanel
###############################################################################################################################################
# FIN SEGUNDA PESTANA PRINCIPAL: ANALISIS DE DATOS #
###############################################################################################################################################
###############################################################################################################################################
# INICIO TERCERA PESTANA PRINCIPAL: MODELO DE SEGMENTACION #
###############################################################################################################################################
##################################################
# TITULO DE TERCERA PESTANA #
##################################################
tabPanel("BASE A GESTIONAR",
#sidebarLayout(
##################################################
# TITULO DE CONTENIDO TERCERA PESTANA #
##################################################
titlePanel("3. Formato de carga"),
sidebarPanel(
downloadButton("downloadData", "Download")
# Horizontal line ----
),
#fin de sidebarPanel
##################################################
# TITULOS DE PESTANA Y DE SUS CONTENIDOS #
##################################################
mainPanel(
#tableOutput("contents"),
tabsetPanel(type = "tabs",
tabPanel("Tabla de datos",
h4("Tabla de clientes y segmentos"),DT::dataTableOutput("tabledata50")),
tabPanel("Resumen de segmentos de efectividad",
h4("R's por efectividad:"),DT::dataTableOutput("tabledata51"))
)
#dataTableOutput('horario'),
# verbatimTextOutput("summary"),
#tableOutput("view")
)
#fin de mainPanel
#)
#fin de sidebarLayout
)
#fin de tabPanel
)
# fin de navbarPage
),
#########################################################################################################################################################
# #
# FIN PARTE: ui #
# #
#########################################################################################################################################################
#-------------------------------------------------------------------------------
#########################################################################################################################################################
# #
# INICIO PARTE: SERVER #
# #
#########################################################################################################################################################
server <- function(input, output) {
############################################################
# CARGANDO LA BASE AL TEMPORAL #
############################################################
##################################################
# GUARDANDO ARCHIVO #
##################################################
# Reactive: reads the Excel workbook chosen in input$file1 and returns it
# as a plain data.frame; yields NULL until a file has been uploaded.
datasetInput1 <- reactive({
  uploaded <- input$file1
  if (is.null(uploaded))
    return(NULL)
  library(readxl)
  as.data.frame(read_excel(uploaded$datapath))
})
#fin de datasetInput1
# Reactive: on-disk path of the uploaded sales-model file (input$file2);
# NULL while nothing has been uploaded.
modeloVenta <- reactive({
  input$file2$datapath
})
#fin de datasetInput1
# When the "ejecutar01" button is pressed, push the raw upload into the
# OUTBOUND_MIGRACION table on AWS RDS (previous snapshot is truncated first).
observeEvent(input$ejecutar01,{
  ############################################
  # UPLOADING DATA TO AWS                    #
  ############################################
  library("DBI")
  library("RMySQL")
  df <- as.data.frame(datasetInput1())
  withProgress(message = 'Subiendo a AWS', value = 0, {
    # Open the MySQL connection; the password is prompted interactively.
    # FIX: the argument must be lowercase `port` — the original `Port = 3306`
    # was silently absorbed by `...` and ignored (R argument matching is
    # case-sensitive), leaving the driver on its default port.
    db2 <- dbConnect(RMySQL::MySQL(),
                     dbname = "movil",
                     host = "database-movil.catjbiapkswk.us-east-1.rds.amazonaws.com",
                     user = "user_movil",
                     password = rstudioapi::askForPassword("Database password"),
                     port = 3306)
    # FIX: the connection was never released; close it even on error.
    on.exit(dbDisconnect(db2), add = TRUE)
    # Replace the previous snapshot with the freshly uploaded data.
    dbExecute(db2, "TRUNCATE TABLE OUTBOUND_MIGRACION")
    dbWriteTable(conn = db2, "OUTBOUND_MIGRACION", value = df, append = TRUE, row.names = FALSE)
    # Single-step progress bar (the original 1:1 loop was a no-op wrapper).
    incProgress(1, detail = paste("parte ", 1))
    Sys.sleep(0.1)
  })
})
# Reactive: feature-engineering step. Takes the raw upload from
# datasetInput1() and derives every column the scoring model needs:
# normalised IDs, the portfolio period, and the grouped ("_2") factors.
# Columns referenced below but not created here (CALC_TOTAL_UNIDADES,
# PLAN_TARIFARIO, CLUSTER_MODELO, RECICLADOS, REGION, ...) are assumed to
# arrive in the uploaded Excel file — TODO confirm against the template.
datasetInput2 <- reactive({
datos2 <- as.data.frame(datasetInput1())
############################################
# RECOMPUTING BASE VARIABLES               #
############################################
# Phone number: rebuilt as "88" + characters 3..11 of the original value.
datos2$TELEFONO <- as.factor(datos2$TELEFONO) ##9
datos2$TELEFONO <- as.factor(paste("88",substring(datos2$TELEFONO,3,11),sep=""))
# Identity document: left-pad to 8 characters with zeros (DNI format).
datos2$RUCCOMPANIA <- as.factor(datos2$RUCCOMPANIA) ##8
datos2$RUCCOMPANIA <- as.factor(str_pad(datos2$RUCCOMPANIA,8,pad="0"))
# Exactly 8 characters => national ID (DNI); anything else => OTROS.
datos2$TIPO_DE_IDENTIFICACION[nchar(as.character(datos2$RUCCOMPANIA)) == 8] <- 'DNI' ##49
datos2$TIPO_DE_IDENTIFICACION[nchar(as.character(datos2$RUCCOMPANIA)) != 8] <- 'OTROS'
datos2$TIPO_DE_IDENTIFICACION <- as.factor(datos2$TIPO_DE_IDENTIFICACION)
# Constant flag column (always "0").
datos2$FLAG_NG <- 0 ##50
datos2$FLAG_NG <- as.factor(datos2$FLAG_NG)
# RB_IG: characters 12-16 of the destination-plan label — presumably the
# monthly charge embedded in the plan name ("... 20.90 ..."); confirm the
# label format is fixed-width.
datos2$PLAN_DESTINO_1 <- as.factor(datos2$PLAN_DESTINO_1) ##29
datos2$RB_IG <- as.factor(substring(datos2$PLAN_DESTINO_1,12,16))
###############################################
# ADDING PERIODO_CARTERA (YYYYMM period)      #
###############################################
# From day 25 onwards the period rolls over to the next month.
# NOTE(review): in the day >= 25 branch the month is only incremented when
# it is < 10 — October/November/December are NOT advanced, unlike the other
# months (and December would also need a year roll-over). The two branches
# look inconsistent; confirm the intended behaviour.
if (as.numeric(format(Sys.Date(),"%d")) >= 25) {
datos2$PERIODO_CARTERA <- paste(as.character(format(Sys.Date(),"%Y")),if(as.numeric(format(Sys.Date(),"%m")) >= 10)
{ as.character(as.numeric(format(Sys.Date(),"%m")))
} else {
paste('0',as.character(as.numeric(format(Sys.Date(),"%m"))+1),sep="")
}, sep="")
} else {
datos2$PERIODO_CARTERA <- paste(as.character(format(Sys.Date(),"%Y")),if(as.numeric(format(Sys.Date(),"%m")) >= 10)
{ as.character(as.numeric(format(Sys.Date(),"%m")))
} else {
paste('0',as.character(as.numeric(format(Sys.Date(),"%m"))),sep="")
}, sep="")
}
datos2$PERIODO_CARTERA <- as.factor(datos2$PERIODO_CARTERA) ##50
##########################################
# ADDING ID_CLIENTE                      #
##########################################
# Composite key: <period>_<phone>_1 (trailing "1" matches ID_CAMPANA below).
datos2$ID_CLIENTE <- paste(datos2$PERIODO_CARTERA,'_',datos2$TELEFONO,'_1',sep="") ##51
datos2$ID_CLIENTE <- as.factor(datos2$ID_CLIENTE)
##########################################
# ADDING ID_CAMPANA                      #
##########################################
# Hard-coded campaign id (this app handles campaign 1 only).
datos2$ID_CAMPANA <- 1 ##52
datos2$ID_CAMPANA <- as.factor(datos2$ID_CAMPANA)
############################################################
# DERIVING THE GROUPED PREDICTORS FOR THE SALES MODEL      #
############################################################
### VARIABLE CALC_TOTAL_UNIDADES_2 ##54
# Units grouped into two levels: exactly 4-5 units vs everything else.
datos2$CALC_TOTAL_UNIDADES_2[datos2$CALC_TOTAL_UNIDADES == 4 | datos2$CALC_TOTAL_UNIDADES == 5] <- '[4-5]'
datos2$CALC_TOTAL_UNIDADES_2[is.na(datos2$CALC_TOTAL_UNIDADES_2)] <- '[OTRO]'
datos2$CALC_TOTAL_UNIDADES_2 <- as.factor(datos2$CALC_TOTAL_UNIDADES_2)
### VARIABLE FECHAACTIVACIONCONTRATO_2 ##55
# Activation date arrives as DDMMYYYY (zero-padded to 8 digits); tenure is
# measured in ~30-day "months" between the period start and the activation.
datos2$FECHAACTIVACIONCONTRATO <- as.factor(str_pad(datos2$FECHAACTIVACIONCONTRATO,8,pad="0"))
inicio <- paste(substring(datos2$PERIODO_CARTERA,1,4),'-',substring(datos2$PERIODO_CARTERA,5,6),'-','01', sep="")
fin <- paste(substring(datos2$FECHAACTIVACIONCONTRATO,5,8),'-',substring(datos2$FECHAACTIVACIONCONTRATO,3,4),'-',substring(datos2$FECHAACTIVACIONCONTRATO,1,2), sep="")
calculo <- as.data.frame(round((as.Date(inicio,"%Y-%m-%d") - as.Date(fin,"%Y-%m-%d"))/30,0))
names(calculo) <- c('calculo')
#calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo == 0] <- '[OTRO]'
#calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo >= 1 & calculo$calculo <= 3] <- '[X<3 MESES]'
# NOTE(review): `<= 3` also captures zero and negative tenures (contracts
# activated on/after the period start) — the commented-out lines above
# suggest they were once a separate bucket; confirm this is intentional.
calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo <= 3] <- '[X<3 MESES]'
calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo >= 4 & calculo$calculo <= 6] <- '[4-6 MESES]'
calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo >= 7 & calculo$calculo <= 12] <- '[7-12 MESES]'
calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo >= 13 & calculo$calculo <= 24] <- '[13-24 MESES]'
calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo >= 25 & calculo$calculo <= 36] <- '[25-36 MESES]'
calculo$FECHAACTIVACIONCONTRATO_2[calculo$calculo > 36] <- '[X>36 MESES]'
# NOTE(review): `calculo` has no FECHAACTIVACIONCONTRATO column, so
# is.na(NULL) is logical(0) and this line is a no-op — '[OTRO]' is never
# produced. Probably is.na(calculo$calculo) was intended; confirm.
calculo$FECHAACTIVACIONCONTRATO_2[is.na(calculo$FECHAACTIVACIONCONTRATO)] <- '[OTRO]'
calculo$FECHAACTIVACIONCONTRATO_2 <- as.factor(calculo$FECHAACTIVACIONCONTRATO_2)
# NOTE(review): positional rename — assumes the upload has exactly 54
# columns so the new one lands at position 55; fragile if the template changes.
datos2 <- cbind(datos2,calculo$FECHAACTIVACIONCONTRATO_2)
names(datos2)[55] = "FECHAACTIVACIONCONTRATO_2"
### VARIABLE PLAN_TARIFARIO_2 ##56
# Tariff plan grouped into four coarse buckets.
datos2$PLAN_TARIFARIO_2[datos2$PLAN_TARIFARIO == 'Desconocido' | datos2$PLAN_TARIFARIO == 'Plan Demo'] <- '[DESCONOCIDO-DEMO]'
datos2$PLAN_TARIFARIO_2[datos2$PLAN_TARIFARIO == 'Entel Prepago' | datos2$PLAN_TARIFARIO == 'Entel Prepago Power 5'] <- '[PREPAGO+POWER]'
datos2$PLAN_TARIFARIO_2[datos2$PLAN_TARIFARIO == 'Prepago Chip 29'] <- '[PREPAGO CHIP29]'
datos2$PLAN_TARIFARIO_2[is.na(datos2$PLAN_TARIFARIO_2)] <- '[OTROS]'
datos2$PLAN_TARIFARIO_2 <- as.factor(datos2$PLAN_TARIFARIO_2)
### VARIABLE CLUSTER_MODELO_2 ##57
# Handset-cluster grouped: 1&4 merged, 2 and 3 kept, NA -> OTRO.
datos2$CLUSTER_MODELO_2[datos2$CLUSTER_MODELO == 'Grupo 1' | datos2$CLUSTER_MODELO == 'Grupo 4'] <- '[GRUPO 1 Y 4]'
datos2$CLUSTER_MODELO_2[datos2$CLUSTER_MODELO == 'Grupo 2'] <- '[GRUPO 2]'
datos2$CLUSTER_MODELO_2[datos2$CLUSTER_MODELO == 'Grupo 3'] <- '[GRUPO 3]'
datos2$CLUSTER_MODELO_2[is.na(datos2$CLUSTER_MODELO)] <- '[OTRO]'
datos2$CLUSTER_MODELO_2 <- as.factor(datos2$CLUSTER_MODELO_2)
### VARIABLE PROM_REC ##27
# Average top-up: numeric, missing values treated as zero.
datos2$PROM_REC <- as.numeric(datos2$PROM_REC)
datos2$PROM_REC[is.na(datos2$PROM_REC)] <- 0
### VARIABLE RB_IG ##30
# NOTE(review): RB_IG is a factor here, and as.numeric() on a factor
# returns the internal level codes, NOT the parsed values —
# as.numeric(as.character(...)) is almost certainly what was intended.
# Confirm, since GAP2 below depends on this value.
datos2$RB_IG <- as.numeric(datos2$RB_IG)
### VARIABLE GAP2 ##37
# NOTE(review): this NA-zeroing is immediately overwritten by the full
# assignment on the next line (the two statements look swapped).
datos2$GAP2[is.na(datos2$PROM_REC)] <- 0
datos2$GAP2 <- datos2$PROM_REC - datos2$RB_IG
### VARIABLE PLAN_DESTINO_2 ##59
# Destination plan collapsed to two groups: the 20.90 REV plan vs the rest.
datos2$PLAN_DESTINO_3[datos2$PLAN_DESTINO_1 == 'Entel Chip 20.90 REV'] <- '[GRUPO1]'
datos2$PLAN_DESTINO_3[is.na(datos2$PLAN_DESTINO_3)] <- '[GRUPO2]'
datos2$PLAN_DESTINO_3 <- as.factor(datos2$PLAN_DESTINO_3)
### VARIABLE RECICLADOS_2 ##60
# Months since the line was recycled; missing recycle dates are pushed to
# year 9999 so the difference comes out negative -> '[NUEVO]'.
inicio1 <- paste(substring(datos2$PERIODO_CARTERA,1,4),'-',substring(datos2$PERIODO_CARTERA,5,6),'-','01', sep="")
fin1 <- datos2$RECICLADOS
fin1[is.na(fin1)] <- paste('9999',substring(datos2$PERIODO_CARTERA,5,6), sep="")
fin1 <- paste(substring(fin1,1,4),'-',substring(fin1,5,6),'-','01', sep="")
calculo1 <- as.data.frame(round((as.Date(inicio1,"%Y-%m-%d") - as.Date(fin1,"%Y-%m-%d"))/30,0))
names(calculo1) <- c('calculo1')
calculo1$RECICLADOS_2[calculo1$calculo1 < 0] <- '[NUEVO]'
calculo1$RECICLADOS_2[calculo1$calculo1 == 0] <- '[OTROS]'
calculo1$RECICLADOS_2[calculo1$calculo1 >= 1 & calculo1$calculo1 <= 6] <- '[1-6 MESES]'
calculo1$RECICLADOS_2[calculo1$calculo1 > 6] <- '[7 A MAS MESES]'
calculo1$RECICLADOS_2 <- as.factor(calculo1$RECICLADOS_2)
# Positional rename again — same fragility as column 55 above.
datos2 <- cbind(datos2,calculo1$RECICLADOS_2)
names(datos2)[60] = "RECICLADOS_2"
### VARIABLE REGION ##48
datos2$REGION <- as.factor(datos2$REGION)
### VARIABLE VERSION_MODELO ##61
# Model-version tag stamped onto every row.
datos2$VERSION_MODELO <- 'V1'
datos2$VERSION_MODELO <- as.factor(datos2$VERSION_MODELO)
# Return the enriched data.frame.
datos2
})
#fin de datasetInput1
# Reactive: model-input view — the key columns plus the grouped predictors,
# with PLAN_DESTINO_3 exposed under the name PLAN_DESTINO_2.
datasetInput3 <- reactive({
  base <- as.data.frame(datasetInput2())
  base <- select(
    base,
    ID_CLIENTE, PERIODO_CARTERA, ID_CAMPANA, REGION,
    CALC_TOTAL_UNIDADES_2, FECHAACTIVACIONCONTRATO_2,
    CLUSTER_MODELO_2, RECICLADOS_2, PLAN_DESTINO_3,
    GAP2, RB_IG, PROM_REC
  )
  names(base)[9] <- "PLAN_DESTINO_2"
  base
})
#fin de datasetInput3
# Reactive: contract-history view of the enriched upload (raw activation
# date, tariff plan, cluster and destination plan).
datasetInput4 <- reactive({
datos4 <- as.data.frame(datasetInput2())
# NOTE(review): RECICLADOS_3 is never created by datasetInput2() (only
# RECICLADOS and RECICLADOS_2 exist there), so this select() will fail at
# runtime with "object not found" — confirm which column was intended
# (the rename below suggests it should surface as RECICLADOS).
datos4 <- select(datos4,ID_CLIENTE,PERIODO_CARTERA,ID_CAMPANA,REGION,FECHAACTIVACIONCONTRATO,PLAN_TARIFARIO,CLUSTER_MODELO,PLAN_DESTINO_1,RECICLADOS_3)
names(datos4)[9] = "RECICLADOS"
datos4
})
#fin de datasetInput4
# Reactive: scores every customer with the uploaded sales model and returns
# one row per phone number carrying its effectiveness segment (R1/R2/R3).
datasetInput5 <- reactive({
datos5 <- as.data.frame(datasetInput2())
# NOTE(review): the result of this call is discarded — it looks like a
# leftover interactive check for columns containing NAs (dead code).
which(colSums(is.na(datos5))!=0)# locate columns with NAs (result unused)
datos5 <- select(datos5,ID_CLIENTE,PERIODO_CARTERA,ID_CAMPANA,TELEFONO,COMPANIA,RUCCOMPANIA,REGION,CALC_TOTAL_UNIDADES_2,FECHAACTIVACIONCONTRATO_2,CLUSTER_MODELO_2,RECICLADOS_2,PLAN_DESTINO_3,GAP2,RB_IG,PROM_REC)
names(datos5)[12] = "PLAN_DESTINO_2"
#############################################################
# K-MEANS CLUSTER FROM THE QUANTITATIVE VARIABLES           #
#############################################################
# NOTE(review): kmeans() runs without set.seed(), so the cluster labels —
# and therefore the downstream predictions — are not reproducible between
# sessions; confirm the model is robust to relabelled clusters.
cluster<-data.frame(GAP2=datos5$GAP2,RB_IG=datos5$RB_IG,PROM_REC=datos5$PROM_REC)
res<-kmeans(scale(cluster),3)
datos5_1<-cbind(datos5,cluster_cuanti=res$cluster)
###############################################################
# FORMAT THE VARIABLES THAT FEED THE MODEL                    #
###############################################################
datos5_2<-data.frame(REGION=datos5_1$REGION,
CALC_TOTAL_UNIDADES_2=datos5_1$CALC_TOTAL_UNIDADES_2,
FECHAACTIVACIONCONTRATO_2=datos5_1$FECHAACTIVACIONCONTRATO_2,
CLUSTER_MODELO_2=datos5_1$CLUSTER_MODELO_2,
RECICLADOS_2=datos5_1$RECICLADOS_2,
PLAN_DESTINO_2=datos5_1$PLAN_DESTINO_2,
cluster_cuanti=datos5_1$cluster_cuanti)
#################################
# LOAD THE UPLOADED MODEL       #
#################################
#setwd("Z:/Staff/Gerencia de Calidad/Compartido Calidad/12 - Business analytics/4.-QUERYS/QUERYS ENTEL_PERU_MIGRACIONES/R/")
#load("modeloventa_entel_migra_v2")
#load(InputModVenta())
#summary(modelo1)
# load() must restore an object named `modelo1` (used by predict() below)
# from the uploaded .RData file — TODO confirm the file always contains it.
load(modeloVenta())
#########################################
# GENERATE THE PREDICTIONS              #
#########################################
datos5_2$pred1_prob<- predict(modelo1,datos5_2,type="response")
# Hard-coded cut-offs: > 0.5 => predicted sale; segments R1 (> 0.77),
# R3 (< 0.6530), R2 otherwise — presumably calibrated offline; verify.
datos5_2$pred1_Clas<- ifelse(datos5_2$pred1_prob>0.5,"1","0")
datos5_2$pred1_Segm<- ifelse(datos5_2$pred1_prob>0.77,"R1",ifelse(datos5_2$pred1_prob<0.6530,"R3","R2"))
# Final shape: identification columns plus the segment only.
datos5_2<-data.frame(TELEFONO=datos5$TELEFONO,
PERIODO_CARTERA=datos5$PERIODO_CARTERA,
NOMBRE_CLIENTE=datos5$COMPANIA,
DOCUMENTO=datos5$RUCCOMPANIA,
#PROBVENT=datos5_2$pred1_prob,
#VENTA=datos5_2$pred1_Clas,
SEGMVENTA=datos5_2$pred1_Segm)
#PROBCONTACT=round(pred2_prob,3),
#CONTAC=pred2_Clas,
#SEGMCONT=pred2_Segm)
datos5_2
#names(datos36) <- c("TELÉFONO","PERIODO CARTERA","NOMBRE CLIENTE","DOCUMENTO","SEGMENTO DE VENTAS")
})
#fin de datasetInput5
# Reactive: assembles the final upload layout ("formato de carga") — all raw
# columns from the enriched upload plus the model's effectiveness segment.
# Columns such as PRODUCTO, GAP_1 and the BUCKET_* family are not created in
# datasetInput2() and are assumed to come straight from the uploaded file —
# TODO confirm against the Excel template.
datasetInput7 <- reactive({
datos7_1 <- data.frame(datasetInput2())
datos7_2 <- data.frame(datasetInput5())
#datos7_3 <- data.frame(datasetInput6())
datos7_4 <- data.frame(CORRELATIVO=datos7_1$CORRELATIVO,
CALC_TOTAL_UNIDADES=datos7_1$CALC_TOTAL_UNIDADES,
PRODUCTO=datos7_1$PRODUCTO,
CODIGO_COMPANIA=datos7_1$CODIGO_COMPANIA,
TIPOCOMPANIA=datos7_1$TIPOCOMPANIA,
COMPANIA=datos7_1$COMPANIA,
TIPOCUENTA=datos7_1$TIPOCUENTA,
RUCCOMPANIA=datos7_1$RUCCOMPANIA,
TELEFONO=datos7_1$TELEFONO,
ESTADOCONTRATO=datos7_1$ESTADOCONTRATO,
FECHAACTIVACIONCONTRATO=datos7_1$FECHAACTIVACIONCONTRATO,
CODIGOCONTRATOBSCS=datos7_1$CODIGOCONTRATOBSCS,
CICLOFACTURACION=datos7_1$CICLOFACTURACION,
PLAN_TARIFARIO=datos7_1$PLAN_TARIFARIO,
TIPO_PLAN=datos7_1$TIPO_PLAN,
CLUSTER_MODELO=datos7_1$CLUSTER_MODELO,
COSTO_MIN_NET=datos7_1$COSTO_MIN_NET,
COSTO_MIN_TFIJO=datos7_1$COSTO_MIN_TFIJO,
COSTO_MIN_TMOV=datos7_1$COSTO_MIN_TMOV,
COSTO_SMS=datos7_1$COSTO_SMS,
COSTO_MB=datos7_1$COSTO_MB,
PROM_CONS_CD=datos7_1$PROM_CONS_CD,
PROM_CONS_IXON=datos7_1$PROM_CONS_IXON,
PROM_CONS_IXOFF=datos7_1$PROM_CONS_IXOFF,
PROM_CONS_IXTOT=datos7_1$PROM_CONS_IXTOT,
PROM_CONS_MB=datos7_1$PROM_CONS_MB,
PROM_REC=datos7_1$PROM_REC,
GAP_1=datos7_1$GAP_1,
PLAN_DESTINO_1=datos7_1$PLAN_DESTINO_1,
RB_IG=datos7_1$RB_IG,
BUCKET_ONNET=datos7_1$BUCKET_ONNET,
BUCKET_OFFNET=datos7_1$BUCKET_OFFNET,
BUCKET_MB=datos7_1$BUCKET_MB,
BUCKET_SMS=datos7_1$BUCKET_SMS,
MOD_VENTA=datos7_1$MOD_VENTA,
EQUIP_OFREC=datos7_1$EQUIP_OFREC,
GAP_2=datos7_1$GAP_2,
PLAN_DESTINO_2=datos7_1$PLAN_DESTINO_2,
RB_IGV_PLANDESTINO_2=datos7_1$RB_IGV_PLANDESTINO_2,
BUCKET_ONNET_2=datos7_1$BUCKET_ONNET_2,
BUCKET_OFFNET_2=datos7_1$BUCKET_OFFNET_2,
# NOTE(review): the sales segment (SEGMVENTA) is written into the
# BUCKET_MB_2 slot — presumably to reuse a column of the fixed upload
# layout; confirm downstream consumers expect it there. This also assumes
# datasetInput2() and datasetInput5() return rows in the same order.
BUCKET_MB_2=datos7_2$SEGMVENTA,
BUCKET_SMS_2=datos7_1$BUCKET_SMS_2,
CORREOCOMPANIA=datos7_1$CORREOCOMPANIA,
EQUIP_OFREC_2=datos7_1$EQUIP_OFREC_2,
RECICLADOS=datos7_1$RECICLADOS,
SAT_OK=datos7_1$SAT_OK,
REGION=datos7_1$REGION
)
})
#fin de datasetInput7
###############################################################################################################################################
# INICIO PRIMERA PESTANA PRINCIPAL: INGRESO DE DATOS #
###############################################################################################################################################
##################################################
# PESTANA: TABLA DE DATOS- tabledata1 #
##################################################
# Tab "Tabla de datos": show the raw upload as an interactive table.
output$tabledata1 <- DT::renderDataTable({
  raw_upload <- datasetInput1()
  DT::datatable(raw_upload)
})
#fin de renderDataTable
##################################################
# PESTANA: RESUMEN DE DATOS- tabledata2 #
##################################################
# Tab "Resumen de datos": print summary statistics of the raw upload.
output$tabledata2 <- renderPrint({
  raw_upload <- datasetInput1()
  summary(raw_upload)
})
#fin de renderPrint
###############################################################################################################################################
# FIN PRIMERA PESTANA PRINCIPAL: INGRESO DE DATOS #
###############################################################################################################################################
###############################################################################################################################################
# INICIO SEGUNDA PESTANA PRINCIPAL: EFECTIVIDAD #
###############################################################################################################################################
##################################################
#PESTANA: ANNLISIS X CLIENTE- tabledata30 #
##################################################
# Tab "Base a segmentar": keys plus the grouped predictors from the
# enriched upload.
output$tabledata30 <- DT::renderDataTable({
  enriched <- as.data.frame(datasetInput2())
  enriched <- select(
    enriched,
    ID_CLIENTE, PERIODO_CARTERA, ID_CAMPANA, REGION,
    CALC_TOTAL_UNIDADES_2, FECHAACTIVACIONCONTRATO_2,
    CLUSTER_MODELO_2, RECICLADOS_2, PLAN_DESTINO_3,
    GAP2, RB_IG, PROM_REC
  )
  DT::datatable(enriched)
})
#fin de renderDataTable
##################################################
#PESTANA: Gráficos X CLIENTE- plot30 #
##################################################
# Tab "Gráficos por variables agrupadas": frequency bar chart of the
# predictor selected in input$VARX_30.
output$plot30 <- renderPlot({
  enriched <- as.data.frame(datasetInput2())
  enriched <- select(enriched, REGION, CALC_TOTAL_UNIDADES_2,
                     FECHAACTIVACIONCONTRATO_2, CLUSTER_MODELO_2, RECICLADOS_2,
                     PLAN_DESTINO_3, GAP2, RB_IG, PROM_REC)
  names(enriched) <- c("REGIÓN","CÁLCULO TOTAL DE UNIDADES",
                       "FECHA DE ACTIVACIÓN DE CONTRATO","CLUSTER MODELO",
                       "RECICLADOS","PLAN DESTINO","GAP","RB_IG",
                       "PROMEDIO DE RECARGA")
  # Map the UI label onto its column position after the rename above.
  col_idx <- switch(input$VARX_30,
    "REGIÓN" = 1,
    "CÁLCULO TOTAL DE UNIDADES" = 2,
    "FECHA DE ACTIVACIÓN DE CONTRATO" = 3,
    "CLUSTER MODELO" = 4,
    "RECICLADOS" = 5,
    "PLAN DESTINO" = 6,
    "GAP" = 7,
    "RB_IG" = 8,
    "PROMEDIO DE RECARGA" = 9
  )
  axis_labels <- names(enriched)
  plot_df <- subset(enriched[, c(1, col_idx)])
  names(plot_df) <- c("COMODIN", "X")
  # Frequency bar chart of the selected variable.
  ggplot(plot_df, aes(x = X)) +
    geom_bar(position = 'stack', stat = 'count') +
    labs(x = axis_labels[col_idx], y = "FRECUENCIAS") +
    scale_fill_manual(values = c("#FF0033", "#3300FF", "#00CC00",
                                 "lightgreen", "lightgoldenrod", "#00CC00"))
})
##################################################
#PESTANA: ANNLISIS X CLIENTE- tabledata31 #
##################################################
# Tab "Base segmentada": scored customers with display-friendly headers.
output$tabledata31 <- DT::renderDataTable({
  scored <- as.data.frame(datasetInput5())
  names(scored) <- c("TELÉFONO","PERIODO CARTERA","NOMBRE CLIENTE",
                     "DOCUMENTO","SEGMENTO DE VENTAS")
  DT::datatable(scored)
})
#fin de renderDataTable
##################################################
#PESTANA: Gráficos X CLIENTE- plot31 #
##################################################
# Tab "Gráficos por variables de efectividad": frequency bar chart of the
# effectiveness segment (column 5 of the scored data).
output$plot31 <- renderPlot({
  scored <- as.data.frame(datasetInput5())
  plot_df <- subset(scored[, c(1, 5)])
  names(plot_df) <- c("COMODIN", "X")
  ggplot(plot_df, aes(x = X)) +
    geom_bar(position = 'stack', stat = 'count') +
    labs(x = "Segmentos de efectividad", y = "Frecuencias") +
    scale_fill_manual(values = c("#FF0033", "#3300FF", "#00CC00",
                                 "lightgreen", "lightgoldenrod", "#00CC00"))
})
###############################################################################################################################################
# FIN SEGUNDA PESTANA PRINCIPAL: ANALISIS DE DATOS #
###############################################################################################################################################
###############################################################################################################################################
# INICIO TERCERA PESTANA PRINCIPAL: MODELO DE SEGMENTACION #
###############################################################################################################################################
##################################################
#PESTANA: RESULTADOS- tabledata50 #
##################################################
# Tab "Tabla de datos" (upload layout): show the final formatted base.
output$tabledata50 <- DT::renderDataTable({
  upload_layout <- datasetInput7()
  DT::datatable(upload_layout)
})
#fin de renderDataTable
#############################################################
##DESCARGA: descarga de formato de carga #
#############################################################
# Download button: serve the final upload layout as FC_BASE_GESTIONAR.csv.
output$downloadData <- downloadHandler(
  filename = function() paste0("FC_BASE_GESTIONAR", ".csv"),
  content = function(file) {
    write.csv(datasetInput7(), file, row.names = FALSE)
  }
)
##################################################
#PESTANA: RESULTADOS- tabledata51 #
##################################################
# Tab "Resumen de segmentos de efectividad": record count per effectiveness
# segment (BUCKET_MB_2 carries the segment in the upload layout).
output$tabledata51 <- DT::renderDataTable({
  layout_df <- as.data.frame(datasetInput7())
  layout_df <- subset(layout_df[, c("CORRELATIVO", "BUCKET_MB_2")])
  layout_df$CONTEO <- 1
  counts <- aggregate(layout_df$CONTEO, by = list(layout_df$BUCKET_MB_2),
                      FUN = sum)
  names(counts) <- c("R EFECTIVIDAD", "Q REGISTROS")
  DT::datatable(counts)
})
#fin de renderDataTable
#########################################################################################################################################################
# #
# FIN PARTE: SERVER #
# #
#########################################################################################################################################################
}
)
# FIN DE SHINYAPP
|
af54b1cc8d6a1def0ec493cbe907c0f841ad811b
|
08e4fb6e254373dbe30fe0aeee5b9eaf91b39402
|
/sandbox/save_cps.R
|
2e43430457a431f9fb506168bef77e0299864037
|
[
"MIT"
] |
permissive
|
Reed-EVIC/cpsvote
|
67b9345b575caaead7f65934d9e8e259230a561c
|
18c063722ec92c6cbc00e3d88183ef7ff18a6799
|
refs/heads/master
| 2022-11-20T23:37:16.726099
| 2022-11-11T04:24:24
| 2022-11-11T04:24:24
| 191,612,449
| 1
| 3
|
NOASSERTION
| 2021-08-29T04:56:45
| 2019-06-12T17:01:59
|
HTML
|
UTF-8
|
R
| false
| false
| 182
|
r
|
save_cps.R
|
# Build and store the packaged `cps` dataset for the cpsvote package.
library(cpsvote)
library(dplyr)
# Read the raw CPS extracts, drop the bookkeeping `file` column, and apply
# the package's reweighting step.
cps <- cpsvote::read_cps() %>%
select(-file) %>%
cps_reweight()
# Save as xz-compressed package data and refresh the documentation.
usethis::use_data(cps, overwrite = TRUE, compress = "xz")
devtools::document()
|
33dd6e91259cf8b5aac7994a87446440fb532397
|
91372d03d2f6ce4e3f84d20e6b370bcfb7a61b5e
|
/man/generate.ExpressionData.Rd
|
a8fc0be0268bcf2af81da92b40c7a660bcca8f2c
|
[] |
no_license
|
cran/imputeLCMD
|
45b42699368ae00db1cd0d7ece8b9c1aff122b8a
|
23debc0c095e85072f9d352ec7c531e17a5c70b2
|
refs/heads/master
| 2022-07-08T00:00:10.879127
| 2022-06-10T10:50:02
| 2022-06-10T10:50:02
| 21,778,105
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,093
|
rd
|
generate.ExpressionData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generatePepData.R
\name{generate.ExpressionData}
\alias{generate.ExpressionData}
\title{Generate expression data}
\usage{
generate.ExpressionData(
nSamples1,
nSamples2,
meanSamples,
sdSamples,
nFeatures,
nFeaturesUp,
nFeaturesDown,
meanDynRange,
sdDynRange,
meanDiffAbund,
sdDiffAbund
)
}
\arguments{
\item{nSamples1}{number of samples in condition 1}
\item{nSamples2}{number of samples in condition 2}
\item{meanSamples}{xxx}
\item{sdSamples}{xxx}
\item{nFeatures}{number of total features}
\item{nFeaturesUp}{number of features up regulated}
\item{nFeaturesDown}{number of features down regulated}
\item{meanDynRange}{mean value of the dynamic range}
\item{sdDynRange}{sd of the dynamic range}
\item{meanDiffAbund}{xxx}
\item{sdDiffAbund}{xxx}
}
\value{
A list containing the data, the conditions label and the regulation
label (up/down/no)
}
\description{
This function generates artificial peptide abundance data containing
differentially abundant (DA) proteins; sample values are drawn from a
Gaussian distribution.
}
|
905d96ea660cd754ccb626d5ceaa7b73e9437431
|
8bf1882e1e7b506fd12104f3a989b7e781e2a735
|
/R/misc.R
|
7dcc754033f32a1b7688d74bfeecb12ea72830f5
|
[] |
no_license
|
milanaorlova723/brocks
|
2014f614951271fc7b3d906cbef16ffdb235b6ab
|
782f2f505786fcfde2925e5464a078b608fae6dd
|
refs/heads/master
| 2023-06-16T20:54:08.265717
| 2018-02-14T23:56:25
| 2018-02-14T23:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,288
|
r
|
misc.R
|
##' Find the number of days in the month of a given date
##'
##' Adapted from Hmisc::monthDays (which currently has a broken dependency).
##' Works by moving to "day 0" of the following month: POSIX date
##' normalisation turns that into the last day of the month of interest,
##' whose day-of-month is exactly the month length.
##' @param time The Date/time you're interested in finding the number of days
##' in the month for (anything as.POSIXlt accepts; vectorised)
##' @return The number of days in that month
days_in_month <- function(time) {
  parts <- as.POSIXlt(time)
  # Zero out the clock and set day-of-month to 0 ...
  parts$sec[] <- 0
  parts$min <- 0
  parts$hour <- 0
  parts$mday[] <- 0
  # ... then step to the next month: day 0 of month m+1 == last day of month m.
  parts$mon <- parts$mon + 1
  as.POSIXlt(as.POSIXct(parts))$mday
}
#' Read a text file
#'
#' A simple wrapper around \code{\link{readLines}} that joins all lines of a
#' file into a single newline-separated string.
#'
#' @param file A filename to read-in
#' @param print Should the contents of the file be printed to the console with
#'   \code{\link{cat}}? (If so, results will be returned invisibly.)
#'
#' @return The contents of the file, as a character vector of length one.
#' @export
read_txt <- function(file, print = FALSE) {
  contents <- paste0(readLines(file), collapse = "\n")
  if (print) {
    cat(contents)
    invisible(contents)
  } else {
    contents
  }
}
#' Turn an R vector into a SQL vector
#'
#' Escapes embedded single quotes (by doubling them, per SQL convention),
#' then renders the elements as a parenthesised, quoted, comma-separated
#' SQL list.
#'
#' @param x A vector
#'
#' @return \code{character}
#' @export
#'
#' @examples
#' to_sql_vector(letters[1:10])
to_sql_vector <- function(x) {
  escaped <- gsub("'", "''", x)
  paste0("('", paste(escaped, collapse = "', '"), "')")
}
##' Summarise missing data in a data.frame
##'
##' Computes, for every column of \code{x}, the proportion of values that are
##' \code{NA}, and returns the columns sorted from most to least missing.
##'
##' @param x a data.frame
##' @return A data.frame with one row per column of \code{x}: \code{variable}
##'   (column name), \code{pc_missing} (proportion of NAs, 0-1) and \code{`%`}
##'   (the same proportion formatted by \code{scales::percent}), sorted by
##'   \code{pc_missing} descending.
id_na <- function(x) {
  # apply() coerces the data.frame to a matrix first; harmless here since
  # is.na() works on matrices of any type.
  pc_missing <- x %>%
    apply(2, function(x) mean(is.na(x)))
  data.frame(
    variable = names(pc_missing),
    pc_missing = pc_missing
  ) %>%
    arrange(desc(pc_missing)) %>%
    mutate(`%` = scales::percent(pc_missing))
}
#' 'Agresti-Coull'ish Standard Errors
#'
#' Agresti-Coull (1998) intervals are a great way to get a quick and
#' non-terrible estimate of a proportion. They work by using a 'Wald' interval,
#' after the addition of 2 successes and 2 failures to the sample (other numbers
#' can be specified, via the \code{wt} argument). This function creates a
#' Wald-style standard-error, after adding psuedo-responses.
#'
#' @name ac_se
#' @param logical_var A \code{\link{logical}} \code{\link{vector}}
#' @param wt The number of successes and failures to add to the sample before
#'   construction of a Wald interval
#' @return \code{\link{numeric}}. An estimate of the sample's standard error.
#' @export
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#' @references {Agresti, A., & Coull, B. A. (1998). Approximate is better than
#'   "exact" for interval estimation of binomial proportions. \emph{The American
#'   Statistician}, 52(2), 119-126.}
#' @examples
#' ac_se(as.logical(round(runif(10))))
ac_se <- function(logical_var, wt = 2){
  # Successes, and the TOTAL number of trials.
  # FIX: the original used sum(!logical_var) — the failure count — where the
  # total sample size is required, so p_hat could exceed 1 and the sqrt went
  # NaN (e.g. for an all-TRUE input).
  x <- sum(logical_var)
  n <- length(logical_var)
  # Agresti-Coull adjustment: add `wt` pseudo-successes and `wt`
  # pseudo-failures, then compute a Wald SE on the adjusted sample.
  x_hat <- x + wt
  n_hat <- n + wt * 2
  p_hat <- x_hat / n_hat
  sqrt((p_hat * (1 - p_hat)) / n_hat)
}
#' A wrapper for the googlesheets package: A lazy way to read a googlesheet
#'
#' Authenticates (via \code{\link{gs_auth}}), locates a sheet by exactly one
#' of key/title/url, and reads it (via \code{\link{gs_read}}). It assumes
#' your computing environment is already set up for the
#' \code{\link{googlesheets}} package; see its documentation for details.
#'
#' @param key Passed to \code{\link{gs_key}}
#' @param title Passed to \code{\link{gs_title}}
#' @param url Passed to \code{\link{gs_url}}
#'
#' @return The results of \code{\link{gs_read}}
#' @export
read_gs <- function(key = NULL, title = NULL, url = NULL){
  # Exactly one locator must be supplied; c() drops NULLs, so the length of
  # the combined vector is the number of arguments actually given.
  if (length(c(key, title, url)) != 1L) {
    stop("Only one sheet parameter may be supplied.")
  }
  googlesheets::gs_auth()
  # Dispatch on whichever locator was given.
  sheet <- if (!is.null(key)) {
    googlesheets::gs_key(key)
  } else if (!is.null(title)) {
    googlesheets::gs_title(title)
  } else {
    googlesheets::gs_url(url)
  }
  googlesheets::gs_read(sheet)
}
#' Repeat a character a variable number of times
#'
#' Effectively a version of \code{\link{rep}}, where only one value can be
#' repeated (by default, a space; " "), but it can be repeated a variable
#' number of times. Useful for creating even spacing for print and summary
#' methods.
#'
#' @name rep_char
#' @param x A value to repeat. Will be coerced to \code{\link{character}}.
#' @param times A \code{\link{numeric}} \code{\link{vector}}; the number of
#'   times that \code{x} should be repeated.
#' @return A \code{\link{character}} \code{\link{vector}} of x repeated
#'   various times
#' @export
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#' @examples
#'
#' # Strings repeating 'a' a variable number of times!
#' rep_char("a", 1:5)
#'
#' # Slightly more useful. Some text strings which we'd like to present:
#' desc <- c("\n", "first : 1st\n", "second : 2nd\n", "third : 3rd\n",
#'           "fourth : 4th\n", "umpteenth : ...\n")
#'
#' # However, the varying lengths make them look a little awkward
#' cat(desc)
#'
#' # We can use rep_char to add extra spaces to the strings which are shorter
#' # than the longest
#' desc_spaced <- paste0(rep_char(times = max(nchar(desc)) - nchar(desc)), desc)
#'
#' # Much better
#' cat(desc_spaced)
#'
rep_char <- function(x = " ", times){
  # FIX: vapply keeps the return type stable — the previous
  # unlist(lapply(...)) returned NULL (not character(0)) when `times` was
  # empty. Output is otherwise identical.
  vapply(times, function(n) paste(rep(x, n), collapse = ""), character(1))
}
#' Format Numeric Data with HTML Arrows
#'
#' @description {
#' When producing numbers in R markdown documents, it can be nice to try and
#' draw readers' attention to increases and decreases. The \code{html_tri}
#' function takes a numeric vector, and returns a \code{\link{character}}
#' vector of HTML strings, which will render in an (R) markdown document as
#' numbers accompanied with a green 'upward' triangle for positive numbers, a
#' red 'downward' triangle for negative ones, and a black square for numbers
#' which are exactly 0 by default. The colours can be altered by passing valid
#' CSS colour values to the \code{colours} argument, and the symbols by
#' passing valid HTML character values to the \code{symbols} argument. The
#' default values are in HTML decimal character codes.
#'
#' If you'd only like to green/red triangles for some non-zero numbers, you
#' can use the subset argument to pass a \code{\link{logical}} vector (the
#' same) length as \code{x} to \code{html_tri}. This will mean that only
#' elements of \code{x} will get a traingle when they are non-negative
#' \emph{and} \code{subset} is \code{TRUE}.
#' }
#'
#' @param x A \code{\link{numeric}} \code{\link{vector}}
#' @param format A function used to format the numbers before the HTML for the
#' triangles is added.
#' @param subset A \code{logical} vector. Should elements of \code{x} get
#' coloured arrows (as opposed to the symbol for 'nochange')?
#' @param symbols The symbols to use for increases, decreases, and things
#' not chaning respectively. Must a a vector of length 3, the entries having
#' the names \code{"up"}, \code{"down"}, and \code{"nochange"}
#' @param colours As above, but for the colours of the symbols
#'
#' @return A vector of \code{\link{character}} values, containing HTML so that
#' they should render with green/red triangles in an HTML document.
#' values in \code{x}.
#' @export
#' @name html_tri
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#' @examples
#' # This will output 'raw' HTML. To see the final result in an HTML markdown
#' # document, see the package vignette; vignette("brocks")
#'
#' html_tri(runif(10))
#'
#' # You could use other HTML symbols, even emojis if you like!
#' # These are HTML decimal codes (only unicode allowed in R packages), but
#' # you could use any valid characters (e.g. copy and paste)
#'
#' html_tri(runif(10), symbols = c("up" = "😊", "down" = "😞",
#' "nochange" = "😐"))
#'
html_tri <- function(
  x, format = round, subset = TRUE,
  symbols = c(up = "&#9650;", down = "&#9660;", nochange = "&#9632;"),
  colours = c(up = "green", down = "red", nochange = "black")
){
  # Classify each value: zero (or subset == FALSE) -> "nochange",
  # otherwise "up"/"down" by sign. Names index into symbols/colours.
  direction <- ifelse(
    !sign(x) | !subset, "nochange",
    ifelse(x > 0, "up", "down")
  )
  # Build the HTML: a coloured symbol followed by the formatted number.
  paste0(
    "<a style='color:", colours[direction], "'>", symbols[direction],
    "</a><a>", format(x), "</a>"
  )
}
#' Miscellaneous Number Formatting Functions
#'
#' @description {
#' Sometimes (for example when illustrating differences), it can be useful for
#' positive numbers to be prefixed by a + sign, just as negative numbers are
#' with a - sign. The following are a few (very simple) wrapper functions
#' which do this.
#'
#' \describe{
#' \item{\bold{\code{fmt_pm}}}{ Is a wrapper for \code{\link{round}},
#' which also \code{\link{paste}}s a + sign before positive numbers
#' }
#' \item{\bold{\code{fmt_pc}}}{ A simple formatting function for
#' percentages. Defaults to 0 decimal places
#' }
#' \item{\bold{\code{fmt_pc_pm}}}{ As above, but with a + prefix for
#' positive numbers
#' }
#' \item{\bold{\code{format_nps}}}{ A very simple formatter for the Net
#' Promoter Score
#' }
#' \item{\bold{\code{fmt_nps_pm}}}{ As above, but without the percentage
#' sign
#' }
#' \item{\bold{\code{unsci}}}{ Unscientific notation: Short colloquial
#' number formatting. For example, 1e+04 becomes "100k", 1.454e+09 becomes
#' "1.5B", etc.
#' }
#' \item{\bold{\code{unsci_dollars}}}{ A convenience function for the above,
#' with \code{currency = TRUE} as the default.
#' }
#' }
#' }
#'
#' @param x \code{\link{numeric}} data to format
#' @param currency Should numbers be prefixed with \code{symbol}?
#' @param symbol if \code{currency = TRUE}, a string to prefix numbers with
#' @param ... Passed to \code{\link{round}}
#' @param digits Parameter passed to \code{\link{round}}
#' @param type The truncation function for the number, in the context of the
#' text in which its likely to be formatted. One of \code{round} (the
#' \code{round} function is used), \code{greater} (the \code{floor} function
#' is used) or \code{less} (the \code{ceiling} function is used).
#' @param pad Should the resulting strings be prefix-padded with spaces to make
#' all strings in the character vector a uniform width?
#'
#' @return \code{\link{character}}.
#'
#' @export
#' @name misc_br_num_formats
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#'
fmt_pm <- function(x, ...){
  # Positive values get an explicit "+"; negatives already carry "-",
  # and exact zeros get no prefix.
  sign_prefix <- ifelse(x > 0, "+", "")
  paste0(sign_prefix, round(x, ...))
}
#' @name misc_br_num_formats
#' @export
fmt_pc <- function(x, type = c("round", "greater", "less"), digits = 0) {
  # match.arg() validates `type` and gives a clear error on an invalid
  # value; the previous `type[1]` silently passed bad input to switch(),
  # which returned NULL and produced a cryptic downstream failure.
  type <- match.arg(type)
  # Choose the truncation function: round, floor ("greater than shown"),
  # or ceiling ("less than shown").
  f <- switch(type, round = round, greater = floor, less = ceiling)
  # Scale to a percentage, truncate at the requested precision, and
  # append the percent sign.
  paste0(f(x * 100 * 10^(digits)) / 10^(digits), "%")
}
#' @name misc_br_num_formats
#' @export
fmt_pc_pm <- function(x, ...){
  # As fmt_pc(), but positive values are prefixed with "+".
  sign_prefix <- ifelse(x > 0, "+", "")
  paste0(sign_prefix, fmt_pc(x, ...))
}
#' @name misc_br_num_formats
#' @export
fmt_nps_pm <- function(x, ...){
  # Net Promoter Score: scale to +/-100, round, and prefix positives
  # with "+". Note the sign test is on the raw x, not the rounded value.
  prefix <- ifelse(x > 0, "+", "")
  paste0(prefix, round(x * 100, ...))
}
#' @name misc_br_num_formats
#' @export
fmt_nps <- function(x, ...){
  # Net Promoter Score: scale to +/-100, round, return as character.
  as.character(round(x * 100, ...))
}
#' @name misc_br_num_formats
#' @export
unsci <- function(x, digits = 1, currency = FALSE, symbol = "$", pad = TRUE) {
  # Rounding helper shared by every magnitude band
  rnd <- function(v) round(v, digits)
  # Pick a colloquial suffix elementwise by magnitude:
  # thousands -> "k", millions -> "MM", billions+ -> "B",
  # smaller values are just rounded.
  # The outer paste0("", ...) coerces NA to the string "NA", following
  # the behaviour of the scales package.
  formatted <- paste0("", ifelse(
    abs(x) >= 1e+03 & abs(x) < 1e+06, paste0(rnd(x / 1e+03), "k"),
    ifelse(
      abs(x) >= 1e+06 & abs(x) < 1e+09, paste0(rnd(x / 1e+06), "MM"),
      ifelse(abs(x) >= 1e+09, paste0(rnd(x / 1e+09), "B"), rnd(x))
    )
  ))
  # Optionally prefix with a currency symbol
  if (currency) {
    formatted <- paste0(symbol, formatted)
  }
  # Optionally left-pad with spaces so all strings share the same width
  if (pad) {
    formatted <- paste0(
      rep_char(times = max(nchar(formatted)) - nchar(formatted)),
      formatted
    )
  }
  formatted
}
#' @name misc_br_num_formats
#' @export
unsci_dollars <- function(x, ...) {
  # Convenience wrapper: unsci() with currency formatting on by default
  unsci(x, currency = TRUE, ...)
}
#' A vectorized version of switch
#'
#' A vectorized version of \code{\link{switch}}.
#'
#' @param EXPR As in \code{switch}, an expression which evaluated to a number or
#' character string. However, in \code{vswitch}, there can be more than one.
#'
#' @param ... Passed to \code{switch}
#'
#' @export
#' @name vswitch
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#' @examples
#'
#' # The usual version of 'switch' works perfectly with one value
#' x <- "a"
#' switch(x, a = 1, b = 2, c = 3)
#'
#' # But not with more than one
#' x <- letters[1:3]
#' \dontrun{switch(x, a = 1, b = 2, c = 3)}
#'
#' # vswitch works well where you'd like to 'switch' a vector
#' x <- letters[1:3]
#' vswitch(x, a = 1, b = 2, c = 3)
#'
#'
vswitch <- function(EXPR, ...){
  # Apply switch() to each element of EXPR, then flatten. Elements with
  # no matching alternative yield NULL and are dropped by unlist().
  per_element <- lapply(EXPR, function(value) switch(value, ...))
  unlist(per_element)
}
#' Extract package dependencies from an R script
#'
#' @param file A file containing R code to parse
#'
#' @return A character vector containing the names of packages used in
#' \code{file}
#' @export
extract_package_deps <- function(file) {
  # Read the file, strip comments, and drop blank / whitespace-only lines
  code_lines <- gsub("#.*$", "", readLines(file))
  code_lines <- code_lines[!grepl("^$|^[[:space:]]$", code_lines)]
  # Inline namespace references, like package::function or
  # package:::function
  inline_refs <- unlist(
    stringr::str_extract_all(code_lines, "[[:alnum:]_\\.]*:{2,3}")
  )
  inline_refs <- gsub(":{2,3}", "", inline_refs)
  # Packages loaded via library(package) or require(package)
  load_calls <- unlist(
    stringr::str_extract_all(
      code_lines,
      "library\\([[:alnum:]_\\.]*\\)|require\\([[:alnum:]_\\.]*\\)"
    )
  )
  load_calls <- gsub("library\\(|require\\(|\\)", "", load_calls)
  # Special infix operators commonly associated with certain packages;
  # each `if` yields NULL when absent, which c() silently drops
  all_text <- paste(code_lines, collapse = "\n")
  op_packages <- c(
    if (grepl("%$%|%>%|%<>%|%T>%", all_text)) "magrittr",
    if (grepl("%like%|%between%|%inrange%|%chin%", all_text)) "data.table",
    if (grepl("%<-%|%->%|%<=%|%=>%|%plan%|%tweak%", all_text)) "future"
  )
  # Combine, drop NAs and duplicates, and remove empty matches
  deps <- unique(stats::na.omit(c(inline_refs, load_calls, op_packages)))
  deps[deps != ""]
}
#' Return a CRAN repo: The user-default, or RStudio's
#'
#' Package installation on remote machines depends on a CRAN repo, which can be
#' tricky to set non-interactively. This simple wrapper function looks to see if
#' a default CRAN mirror has already been set. If it has, it is returned. If
#' not, \code{fallback} is returned.
#'
#' @return Either \code{fallback}, or the result of
#' \code{getOption("repos")["CRAN"]}
#'
#' @keywords internal
cran_repo <- function(fallback = "https://cran.rstudio.com/") {
  default_repo <- getOption("repos")["CRAN"]
  # An unset default appears as "@CRAN@" (and a missing entry as NA),
  # neither of which passes the http(s) prefix check, so fall back.
  if (grepl("^http", default_repo)) {
    default_repo
  } else {
    fallback
  }
}
#' Recursively Parse R Files in a Directory, and Install Packages Used
#'
#' \code{install_deps} (recursively) finds R code files in a directory, and uses
#' regular expressions to find code that looks like it refers to an R package
#' (via \code{\link{extract_package_deps}}). It then extracts the names of all
#' of these packages, checks that they're not already installed, and that they
#' are on CRAN, and then installs them (via \code{\link{install.packages}}).
#'
#' @param dir The directory to search for R files to parse
#' @param file_pattern A regular expression used to determine whether a file
#' should be parsed or not. The default will parse only \code{.R} and
#' \code{.Rmd} files
#' @param cran_mirror The CRAN mirror to use. The default calls a small function
#' which returns the Rstudio mirror, if no current default exists
#' @param ... Passed to \code{\link{install.packages}}
#'
#' @return Used for it's side effects (the installation of packages)
#' @export
install_deps <- function(dir = getwd(), file_pattern = "\\.R$|\\.Rmd$",
                         cran_mirror = cran_repo(), ...) {
  # Find candidate files, then scan each for package references
  file_list <- list.files(dir, recursive = TRUE) %>% .[grepl(file_pattern, .)]
  package_list <- file_list %>% lapply(extract_package_deps) %>% unlist() %>%
    unique
  # Let the user know which files you've scanned
  message("Searching...\n ", paste(file_list, collapse = "\n "), "\n")
  # Vector of installed packages
  installed <- utils::installed.packages()[,1]
  to_install <- package_list[!package_list %in% installed]
  already_installed <- package_list[package_list %in% installed]
  if (length(already_installed) > 0) {
    message("The following packages are already installed -- no action taken:",
            "\n", paste(already_installed, collapse = ", "))
  }
  # Get a list of everything on CRAN. Surprisingly fast!
  # NOTE(review): available.packages() returns a matrix, and %in% below
  # flattens it — so a name matching ANY cell (not just the "Package"
  # column) counts as "on CRAN". Consider cran_packages[, "Package"].
  cran_packages <- utils::available.packages(utils::contrib.url(cran_mirror))
  on_cran     <- to_install[ to_install %in% cran_packages]
  not_on_cran <- to_install[!to_install %in% cran_packages]
  if (length(not_on_cran) > 0) {
    warning("The following packages are not available on CRAN, and have not ",
            "been installed:\n", paste(not_on_cran, collapse = ", "))
  }
  # If there's nothing to do, end
  if (!length(on_cran) > 0) {
    message("\n\nUp to date.\n")
    return(invisible())
  }
  # Otherwise, install stuff
  if (length(on_cran) > 0) {
    message("Installing the following packages:\n\n",
            paste(on_cran, collapse = ", "))
    utils::install.packages(on_cran, repos = cran_mirror, ...)
  }
}
#' An idealised test data set, for demonstrating some of the functions
#'
#' An idealised test data set, for demonstrating some of the functions in the
#' package
#'
#' NOTE(review): consider adding an @format tag describing the object's
#' structure once confirmed against the data file.
#'
#' @name test_data
#' @docType data
#' @author Brendan Rocks \email{rocks.brendan@@gmail.com}
#' @keywords data
# NULL is the conventional roxygen2 target for documenting a dataset:
# the comment block above is attached to this placeholder.
NULL
|
b79a7be095f8e79dfa967f2c2c08a5f9623d6463
|
dc4168a901e1e370941a8a01e30566738f0fbd1e
|
/code/5_days_PEG_cfu_weight.R
|
ffbe96be5d8d08decbc44070a70bcc524132444e
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
SchlossLab/Tomkovich_PEG3350_mSphere_2021
|
41c23350d83c8abb81bb0d255e660aed8e18d202
|
3c025ee350f31d5edbb8da571424445d431c1d7b
|
refs/heads/master
| 2023-07-17T17:06:17.411282
| 2021-08-30T21:38:28
| 2021-08-30T21:38:28
| 275,246,469
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,676
|
r
|
5_days_PEG_cfu_weight.R
|
# Setup: load shared helpers and build the CFU and weight data frames ----
source("code/utilities.R") #Loads libraries, reads in metadata, functions
#Define color scheme for this figure----
color_scheme <- c("#238b45", "#88419d", "#f768a1", "#225ea8") #Adapted from http://colorbrewer2.org/#type=sequential&scheme=BuPu&n=4
color_groups <- c("C", "WM", "WMC", "WMR")
color_labels <- c( "Clind.", "5-day PEG", "5-day PEG + Clind.", "5-day PEG + 10-day recovery")
#Subset metadata to relevant groups and experiments (WM, WMC, WMR, C)----
# NOTE(review): five_day_PEG_subset() comes from code/utilities.R
metadata <- five_day_PEG_subset(metadata) %>%
  filter(!sample_type %in% c("cecum", "distal_colon", "proximal_colon")) #Get rid of rows corresponding to tissue samples in the metadata as these will create duplicate values for mice at timepoints where tissues were also collected
# Number of mice represented in the figure
mice <- length(unique(metadata$unique_mouse_id))
# 62 mice total for 5_days_PEG figure
#C. difficile CFU dataframe----
#Narrow metadata to just timepoints relevant to C. difficile CFU tracking (Anything on or after day 0)
cfudata <- metadata %>%
  filter(day > -1)
cfu_na <- sum(is.na(cfudata$avg_cfu)) #182 samples with NA values. Represent times when we either did not collect stool samples, weren't able to get a stool sample from a particular mouse, weren't able to plate the sample we did collect immediately after due to chamber issues or time constraints, or the mouse died early
#Drop rows with NA values for cfu:
cfudata <- cfudata %>%
  filter(!is.na(avg_cfu))
#Weight change dataframe----
#Note baseline weight for each group of mice (based on the earliest timepoint recorded for each experiment)----
baseline <- metadata %>% #Baseline weight was taken at day -5 for groups C, WM, and WMC
  filter(group == "C" & day == -5| #20 mice in C group
           group == "WM" & day == -5| #21 mice in WM group
           group == "WMC" & day == -5| #9 mice in WMC group
           group == "WMR" & day == -15) %>% #12 mice in WMR group, baseline weight was taken at day -15
  mutate(baseline_weight = weight) %>% #This column represents the initial weight that was recorded for each mouse
  select(unique_mouse_id, baseline_weight) #Will use unique_mouse_id to join baseline_weights to metadata
#Make a new column that represents weight_change from baseline_weight
weightdata <- inner_join(metadata, baseline, by = "unique_mouse_id") %>% #Join baseline weight to metadata
  group_by(unique_mouse_id, day) %>% #Group by each unique mouse and experiment day
  mutate(weight_change = weight-baseline_weight) %>% #Make a new column that represents the change in weight from baseline (all weights recorded in grams)
  ungroup() %>%
  filter(!is.na(weight)) #drop rows with NA values for weightdata. 1040 samples including NAs, 870 samples after excluding NAs
#Statistical Analysis----
set.seed(19760620) #Same seed used for mothur analysis
#Shapiro-Wilk test to see if cfu and weight change data is normally distributed:
#Note: p-value > 0.05 means the data is normally distributed
shapiro.test(cfudata$avg_cfu) #p-value < 2.2e-16
shapiro.test(weightdata$weight_change) #p-value = 1.485e-09
#Since p-value < 0.05 for both variables, we will use non-parametric tests
#Statiscal analysis of C. difficile CFU data----
#Kruskal_wallis test for differences across groups at different timepoints with Benjamini-Hochburg correction----
# One Kruskal-Wallis test per day (data nested by day), plus per-group
# medians via get_cfu_median() from code/utilities.R
cfu_kruskal_wallis <- cfudata %>%
  filter(day %in% c(0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 15, 20, 25, 30)) %>% #only test days that we have CFU data for #Only have cfu for WMR group on D7, exclude that day
  select(day, group, avg_cfu) %>%
  group_by(day) %>%
  nest() %>%
  mutate(model=map(data, ~kruskal.test(x=.x$avg_cfu, g=as.factor(.x$group)) %>% tidy())) %>%
  mutate(median = map(data, get_cfu_median)) %>%
  unnest(c(model, median)) %>%
  ungroup()
#Adjust p-values for testing multiple days and write results to table:
cfu_kruskal_wallis_adjust <- cfu_kruskal_wallis %>%
  select(day, statistic, p.value, parameter, method, C, WM, WMC, WMR) %>%
  mutate(p.value.adj=p.adjust(p.value, method="BH")) %>%
  arrange(p.value.adj) %>%
  write_tsv("data/process/5_days_PEG_cfu_stats_all_days.tsv")
#Timepoints where C. difficile CFU is significantly different across the groups of mice after BH adjustment of p-values:
sig_cfu_days <- pull_sig_days(cfu_kruskal_wallis_adjust)
#Perform pairwise Wilcoxan rank sum tests for days that were significant by Kruskal-Wallis test
cfu_stats_pairwise <- cfu_kruskal_wallis %>%
  filter(day %in% sig_cfu_days) %>% #only perform pairwise tests for days that were significant
  group_by(day) %>%
  mutate(model=map(data, ~pairwise.wilcox.test(x=.x$avg_cfu, g=as.factor(.x$group), p.adjust.method="BH") %>%
                     tidy() %>%
                     mutate(compare=paste(group1, group2, sep="-")) %>%
                     select(-group1, -group2) %>%
                     pivot_wider(names_from=compare, values_from=p.value)
  )
  ) %>%
  unnest(model) %>%
  select(-data, -parameter, -statistic) %>%
  write_tsv("data/process/5_days_PEG_cfu_stats_sig_days.tsv")
#Format pairwise stats to use with ggpubr package
cfu_plot_format_stats <- cfu_stats_pairwise %>%
  #Remove all columns except pairwise comparisons and day
  select(-p.value, -method,-C, -WM, -WMC, -WMR) %>%
  group_split() %>% #Keeps a attr(,"ptype") to track prototype of the splits
  lapply(tidy_pairwise) %>%
  bind_rows()
#Statistical analysis of mouse weight change data----
#Kruskal_wallis test for differences across groups at different timepoints with Benjamini-Hochburg correction----
# Same per-day Kruskal-Wallis / pairwise-Wilcoxon workflow as the CFU
# analysis above, applied to weight_change instead of avg_cfu
weight_kruskal_wallis <- weightdata %>%
  filter(day %in% c(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30)) %>% #only test days that we have weight data for at least 3 groups
  select(day, group, weight_change) %>%
  group_by(day) %>%
  nest() %>%
  mutate(model=map(data, ~kruskal.test(x=.x$weight_change, g=as.factor(.x$group)) %>% tidy())) %>%
  mutate(median = map(data, get_weight_median)) %>%
  unnest(c(model, median)) %>%
  ungroup()
#Adjust p-values for testing multiple days and write results to table:
weight_kruskal_wallis_adjust <- weight_kruskal_wallis %>%
  select(day, statistic, p.value, parameter, method, C, WM, WMC, WMR) %>%
  mutate(p.value.adj=p.adjust(p.value, method="BH")) %>%
  arrange(p.value.adj) %>%
  write_tsv("data/process/5_days_PEG_weight_stats_all_days.tsv")
# Timepoints where weight change is significantly different across the groups of mice after BH adjustment of p-values:
sig_weight_days <- pull_sig_days(weight_kruskal_wallis_adjust)
#Perform pairwise Wilcoxan rank sum tests for days that were significant by Kruskal-Wallis test
weight_stats_pairwise <- weight_kruskal_wallis %>%
  filter(day %in% sig_weight_days) %>% #only perform pairwise tests for days that were significant
  group_by(day) %>%
  mutate(model=map(data, ~pairwise.wilcox.test(x=.x$weight_change, g=as.factor(.x$group), p.adjust.method="BH") %>%
                     tidy() %>%
                     mutate(compare=paste(group1, group2, sep="-")) %>%
                     select(-group1, -group2) %>%
                     pivot_wider(names_from=compare, values_from=p.value)
  )
  ) %>%
  unnest(model) %>%
  select(-data, -parameter, -statistic) %>%
  write_tsv("data/process/5_days_PEG_weight_stats_sig_days.tsv")
#Format pairwise stats to use with ggpubr package
weight_plot_format_stats <- weight_stats_pairwise %>%
  #Remove all columns except pairwise comparisons and day
  select(-p.value, -method,-C, -WM, -WMC, -WMR) %>%
  group_split() %>% #Keeps a attr(,"ptype") to track prototype of the splits
  lapply(tidy_pairwise) %>%
  bind_rows()
#Plots of CFU and weight data----
#Transform day column variable from character to integer variable
cfudata <- cfudata %>%
  mutate(day = as.integer(day))
#Dataframe of cfu data for just the initial 10 days of the experiment
cfudata_10dsubset <- cfudata %>%
  mutate(day = as.integer(day)) %>% #transform day to integer variable
  filter(day < 12) #only include data through day 10
cfu_kruskal_wallis_adjust <- cfu_kruskal_wallis_adjust %>%
  mutate(day = as.integer(day)) #transform day to integer variable
#Statistical annotation labels based on adjusted kruskal-wallis p-values for first 10 days of experiment:
# NOTE(review): x_annotation, y_position and label are read implicitly by
# the plotting helpers (plot_cfu_data / kw_label from code/utilities.R),
# so they must be set immediately before each plot is built
x_annotation <- cfu_kruskal_wallis_adjust %>%
  filter(day < 12) %>% #Only include results through day 10 for this plot
  filter(p.value.adj <= 0.05) %>%
  pull(day)
y_position <- max(cfudata$avg_cfu)
label <- kw_label(cfu_kruskal_wallis_adjust %>% filter(day < 12))
#Only include results through day 10 for this plot
#Plot cfu for just the initial 10 days
cfu_10d <- plot_cfu_data(cfudata_10dsubset %>%
                           filter(day %in% c("-1", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"))) + #Filter to just include timepoints that will be plotted
  scale_x_continuous(breaks = c(0:10),
                     limits = c(-1, 11),
                     minor_breaks = c(-.5:10.5))
save_plot(filename = "results/figures/5_days_PEG_cfu_10d.png", cfu_10d, base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
#Statistical annotation labels based on adjusted kruskal-wallis p-values for all timepoints:
x_annotation <- cfu_kruskal_wallis_adjust %>%
  filter(p.value.adj <= 0.05) %>%
  pull(day)
y_position <- max(cfudata$avg_cfu)
label <- kw_label(cfu_kruskal_wallis_adjust)
#Plot of cfu data for all days of the experiment
cfu <- plot_cfu_data(cfudata %>%
                       filter(day %in% c("-1", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "15", "20", "25", "30"))) +
  scale_x_continuous(breaks = c(0, 5, 10, 15, 20, 25, 30),
                     limits = c(-1, 31),
                     minor_breaks = c(-.5:10.5, 11.5, 12.5, 14.5, 15.5, 19.5, 20.5, 24.5, 25.5, 29.5, 30.5))+ #only show grey lines separating days on days with statistically sig points
  theme(legend.position = "none")
save_plot(filename = "results/figures/5_days_PEG_cfu.png", cfu, base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
#Plot of just the cfu data for the WMR group (5-day PEG + 10-day recovery)
wmr_cfu <- cfudata %>%
  filter(group == "WMR") %>%
  filter(day %in% c("-1", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "15", "20", "25", "30"))
# Median CFU per day for the WMR group, used for the trend line
wmr_median_summary <- wmr_cfu %>%
  group_by(group, day) %>%
  summarize(median_avg_cfu = median(avg_cfu, na.rm = TRUE))
wmr_cfu_plot <- ggplot(NULL) +
  geom_point(wmr_cfu, mapping = aes(x = day, y = avg_cfu, color= group, fill = group), alpha = 0.7, size = 1.5, show.legend = FALSE, position = position_dodge(width = 0.6)) +
  geom_line(wmr_median_summary, mapping = aes(x = day, y = median_avg_cfu, group = group, color = group), alpha = 1, size = 1.5) +
  scale_colour_manual(name=NULL,
                      values=color_scheme,
                      breaks=color_groups,
                      labels=color_labels)+
  labs(x = "Days post-challenge", y = "CFU/g feces") +
  scale_y_log10(breaks = c(100, 10^3, 10^4, 10^5, 10^6, 10^7, 10^8, 10^9, 10^10),
                labels = c('10^2', '10^3', '10^4', '10^5', '10^6', '10^7', '10^8', '10^9', '10^10')) + # scale y axis log10 and label 10^x
  geom_hline(yintercept = 100, linetype=2) + #Line that represents our limit of detection when quantifying C. difficile CFU by plating
  geom_text(x = 11, y = 104, color = "black", label = "LOD") + #Label for line that represents our limit of detection when quantifying C. difficile CFU by plating
  # NOTE(review): this theme() call is overridden by theme_classic() on the
  # next line (complete themes replace prior theme settings); text size is
  # effectively set by the later theme() call below
  theme(text = element_text(size = 16))+ # Change font size for entire plot
  theme_classic()+
  scale_x_continuous(breaks = c(0, 5, 10, 15, 20, 25, 30),
                     limits = c(-1, 31),
                     minor_breaks = c(-.5:10.5, 11.5, 12.5, 14.5, 15.5, 19.5, 20.5, 24.5, 25.5, 29.5, 30.5))+ #only show grey lines separating days on days with statistically sig points
  theme(legend.position = "none",
        axis.text.y = element_markdown(size = 12),
        legend.key= element_rect(colour = "transparent", fill = "transparent"),
        text = element_text(size = 16), # Change font size for entire plot
        axis.ticks.x = element_blank(),
        panel.grid.minor.x = element_line(size = 0.4, color = "grey"))#Add gray lines to clearly separate symbols by days)
save_plot(filename = "results/figures/5_days_PEG_cfu_WMR.png", wmr_cfu_plot, base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
#Make a second version where each unique mouse = a different colored line
#Plot of just the cfu data for the WMR group (5-day PEG + 10-day recovery)
# NOTE(review): wmr_cfu_mice is computed but does not appear to be used by
# the plotting code below (the plot maps unique_mouse_id from wmr_cfu
# directly) — confirm before removing
wmr_cfu_mice <- cfudata %>%
  filter(group == "WMR") %>%
  filter(day %in% c("-1", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "15", "20", "25", "30")) %>%
  filter(!duplicated(unique_mouse_id)) %>% #Remove duplicate mouse ids
  mutate(unique_mouse_id = factor(unique_mouse_id, levels = unique(as.factor(unique_mouse_id)))) %>%
  pull(unique_mouse_id)
# Legend setup: mouse id -> colour -> short numeric label
color_mice <- c("5_M5", "6_M5", "7_M5", "8_M5", "9_M5", "10_M5", "7_M6", "9_M6", "10_M6", "11_M6", "12_M6")
color_mice_values <- c("#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c",
                       "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#9B870C")
color_mice_labels <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10 , 11)
wmr_cfu_plot_indiv <- wmr_cfu %>%
  ggplot()+
  geom_line(mapping = aes(x = day, y = avg_cfu, group = unique_mouse_id, color = unique_mouse_id), alpha = 1, size = 1.5) +
  scale_colour_manual(name="Mouse",
                      values=color_mice_values,
                      breaks=color_mice,
                      labels=color_mice_labels)+
  labs(x = "Days post-challenge", y = "CFU/g feces") +
  scale_y_log10(breaks = c(100, 10^3, 10^4, 10^5, 10^6, 10^7, 10^8, 10^9, 10^10),
                labels = c('10^2', '10^3', '10^4', '10^5', '10^6', '10^7', '10^8', '10^9', '10^10')) + # scale y axis log10 and label 10^x
  geom_hline(yintercept = 100, linetype=2) + #Line that represents our limit of detection when quantifying C. difficile CFU by plating
  geom_text(x = 11, y = 104, color = "black", label = "LOD") + #Label for line that represents our limit of detection when quantifying C. difficile CFU by plating
  theme(text = element_text(size = 16))+ # Change font size for entire plot
  theme_classic()+
  scale_x_continuous(breaks = c(0, 5, 10, 15, 20, 25, 30),
                     limits = c(-1, 31),
                     minor_breaks = c(-.5:10.5, 11.5, 12.5, 14.5, 15.5, 19.5, 20.5, 24.5, 25.5, 29.5, 30.5))+ #only show grey lines separating days on days with statistically sig points
  guides(colour = guide_legend(nrow = 1))+#Limit number of rows in the legend
  theme(legend.position = "bottom",
        axis.text.y = element_markdown(size = 12),
        text = element_text(size = 16), # Change font size for entire plot
        axis.ticks.x = element_blank(),
        legend.key= element_rect(colour = "transparent", fill = "transparent"),
        panel.grid.minor.x = element_line(size = 0.4, color = "grey"))#Add gray lines to clearly separate symbols by days)
save_plot(filename = "results/figures/5_days_PEG_cfu_WMR_indiv.png", wmr_cfu_plot_indiv, base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
#Weight change plot----
#Dataframe of weight data for days -15 through 10 of the experiment:
weight_subset <- weightdata %>%
  filter(day < 12)
weight_kruskal_wallis_adjust <- weight_kruskal_wallis_adjust %>%
  mutate(day = as.integer(day))
#Statistical annotation labels based on adjusted kruskal-wallis p-values for first 10 days of experiment:
# NOTE(review): as above, x_annotation / y_position / label are read
# implicitly by the plotting helpers from code/utilities.R
x_annotation <- weight_kruskal_wallis_adjust %>%
  filter(day < 12) %>% #Only include results through day 10 for this plot
  filter(p.value.adj <= 0.05) %>%
  pull(day)
x_annotation == sig_weight_days #All the days where weight change significantly varied across groups of mice occured within the first 10 days post-infection
y_position <- max(weightdata$weight_change)
label <- kw_label(weight_kruskal_wallis_adjust %>%
                    filter(day < 12)) #Only include results through day 10 for this plot
weight_subset <- weight_subset %>%
  mutate(day = as.integer(day))
weightdata <- weightdata %>%
  mutate(day = as.integer(day))
#Plot of weight data for days -15 through 10 of the experiment:
weight_subset_plot <- plot_weight(weight_subset) +
  scale_x_continuous(breaks = c(-15, -10, -5, 0, 5, 10),
                     limits = c(-16, 11),
                     minor_breaks = c(-15.5:10.5))
save_plot(filename = "results/figures/5_days_PEG_weight_subset.png", weight_subset_plot, base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
#Plots with just the median lines for each group
v2_weight_subset <- plot_weight_medians(weight_subset) +
  scale_x_continuous(breaks = c(-15, -10, -5, 0, 5, 10),
                     limits = c(-16, 11),
                     minor_breaks = c(-15.5:10.5)) #only show grey lines separating days on days with statistically sig points)
save_plot(filename = "results/figures/5_days_PEGv2_weight_subset.png", v2_weight_subset, base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
#Plot of weight data for all days of the experiment:
#Note don't need to redo statistical annotations since there were no significant differences past 10 day post-infection
#Statistical annotation labels based on adjusted kruskal-wallis p-values for first 10 days of experiment:
x_annotation <- weight_kruskal_wallis_adjust %>%
  filter(p.value.adj <= 0.05) %>%
  pull(day)
x_annotation == sig_weight_days #All the days where weight change significantly varied across groups of mice occured within the first 10 days post-infection
y_position <- max(weightdata$weight_change)
label <- kw_label(weight_kruskal_wallis_adjust)
weight_plot <- plot_weight(weightdata) +
  scale_x_continuous(breaks = c(-15, -10, -5, 0, 5, 10, 15, 20, 25, 30),
                     limits = c(-16, 31),
                     minor_breaks = c(-15.5:10.5, 11.5, 12.5, 14.5, 15.5, 19.5, 20.5, 24.5, 25.5, 29.5, 30.5)) #only show grey lines around days on days with points)
save_plot(filename = "results/figures/5_days_PEG_weight.png", weight_plot , base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
#Plots with just the median lines for each group
v2_weight_plot <- plot_weight_medians(weightdata) +
  scale_x_continuous(breaks = c(-15, -10, -5, 0, 5, 10, 15, 20, 25, 30),
                     limits = c(-16, 31),
                     minor_breaks = c(-15.5:10.5, 11.5, 12.5, 14.5, 15.5, 19.5, 20.5, 24.5, 25.5, 29.5, 30.5)) #only show grey lines around days on days with points)
save_plot(filename = "results/figures/5_days_PEG_weight_median.png", v2_weight_plot, base_height = 4, base_width = 8.5, base_aspect_ratio = 2)
|
7016fafd824a92bcdd4b7f324ac0379bc9067b90
|
79d6b96e230925ba6e51041f50c65c2549f7dd2a
|
/advr/results/get_em_depletion.R
|
7d6c40b47304c11ce23adf3edf7ccad962369a96
|
[] |
no_license
|
inspktrgadget/gadget-simulations
|
c87873eb791b98e9df22fd569962599c2155f14c
|
84f0723f9d8601f9b9ce469d77af4997c9fbbf79
|
refs/heads/master
| 2020-03-27T16:46:05.576400
| 2019-01-14T18:22:38
| 2019-01-14T18:22:38
| 146,804,964
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,164
|
r
|
get_em_depletion.R
|
library(parallel)
library(gadgetSim)
library(tidyverse)
# Join path components with "/" (like file.path() for scalar parts).
paste_path <- function(...) {
  parts <- list(...)
  paste(parts, collapse = "/")
}
# Root of the simulation experiment; estimation models (EMs) live in
# age_* subdirectories, operating models (OMs) under op_mods/.
model_dir <-"~/gadget/simulations/advr"
models <- dir(model_dir)[grep("^age_", dir(model_dir))]
spp <- c("cod", "capelin", "flatfish")
scenarios <- c("fish_down", "flat_msy", "two_way_trip")
# For every model x species x scenario cell: find finished bootstrap
# replicates, read StockStd output for each EM replicate and for the OM,
# then compute terminal-year biomass and depletion (year 120 / year 40)
# for mature ages, tagged with the OM "truth" (rep == 0).
depletion <-
lapply(models, function(i) {
lapply(spp, function(j) {
lapply(scenarios, function(k) {
# Progress logging: prints the cell path when this function exits.
on.exit(print(paste_path(i,j,k)))
em_path <-
paste_path(model_dir, i, j, k)
om_path <- paste_path("~/gadget/simulations/advr/op_mods", j, k)
# not all replicants finished; get the ones that did
# A replicate counts as finished when reps/repN/WGTS/params.final
# exists and is non-empty; vapply returns the rep number or 0.
finito_reps <-
vapply(1:100, function(x) {
reps_path <- paste_path(em_path, "reps", sprintf("rep%s", x))
wgts <- dir(reps_path)
if ("WGTS" %in% wgts) {
wgts_path <- paste_path(reps_path, "WGTS")
params_ <- dir(wgts_path)
if ("params.final" %in% params_) {
params <-
readLines(paste_path(em_path, "reps",
sprintf("rep%s/WGTS/params.final", x)))
if (length(params) > 0) {
return(x)
} else {
return(0)
}
} else {
return(0)
}
} else {
return(0)
}
}, numeric(1))
reps <- finito_reps[finito_reps > 0]
# read in StockStd for each EM and the OM
# NOTE(review): get_stock_std() comes from the gadgetSim package;
# failures for a replicate yield NULL and are dropped below.
em_std <-
parallel::mclapply(reps, function(x) {
tmp <-
tryCatch({
get_stock_std(main = sprintf("reps/rep%s/WGTS/main.final", x),
params_file = sprintf("reps/rep%s/WGTS/params.final",
x),
fit_dir = sprintf("reps/rep%s/WGTS", x),
path = em_path)
}, error = function(e) return(NULL))
if (!is.null(tmp)) {
tmp <- mutate(tmp, rep = x)
}
return(tmp)
})
if (!all(vapply(em_std, is.null, logical(1)))) {
em_std <-
em_std %>%
do.call("rbind", .) %>%
mutate_all(funs(as.numeric))
om_std <- get_stock_std(path = om_path)
# group to total numbers
em_summary <-
em_std %>%
group_by(rep, year, step, area, age) %>%
summarize(number = sum(number),
biomass = sum(number * weight)) %>%
ungroup()
# OM summary gets rep = 0 so it can be identified as "truth" later.
om_summary <-
om_std %>%
group_by(year, step, area, age) %>%
summarize(number = sum(number),
biomass = sum(number * weight)) %>%
ungroup() %>%
mutate(rep = 0)
# bind the two data.frames together to calculate depletion and
# terminal year biomass
# Age at maturity per species; only mature ages enter the biomass.
mat_age <-
switch(j,
cod = 4,
capelin = 2,
flatfish = 8)
bm_depletion <-
rbind(om_summary, em_summary) %>%
filter(year %in% c(40, 120),
step == 4,
age >= mat_age) %>%
group_by(rep, year) %>%
summarize(biomass = sum(biomass)) %>%
summarize(terminal_bm = biomass[year == 120],
depletion = biomass[year == 120] / biomass[year == 40]) %>%
mutate(true_terminal_bm = terminal_bm[rep == 0],
true_depletion = depletion[rep == 0]) %>%
filter(rep > 0) %>%
mutate(model = i, spp = j, scenario = k)
return(bm_depletion)
} else {
return(NULL)
}
}) %>% do.call("rbind", .)
}) %>% do.call("rbind", .)
}) %>%
do.call("rbind", .) %>%
select(model, spp, scenario, rep:true_depletion)
write.csv(depletion, file = paste_path(model_dir, "results", "depletion.csv"),
quote = FALSE, row.names = FALSE)
|
1d2b5977897947c25e73693ad3c6f1abab6dd97e
|
b32dd1f1c3b674c1c558570dd0319590694dee34
|
/R/rmdse.R
|
c9c6556b62fc47ae57338fee697e01634b92846c
|
[] |
no_license
|
cran/valmetrics
|
1595ca14df527d868302c7105861b94a49599986
|
9964419ce0f640ce71fe2ff7dbe8d0c1048350be
|
refs/heads/master
| 2023-02-21T04:20:10.619811
| 2021-01-13T14:30:02
| 2021-01-13T14:30:02
| 334,226,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
r
|
rmdse.R
|
#' @title rmdse
#' @description Calculates the Root median squared error (RMdSE) from
#' observed and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Root median squared error (RMdSE).
#' @details Interpretation: smaller is better.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' rmdse(o=obs, p=pred)
#'
#' @export
rmdse <- function(o, p) {
  squared_errors <- (p - o)^2
  sqrt(stats::median(squared_errors))
}
|
d07e4f2cfbcae2694bdfef109e0c993a9d343c07
|
35a1e73c2ab2cae03f51f10baa4bd6b488d95a84
|
/man/smbinning.custom.Rd
|
a2a1d98989eafec3e73fcd2cbf228a9594a51a90
|
[] |
no_license
|
mauropelucchi/smbinning
|
30b13e331dfecafdb7eaf46ac475fecb5859760b
|
83f402391dd31ddd4a1144dd815150ae625e98ec
|
refs/heads/master
| 2021-01-17T11:20:48.103845
| 2016-06-20T10:20:35
| 2016-06-20T10:20:35
| 66,144,448
| 0
| 0
| null | 2016-08-20T11:58:59
| 2016-08-20T11:58:59
| null |
UTF-8
|
R
| false
| true
| 2,432
|
rd
|
smbinning.custom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smbinning.R
\name{smbinning.custom}
\alias{smbinning.custom}
\title{Customized Binning}
\usage{
smbinning.custom(df, y, x, cuts)
}
\arguments{
\item{df}{A data frame.}
\item{y}{Binary response variable (0,1). Integer (\code{int}) is required.
Name of \code{y} must not have a dot. Name "default" is not allowed.}
\item{x}{Continuous characteristic. At least 10 different values. Value \code{Inf} is not allowed.
Name of \code{x} must not have a dot.}
\item{cuts}{Vector with the cutpoints selected by the user. It does not have a default so user must define it.}
}
\value{
The command \code{smbinning.custom} generates an object containing the necessary info and utilities for binning.
The user should save the output result so it can be used
with \code{smbinning.plot}, \code{smbinning.sql}, and \code{smbinning.gen}.
}
\description{
It gives the user the ability to create customized cutpoints. In Scoring Modeling, the analysis
of a characteristic usually begins with intervals with the same length to understand its distribution,
and then intervals with the same proportion of cases to explore bins with a reasonable sample size.
}
\examples{
# Package loading and data exploration
library(smbinning) # Load package and its data
data(chileancredit) # Load smbinning sample dataset (Chilean Credit)
str(chileancredit) # Quick description of the data
table(chileancredit$FlagGB) # Tabulate target variable
# Training and testing samples (Just some basic formality for Modeling)
chileancredit.train=subset(chileancredit,FlagSample==1)
chileancredit.test=subset(chileancredit,FlagSample==0)
# Remove exclusions from chileancredit dataset
TOB.train=
subset(chileancredit,(FlagSample==1 & (FlagGB==1 | FlagGB==0)), select=TOB)
TOB.test=
subset(chileancredit,(FlagSample==0 & (FlagGB==1 | FlagGB==0)), select=TOB)
# Custom cutpoints using percentiles (20\% each)
TOB.Pct20=quantile(TOB.train, probs=seq(0,1,0.2), na.rm=TRUE)
TOB.Pct20.Breaks=as.vector(quantile(TOB.train, probs=seq(0,1,0.2), na.rm=TRUE))
Cuts.TOB.Pct20=TOB.Pct20.Breaks[2:(length(TOB.Pct20.Breaks)-1)]
# Package application and results
result=
smbinning.custom(df=chileancredit.train,
y="FlagGB",x="TOB",cuts=Cuts.TOB.Pct20) # Run and save
result$ivtable # Tabulation and Information Value
}
|
9aad1dfcdf72468b4949d8410227c2c6af03591e
|
689502b334bef9165faa3ced8ba609479bebad31
|
/data_code/input_data.R
|
2e652d73504e684393dbbbb66d682b3aae8580d6
|
[] |
no_license
|
NutchaW/forecast_calibration
|
9f2833bba35acd3d388b31ee75c846a7cb6951e7
|
118f64d12dcc795820e7d691db0f14a0120b7cdc
|
refs/heads/master
| 2022-05-21T09:09:28.697779
| 2022-03-20T17:14:39
| 2022-03-20T17:14:39
| 219,627,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,464
|
r
|
input_data.R
|
library(tidyverse)
library(cdcfluview)
library(Matrix)
library(data.table)
library(reshape2)
library(ranger)
library(xtable)
library(here)
library(stats)
library(mixtools)
library(grid)
library(FluSight)
library(rmutil)
library(R.utils)
# combine forecast and truth files per season
# separate all dis data into different target/season
all_season <-
c(
"2010/2011",
"2011/2012",
"2012/2013",
"2013/2014",
"2014/2015",
"2015/2016",
"2016/2017",
"2017/2018",
"2018/2019"
)
targets <- c("1 wk ahead", "2 wk ahead", "3 wk ahead", "4 wk ahead")
load("./data_transformed/model_list.rda")
allDis <- read.csv("./data_transformed/rawdata/allDis.csv")
# Overwrites the model_list loaded above: keep 27 component models plus
# two ensembles (positions 29 and 31) found in the forecast file.
model_list <- unique(as.character(allDis$model_name))[c(1:27, 29, 31)]
componentModel_list <- model_list[1:27]
# loop to write all files by model/target/season
# Output layout: rawdata/<YY>/t<target index>/<model>.csv where <YY> is
# the second year of the season string (substr positions 8-9).
for (i in 1:length(all_season)) {
for (j in 1:length(targets)) {
for (k in 1:length(model_list)) {
componentMod <- allDis %>%
dplyr::filter(
target == targets[j],
season == all_season[i],
model_name == model_list[k],
calendar_week %in% c(43:53, 1:18)
)
# make sub folders for season-target
write.csv(
componentMod,
file = paste0(
"./data_transformed/rawdata/",
substr(all_season[i], 8, 9),
"/t",
j,
"/",
model_list[k],
".csv"
),
quote = FALSE,
row.names = FALSE
)
}
}
}
## cdf
# For each season (11..19 = 2010/11..2018/19), target and model: read the
# raw per-model file and add a cumulative-probability column (cdf) by
# accumulating bin values within each location x week, ordered by bin.
for (i in 11:19) {
for (j in 1:4) {
for (k in 1:length(model_list)) {
model_data <-
read.csv(paste0(
"./data_transformed/rawdata/",
i,
"/t",
j,
"/",
model_list[k],
".csv"
)) %>%
dplyr::mutate(
calendar_week = factor(calendar_week, levels = c(43:53, 1:18)),
location = factor(location, levels = unique(location)),
bin_start_incl = as.numeric(as.character(bin_start_incl))
) %>%
dplyr::group_by(location, calendar_week) %>%
dplyr::arrange(bin_start_incl) %>%
dplyr::mutate(cdf = cumsum(value)) %>%
ungroup() %>%
data.frame()
# cdf<-get_cdf(model_data)
# cdfdata<-data.frame(cbind(model_data,cdf))
write.csv(
model_data,
file = paste0(
"./data_transformed/cdf/",
i,
"/t",
j,
"/",
model_list[k],
".csv"
),
quote = FALSE,
row.names = FALSE
)
}
}
}
# calculate PIT by target/season/model/week
# Truth table: observed bin per location/target/season/week; restricted
# to the season weeks used throughout this script.
truths <- read.csv("scores/target-multivals.csv") %>%
dplyr::filter(Target %in% targets, Calendar.Week %in% c(43:53, 1:18))
truths$Season <- as.character(truths$Season)
truths$Location <- as.character(truths$Location)
truths$Target <- as.character(truths$Target)
names(truths) <- tolower(colnames(truths))
# Build two join keys per truth row: the observed bin start, and the bin
# one step (0.1) below it, floored at 0 (used for the "pre-PIT" join).
temp <- truths %>%
dplyr::mutate(
calendar_week = calendar.week,
valid.bin_start_incl = round(as.numeric(valid.bin_start_incl), 1),
valid.bin_lag = ifelse((as.numeric(
valid.bin_start_incl
) - 0.1) < 0,
round(0, 1),
round((
as.numeric(valid.bin_start_incl) - 0.1
), 1))
) %>%
dplyr::select(-c("year", "model.week", "calendar.week"))
# Reorder columns by position so location/target/season come first.
temp <- temp[, c(2:3, 1, 5, 4, 6)]
# For each season/target/model: attach the model's cdf at the observed
# bin (PIT) and at the lagged bin (pre-PIT) and write both out.
for (i in 11:19) {
for (j in 1:4) {
for (k in 1:length(model_list)) {
temp1 <- temp %>%
dplyr::filter(target == targets[j],
season == all_season[i - 10]) %>%
dplyr::arrange(factor(location, levels = unique(location)),
factor(calendar_week, levels = c(43:53, 1:18)))
model_data <-
read.csv(paste0(
"./data_transformed/cdf/",
i,
"/t",
j,
"/",
model_list[k],
".csv"
)) %>%
dplyr::select(
"location",
"target",
"season",
"calendar_week",
"bin_start_incl",
"cdf",
"model_name"
)
pitdata <- temp1 %>%
dplyr::select(-"valid.bin_lag") %>%
dplyr::left_join(
model_data,
by = c(
"valid.bin_start_incl" = "bin_start_incl",
"location" = "location",
"target" = "target",
"season" = "season",
"calendar_week" = "calendar_week"
)
)
pitdata_lag <- temp1 %>%
dplyr::select(-"valid.bin_start_incl") %>%
dplyr::left_join(
model_data,
by = c(
"valid.bin_lag" = "bin_start_incl",
"location" = "location",
"target" = "target",
"season" = "season",
"calendar_week" = "calendar_week"
)
)
# Column 7 is model_name; move it first before the key columns.
write.csv(
pitdata[, c(7, 1:6)],
file = paste0(
"./data_transformed/pit/",
i,
"/t",
j,
"/",
model_list[k],
".csv"
),
quote = FALSE,
row.names = FALSE
)
write.csv(
pitdata_lag[, c(7, 1:6)],
file = paste0(
"./data_transformed/pit_lag/",
i,
"/t",
j,
"/",
model_list[k],
".csv"
),
quote = FALSE,
row.names = FALSE
)
}
}
}
# get combined pits
# Read one per-model PIT csv and drop the "valid.bin_start_incl" column,
# which was only needed for the cdf join above.
#
# @param filename Path to a csv written by the PIT loop.
# @return data.frame with all columns except "valid.bin_start_incl".
fix_files <- function(filename) {
  # Base-R column drop: the original attached dplyr with require() inside
  # the function (an anti-pattern) just to call select().
  dat <- read.csv(filename)
  dat[setdiff(names(dat), "valid.bin_start_incl")]
}
# Read one per-model pre-PIT csv and drop the "valid.bin_lag" column,
# which was only needed for the lagged cdf join above.
#
# @param filename Path to a csv written by the pit_lag loop.
# @return data.frame with all columns except "valid.bin_lag".
fix_files2 <- function(filename) {
  # Base-R column drop: the original attached dplyr with require() inside
  # the function (an anti-pattern) just to call select().
  dat <- read.csv(filename)
  dat[setdiff(names(dat), "valid.bin_lag")]
}
## get all model files without the metadata
some_files <-
list.files("./data_transformed/pit/",
full.names = TRUE,
recursive = TRUE)
## extract point estimates and put into one dataframe
tmp <- lapply(some_files, FUN = fix_files)
tmp.df <- do.call(rbind.data.frame, tmp)
write.csv(tmp.df,
file = "./data_transformed/pit/combined_pit.csv",
quote = FALSE,
row.names = FALSE)
# Attach log scores to the PIT values by model/location/target/season/week.
sc <- read.csv("./data_transformed/scores_rename.csv") %>%
dplyr::filter(Epiweek %in% c(43:53, 1:18), Model %in% model_list)
pt <- read.csv("./data_transformed/pit/combined_pit.csv")
sc$Model <- as.character(sc$Model)
pt$model_name <- as.character(pt$model_name)
combined_pitscore <- pt %>%
dplyr::left_join(
sc,
by = c(
model_name = "Model",
location = "Location",
target = "Target",
season = "Season",
calendar_week = "Epiweek"
)
) %>%
dplyr::select(-"Multi.bin.score")
# transform
library(data.table)
library(reshape2)
# Wide format: one cdf column per model.
# NOTE(review): -"Model.Week" appears twice in the select below; the
# duplicate is harmless but redundant.
comb2 <- combined_pitscore %>%
dplyr::select(-"Model.Week", -"Year", -"Model.Week", -"Score") %>%
dcast(target + location + season + calendar_week ~ model_name, value.var =
"cdf") %>%
dplyr::select(-c("target-based-weights", "equal-weights"))
write.csv(comb2,
file = "./data_transformed/pit_modelcol.csv",
quote = FALSE,
row.names = FALSE)
# repeat the same but for prepit
# Same pipeline as the PIT section above, but using the lagged-bin
# ("pre-PIT") files and fix_files2 (drops valid.bin_lag instead).
some_files <-
list.files("./data_transformed/pit_lag/",
full.names = TRUE,
recursive = TRUE)
## extract point estimates and put into one dataframe
tmp <- lapply(some_files, FUN = fix_files2)
tmp.df <- do.call(rbind.data.frame, tmp)
write.csv(tmp.df,
file = "./data_transformed/pit_lag/combined_prepit.csv",
quote = FALSE,
row.names = FALSE)
sc <- read.csv("./data_transformed/scores_rename.csv") %>%
dplyr::filter(Epiweek %in% c(43:53, 1:18), Model %in% model_list)
prept <- read.csv("./data_transformed/pit_lag/combined_prepit.csv")
sc$Model <- as.character(sc$Model)
prept$model_name <- as.character(prept$model_name)
combined_prepitscore <- prept %>%
dplyr::left_join(
sc,
by = c(
model_name = "Model",
location = "Location",
target = "Target",
season = "Season",
calendar_week = "Epiweek"
)
) %>%
dplyr::select(-"Multi.bin.score")
# transpose
# Here the ensembles are removed by filtering rows before dcast, rather
# than dropping columns afterwards as in the PIT section.
comb3 <- combined_prepitscore %>%
dplyr::select(-"Model.Week", -"Year", -"Score") %>%
dplyr::filter(model_name != "equal-weights",
model_name != "target-based-weights") %>%
dcast(target + location + season + calendar_week ~ model_name, value.var =
"cdf")
write.csv(comb3,
file = "./data_transformed/prepit_modelcol.csv",
quote = FALSE,
row.names = FALSE)
# get cdf combined for each season
# Read one per-model cdf csv.
#
# @param filename Path to a csv file.
# @return The file's contents as a data.frame.
cdf_trans <- function(filename) {
  # The original copied the data.frame into a second variable and used an
  # explicit return(); both were redundant.
  read.csv(filename)
}
## get all model files without the metadata
# Concatenate every per-model cdf file for a season into one cdf<YY>.csv.
for (i in 11:19) {
some_files <-
list.files(
paste0("./data_transformed/cdf/", i, "/"),
full.names = TRUE,
recursive = TRUE
)
tmp <- lapply(some_files, FUN = cdf_trans)
tmp.df <- do.call(rbind.data.frame, tmp)
write.csv(
tmp.df,
file = paste0("./data_transformed/cdf/", i, "/cdf", i, ".csv"),
quote = FALSE,
row.names = FALSE
)
}
# Explode each season's combined cdf file into wide (one column per model)
# cdf and pdf tables, dropping the two ensemble models.
# NOTE: the original wrapped this body in an extra `for (j in 1:4)` loop,
# but `j` was never used inside it, so identical files were read and
# rewritten four times per season; that redundant inner loop has been
# removed (the files produced are byte-identical).
for (i in 11:19) {
  # Wide cdf table: one cumulative-probability column per model.
  data1 <-
    read.csv(file = paste0("./data_transformed/cdf/", i, "/cdf", i, ".csv")) %>%
    dplyr::select(-"forecast_week", -"year", -"value") %>%
    reshape2::dcast(
      location + target + bin_start_incl + bin_end_notincl + calendar_week + season ~ model_name,
      value.var = "cdf"
    ) %>%
    dplyr::select(-"target-based-weights", -"equal-weights")
  write.csv(
    data1,
    file = paste0("./data_transformed/cdf/", i, "/reduced_cdf", i, ".csv"),
    quote = FALSE,
    row.names = FALSE
  )
  # Wide pdf table: one bin-probability column per model.
  data2 <-
    read.csv(file = paste0("./data_transformed/cdf/", i, "/cdf", i, ".csv")) %>%
    dplyr::select(-"forecast_week", -"year", -"cdf") %>%
    reshape2::dcast(
      location + target + bin_start_incl + bin_end_notincl + calendar_week + season ~ model_name,
      value.var = "value"
    ) %>%
    dplyr::select(-"target-based-weights", -"equal-weights")
  write.csv(
    data2,
    file = paste0("./data_transformed/cdf/", i, "/reduced_pdf", i, ".csv"),
    quote = FALSE,
    row.names = FALSE
  )
}
# make empirical pdf table for each year
# Join the wide per-model pdf values onto the truth table at the
# observed bin, producing one row per truth with a column per model.
truths$valid.bin_start_incl <-
as.numeric(as.character(truths$valid.bin_start_incl))
# pdf paths
pdf_paths <-
paste0("./data_transformed/cdf/",
11:19,
"/reduced_pdf",
11:19,
".csv")
data1 <- map_dfr(pdf_paths, function(path) {
read.csv(file = path)
})
data2 <- truths %>%
left_join(
data1,
by = c(
season = "season",
target = "target",
location = "location",
calendar.week = "calendar_week",
valid.bin_start_incl = "bin_start_incl"
)
)
write.csv(
data2,
file = paste0("./data_transformed/pdf_modelcol.csv"),
quote = FALSE,
row.names = FALSE
)
# make empirical cdf table for each year
# Same join as above, but against the wide cdf tables.
cdf_paths <-
paste0("./data_transformed/cdf/",
11:19,
"/reduced_cdf",
11:19,
".csv")
data3 <- map_dfr(cdf_paths, function(path) {
read.csv(file = path)
})
data4 <- truths %>%
left_join(
data3,
by = c(
season = "season",
target = "target",
location = "location",
calendar.week = "calendar_week",
valid.bin_start_incl = "bin_start_incl"
)
)
write.csv(
data4,
file = paste0("./data_transformed/cdf_modelcol.csv"),
quote = FALSE,
row.names = FALSE
)
|
17a2c5b2baae6575fe5d73adb481f56fe57583d6
|
ff9cfee1133e8e6388ab4840adb8924333c01b28
|
/course4/week2_proj/plot1.R
|
af4646c16323cbefad6d7cbe7ce99d013d3eedb1
|
[] |
no_license
|
yorckzhang/data_science
|
e764ebcfa1e26f39232b9f8424658f447c438cc3
|
df91885bb4006d1d904baf1c75320394987154a4
|
refs/heads/master
| 2020-05-18T14:57:50.120747
| 2019-05-10T14:42:25
| 2019-05-10T14:42:25
| 184,484,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 554
|
r
|
plot1.R
|
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged --
# they clobber the user's session and hard-code a machine-specific path.
rm(list=ls())
setwd("C:/Projects/coursera/")
# "?" marks missing values in this dataset.
Data <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings="?")
# Keep only the two analysis days (1-2 Feb 2007), then free the full table.
Plot_Data <- Data[Data$Date %in% c("1/2/2007","2/2/2007"),]
rm(Data)
## set the date and time variable
SetTime <-strptime(paste(Plot_Data$Date, Plot_Data$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
Plot_Data <- cbind(Plot_Data, SetTime)
## Plot 1
# Histogram of global active power, written to plot1.png (480x480).
png("plot1.png",width=480,height=480)
hist(Plot_Data$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
fa4ad5763da112b70e8a5aae101f68a3a959c3db
|
109bcf46635b47dbc60133f6770f21fe4c3301b7
|
/scripts/Shiny_App/app_data_preprocess.R
|
e0032db3a5eaac39e63b4bcd927db9bd8ecc53e2
|
[] |
no_license
|
MiyabiIshihara/JMT-Stream-Crossing-Risk
|
baba1141728eec5c73d939519c18e48b8c3e2f03
|
4bbc3313c79abcf87502e0e3815ea0632780cd47
|
refs/heads/master
| 2020-04-27T04:00:47.705798
| 2019-06-21T19:08:09
| 2019-06-21T19:08:09
| 174,041,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,385
|
r
|
app_data_preprocess.R
|
require(leaflet)
require(sp)
require(sf)
require(raster)
require(htmltools)
require(lubridate)
require(stringr)
require(tidyverse)
source("scripts/googledrive_read_write_functions.R")
# Get vector data from google drive to plot ###############
# Actual JMT trail vector and entry trails
jmt_trail <- load_rgdal_from_googledrive("1RJPvOwVY1mcfjKh1eVybZHfWuIX1jE23", "Trail Edges")
# Reproject to WGS84 lon/lat (what leaflet expects) and tag each edge.
jmt_trail <- spTransform(jmt_trail, "+proj=longlat +ellps=GRS80 +no_defs") %>% st_as_sf() %>%
st_transform(crs = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0") %>%
mutate(edge_id = row_number())
#Subset trails to main JMT or access trails
jmt_access <- jmt_trail[which(jmt_trail$Type == "Access"),]
saveRDS(jmt_access, "scripts/Shiny_App/Data/jmt_access_trails.rds")
jmt_main <- jmt_trail[which(jmt_trail$Type == "Main"),]
saveRDS(jmt_main, "scripts/Shiny_App/Data/jmt_main_trail.rds")
# JMT Stream crossing points are extensively processed in crossings_add_field.R, this just for interactive use with other layers created, edited here
jmt_crossings <- load_rgdal_from_googledrive("1klB4m5GQVIv7sVaZnZzbbomnkqpfDih2")
jmt_crossings_df <- jmt_crossings@data %>%
mutate(crossing_id = row_number())
# Watersheds upstream of stream crossings
jmt_watersheds <- load_rgdal_from_googledrive("1yB7ww8YgWCAOHjeuCa4Xu6vIZPthO3aD")
jmt_watersheds <- spTransform(jmt_watersheds, "+proj=longlat +ellps=GRS80 +no_defs") %>%
st_as_sf() %>%
st_transform(crs = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
saveRDS(jmt_watersheds, "scripts/Shiny_App/Data/jmt_watersheds.rds")
# Trail routes for drop down list
trail_routes <- load_csv_from_googledrive("1L4JVtsC1Jz9_T7wFtxNnDyYq5R65Lou_")
trailheads <- unique(trail_routes$`entry trailhead`)
saveRDS(trail_routes, "scripts/Shiny_App/Data/trail_routes.rds")
saveRDS(trailheads, "scripts/Shiny_App/Data/jmt_trailheads.rds")
# Time series of SWE in watersheds and associated risk for time series viz
# Get risk model object FIRST: swe_change_coef is referenced inside the
# mutate() below, so it must exist before the pipeline runs. (In the
# original script it was loaded AFTER its first use, which fails when the
# script is executed top-to-bottom.)
load("scripts/risk_model_object.Rdata")
swe_change_coef <- crossing_difficulty_lin_mod_adj$coefficients["peak_SWE_melt"]
#Get time series of snodas data in JMT watersheds from GoogleDrive
jmt_swe_2015 <- load_csv_from_googledrive("1Py8svcq-YMBUlhKYJR_HnfVGf41dkFyp")
jmt_swe_2016 <- load_csv_from_googledrive("15vpYxV75PpQWGEmhEoTkSvkx07vp5H5P")
jmt_swe_2017 <- load_csv_from_googledrive("1bjlEhCZ9ghEoLtUXmjiOJQKkxKDOk4zS")
jmt_swe_2018 <- load_csv_from_googledrive("1OwX0bljESiE6UCDO3m6J_NlRGlkPpQ9a")
# Long format, one row per watershed-day; melt = day-over-day SWE loss,
# converted to a non-negative risk contribution via the model coefficient.
jmt_swe15_18 <- rbind(jmt_swe_2015, jmt_swe_2016, jmt_swe_2017, jmt_swe_2018) %>%
  gather("watershed", "SWE", -Date) %>%
  mutate(Year = year(Date),
         year_day = yday(Date)) %>%
  group_by(watershed) %>%
  mutate(last_swe = dplyr::lag(SWE, order_by = watershed),
         SWE_melt = -(SWE - last_swe),
         melt_risk = if_else(SWE_melt * swe_change_coef > 0, SWE_melt * swe_change_coef, 0)) %>%
  ungroup() %>%
  left_join(jmt_crossings_df %>%
              select(JMT_Cross, crossing_id),
            by = c("watershed" = "JMT_Cross"))
# Risk buckets: thresholds at the 90th and 99th percentile of melt risk
# across all watershed-days; scores 1 (low) to 3 (high).
mid_risk <- quantile(jmt_swe15_18$melt_risk, 0.90, na.rm = T)
hi_risk <- quantile(jmt_swe15_18$melt_risk, 0.99, na.rm = T)
jmt_swe15_18 <- jmt_swe15_18 %>%
  mutate(risk_score = case_when(melt_risk <= mid_risk ~ 1,
                                melt_risk >= mid_risk & melt_risk <= hi_risk~ 2,
                                melt_risk >= hi_risk ~ 3))
saveRDS(jmt_swe15_18, "scripts/Shiny_App/Data/swe_risk_2015_2018.rds")
#Download and process snowdepth rasters ##########
# Get data frame of googledrive ids for all the snowdepth rasters
snodas_gd_depth <- drive_ls(as_id("1_IxGme096iUx6JJQY0nhONKSzBWaiI3k"))
#JMT extent
# Bounding box (lon -120..-118, lat 36..38) used to crop every raster.
jmt_clipper <- extent(-120, -118, 36, 38)
# Fetch the SNODAS snow-depth geotiff whose Drive filename contains the
# given date string.
get_snodas_tif <- function(date) {
  matching_rows <- grep(date, snodas_gd_depth$name)
  file_id <- snodas_gd_depth$id[matching_rows]
  load_geotiff_from_googledrive(file_id)
}
# Download the snow-depth raster for `date` and crop it to the JMT extent.
snodas_jmt_clip <- function(date) {
  crop(get_snodas_tif(date), jmt_clipper)
}
# Daily clipped snow-depth rasters for each season, saved as a list of
# rasters per year (one download + crop per day).
# 2015 dates snow depth
dates_2015 <- seq(ymd("2015-01-01"), ymd("2015-12-31"), "days")
snow_depth_2015_jmt <- lapply(dates_2015, snodas_jmt_clip)
saveRDS(snow_depth_2015_jmt, "scripts/Shiny_App/Data/snow_depth_2015.rds")
# 2016 dates snow depth
dates_2016 <- seq(ymd("2016-01-01"), ymd("2016-12-31"), "days")
snow_depth_2016_jmt <- lapply(dates_2016, snodas_jmt_clip)
saveRDS(snow_depth_2016_jmt, "scripts/Shiny_App/Data/snow_depth_2016.rds")
# 2017 dates snow depth
dates_2017 <- seq(ymd("2017-01-01"), ymd("2017-12-31"), "days")
snow_depth_2017_jmt <- lapply(dates_2017, snodas_jmt_clip)
saveRDS(snow_depth_2017_jmt, "scripts/Shiny_App/Data/snow_depth_2017.rds")
# 2018 dates snow depth
dates_2018 <- seq(ymd("2018-01-01"), ymd("2018-12-31"), "days")
snow_depth_2018_jmt <- lapply(dates_2018, snodas_jmt_clip)
saveRDS(snow_depth_2018_jmt, "scripts/Shiny_App/Data/snow_depth_2018.rds")
#Get precip data ########
# Get data frame of googledrive ids for all the precipitation rasters
# (the original comment said "snowdepth"; this folder holds PRISM precip).
gd_precip <- drive_ls(as_id("16TsvJGV4YNxG6EER2RokJTOZMFurhsX4"))
# Download the PRISM precipitation raster for `date` and clip it to the
# JMT extent. (The original comment said "snowdepth"; this handles precip.)
precip_jmt_clip <- function(date) {
  # Drive folder names embed the date with no dashes, e.g. "20150101".
  stamp <- gsub("-", "", as.character(date))
  folder_id <- gd_precip$id[grepl(stamp, gd_precip$name)]
  # Progress log kept from the original implementation.
  print(c(stamp, folder_id))
  raw_raster <- load_raster_from_bil_googledrive(folder_id)
  crop(raw_raster, jmt_clipper)
}
# 2015 precipitation
# Daily clipped precip rasters for 2015, reusing dates_2015 from above.
precip_2015_jmt <- lapply(dates_2015, precip_jmt_clip)
saveRDS(precip_2015_jmt, "scripts/Shiny_App/Data/prism_ppt_jmt_clip_2015.rds")
|
460c56bbe5db4d3ae5392fdc374e048bb9e261ca
|
901a2f662b1f80664bb2428fe40fde150a646da6
|
/man/fast_probability_isolation_offset.Rd
|
e71f525bb52874cbfe49bfd8f27971e28d231b02
|
[
"MIT"
] |
permissive
|
mrc-ide/hermione
|
a6a181a25473982b0a179b167f72cc4eba787dbd
|
638ff206a30a2ef772bfc3b3e6820e69653162c0
|
refs/heads/master
| 2022-11-29T16:18:17.897313
| 2020-08-13T12:25:35
| 2020-08-13T12:25:35
| 261,799,063
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 944
|
rd
|
fast_probability_isolation_offset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likelihood_fast.R
\name{fast_probability_isolation_offset}
\alias{fast_probability_isolation_offset}
\title{Probability of serial interval when primary case has been
isolated and in the presence of asymptomatic infectiousness}
\usage{
fast_probability_isolation_offset(poffset, nu, offset, inf_params)
}
\arguments{
\item{poffset}{Probability calculation with offset only}
\item{nu}{onset to isolation of primary case}
\item{offset}{days of asymptomatic infectiousness}
\item{inf_params}{named list of arguments for
infectious period distribution.}
}
\value{
numeric probability of observing the given serial interval
with the given parameters of infectious period and incubation
period distribution
}
\description{
Probability of serial interval when primary case has been
isolated and in the presence of asymptomatic infectiousness
}
\author{
Sangeeta Bhatia
}
|
eaed8c774266dd50e1bf138714a6f06f9eb4f59a
|
7529e6a16d53e8baeb784cac7cc7fdd5ff2e43b3
|
/code/supplemental_plots.R
|
0e54e91ccec4302f6d8c02c53a0faf7e9b93081c
|
[] |
no_license
|
wehr-lab/SaundersWehr-JASA2019
|
21a99f8e914eddf4c95a28e6bc7f70e1d631a61e
|
ecf993a1e0ca382050b991f1987f8ab7f3fa404d
|
refs/heads/master
| 2021-06-30T03:41:18.811731
| 2020-10-01T03:29:51
| 2020-10-01T03:29:51
| 169,143,169
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,228
|
r
|
supplemental_plots.R
|
library(ggplot2)
library(plyr)
alldat <- readRDS("../data/alldata.RData")
# Keep training steps below 15 and number trials consecutively per mouse.
alldat <- alldat[alldat$step<15,]
alldat <- ddply(alldat, .(mouse), mutate,
ntrial = seq(length(correct)))
# sort factor
# Reorder the mouse factor by total trial count (most trials first).
mouse_ntr <- ddply(alldat, .(mouse), summarize,
ntr = max(ntrial))
alldat$mouse <- factor(alldat$mouse, levels(alldat$mouse)[order(mouse_ntr$ntr, decreasing=TRUE)])
# Collapse raw step codes 5-13 into display steps 1-5 (pairs of raw
# steps map to one display step; 13 maps to 5).
alldat[alldat$step %in% c(5, 6 ),]$step <- 1
alldat[alldat$step %in% c(7, 8 ),]$step <- 2
alldat[alldat$step %in% c(9, 10),]$step <- 3
alldat[alldat$step %in% c(11,12),]$step <- 4
alldat[alldat$step %in% c(13 ),]$step <- 5
# One line per mouse; the /11 term vertically offsets overlapping lines.
g.offset <- ggplot(alldat, aes(x=ntrial, y=step+(as.numeric(mouse))/11, color=mouse))+
geom_line()+
labs(x="N Trials", y="Training Step")+
scale_color_manual(values=c("#00A1DE", "#FF6319", "#6CBE45", "#FCCC0A", "#D27DCA",
"#EE352E", "#00A1DE", "#FCCC0A", "#6CBE45", "#00A1DE"))+
theme(
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank()
)
# NOTE(review): absolute user-specific output path; portable only on the
# original author's machine.
ggsave("/Users/jonny/Dropbox/Lab Self/Writing/Speech - Behavior/JASA Resubmit/figures/training_timeseries_offset.pdf",
g.offset, width=6.5, height=2, units="in")
|
e75303e3a38b3292ff3cdf11588cf142a3e41ee4
|
470568d2da48921557315eb496e96d6afe7f95c1
|
/weather_stuff.R
|
fa550dfb1cfe61c810eeb4887c653c4b45b02741
|
[] |
no_license
|
docwarren/R_course
|
5bb66b781d73afd0127c55210e4262e4b6dea6b8
|
577eaf2767722bead2040286dcf8f5933e3e649d
|
refs/heads/master
| 2016-09-05T08:47:53.171559
| 2014-01-08T11:53:23
| 2014-01-08T11:53:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
r
|
weather_stuff.R
|
#Basic file to start
# Eva adding a test comment
# Eva's comment 2
# Return the weather data for the given date.
# Bug fix: the original `return weatherdata(date)` is a parse error in R --
# return() requires parentheses; here the call is simply the function's
# last expression. Also switched `=` to the conventional `<-` assignment.
getweather <- function(date) {
  # NOTE(review): "camweather" is a course-specific package, not on CRAN.
  require("camweather")
  weatherdata(date)
}
plotweather <- function(date){
# Plots the temperature, then the sunlight, for the given date's weather
# data. (Base graphics: the second plot() replaces the first on screen.)
# NOTE(review): depends on the non-CRAN course package "camweather".
require("camweather")
wet <- weatherdata(date)
plot(wet$Temp)
plot(wet$Sun)
}
|
df0d5a1612123a08e76befd0c8d72ad379a6f05f
|
9c906ecae3febbd781c2d1ca4f5ec366085242a1
|
/shiny_app/server.R
|
2c3181a9aeec6ad282940e392a8455605f779f8b
|
[] |
no_license
|
lynguyennl/Coursera-capstone-project
|
bd0f0c8717e383fa656a34f0157461850da8609f
|
13c3834b9c00a81ccdbb426261639a85f48db54e
|
refs/heads/master
| 2023-01-01T07:09:54.629820
| 2020-10-24T10:43:15
| 2020-10-24T10:43:15
| 306,838,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,196
|
r
|
server.R
|
library(shiny)
library(DT)
library(tm)
library(ngram)
library(stringr)
library(reshape)
library(stylo)
library(dplyr)
library(plotly)
#setwd("C:/Users/Ly Nguyen/Learning/Courses/Johns Hopkins - R/10. Capstone Project/capstone_for submit")
#source("./script/w4_model.R", local=TRUE)
source("w4_model.R", local=TRUE)
# Shiny server: word prediction plus ngram-frequency table and plot.
# Depends on textPredict() (sourced from w4_model.R) and the bigram/
# trigram/tetragram/pentagram frequency tables it loads.
shinyServer(function(input, output) {
# Reactive: top-3 next-word predictions for the typed text.
textPrediction <- reactive({
text <- input$text
# Shadows the textPredict function name locally; the assignment's value
# (the prediction vector) is what the reactive returns.
textPredict <- textPredict(text)
})
output$guess_1 <- renderText(textPrediction()[1])
output$guess_2 <- renderText(textPrediction()[2])
output$guess_3 <- renderText(textPrediction()[3])
# ngram selector for the plot tab.
output$ngramP <- renderUI({
selectInput("ngram_p", "Select an ngram:", choices = c("bigram", "trigram", "tetragram", "pentagram"), selected = "bigram")
})
# Reactive: top-50 rows of the chosen ngram table, reshaped to two
# columns (ngram label, freq) for plotting.
data <- reactive({
if (is.null(input$ngram_p)) {
return(data.frame(ngram=NA,freq=NA))
} else {
if (input$ngram_p == "bigram") {
df <- data.frame(head(bigram,50))
df$ngram <- paste0(df$ngrams$first," ",df$ngrams$second)
df <- df[c("ngram", "freq")]
}
if (input$ngram_p == "trigram"){
df <- head(trigram,50)
df$ngram <- paste0(df$ngrams$first," ",df$ngrams$second," ",df$ngrams$third)
df <- df[c("ngram", "freq")]
}
if (input$ngram_p == "tetragram"){
df <- head(tetragram,50)
df$ngram <- paste0(df$ngrams$first," ",df$ngrams$second," ",df$ngrams$third," ",df$ngrams$fourth)
df <- df[c("ngram", "freq")]
}
if (input$ngram_p == "pentagram"){
df <- head(pentagram,50)
df$ngram <- paste0(df$ngrams$first," ",df$ngrams$second," ",df$ngrams$third," ",df$ngrams$fourth," ",df$ngrams$fifth)
df <- df[c("ngram", "freq")]
}
return(df)
}
}) 
# ngram selector for the table tab (independent of the plot selector).
output$ngramT <- renderUI({
selectInput("ngram_t", "Select a ngram:", choices = c('bigram','trigram','tetragram','pentagram'), selected = "bigram")
})
# Reactive: same reshaping as data(), driven by the table-tab selector.
data2 <- reactive({
if (is.null(input$ngram_t)){
return()
} else {
if (input$ngram_t == 'bigram') {
df <- head(bigram,50)
df$ngram <- paste0(df$ngrams$first," ",df$ngrams$second)
df <- df[c("ngram", "freq")]
}
if (input$ngram_t == 'trigram'){
df <- head(trigram,50)
df$ngram <- paste0(df$ngrams$first," ", df$ngrams$second," ", df$ngrams$third)
df <- df[c("ngram", "freq")]
}
if (input$ngram_t == 'tetragram'){
df <- head(tetragram,50)
df$ngram <- paste0(df$ngrams$first," ",df$ngrams$second," ",df$ngrams$third," ",df$ngrams$fourth)
df <- df[c("ngram", "freq")]
}
if (input$ngram_t == 'pentagram'){
df <- head(pentagram,50)
df$ngram <- paste0(df$ngrams$first," ",df$ngrams$second," ",df$ngrams$third," ",df$ngrams$fourth," ",df$ngrams$fifth)
df <- df[c("ngram", "freq")]
}
return(df)
}
})
output$ngramtable = DT::renderDataTable({
data2()
})
# Interactive frequency plot of the first input$n_terms ngrams.
output$ngramPlot <- renderPlotly({
theme_set(theme_bw())
g <- ggplot(data()[1:input$n_terms,],
aes(x=reorder(ngram,-freq), y=freq),
fill=freq) +
geom_point(stat="identity", position="identity",
aes(colour = freq, text=paste("ngram: ",ngram, "\nfrequency: ", freq))) +
scale_color_continuous(low="#89CFEF", high="#1C2951") +
labs(title="ngram",
y="frequency",
x ="") +
theme(axis.text.x = element_text(angle=90, vjust = 1, hjust = 1))
ggplotly(g, tooltip = "text")
})
})
|
dd3208579030ba514c505c201f210734c337b2be
|
5ef7d7f680e2bd21cedc2ba0f722fe7da6b473ee
|
/plot2.R
|
b53f79b228bf4b12d262043ac6042c6e231bb41a
|
[] |
no_license
|
rebeccalauyl/ExData_Project2
|
a1bda8948a4640257571401ea8658c285a7a6f3c
|
71cbd49867e1cffec92e6db271d912c0301a0c0b
|
refs/heads/master
| 2021-01-21T10:59:16.394392
| 2017-03-01T03:14:48
| 2017-03-01T03:14:48
| 83,505,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,045
|
r
|
plot2.R
|
# Download and unpack the NEI dataset on first run; re-unzip if the two
# rds files are missing but the archive is present.
if(!file.exists("./PM25.zip")){
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileUrl, destfile="./PM25.zip", method="curl")
unzip(zipfile="./PM25.zip", exdir="./")
}
if(!file.exists("summarySCC_PM25.rds") || !file.exists("Source_Classification_Code.rds")){
unzip(zipfile="./PM25.zip", exdir="./")
}
##read data into R
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Q2: Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
#(fips=="24510")from 1999 to 2008? Use the base plotting system to make a plot
#answering this question.
Baltimore <- subset (NEI, fips == "24510")
# Sum emissions per year for Baltimore only.
Baltimore_total_PM25 <- tapply(Baltimore$Emissions, Baltimore$year, sum)
png(filename="./plot2.png", width=680,height=480,units="px")
plot(names(Baltimore_total_PM25), Baltimore_total_PM25, type="l", col="blue",
xlab="Year",
ylab="Total PM2.5 Emission",
main="Total PM2.5 Emissions From Baltimore")
dev.off()
|
7e76272ace07b7ea2e53d4b66c61a75b116ef6e8
|
a1405253a430168fe1b412defcec2d70271d65bd
|
/Re3-readability/src/get_readability_stats/readability-snippets-r/snippet21.R
|
fc0670ae13b5a281da1d9cf04422672e63f2ba59
|
[
"MIT"
] |
permissive
|
Re3-reproducibility-readability/21-05-Re3
|
4ccb75f671ff8c2c905cd823068c99de9f5aacc2
|
8c35318fabcffa2fb83a588b91b45d0cdd5aa04e
|
refs/heads/main
| 2023-04-20T23:18:33.066671
| 2021-05-12T18:07:28
| 2021-05-12T18:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
snippet21.R
|
# NOTE(review): this is a fragment -- 'cluster', 'fit', and 'B' are defined
# outside this snippet (it resembles rms bootstrap-validation internals;
# confirm against the original source before reuse).
# Pre-allocate bootstrap index storage: one column per resample for an
# ordinary bootstrap, or a list when cluster bootstrapping.
if(missing(cluster)) b.ind <- matrix(NA, nrow = nrow(fit$x), ncol = B)
else b.ind <- list()
# Detect Cox-type fits either by class or via the fitFunction field
# carried by older Design-style fit objects.
coxcph <- inherits(fit, "coxph") || inherits(fit, "cph") ||
(length(fit$fitFunction) && any(c("cph", "coxph") %in%
fit$fitFunction))
nfit <- fit$fitFunction[1]
if (!length(nfit))
nfit <- setdiff(oldClass(fit), "Design")[1]
# Case weights are not supported for Cox or quantile-regression fits.
if (length(fit$weights) && (coxcph || nfit[1] == "Rq"))
stop("does not handle weights")
# The fit must store its design matrix and response (x = TRUE, y = TRUE).
if (!length(X <- fit$x) | !length(Y <- fit$y))
stop("you did not specify x=TRUE and y=TRUE in the fit")
|
923a5cadd21c3c4a63a33b6123ea59d73064db4f
|
4326d8921a2380f77b74383415531712d8b20aad
|
/2. Official Clustering.R
|
7b6e490f3dfb26d6cf16086d84d2ab858bd5ab46
|
[] |
no_license
|
jckailun/MA-Thesis-Project
|
d1391a93f83343460073f446c8c6887b8948a61a
|
bea53c58a01949a5c38963c85f252c648f95b99d
|
refs/heads/master
| 2023-07-03T06:37:00.463250
| 2021-08-04T18:54:51
| 2021-08-04T18:54:51
| 206,391,653
| 0
| 0
| null | 2021-08-04T16:27:37
| 2019-09-04T18:57:22
|
R
|
UTF-8
|
R
| false
| false
| 1,749
|
r
|
2. Official Clustering.R
|
# Clustering with PAM #
# NOTE(review): unconditional install.packages() on every run is an
# anti-pattern; consider guarding with requireNamespace().
install.packages("ClusterR"); library(ClusterR)
install.packages("cluster"); library(cluster)
# Gower dissimilarities for each cohort; columns 12 and 14-17 are treated
# as symmetric binary variables. X1.cc / X2.cc are the complete-case
# cohort data sets (defined elsewhere in the project).
gower_dist_1 <- daisy(X1.cc, metric = "gower", type = list(symm = c(12, 14:17)))
gower_dist_2 <- daisy(X2.cc, metric = "gower", type = list(symm = c(12, 14:17)))
diss1 = as.matrix(gower_dist_1)
diss2 = as.matrix(gower_dist_2)
# Per-cohort lists of data, dissimilarity matrices, and dummy-variable
# matrices; clust() below reads these from the global environment.
X = list(X1.c, X2.c)
diss = list(diss1, diss2)
dummy.1 = cbind(D11, D21, D31, D41)
dummy.2 = cbind(D12, D22, D32, D42)
dummy = list(dummy.1, dummy.2)
# Cluster one cohort with PAM on its precomputed Gower dissimilarity matrix.
#
# Args:
#   cohort: index (1 or 2) selecting which cohort's objects to use.
#   G:      number of clusters; G == 1 means "no clustering" (one group).
#   BS:     if TRUE, use the bootstrap objects (diss.bs, X.bs, dummy.bs)
#           instead of the originals.
#
# Returns a list of: per-cluster data subsets, per-cluster dummy subsets,
# the cluster-membership vector, clusinfo (PAM cluster summary bound with
# average silhouette widths; NULL when G == 1), and the medoids (NULL when
# G == 1).
#
# NOTE: reads diss/X/dummy (or their .bs counterparts) from the enclosing
# environment.
clust <- function(cohort, G, BS = FALSE) {
  if (BS == FALSE) {
    dis <- diss[[cohort]]
    dat <- X[[cohort]]
    dum <- dummy[[cohort]]
  }
  if (BS == TRUE) {
    dis <- diss.bs
    # the quantity diss.bs comes from X.bs.c - which includes acgrd
    dat <- X.bs[[cohort]]
    dum <- dummy.bs[[cohort]]
  }
  n <- nrow(dis)
  # BUG FIX: default the PAM-only outputs. Previously, when G == 1, 'C'
  # and 'D' were never assigned, so the return list silently picked up the
  # base functions base::C and stats::D instead of cluster information.
  C <- NULL
  D <- NULL
  if (G == 1) {
    grp <- list(dat)
    d <- list(dum)
    clus <- rep(1, n)
  } else {
    aaa <- pam(dis, G, diss = TRUE)
    clus <- aaa$clustering
    A <- aaa$clusinfo
    B <- aaa$silinfo$clus.avg.widths
    C <- cbind(A, B)
    D <- aaa$medoids
    # returns the estimated cluster membership of each observation
    idx <- rep(list(0), G)
    grp <- rep(list(0), G)
    d <- rep(list(0), G)
    for (j in 1:G) {
      for (i in 1:n) {
        idx[[j]] <- c(idx[[j]], ifelse(clus[i] == j, i, 0))
      }
      # drop the 0 placeholders, keep only indices belonging to cluster j
      idx[[j]] <- idx[[j]][idx[[j]] != 0]
      grp[[j]] <- dat[idx[[j]], ]
      d[[j]] <- dum[idx[[j]], ]
    }
  }
  list(grp, d, clus, clusinfo = C, D)
}
# Re-bind the global inputs used by clust(); these duplicate the
# assignments made above (redundant but harmless).
X = list(X1.c, X2.c)
diss = list(diss1, diss2)
dummy = list(dummy.1, dummy.2)
# Chosen number of clusters per cohort: 3 for cohort 1, 7 for cohort 2.
H = c(3, 7)
# Write each cohort's PAM cluster summary (clusinfo bound with average
# silhouette widths) to a CSV file.
for (cohort in 1:2){
G = H[cohort]
info = clust(cohort, G, BS = FALSE)$clusinfo
info = as.data.frame(info)
write.csv(info, paste("Cluster Info cohort", cohort, ".csv"))
}
|
65a59d4e2c809504560cc43875ad7369f3a8a617
|
9ce74db0e4b87c354de26815a61d22feb53c0c3f
|
/RProgramming/ProgAssignment3-data/rankhospital.R
|
6df64704a945f9277478fd7b78e28b3178454524
|
[] |
no_license
|
cgoods94/datasciencecoursera
|
b0078ef185011fdc4f68a915c39e5ef52b3ba59e
|
6777479758b641e4f89542c88d07acf273ddf8fc
|
refs/heads/master
| 2020-12-02T21:02:56.986162
| 2017-10-02T18:11:33
| 2017-10-02T18:11:33
| 96,248,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,243
|
r
|
rankhospital.R
|
## Return the hospital at a given mortality rank within a state.
##
## Args:
##   state:   two-letter state abbreviation (must appear in the data).
##   outcome: one of "heart attack", "heart failure", "pneumonia".
##   num:     rank to return: "best", "worst", or a numeric rank.
##
## Reads "outcome-of-care-measures.csv" from the working directory.
## Hospitals are ordered by 30-day death rate, ties broken alphabetically
## by hospital name; returns NA when num exceeds the number of hospitals
## with available data.
rankhospital <- function (state, outcome, num = "best") {
  ## Read outcome data
  measures <- read.csv("outcome-of-care-measures.csv",
                       na.strings = "Not Available",
                       stringsAsFactors = FALSE)

  ## Check that state and outcome are valid
  if (!state %in% unique(measures$State)) {
    stop("invalid state")
  }
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop("invalid outcome")
  }

  ## Column index of the 30-day death rate for the requested outcome
  rate.col <- switch(outcome,
                     "heart attack" = 11,
                     "heart failure" = 17,
                     "pneumonia" = 23)

  ## Keep name, state, and rate; drop rows with missing rates
  measures <- measures[, c(2, 7, rate.col)]
  names(measures) <- c("Name", "State", "30DayDR")
  measures <- na.omit(measures[measures$State == state, ])

  ## Order by death rate, breaking ties alphabetically by hospital name
  ranked <- measures[order(measures$`30DayDR`, measures$Name), ]

  if (num == "best") {
    head(ranked$Name, 1)
  } else if (num == "worst") {
    tail(ranked$Name, 1)
  } else if (num > nrow(ranked)) {
    NA
  } else {
    ranked$Name[[num]]
  }
}
|
ac6c5943fd62456361ea59a9e6bda4bdf19a8f60
|
52a4b6ceb632f91db48365a3c9f0fbd1788b03ca
|
/runepidemic.R
|
6d22f376712dea205886d15ea0aa46da23860219
|
[] |
no_license
|
jhcho0915/R-and-R-Studio
|
f95539f9a1fe1191ac4850677ad6b704784b82f0
|
0b0767fd69715c3a5133f832c5c15c9bffb8c030
|
refs/heads/master
| 2020-04-07T18:14:27.635090
| 2018-11-21T20:34:33
| 2018-11-21T20:34:33
| 158,602,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,154
|
r
|
runepidemic.R
|
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; avoid
# this in shared scripts.
rm(list=ls())
# Set up initial condition
N0 <- 1000000 # total population
In0 <- 10 # initial infectives
S0 <- N0-In0 # initially, everyone else is susceptible
R0 <- 0 # initially, nobody has recovered
# Named state vector consumed by deSolve::ode() and epimodel() below
IC <- c(S=S0, In=In0, R=R0)
tmax = 2000 # number of years to run
# Parameter values (units: per year)
parameters <- c(d=0.02, # per capita birth and death rate
b=120, # infection transmission rate
r=100 # recovery rate
)
# SIR-with-demography right-hand side in the form expected by deSolve::ode().
#
# Args:
#   t:          current time (unused; required by the ode() interface).
#   state:      named vector with S (susceptibles), In (infectives),
#               R (recovered).
#   parameters: named vector with d (per-capita birth/death rate),
#               b (transmission rate), r (recovery rate).
#
# Returns a one-element list holding c(dS, dIn, dR). The three rates sum
# to zero, so the total population N is conserved.
epimodel <- function(t, state, parameters) {
  v <- as.list(c(state, parameters))
  N <- v[["S"]] + v[["In"]] + v[["R"]]
  # new infections per unit time (frequency-dependent transmission)
  incidence <- v[["b"]] * v[["S"]] * v[["In"]] / N
  dS  <- v[["d"]] * N - incidence - v[["d"]] * v[["S"]]
  dIn <- incidence - v[["r"]] * v[["In"]] - v[["d"]] * v[["In"]]
  dR  <- v[["r"]] * v[["In"]] - v[["d"]] * v[["R"]]
  list(c(dS, dIn, dR))
}
# Integrate the system from t = 0 to tmax with yearly output points.
times <- seq(0, tmax, by = 1) # times to solve the system for
library(deSolve)
# Solve the system
traj <- ode(y = IC, times = times, func = epimodel, parms = parameters,
atol = 1e-7, rtol = 1e-5)
traj <- as.data.frame(traj)
# Plot infectives over time; the y-axis is capped at 100, which hides the
# initial epidemic spike and shows the long-run behavior instead.
plot(traj$time, traj$In, type="l",
xlab="time (years)", ylab="number infected",
ylim=c(0,100))
|
7bbf6b03fb32d4c769ba572fbee2340d0c15dde1
|
5d690f159266b2c0f163e26fcfb9f9e17a0dc541
|
/epiR/R/epi.sssimpleestc.R
|
b54258586ca8b79991d50fc3098253becc8d6a64
|
[] |
no_license
|
albrizre/spatstat.revdep
|
3a83ab87085895712d7109c813dcc8acb55493e9
|
b6fc1e73985b0b7ed57d21cbebb9ca4627183108
|
refs/heads/main
| 2023-03-05T14:47:16.628700
| 2021-02-20T01:05:54
| 2021-02-20T01:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
epi.sssimpleestc.R
|
## Sample size to estimate a continuous mean by simple random sampling.
##
## Args:
##   N:           population size (a finite-population correction applies).
##   xbar:        anticipated mean of the variable of interest.
##   sigma:       anticipated standard deviation of the variable.
##   epsilon.r:   maximum acceptable relative error.
##   nfractional: if FALSE (default), round the sample size up to an integer.
##   conf.level:  confidence level for the estimate.
##
## Returns the required sample size (Levy & Lemeshow, equation 3.15, p. 74).
epi.sssimpleestc <- function(N = 1E+06, xbar, sigma, epsilon.r, nfractional = FALSE, conf.level = 0.95){
  # Two-sided standard-normal critical value for the given confidence level
  z <- qnorm(1 - (1 - conf.level) / 2)
  # Relative variance of the variable to be estimated (var / mean^2)
  rel.var <- sigma^2 / xbar^2
  # Levy and Lemeshow equation 3.15
  n.raw <- (z^2 * N * rel.var) / (z^2 * rel.var + (N - 1) * epsilon.r^2)
  if (nfractional) {
    n.raw
  } else {
    ceiling(n.raw)
  }
}
|
72f42c6ad220721e3c7bfdac3ba30cc53769f33e
|
0cc578a2f2bc613488bdb22eaa5f95d091ddfde1
|
/R/bowl_samples.R
|
7c58d9a8811c1779ec95ce3c2d91ec17839a4098
|
[] |
no_license
|
ycphs/moderndive
|
3291293a79c9da025cae5b75903e50f97e0a863f
|
f3201403abb2db30f05ecd89673b00b95c29d3d6
|
refs/heads/master
| 2021-04-09T11:59:08.307400
| 2018-03-15T22:19:00
| 2018-03-15T22:19:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
bowl_samples.R
|
#' Sampling from a bowl of balls
#'
#' Counts of red balls in 10 samples of size n = 50 balls drawn from
#' \url{https://github.com/moderndive/moderndive/blob/master/data-raw/sampling_bowl.jpeg}
#'
#' @format A data frame with 10 rows, each representing a different group of
#' students' sample of size n = 50, and 5 variables:
#' \describe{
#' \item{group}{Group name}
#' \item{red}{Number of red balls sampled}
#' \item{white}{Number of white balls sampled}
#' \item{green}{Number of green balls sampled}
#' \item{n}{Total number of balls sampled}
#' }
#' @examples
#' library(dplyr)
#' library(ggplot2)
#'
#' # Compute proportion red
#' bowl_samples <- bowl_samples %>%
#' mutate(prop_red = red / n)
#'
#' # Plot sampling distributions
#' ggplot(bowl_samples, aes(x = prop_red)) +
#' geom_histogram(binwidth = 0.05) +
#' labs(x = expression(hat(p)), y = "Number of samples",
#' title = "Sampling distribution of p_hat based on 10 samples of size n = 50")
"bowl_samples"
|
75f86128e5a3e47865831c77d760b19da51ee463
|
8531cb0526ca547b2ecd1f0a86683c4d3328577b
|
/Code/ZMB/GAMS/Extract_GAMS_results.R
|
408cd4bbb7737d414ceba5d8f901e5b3a33bcce3
|
[] |
no_license
|
shaohuizhang/Global-to-local-GLOBIOM
|
8b1c5042d58a5dfc03e4515d3bafefa033977ec7
|
85687084068bdf05081cbb868f063de3d65289a0
|
refs/heads/master
| 2020-03-21T07:40:36.927153
| 2018-03-07T15:04:11
| 2018-03-07T15:04:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,868
|
r
|
Extract_GAMS_results.R
|
#'========================================================================================================================================
#' Project: Global-to-local GLOBIOM
#' Subject: Script to extract GAMS results and map
#' Author: Michiel van Dijk
#' Contact: michiel.vandijk@wur.nl
#'========================================================================================================================================
### PACKAGES
if(!require(pacman)) install.packages("pacman")
# Key packages
p_load("tidyverse", "readxl", "stringr", "car", "scales", "RColorBrewer", "rprojroot")
# Spatial packages
p_load("rgdal", "ggmap", "raster", "rasterVis", "rgeos", "sp", "mapproj", "maptools", "proj4", "gdalUtils", "sf")
# Additional packages
p_load("countrycode", "gdxrrw", "plotKML", "viridis")
### SET ROOT AND WORKING DIRECTORY
root <- find_root(is_rstudio_project)
setwd(root)
### SET DATAPATH
source(file.path(root, "Code/get_dataPath.r"))
### SOURCE FUNCTIONS
source(file.path(root, "Code/Support/functions.r"))
### LINK GAMS LIBRARIES
# igdx() points gdxrrw at the local GAMS installation; GAMSPath is set by
# one of the sourced scripts above.
igdx(GAMSPath)
### R SETTINGS
options(scipen=999) # suppress scientific notation
options("stringsAsFactors"=FALSE) # ensures that character data that is loaded (e.g. csv) is not turned into factors
options(digits=4)
options(max.print=1000000) # more is printed on screen
### SET COUNTRY
# Defines iso3c_sel / country_sel used in the paths below
source("Code/ZMB/Set_country.R")
### LOAD DATA
# Grid
grid <- raster(file.path(dataPath, paste0("Data/", iso3c_sel, "/Processed/Maps/grid/grid_30sec_r_", iso3c_sel, ".tif")))
names(grid) <- "gridID"
# Load results from land use allocation model
file <- file.path(dataPath, paste0("Model/", iso3c_sel, "/Results/min_entropy_ZMB_2000.gdx"))
lu_raw <- rgdx.param(file, "Palloc", names = c("gridID", "sy", "value"), compress = T) %>%
mutate(sy = as.character(sy),
gridID = as.numeric(as.character(gridID)))
# Administrative boundaries (GAUL)
adm <- readRDS(file.path(dataPath, paste0("Data/", iso3c_sel, "/Processed/Maps/gaul/adm_2000_", iso3c_sel, ".rds")))
# City information
data(world.cities)
cities <- filter(world.cities, country.etc == country_sel, capital == 1)
# Lc
lc <- readRDS(file.path(paste0(dataPath, "/Data/", iso3c_sel, "/Processed/GAMS/lc_2000_", iso3c_sel, ".rds")))
# Sy
lu_sy <- readRDS(file.path(dataPath, paste0("Data/", iso3c_sel, "/Processed/GAMS/lu_sy_2000_", iso3c_sel, ".rds")))
# Priors
priors <- readRDS(file.path(paste0(dataPath, "/Data/", iso3c_sel, "/Processed/GAMS/priors_2000_", iso3c_sel, ".rds")))
# urban mask
#urban_mask <- readRDS(file.path(dataPath, "Data/MWI/Processed/Spatial_data/urban_mask_MWI.rds"))
### MAPS
# Add NA to missing values to show where there is no crop cover
lu <- lu_raw %>%
spread(sy, value, fill = NA)
# Add grid cell coordinates
grid_df <- as.data.frame(rasterToPoints(grid))
lu <- lu %>%
left_join(grid_df,.) %>%
gather(sy, value, -gridID, -x, -y)
# Add short_name and system
# sy codes are crop-system identifiers: first 4 chars = crop short name,
# last char = system (e.g. S/I) -- TODO confirm coding against GLOBIOM docs
lu <- lu %>%
mutate(short_name = substr(sy, 0, 4),
system = str_sub(sy, start = -1))
# Plot function: raster map of allocated area for one crop/system combo,
# with adm boundaries and capital-city labels overlaid.
plot_crop_raster_f <- function(crop, sys){
df <- filter(lu, short_name %in% crop, system %in% sys)
p = ggplot() +
geom_raster(data = df, aes(x = x, y = y, fill = value)) +
scale_fill_viridis(na.value = "light grey", direction = -1, labels = comma) +
geom_path(data = adm, aes (x = long, y = lat, group = group), colour = "black") +
facet_wrap(~short_name) +
coord_quickmap() +
labs(x="", y="", size = "#HH", fill = "Crop area (ha)") +
theme_classic() +
theme(line = element_blank(),
axis.text = element_blank(),
strip.background = element_rect(colour = NA, fill = NA)) +
geom_point(data = cities, aes(x = long, y = lat), col = "black") +
geom_text(data = cities, aes(x = long, y = lat, label = name), size = 4)
p
}
plot_crop_raster_f("maiz", "S")
plot_crop_raster_f("whea", "I")
plot_crop_raster_f("cass", "S")
# Plot all
# NOTE(review): 'df' is created here but the ggplot below uses 'lu';
# this looks like a bug (data = lu should probably be data = df) -- confirm.
df <- filter(lu, short_name %in% c("maiz", "whea", "cass"))
ggplot() +
geom_raster(data = lu, aes(x = x, y = y, fill = value)) +
scale_fill_viridis(na.value = "light grey", direction = -1, labels = comma) +
geom_path(data = adm, aes (x = long, y = lat, group = group), colour = "black") +
facet_wrap(~short_name, scales = "free") +
#coord_quickmap() +
coord_equal() +
labs(x="", y="", size = "#HH", fill = "Crop area (ha)") +
theme_classic() +
theme(line = element_blank(),
axis.text = element_blank(),
strip.background = element_rect(colour = NA, fill = NA)) +
guides(fill = F)
# SF MAP NEED GEOM_SF
# # Prepare map
# grid_sf <- grid_sf %>%
# left_join(.,land_use)
#
# # Map
# ggplot(grid_sf) + geom_sf(aes(fill = maiz)) +
# scale_fill_gradient(low="grey",high="red", na.value = "blue")
#
#
# grid_sf2 <- grid_sf %>%
# gather(short_name, value, -geometry, -gridID) %>%
# filter(short_name %in% c("maiz", "grou", "cass"))
# ggplot(grid_sf2) + geom_sf(aes(fill = value)) +
# facet_grid(. ~ short_name) +
# scale_fill_gradient(low="grey",high="red", na.value = "blue")
### COMPARE RESULTS WITH INPUT DATA
##
## NOTE(review): this section referenced objects that are never defined in
## this script. 'land_use_raw' is the allocation result loaded above as
## 'lu_raw' and is renamed accordingly (fix). 'crop_cover', 'ag_stat_2000',
## 'grid_sf' and 'urban_mask_sf' are still undefined here and must be
## loaded elsewhere before running this section -- TODO confirm sources.

# Compare crop cover with allocation: grid cells with large negative
# 'check' have more area allocated than is available.
check1 <- lu_raw %>%
  group_by(gridID) %>%
  summarize(value = sum(value, na.rm = T)) %>%
  left_join(crop_cover, .) %>%
  left_join(priors, .) %>%
  mutate(check = area - value) %>%
  arrange(check)

# Select gridID with difference > -1000
grid_check <- filter(check1, check < -1000)

# Plot outliers
plot(grid, col = NULL)
plot(grid[grid$gridID %in% grid_check$gridID, ], col = "red", add = T)
grid_check_p <- grid[grid$gridID %in% grid_check$gridID, ]

ggplot(grid_sf) +
  geom_sf() +
  geom_sf(data = filter(grid_sf, gridID %in% grid_check$gridID), fill = "red") +
  geom_sf(data = urban_mask_sf, fill = "blue", alpha = 0.5) +
  geom_point(data = cities, aes(x = long, y = lat), col = "green") +
  geom_text(data = cities, aes(x = long, y = lat, label = name))
plotKML(grid_check_p)

# Compare total national crop area statistics with the allocation
ag_stat_adm0 <- ag_stat_2000 %>%
  filter(adm_level == 0) %>%
  rename(stat = value)

check2 <- lu_raw %>%
  group_by(short_name) %>%
  summarize(value = sum(value, na.rm = T)) %>%
  left_join(ag_stat_adm0, .) %>%
  mutate(check = stat - value) %>%
  arrange(desc(check))

# Compare adm2-level area statistics with the allocation
ag_stat_adm2 <- ag_stat_2000 %>%
  filter(adm_level == 2) %>%
  rename(stat = value, adm2 = adm)

check3a <- lu_raw %>%
  left_join(crop_cover, .) %>%
  group_by(adm2, short_name) %>%
  summarize(value = sum(value, na.rm = T)) %>%
  left_join(ag_stat_adm2, .) %>%
  mutate(check = (value - stat),
         share = ((value - stat) / stat) * 100) %>%
  arrange(desc(check))

# Aggregate the adm2 comparison over crops per region.
# BUG FIX: this previously piped from undefined 'check3'; the intended
# input is 'check3a' defined just above.
check3b <- check3a %>%
  ungroup() %>%
  group_by(adm2) %>%
  summarize(value = sum(value, na.rm = T),
            stat = sum(stat, na.rm = T)) %>%
  mutate(check = (value - stat),
         share = ((value - stat) / stat) * 100) %>%
  arrange(desc(check))
|
c760584d5dc0810d946afc8119ca59d2b337eee1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/eggCounts/examples/stan2mcmc.Rd.R
|
e63da21fe3d8864b8798e6b12ed5c32019fa7423
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
stan2mcmc.Rd.R
|
# Auto-extracted example for eggCounts::stan2mcmc; the example body is
# wrapped in "Not run" guards (##D lines), so only the library call runs.
library(eggCounts)
### Name: stan2mcmc
### Title: Convert a Stanfit object to MCMC object
### Aliases: stan2mcmc
### Keywords: modelling
### ** Examples
## Not run:
##D data(epgs)
##D
##D ## apply zero-inflation model for the paired design
##D model <- fecr_stan(epgs$before, epgs$after, rawCounts = FALSE, indEfficacy = FALSE,
##D preCF = 10, paired = TRUE, zeroInflation = TRUE)
##D samples <- stan2mcmc(model$stan.samples)
##D summary(samples)
## End(Not run)
|
fb865b1f82578ee73f21caf1158516576811b10f
|
d5cde2fb886f718387465176abc4f8dc06f850c4
|
/R/fit_losses.R
|
3388127af5491129c3d5927d844422c7049c076a
|
[] |
no_license
|
arlionn/personalized
|
ee6b853be31147daffacd221d18ddba5739b9f4d
|
f75efd9fe459f4decb0c70739cb91002c9abe326
|
refs/heads/master
| 2023-05-04T00:14:22.647945
| 2021-05-31T18:34:02
| 2021-05-31T18:34:02
| 420,626,056
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,878
|
r
|
fit_losses.R
|
# Define common predictions function types
#
# Builds the prediction closure returned alongside each fitted model.
# Dispatches on 'fit.name' (the loss name) and captures the fitted 'model'
# plus several objects read from the caller's frame via 'env':
# n.trts, vnames, sel.idx, best.iter, family.
# The returned closure maps a design matrix x to estimated benefit scores.
# NOTE(review): Cox predictions are negated below; presumably so larger
# scores always mean better outcomes (lower hazard) -- confirm against the
# package convention.
get.pred.func <- function(fit.name, model, env = parent.frame())
{
n.trts <- env$n.trts
vnames <- env$vnames
sel.idx <- env$sel.idx
best.iter <- env$best.iter
family <- env$family
# GAM models
if (grepl("_gam$",fit.name))
{
if (grepl("_cox", fit.name))
{
pred.func <- function(x, type = c("link", "class"))
{
# rebuild a data frame with the columns selected during fitting;
# trt_1n1 is fixed at 1 so predictions are for the "treated" contrast
df.pred <- data.frame(cbind(1, x[,sel.idx[-1] - 1]))
colnames(df.pred) <- vnames
df.pred$trt_1n1 <- 1
-drop(predict(model, newdata = df.pred, type = "link"))
}
} else
{
pred.func <- function(x, type = c("link", "class"))
{
df.pred <- data.frame(cbind(1, x[,sel.idx[-1] - 1]))
colnames(df.pred) <- vnames
df.pred$trt_1n1 <- 1
drop(predict(model, newdata = df.pred, type = "link"))
}
}
# GBM models
} else if (grepl("_gbm$",fit.name))
{
pred.func <- function(x, type = c("link", "class"))
{
# gbm was fit with an offset column; supply a zero offset at prediction
df.x <- data.frame(cbind(1, x))
df.x$offset <- rep(0, NROW(x))
colnames(df.x) <- vnames
drop(predict(model, newdata = df.x, n.trees = best.iter, type = "link"))
}
# non-GAM/GBM LASSO models (loss ends in _lasso)
} else if (grepl("_lasso$",fit.name))
{
if (grepl("_cox", fit.name))
{
pred.func <- function(x, type = c("link", "class"))
{
if (n.trts == 2)
{
-drop(predict(model, newx = cbind(1, x),
type = "link",
s = "lambda.min",
newoffset = rep(0, NROW(x)) ))
} else
{
## need to handle cases with multiple treatments specially
## because we don't want to sum up over all the estimated deltas.
## for K-trtments we estimate K-1 delta functions and thus need
## to extract each one individually.
all.coefs <- as.vector(predict(model, type = "coefficients", s = "lambda.min"))
n.coefs.per.trt <- length(all.coefs) / (n.trts - 1)
n.preds <- NROW(x)
pred.mat <- array(NA, dim = c(n.preds, n.trts - 1))
for (t in 1:(n.trts - 1))
{
idx.coefs.cur <- (n.coefs.per.trt * (t - 1) + 1):(n.coefs.per.trt * t)
coefs.cur <- all.coefs[idx.coefs.cur]
pred.mat[,t] <- drop(cbind(1, x) %*% coefs.cur)
}
-pred.mat
}
}
} else
{
pred.func <- function(x, type = c("link", "class"))
{
type <- match.arg(type)
if (n.trts == 2)
{
drop(predict(model, newx = cbind(1, x),
type = "link",
s = "lambda.min",
newoffset = rep(0, NROW(x)) ))
} else
{
## need to handle cases with multiple treatments specially
## because we don't want to sum up over all the estimated deltas.
if (family == "multinomial")
{
drop(predict(model, cbind(1, x),
type = type,
s = "lambda.min"))
} else
{
## for K-trtments we estimate K-1 delta functions and thus need
## to extract each one individually.
## [-1] drops the glmnet intercept from the coefficient vector
all.coefs <- as.vector(predict(model, type = "coefficients", s = "lambda.min"))[-1]
n.coefs.per.trt <- length(all.coefs) / (n.trts - 1)
n.preds <- NROW(x)
pred.mat <- array(NA, dim = c(n.preds, n.trts - 1))
for (t in 1:(n.trts - 1))
{
idx.coefs.cur <- (n.coefs.per.trt * (t - 1) + 1):(n.coefs.per.trt * t)
coefs.cur <- all.coefs[idx.coefs.cur]
pred.mat[,t] <- drop(cbind(1, x) %*% coefs.cur)
}
pred.mat
}
}
}
}
} else if (grepl("hinge_loss$", fit.name))
{
pred.func <- function(x, type = c("link", "class"))
{
drop(predict(model, newx = cbind(1, x), type = "linear.predictor"))
}
} else
{
stop(paste0("No prediction method found for loss: ", fit.name))
}
return(pred.func)
} # End get.pred.func
# Define common coefficient return methods
#
# Builds the coefficient-extraction closure for a fitted model, dispatching
# on the loss name:
#   * names ending in "_lasso"    -> cv.glmnet fits: coef at lambda.min
#   * names ending in "_loss_gam" (but not "lasso_gam") -> gam fits: coef()
#   * anything else (e.g. gbm fits, which have no coefficients) -> NULL
#
# 'env' is kept for interface compatibility with get.pred.func(); nothing
# is read from the caller's frame here (the previous version assigned
# env$n.trts to an unused local).
get.coef.func <- function(fit.name, env = parent.frame())
{
  # cv.glmnet-based fits (plain lasso and the lasso_gam selection step)
  if (grepl("_lasso$", fit.name))
  {
    coef.func <- function(mod)
    {
      coef(mod, s = "lambda.min")
    }
    # gam() fits (loss ends in _loss_gam and is not a lasso_gam model)
  } else if (grepl("_loss_gam$", fit.name) && !grepl("lasso_gam$", fit.name))
  {
    coef.func <- function(mod)
    {
      coef(mod)
    }
    # no coefficient analogue exists for the remaining models (e.g. gbm)
  } else
  {
    coef.func <- function(mod)
    {
      return(NULL)
    }
  }
  return(coef.func)
} # End get.coef.func
#' @import glmnet
#' @importFrom stats coef
# Fit the squared-error-loss lasso working model via cv.glmnet.
# Args: x = design matrix (first column per treatment block is the
# treatment main effect), y = outcome, trt = treatment assignments,
# n.trts = number of treatments, wts = observation weights, family =
# glmnet family, match.id = optional matching/cluster factor, intercept =
# whether to keep an (unpenalized) intercept column, ... = passed on to
# cv.glmnet. Returns list(predict, model, coefficients).
fit_sq_loss_lasso <- function(x, y, trt, n.trts, wts, family, match.id, intercept = FALSE, ...)
{
# this function must return a fitted model
# in addition to a function which takes in
# a design matrix and outputs estimated benefit scores
###################################################################
##
## IMPORTANT NOTE: the name of this function *must*
## begin with "fit_" and end with
## the text string to associated with
## this function in the options for the
## 'loss' argument of the fit.subgrp()
## function
##
###################################################################
list.dots <- list(...)
dot.names <- names(list.dots)
# index of the treatment main-effect column within each of the
# (n.trts - 1) per-treatment blocks of the design matrix
n.unique.vars <- ncol(x) / (n.trts - 1)
zero.pen.idx <- ((1:(n.trts - 1) ) - 1) * n.unique.vars + 1
list.dots$intercept <- intercept
if ("penalty.factor" %in% dot.names)
{
## ensure treatment is not penalized
list.dots$penalty.factor[zero.pen.idx] <- 0
} else
{
list.dots$penalty.factor <- rep(1, ncol(x))
list.dots$penalty.factor[zero.pen.idx] <- 0
}
## Establish nfolds for cv.glmnet()
if ("nfolds" %in% dot.names)
{
nfolds <- list.dots$nfolds
if (nfolds < 3)
{
stop("nfolds must be bigger than 3; nfolds = 10 recommended")
}
} else
{
nfolds <- 10
}
list.dots$nfolds <- nfolds
# Refit with new random folds until at least one non-treatment variable
# is selected, up to ntry + 1 attempts (guards against an empty model
# from an unlucky fold assignment).
nsel <- 0
ct <- 0
ntry <- 4
while(nsel == 0 & ct <= ntry)
{
ct <- ct + 1
## Establish foldid for cv.glmnet()
## if match.id was supplied, foldid will be structured around the clusters
if (!is.null(match.id))
{
if ("foldid" %in% dot.names)
{
warning("User-supplied foldid will be ignored since match.id was detected.
Folds will be randomly assigned to clusters according to match.id.")
}
# Assign a fold ID for each cluster level
df.folds <- data.frame(match.id = sample(levels(match.id)),
fold.id = 1:length(levels(match.id)) %% nfolds)
# Obtain vector of fold IDs with respect to the data
foldid <- sapply(match.id, function(z) {df.folds[which(z == df.folds$match.id),"fold.id"]}) + 1
} else
{
if ("foldid" %in% dot.names)
{
foldid <- list.dots$foldid
} else
{
foldid <- sample(rep(seq(nfolds), length = nrow(x)))
}
}
list.dots$foldid <- foldid
# fit a model with a lasso
# penalty and desired loss
model <- do.call(cv.glmnet, c(list(x = x, y = y, weights = wts, family = family), list.dots))
# this is needed for OWL losses, as glmnet
# no longer allows constant columns (ie an intercept)
# to have estimated coefficients
if (intercept)
{
if (family != "multinomial")
{
# move the fitted intercept into the first coefficient slot
model$glmnet.fit$beta[1,] <- unname(model$glmnet.fit$a0)
model$glmnet.fit$a0 <- rep(0, length(model$glmnet.fit$a0))
} else
{
for (cl in 1:nrow(model$glmnet.fit$a0))
{
model$glmnet.fit$beta[[cl]][1,] <- unname(model$glmnet.fit$a0[cl,])
model$glmnet.fit$a0[cl,] <- rep(0, length(model$glmnet.fit$a0[cl,]))
}
}
}
coefs <- get.coef.func("fit_sq_loss_lasso")(model)
# count selected variables, excluding the (n.trts - 1) unpenalized
# treatment main effects
if (is.list(coefs))
{
nsel <- sum(sapply(coefs, function(cfs) sum(cfs != 0))) - (n.trts - 1)
} else
{
nsel <- sum(coefs != 0) - (n.trts - 1)
}
}
# Return fitted model and extraction methods
list(predict = get.pred.func("fit_sq_loss_lasso", model),
model = model,
coefficients = coefs)
}
# The same routine serves the other GLM families; family is passed through
# to cv.glmnet, so only the name differs.
fit_logistic_loss_lasso <- fit_sq_loss_lasso
fit_poisson_loss_lasso <- fit_sq_loss_lasso
#' @import survival
# Cox-partial-likelihood analogue of fit_sq_loss_lasso: fits a lasso-
# penalized Cox model via cv.glmnet(family = "cox"). Unlike the GLM
# version there is no intercept handling (the Cox model has none).
# Returns list(predict, model, coefficients).
fit_cox_loss_lasso <- function(x, y, trt, n.trts, wts, family, match.id, ...)
{
list.dots <- list(...)
dot.names <- names(list.dots)
# index of the treatment main-effect column within each of the
# (n.trts - 1) per-treatment blocks of the design matrix
n.unique.vars <- ncol(x) / (n.trts - 1)
zero.pen.idx <- ((1:(n.trts - 1) ) - 1) * n.unique.vars + 1
if ("penalty.factor" %in% dot.names)
{
## ensure treatment is not penalized
list.dots$penalty.factor[zero.pen.idx] <- 0
} else
{
list.dots$penalty.factor <- rep(1, ncol(x))
list.dots$penalty.factor[zero.pen.idx] <- 0
}
## Establish nfolds for cv.glmnet()
if ("nfolds" %in% dot.names)
{
nfolds <- list.dots$nfolds
if (nfolds < 3)
{
stop("nfolds must be bigger than 3; nfolds = 10 recommended")
}
} else
{
nfolds <- 10
}
list.dots$nfolds <- nfolds
# Refit with new random folds until at least one non-treatment variable
# is selected, up to ntry + 1 attempts.
nsel <- 0
ct <- 0
ntry <- 4
while(nsel == 0 & ct <= ntry)
{
ct <- ct + 1
## Establish foldid for cv.glmnet()
## if match.id was supplied, foldid will be structured around the clusters
if (!is.null(match.id))
{
if ("foldid" %in% dot.names)
{
warning("User-supplied foldid will be ignored since match.id was detected.
Folds will be randomly assigned to clusters according to match.id.")
}
# Assign a fold ID for each cluster level
df.folds <- data.frame(match.id = sample(levels(match.id)),
fold.id = 1:length(levels(match.id)) %% nfolds)
# Obtain vector of fold IDs with respect to the data
foldid <- sapply(match.id, function(z) {df.folds[which(z == df.folds$match.id),"fold.id"]}) +1
} else
{
if ("foldid" %in% dot.names)
{
foldid <- list.dots$foldid
} else
{
foldid <- sample(rep(seq(nfolds), length = nrow(x)))
}
}
list.dots$foldid <- foldid
# fit a model with a lasso
# penalty and desired loss
model <- do.call(cv.glmnet, c(list(x = x, y = y, weights = wts, family = "cox"), list.dots))
coefs <- get.coef.func("fit_cox_loss_lasso")(model)
# count selected variables, excluding the unpenalized treatment effects
if (is.list(coefs))
{
nsel <- sum(sapply(coefs, function(cfs) sum(cfs != 0))) - (n.trts - 1)
} else
{
nsel <- sum(coefs != 0) - (n.trts - 1)
}
}
# Return fitted model and extraction methods
list(predict = get.pred.func("fit_cox_loss_lasso", model),
model = model,
coefficients = coefs)
}
#' @import mgcv
#' @importFrom stats as.formula binomial gaussian
fit_sq_loss_lasso_gam <- function(x, y, trt, n.trts, wts, family, match.id, intercept = FALSE, ...)
{
# this function must return a fitted model
# in addition to a function which takes in
# a design matrix and outputs estimated benefit scores
###################################################################
##
## IMPORTANT NOTE: the name of this function *must*
## begin with "fit_" and end with
## the text string to associated with
## this function in the options for the
## 'loss' argument of the fit.subgrp()
## function
##
###################################################################
# need to inspect the dots to extract
# the arguments supplied to cv.glmnet
# and those supplied to gam
list.dots <- list(...)
dot.names <- names(list.dots)
if ("penalty.factor" %in% dot.names)
{
## ensure treatment is not penalized
list.dots$penalty.factor[1] <- 0
} else
{
list.dots$penalty.factor <- c(0, rep(1, ncol(x) - 1))
}
list.dots$intercept <- intercept
if (is.factor(trt))
{
# drop any unused levels of trt
trt <- droplevels(trt)
unique.trts <- levels(trt)
} else
{
unique.trts <- sort(unique(trt))
}
if (n.trts == 2)
{
trt.y <- trt
trt_1n1 <- ifelse(trt == unique.trts[2], 1, -1)
} else
{
stop("gam loss not yet available for multiple treatments scenarios.")
}
## Establish nfolds for cv.glmnet()
if ("nfolds" %in% dot.names)
{
nfolds <- list.dots$nfolds
if (nfolds < 3)
{
stop("nfolds must be bigger than 3; nfolds = 10 recommended")
}
} else
{
nfolds <- 10
}
list.dots$nfolds <- nfolds
## Establish foldid for cv.glmnet()
## if match.id was supplied, foldid will be structured around the clusters
if (!is.null(match.id))
{
if ("foldid" %in% dot.names)
{
warning("User-supplied foldid will be ignored since match.id was detected.
Folds will be randomly assigned to clusters according to match.id.")
}
# Assign a fold ID for each cluster level
df.folds <- data.frame(match.id = sample(levels(match.id)),
fold.id = 1:length(levels(match.id)) %% nfolds)
# Obtain vector of fold IDs with respect to the data
foldid <- sapply(match.id, function(z) {df.folds[which(z == df.folds$match.id),"fold.id"]}) + 1
} else
{
if ("foldid" %in% dot.names)
{
foldid <- list.dots$foldid
} else
{
foldid <- sample(rep(seq(nfolds), length = nrow(x)))
}
}
list.dots$foldid <- foldid
glmnet.argnames <- union(names(formals(cv.glmnet)), names(formals(glmnet)))
gam.argnames <- names(formals(gam))
# since 'method' is an argument of 'fit.subgrp',
# let the user change the gam 'method' arg by supplying
# 'method.gam' arg instead of 'method'
dot.names[dot.names == "method.gam"] <- "method"
names(list.dots)[names(list.dots) == "method.gam"] <- "method"
# find the arguments relevant for each
# possible ...-supplied function
dots.idx.glmnet <- match(glmnet.argnames, dot.names)
dots.idx.gam <- match(gam.argnames, dot.names)
dots.idx.glmnet <- dots.idx.glmnet[!is.na(dots.idx.glmnet)]
dots.idx.gam <- dots.idx.gam[!is.na(dots.idx.gam)]
# fit a model with a lasso
# penalty and desired loss:
sel.model <- do.call(cv.glmnet, c(list(x = x, y = y, weights = wts, family = family),
list.dots[dots.idx.glmnet]))
vnames <- colnames(x)
sel.idx <- drop(predict(sel.model, type = "nonzero", s = "lambda.min")[[1]])
# always include treatment main effect
sel.idx <- union(1L, sel.idx)
# names of selected variables
sel.vnames <- vnames[sel.idx]
# find which variables are binary
var.levels <- numeric(length(sel.idx))
for (v in 1:length(sel.idx))
{
var.levels[v] <- length(unique(x[,sel.idx[v]]))
}
contin.vars <- sel.vnames[var.levels > 2]
binary.vars <- sel.vnames[var.levels <= 2]
# create formula for gam
contin.formula <- binary.formula <- NULL
# don't create smoother for binary vars
if (length(binary.vars) > 0)
{
binary.formula <- paste(binary.vars, collapse = "+")
}
# create smoother for each continuous var
if (length(contin.vars) > 0)
{
num_unique_values <- apply(x[,contin.vars,drop=FALSE], 2, function(x) length(unique(x)) )
form.cur <- paste0("s(", contin.vars, ", by = trt_1n1)")
form.cur[num_unique_values <= 10] <- paste0("s(", contin.vars[num_unique_values <= 10], ", by = trt_1n1, k=",
num_unique_values[num_unique_values <= 10]-1, ")")
contin.formula <- paste(form.cur, collapse = "+")
}
family.func <- gaussian()
if (family == "cox")
{
rhs.formula <- paste(c(binary.formula, contin.formula), collapse = "+")
family.func <- cox.ph()
} else
{
rhs.formula <- paste("-1 +", paste(c(binary.formula, contin.formula), collapse = "+"))
if (family == "binomial")
{
family.func <- binomial()
y <- as.integer(y)
} else if (family == "poisson")
{
family.func <- poisson()
y <- as.integer(y)
}
}
gam.formula <- as.formula(paste("y ~", rhs.formula))
# create data frame
df <- data.frame(y = y, x = x[,sel.idx], trt_1n1 = trt_1n1)
colnames(df) <- c("y", sel.vnames)
vnames <- sel.vnames
oversmoothing_factor <- sqrt(ncol(x) / (length(contin.vars) + 1))
# fit gam model:
# only add in dots calls if they exist
if (length(dots.idx.glmnet) > 0)
{
model <- do.call(gam, c(list(formula = gam.formula, data = df,
weights = wts, family = family.func,
gamma = oversmoothing_factor, ## oversmooth since we're in a post-selection scenario
drop.intercept = TRUE),
list.dots[dots.idx.gam]))
} else
{
model <- do.call(gam, list(formula = gam.formula, data = df,
weights = wts, family = family.func,
gamma = oversmoothing_factor, ## oversmooth since we're in a post-selection scenario
drop.intercept = TRUE))
}
# Return fitted model and extraction methods
list(predict = get.pred.func("fit_sq_loss_lasso_gam", model),
model = model,
coefficients = get.coef.func("fit_sq_loss_lasso_gam")(model))
}
# The logistic-, Cox-, and Poisson-loss lasso+gam fitters share the
# squared-error-loss implementation above: the 'family' argument passed
# by the caller selects the appropriate likelihood inside the shared code.
fit_logistic_loss_lasso_gam <- fit_sq_loss_lasso_gam
fit_cox_loss_lasso_gam <- fit_sq_loss_lasso_gam
fit_poisson_loss_lasso_gam <- fit_sq_loss_lasso_gam
fit_sq_loss_gam <- function(x, y, trt, n.trts, wts, family, match.id, ...)
{
  # Fit a generalized additive model (mgcv::gam) for benefit-score
  # estimation: continuous covariates get smooth terms that vary by
  # treatment arm (via 'by = trt_1n1'); binary covariates enter linearly.
  #
  # Arguments (internal fitter interface):
  #   x        - design matrix of covariates
  #   y        - outcome (survival matrix when family == "cox")
  #   trt      - treatment indicator; only two arms are supported
  #   n.trts   - number of distinct treatments
  #   wts      - observation weights
  #   family   - "binomial", "poisson", "cox", or gaussian otherwise
  #   match.id - unused here; kept for interface compatibility
  #   ...      - forwarded to gam(); 'method.gam' is renamed to gam's
  #              'method' to avoid clashing with fit.subgrp()'s 'method'
  #
  # Returns a list with a benefit-score 'predict' function, the fitted
  # 'model', and extracted 'coefficients'.

  ###################################################################
  ##
  ## IMPORTANT NOTE: the name of this function *must*
  ##                 begin with "fit_" and end with
  ##                 the text string to associated with
  ##                 this function in the options for the
  ##                 'loss' argument of the fit.subgrp()
  ##                 function
  ##
  ###################################################################

  list.dots <- list(...)

  # since 'method' is an argument of 'fit.subgrp',
  # let the user change the gam 'method' arg by supplying
  # 'method.gam' arg instead of 'method'
  names(list.dots)[names(list.dots) == "method.gam"] <- "method"

  vnames  <- colnames(x)
  sel.idx <- seq_len(ncol(x))

  # names of selected variables
  sel.vnames <- vnames[sel.idx]

  if (is.factor(trt))
  {
    # drop any unused levels of trt
    trt         <- droplevels(trt)
    unique.trts <- levels(trt)
  } else
  {
    unique.trts <- sort(unique(trt))
  }

  if (n.trts == 2)
  {
    trt.y <- trt
    # code the two arms as +1 / -1 so smooths can differ by arm
    trt_1n1 <- ifelse(trt == unique.trts[2], 1, -1)
  } else
  {
    stop("gam loss not yet available for multiple treatments scenarios.")
  }

  # number of distinct values per selected column (vectorized; replaces
  # the original 1:length() index loop); <= 2 values counts as binary
  var.levels <- vapply(sel.idx, function(v) length(unique(x[, v])), numeric(1))

  contin.vars <- sel.vnames[var.levels > 2]
  binary.vars <- sel.vnames[var.levels <= 2]

  # create formula for gam
  contin.formula <- binary.formula <- NULL

  # don't create smoother for binary vars
  if (length(binary.vars) > 0)
  {
    binary.formula <- paste(binary.vars, collapse = "+")
  }

  # create smoother for each continuous var; variables with few unique
  # values get a reduced basis dimension k so gam() remains estimable
  if (length(contin.vars) > 0)
  {
    num_unique_values <- apply(x[, contin.vars, drop = FALSE], 2, function(x) length(unique(x)) )
    form.cur <- paste0("s(", contin.vars, ", by = trt_1n1)")
    form.cur[num_unique_values <= 10] <- paste0("s(", contin.vars[num_unique_values <= 10], ", by = trt_1n1, k=",
                                                num_unique_values[num_unique_values <= 10]-1, ")")
    contin.formula <- paste(form.cur, collapse = "+")
  }

  # map 'family' to the gam family object; the Cox formula keeps the
  # intercept, all other families drop it explicitly ("-1 +")
  family.func <- gaussian()
  if (family == "cox")
  {
    rhs.formula <- paste(c(binary.formula, contin.formula), collapse = "+")
    family.func <- cox.ph()
  } else
  {
    rhs.formula <- paste("-1 +", paste(c(binary.formula, contin.formula), collapse = "+"))
    if (family == "binomial")
    {
      family.func <- binomial()
      y <- as.integer(y)
    } else if (family == "poisson")
    {
      family.func <- poisson()
      y <- as.integer(y)
    }
  }

  gam.formula <- as.formula(paste("y ~", rhs.formula))

  # create data frame
  # NOTE(review): the name vector below has one fewer entry than df has
  # columns, so the trt_1n1 column loses its name; the formula appears to
  # resolve trt_1n1 from this function's environment instead -- confirm
  df <- data.frame(y = y, x = x[, sel.idx], trt_1n1 = trt_1n1)
  colnames(df) <- c("y", sel.vnames)

  vnames <- sel.vnames

  # fit gam model:
  # only add in dots calls if they exist
  if (length(list.dots) > 0)
  {
    model <- do.call(gam, c(list(formula = gam.formula, data = df,
                                 weights = wts, family = family.func,
                                 drop.intercept = TRUE),
                            list.dots))
  } else
  {
    model <- do.call(gam, list(formula = gam.formula, data = df,
                               weights = wts, family = family.func,
                               drop.intercept = TRUE))
  }

  # Return fitted model and extraction methods
  list(predict = get.pred.func("fit_sq_loss_gam", model),
       model = model,
       coefficients = get.coef.func("fit_sq_loss_gam")(model))
}
# The logistic-, Poisson-, and Cox-loss gam fitters reuse the
# squared-error-loss implementation; the 'family' argument supplied by
# the caller selects the likelihood inside the shared code.
fit_logistic_loss_gam <- fit_sq_loss_gam
fit_poisson_loss_gam <- fit_sq_loss_gam
fit_cox_loss_gam <- fit_sq_loss_gam
#' @import gbm
fit_sq_loss_gbm <- function(x, y, trt, n.trts, wts, family, match.id, ...)
{
  # Fit a gradient-boosted model (gbm) for benefit-score estimation with
  # the distribution given by 'family'. Cross-validation (cv.folds >= 2)
  # is always enabled so gbm.perf() can pick the optimal iteration.
  #
  # Returns a list with a 'predict' function mapping a design matrix to
  # benefit scores, the fitted 'model', and extracted 'coefficients'.

  ###################################################################
  ##
  ## IMPORTANT NOTE: the name of this function *must*
  ##                 begin with "fit_" and end with
  ##                 the text string to associated with
  ##                 this function in the options for the
  ##                 'loss' argument of the fit.subgrp()
  ##                 function
  ##
  ###################################################################

  list.dots <- list(...)

  dot.names <- names(list.dots)
  if ("cv.folds" %in% dot.names)
  {
    # '[[' extracts the value itself; the original used '[', which
    # returns a one-element list and makes 'cv.folds < 2' a list comparison
    cv.folds <- list.dots[["cv.folds"]]
    if (cv.folds < 2)
    {
      cv.folds <- 2L
      list.dots$cv.folds <- cv.folds
      warning("cv.folds must be at least 2, setting cv.folds to 2.")
    }
  } else
  {
    list.dots$cv.folds <- 5L
  }

  if (!is.null(match.id)) {
    warning("Matched groups are not guaranteed to remain matched in the cross-validation procedure using GBM models.")
  }

  if ("offset" %in% dot.names)
  {
    # user-supplied offset: move it into the model frame so the formula's
    # offset() term picks it up, and remove it from the forwarded dots
    df <- data.frame(y = y, x, offset = list.dots$offset)
    list.dots$offset <- NULL
  } else
  {
    df <- data.frame(y = y, x, offset = rep(0, NROW(x)))
  }

  formula.gbm <- as.formula("y ~ . - 1 + offset(offset)")

  # fit a boosted model with the distribution implied by 'family'
  # (the original comment incorrectly mentioned a lasso penalty)
  model <- do.call(gbm, c(list(formula.gbm, data = df,
                               weights = wts,
                               distribution = family),
                          list.dots))

  # cross-validated optimal iteration; gbm.perf() also plots the CV curve
  # as a side effect, so the call is kept although best.iter is unused below
  best.iter <- gbm.perf(model, method = "cv")

  vnames <- colnames(df)[-1]

  # Return fitted model and extraction methods
  list(predict = get.pred.func("fit_sq_loss_gbm", model),
       model = model,
       coefficients = get.coef.func("fit_sq_loss_gbm")(model))
}
fit_poisson_loss_gbm <- fit_sq_loss_gbm
#
#
# fit_abs_loss_gbm <- function(x, y, trt, n.trts, wts, family, match.id, ...)
# {
# # this function must return a fitted model
# # in addition to a function which takes in
# # a design matrix and outputs estimated benefit scores
#
# ###################################################################
# ##
# ## IMPORTANT NOTE: the name of this function *must*
# ## begin with "fit_" and end with
# ## the text string to associated with
# ## this function in the options for the
# ## 'loss' argument of the fit.subgrp()
# ## function
# ##
# ###################################################################
#
# list.dots <- list(...)
#
# dot.names <- names(list.dots)
# if ("cv.folds" %in% dot.names)
# {
# cv.folds <- list.dots["cv.folds"]
# if (cv.folds < 2)
# {
# cv.folds <- 2L
# list.dots$cv.folds <- cv.folds
# warning("cv.folds must be at least 2, setting cv.folds to 2.")
# }
#
# } else
# {
# list.dots$cv.folds <- 5L
# }
#
# if (!is.null(match.id))
# {
# warning("Matched groups are not guaranteed to remain matched in the cross-validation procedure using GBM models.")
# }
#
# if ("offset" %in% dot.names)
# {
# df <- data.frame(y = y, x, offset = list.dots$offset)
# list.dots$offset <- NULL
# } else
# {
# df <- data.frame(y = y, x, offset = rep(0,NROW(x)))
# }
#
# formula.gbm <- as.formula("y ~ . - 1 + offset(offset)")
#
# # fit a model with a lasso
# # penalty and desired loss
# model <- do.call(gbm, c(list(formula.gbm, data = df,
# weights = wts,
# distribution = "laplace"),
# list.dots))
#
# best.iter <- gbm.perf(model, method = "cv")
#
# vnames <- colnames(df)[-1]
#
# # Return fitted model and extraction methods
# list(predict = get.pred.func("fit_abs_loss_gbm", model),
# model = model,
# coefficients = get.coef.func("fit_abs_loss_gbm")(model))
# }
fit_logistic_loss_gbm <- function(x, y, trt, n.trts, wts, family, match.id, ...)
{
  # Fit a gradient-boosted model (gbm) with the "bernoulli" distribution
  # for binary outcomes. Cross-validation (cv.folds >= 2) is always
  # enabled so gbm.perf() can pick the optimal iteration.
  #
  # Returns a list with a 'predict' function, the fitted 'model', and
  # extracted 'coefficients'.

  ###################################################################
  ##
  ## IMPORTANT NOTE: the name of this function *must*
  ##                 begin with "fit_" and end with
  ##                 the text string to associated with
  ##                 this function in the options for the
  ##                 'loss' argument of the fit.subgrp()
  ##                 function
  ##
  ###################################################################

  list.dots <- list(...)

  dot.names <- names(list.dots)
  if ("cv.folds" %in% dot.names)
  {
    # '[[' extracts the value itself; the original used '[', which
    # returns a one-element list and makes 'cv.folds < 2' a list comparison
    cv.folds <- list.dots[["cv.folds"]]
    if (cv.folds < 2)
    {
      cv.folds <- 2L
      list.dots$cv.folds <- cv.folds
      warning("cv.folds must be at least 2, setting cv.folds to 2.")
    }
  } else
  {
    list.dots$cv.folds <- 5L
  }

  if (!is.null(match.id))
  {
    warning("Matched groups are not guaranteed to remain matched in the cross-validation procedure using GBM models.")
  }

  if ("offset" %in% dot.names)
  {
    # user-supplied offset: move it into the model frame so the formula's
    # offset() term picks it up, and remove it from the forwarded dots
    df <- data.frame(y = y, x, offset = list.dots$offset)
    list.dots$offset <- NULL
  } else
  {
    df <- data.frame(y = y, x, offset = rep(0, NROW(x)))
  }

  formula.gbm <- as.formula("y ~ . - 1 + offset(offset)")

  # fit a boosted model with the Bernoulli (logistic) distribution
  # (the original comment incorrectly mentioned a lasso penalty)
  model <- do.call(gbm, c(list(formula.gbm, data = df,
                               weights = wts,
                               distribution = "bernoulli"),
                          list.dots))

  # cross-validated optimal iteration; gbm.perf() also plots the CV curve
  # as a side effect, so the call is kept although best.iter is unused below
  best.iter <- gbm.perf(model, method = "cv")

  vnames <- colnames(df)[-1]

  # Return fitted model and extraction methods
  list(predict = get.pred.func("fit_logistic_loss_gbm", model),
       model = model,
       coefficients = get.coef.func("fit_logistic_loss_gbm")(model))
}
#
# fit_poisson_loss_gbm <- function(x, y, trt, n.trts, wts, family, match.id, ...)
# {
# # this function must return a fitted model
# # in addition to a function which takes in
# # a design matrix and outputs estimated benefit scores
#
# ###################################################################
# ##
# ## IMPORTANT NOTE: the name of this function *must*
# ## begin with "fit_" and end with
# ## the text string to associated with
# ## this function in the options for the
# ## 'loss' argument of the fit.subgrp()
# ## function
# ##
# ###################################################################
#
# list.dots <- list(...)
#
# dot.names <- names(list.dots)
# if ("cv.folds" %in% dot.names)
# {
# cv.folds <- list.dots["cv.folds"]
# if (cv.folds < 2)
# {
# cv.folds <- 2L
# list.dots$cv.folds <- cv.folds
# warning("cv.folds must be at least 2, setting cv.folds to 2.")
# }
#
# } else
# {
# list.dots$cv.folds <- 5L
# }
#
# if (!is.null(match.id))
# {
# warning("Matched groups are not guaranteed to remain matched in the cross-validation procedure using GBM models.")
# }
#
# if ("offset" %in% dot.names)
# {
# df <- data.frame(y = y, x, offset = list.dots$offset)
# list.dots$offset <- NULL
# } else
# {
# df <- data.frame(y = y, x, offset = rep(0,NROW(x)))
# }
#
# formula.gbm <- as.formula("y ~ . - 1 + offset(offset)")
#
# # fit a model with a lasso
# # penalty and desired loss
# model <- do.call(gbm, c(list(formula.gbm, data = df,
# weights = wts,
# distribution = "poisson"),
# list.dots))
#
# best.iter <- gbm.perf(model, method = "cv")
#
# vnames <- colnames(df)[-1]
#
# # Return fitted model and extraction methods
# list(predict = get.pred.func("fit_poisson_loss_gbm", model),
# model = model,
# coefficients = get.coef.func("fit_poisson_loss_gbm")(model))
# }
#
fit_cox_loss_gbm <- function(x, y, trt, n.trts, wts, family, match.id, ...)
{
  # Fit a gradient-boosted proportional-hazards model (gbm, "coxph"
  # distribution). 'y' is a survival matrix with "time" and "status"
  # columns, which are unpacked into the model frame for the Surv() term.
  #
  # Returns a list with a 'predict' function, the fitted 'model', and
  # extracted 'coefficients'.

  ###################################################################
  ##
  ## IMPORTANT NOTE: the name of this function *must*
  ##                 begin with "fit_" and end with
  ##                 the text string to associated with
  ##                 this function in the options for the
  ##                 'loss' argument of the fit.subgrp()
  ##                 function
  ##
  ###################################################################

  list.dots <- list(...)

  dot.names <- names(list.dots)
  if ("cv.folds" %in% dot.names)
  {
    # '[[' extracts the value itself; the original used '[', which
    # returns a one-element list and makes 'cv.folds < 2' a list comparison
    cv.folds <- list.dots[["cv.folds"]]
    if (cv.folds < 2)
    {
      cv.folds <- 2L
      list.dots$cv.folds <- cv.folds
      # message made consistent with the other gbm fitters
      warning("cv.folds must be at least 2, setting cv.folds to 2.")
    }
  } else
  {
    list.dots$cv.folds <- 5L
  }

  if (!is.null(match.id))
  {
    warning("Matched groups are not guaranteed to remain matched in the cross-validation procedure using GBM models.")
  }

  # locate the survival components inside the outcome matrix
  surv.vnames <- colnames(y)
  time.idx   <- which(surv.vnames == "time")
  status.idx <- which(surv.vnames == "status")

  if ("offset" %in% dot.names)
  {
    # user-supplied offset: move it into the model frame so the formula's
    # offset() term picks it up, and remove it from the forwarded dots
    df <- data.frame(cox_gbm_time = y[,time.idx],
                     cox_gbm_status = y[,status.idx], x,
                     offset = list.dots$offset)
    list.dots$offset <- NULL
  } else
  {
    df <- data.frame(cox_gbm_time = y[,time.idx],
                     cox_gbm_status = y[,status.idx], x,
                     offset = rep(0, NROW(x)))
  }

  formula.gbm <- as.formula("Surv(cox_gbm_time, cox_gbm_status) ~ . - 1 + offset(offset)")

  # fit a boosted Cox model
  # (the original comment incorrectly mentioned a lasso penalty)
  model <- do.call(gbm, c(list(formula.gbm, data = df,
                               weights = wts,
                               distribution = "coxph"),
                          list.dots))

  # cross-validated optimal iteration; gbm.perf() also plots the CV curve
  # as a side effect, so the call is kept although best.iter is unused below
  best.iter <- gbm.perf(model, method = "cv")

  # drop the two survival columns from the variable names
  vnames <- colnames(df)[-c(1,2)]

  # Return fitted model and extraction methods
  list(predict = get.pred.func("fit_cox_loss_gbm", model),
       model = model,
       coefficients = get.coef.func("fit_cox_loss_gbm")(model))
}
fit_owl_hinge_loss <- function(x, y, trt, n.trts, wts, family, match.id, ...)
{
  # Outcome-weighted learning with a hinge loss: fits a weighted kernel
  # SVM (weighted.ksvm, solved internally via kernlab's ipop QP solver).
  # ...-supplied arguments are filtered down to those accepted by
  # weighted.ksvm() or ipop(); cross-validation folds are built so that
  # matched clusters (match.id) stay within one fold.
  #
  # Returns a list with a 'predict' function, the fitted 'model', and
  # extracted 'coefficients'.

  list.dots <- list(...)
  dot.names <- names(list.dots)

  ipop.argnames  <- names(formals(ipop))
  wksvm.argnames <- names(formals(weighted.ksvm))

  # find the arguments relevant for each
  # possible ...-supplied function
  dots.idx.wksvm <- match(wksvm.argnames, dot.names)
  dots.idx.ipop  <- match(ipop.argnames, dot.names)

  dots.idx.wksvm <- dots.idx.wksvm[!is.na(dots.idx.wksvm)]
  dots.idx.ipop  <- dots.idx.ipop[!is.na(dots.idx.ipop)]

  list.dots <- list.dots[c(dots.idx.wksvm, dots.idx.ipop)]
  dot.names <- dot.names[c(dots.idx.wksvm, dots.idx.ipop)]

  ## Establish nfolds for the cross-validation inside weighted.ksvm()
  ## (the original comment incorrectly referenced cv.glmnet)
  if ("nfolds" %in% dot.names)
  {
    nfolds <- list.dots$nfolds
    if (nfolds < 2)
    {
      # message matched to the actual check: the original said
      # "bigger than 2" while testing 'nfolds < 2'
      stop("nfolds must be at least 2; nfolds = 10 recommended")
    }
  } else
  {
    nfolds <- 10
  }
  list.dots$nfolds <- nfolds

  ## Establish foldid for the cross-validation
  ## if match.id was supplied, foldid will be structured around the clusters
  if (!is.null(match.id))
  {
    if ("foldid" %in% dot.names)
    {
      warning("User-supplied foldid will be ignored since match.id was detected.
              Folds will be randomly assigned to clusters according to match.id.")
    }

    # Assign a fold ID for each cluster level (0-based remainders,
    # shifted to 1..nfolds at lookup time below)
    df.folds <- data.frame(match.id = sample(levels(match.id)),
                           fold.id = seq_along(levels(match.id)) %% nfolds)

    # Obtain vector of fold IDs with respect to the data
    foldid <- sapply(match.id, function(z) {df.folds[which(z == df.folds$match.id),"fold.id"]}) +1
  } else
  {
    if ("foldid" %in% dot.names)
    {
      foldid <- list.dots$foldid
    } else
    {
      foldid <- sample(rep(seq(nfolds), length = nrow(x)))
    }
  }
  list.dots$foldid <- foldid

  # fit the weighted kernel SVM on the hinge loss
  # (the original comment incorrectly mentioned a lasso penalty)
  model <- do.call(weighted.ksvm, c(list(x = x, y = as.character(y), weights = wts), list.dots))

  # Return fitted model and extraction methods
  list(predict = get.pred.func("fit_hinge_loss", model),
       model = model,
       coefficients = get.coef.func("fit_hinge_loss")(model))
}
|
9bb7298c72c97e58cb872269d7c09b7dc15bb8a8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NISTunits/examples/NISTminTOradian.Rd.R
|
b630a5375634066a485f79cd8d759143f87f0d3b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
NISTminTOradian.Rd.R
|
# Auto-extracted example script from the NISTunits package documentation.
library(NISTunits)

### Name: NISTminTOradian
### Title: Convert minute to radian
### Aliases: NISTminTOradian
### Keywords: programming

### ** Examples

# convert 10 arc-minutes to radians (10/60 * pi/180, approx 0.0029089)
NISTminTOradian(10)
|
6597c4733a779aea617e35862e50c85dc8a223d1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/domino/examples/domino.init.Rd.R
|
23b6132d9e7547c4c79c6281deb30d47263d6bed
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 300
|
r
|
domino.init.Rd.R
|
# Auto-extracted example script from the domino package documentation.
library(domino)

### Name: domino.init
### Title: domino.init
### Aliases: domino.init
### Keywords: init

### ** Examples

# The example itself is wrapped in "Not run" (##D lines) because it
# creates a project on disk / contacts the Domino service.
## Not run:
##D # in directory ./
##D domino.init("my-new-project")
##D # new project with name "my-new-project" is initialized inside current directory.
## End(Not run)
|
c2f6e1a8b6562004b3d2966ac2df888616a10056
|
5e3060ae9b5a223c31df528c8fcb5757842d8065
|
/run_analysis.R
|
098013750d5067de95ca4d8238c75573c26a41bc
|
[] |
no_license
|
umakanta143/Module3
|
0d7adf9366b579318149fe75bd60506ac7ca5c5e
|
601207a04c8f7a43aaeecfe7817c11534d5246d2
|
refs/heads/master
| 2020-03-21T13:06:17.931242
| 2018-06-25T12:02:00
| 2018-06-25T12:02:00
| 138,588,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,826
|
r
|
run_analysis.R
|
# Coursera "Getting and Cleaning Data" course project script: merges the
# UCI HAR train/test sets, extracts mean/std measurements, applies
# descriptive activity names, relabels variables, and writes a tidy
# per-subject/per-activity summary.

# NOTE(review): a hard-coded absolute working directory makes the script
# non-portable; consider parameterizing this path.
setwd('C:/Users/usahoo/Documents/coursera/UCI HAR Dataset')

# Initialize library used
library(data.table)
library(dplyr)

# Read Supporting Metadata
featureNames <- read.table("features.txt")
activityLabels <- read.table("activity_labels.txt", header = FALSE)

#Read the training data
subjectTrain <- read.table("train/subject_train.txt", header = FALSE)
activityTrain <- read.table("train/y_train.txt", header = FALSE)
featuresTrain <- read.table("train/X_train.txt", header = FALSE)

# Read the test data
subjectTest <- read.table("test/subject_test.txt", header = FALSE)
activityTest <- read.table("test/y_test.txt", header = FALSE)
featuresTest <- read.table("test/X_test.txt", header = FALSE)

# Merge the training and the test sets to create one data set
# (rbind stacks the train rows on top of the test rows)
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featuresTrain, featuresTest)

# Naming the columns (column 2 of features.txt holds the feature names)
colnames(features) <- t(featureNames[2])
colnames(activity) <- "Activity"
colnames(subject) <- "Subject"

## 1. Merges the training and the test sets to create one data set.
MergedData <- cbind(features,activity,subject)

##2.Extracts only the measurements on the mean and standard deviation for each measurement.
columnsWithMeanSTD <- grep(".*Mean.*|.*Std.*", names(MergedData), ignore.case=TRUE)
# columns 562 and 563 are the Activity and Subject columns appended above
requiredColumns <- c(columnsWithMeanSTD, 562, 563)
#dim(MergedData)
extractedData<-MergedData[,requiredColumns]
#dim(extractedData)

##3.Uses descriptive activity names to name the activities in the data set
extractedData$Activity <- as.character(extractedData$Activity)
for (i in 1:6){
  extractedData$Activity[extractedData$Activity == i] <- as.character(activityLabels[i,2])
}
extractedData$Activity <- as.factor(extractedData$Activity)

##4.Appropriately labels the data set with descriptive variable names.
# NOTE(review): patterns such as "-mean()" are treated as regular
# expressions; "()" matches an empty group, so only "-mean" is consumed
# and a literal "()" may remain in some names -- confirm whether
# fixed = TRUE was intended here.
colnames(extractedData)<-gsub("Acc","Accelerometer",colnames(extractedData))
names(extractedData)<-gsub("Gyro", "Gyroscope", names(extractedData))
names(extractedData)<-gsub("BodyBody", "Body", names(extractedData))
names(extractedData)<-gsub("Mag", "Magnitude", names(extractedData))
names(extractedData)<-gsub("-mean()", "Mean", names(extractedData), ignore.case = TRUE)
names(extractedData)<-gsub("-std()", "STD", names(extractedData), ignore.case = TRUE)
names(extractedData)<-gsub("-freq()", "Frequency", names(extractedData), ignore.case = TRUE)

##5.From the data set in step 4, creates a second, independent tidy data set with the
#average of each variable for each activity and each subject
extractedData$Subject <- as.factor(extractedData$Subject)
extractedData <- data.table(extractedData)
# aggregate() averages every remaining column within each
# Subject x Activity combination
tidydata<-aggregate(.~Subject+Activity,extractedData,mean)
write.table(tidydata, file = "tidy_data.txt", row.names = FALSE)
|
eee4410cff263486e3577792afc4f1f810c26c6c
|
bd9786e33a385aa616349a1bbdeff9ccabb251aa
|
/man/segmentation.Rd
|
e01495e4f2e7cc247d6e5560f7f18b1adface866
|
[] |
no_license
|
Chris35Wills/Lslide
|
ac5f612c0ecbbc74431e03f400950b7bc2461ec0
|
c671595b4d6f3cf14d19b688ff6a54f59922e3b9
|
refs/heads/master
| 2020-04-02T13:10:04.862781
| 2018-10-17T13:47:40
| 2018-10-17T13:47:40
| 154,470,216
| 0
| 1
| null | 2018-10-24T08:58:36
| 2018-10-24T08:58:36
| null |
UTF-8
|
R
| false
| true
| 9,091
|
rd
|
segmentation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segmentation.R
\name{segmentation}
\alias{segmentation}
\title{Segmentation}
\usage{
segmentation(Tool, Segments.Grid, Segments.Poly, Input.Grid,
Saga.Output.Grid = file.path(tempdir(),
paste0("SagaRepresentativenessOutputGrid", par.i, ".sgrd")),
Saga.Output.Lod = file.path(tempdir(),
paste0("SagaRepresentativenessOutputLod", par.i, ".sgrd")),
Output.Seeds = file.path(tempdir(), paste0("OutputSeed", par.i, ".sgrd")),
Fast.Representativeness.LevelOfGeneralisation = "10.0",
Saga.Similarity = file.path(tempdir(), paste0("SagaSimilarity", par.i,
".sgrd")), Saga.Segments.Seeds.Table = file.path(tempdir(),
paste0("SagaSegmentsSeedsTable", par.i, ".mtab")),
Saga.Segmentation.Normalize = "0", Saga.Segmentation.Neighbourhood = "0",
Saga.Segmentation.Method = "0", Saga.Segmentation.Sig.1 = "1.0",
Saga.Segmentation.Sig.2 = "1.0", Saga.Segmentation.Threshold = "0.0",
Saga.Segmentation.Refresh = "0", Saga.Segmentation.Leafsize = 256,
Split = "0", Grass.Segmentation.Threshold = NULL,
Grass.Segmentation.Weighted = FALSE,
Grass.Segmentation.Method = "region_growing",
Grass.Segmentation.Similarity = "euclidean",
Grass.Segmentation.Minsize = 15, Grass.Segmentation.Memory = 300,
Grass.Segmentation.Iterations = 50, Grass.Segmentation.Seeds = NULL,
Grass.Segmentation.Neighbourhood = "0", Segmentation.Boundary.Grid = NULL,
Grass.Segmentation.Goodness = paste0("Grass.Segmentation.Goodness", par.i),
AllVertices = "FALSE", NoData = FALSE, Mask = NULL,
NoData.Flag = -99999, show.output.on.console = FALSE, Seed.Method = "",
Seed.Generation.Variance = paste0(tempdir(),
paste0("SeedGenerationVariance", par.i, ".sgrd")),
Seed.Generation.Points = file.path(tempdir(), paste0("SeedGenerationPoints",
par.i, ".shp")), Seed.Generation.Type = "0",
Seed.Generation.Scale = "10.0", Generalisation.Flac = FALSE,
Generalization.Mode = "1", Generalization.Radius = "1",
Generalization.Threshold = "0.0", env = RSAGA::rsaga.env(),
Grass.SLIC.Iter = 10, Grass.SLIC.Superpixels = 200, Grass.SLIC.Step = 0,
Grass.SLIC.Compactness = 1, Grass.SLIC.Superpixels.MinSize = 1,
Grass.SLIC.Memory = 300, Grass.SLIC.Perturb = 0,
burn.Boundary.into.Segments = FALSE, estimateScaleParameter = FALSE,
Mode.Filter.Flac = FALSE, Mode.Filter.Size = 7,
Mode.Filter.Segment.MinSize = 3, par.i = "", Sieving.Flac = FALSE,
Sieving.Mode = "0", Sieving.Thresh = 4, Sieving.Expand = 4, ...)
}
\arguments{
\item{Tool}{GRASS, SAGA or GRASS Superixels SLIC. Definition of open-source software which will be used}
\item{Segments.Grid}{output path of raster with segments}
\item{Segments.Poly}{output path of polygon with segments}
\item{Input.Grid}{vector containing grid(s) for segmentation. It is possible to add multiple gridsm, as well as different grids even in combination of SAGA and GRASS. By using SAGA and GRASS combination the following separation must be used: '<>' (SAGA before GRASS grids!)}
\item{Saga.Output.Grid}{output of FAST REPRESENTATIVENESS in SAGA. Default: temp}
\item{Saga.Output.Lod}{output Lod of Representativeness Function in SAGA. Default: temp}
\item{Output.Seeds}{output of seed points as raster, used for segmentation. Default: temp}
\item{Fast.Representativeness.LevelOfGeneralisation}{determining number of seed points. Default: "10.0"}
\item{Saga.Similarity}{output of similarity grid. Default: temp}
\item{Saga.Segments.Seeds.Table}{table of seeds information. Default: temp}
\item{Saga.Segmentation.Normalize}{normalisation during imagery segmentation. Default: "0"}
\item{Saga.Segmentation.Neighbourhood}{neighbourhood considered during imagery segmentation. Default: "0" (4, alternative: "1" for 8)}
\item{Saga.Segmentation.Method}{segmentation method during imagery segmentation. Default: "0" (feature space and position)}
\item{Saga.Segmentation.Sig.1}{variance in feature space in imagery segmentation. Default: "1.0"}
\item{Saga.Segmentation.Sig.2}{variance in position space in imagery segmentation. Default: "1.0"}
\item{Saga.Segmentation.Threshold}{similarity threshold for joining pixel to segments in imagery segmentation. Default: "0.0"}
\item{Saga.Segmentation.Refresh}{refresh image after imagery segmentation. Default: "0"}
\item{Saga.Segmentation.Leafsize}{parameter for speed optimation in imagery segmentation. Default: 256}
\item{Split}{split polygons to singlepart in vectorising grid classes. Default: "0"}
\item{Grass.Segmentation.Threshold}{similarity threshold for joining pixel to segments. Default: NULL, 0.0 is not allowed}
\item{Grass.Segmentation.Weighted}{option of weighing input grids in segmentation. Default: "FALSE"}
\item{Grass.Segmentation.Method}{type of GRASS Segmentation. Default: "region_growing"}
\item{Grass.Segmentation.Similarity}{distance measurement of similarity. Default: "euclidean"}
\item{Grass.Segmentation.Minsize}{minsize of segment. Default: 15}
\item{Grass.Segmentation.Memory}{memory to be used for segmentation. Default: 300}
\item{Grass.Segmentation.Iterations}{amount of allowed iterations. Default: 50}
\item{Grass.Segmentation.Seeds}{input of seeds raster. Enables bottom-up segmentation. Default: NULL}
\item{Segmentation.Boundary.Grid}{input of boundary raster. Enables top-down (or hierarchical) segmentation. Default: NULL. NULL values must be 0 (or any other not segment value!).}
\item{Grass.Segmentation.Goodness}{name for output goodness of fit estimate map. Default:"Grass.Segmentation.Goodness"}
\item{AllVertices}{use all vertices by vectorising grid classes. Default: "FALSE"}
\item{NoData}{input data contains NoData value. Default: FALSE}
\item{Mask}{mask raster to mask NoData from input. Default: NULL}
\item{show.output.on.console}{show output on console. Default: FALSE}
\item{Seed.Method}{type of seed method for getting seeds. Default: "" (alternative: "Fast Representativeness", "Seed Generation", "Superpixels SLIC")}
\item{Seed.Generation.Variance}{output raster with variance of seed generation. Default: temp}
\item{Seed.Generation.Points}{output of seed points as shapefile. Default: temp}
\item{Seed.Generation.Type}{option of seed generation type. Default:"0" (minima of variance, alternative: maxima of variance)}
\item{Seed.Generation.Scale}{determining number of seed points in seed generation. Default: "10.0"}
\item{Generalisation.Flac}{performing (multiple) majority filter on segments. Default: FALSE}
\item{Generalization.Mode}{search mode by filtering: Default: "1" (circle, alternative: square)}
\item{Generalization.Threshold}{threshold for applying majority filters. Default: "0.0"}
\item{env}{environment of RSAGA. Default: RSAGA::rsaga.env()}
\item{Grass.SLIC.Iter}{maximum number of iterations. Default: 10}
\item{Grass.SLIC.Superpixels}{approximate number of output super pixels. Default: 200}
\item{Grass.SLIC.Step}{distance (number of cells) between initial super pixel centers. A step size > 0 overrides the number of super pixels. Default: 0}
\item{Grass.SLIC.Compactness}{compactness. A larger value causes more compact superpixels. Default: 1.0}
\item{Grass.SLIC.Superpixels.MinSize}{minimum superpixel size. Default: 1}
\item{Grass.SLIC.Memory}{memory in MB. Default: 300}
\item{Grass.SLIC.Perturb}{Perturb initial super pixel centers. Percent of intitial superpixel radius. Default: 0, range: 0-100}
\item{burn.Boundary.into.Segments}{vector specifing if boundary grid is burned into segmentation (1) or seeds (2). Default: FALSE, maximum length: 2}
\item{estimateScaleParameter}{must be only be TRUE when scale parameter function is used. Default: FALSE}
\item{Mode.Filter.Flac}{re-assign objects of a specific size based on mode values to its surroundings (moving window). Default: FALSE}
\item{Mode.Filter.Size}{moving window size of mode-filter. Default: 3}
\item{Mode.Filter.Segment.MinSize}{objects smaller and equal to this size are selected for filtering. Default: 3}
\item{par.i}{run number. Default: ""}
\item{Sieving.Flac}{perform sieving. Default: FALSE}
\item{Sieving.Mode}{sieving mode. Default:"0"}
\item{Sieving.Thresh}{minsize of clumps. Default: 4}
\item{Sieving.Expand}{expand cells using majority filter. radius in cell sizes. Default:4}
}
\description{
This function uses the open-source software packages GRASS and SAGA GIS
to perform an image segmentation. It is possible to choose between
SAGA and GRASS GIS segmentation.\cr \cr
In SAGA GIS, the tools SEED GENERATION or FAST REPRESENTATIVENESS
are used for computing seed points. Then, the SEEDED REGION GROWING
algorithm is used for the segmentation.\cr \cr
In GRASS GIS, the tool OBJECT-SEGMENTATION (i.segment) is used
for the segmentation. There is also the possibility to use
the new SLIC algorithm.\cr \cr
Moreover, there is the option to compute a generalisation of
segments by (multiple) majority filter (SAGA GIS) at the end.
}
\keyword{Clustering}
\keyword{Iterative}
\keyword{Linear}
\keyword{Simple}
\keyword{growing,}
\keyword{region}
\keyword{segmentation,}
\keyword{superpixels}
|
02568a7c77d559d284629d4240da6f40cba3d878
|
66a2afd9c0dab1d55e6d236f3d85bc1b61a11a66
|
/man/sf_list_rest_api_versions.Rd
|
0398a74243610b8ee112ee2888ffb2736edf9cef
|
[
"MIT"
] |
permissive
|
StevenMMortimer/salesforcer
|
833b09465925fb3f1be8da3179e648d4009c69a9
|
a1e1e9cd0aa4e4fe99c7acd3fcde566076dac732
|
refs/heads/main
| 2023-07-23T16:39:15.632082
| 2022-03-02T15:52:59
| 2022-03-02T15:52:59
| 94,126,513
| 91
| 19
|
NOASSERTION
| 2023-07-14T05:19:53
| 2017-06-12T18:14:00
|
R
|
UTF-8
|
R
| false
| true
| 613
|
rd
|
sf_list_rest_api_versions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-org.R
\name{sf_list_rest_api_versions}
\alias{sf_list_rest_api_versions}
\title{List REST API Versions}
\usage{
sf_list_rest_api_versions()
}
\value{
\code{list}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#stable}{\figure{lifecycle-stable.svg}{options: alt='[Stable]'}}}{\strong{[Stable]}}
Lists summary information about each Salesforce version currently available,
including the version, label, and a link to each version\'s root
}
\examples{
\dontrun{
sf_list_rest_api_versions()
}
}
|
8a292552ca3be3844efe3e6ec00aec900607ac4f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qualityTools/examples/units-methods.Rd.R
|
2d977d7a6502c537ca66ff076c1dab197bfdba2c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 504
|
r
|
units-methods.Rd.R
|
# Auto-extracted example script from the qualityTools package
# documentation, demonstrating the units()/names() accessor methods on a
# fractional factorial design object.
library(qualityTools)

### Name: units-methods
### Title: Get and set methods
### Aliases: units units<- units-methods units,facDesign-method
###   units<-,facDesign-method units,mixDesign-method
###   units<-,mixDesign-method units,taguchiDesign-method
###   units<-,taguchiDesign-method units,pbDesign-method
###   units<-,pbDesign-method

### ** Examples

#NA in response column
# idiomatic '<-' assignment (the original used '=')
fdo <- fracDesign(k = 2)
summary(fdo)

# attach measurement units and descriptive factor names, then show the
# updated design summary
units(fdo) <- c("min","C")
names(fdo) <- c("Time", "Temperature")
summary(fdo)
|
8bbef6f4ece3a79765d8c6ea6f3f329fc65931dc
|
297e214ceb537b2f812b042a0bf855a427fe75c0
|
/Aula 9 e 10 - Análise Corresp. Simples e Múltipla - ANACOR e ACM/SCRIPT - Exercícios.R
|
673272dc708fcd987b49c1ad6968a586b3141182
|
[] |
no_license
|
Bene31/MBA
|
e43afcb8029c9c594ab5596694344c2a720c2265
|
69196e512c423a9e02325a33a09548a89f9da68a
|
refs/heads/master
| 2023-07-03T22:15:29.819245
| 2021-08-08T16:15:20
| 2021-08-08T16:15:20
| 389,215,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,830
|
r
|
SCRIPT - Exercícios.R
|
# Pacotes a serem instalados e carregados ---------------------------------
pacotes <- c("plotly","tidyverse","ggrepel","sjPlot","reshape2","FactoMineR",
"cabootcrs","knitr","kableExtra","gifski","gganimate","factoextra",
"plot3D","viridis")
if(sum(as.numeric(!pacotes %in% installed.packages())) != 0){
instalador <- pacotes[!pacotes %in% installed.packages()]
for(i in 1:length(instalador)) {
install.packages(instalador, dependencies = T)
break()}
sapply(pacotes, require, character = T)
} else {
sapply(pacotes, require, character = T)
}
# Using simple correspondence analysis (ANACOR) to show temporal behaviour
# through an animation.
load("covid_america_weekly.RData")
# Display the raw data as a styled HTML table
covid_america_weekly %>%
  kable() %>%
  kable_styling(bootstrap_options = "striped",
                full_width = TRUE,
                font_size = 12)
# The first step is to fit one ANACOR per time period. Here we fit the
# ANACOR for week 78 since the first reported COVID-19 case in the
# American continent.
# Build the country x lethality-quintile contingency table
tab <- table(covid_america_weekly$country,
             covid_america_weekly$lethality_Q5)
tab
# Chi-squared test of association between the two categorical variables
qui2_covid <- chisq.test(tab)
qui2_covid
# Heat map of the adjusted standardised residuals
data.frame(qui2_covid$stdres) %>%
  rename(country = 1,
         let_q5 = 2) %>%
  ggplot(aes(x = country, y = let_q5, fill = Freq, label = round(Freq,3))) +
  geom_tile() +
  geom_text(size = 3, angle = 90) +
  scale_fill_gradient2(low = "#440154FF",
                       mid = "white",
                       high = "#FDE725FF",
                       midpoint = 0) +
  labs(x = NULL, y = NULL) +
  theme(legend.title = element_blank(),
        panel.background = element_rect("white"),
        legend.position = "none",
        axis.text.x = element_text(angle = 90))
# Fit the correspondence analysis itself:
anacor <- CA(tab)
# Plot the perceptual map in a more polished way:
# Collect the row and column coordinates into a single object
ca_coordenadas <- rbind(anacor$row$coord, anacor$col$coord)
ca_coordenadas
# Count the number of categories per variable (columns 1 and 9 of the data)
id_var <- apply(covid_america_weekly[,c(1,9)],
                MARGIN = 2,
                FUN = function(x) nlevels(as.factor(x)))
id_var
# Join the coordinates with the category counts captured above
ca_coordenadas_final <- data.frame(ca_coordenadas,
                                   Variable = rep(names(id_var), id_var))
ca_coordenadas_final
# Two-dimensional perceptual map,
# with axis labels showing the inertia explained by each dimension:
ca_coordenadas_final %>%
  rownames_to_column() %>%
  rename(Category = 1) %>%
  ggplot(aes(x = Dim.1,
             y = Dim.2,
             label = Category,
             color = Variable,
             shape = Variable)) +
  geom_point(size = 2) +
  geom_text_repel(max.overlaps = 100,
                  size = 3) +
  geom_hline(yintercept = 0, linetype = "dashed", color = "gray50") +
  geom_vline(xintercept = 0, linetype = "dashed", color = "gray50") +
  labs(x = paste("Dimension 1:", paste0(round(anacor$eig[1,2], digits = 2), "%")),
       y = paste("Dimension 2:", paste0(round(anacor$eig[2,2], digits = 2), "%"))) +
  scale_color_viridis_d(option = "viridis") +
  theme(panel.background = element_rect("white"),
        panel.border = element_rect("NA"),
        panel.grid = element_line("gray95"),
        legend.position = "none")
# Building the animation over time ------------------------------------------
# The data set loaded below contains the coordinates of every ANACOR fitted,
# from week 2 through week 78, for the Americas. Two analysis dimensions
# were kept.
load("coords_covid_america_byweek.RData")
# Display the data
coords_covid_america_byweek %>%
  kable() %>%
  kable_styling(bootstrap_options = "striped",
                full_width = TRUE,
                font_size = 12)
# Overlay the coordinates of all the weekly perceptual maps on a single plane
coords_covid_america_byweek %>%
  ggplot() +
  geom_point(aes(x = Dim.1, y = Dim.2,
                 color = country %in% c("L1","L2","L3","L4","L5"), size = 3,
                 shape = country %in% c("L1","L2","L3","L4","L5"))) +
  geom_text_repel(aes(x = Dim.1, y = Dim.2,
                      label = country),
                  max.overlaps = 3000) +
  scale_color_viridis_d() +
  labs(x = "Dimensão 1",
       y = "Dimensão 2") +
  theme(legend.position = "none") -> mapas_perceptuais
# Make the transition between the perceptual maps follow the passing of
# the weeks
mapa_animado <- mapas_perceptuais + transition_time(week) +
  enter_fade() +
  labs(title = "Week: {frame_time}") +
  exit_fade()
# Use a white background for the plots
theme_set(theme_bw())
# Final result: render the animation at one frame per week
animate(mapa_animado, renderer = gifski_renderer(), fps = 1)
# Combining techniques --------------------------------------------------------
# For didactic purposes we reuse two data sets seen before. First we run an
# MCA (ACM), then a PCA, and finally a cluster analysis.
load("notasfatorial.RData")
# Display the 'notasfatorial' data
notasfatorial %>%
  kable() %>%
  kable_styling(bootstrap_options = "striped",
                full_width = TRUE,
                font_size = 12)
load(file = "perfil_investidor_aplicacao.RData")
# Display the 'perfil_investidor_aplicacao' data
perfil_investidor_aplicacao %>%
  kable() %>%
  kable_styling(bootstrap_options = "striped",
                full_width = TRUE,
                font_size = 12)
# Join the two data sets on the student identifier
base_dados <- notasfatorial %>%
  left_join(perfil_investidor_aplicacao, by = "estudante")
# Display the combined data set that will be used from here on
base_dados %>%
  kable() %>%
  kable_styling(bootstrap_options = "striped",
                full_width = TRUE,
                font_size = 12)
# Let's start with the MCA (ACM):
# Running the MCA --------------------------------------------------------------
# 1. Chi-squared tests for each pair of categorical variables to be used
# A) Profile x Investment type
tab_perfil_aplicacao <- table(perfil_investidor_aplicacao$perfil,
                              perfil_investidor_aplicacao$aplicacao)
qui2_perfil_aplicacao <- chisq.test(tab_perfil_aplicacao)
qui2_perfil_aplicacao
# B) Profile x Marital status
tab_perfil_estadocivil <- table(perfil_investidor_aplicacao$perfil,
                                perfil_investidor_aplicacao$estado_civil)
tab_perfil_estadocivil
qui2_perfil_estadocivil <- chisq.test(tab_perfil_estadocivil)
qui2_perfil_estadocivil
# C) Investment type x Marital status (table only; no test is run here)
tab_aplicacao_estadocivil <- table(perfil_investidor_aplicacao$aplicacao,
                                   perfil_investidor_aplicacao$estado_civil)
tab_aplicacao_estadocivil
# 2. The MCA itself, on the three categorical columns (6 to 8)
# NOTE(review): FactoMineR::MCA() documents method = "Indicator"/"Burt";
# "Indicador" is not "Burt" and so falls through to the indicator-matrix
# branch -- confirm that this spelling was intentional.
ACM <- MCA(base_dados[, 6:8], method = "Indicador")
# 3. Store the observation coordinates back into our data set
base_dados[c("D1","D2","D3","D4","D5")] <- data.frame(ACM$ind$coord)
# 4. To simplify the rest of the exercise, remove the original categorical
# variables, since their coordinates now represent them.
base_dados <- base_dados[,-c(6:8)]
# Running the PCA ---------------------------------------------------------
# 1. prcomp() works best on standardised data. We are not using the MCA
# coordinates yet, but we standardise everything now because the subsequent
# clustering step will require it.
base_dados_std <- base_dados %>%
  column_to_rownames("estudante") %>%
  scale() %>%
  data.frame()
# 2. The PCA itself, on the four original metric variables
AFCP <- prcomp(base_dados_std[,1:4])
AFCP
# 3. We keep the factors whose eigenvalues exceed 1. To save them into our
# data set, first derive the factor scores (loadings scaled by the sd):
scores_fatoriais <- t(AFCP$rotation)/AFCP$sdev
# Assuming only F1 and F2 as indicators, compute the factor scores
score_D1 <- scores_fatoriais[1,]
score_D1
score_D2 <- scores_fatoriais[2,]
score_D2
# Weight each observation's standardised values by the factor scores
F1 <- t(apply(base_dados_std[,1:4], 1, function(x) x * score_D1))
F2 <- t(apply(base_dados_std[,1:4], 1, function(x) x * score_D2))
F1
F2
F1 <- data.frame(F1) %>%
  mutate(fator1 = rowSums(.) * 1)
F1
# BUG FIX: the original read `data.frame(F1)` here, so factor 2 was computed
# from factor 1's weighted matrix -- which by then also contained the
# `fator1` column, double-counting it. Factor 2 must be built from F2.
F2 <- data.frame(F2) %>%
  mutate(fator2 = rowSums(.) * 1)
F2
base_dados_std[c("F1","F2")] <- cbind(F1$fator1, F2$fator2)
# 4. For didactic reasons, drop the original metric variables from the
# standardised data set (their information now lives in F1/F2):
base_dados_std <- base_dados_std[,-c(1:4)]
# Running the clustering -----------------------------------------------------
# 1. K-means with two clusters
# NOTE(review): kmeans() starts from random centres and no set.seed() is
# called, so the cluster labels are not reproducible across runs.
cluster_estudantes <- kmeans(base_dados_std, centers = 2)
# 2. Inspect the solution in the plane of the first two dimensions
fviz_cluster(cluster_estudantes, data = base_dados_std)
# 3. Another way of looking at the data:
# capture the coordinates of the Z axis as well.
plot <- fviz_cluster(cluster_estudantes, data = base_dados_std)
View(plot)
# Note that only the X and Y coordinates are returned. We "adapt" the
# fviz_cluster() algorithm so that it also returns the Z-axis values:
fviz_cluster
# NOTE(review): fviz_cluster_adaptado() is not defined anywhere in this
# script -- it must be created (presumably by editing the fviz_cluster body
# printed above) before the lines below can run.
fviz_cluster_adaptado(object = cluster_estudantes,
                      data = base_dados_std)
# Apparently nothing changed, right? But the returned object now carries
# the third dimension:
coordenadas <- fviz_cluster_adaptado(object = cluster_estudantes,
                                     data = base_dados_std)
View(coordenadas)
# 3-D scatter of the three dimensions, coloured by cluster membership
scatter3D(x = coordenadas$data$x,
          y = coordenadas$data$y,
          z = coordenadas$data$Dim.3,
          zlim = c(-3,3),
          ylim = c(-3,3),
          xlim = c(-3,3),
          pch = 19,
          bty = "b2",
          colvar = as.numeric(coordenadas[["data"]][["cluster"]]),
          col = viridis(200))
# End -------------------------------------------------------------------------
|
c2bb16f0d8b7bcb354dcd39ef3fc7e37086ce0ff
|
73ddf6aee285774a76a365f5144dafc3afae8ba8
|
/R/Veneer.R
|
e6fd9eea65f37dd93747493eea13dab5515fa206
|
[] |
no_license
|
matt-s-gibbs/swtools
|
d702fa44255c14b646d99b29bbe2296f6786ea2f
|
356e8df9e86e7c17e2d4e219352a2b6ea11adbde
|
refs/heads/master
| 2023-05-27T23:01:20.044583
| 2023-05-24T07:50:54
| 2023-05-24T07:50:54
| 125,963,455
| 4
| 1
| null | 2018-04-05T04:26:33
| 2018-03-20T05:06:29
|
R
|
UTF-8
|
R
| false
| false
| 9,684
|
r
|
Veneer.R
|
#' Run Source using Veneer
#'
#' @param StartDate Optional. Start date for simulation. Must be dd/mm/yyyy
#' @param EndDate Optional. End date for simulation. Must be dd/mm/yyyy
#' @param InputSet Optional. Input set to use
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return Nothing to the R environment.
#'
#' If not set, the configuration parameters (StartDate, EndDate, InputSet)
#' default to whatever is specified in the Source configuration in the GUI.
#'
#' The console will show any errors returned by Veneer.
#'
#' @examples
#' \dontrun{
#' VeneerRunSource()
#' VeneerRunSource("01/07/2017","01/02/2018","NoDams")
#' }
#'
#'@export
VeneerRunSource<-function(StartDate=NULL,EndDate=NULL,InputSet=NULL,baseURL="http://localhost:9876")
{
  # Build the run-configuration payload, including only the settings the
  # caller actually supplied; omitted settings fall back to whatever the
  # Source project itself is configured with.
  X<-list()
  if(!is.null(StartDate)) X[["StartDate"]]<-StartDate
  if(!is.null(EndDate)) X[["EndDate"]]<-EndDate
  if(!is.null(InputSet)) X[["SelectedInputSet"]]<-InputSet
  X<-jsonlite::toJSON(X,auto_unbox = TRUE)
  # POST the payload to Veneer's /runs endpoint to trigger the simulation
  A<-httr::POST(paste0(baseURL,"/runs"),body=X,httr::content_type_json())
  #write error message to console if there was one
  # NOTE(review): A[[6]] indexes into the raw content slot of the httr
  # response object, and the substr offsets assume a fixed JSON error layout
  # ('{"Message":...'); httr::content(A) would be more robust -- confirm
  # before changing.
  if(substr(rawToChar(A[[6]]),3,9)=="Message"){
    return(substr(strsplit(rawToChar(A[[6]]),",")[[1]][1],13,1000))
  }else{
    return("Run Successful")
  }
}
#' Update a function value or expression. Function must exist before being updated.
#'
#' @param Name Name of the function without the "$", e.g. f_ScaleFactor
#' @param Expression Expression to change it to, e.g. 1.2
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return Nothing to the R environment.
#'
#' @examples
#' \dontrun{
#' VeneerSetFunction("f_ScaleFactor",1.2)
#' VeneerSetFunction("f_TargetLevel","if($m_Flow<1000,3.2,3.5)")
#' }
#'
#'@export
VeneerSetFunction <- function(Name, Expression, baseURL = "http://localhost:9876") {
  # Veneer stores function names with a leading "$"; the caller omits it.
  payload <- jsonlite::toJSON(
    list("Expression" = as.character(Expression), "Name" = paste0("$", Name)),
    auto_unbox = TRUE
  )
  httr::PUT(
    paste0(baseURL, "/functions/", Name),
    body = payload,
    httr::content_type_json()
  )
}
#' Change a Source piecewise table using Veneer
#'
#' @param data A 2 column data.frame or matrix with the data to load into the piecewise table.
#' @param pw_table The name of the piecewise linear variable, without the "$".
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return Nothing to the R environment.
#'
#' @examples
#' \dontrun{
#' data<-data.frame(X=seq(1,5),Y=seq(1,5))
#' VeneerSetPiecewise(data,"pw_table")
#' }
#'
#' @export
VeneerSetPiecewise <- function(data, pw_table, baseURL = "http://localhost:9876") {
  # The piecewise endpoint expects exactly two columns: lookup (X) and result (Y).
  if (ncol(data) != 2) {
    stop("Data for piecewise linear must have 2 columns")
  }
  body <- jsonlite::toJSON(
    list(
      Entries = as.matrix(data),
      XName = "Lookup",
      YName = "Result"
    ),
    auto_unbox = TRUE
  )
  # Name in here, or not?? (left over from the original author)
  httr::PUT(
    paste0(baseURL, "/variables/", pw_table, "/Piecewise"),
    body = body,
    httr::content_type_json()
  )
}
#' Get data from a Source piecewise table using Veneer
#'
#' @param pw_table The name of the piecewise linear variable, without the $
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return a matrix with the data from the piecewise table.
#'
#'
#'
#' @examples
#' \dontrun{
#' VeneerGetPiecewise(data,"pw_table")
#' }
#'
#'
#' @export
#' @importFrom utils URLencode
VeneerGetPiecewise <- function(pw_table, baseURL = "http://localhost:9876") {
  # Fetch the piecewise variable from Veneer and keep only its Entries matrix.
  url <- URLencode(paste0(baseURL, "/variables/", pw_table, "/Piecewise"))
  jsonlite::fromJSON(url)$Entries
}
#'Get a time series result from Source using Veneer
#' @param TSURL, the URL of the time series to retrieve
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return a zoo time series of the data
#'
#' The URL of the time series must be specified, by interrogation using a browser or other analysis.
#' By default Source returns SI units. Some conversion is undertaken:
#' * Flow converted to ML/d
#' * Volume converted to ML
#' * Area converted to ha
#'
#' Spaces are OK, like in the example below (dont need to insert %20 for example).
#'
#' @examples
#' \dontrun{
#' VeneerGetTS("/runs/latest/location/EndofSystem/element/Downstream Flow/variable/Flow")
#' }
#'
#' @export
#' @importFrom utils URLencode
VeneerGetTS <- function(TSURL, baseURL = "http://localhost:9876") {
  resp <- jsonlite::fromJSON(URLencode(paste0(baseURL, TSURL)))
  # Veneer serves dates as m/d/Y strings and values in SI units.
  ts <- zoo::zoo(resp$Events$Value, zoo::as.Date(resp$Events$Date, format = "%m/%d/%Y"))
  # Convert the common SI units into the units used by this package.
  # (Units is a single string, so at most one branch applies.)
  units <- resp$Units
  if (units == "m\U00B3/s") {
    ts <- ts * 86.4 # m3/s to ML/d
  } else if (units == "m\U00B3") {
    ts <- ts / 1000 # m3 to ML
  } else if (units == "m\U00B2") {
    ts <- ts / 10000 # m2 to ha
  } else if (units == "kg/m\U00B3") {
    ts <- ts * 1000 # kg/m³ to mg/L
  }
  ts
}
#' Get all time series recorded in Source of a given variable type
#' @param variable Which variable to retrieve. Defaults to Flow.
#' @param run Which run to retrieve from. Defaults to the latest
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return a zoo time series, with each output as a column
#'
#' @examples
#' \dontrun{
#' VeneerGetTSbyVariable() #returns all flow outputs recorded in the latest run
#' VeneerGetTSbyVariable("Water Surface Elevation",1)
#' }
#'
#' @export
#'
VeneerGetTSbyVariable<-function(variable="Flow",run="latest",baseURL="http://localhost:9876")
{
  # Fetch the run summary, keep only the outputs recorded for `variable`,
  # then download each individual time series.
  Results<-jsonlite::fromJSON(paste0(baseURL,"/runs/",run))
  X<-Results$Results %>% dplyr::filter(.data$RecordingVariable==variable)
  TS<-lapply(X$TimeSeriesUrl,function(x) VeneerGetTS(x,baseURL))
  if(length(TS)>0)
  {
    # Bind the series column-wise into one zoo object; the first series'
    # date index is reused for all columns.
    TS<-zoo::zoo(matrix(unlist(TS),ncol=length(TS)),zoo::index(TS[[1]]))
    # A multi-column zoo takes colnames(); a single-column one takes names().
    if(ncol(TS)>1){
      colnames(TS)<-X$NetworkElement
    }else
    {
      names(TS)<-X$NetworkElement
    }
    return(TS)
  }else
  {
    # Nothing recorded for this variable: fail loudly and list what exists.
    stop(paste("No results for variable",variable,"found for run",run,"\n",
               "Recorded variables are:",paste(unique(Results$Results$RecordingVariable))))
  }
}
#' Get a vector of the type of time series variables recorded
#' @param run Which run to retrieve from. Defaults to the latest
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return a vector of variable types (e.g. Downstream flow, Downstream Flow Concentration, water surface elevation)
#'
#' @examples
#' \dontrun{
#' VeneerGetTSVariables()
#' }
#'
#' @export
#'
VeneerGetTSVariables <- function(run = "latest", baseURL = "http://localhost:9876") {
  # Query the run summary and report each distinct recorded variable once.
  run_info <- jsonlite::fromJSON(paste0(baseURL, "/runs/", run))
  unique(run_info$Results$RecordingVariable)
}
#' Get all time series recorded in Source for a given node
#' @param Node Name of node to retrieve Time Series for
#' @param run Which run to retrieve from. Defaults to the latest
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return a zoo time series, with each variable as a column
#'
#' @examples
#' \dontrun{
#' VeneerGetTSbyNode("Storage 1")
#' }
#'
#' @export
VeneerGetTSbyNode <- function(Node, run = "latest", baseURL = "http://localhost:9876") {
  # Fetch the run summary and keep only the outputs recorded for this node.
  Results <- jsonlite::fromJSON(paste0(baseURL, "/runs/", run))
  X <- Results$Results %>% dplyr::filter(.data$NetworkElement == Node)
  TS <- lapply(X$TimeSeriesUrl, function(x) VeneerGetTS(x, baseURL))
  if (length(TS) > 0) {
    # Bind the series column-wise into one zoo object; the first series'
    # date index is reused for all columns.
    TS <- zoo::zoo(matrix(unlist(TS), ncol = length(TS)), zoo::index(TS[[1]]))
    if (ncol(TS) > 1) {
      colnames(TS) <- X$RecordingVariable
    } else {
      # CONSISTENCY FIX: a single recorded series previously came back
      # unlabelled, unlike VeneerGetTSbyVariable() which names it; label it
      # the same way here.
      names(TS) <- X$RecordingVariable
    }
    return(TS)
  } else {
    # Nothing recorded for this node: fail loudly and list what exists.
    stop(paste("No results for node", Node, "found for run", run, "\n",
               "Recorded Nodes are:", paste(unique(Results$Results$NetworkElement))))
  }
}
#' Get vector of InputSets
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return vector containing info on Input Sets in the model
#'
#' @examples
#' \dontrun{
#' VeneerGetInputSets()
#' }
#'
#' @export
VeneerGetInputSets <- function(baseURL = "http://localhost:9876") {
  # The /InputSets endpoint returns the model's input-set metadata as parsed JSON.
  jsonlite::fromJSON(paste0(baseURL, "/InputSets"))
}
#' Get a vector of node names for a given type
#' @param NodeType The node to return the names of. The icon in /network is searched for this name
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#'@return vector of node names matching the specified node type
#'
#'@examples
#'\dontrun{
#'VeneerGetNodesbyType("Weir")
#'}
#'
#'@export
VeneerGetNodesbyType <- function(NodeType, baseURL = "http://localhost:9876") {
  A <- jsonlite::fromJSON(paste0(baseURL, "/network"))
  # Find the first icon path whose name matches the requested node type;
  # grep(...)[1] yields NA when there is no match.
  iconname <- A$features$properties$icon[grep(NodeType, A$features$properties$icon)[1]]
  # FIX: use the scalar `&&` in a scalar if() condition (the original used the
  # vectorised `&`), and report the server actually queried rather than a
  # hard-coded localhost URL, so the hint stays correct for remote servers.
  if (length(iconname) == 1 && is.na(iconname)) {
    stop(paste(NodeType, "not found in the model. Try a different name, capitalisation matters. Search",
               paste0(baseURL, "/network"),
               "to see options, look for \"icon\""))
  } else {
    # Return the names of all nodes that share the matched icon.
    return(A$features$properties %>%
             dplyr::filter(.data$icon == iconname) %>%
             dplyr::select(.data$name))
  }
}
#' Get the number of the latest run
#' @param baseURL URL of the Veneer server. Defaults to the veneer default.
#'
#' @return integer of the latest run number
#'
#' @examples
#' \dontrun{
#' VeneerlatestRunNumber()
#' }
#'
#' @export
VeneerlatestRunNumber <- function(baseURL = "http://localhost:9876") {
  runs <- jsonlite::fromJSON(paste0(baseURL, "/Runs"))
  # The run number is the third path segment of the last run's URL,
  # e.g. "/runs/7" -> 7L.
  last_url <- runs[nrow(runs), ]$RunUrl
  as.integer(strsplit(last_url, "/")[[1]][3])
}
|
b1ca5e7f96faba6e2c2189a9241289a7a5eddf9f
|
7da3d24f996cffd0b436e0693930b31f2710edde
|
/CapitalBikeShareApp/ui.R
|
a431b74ed921be5ca6c6e9605314c6a701e86f9b
|
[] |
no_license
|
sylvest00/DDP
|
4975b4d375cb12a80b0ed7d97bc8115855ed433c
|
b6edf6bc82631f01077a45049f31b850c945311d
|
refs/heads/master
| 2021-01-11T18:05:08.978927
| 2017-01-30T08:33:09
| 2017-01-30T08:33:09
| 79,486,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,216
|
r
|
ui.R
|
# Developing Data Products Final Project
# Coursera JHU Data Science Specialization
# github.com/sylvest00
#
# Project: Visualizing Capital Bike Share Data
# ui.R file -- UI only; the plots/model are rendered in server.R
# January 29, 2017
library(shiny)
# UI: a date-range-driven dashboard of Capital Bike Share rentals
shinyUI(fluidPage(
  # Application title
  titlePanel(paste("Capital Bike Share Data:",
                   "Visualize the Number of Bike Rentals & Rental Hours by Account Type",
                   sep = "\n")),
  # Sidebar: usage instructions plus the date-range input that drives the plots
  sidebarLayout(
    sidebarPanel(
      helpText("INSTRUCTIONS: To visualize the number of bike rentals and total rent time, choose a
range of dates (between April 1, 2016 and June 30, 2016) that
you would like to inspect below. Press the \"RUN!\" button
when you are ready to plot the data."),
      helpText("A bar plot will be displayed if the number of days selected is
less than or equal to 4, and a line plot will be displayed if
the number of days selected exceeds 4."),
      helpText("Note: The data set is large. Date ranges greater than 15 days
will run slowly."),
      # Input date ranges for data
      dateRangeInput('dateRange',
                     label = h3('Select Date(s)'),
                     start = '4/1/2016', end = '4/15/2016',
                     min = '4/1/2016', max = '6/30/2016',
                     separator = ' - ',
                     format = 'MM dd, yyyy',
                     startview = 'year'),
      # Plots refresh only when this button is pressed
      submitButton('Run!')
    ),
    # Main panel: styled title plus the two side-by-side plots from server.R
    mainPanel(
      textOutput("plotTitle"),
      tags$head(tags$style("#plotTitle{color: black;
font-size: 20px;
font-style: italic;}"
      )
      ),
      fluidRow(splitLayout(cellWidths = c("50%", "50%"),
                           plotOutput("totalRiders_linePlot"),
                           plotOutput("duration_linePlot")
      )
      )
    )
  )
))
|
6b73222be7d4fa2c1ec0587403a1dd710d6d9347
|
27dd6b958ade46a21cb716da6100b60a37aeee28
|
/GridSearch/grid.search.R
|
5ba42ddf52cc8f2aab6d49944d7173ba0a4fc1bb
|
[] |
no_license
|
mamun41/ADTransitions
|
ac0dc631db6f6228ce4245e261134fa72a88e0a7
|
8eb2a47a7acae0b8eccfe68ab6c9573496b2a501
|
refs/heads/main
| 2023-07-19T00:57:01.413293
| 2021-09-01T17:57:24
| 2021-09-01T17:57:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,523
|
r
|
grid.search.R
|
######## Grid search for global optimum
library(tidyverse)
library(ggplot2)
library(knitr)
library(readxl)
library(gridExtra)
library(grid)
library(gghighlight)
library(nloptr)
# setwd("/Users/Tommy/Desktop/Tommy/School/Grad School/Research/Research Brookmeyer/Code")
source("BrookFuncs.R")
source("AD_eval.f.g.R")
# Age grids: full model range and the range used for the incidence target
ages <- 50:95
inc.ages <- 65:90
# Empirical incidence target: exponential in (age - 60), scaled by 100
empirical.incidence <- 0.00117 * exp(0.126 * (inc.ages - 60)) * 100
## lower bounds and upper bounds for parameters (a 2-value pattern repeated
## 12 times, i.e. 24 parameters in total)
lb <- rep(c(-14, 0.001), 12)
ub <- rep(c(-4, 0.15), 12)
## Grid of initial values: all combinations of the Lk0 and k1 starting points
Lk0.init <- seq(-12, -6, 2)
a <- length(Lk0.init)
k1.init <- seq(0.02, 0.1, length.out = a)
possible.inits <- as.matrix(expand.grid(Lk0.init, k1.init))
# For every grid point, record whether the constraints hold there and, if so,
# the loss at that starting point.
init.valid <- vector(length = (a * a))
init.loss <- vector(length = (a * a))
for(i in 1:(a * a)){
  # All inequality constraints must be strictly negative for a feasible start
  g <- eval_g_ineq_both(rep(possible.inits[i,], 12), 0, 0, 0) < 0
  if(any(g == FALSE)){
    init.valid[i] <- 0
    init.loss[i] <- NA
  } else {
    init.valid[i] <- 1
    init.loss[i] <- eval_f_logs_weighted(x = rep(possible.inits[i,], 12),
                                         r45.params, avg_prev_u, incidence = empirical.incidence, w = 1)
  }
}
# Keep an untouched copy before the refinement pass below mutates init.valid
init.valid1 <- init.valid
###### the cobyla algorithm will sometimes take the parameter values outside
###### of the constraints, so this little precursor optimization accounts for
###### that: run only 10 evaluations from each feasible start and invalidate
###### any start whose objective comes back NaN.
for(i in 1:(a * a)){
  if(init.valid[i] == 0) next
  else{
    opt.temp <- nloptr(x0 = rep(possible.inits[i,], 12),
                       eval_f = eval_f_logs_weighted,
                       lb = lb, ub = ub,
                       eval_g_ineq = eval_g_ineq_weighted,
                       opts = list("algorithm"="NLOPT_LN_COBYLA",
                                   "xtol_rel"=1e-3,
                                   "maxeval"=10),
                       r45 = r45.params,
                       prevs = avg_prev_u,
                       incidence = empirical.incidence,
                       w = 1)
    if(is.nan(opt.temp$objective)){
      init.valid[i] <- 0
      init.loss[i] <- NA
    }
  }
}
# Indices of the starting points that survived both feasibility checks
valids <- which(init.valid == 1)
# opt with more stringent convergence criteria, started from a hand-picked
# subset of the surviving grid points
opts.grid.e1 <- list()
index <- 1
for(i in valids[c(1, 4, 5, 8)]){
  opts.grid.e1[[index]] <- nloptr(x0 = rep(possible.inits[i,], 12),
                                  eval_f = eval_f_logs_weighted,
                                  lb = lb, ub = ub,
                                  eval_g_ineq = eval_g_ineq_weighted,
                                  opts = list("algorithm"="NLOPT_LN_COBYLA",
                                              "xtol_rel"=1e-3,
                                              "maxeval"=40000),
                                  r45 = r45.params,
                                  prevs = avg_prev_u,
                                  incidence = empirical.incidence,
                                  w = 1)
  index <- index + 1
}
# opts.grid.e1[[2]] <- nloptr(x0 = rep(possible.inits[valids[8],], 12),
#                             eval_f = eval_f_logs_weighted,
#                             lb = lb, ub = ub,
#                             eval_g_ineq = eval_g_ineq_weighted,
#                             opts = list("algorithm"="NLOPT_LN_COBYLA",
#                                         "xtol_rel"=1e-3,
#                                         "maxeval"=40000),
#                             r45 = r45.params,
#                             prevs = avg_prev_u,
#                             incidence = empirical.incidence,
#                             w = 1)
#
# saveRDS(opts.grid.e1, file = "GridSearch/opts.grid.e1.rds")
# Life-expectancy tables by starting age, sex and disease state, using the
# transition matrices implied by the first optimised parameter set.
tab.ages <- seq(60, 90, 5)
e1.mats <- make_trans_matrix_low(opts.grid.e1[[1]]$solution, r45 = r45.params)
# One row per tabulated age; column 1 = age, columns 2..10 = states 1..9
lifetime.table.f <- as.data.frame(matrix(nrow = length(tab.ages), ncol = 10))
lifetime.table.m <- as.data.frame(matrix(nrow = length(tab.ages), ncol = 10))
lifetime.table.f[,1] <- lifetime.table.m[,1] <- tab.ages
for(i in 1:length(tab.ages)){
  for(j in 1:9){
    # Only the first element of the lifetime() result is tabulated here
    curr.f <- lifetime(age = tab.ages[i], g = "Female", state = j, k0 = e1.mats[[1]], k1 = e1.mats[[2]])
    curr.m <- lifetime(age = tab.ages[i], g = "Male", state = j, k0 = e1.mats[[1]], k1 = e1.mats[[2]])
    lifetime.table.f[i, (j + 1)] <- curr.f[1]
    lifetime.table.m[i, (j + 1)] <- curr.m[1]
  }
}
colnames(lifetime.table.f) <- colnames(lifetime.table.m) <- c("Age", "Normal", "A", "A+T", "A+T+N",
                                                              "A+T+N + MCI", "T", "T+N", "N", "A+N")
lifetime.e1 <- list(lifetime.table.f, lifetime.table.m)
names(lifetime.e1) <- c("f", "m")
saveRDS(lifetime.e1, "GridSearch/lifetime.optimal.rds")
# opts.grid <- list()
#
# index <- 1
# for(i in valids){
#   opts.grid[[index]] <- nloptr(x0 = rep(possible.inits[i,], 12),
#                                eval_f = eval_f_logs_weighted,
#                                lb = lb, ub = ub,
#                                eval_g_ineq = eval_g_ineq_weighted,
#                                opts = list("algorithm"="NLOPT_LN_COBYLA",
#                                            "xtol_rel"=5e-3,
#                                            "maxeval"=40000),
#                                r45 = r45.params,
#                                prevs = avg_prev_u,
#                                incidence = empirical.incidence,
#                                w = 1)
#   index <- index + 1
# }
# NOTE(review): the loop above that builds `opts.grid` is commented out, so
# the saveRDS() call below references an object that is never created and
# will stop the script with "object 'opts.grid' not found" -- either restore
# the loop or comment out this line as well.
saveRDS(opts.grid, file = 'GridSearch/opts.grid.rds')
#### Opt where we don't take jack data from ages 91-95: the prevalence input
#### is truncated to its first 41 rows and a dedicated objective is used.
opts.grid.90 <- list()
index <- 1
for(i in valids){
  opts.grid.90[[index]] <- nloptr(x0 = rep(possible.inits[i,], 12),
                                  eval_f = eval_f_logs_weighted_90,
                                  lb = lb, ub = ub,
                                  eval_g_ineq = eval_g_ineq_weighted,
                                  opts = list("algorithm"="NLOPT_LN_COBYLA",
                                              "xtol_rel"=5e-3,
                                              "maxeval"=20000),
                                  r45 = r45.params,
                                  prevs = avg_prev_u[1:41,],
                                  incidence = empirical.incidence,
                                  w = 1)
  index <- index + 1
}
saveRDS(opts.grid.90, file = "GridSearch/opts.grid.50-90.rds")
#### Trying the ISRES algorithm again
# NOTE(review): the comment above says ISRES but the algorithm requested
# below is NLOPT_GN_ORIG_DIRECT -- confirm which global optimiser was meant.
opt.isres <- nloptr(x0 = rep(possible.inits[8,], 12),
                    eval_f = eval_f_logs_weighted,
                    lb = lb, ub = ub,
                    eval_g_ineq = eval_g_ineq_weighted,
                    opts = list("algorithm"="NLOPT_GN_ORIG_DIRECT",
                                "xtol_rel"=5e-3,
                                "maxeval"=200),
                    r45 = r45.params,
                    prevs = avg_prev_u,
                    incidence = empirical.incidence,
                    w = 1)
#### Okay, we'll start with optimizing for 500 iterations, weed some out,
# then 1000 iterations, weed out again, yada yada
##### List of all optimizations
# opts.500 <- list()
# opts.1000 <- list()
# index <- 1
# for(i in 1:(a * a)){
#
# if(init.valid[i] == 0) next
# else{
# opts.500[[index]] <- nloptr(x0 = rep(possible.inits[i,], 12),
# eval_f = eval_f_logs_weighted,
# lb = lb, ub = ub,
# eval_g_ineq = eval_g_ineq_weighted,
# opts = list("algorithm"="NLOPT_LN_COBYLA",
# "xtol_rel"=5e-3,
# "maxeval"=500),
# r45 = r45.params,
# prevs = avg_prev_u,
# incidence = empirical.incidence,
# w = 1)
# opts.1000[[index]] <- nloptr(x0 = rep(possible.inits[i,], 12),
# eval_f = eval_f_logs_weighted,
# lb = lb, ub = ub,
# eval_g_ineq = eval_g_ineq_weighted,
# opts = list("algorithm"="NLOPT_LN_COBYLA",
# "xtol_rel"=5e-3,
# "maxeval"=1000),
# r45 = r45.params,
# prevs = avg_prev_u,
# incidence = empirical.incidence,
# w = 1)
# index <- index + 1
# }
#
# }
#
# ##### Do it again with 3000 iterations, only those qualifying are in
# opts.2000 <- list()
#
# index <- 1
# for(i in 1:(a * a)){
# if(init.valid[i] == 0) next
# else{
# opts.2000[[index]] <- nloptr(x0 = rep(possible.inits[i,], 12),
# eval_f = eval_f_logs_weighted,
# lb = lb, ub = ub,
# eval_g_ineq = eval_g_ineq_weighted,
# opts = list("algorithm"="NLOPT_LN_COBYLA",
# "xtol_rel"=1e-3,
# "maxeval"=2000),
# r45 = r45.params,
# prevs = avg_prev_u,
# incidence = empirical.incidence,
# w = 1)
# index <- index + 1
# }
# }
####### save results
# saveRDS(opts.500, "OptResults/grid.500.rds")
# saveRDS(opts.1000, "OptResults/grid.1000.rds")
# saveRDS(opts.2000, "OptResults/grid.2000.rds")
#######
##### OBJECTIVE FUNCTION VALUES
# obj.500 <- obj.1000 <- obj.2000 <- vector(length = length(opts.500))
#
#
# for(i in 1:length(opts.500)){
# obj.500[i] <- opts.500[[i]]$objective
# obj.1000[i] <- opts.1000[[i]]$objective
# obj.2000[i] <- opts.2000[[i]]$objective
# }
#
# ## make a pretty plot
# obj.prog <- cbind.data.frame(c(init.loss[!is.na(init.loss)], obj.500, obj.1000, obj.2000),
# rep(c(0, 500, 1000, 2000), each = sum(!is.na(init.loss))),
# rep(1:sum(init.valid), 4))
#
# names(obj.prog) <- c("Objective", "Iterations", "Init Set")
#
# obj.prog %>%
# ggplot(aes(x = Iterations, y = Objective, group = `Init Set`)) +
# geom_point() +
# geom_line() +
# theme_bw() +
# scale_y_continuous(trans = "log2") +
# labs(title = "Convergence of COBYLA algorithm for multiple sets of initial values",
# y = "Objective function (log scale)") +
# scale_x_continuous(breaks = c(0, 500, 1000, 2000))
|
7b220a57b2fb2825c7edd32eb391908931e72a0f
|
24cd7f6301da5c86a4f4ff2b70c4ebf9b20ae021
|
/data-raw/create_faces_data.R
|
d431ffec91b0d109f2c76356fc37f3167cf3bce4
|
[
"MIT"
] |
permissive
|
a-hurst/eeguana
|
1897cbf1872dbbe6b25750af9f617f5469fc702d
|
fe64a78ddecfd8dec76e1f44acc55651d5e2dd2d
|
refs/heads/master
| 2023-01-22T04:13:56.506807
| 2020-06-27T08:13:55
| 2020-06-27T08:13:55
| 319,080,261
| 0
| 0
| null | 2020-12-06T16:37:46
| 2020-12-06T16:37:46
| null |
UTF-8
|
R
| false
| false
| 1,087
|
r
|
create_faces_data.R
|
library(eeguana)
# Fetch the raw BrainVision recording (header, marker and data files).
download.file("http://www.ling.uni-potsdam.de/~nicenboim/files/faces.vhdr",
  mode = "wb", destfile = "faces.vhdr"
)
download.file("http://www.ling.uni-potsdam.de/~nicenboim/files/faces.vmrk",
  mode = "wb", destfile = "faces.vmrk"
)
download.file("http://www.ling.uni-potsdam.de/~nicenboim/files/faces.dat",
  mode = "wb", destfile = "faces.dat"
)
faces <- read_vhdr("faces.vhdr")
# Averaged ERPs: segment around the face/non-face triggers (s70/s71), blank
# out bad intervals, baseline-correct, label the condition, and average every
# channel per sample/condition/recording.
data_faces_ERPs <- faces %>%
  eeg_segment(.description %in% c("s70", "s71"),
    lim = c(-.2, .25)
  ) %>%
  eeg_events_to_NA(.type == "Bad Interval") %>%
  eeg_baseline() %>%
  mutate(
    condition =
      if_else(description == "s70", "faces", "non-faces")
  ) %>%
  select(-type) %>%
  group_by(.sample, condition, .recording) %>%
  # BUG FIX: the original passed channel_names(data_faces_ERPs), i.e. the very
  # object being defined, which is unbound while this pipeline evaluates.
  # Use the piped object (`.`) instead; none of the preceding steps changes
  # the channel set.
  summarize_at(channel_names(.), mean, na.rm = TRUE)
# Raw continuous data for the first 10 "s130" trials (from sample 15000 up to
# the 10th trigger onset).
pos_10 <- events_tbl(faces) %>% filter(.type == "Stimulus", .description == "s130") %>% pull(.initial) %>% .[10]
data_faces_10_trials <- faces %>% filter(.sample %>% between(15000, pos_10)) %>% ungroup()
usethis::use_data(data_faces_ERPs, data_faces_10_trials, overwrite = TRUE)
|
297d585075f2ef731d11f2f5890222169df87414
|
cfb1c7d63471f7964721849f1a0d53d31bdc26a5
|
/cachematrix.R
|
564a550a54e75e7a8cde3b1b2b1d41cc8b53aaf1
|
[] |
no_license
|
guanabara/ProgrammingAssignment2
|
162985c92dc9d499c87bdc2b638b6046ff03d6f8
|
6998f7457c9d2096c706df1907c1c5f10c68dfad
|
refs/heads/master
| 2020-12-11T02:12:13.362470
| 2015-01-08T14:31:09
| 2015-01-08T14:31:09
| 28,963,901
| 0
| 0
| null | 2015-01-08T11:57:31
| 2015-01-08T11:57:31
| null |
UTF-8
|
R
| false
| false
| 1,189
|
r
|
cachematrix.R
|
## makeCacheMatrix is a function which defines an object type used to store a matrix and to cache its inverse.
## This type only has setters and getters for both values.
## It makes use of R's lexical scoping in order to preserve the inverse value inside an R object.
## Build a cache-aware wrapper around a matrix: a list of four accessor
## closures that share one enclosing environment holding the matrix `x` and
## its (lazily computed) inverse. Re-setting the matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(mtx) {
      x <<- mtx
      cached_inverse <<- NULL # a new matrix makes any stored inverse stale
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## The following function calculates the inverse of the special "matrix" object created by makeCacheMatrix, returning the cached value if it exists;
## otherwise it computes the inverse with R's solve() function and stores it in the cache.
## Return the inverse of the cache-aware "matrix" object `x` created by
## makeCacheMatrix. If an inverse is already cached it is returned directly;
## otherwise it is computed with solve(), stored via x$setInverse(), and
## returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  m <- x$getInverse()
  if (!is.null(m)) {
    message("getting cached data.")
    return(m)
  }
  message("calculating inverse for matrix and populating cache.")
  data <- x$get()
  # FIX: forward `...` to solve() -- the original signature accepted extra
  # arguments but silently discarded them.
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
|
3e24fdfcc12524043d519c0b48e7c41d78cd82be
|
aa40f8bbad3da770b1096e846d16f373a2f22864
|
/man/vonNeumann.Rd
|
afbedbf7a1f510a4a19dd9a26d1cafaaec534695
|
[] |
no_license
|
cran/ACSWR
|
bff4c1d8a111ae4045a9e23da5cbe9bf7c8c8ada
|
5fed66d5f3ead119f2de631d5079b7502f9c7067
|
refs/heads/master
| 2016-08-11T15:20:11.175145
| 2015-09-05T17:50:53
| 2015-09-05T17:50:53
| 48,076,424
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 482
|
rd
|
vonNeumann.Rd
|
\name{vonNeumann}
\alias{vonNeumann}
\title{
von Neumann Random Number Generator}
\description{
The "vonNeumann" function implements the von Neumann random generator as detailed in Section 11.2.
}
\usage{
vonNeumann(x, n)
}
\arguments{
\item{x}{
the initial seed
}
\item{n}{
number of required observations}
}
\author{
Prabhanjan N. Tattar
}
\examples{
vonNeumann(x=11,n=10)
vonNeumann(x=675248,n=10)
vonNeumann(x=8653,n=100)
}
\keyword{von Neumann}
\keyword{random generator}
|
6625e48726ab24f873b9b29f8cf740c69a7fc490
|
e00fd19665055752c6c3a10e59131f60e36a597b
|
/ui.R
|
a122a868b977e5753c3dc1621f0c4bc1170952bf
|
[] |
no_license
|
ConnieZ/delay_predictor
|
cffeee0afdf2fca0555bebb671b4caf4e5ff1962
|
b0774ab83369326cf5d5a461e1fe577221d5ea8d
|
refs/heads/master
| 2020-05-18T02:24:23.233736
| 2017-03-15T02:58:54
| 2017-03-15T02:58:54
| 23,283,641
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,021
|
r
|
ui.R
|
#ui.R
# Shiny UI for a flight departure-delay predictor (Newark airport).
# Defines the page layout only; the regression model and reactive outputs
# (todaysForecast, predictionData, predictionValue, delayChart) are produced
# by the matching server.R.
library(shiny)
library(rCharts)
# NOTE(review): `merged_airlines` is referenced below but not defined in this
# file -- presumably created in global.R or server.R; confirm it is in scope
# when the UI is built.
shinyUI(fluidPage(
  titlePanel("Flight Departure Delay Predictor for Newark Airport in NY"),
  sidebarLayout(
    sidebarPanel(
      helpText("This app will calculate the likelihood of departure delay
               for airlines departing Newark airport in NYC. The app runs
               a Binary Logistic Regression model and uses it to predict
               likelihood of delay."),
      #input controls
      # Airline selector populated from the (externally defined) airline table.
      selectInput("airline", label = strong("Choose an airline"),
                  choices = merged_airlines$name,
                  selected = merged_airlines$name[1]),
      # Weather inputs used as predictors; defaults are plausible mild-day values.
      numericInput("temp", label = strong("Specify the temperature (degrees Fahrenheit)"),
                   value = 75),
      numericInput("precip", label = strong("Specify the precipitation (inches)"),
                   value = 0),
      numericInput("wind", label = strong("Specify the wind speed (mph)"),
                   value = 5),
      numericInput("visib", label = strong("Specify the visibility"),
                   value = 10),
      h5("To help you with providing values for fields above, use this forecast for weather in Newark, NY today:"),
      tableOutput("todaysForecast")
    ),
    mainPanel(
      h4("Instructions"),
      helpText("The app runs initially with default data,
               however, once the user changes any of the input values on the left,
               the app refreshes instantly and displays the newly provided values,
               as well as the likelihood of delay estimate
               and the refreshed chart with average departure and arrival delay
               for the chosen airline grouped by month."),
      h4("The values you provided for prediction model:"),
      tableOutput("predictionData"),
      br(),
      textOutput("predictionValue"),
      br(),
      # rCharts/highcharts output; inline CSS fixes the chart width.
      div(class='wrapper',
          tags$style(".highcharts{ height: 100%; width: 800px;}"),showOutput('delayChart', 'highcharts'))
    )
  )
))
|
fdf863ea18ef129e499581253b2374b95db3a81a
|
c7041acdf8528a247e06d6b8bdad3a2a8e62e693
|
/R/funcs.R
|
1347675aa70fb4d461c711afee41edb0fbb2cf67
|
[] |
no_license
|
bio-cui/siGCD
|
e6d9204bf96dfa05e00f39bb880254911893b1e6
|
ef7cf2aa45ff0dbb41cfb68af618d27937116db5
|
refs/heads/main
| 2023-02-18T06:22:52.299103
| 2021-01-15T08:19:51
| 2021-01-15T08:19:51
| 316,158,836
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,956
|
r
|
funcs.R
|
############load DATA
# Load an expression matrix and a clinical table from two .RData files.
#
# expfile/clifile: file names of .RData files; the first must contain an
#   object named `expma`, the second an object named `clinical`.
# data.dir: directory prefix the files are read from; defaults to the
#   original hard-coded "data/" so existing callers are unchanged.
#
# Returns a 2-element list; elements are now named (expma, clinical) but
# positional access via [[1]]/[[2]] still works as before.
loaddata <- function(expfile, clifile, data.dir = "data/") {
  load(paste0(data.dir, expfile))   # defines `expma` in this frame
  load(paste0(data.dir, clifile))   # defines `clinical` in this frame
  list(expma = expma, clinical = clinical)
}
# Build a covariate data frame by combining selected clinical factors with
# `vars`.
# facts:    comma-separated string of clinical column names, or a zero-length
#           value to use `vars` alone.
# clinical: clinical data frame the factor columns are taken from.
# vars:     additional covariate column(s) appended after the factors.
# Returns a data.frame.
cofac_vars <- function(facts, clinical, vars) {
  if (length(facts) == 0) {
    # No clinical factors requested: covariates are just `vars`.
    combined <- vars
  } else {
    # Split the comma-separated names and pull the matching columns.
    wanted <- strsplit(facts, ",")[[1]]
    col.idx <- match(wanted, colnames(clinical))
    combined <- cbind(clinical[, col.idx, drop = FALSE], vars)
  }
  as.data.frame(combined)
}
# Simplified covariate builder: binds pre-selected clinical columns `B` to
# `vars` when any factors are in use, otherwise returns `vars` alone.
# Always returns a data.frame.
cofac_varsim <- function(facts, B, vars) {
  if (length(facts) > 0) {
    return(as.data.frame(cbind(B, vars)))
  }
  as.data.frame(vars)
}
# Write Kaplan-Meier summary files for expression-defined patient groups.
#
# surv:              a survival::Surv object.
# exprs_data_cutoff: grouping variable (e.g. high/low expression per patient).
# path_output:       path prefix the output files are written under.
# tulis:             "|"-separated string of group display names, one per stratum.
#
# Side effects only: writes "outinfo.txt" (per-group case counts and median
# survival) plus one "survival_<i>.txt" per stratum with the KM curve points.
survout<-function(surv,exprs_data_cutoff,path_output,tulis){
  # One KM curve per group, log-transformed confidence intervals.
  kaplan_meier <- survfit(surv ~ exprs_data_cutoff,conf.type = "log")
  xxx=summary(kaplan_meier)$table
  # Columns 1, 4, 7 of the survfit summary table: records, events, median.
  # NOTE(review): positional indexing assumes the survival package's table
  # layout -- confirm against the installed survival version.
  outinfo=data.frame(xxx[,c(1,4,7)])
  colnames(outinfo)=c("Number of Cases, Total","Number of Cases, Deceased","Median Survival Time")
  #rownames(outinfo)=strsplit(tulis,"\\|")[[1]]
  outinfo$Name = strsplit(tulis,"\\|")[[1]]
  # Reorder so Name is the first column (dplyr::select by position).
  outinfo<-select(outinfo,4,1,2,3)
  write.table(outinfo,file=paste(path_output,"outinfo.txt",sep=''),sep='\t',quote=F,row.names=F)
  NN=length(kaplan_meier$strata)   # number of strata/groups
  # Stratum sizes with a leading 1, so cumulative sums below give the first
  # kmall row index of each stratum.
  timepoints=as.numeric(c(1,kaplan_meier$strata))
  # One row per event time across all strata: estimate, time, events, censored.
  kmall=cbind(kaplan_meier$surv,kaplan_meier$time,kaplan_meier$n.event,kaplan_meier$n.censor)
  for(i in 1:NN){
    low_matrix <- matrix(nrow=kaplan_meier$strata[i],ncol=4)
    if(kaplan_meier$strata[i]>1){
      # Row range of kmall belonging to stratum i.
      idl=sum(timepoints[1:i])
      idh=sum(timepoints[1:(i+1)])-1
      low_matrix <- kmall[idl:idh,]
      colnames(low_matrix) <- c("surv", "time", "event", "censor")
      write.table(low_matrix,file=paste(path_output,"survival_",as.character(i),".txt",sep = ""),sep='\t',quote=F,row.names = FALSE)
    } else {
      # Degenerate stratum (at most one time point): emit a single zero row.
      nopat=c(0,0,0,0)
      names(nopat)=c("surv", "time", "event", "censor")
      write.table(t(nopat),file=paste(path_output,"survival_",as.character(i),".txt",sep = ""),sep='\t',quote=F,row.names=F)
    }
  }
}
# Fit a Cox proportional-hazards model of `surv` on all columns of `datas`
# and return the coefficient table of its summary.
#
# surv:  a survival::Surv object.
# datas: data.frame of covariates (all columns enter the model).
# Returns the summary()$coef matrix, or NULL when the fit fails.
# BUG FIX: on a fitting error the original returned `reg.summary` without
# ever assigning it, which raised a misleading "object not found" error.
coxcal <- function(surv, datas) {
  errflag <- FALSE
  coxph.fit <- tryCatch(coxph(surv ~ ., data = datas),
                        error = function(e) errflag <<- TRUE)
  if (errflag) {
    return(NULL)
  }
  summary(coxph.fit)$coef
}
# Fit a Cox proportional-hazards model and return only the z statistic and
# p-value of the "Interaction" term.
#
# surv:  a survival::Surv object.
# datas: data.frame of covariates; must contain a column named "Interaction".
# Returns a length-2 numeric vector (z, Pr(>|z|)), or NULL when the fit fails.
# BUG FIX: on a fitting error the original indexed `reg.summary` without it
# ever being assigned, which raised a misleading "object not found" error.
coxcalsim <- function(surv, datas) {
  errflag <- FALSE
  coxph.fit <- tryCatch(coxph(surv ~ ., data = datas),
                        error = function(e) errflag <<- TRUE)
  if (errflag) {
    return(NULL)
  }
  reg.summary <- summary(coxph.fit)$coef
  reg.summary["Interaction", c("z", "Pr(>|z|)")]
}
# Score each sample for a cell signature: summed expression of the positive
# marker genes minus the negative markers, averaged over the total number of
# markers.
#
# posg/negg: character vectors of row names of `expma` (negg may be empty).
# expma:     genes-by-samples expression matrix.
# MM:        number of samples (columns of expma).
# Returns a numeric vector of length MM.
cellcal <- function(posg, negg = c(), expma, MM) {
  # Sum expression across the given marker genes; zero vector when none given.
  sum_over_genes <- function(genes) {
    if (length(genes) == 0) {
      return(rep(0, MM))
    }
    idx <- match(genes, rownames(expma))
    if (length(genes) == 1) {
      expma[idx[1], ]
    } else {
      colSums(expma[idx, ])
    }
  }
  (sum_over_genes(posg) - sum_over_genes(negg)) / (length(posg) + length(negg))
}
|
51ae11365ceb464cc2620e00a99d83ea7c8769eb
|
a53f1f939d3dc8a0278cfcbeed57c000bed77a1f
|
/playground/helper_functions.R
|
94d2621063589b79962e394fb7d56a8f2a646399
|
[] |
no_license
|
giuseppec/customtrees
|
0c803bdb70f7fb5d2d589cfca1b8510e7154288d
|
f7f50d9357d129c9f88d1985b8862da1f7f2f68e
|
refs/heads/master
| 2023-06-26T22:32:30.207928
| 2021-08-01T15:04:34
| 2021-08-01T15:04:34
| 254,136,481
| 1
| 1
| null | 2020-08-04T11:11:53
| 2020-04-08T16:06:21
|
R
|
UTF-8
|
R
| false
| false
| 4,406
|
r
|
helper_functions.R
|
library(dplyr)
library(reshape2)
library(stringr)
library(ggplot2)
library(tidyverse)
library(Rmalschains)
library(iml)
library(ranger)
library(kmlShape)
library(dtw)
library(tidyr)
# # Frechet distance FDA measure
# SS_fre = function(y, x, requires.x = FALSE, ...) { # slow
# # using only y-axis of curves is enough as x-axis is always the same for all curves
# require(kmlShape)
# center = colMeans(y)
# grid.x = as.numeric(names(center))
# pdp.y = unname(center)
# dist = apply(y, 1, function(ice) distFrechet(grid.x, pdp.y, grid.x, ice, FrechetSumOrMax = "sum"))
# sum(dist)
# }
#
# # Frechet distance measure - with filtered ice curves
# SS_fre_filtered = function(y, x, sub.number, requires.x = FALSE, feat, x.all, ...) {
# require(kmlShape)
# # use only ice curves that are available for the combination of the two features -> no extrapolation
# indices = filter(feat, x.all, y, sub.number)
# y.filtered = y[,indices, drop = FALSE]
# center = colMeans(y.filtered)
# grid.x = as.numeric(names(center))
# pdp.y = unname(center)
# dist = apply(y.filtered, 1, function(ice) distFrechet(grid.x, pdp.y, grid.x, ice, FrechetSumOrMax = "sum"))
# sum(dist)*20/length(indices)
# }
# L1 dissimilarity of a set of curves: the summed absolute deviation of every
# curve (row of `y`) from the pointwise column-median curve.
#
# y: numeric matrix/data.frame, rows = curves, columns = grid points.
# x, requires.x, ...: accepted only for interface compatibility with other
#   split objectives; unused here.
SS_fre = function(y, x, requires.x = FALSE, ...) {
  ypred = apply(y, 2, median)  # pointwise median curve (cf. Rfast::colMedians)
  # t(y) - ypred subtracts the median curve from every curve column-wise;
  # the original wrapped this in a second t(), which is redundant inside sum().
  sum(abs(t(y) - ypred))
}
# L1 deviation from the median curve, restricted to grid columns where the
# subset's ICE curves are actually supported (no extrapolation), rescaled to
# the full grid width so objectives remain comparable across subsets.
# Relies on the sibling `filter()` helper defined in this file.
SS_fre_filtered = function(y, x, sub.number, requires.x = FALSE, feat, x.all, ...) {
  n.grid = ncol(y)
  # Grid columns valid for this subset of observations.
  keep = filter(feat, x.all, y, sub.number)
  y.sub = y[, keep, drop = FALSE]
  median.curve = apply(y.sub, 2, median)
  total.dev = sum(t(abs(t(y.sub) - median.curve)))
  # Rescale by the ratio of total to retained columns.
  total.dev * n.grid / length(keep)
}
# NEW:
# Filter function to use only ice curves within grid points to find best split point
# not yet included: categorical features (only numeric and one-hot-encoded)
# needs to be integrated in objective
#
# feat:       name of the ICE-curve feature (column of x.all).
# x.all:      full feature data.
# Y:          ICE matrix whose column names are the numeric grid points.
# sub.number: row indices of the current subset.
# Returns the column indices of Y covered by the subset's feature values.
# NOTE(review): this shadows dplyr::filter when dplyr is attached; callers in
# this file rely on the local definition.
filter = function(feat, x.all, Y, sub.number){
  # Feature values observed in the current subset of rows.
  values = unique(x.all[sub.number,feat])
  if (length(unique(x.all[,feat])) > 2) {
    # Numeric feature: map subset values onto the grid via midpoints between
    # consecutive grid points, then keep the grid columns that are hit.
    grid.points = as.numeric(names(Y))
    break.points = grid.points[1:(length(grid.points) - 1)] + (grid.points[2:length(grid.points)] - grid.points[1:(length(grid.points) - 1)]) / 2
    range = cut(values, breaks = c(min(x.all[,feat]), break.points, max(x.all[,feat])), labels = c(names(Y)), include.lowest = TRUE, right = TRUE)
    return(which(names(Y) %in% unique(range)))
  }
  else if (length(unique(x.all[,feat])) == length(values)) {
    # Subset covers every level of the (binary) feature: keep all columns.
    return(c(1:length(names(Y))))
  }
  else if (length(values) == 1 & length(unique(x.all[,feat])) == 2) {
    # One level of a binary (one-hot) feature: keep the matching half of the grid.
    if (values < mean(unique(x.all[,feat]))) {
      return(c(1:round(ncol(Y)/2,0)))
    }
    else if (values > mean(unique(x.all[,feat]))) {
      return(c(round(ncol(Y)/2,0):ncol(Y)))
    }
    # NOTE(review): if the single value equals the mean of the two levels,
    # no branch matches and the function silently returns NULL -- confirm
    # this cannot occur for the encodings used upstream.
  }
}
#------------------------------------------------------------------------------------------------------------
# functions for plotting
# get ice curves function for plotting
#
# Y:        ICE matrix (columns = grid points, named by grid value).
# X:        feature data the split was computed on.
# result:   data.table of split candidates; rows flagged by `best.split`
#           define the split feature and split points (checked via checkmate).
# extrapol: if TRUE, drop grid columns a node's observations do not cover
#           (uses the sibling `filter()` helper).
# Returns a list of ICE matrices, one per child node of the best split.
get_ice_curves <- function(Y, X, result, extrapol = TRUE){
  assert_data_table(result)
  # TODO: fix bug if more than one feature have the same best objective
  feature = unique(result$feature[result$best.split])
  split.points = unlist(result$split.points[result$best.split])
  split.points = sort.int(split.points)
  # Assign each observation to a child node (1-based interval index).
  node.number = findInterval(x = X[,feature], split.points, rightmost.closed = TRUE) + 1
  # split y according to node.number
  y.list = split(Y, node.number)
  # ice curve feature
  feat = colnames(X)[which(!(colnames(X) %in% result$feature))]
  #filter ice curves in case extrapol = TRUE
  if (extrapol == TRUE) {
    y.list.filtered = lapply(seq_along(y.list), FUN = function(i) {
      # Restrict node i to grid columns supported by its own observations.
      ind = filter(feat, X, Y, which(node.number == i))
      y.list[[i]][,ind, drop = FALSE]
    })
  }
  else y.list.filtered = y.list
  return(y.list.filtered)
}
# prepare data for plotting
# Reshape one group's wide ICE matrix into long format for ggplot: adds a
# per-curve row id, gathers every original column (all but the freshly added
# .id, which is last) into (.borders, .value) pairs, and tags the rows with
# the group index i. factor_key preserves the grid-point ordering.
plot.prep.ind = function(i, x){
  x$.id = 1:nrow(x)   # one id per ICE curve (row)
  x = gather(x, .borders, .value, colnames(x)[1]:colnames(x)[ncol(x) - 1], factor_key = TRUE)
  x$.split = i        # which split/group this block of curves belongs to
  return(x)
}
# Apply plot.prep.ind to every group's ICE matrix and stack the long-format
# results into a single data.table (via data.table::rbindlist).
plot.prep.full = function(data){
  prepared = lapply(seq_along(data), function(grp) plot.prep.ind(grp, data[[grp]]))
  rbindlist(prepared)
}
|
5b0fb4bd768447362a68bfb5128e259aec3ea924
|
ca86b61d9ec9144d318a5c21508c6ef7573111cd
|
/validation/survival/only lasso.R
|
eafb6031a51f5400cfef40f559c4be7319cc6e8a
|
[] |
no_license
|
chung-R/HD-MAC
|
71ef36155469f9a1a01c069a9fef35e48f60190e
|
ddfd9d6f7fff7c7e03abca166a05ee10e96504d0
|
refs/heads/master
| 2022-07-13T04:23:03.858403
| 2022-07-03T09:11:49
| 2022-07-03T09:11:49
| 221,901,315
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,612
|
r
|
only lasso.R
|
# setting
# Script: validate a lasso-penalized Cox model (ncvreg) on TCGA ovarian
# cancer survival data and report the selected genes, coefficient p-values
# (selective inference) and a test-set concordance index.
library(data.table)
# NOTE(review): machine-specific working directory; adjust before running.
setwd("C:/Users/Judy/Desktop/Meeting/RShiny package/RShiny")
source("c_index.R")   # provides c.index() used at the bottom
cox = as.data.frame(fread("Ova_TCGA_OS_clinical_muta_cleaned_313_13_670.csv",header = T,sep = ","))
library(survival)
user.data = cox
# Column positions of survival time and event indicator in the input table.
time.col = 9
event.col = 10
# Gene columns occupy positions 14..683 of the cleaned table.
gene.type = list(gene.var = colnames(cox)[14:683])
penalty.alpha = 1
nfold = 1
# Genes pre-selected by an FDR screen; only these enter the model.
fdr.genes.col = c("ZSWIM8", "PABPC3")
set.seed(1)
# Fold assignment per row (with nfold = 1 every row lands in fold 1).
t = sample(1:nfold,size = nrow(user.data), replace = T)
gene.col = gene.type$gene.var
user.data[,gene.col] = apply(user.data[,gene.col], 2, as.numeric)
time = as.numeric(user.data[,time.col])
delta = user.data[,event.col]
x <- data.matrix(user.data[, fdr.genes.col])
y <- Surv(time,delta)
# Train on all folds except the last; with nfold = 1 train == test == all rows.
train = !(t==nfold)
test = (t==nfold)
if(nfold == 1){train = (t==nfold);test = (t==nfold)} # nfold = 1
x.train = x[train,]
x.test = x[test ,]
y.train = with(data.frame(time,delta)[train,], Surv(as.numeric(time),delta))
y.test = with(data.frame(time,delta)[test, ], Surv(as.numeric(time),delta))
#-----------------------------------------------------
# validation
library(ncvreg)
set.seed(4)
# Cross-validated lasso-penalized Cox regression.
cv.out1 <- cv.ncvsurv(x.train, y.train, penalty="lasso", nfolds = 5)
lambda_min1 <- cv.out1$lambda.min
# If lambda.min shrinks every coefficient to zero, fall back to the largest
# lambda that keeps at least one gene in the model.
if(sum(as.numeric(coef(cv.out1, s = lambda_min1))==0) == length(fdr.genes.col)){
  nz <- unlist(lapply(c(1:length(cv.out1$lambda)), function(x) sum(coef(cv.out1, s = cv.out1$lambda[x])!=0))) # cv.out$nzero
  lambda_min1 <- cv.out1$lambda[as.numeric(which(nz != 0)[1])]
}
lasso.est.coef1 = as.numeric(coef(cv.out1, s = lambda_min1))
# Coefficients at a near-zero penalty, used for selective inference below.
cv.est.coef1 = coef(cv.out1, s = lambda_min1/dim(user.data)[1])
best_coef1 = as.numeric(cv.est.coef1)
# Post-selection (selective) inference p-values for the lasso fit.
# NOTE(review): fixedLassoInf comes from the selectiveInference package,
# which is never attached here -- presumably loaded via c_index.R; confirm.
li1 = fixedLassoInf(x, time, beta = best_coef1, lambda = lambda_min1, status = delta, family = "cox")
best_coef_p_value1 = li1$pv
cv.lasso.est.coef1 = coef(cv.out1, s = lambda_min1)
la_geneSel1 = row.names(cv.est.coef1)[which(best_coef1 != 0)]
# Table of selected genes with their coefficients and p-values.
d1 = cbind(gene_list = la_geneSel1,
           estimated_coefficient = lasso.est.coef1[which(lasso.est.coef1 != 0)],
           p_value = best_coef_p_value1)
u51 = data.frame(Death = delta, Death_surtime = time)[test,]
# Linear predictor (risk score) on the test rows.
cut_hazard11 = predict(cv.out1 , X = x.test, s=lambda_min1)
la1 <- list( c_index = c.index(u5 = u51, cut_hazard1 = cut_hazard11),
             selected.gene = ((as.matrix(coef(cv.out1, s = lambda_min1)))[which( (as.numeric(coef(cv.out1, s = lambda_min1))) != 0),] ),
             coef.and.p = as.data.frame(d1) )
# result
la1$selected.gene
|
3e6260c94f92086c4722dfe105962c0223de25d6
|
c96c61582ab453550913511f6b62236ac45ae429
|
/Bridge/week7RAssignment.R
|
e32e58ecb8bb8246a2bd0f847c99afefc967fb25
|
[] |
no_license
|
RodavLasIlad/CUNY
|
74f7c7391eab2d5d0cd1cc0d1a25bc78dbc4f4f8
|
2ed2a3c6a02ecd671a3aceec7ab0ab448c3fcae3
|
refs/heads/master
| 2021-01-10T21:00:54.037051
| 2014-11-21T16:52:26
| 2014-11-21T16:52:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
week7RAssignment.R
|
#Part 1
# Read the ALLCHARTERS table from an existing SQLite database into a data
# frame, then (Part 2) write the MASS::Boston data set into a new database.
library(RSQLite)
# NOTE(review): machine-specific working directory; adjust for your machine.
setwd('C:/Users/Brett/Dropbox/CUNY')
sqlite <- dbDriver("SQLite")
workingdb <- dbConnect(sqlite, "simplecharter.sqlite")
dbListTables(workingdb)
results <- dbSendQuery(workingdb, "SELECT * FROM ALLCHARTERS")
resultsdf <- data.frame(fetch(results, -1))   # fetch(-1) pulls all rows
# BUG FIX: release the open result set before disconnecting; leaving it open
# leaks the cursor and triggers a warning from RSQLite on disconnect.
dbClearResult(results)
dbDisconnect(workingdb)
#Part 2
library(MASS)
newdb <- dbConnect(sqlite, "newdb.sqlite")
dbWriteTable(newdb, "Boston", Boston)
dbDisconnect(newdb)
|
4e7797608f6bf3ac8c21c654b0fac8574db3e308
|
e8a9c5f88bab32d755d925a9640ae1ab1c51dc6b
|
/man/get_prg.Rd
|
6e721fea8f7ce2e2438302c327c3359103805eef
|
[
"MIT"
] |
permissive
|
skowronskij/prgdownloader
|
72c788865dc9f18be3a2ecef9f9a02250753bcbf
|
cf2c02f6259c381f04d0b44a9a73bfc4484ecfd8
|
refs/heads/master
| 2022-04-01T12:03:13.793553
| 2020-02-03T04:55:50
| 2020-02-03T04:55:50
| 228,857,918
| 1
| 0
|
NOASSERTION
| 2020-02-11T19:53:28
| 2019-12-18T14:32:24
|
R
|
UTF-8
|
R
| false
| true
| 590
|
rd
|
get_prg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_prg.R
\name{get_prg}
\alias{get_prg}
\title{Pobieranie wybranych jednostek PRG}
\usage{
get_prg(unit, teryt)
}
\arguments{
\item{unit}{jednostka PRG}
\item{teryt}{kod teryt lub lista terytów}
}
\value{
obiekt sf zawierający atrybuty i geometrię wybranych jednostek PRG
}
\description{
Funkcja służąca do pobierania wybranych
jednostek PRG z API
}
\examples{
\dontrun{
get_prg('gminy', '3064011,2602042')
get_prg('powiaty', '1815')
get_prg('wojewodztwa', '24,16,30')
}
}
|
a96d64590d5f4ca887792ba6c5ec9ff08dea953d
|
2dc78a3377c57e0e5fbe8ee41e85942946666a36
|
/man/nrsaFishCover.Rd
|
1c5795f22eef0c13e2db6f7eb8f236e449daf54a
|
[] |
no_license
|
jasonelaw/aquamet
|
e5acce53127de4505486669597aed3dd74564282
|
3464af6dbd1acc7b163dc726f86811249293dd92
|
refs/heads/master
| 2020-03-25T16:19:46.810382
| 2018-08-07T20:51:01
| 2018-08-07T20:51:01
| 143,925,702
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 7,215
|
rd
|
nrsaFishCover.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nrsaFishCover.r
\name{nrsaFishCover}
\alias{nrsaFishCover}
\title{Calculate NRSA Fish Cover Metrics}
\usage{
nrsaFishCover(algae = NULL, boulder = NULL, brush = NULL,
liveTree = NULL, macrophytes = NULL, overhang = NULL,
structures = NULL, undercut = NULL, woodyDebris = NULL,
coverClassTypes = data.frame(coverType = c("algae", "boulder", "brush",
"liveTree", "macrophytes", "overhang", "structures", "undercut",
"woodyDebris"), isBig = c(FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
TRUE), isNatural = c(FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE),
stringsAsFactors = FALSE), coverCalculationValues = data.frame(field = c(NA,
"0", "1", "2", "3", "4"), presence = c(NA, 0, 1, 1, 1, 1), characteristicCover
= c(NA, 0, 0.05, 0.25, 0.575, 0.875), stringsAsFactors = FALSE))
}
\arguments{
\item{algae}{A data frame containing algae cover class data at each transect for
all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{boulder}{A data frame containing boulder cover class data at each
transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{brush}{A data frame containing brush cover class data at each
transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{liveTree}{A data frame containing livetree cover class data at each
transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{macrophytes}{A data frame containing plant cover class data at each
transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{overhang}{A data frame containing overhang cover class data at each
transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{structures}{A data frame containing structural cover class data at each
transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{undercut}{A data frame containing undercut cover class data at each
transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{woodyDebris}{A data frame containing woody debris cover class data at
each transect for all reaches, with the following columns:
\itemize{
\item SITE integer or character specifying the site visit
\item TRANSECT character value specifying the transect
for which the value was recorded.
\item VALUE numeric or character values
}}
\item{coverClassTypes}{A data frame containing group membership information
for each type of fish cover. The default value for this argument
reproduces EPA NARS calculations. Expected to have the following columns:
\itemize{
\item coverType character values 'algae', 'boulder', 'brush',
'liveTree', 'macrophytes', 'overhang',
'structures', 'undercut', 'woodyDebris'
\item isBig logical values specifying whether the class
is considered as large for analysis
\item isNatural logical values specifying whether the class
is considered as natural for analysis
}}
\item{coverCalculationValues}{A data frame specifying how cover class values
are mapped to presence/absence and to characteristic cover fractions for
analysis. The default value for this argument reproduces EPA NARS
calculations. Expected to have the following columns:
\itemize{
\item field character value specifying the codes
used to record cover values
\item presence numeric value specifying whether the
cover value is present (1) or absent
(0) or missing (NA), used for mean
presence calculations.
\item characteristicCover numeric value specifying the
value used for mean cover
calculations.
}}
}
\value{
Either a data frame when metric calculation is successful or a
character string containing an error message when metric calculation is
not successful. The data frame contains the following columns:
\itemize{
\item SITE - universal ID value
\item METRIC - metric name
\item VALUE - metric value
}
Metrics calculated include: pfc_alg, pfc_rck, pfc_brs, pfc_lvt, pfc_aqm,
pfc_ohv, pfc_hum, pfc_ucb, pfc_lwd, xfc_alg, xfc_rck, xfc_brs, xfc_lvt,
xfc_aqm, xfc_ohv, xfc_hum, xfc_ucb, xfc_lwd, pfc_all, pfc_big, pfc_nat,
xfc_all, xfc_big, xfc_nat, sdfc_ucb, sdfc_ohv, idrucb, idrohv, iqrucb,
iqrohv
Descriptions for all metrics are included in
\emph{NRSA_Physical_Habitat_Metric_Descriptions.pdf} in the package
documentation.
}
\description{
This function calculates the fish cover
portion of the physical habitat metrics for National
Rivers and Streams Assessment (NRSA) data. The function
requires data frames containing the channel cover and stream
verification form data files.
}
\examples{
head(fishcoverEx)
fishCvrOut <- nrsaFishCover(algae=subset(fishcoverEx,PARAMETER=='ALGAE'),
boulder=subset(fishcoverEx,PARAMETER=='BOULDR'),
brush=subset(fishcoverEx,PARAMETER=='BRUSH'),
liveTree=subset(fishcoverEx,PARAMETER=='LVTREE'),
macrophytes=subset(fishcoverEx,PARAMETER=='MACPHY'),
overhang=subset(fishcoverEx,PARAMETER=='OVRHNG'),
structures=subset(fishcoverEx,PARAMETER=='STRUCT'),
undercut=subset(fishcoverEx,PARAMETER=='UNDCUT'),
woodyDebris=subset(fishcoverEx,PARAMETER=='WOODY'))
head(fishCvrOut)
}
\author{
Curt Seeliger \email{Seeliger.Curt@epa.gov}\cr
Tom Kincaid \email{Kincaid.Tom@epa.gov}
}
|
458db058ab2a8d8d2a831dafaaa7b90c2e9aa92c
|
b83cde74005d5d837f0494ce17ff75af290cc12d
|
/man/pro_edata.Rd
|
e91e858530cc53d0e987d70aa3570542d0fdfb93
|
[] |
no_license
|
clabornd/pmartRdata
|
eea42713a6af5389a9e2035e14c8db990d03095f
|
34ea559378f1f0523bfaa09609c98cfa368c103a
|
refs/heads/master
| 2020-04-08T21:19:04.424102
| 2019-11-06T19:12:29
| 2019-11-06T19:12:29
| 159,738,842
| 0
| 0
| null | 2018-11-29T23:05:44
| 2018-11-29T23:05:44
| null |
UTF-8
|
R
| false
| true
| 1,066
|
rd
|
pro_edata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{pro_edata}
\alias{pro_edata}
\title{Protein-level Expression Data (e_data)}
\format{A data.frame with 2,731 rows (proteins) and 12 columns (protein identifier and samples):
\describe{
\item{Reference}{Reference protein name}
\item{Mock*}{Three columns of mock samples}
\item{Infection*}{Eight columns of infection samples}
}}
\source{
See details of \code{\link{pmartRdata}} for relevant grant numbers.
}
\description{
A dataset containing the log2 median-normalized quantified mass spectra for 2,731 proteins (quantified from \code{\link{pep_edata}}) using Bayesian proteoform modeling (Webb-Robertson, 2014).
}
\references{
Webb-Robertson BJ, Matzke MM, Datta S, Payne SH, Kang J, Bramer LM, Nicora CD, Shukla AK, Metz TO, Rodland KD, Smith RD, Tardiff MF, McDermott JE, Pounds JG, Waters KM (2014), \emph{Bayesian proteoform modeling improves protein quantification of global proteomic measurements}. Molecular & Cellular Proteomics. doi: 10.1074/mcp.M113.030932.
}
|
4cc27b3acdae3d6ca699540a76a0767644e35dbe
|
3b0be5721a5478b1bac4e6b08cdcd1b88e3a4046
|
/inst/snippets/col.R
|
0c342174b9fb6c532d545c5575e161387acb9c81
|
[] |
no_license
|
stacyderuiter/Lock5withR
|
b7d227e5687bc59164b9e14de1c8461cb7861b14
|
417db714078dc8eaf91c3c74001b88f56f09b562
|
refs/heads/master
| 2020-04-06T06:33:39.228231
| 2015-05-27T11:41:42
| 2015-05-27T11:41:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
col.R
|
# There are 25 numbered plot symbols; pch=plot character
# Lattice scatterplot of mcs vs age from the HELPrct data, drawing each sex
# group with its own symbol (pch) and color; cex shrinks the points slightly.
xyplot( mcs ~ age, data=HELPrct, groups=sex,
  pch=c(1,2), col=c('brown', 'darkgreen'), cex=.75 )
|
6408ba249e5a2e2f5035c0fafdbb675008446d18
|
c60558ba8b790de2bfe96dc557d00dcaf82d3472
|
/rf_script.R
|
05867cd5ca44013415ca0a9148b6bfd4622d7d47
|
[] |
no_license
|
frankietam/MessyDataHW5
|
ba7d107eb40bb10b7bba0c389bc7e6c11a35fc28
|
38ab9be151b55350fc3fd2888bb0db366c4e425a
|
refs/heads/master
| 2020-04-08T03:24:01.948506
| 2018-12-06T14:35:02
| 2018-12-06T14:35:02
| 158,974,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,429
|
r
|
rf_script.R
|
## Messy Data and Machine Learning Homework 5
## Paul Sergent, Ruoyu Zhu, Frankie Tam
# Question 1
library(randomForest)
library(ROCR)
# import data
sqf_08_16.data <- read_csv('sqf_08_16.csv')
# restrict to CPW stops
sqf <- sqf_08_16.data %>% filter(suspected.crime=='cpw')
sqf <- sqf %>%
select(id, year, found.weapon, precinct, location.housing,
stopped.bc.desc, stopped.bc.violent, stopped.bc.other, stopped.bc.object,
stopped.bc.casing, stopped.bc.lookout, stopped.bc.drugs, stopped.bc.clothing,
stopped.bc.furtive, stopped.bc.bulge,
additional.report, additional.investigation, additional.proximity, additional.evasive,
additional.associating, additional.direction, additional.highcrime, additional.time,
additional.sights, additional.other,
suspect.age, suspect.build, suspect.sex, suspect.height, suspect.weight,
inside, radio.run, officer.uniform, observation.period, day, month, time.period)
# Convert variable types as necessary
sqf <- sqf %>% mutate(suspect.build = as.factor(suspect.build),
suspect.sex = as.factor(suspect.sex),
location.housing = as.factor(location.housing),
day = as.factor(day),
month = as.factor(month),
time.period = as.factor(time.period),
precinct = as.factor(precinct),
found.weapon = as.factor(found.weapon))
# A)
# spread precinct into binary indicator
# age, height, weight
precinct_spread <- sqf %>% count(id, precinct) %>% spread(precinct, n, fill=0)
# join precinct_spread
sqf_spread <- left_join(sqf, precinct_spread, by = "id")
# remove precinct column
sqf_spread <- sqf_spread %>% select(-precinct)
# convert precinct column names to legal names
names(sqf_spread) <- make.names(names(sqf_spread))
# restrict to 2013, 2014
sqf.1314 <- sqf_spread %>% filter(year==2013|year==2014)
# shuffle the data
sqf.1314 <- sqf.1314 %>% slice(sample(1:n()))
# 50% for training set
split_size = floor(nrow(sqf.1314)/2)
train_half <- sqf.1314 %>% slice(1:split_size)
# 50% for test set
test_half <- sqf.1314 %>% slice(split_size+1:n())
# restrict to 2015
test_later <- sqf_spread %>% filter(year==2015)
# remove stop id, year
train_half <- train_half %>% select(-id, -year)
test_half <- test_half %>% select(-id, -year)
test_later <- test_later %>% select(-id, -year)
# B)
# remove rows with NA
# remove observations with missing values
sum(is.na(train_half))
train_half <- na.omit(train_half)
# Create a Random Forest model with default parameters
model <- randomForest(found.weapon ~ ., data = train_half, ntree = 200, importance = TRUE)
model
# C)
# test half set
test_half$predicted.probability <- predict(model, newdata = test_half, type='prob')
# compute AUC using ROCR package
testhalf.pred <- prediction(test_half$predicted.probability[,2], test_half$found.weapon)
testhalf.perf <- performance(testhalf.pred, "auc")
cat('the auc score is ', 100*testhalf.perf@y.values[[1]], "\n")
# test later set
test_later$predicted.probability <- predict(model, newdata = test_later, type='prob')
# compute AUC using ROCR package
testlater.pred <- prediction(test_later$predicted.probability[,2], test_later$found.weapon)
testlater.perf <- performance(testlater.pred, "auc")
cat('the auc score is ', 100*testlater.perf@y.values[[1]], "\n")
|
512ef0efd37aca281a8649d4a18c37f433b988c2
|
0d35749c4c44b101afc124d26380574d650fec3a
|
/R/replace_labs.R
|
96a7c2a2e28449b4f7236132a8fa1e1504b0506c
|
[
"MIT"
] |
permissive
|
MattCowgill/grattantheme
|
3b27ce0488907d46dc52eff65622aef0235d4965
|
0b1dfac4e19a38c8894c0556cc1ebd3f1ee991de
|
refs/heads/master
| 2023-01-09T16:58:04.454028
| 2022-12-29T22:15:58
| 2022-12-29T22:15:58
| 351,668,681
| 0
| 0
|
NOASSERTION
| 2021-03-26T05:08:54
| 2021-03-26T05:08:54
| null |
UTF-8
|
R
| false
| false
| 1,640
|
r
|
replace_labs.R
|
#' Replace a ggplot2's labels (title, subtitle, and/or caption) with a
#' given string. Works for regular ggplot2 plots and patchwork plots.
#'
#' @param p ggplot2 object
#' @param labs named list of labels to replace; must contain elements named
#' 'title', 'subtitle', and 'caption', and no other elements.
#'
replace_labs <- function(p,
                         labs = list(title = NULL,
                                     subtitle = NULL,
                                     caption = NULL)) {
  # --- input validation -----------------------------------------------------
  if (!inherits(p, "gg")) {
    stop("Plot is not a ggplot2 object.")
  }
  if (!inherits(labs, "list")) {
    stop("labs must be a list.")
  }
  if (!identical(sort(names(labs)), c("caption", "subtitle", "title"))) {
    stop("labs must be a named list containing elements named title, ",
         "subtitle, and caption, and only those elements.")
  }
  label_slots <- c("title", "subtitle", "caption")
  if (inherits(p, "patchwork")) {
    # Patchwork keeps plot-level labels in the annotation slot: strip any
    # existing title/subtitle/caption, then append the replacements.
    kept <- subset(p$patches$annotation,
                   !names(p$patches$annotation) %in% label_slots)
    p$patches$annotation <- c(kept, labs)
  } else {
    # Plain ggplot: labels live directly in p$labels.
    kept <- subset(p$labels, !names(p$labels) %in% label_slots)
    p$labels <- c(kept, labs)
  }
  p
}
|
d0ebc10ebadde74a153c6e1b4287eb8d36a29938
|
8dfa9bdff13c733458052b02110f73604218d224
|
/SRC/R/GRmetrics/R/GRbox.R
|
ecd23329affb5b035216049894551f3629cb072f
|
[] |
no_license
|
datarail/gr_metrics
|
faa40309f77c5f5679f16f46cad214b352d757e5
|
9c181322bb0dbca3459d80840b26104178fdf803
|
refs/heads/master
| 2023-05-15T09:22:18.662124
| 2023-04-27T21:05:10
| 2023-04-27T21:05:10
| 49,449,313
| 10
| 7
| null | 2023-04-27T21:05:11
| 2016-01-11T19:34:21
|
R
|
UTF-8
|
R
| false
| false
| 15,368
|
r
|
GRbox.R
|
#' Boxplots of a given GR metric
#'
#' Given a SummarizedExperiment object created by \code{\link{GRfit}},
#' this function creates boxplots according to the parameters below.
#'
#' @param fitData a SummarizedExperiment object, generated by the GRfit
#' function.
#' @param metric the GR metric (GR50, GRinf, h_GR, GRmax, GEC50, or GR_AOC)
#' or traditional metric (IC50, Einf, h, Emax, EC50, or AUC) that will be used
#' for the boxplot.
#' @param groupVariable the name of the variable from data (e.g. drug,
#' cell-line, etc.) to select factors from.
#' @param pointColor a variable that defines the coloring of the points
#' overlayed on the boxplot.
#' @param factors a vector of values of "groupVariable" of data that define
#' which variables to make boxplots for. By default, a separate boxplot is made
#' for each unique value of groupVariable.
#' @param wilA one value or a vector of values from "factors", i.e.
#' a subset of the boxplots. If specified, a one-sided Wilcoxon rank sum test
#' (wilcox.test) will be performed between "wilA" and "wilB" and
#' the results will be displayed on the figure. The null hypothesis
#' that the values from "wilA" and "wilB" have the same mean will
#' be tested against the alternative hypothesis that the mean of the
#' "wilB" values is greater than that of the "wilA" values.
#' @param wilB one value or a vector of values from "factors", i.e.
#' a subset of the boxplots (not overlapping "wilA").
#' @param plotly a logical value indicating whether to output a ggplot2 graph
#' or an interactive ggplotly graph
#'
#' @return ggplot2 or ggplotly boxplots of the factors along the x-axis, with
#' points colored by the given variable.
#' @author Nicholas Clark
#' @details
#' Given a SummarizedExperiment object created by \code{\link{GRfit}},
#' this function creates boxplots of a given GR metric (GR50, GRmax, etc.) or
#' traditional metric (IC50, Emax, etc.)
#' for values of the grouping variable. The results can be viewed in a static
#' ggplot image or an interactive plotly graph.
#'
#' By default, a boxplot is created for all unique
#' values of the grouping variable. The "factors" parameter can be used to
#' specify a smaller subset of values for which to create boxplots.
#' Points are overlayed on the boxplots and
#' they can be colored by the variable specified in the pointColor parameter.
#' If pointColor is set to NULL, the points will all be black. The results can
#' be viewed in a static ggplot image or an interactive plotly graph.
#' @seealso To create the object needed for this function, see
#' \code{\link{GRfit}}. For other visualizations, see \code{\link{GRdrawDRC}}
#' and \code{\link{GRscatter}}. For online GR calculator and browser, see
#' \url{http://www.grcalculator.org}.
#' @examples
#' # Load Case A (example 1) input
#' data("inputCaseA")
#' head(inputCaseA)
#' # Run GRfit function with case = "A"
#' output1 = GRfit(inputData = inputCaseA,
#' groupingVariables = c('cell_line','agent', 'perturbation','replicate',
#' 'time'))
#' GRbox(output1, metric ='GRinf',
#' groupVariable = 'cell_line', pointColor = 'agent' , factors = c('BT20',
#' 'MCF10A'))
#' GRbox(output1, metric ='GRinf',
#' groupVariable = 'cell_line', pointColor = 'cell_line' ,
#' factors = c('BT20', 'MCF10A'), plotly = FALSE)
#' GRbox(output1, metric = 'GR50', groupVariable = 'cell_line',
#' pointColor = 'cell_line', wilA = "BT20", wilB = c("MCF7","MCF10A"))
#' @export
GRbox <- function(fitData, metric, groupVariable, pointColor,
                  factors = "all", wilA = NULL, wilB = NULL, plotly = TRUE) {
  # Join the per-experiment metadata (colData) with the fitted metric values
  # (assay, transposed so each metric becomes a column of `data`).
  data = cbind(as.data.frame(SummarizedExperiment::colData(fitData)),
               t(SummarizedExperiment::assay(fitData)))
  #bottom_margin = max(nchar(data[[groupVariable]]), na.rm = TRUE)
  data[[groupVariable]] = factor(data[[groupVariable]])
  # Optionally restrict the plot to a user-supplied subset of levels of the
  # grouping variable; every requested level must actually occur in the data.
  if(!identical(factors, "all")) {
    if(length(intersect(factors, data[[groupVariable]])) != length(factors)) {
      stop('Factors must be values of the grouping variable')
    }
    data = data[data[[groupVariable]] %in% factors, ]
    #bottom_margin = max(nchar(factors), na.rm = TRUE)
  }
  # The two Wilcoxon comparison groups must be disjoint sets of levels.
  if(length(intersect(wilA, wilB)) > 0) {
    stop('wilA and wilB must not overlap.')
  }
  # Potency metrics (GR50/IC50) are displayed on a log10 scale and Hill
  # coefficients (h_GR/h) on a log2 scale: add the transformed column and
  # re-point `metric` at its name.
  if(metric == "GR50") {
    data$`log10(GR50)` = log10(data$GR50)
    metric = "log10(GR50)"
  }
  if(metric == "IC50") {
    data$`log10(IC50)` = log10(data$IC50)
    metric = "log10(IC50)"
  }
  if(metric == "h_GR") {
    data$`log2(h_GR)` = log2(data$h_GR)
    metric = "log2(h_GR)"
  }
  if(metric == "h") {
    data$`log2(h)` = log2(data$h)
    metric = "log2(h)"
  }
  # Get rid of infinite values (log-transforms above can produce +/-Inf)
  fin = is.finite(data[[metric]])
  data = data[fin,]
  if(!is.null(wilA) & !is.null(wilB)) {
    # Relevel so the wilA levels come first, then the wilB levels; the
    # bracket-drawing code below relies on this ordering when it indexes
    # boxplots by position (q[[1]][[i]]).
    for(i in 1:length(wilB)) {
      data[[groupVariable]] = stats::relevel(data[[groupVariable]], wilB[i])
    }
    for(i in 1:length(wilA)) {
      data[[groupVariable]] = stats::relevel(data[[groupVariable]], wilA[i])
    }
    # Perform Wilcoxon rank sum test (one-sided: A less than B)
    rowsA = data[[groupVariable]] %in% wilA
    rowsB = data[[groupVariable]] %in% wilB
    wil_dataA = data[rowsA,metric]
    wil_dataB = data[rowsB,metric]
    wil = stats::wilcox.test(x = wil_dataA, y = wil_dataB,
                             alternative = "less")
    wil_pval = prettyNum(wil$p.value, digits = 2)
  }
  if(plotly == TRUE) {
    # Interactive output: build a ggplot, then convert with plotly_build and
    # tweak the resulting layout object directly.
    p <- ggplot2::ggplot(data, ggplot2::aes_string(x = groupVariable,
                                                   y = metric, text = 'experiment'))
    p = p + ggplot2::geom_boxplot(ggplot2::aes_string(fill = groupVariable,
      alpha = 0.3), outlier.color = NA, show.legend = FALSE) +
      ggplot2::geom_jitter(width = 0.5, show.legend = FALSE,
                           ggplot2::aes_string(colour = pointColor)) +
      ggplot2::xlab('') + ggplot2::ylab(metric)
    q = plotly::plotly_build(p)
    # Last CRAN version of plotly (3.6.0) uses "q$"
    # Latest github version of plotly (4.3.5) uses "q$x"
    if(is.null(q$data)) {
      # replace q with q$x so code works with new plotly version
      q = q$x
    }
    if(!is.null(wilA) & !is.null(wilB)) {
      # Read the y-axis range from the built plotly layout (internal plotly
      # structure; q[[2]] appears to be the layout element here).
      top_y = q[[2]]$yaxis$range[2]
      bottom_y = q[[2]]$yaxis$range[1]
      total_y_range = top_y - bottom_y
      # Get top of boxplot whiskers
      whiskers = NULL
      len = length(wilA) + length(wilB)
      for(i in 1:len) {
        whiskers[i] = stats::fivenum(q[[1]][[i]]$y)[5]
      }
      top_whisker = max(whiskers, na.rm = TRUE)
      # Make sure there is at least 25% headroom above the tallest whisker
      # so the significance bracket and p-value label fit.
      y_range = (top_y - top_whisker)/total_y_range
      if(y_range < .25) {
        top_y = top_whisker + .25*total_y_range
      }
      # lh/ll: top and bottom y of the bracket; bump: vertical tick size.
      lh = top_whisker + total_y_range*(.1)
      bump = total_y_range*(.05)
      ll = lh - bump
      lenA = length(wilA)
      lenB = length(wilB)
      pval = paste("p =", wil_pval)
      # Draw the comparison bracket; geometry differs by whether each side
      # of the comparison spans one boxplot or several. After annotating,
      # rebuild the plotly object from the modified ggplot.
      if(lenA == 1 & lenB == 1) {
        p = p + ggplot2::annotate("text", x = 1.5, y = lh + bump/2,
                                  label = pval) +
          ggplot2::geom_segment(x = 1, y = lh, xend = 2, yend = lh) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh) +
          ggplot2::geom_segment(x = 2, y = ll, xend = 2, yend = lh)
        rm(q)
        q = plotly::plotly_build(p)
      } else if(lenA > 1 & lenB == 1) {
        p = p + ggplot2::annotate("text", x = ((lenA + 1) + ((lenA+1)/2))/2,
                                  y = lh + 2*bump, label = pval) +
          ggplot2::geom_segment(x = 1, y = lh, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh) +
          ggplot2::geom_segment(x = lenA, y = ll, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh + bump, xend = lenA + 1,
                                yend = lh + bump) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh, xend = (lenA+1)/2,
                                yend = lh + bump) +
          ggplot2::geom_segment(x = lenA+1, y = ll, xend = lenA+1,
                                yend = lh + bump)
        rm(q)
        q = plotly::plotly_build(p)
      } else if(lenA == 1 & lenB > 1) {
        p = p + ggplot2::annotate("text", x = 1.25 + .25*lenB,
                                  y = lh + 2*bump, label = pval) +
          ggplot2::geom_segment(x = 1, y = lh+bump, xend = .5*lenB + 1.5,
                                yend = lh+bump) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh+bump) +
          ggplot2::geom_segment(x = 1.5+.5*lenB, y = lh, xend = 1.5+.5*lenB,
                                yend = lh+bump) +
          ggplot2::geom_segment(x = 2, y = lh, xend = lenB + 1, yend = lh) +
          ggplot2::geom_segment(x = 2, y = ll, xend = 2, yend = lh) +
          ggplot2::geom_segment(x = lenB+1, y = ll, xend = lenB+1, yend = lh)
        rm(q)
        q = plotly::plotly_build(p)
      } else if(lenA > 1 & lenB > 1) {
        p = p + ggplot2::annotate("text", x = .25*(lenB-1)+.75*(lenA+1),
                                  y = lh + 2*bump, label = pval) +
          ggplot2::geom_segment(x = 1, y = lh, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh) +
          ggplot2::geom_segment(x = lenA, y = ll, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = lenA+1, y = lh, xend = lenA+lenB,
                                yend = lh) +
          ggplot2::geom_segment(x = lenA+1, y = ll, xend = lenA+1, yend = lh) +
          ggplot2::geom_segment(x = lenA+lenB, y = ll, xend = lenA+lenB,
                                yend = lh) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh+bump,
                                xend = (lenA+1)+((lenB-1)/2), yend = lh+bump) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh, xend = (lenA+1)/2,
                                yend = lh+bump) +
          ggplot2::geom_segment(x = (lenA+1)+((lenB-1)/2), y = lh,
                                xend = (lenA+1)+((lenB-1)/2), yend = lh+bump)
        rm(q)
        q = plotly::plotly_build(p)
      }
    }
    # Rebuild once more from the (possibly annotated) ggplot, then fix up
    # tick angle and margins on the layout before returning the plotly object.
    q = plotly::plotly_build(p)
    # Last CRAN version of plotly (3.6.0) uses "q$"
    # Latest github version of plotly (4.3.5) uses "q$x"
    if(!is.null(q$data)) { # old plotly
      # Widen the bottom/left margins so long tick labels are not clipped.
      bottom_margin = max(nchar(q$layout$xaxis$ticktext), na.rm = TRUE)
      left = nchar(q$layout$xaxis$ticktext[1])
      q$layout$xaxis$tickangle = -45
      q$layout$margin$b = 15 + 6*bottom_margin
      if(left > 10) {
        left_margin = q$layout$margin$l + (left-10)*6
        q$layout$margin$l = left_margin
      }
      return(q)
    } else { # new plotly
      # Same margin fix-ups, but the layout lives under q$x in plotly >= 4.
      bottom_margin = max(nchar(q$x$layout$xaxis$ticktext), na.rm = TRUE)
      left = nchar(q$x$layout$xaxis$ticktext[1])
      q$x$layout$xaxis$tickangle = -45
      q$x$layout$margin$b = 15 + 6*bottom_margin
      if(left > 10) {
        left_margin = q$x$layout$margin$l + (left-10)*6
        q$x$layout$margin$l = left_margin
      }
      return(q)
    }
  } else {
    # Static ggplot output.
    p <- ggplot2::ggplot(data, ggplot2::aes_string(x = groupVariable,
                                                   y = metric))
    p = p + ggplot2::geom_boxplot(ggplot2::aes_string(
      fill = groupVariable, alpha = 0.3), outlier.color = NA,
      show.legend = FALSE) + ggplot2::geom_jitter(
        width = 0.5, ggplot2::aes_string(colour = pointColor)) +
      ggplot2::xlab('') + ggplot2::ylab(metric) +
      ggplot2::theme_grey(base_size = 14) +
      ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45,
                                                         vjust = 1, hjust=1))
    if(!is.null(wilA) & !is.null(wilB)) {
      # Even for static output, a throwaway plotly build is used to obtain
      # the axis range and whisker heights needed to place the bracket.
      q = plotly::plotly_build(p)
      # Last CRAN version of plotly (3.6.0) uses "q$"
      # Latest github version of plotly (4.3.5) uses "q$x"
      if(is.null(q$data)) {
        q = q$x
      }
      # Get y range:
      top_y = q[[2]]$yaxis$range[2]
      bottom_y = q[[2]]$yaxis$range[1]
      total_y_range = top_y - bottom_y
      # Get top of boxplot whiskers
      whiskers = NULL
      len = length(wilA) + length(wilB)
      for(i in 1:len) {
        whiskers[i] = stats::fivenum(q[[1]][[i]]$y)[5]
      }
      top_whisker = max(whiskers, na.rm = TRUE)
      y_range = (top_y - top_whisker)/total_y_range
      if(y_range < .25) {
        top_y = top_whisker + .25*total_y_range
      }
      lh = top_whisker + total_y_range*(.1)
      bump = total_y_range*(.05)
      ll = lh - bump
      lenA = length(wilA)
      lenB = length(wilB)
      pval = paste("p =", wil_pval)
      # Same bracket geometry as the interactive branch, drawn directly on
      # the ggplot (no plotly rebuild needed here).
      if(lenA == 1 & lenB == 1) {
        p = p + ggplot2::annotate("text", x = 1.5, y = lh + bump/2,
                                  label = pval) +
          ggplot2::geom_segment(x = 1, y = lh, xend = 2, yend = lh) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh) +
          ggplot2::geom_segment(x = 2, y = ll, xend = 2, yend = lh)
      } else if(lenA > 1 & lenB == 1) {
        p = p + ggplot2::annotate("text", x = ((lenA + 1) + ((lenA+1)/2))/2,
                                  y = lh + 2*bump, label = pval) +
          ggplot2::geom_segment(x = 1, y = lh, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh) +
          ggplot2::geom_segment(x = lenA, y = ll, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh + bump, xend = lenA + 1,
                                yend = lh + bump) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh, xend = (lenA+1)/2,
                                yend = lh + bump) +
          ggplot2::geom_segment(x = lenA+1, y = ll, xend = lenA+1,
                                yend = lh + bump)
      } else if(lenA == 1 & lenB > 1) {
        p = p + ggplot2::annotate("text", x = 1.25 + .25*lenB,
                                  y = lh + 2*bump, label = pval) +
          ggplot2::geom_segment(x = 1, y = lh+bump, xend = .5*lenB + 1.5,
                                yend = lh+bump) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh+bump) +
          ggplot2::geom_segment(x = 1.5+.5*lenB, y = lh, xend = 1.5+.5*lenB,
                                yend = lh+bump) +
          ggplot2::geom_segment(x = 2, y = lh, xend = lenB + 1, yend = lh) +
          ggplot2::geom_segment(x = 2, y = ll, xend = 2, yend = lh) +
          ggplot2::geom_segment(x = lenB+1, y = ll, xend = lenB+1, yend = lh)
      } else if(lenA > 1 & lenB > 1) {
        p = p + ggplot2::annotate("text", x = .25*(lenB-1)+.75*(lenA+1),
                                  y = lh + 2*bump, label = pval) +
          ggplot2::geom_segment(x = 1, y = lh, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = 1, y = ll, xend = 1, yend = lh) +
          ggplot2::geom_segment(x = lenA, y = ll, xend = lenA, yend = lh) +
          ggplot2::geom_segment(x = lenA+1, y = lh, xend = lenA+lenB,
                                yend = lh) +
          ggplot2::geom_segment(x = lenA+1, y = ll, xend = lenA+1, yend = lh) +
          ggplot2::geom_segment(x = lenA+lenB, y = ll, xend = lenA+lenB,
                                yend = lh) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh+bump,
                                xend = (lenA+1)+((lenB-1)/2), yend = lh+bump) +
          ggplot2::geom_segment(x = (lenA+1)/2, y = lh, xend = (lenA+1)/2,
                                yend = lh+bump) +
          ggplot2::geom_segment(x = (lenA+1)+((lenB-1)/2), y = lh,
                                xend = (lenA+1)+((lenB-1)/2), yend = lh+bump)
      }
    }
    return(p)
  }
}
|
011e7a6df5ee27283301b772bf59c9f7b5ccd20c
|
42702f6522a4ae297f5c2c402e08a5669a1dc5de
|
/Week20_Volcanoes/2020_w20_volcanoes.R
|
30113dcb2d7e4c9b44c87c7fb547d2a27e2171e6
|
[] |
no_license
|
crkm/TidyTuesday
|
008f931f0fedb0d16102ee2450c06155511f6489
|
d414af0d9f4554a0739ff5200e0b903ca3ff32d2
|
refs/heads/master
| 2022-12-28T15:09:05.800218
| 2020-10-14T19:31:46
| 2020-10-14T19:31:46
| 263,174,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,138
|
r
|
2020_w20_volcanoes.R
|
# TidyTuesday 2020, week 20: volcanoes.

# Load packages ----
library(tidyverse)
library(ggplot2)
library(ggridges)

# Get the data ----
volcano <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-05-12/volcano.csv')
eruptions <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-05-12/eruptions.csv')
events <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-05-12/events.csv')
tree_rings <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-05-12/tree_rings.csv')
sulfur <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-05-12/sulfur.csv')

# Columns of interest ----
volcanoid <- as.factor(volcano$volcano_number)
mainregion <- as.factor(volcano$region)
subregion <- as.factor(volcano$subregion)
rock <- as.factor(volcano$major_rock_1)
elevation <- volcano$elevation
mydata <- tibble(volcanoid, mainregion, subregion, elevation, rock)

# Ridgeline plot: elevation distribution per major rock type ----
p <- ggplot(mydata, aes(x = elevation, y = rock, fill = rock)) +
  geom_density_ridges(rel_min_height = 0.01) +
  theme_ridges() +
  theme(
    legend.position = "none",
    axis.text.x = element_text(size = 10),
    axis.text.y = element_text(size = 10),
    plot.title = element_text(size = 16, face = "bold")
  ) +
  scale_x_continuous(n.breaks = 10) +
  scale_fill_brewer(palette = "Set3") +
  labs(
    x = "Elevation",
    y = "Rock Type",
    title = "Association Between Elevation and Major Volcanic Rock Types"
  )
p

# Same plot, faceted by main global region ----
p2 <- p +
  facet_wrap(~ mainregion, labeller = label_wrap_gen()) +
  theme(
    strip.text.x = element_text(size = 8),
    axis.text.x = element_text(size = 8, angle = 45),
    axis.text.y = element_text(size = 8),
    plot.title = element_text(size = 16, face = "bold")
  ) +
  scale_x_continuous(n.breaks = 5) +
  scale_fill_brewer(palette = "Set3") +
  labs(
    x = "Elevation",
    y = "Rock Type",
    title = "Association Between Elevation and Major Volcanic \nRock Types Found In Different Global Regions"
  )
p2
|
6b9a1dd668bf9a34c2a1652d76b485ebeb9f5b07
|
3cee6695d16a5879a835eb81ce7c71901e734c81
|
/tests/testthat/test_inputs/deps_vanilla.R
|
9930fc964b530c3278dde30b71b9ae4e19aba5ff
|
[
"MIT"
] |
permissive
|
anthonynorth/using
|
1757fe2d06960e7ddc10ccac1d31f2a36f429306
|
c33a88c5c42c64fd84423a65af7b5a3ae8d5e64a
|
refs/heads/master
| 2021-09-25T02:53:52.341391
| 2020-07-22T06:03:19
| 2020-07-22T06:03:19
| 242,888,516
| 26
| 1
|
MIT
| 2020-07-22T06:03:21
| 2020-02-25T02:13:02
|
R
|
UTF-8
|
R
| false
| false
| 84
|
r
|
deps_vanilla.R
|
library(rmarkdown)
library(here) # to get project root folder in Rmd
library(knitr)
|
1059fb78700f14e8e45fabeb8dda338365838e1a
|
6c316c163d8b6336a1ddddfb5c435f56466f8a98
|
/ExploratoryDataAnalysis/Week4/Plot2.R
|
42278ca28652dcf3dfa03eabe2002e8e36d31672
|
[] |
no_license
|
Meiyalaghan/CourseraDataScienceAssignments
|
186e94b50e7fb0d2f2aa1226226dbb9ac3be300a
|
b517aaf15078fc04bad593007a05a51a03d32218
|
refs/heads/master
| 2021-07-02T19:07:06.147240
| 2018-07-27T02:28:41
| 2018-07-27T02:28:41
| 96,506,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,122
|
r
|
Plot2.R
|
# TITLE: CourseraDataScienceAssignments - ExploratoryDataAnalysis - Week4
# Peer-graded Assignment: Course Project 2
# QUESTION 2: Have total emissions from PM2.5 decreased in Baltimore City,
# Maryland (fips == "24510") from 1999 to 2008? Use the base plotting system
# to make a plot answering this question.
#
# NOTE(review): the original script called rm(list = ls()) here, silently
# wiping the caller's entire global environment as a side effect. That is
# unsafe when the script is sourced into an existing session, so it was
# removed; the script only uses objects it creates itself.

# Read data from the .rds files (expects a ./data/ directory).
Emissions_Data <- readRDS("./data/summarySCC_PM25.rds")
Code_Table <- readRDS("./data/Source_Classification_Code.rds")

# Quick sanity check of the loaded emission data.
head(Emissions_Data)
str(Emissions_Data)

# Subset to Baltimore City (fips == "24510") and sum emissions per year.
Look_BaltimoreCity <- subset(Emissions_Data, fips == "24510")
png("Plot2.png")
TotalEmission_byYear <- tapply(Look_BaltimoreCity$Emissions,
                               INDEX = Look_BaltimoreCity$year, FUN = sum)
plot(names(TotalEmission_byYear), TotalEmission_byYear,
     type = "b", # both points and lines
     lty = "dotted",
     col = "blue",
     lwd = 3, # line width of 3
     main = expression("Total PM2.5 emission in BaltimoreCity by year"),
     xlab = "Year",
     ylab = "Total PM2.5 emissions (tons)")
dev.off()
|
edd2fcc815a972267369b64939842ecbaeb8a374
|
2d9fb03feb8626c67ba5d3f1a0815710b621c5f6
|
/R/size_of_selfloops_activity.R
|
3df1ff5c665677971554689dff892ce0980b0797
|
[] |
no_license
|
bbrewington/edeaR
|
4c8916bad4c54521764574770ae941983363dc0a
|
02b31d133b5cec68caa6e0c5fa446a6a6275d462
|
refs/heads/master
| 2021-01-19T18:32:49.442081
| 2016-08-27T17:31:36
| 2016-08-27T17:31:36
| 66,726,375
| 0
| 0
| null | 2016-08-27T17:17:51
| 2016-08-27T17:17:51
| null |
UTF-8
|
R
| false
| false
| 1,964
|
r
|
size_of_selfloops_activity.R
|
#' Summary statistics of self-loop sizes per activity.
#'
#' For each activity in the event log, computes min, quartiles, mean, median,
#' max, standard deviation and IQR of the number of repetitions in its
#' self-loops (a self-loop of length n counts as n - 1 repetitions).
#'
#' @param eventlog An event log object (validated by stop_eventlog).
#' @param include_non_selfloops If TRUE, executions that are not part of any
#'   self-loop are included as runs of length 1 (0 repetitions), so statistics
#'   cover every execution of the activity; if FALSE (default), activities
#'   without self-loops get NA statistics.
#' @return The activities table with the statistic columns added,
#'   sorted by descending relative activity frequency.
size_of_selfloops_activity <- function(eventlog, include_non_selfloops = FALSE) {
  stop_eventlog(eventlog)

  act <- activities(eventlog)
  sl <- selfloops(eventlog)

  # Normalise the (log-specific) activity column name so the two tables
  # can be matched on a common column.
  colnames(act)[colnames(act) == activity_id(eventlog)] <- "event_classifier"
  colnames(sl)[colnames(sl) == activity_id(eventlog)] <- "event_classifier"

  # seq_len() instead of 1:nrow(): the original 1:nrow(act) iterated over
  # c(1, 0) when the activities table was empty.
  for (i in seq_len(nrow(act))) {
    selfloops_lengths <- filter(sl, event_classifier == act$event_classifier[i])$length

    if (!include_non_selfloops) {
      # A run of length n means n - 1 repetitions.
      selfloops_lengths <- selfloops_lengths - 1
      if (length(selfloops_lengths) > 0) {
        act$min[i] <- min(selfloops_lengths)
        act$q1[i] <- quantile(selfloops_lengths, probs = 0.25)
        act$mean[i] <- mean(selfloops_lengths)
        act$median[i] <- median(selfloops_lengths)
        act$q3[i] <- quantile(selfloops_lengths, probs = 0.75)
        act$max[i] <- max(selfloops_lengths)
        act$st_dev[i] <- sd(selfloops_lengths)
        act$iqr[i] <- act$q3[i] - act$q1[i]
      } else {
        # No self-loops for this activity: statistics are undefined.
        act$min[i] <- NA
        act$q1[i] <- NA
        act$mean[i] <- NA
        act$median[i] <- NA
        act$q3[i] <- NA
        act$max[i] <- NA
        act$st_dev[i] <- NA
        act$iqr[i] <- NA
      }
    } else {
      # Pad with a length-1 run for every execution not inside a self-loop,
      # so the - 1 correction below turns them into 0 repetitions.
      number_of_zero_selfloops <- act$absolute_frequency[i] - sum(selfloops_lengths)
      selfloops_lengths <- c(selfloops_lengths, rep(1, number_of_zero_selfloops))
      selfloops_lengths <- selfloops_lengths - 1
      act$min[i] <- min(selfloops_lengths)
      act$q1[i] <- quantile(selfloops_lengths, probs = 0.25)
      act$mean[i] <- mean(selfloops_lengths)
      act$median[i] <- median(selfloops_lengths)
      act$q3[i] <- quantile(selfloops_lengths, probs = 0.75)
      act$max[i] <- max(selfloops_lengths)
      act$st_dev[i] <- sd(selfloops_lengths)
      act$iqr[i] <- act$q3[i] - act$q1[i]
    }
  }

  act <- act %>%
    select(-absolute_frequency) %>%
    arrange(desc(relative_frequency))
  # Restore output column names expected by callers.
  colnames(act)[colnames(act) == "relative_frequency"] <- "relative_activity_frequency"
  colnames(act)[colnames(act) == "event_classifier"] <- activity_id(eventlog)
  return(act)
}
|
89dae60688973694e15bd3484be9985a4598abad
|
426c51e40c924c99f6211f3f352b8beed3c73965
|
/08 - Analizar datos para modelos predictivos.R
|
91421b06a138be35ba6b7df200c2e09dc1459f1f
|
[] |
no_license
|
pmtempone/Futbol
|
15196f9502aec394017759442dc940a943f7936e
|
cae13a37d6e2e20a90a024456c8f6c594ba8c61f
|
refs/heads/master
| 2020-04-12T08:15:38.397556
| 2017-09-19T02:12:07
| 2017-09-19T02:12:07
| 64,977,172
| 0
| 0
| null | 2017-09-17T21:56:46
| 2016-08-05T00:57:11
|
R
|
UTF-8
|
R
| false
| false
| 4,856
|
r
|
08 - Analizar datos para modelos predictivos.R
|
# Analyse match data and build the base tables for predictive models.
# NOTE(review): this script depends on global objects created elsewhere
# (Basetotal, avg_equipos_eventos, lkp_eventos) — it is not self-contained.

#---- Load libraries ----
library(funModeling)
library(dplyr)
#---- Data profiling ----
my_data_status=df_status(Basetotal)
write.csv(my_data_status,file = "campos.csv")
# Removing variables with 100% of zero values
vars_to_remove=subset(my_data_status, my_data_status$p_zeros == 100)
vars_to_remove["variable"] # there were no own goals in any of the 3 tournaments
## Keeping all except vars_to_remove
Basetotal=Basetotal[, !(names(Basetotal) %in% vars_to_remove[,"variable"])]
# Outlier detection
summary(Basetotal)
# checking the value for the top 1% of highest values (percentile 0.99), which is ~ 7.05
quantile(Basetotal$pase_correcto, 0.99)
#---- Selecting variables ----
library(ggplot2)
sv <- ggplot(Basetotal) + geom_boxplot(aes(x=titular, y=minutos_jugados, color=titular))+ylab('Minutos jugados')+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                                                                                                                      panel.background = element_blank(), axis.line = element_line(colour = "black"))
sv
#---- Cross plot ----
cross_minutos=cross_plot(Basetotal, str_input="minutos_jugados", str_target="titular")
vars_to_analyze=c("torneo", "team.1", "rol_id_rol","pase_correcto")
cross_plot(data=Basetotal, str_target="titular", str_input=vars_to_analyze)
# Proportion of starters/non-starters per playing role; rows with zero
# frequency are set to NA and then dropped via complete.cases().
prop.rol.titular <- as.data.frame(prop.table(table(Basetotal$titular,Basetotal$rol_id_rol)))
prop.rol.titular[prop.rol.titular$Freq==0,] <- NA
prop.rol.titular <- prop.rol.titular[complete.cases(prop.rol.titular),]
library(scales)
ggplot(data = prop.rol.titular,aes(x=Var2,y=Freq))+geom_col(aes(y=Freq,fill=Var1),position="dodge")+
  ylab('Proporcion sobre total de jugadores')+xlab("Rol de juego")+scale_x_discrete(labels=c("Arquero", "Defensor", "Mediocampista","Delantero"))+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))+
  scale_fill_discrete(name = "Titular")+scale_y_continuous(labels=scales::percent)
# Proportion of substitutes who played, relative to starters (hard-coded values)
prop.sup <- data.frame(rol_id_rol=c('1','2','3','4'),value=c(1.3,7.6,25.8,36.8))
prop.sup$porcentaje <- c(prop.sup$value/sum(prop.sup$value))
#---- Boxplot ----
plotar(data=Basetotal, str_input="minutos_jugados", str_target="titular", plot_type = "boxplot")
ggplot(data = Basetotal,mapping = aes(x=rol_id_rol,y=minutos_jugados,col=rol_id_rol))+geom_boxplot()
#---- Density plot ----
plotar(data=Basetotal, str_input="minutos_jugados", str_target="titular", plot_type = "histdens")
plotar(data=Basetotal, str_input="pase_correcto", str_target="titular", plot_type = "histdens")
#---- Base table for the predictive model ----
# Home win indicator: 1 if the home side scored more goals than the visitors.
Basetotal$victoria_l <- ifelse(Basetotal$fixt_local_goles - Basetotal$fixt_visitante_goles>0,1,0)
base_titulares <- Basetotal %>% filter(titular=='S')
# Aggregate per-match statistics for the home (local) starters.
base_locales <- base_titulares %>% filter(J_local=='L') %>% group_by(even_id_evento,local.1) %>%
  summarise(loc_goles=sum(goles_convertidos),loc_asistencias=sum(asistencias),loc_disp_afuera=sum(disparo_afuera),
            loc_disp_palo=sum(disparo_palo),loc_disp_atajado=sum(disparo_atajado),loc_penal_errado=sum(penal_errado),
            loc_faltas=sum(faltas),loc_offsides=sum(offsides),loc_amarillas=sum(amarillas),loc_doble_ama=sum(doble_amarilla),
            loc_despejes=sum(despejes),loc_quites=sum(quites),loc_atajadas=sum(atajadas),loc_ataj_penal=sum(atajada_penal))
# Aggregate per-match statistics for the visiting starters.
base_visitantes <- base_titulares %>% filter(J_local=='V') %>% group_by(even_id_evento,visitante.1) %>%
  summarise(vis_goles=sum(goles_convertidos),vis_asistencias=sum(asistencias),vis_disp_afuera=sum(disparo_afuera),
            vis_disp_palo=sum(disparo_palo),vis_disp_atajado=sum(disparo_atajado),vis_penal_errado=sum(penal_errado),
            vis_faltas=sum(faltas),vis_offsides=sum(offsides),vis_amarillas=sum(amarillas),vis_doble_ama=sum(doble_amarilla),
            vis_despejes=sum(despejes),vis_quites=sum(quites),vis_atajadas=sum(atajadas),vis_ataj_penal=sum(atajada_penal))
# NOTE(review): cbind assumes both tables have the same matches in the same
# order — presumably guaranteed upstream; verify before reuse.
base_partido <- cbind(base_locales,base_visitantes[,2:15])
# Assemble the final table to deliver
base_modelado_locales <- base_locales[,1:2] %>% left_join(avg_equipos_eventos,by=c("even_id_evento"="even_id_evento","equipo_local"="equipo"))
base_modelado_visitantes <- base_visitantes[,1:2] %>% left_join(avg_equipos_eventos,by=c("even_id_evento"="even_id_evento","equipo_visitante"="equipo"))
base_modelado_completa <- base_modelado_locales %>% left_join(base_modelado_visitantes,by="even_id_evento")
base_modelado_completa <- base_modelado_completa %>% left_join(lkp_eventos[,c("even_id_evento","fixt_local_goles","fixt_visitante_goles")],by="even_id_evento")
base_modelado_completa <- base_modelado_completa %>% mutate(resultado_local=ifelse(fixt_local_goles>fixt_visitante_goles,1,0))
|
eec967cd7909987217ee2a1db7dc7f89d0712943
|
5b7a0942ce5cbeaed035098223207b446704fb66
|
/R/lsListGroups.R
|
08b74bfde9178d64387edd171bd9a57b10d94d1d
|
[
"MIT"
] |
permissive
|
k127/LimeRick
|
4f3bcc8c2204c5c67968d0822b558c29bb5392aa
|
a4d634981f5de5afa5b5e3bee72cf6acd284c92a
|
refs/heads/master
| 2023-04-11T21:56:54.854494
| 2020-06-19T18:36:05
| 2020-06-19T18:36:05
| 271,702,292
| 0
| 1
| null | 2020-06-12T03:45:14
| 2020-06-12T03:45:14
| null |
UTF-8
|
R
| false
| false
| 1,532
|
r
|
lsListGroups.R
|
#' Get survey groups
#'
#' @param surveyID ID of the survey containing the groups
#' @param lang \emph{(optional)} Language code for the language -- if not given, due to a bug in the API, instead of just the base language of the particular survey, all languages are returned
#' @param lsAPIurl \emph{(optional)} The URL of the \emph{LimeSurvey RemoteControl 2} JSON-RPC API
#' @param sessionKey \emph{(optional)} Authentication token, see \code{\link{lsGetSessionKey}()}
#'
#' @return The ids and all attributes of all survey groups
#'
#' @examples \dontrun{
#' lsListGroups("123456")
#' lsListGroups("123456", lang = "fr")
#' }
#'
#' @references \url{https://api.limesurvey.org/classes/remotecontrol_handle.html#method_list_groups}
#'
#' @export
#'
lsListGroups <- function(surveyID,
                         lang = NULL,
                         lsAPIurl = getOption("lsAPIurl"),
                         sessionKey = NULL) {
  # Fail fast when the mandatory survey id is missing.
  if (is.null(surveyID)) {
    stop("Need to specify surveyID.")
  }

  # Query the LimeSurvey RemoteControl API for the survey's groups.
  groups <- lsAPI(method = "list_groups",
                  params = list(sSessionKey = sessionKey,
                                iSurveyID = surveyID,
                                sLanguage = lang),
                  lsAPIurl = lsAPIurl)

  ########################
  # !! as tested against API of LS 3.22.15, the sLanguage parameter seems to
  # be disregarded server-side, so as a workaround filter the returned
  # groups locally to the requested language.
  if (!is.null(lang)) {
    groups <- groups[groups[, "language"] == lang, ]
  }
  ########################
  groups
}
|
dd6715d1c0baf13f7e34c527b3eaeeb858d5d49e
|
1ae11a3faf6e06edd46b6cbb00924f18dd77a09e
|
/R/getCheapArcs.R
|
8f19323d321fd830df707c5331fce552bc6b6f8e
|
[] |
no_license
|
cran/optrees
|
d443b36e21d21f9ec1538a5e451cce721ceda1ae
|
98121be6a97b7fd3ed856587da78795fc0ce70b3
|
refs/heads/master
| 2016-09-05T09:01:25.249779
| 2014-09-01T00:00:00
| 2014-09-01T00:00:00
| 23,565,281
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,797
|
r
|
getCheapArcs.R
|
#-----------------------------------------------------------------------------#
# optrees Package #
# Minimun Arborescence Problems #
#-----------------------------------------------------------------------------#
# getCheapArcs ----------------------------------------------------------------
#' Substracts the minimum weight of the arcs pointing to each node
#'
#' The \code{getCheapArcs} function substracts to each arc of a given graph the
#' value of the minimum weight of the arcs pointing to the same node.
#'
#' @param nodes vector containing the nodes of the graph, identified by a
#' number that goes from \eqn{1} to the order of the graph.
#' @param arcs matrix with the list of arcs of the graph. Each row represents
#' one arc. The first two columns contain the two endpoints of each arc and the
#' third column contains their weights.
#'
#' @return \code{getCheapArcs} returns a matrix with a new list of arcs.
#'
#' @seealso This function is an auxiliar function used in
#' \link{msArborEdmonds} and \link{getMinimumArborescence}.
getCheapArcs <- function(nodes, arcs) {
  # Work with the cost-matrix representation of the arc list.
  Cmat <- ArcList2Cmat(nodes, arcs)

  for (i in seq_along(nodes)) {  # seq_along instead of legacy seq(along = )
    # Check all nodes. The all()/is.na() guard skips columns where the
    # "every arc has infinite cost" test is indeterminate (NA); only columns
    # with a computable minimum are reduced. (Kept as in the original —
    # NOTE(review): the condition's exact intent re: NA columns is unclear;
    # confirm before simplifying.)
    if (!is.na(all(Cmat[, i] == Inf))) {
      # Only consider arcs with no infinite cost; TRUE instead of the
      # reassignable shorthand T.
      min.cost <- Cmat[which(Cmat[, i] == min(Cmat[, i], na.rm = TRUE)), i]
      Cmat[, i] <- Cmat[, i] - min.cost[1]  # subtract minimum cost from arcs
    }
  }

  # Return list of arcs rebuilt from the reduced cost matrix.
  arcs <- Cmat2ArcList(nodes, Cmat, directed = TRUE)
  return(arcs)
}
#-----------------------------------------------------------------------------#
|
719550347c1de1ffc90b65995e69704919ada0c2
|
78fa0a150e83a2d23ee387a970fad61077fce1ab
|
/css/theme_tbesc.r
|
8fa644e6fd4e7d321ad55c7cd301f8c960150223
|
[] |
no_license
|
fmaruri/tbesc_reports
|
469c2b0ab1da13e56a2e54c5da289f05b17c5794
|
889ba370bc17c9b6c48da49c01a97c11c29c457c
|
refs/heads/master
| 2021-01-23T23:20:14.419980
| 2014-05-20T21:27:15
| 2014-05-20T21:27:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 123
|
r
|
theme_tbesc.r
|
# ggplot2 for theming; gridExtra brings in grid's unit() for sizing.
library(ggplot2)
library(gridExtra)

# Shared TBESC plot theme: black-and-white base with widened legend keys.
theme_tbesc <- theme_bw() +
  theme(legend.key.width = unit(0.75, "inches"))
|
13be8717a2b2ff6b3d606e48e0280d8c33ab18af
|
c95ec2299b408f8bbb1503b5a2c0c1f8d45e9216
|
/VERSION.R
|
4d4015efad65f8849ab44c18bceee864d770b8d8
|
[] |
no_license
|
KongLabRUSP/shiny.ngs
|
0f3f9fe289bcd146cbed0620b83f855dd717ef12
|
42b1cd1ad51f51239a0ac16f557c4776a2519481
|
refs/heads/master
| 2021-06-14T07:03:48.460672
| 2021-04-12T16:59:42
| 2021-04-12T16:59:42
| 168,424,899
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
VERSION.R
|
# Version metadata for the shiny.ngs app.
UPDATE_VERSION <- "v1.0"
UPDATE_DATE <- "April 12, 2021"
# GitHub repository hosting the app.
UPDATE_REPO <- "KongLabRUSP/shiny.ngs"
# Raw URL of this file on master — presumably fetched at runtime to compare
# against the local version and detect updates; TODO confirm against caller.
UPDATE_URL <- "https://raw.githubusercontent.com/KongLabRUSP/shiny.ngs/master/VERSION.R"
|
431899821f21b0f27bc144f2b0d2d85e85a02a14
|
0c6196646894f065c4604e83e4974b6ab04f5071
|
/generated_data_simulations_sec_3/rand_type_pm_exact.R
|
a69b07d191d869ef998e655fce7fbab433926691
|
[] |
no_license
|
kapelner/sequential_matching_simulations
|
460d88367153e6d4b574a953fe163f057e05da31
|
b2dea44ffc46ef4f2d38e8300cc03406ca239c1c
|
refs/heads/master
| 2021-01-22T07:39:03.146807
| 2013-05-22T02:35:29
| 2013-05-22T02:35:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,071
|
r
|
rand_type_pm_exact.R
|
# Monte-Carlo exact (randomization) test for the pairwise-matching design.
# Depends on objects created by the sourced files below (yTs, yCs, m,
# prob_trt, Xymatched, Xyleft, nRT, nRC, Nsim_exact_test, beta_Ts, nsim).
source("common_pm.R")
source("common_pm_kk.R")

# Observed difference in treatment/control means.
b_T_obs <- mean(yTs) - mean(yCs)

## now we have to monte-carlo the exact test
# seq_len() instead of 1:Nsim_exact_test, which would iterate over c(1, 0)
# if the simulation count were zero.
b_T_sims <- array(NA, Nsim_exact_test)
for (nsim_exact_test in seq_len(Nsim_exact_test)) {
  # Permute treatment within matched pairs: exactly one of each pair treated.
  permute_odds <- rbinom(m, 1, prob_trt)
  permute_evens <- 1 - permute_odds
  Xymatched$indic_T <- as.vector(t(cbind(permute_odds, permute_evens)))
  # Permute treatment assignment within the reservoir.
  Xyleft$indic_T <- sample(c(rep(1, nRT), rep(0, nRC)))
  # Recompute ybarT - ybarC under this permutation and record it.
  yTs <- c(Xymatched[Xymatched$indic_T == 1, ]$y, Xyleft[Xyleft$indic_T == 1, ]$y)
  yCs <- c(Xymatched[Xymatched$indic_T == 0, ]$y, Xyleft[Xyleft$indic_T == 0, ]$y)
  b_T_sims[nsim_exact_test] <- mean(yTs) - mean(yCs)
}
# Diagnostics kept from the original (commented out):
#hist(b_T_sims, br = 100)
#mean(b_T_sims)
#sum(b_T_sims_unc > 1) / Nsim_exact_test
#sum(b_T_sims_cond > 1) / Nsim_exact_test
#hist(b_T_sims_unc, br = 100)
#hist(b_T_sims_cond, br = 100)
#ks.test(b_T_sims_unc, b_T_sims_cond)

# Empirical two-sided p-value based on the simulated null distribution.
pval <- sum(abs(b_T_obs) < abs(b_T_sims)) / Nsim_exact_test
beta_Ts[nsim] <- NaN
|
e08571c66a37487984a7ce70fe742c7479b3e4f5
|
ce3bc493274116150497e73aa7539fef1c07442a
|
/man/gpt_ask.Rd
|
166b4fd29af54bc04368b6498293957e0e1b146c
|
[] |
no_license
|
laresbernardo/lares
|
6c67ff84a60efd53be98d05784a697357bd66626
|
8883d6ef3c3f41d092599ffbdd4c9c352a9becef
|
refs/heads/main
| 2023-08-10T06:26:45.114342
| 2023-07-27T23:47:30
| 2023-07-27T23:48:57
| 141,465,288
| 235
| 61
| null | 2023-07-27T15:58:31
| 2018-07-18T17:04:39
|
R
|
UTF-8
|
R
| false
| true
| 5,195
|
rd
|
gpt_ask.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chatgpt.R
\name{gpt_ask}
\alias{gpt_ask}
\alias{gpt_history}
\alias{gpt_table}
\alias{gpt_classify}
\alias{gpt_tag}
\alias{gpt_extract}
\alias{gpt_format}
\alias{gpt_convert}
\alias{gpt_translate}
\title{ChatGPT API Interaction with R}
\usage{
gpt_ask(
ask,
secret_key = get_credentials()$openai$secret_key,
url = Sys.getenv("LARES_GPT_URL"),
model = Sys.getenv("LARES_GPT_MODEL"),
num_retries = 3,
temperature = 0.5,
max_tokens = NULL,
pause_base = 1,
quiet = FALSE,
...
)
gpt_history(quiet = TRUE, ...)
gpt_table(x, cols = NULL, quiet = TRUE, ...)
gpt_classify(x, categories, quiet = TRUE, ...)
gpt_tag(x, tags, quiet = TRUE, ...)
gpt_extract(x, extract, quiet = TRUE, ...)
gpt_format(x, format, quiet = TRUE, ...)
gpt_convert(x, unit, quiet = TRUE, ...)
gpt_translate(x, language, quiet = TRUE, ...)
}
\arguments{
\item{ask}{Character. Redacted prompt to ask ChatGPT. If multiple asks are
requested, they will be concatenated with "+" into a single request.}
\item{secret_key}{Character. Secret Key. Get yours in:
\href{https://platform.openai.com}{platform.openai.com}.}
\item{url}{Character. Base URL for OpenAI's ChatGPT API.}
\item{model}{Character. OpenAI model to use. This can be adjusted
according to the available models in the OpenAI API (such as "gpt-4").}
\item{num_retries}{Integer. Number of times to retry the request in
case of failure. Default is 3.}
\item{temperature}{Numeric. The temperature to use for generating
the response. Default is 0.5. The lower the \code{temperature},
the more deterministic the results in the sense that the highest probable
next token is always picked. Increasing temperature could lead to more
randomness, which encourages more diverse or creative outputs. You are
essentially increasing the weights of the other possible tokens.
In terms of application, you might want to use a lower temperature value
for tasks like fact-based QA to encourage more factual and concise responses.
For poem generation or other creative tasks, it might be beneficial to
increase the temperature value.}
\item{max_tokens}{Integer. The maximum number of tokens in the response.}
\item{pause_base}{Numeric. The number of seconds to wait between retries.
Default is 1.}
\item{quiet}{Boolean. Keep quiet? If not, show informative messages.}
\item{...}{Additional parameters.}
\item{x}{Vector. List items you wish to process in your instruction}
\item{cols}{Vector. Force column names for your table results.}
\item{categories, tags}{Vector. List of possible categories/tags to consider.}
\item{extract, format, unit}{Character. Length 1 or same as x to extract/format/unit
information from x. For example: email, country of phone number, country, amount as number,
currency ISO code, ISO, Fahrenheit, etc.}
\item{language}{Character. Language to translate to}
}
\value{
(Invisible) list. Content returned from API POST and processed.
}
\description{
This function lets the user ask ChatGPT via its API, and returns
the rendered reply. There are a couple of specific verbs (functions) with a
preset prompt to help fetch the data in specific formats. We also
store the prompts and replies in current session with their respective
time-stamps so user can gather historical results.
}
\examples{
\dontrun{
api_key <- get_credentials()$openai$secret_key
# Open question:
gpt_ask("Can you write an R function to plot a dummy histogram?", api_key)
##### The following examples return dataframes:
# Classify each element based on categories:
gpt_classify(1:10, c("odd", "even"))
# Add all tags that apply to each element based on tags:
gpt_tag(
c("I love chocolate", "I hate chocolate", "I like Coke"),
c("food", "positive", "negative", "beverage")
)
# Extract specific information:
gpt_extract(
c("My mail is 123@test.com", "30 Main Street, Brooklyn, NY, USA", "+82 2-312-3456", "$1.5M"),
c("email", "full state name", "country of phone number", "amount as number")
)
# Format values
gpt_format(
c("March 27th, 2021", "12-25-2023 3:45PM", "01.01.2000", "29 Feb 92"),
format = "ISO Date getting rid of time stamps"
)
# Convert temperature units
gpt_convert(c("50C", "300K"), "Fahrenheit")
# Create a table with data
gpt_table("5 random people's address in South America, email, phone, age between 18-30")
gpt_table(
ask = "5 largest cities, their countries, and population",
cols = c("city_name", "where", "POP")
)
# Translate text to any language
gpt_translate(
rep("I love you with all my heart", 5),
language = c("spanish", "chinese", "japanese", "russian", "german")
)
# Now let's read the historical prompts, replies, ano more from current session
gpt_history()
}
}
\seealso{
Other API:
\code{\link{bring_api}()},
\code{\link{fb_accounts}()},
\code{\link{fb_ads}()},
\code{\link{fb_creatives}()},
\code{\link{fb_insights}()},
\code{\link{fb_process}()},
\code{\link{fb_report_check}()},
\code{\link{fb_rf}()},
\code{\link{fb_token}()},
\code{\link{li_auth}()},
\code{\link{li_profile}()},
\code{\link{queryGA}()},
\code{\link{slackSend}()}
Other ChatGPT:
\code{\link{gpt_prompter}()}
}
\concept{API}
\concept{ChatGPT}
|
f33c5a9cd9f1c54b5918f728c8d76f68bfcfe69c
|
3dceb32dfec60c2c7df25b330562097e569c0ba4
|
/ch3.r
|
bc557913eb2e95bf4ea4fadf6160e810e5cef88d
|
[
"MIT"
] |
permissive
|
yedam-Lee/ThinkBIG
|
f41e0ffb46626538ec1e4308fb7434cae90d3e2d
|
39a96f0e6702b4b821cb56ab3ec49a83abe5fb19
|
refs/heads/master
| 2022-04-21T08:31:17.903830
| 2020-04-24T02:30:52
| 2020-04-24T02:30:52
| 126,263,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,755
|
r
|
ch3.r
|
## 3장 소스코드(2014/08/22 새 버전의 ggplot2에서 히스토그램이 출력되지 않는 문제를 수정)
# CSV 파일을 읽어들이기
dau <- read.csv("section3-dau.csv", header = T, stringsAsFactors = F)
head(dau)
dpu <- read.csv("section3-dpu.csv", header = T, stringsAsFactors = F)
head(dpu)
install <- read.csv("section3-install.csv", header = T, stringsAsFactors= F)
head(install)
str(dau)
summary(dau)
# DAU 데이터에 Install 데이터를 결합시키기
dau.install <- merge(dau, install, by = c("user_id", "app_name"))
head(dau.install)
# 위 데이터에 다시 DPU 데이터를 결합시키기
dau.install.payment <- merge(dau.install, dpu, by = c("log_date",
"app_name", "user_id"), all.x = T)
head(dau.install.payment)
head(na.omit(dau.install.payment))
# 비과금 유저의 과금액에 0을 넣기
dau.install.payment$payment[is.na(dau.install.payment$payment)] <- 0
head(dau.install.payment)
# 월차로 집계하기
# 월 항목 추가
dau.install.payment$log_month <-substr(dau.install.payment$log_date, 1, 7)
dau.install.payment$install_month <- substr(dau.install.payment$install_date, 1, 7)
install.packages("plyr")
library("plyr")
mau.payment <- ddply(dau.install.payment,
.(log_month, user_id, install_month), # 그룹화
summarize, # 집계 명령
payment = sum(payment) # payment 합계
)
head(mau.payment)
# 신규 유저인지 기존 유저인지 구분하는 항목을 추가
# 신규 유저와 기존 유저 식별
mau.payment$user.type <- ifelse(mau.payment$install_month == mau.payment$log_month,
"install", "existing")
mau.payment.summary <- ddply(mau.payment,
.(log_month, user.type), # 그룹화
summarize, # 집계 명령어
total.payment = sum(payment) # payment 합계
)
head(mau.payment)
head(mau.payment.summary)
# 그패프로 데이터를 시각화하기 (geom_bar() -> geom_bar(stat="identity")로 수정 2014/08/22)
library("ggplot2")
library("scales")
ggplot(mau.payment.summary, aes(x = log_month, y = total.payment,
fill = user.type)) + geom_bar(stat="identity") + scale_y_continuous(label = comma)
# old_theme = theme_update(
# axis.title.x = theme_text(family="HiraKakuProN-W3"),
# axis.title.y = theme_text(family="HiraKakuProN-W3", angle=90),
# plot.title = theme_text(family="HiraKakuProN-W3", size=14.4))
ggplot(mau.payment[mau.payment$payment > 0 & mau.payment$user.type == "install", ],
aes(x = payment, fill = log_month)) + geom_histogram(position = "dodge", binwidth = 20000)
|
29663b7c6297598e604bcd57b4aa44f6097fd1ff
|
1ad2b2937618b2d613f7fc2303ae46738553c6fd
|
/Plot2.R
|
bc6635f053789b679d6bd8481a648037113ed461
|
[] |
no_license
|
SuthaThangababu/ExData_Plotting1
|
07d2739b1a99e2dd858c34070988e19e73391dd2
|
d46fa4faf07b07ffa9128fa614bdcd432e52282b
|
refs/heads/master
| 2021-01-17T10:53:10.419744
| 2017-03-06T03:53:35
| 2017-03-06T03:53:35
| 84,023,331
| 0
| 0
| null | 2017-03-06T02:56:52
| 2017-03-06T02:56:52
| null |
UTF-8
|
R
| false
| false
| 811
|
r
|
Plot2.R
|
setwd("~/Userdata/Personal/Data Science/RDir")
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","Energy_Consumption.zip")
unzip("Energy_Consumption.zip",list = FALSE,unzip = "internal")
energy_file<- read.table("household_power_consumption.txt",header=TRUE,sep = ";", na.strings = "?", stringsAsFactors = FALSE)
energy_file_subset<-subset(energy_file, Date %in% c("1/2/2007","2/2/2007"))
energy_file_subset$Date<-as.Date(energy_file_subset$Date, "%d/%m/%Y")
datetime<-paste(as.Date(energy_file_subset$Date),energy_file_subset$Time)
energy_file_subset$DateTime<-as.POSIXct(datetime)
png(file="Plot2.png",width = 480,height = 480)
with(energy_file_subset, plot(Global_active_power~DateTime, type="l", ylab="Global Active Power (kilowatts)", xlab=""))
dev.off()
|
8da26bb79de45fda73adb710bdacc353f6f8788e
|
408212ab41a932e3d8f9cd6b0db64678040be63d
|
/R/tree.dating.R
|
4d4f163fe8db48d1ab9d9b651b4a026eef0bf8bd
|
[
"MIT"
] |
permissive
|
ropensci/phruta
|
802ed58510e8cffee3316e9a001c47d70ddafff3
|
0000972691748baa5d178c491b660aec18b9ba39
|
refs/heads/main
| 2023-05-22T13:41:46.126313
| 2023-05-02T18:41:44
| 2023-05-02T18:41:44
| 391,212,019
| 3
| 0
|
NOASSERTION
| 2023-01-11T16:06:57
| 2021-07-30T23:51:38
|
R
|
UTF-8
|
R
| false
| false
| 3,261
|
r
|
tree.dating.R
|
#' Tree dating under treePL or
#'
#'Performs tree dating under \code{"treePL"} or \code{"PATHd-8"}
#'based on secondary calibrations.
#'Note that \code{"treePL"} or \code{"PATHd-8"} must be installed in your PATH.
#'How to install \code{"PATHd-8"} in mac
#' \code{"https://gist.github.com/cromanpa94/a43bc710a17220f71d796d6590ea7fe4"}
#' and \code{"treePL"} can be installed using homebrew
#' (brew install brewsci/bio/treepl). Thanks to Brian O'Meara and Jonathan
#' Chang, respectively.
#'
#' @param taxonomyFolder Name of the folder where \code{"1.Taxonomy.csv"},
#' created duing the
#' \code{"sq.curate"} step, is stored (character).
#' @param phylogenyFolder Name of the folder where
#' \code{"RAxML_bipartitions.phruta"}, created duing the
#' \code{"tree.raxml"} step, is stored (character).
#' @param ... Arguments passed to \code{"geiger::congruify.phylo"}.
#'
#'
#' @importFrom geiger congruify.phylo
#' @importFrom ape read.tree write.tree
#' @importFrom utils read.csv write.csv
#'
#' @return None
#'
#' @examples
#' \dontrun{
#' sq.retrieve.direct(
#' clades = c("Felis", "Vulpes", "Phoca"),
#' species = "Manis_pentadactyla",
#' genes = c("ADORA3", "CYTB")
#' )
#' sq.curate(
#' filterTaxonomicCriteria = "Felis|Vulpes|Phoca|Manis",
#' kingdom = "animals", folder = "0.Sequences"
#' )
#' sq.aln(folder = "1.CuratedSequences")
#' tree.raxml(
#' folder = "2.Alignments", FilePatterns = "Masked",
#' raxml_exec = "raxmlHPC", Bootstrap = 100,
#' outgroup = "Manis_pentadactyla"
#' )
#' tree.dating(
#' taxonomyFolder = "1.CuratedSequences",
#' phylogenyFolder = "3.Phylogeny", scale = "treePL"
#' )
#' }
#' @export
tree.dating <-
function(taxonomyFolder = "1.CuratedSequences",
phylogenyFolder = "3.Phylogeny",
...) {
if (is.null(taxonomyFolder) |
is.null(phylogenyFolder))
stop("Please provide folder names")
##Over-writing?
if( !isTRUE(pkg.env$.testMode) ) {
UI <- readline(paste0("This function might overwrite ",
"4.Timetree", ". Are you sure you want to continue? (y/n) "))
if(UI != 'y') stop('Exiting since you did not press y')
}
taxonomy <- read.csv(paste0(taxonomyFolder, "/1.Taxonomy.csv"))
row.names(taxonomy) <- taxonomy$species_names
TargetTree <-
read.tree(paste0(phylogenyFolder, "/RAxML_bipartitions.phruta"))
resphy <-
congruify.phylo(
reference = SW.phruta,
target = TargetTree,
taxonomy = base::as.matrix(taxonomy),
...
)
names(resphy) <- c("family", "order", "class", "phyla", "kingdom")
resphy <- Filter(Negate(anyNA), resphy)
# Export
if (length(resphy) > 0) {
unlink("4.Timetree", recursive = TRUE)
dir.create("4.Timetree")
invisible(lapply(seq_along(resphy), function(x) {
write.tree(resphy[[x]]$phy,
paste0("4.Timetree/", names(resphy)[x],
"-levelCalibration.tre"))
write.csv(
resphy[[x]]$calibrations,
paste0(
"4.Timetree/",
names(resphy)[x],
"-levelCalibration.calibrated.csv"
)
)
}))
}
}
|
dde2f3ee90ad7f6d8df54438fa68df8c560986ce
|
cce60f27ea5cb998e6f1bdfee013a5797ab2a5f6
|
/2 trees.R
|
6e74759925b1f749edddba9e4ff977a3670918f8
|
[
"MIT"
] |
permissive
|
minetexter/titanic
|
c2e5755384b5bbb6e935237435c8228f9a9b7e7a
|
1c981c503273c64f288c6cd109ce67a9afb04b73
|
refs/heads/master
| 2021-01-25T14:21:45.379960
| 2018-03-03T10:41:30
| 2018-03-03T10:41:30
| 123,682,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,904
|
r
|
2 trees.R
|
library(rpart)
# rpart package dec tree uses cart instead of chaid
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
data=train,
method="class")
plot(fit)
text(fit)
# Install and load required packages for fancy decision tree plotting
install.packages('rattle')
install.packages('rpart.plot')
install.packages('RColorBrewer')
library(rpart)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
fancyRpartPlot(fit) #rattle
# Now let's make a prediction and write a submission file
Prediction <- predict(fit, test, type = "class")
submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
write.csv(submit, file = "myfirstdtree.csv", row.names = FALSE)
?predict
# Overriding default limits
?rpart.control
# Let's unleash the decision tree and let it grow to the max
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked, data=train,
method="class", control=rpart.control(minsplit=2, cp=0))
fancyRpartPlot(fit)
# Overfitting issue
# Manually trim a decision tree, point and click, quit
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked, data=train,
method="class", control=rpart.control(minsplit=2, cp=0.005))
new.fit <- prp(fit,snip=TRUE)$obj
fancyRpartPlot(new.fit)
# Feature Engineering
?strsplit
# What's in a name?
train$Name[1]
# Join together the test and train sets for easier feature engineering
train <- read.csv("train.csv")
test$Survived <- NA
combi <- rbind(train, test)
# Convert to a string
combi$Name <- as.character(combi$Name)
# What's in a name, again?
combi$Name[1]
# Find the indexes for the tile piece of the name
strsplit(combi$Name[1], split='[,.]')
strsplit(combi$Name[1], split='[,.]')[[1]]
strsplit(combi$Name[1], split='[,.]')[[1]][2]
combi$Title <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][2]})
# Finally, we may wish to strip off those spaces from the beginning of the titles.
# Here we can just substitute the first occurrence of a space with nothing.
# We can use sub for this (gsub would replace all spaces, poor “the Countess” would look strange then though)
combi$Title <- sub(' ', '', combi$Title)
?sub
table(combi$Title)
# Combine small title groups
combi$Title[combi$Title %in% c('Mme', 'Mlle')] <- 'Mlle'
combi$Title[combi$Title %in% c('Capt', 'Don', 'Major', 'Sir')] <- 'Sir'
combi$Title[combi$Title %in% c('Dona', 'Lady', 'the Countess', 'Jonkheer')] <- 'Lady'
combi$Title <- factor(combi$Title)
summary(combi$Title)
# Engineered variable: Family size
combi$FamilySize <- combi$SibSp + combi$Parch + 1
# Engineered variable: Family
combi$Surname <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][1]})
combi$FamilyID <- paste(as.character(combi$FamilySize), combi$Surname, sep="")
combi$FamilyID[combi$FamilySize <= 2] <- 'Small'
table(combi$FamilyID)
famIDs <- data.frame(table(combi$FamilyID))
famIDs <- famIDs[famIDs$Freq <= 2,]
combi$FamilyID[combi$FamilyID %in% famIDs$Var1] <- 'Small'
combi$FamilyID <- factor(combi$FamilyID)
# Split back into test and train sets
train <- combi[1:891,]
test <- combi[892:1309,]
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID,
data=train, method="class")
fancyRpartPlot(fit)
# In most cases though, the title or gender variables will govern the first decision due to the greedy nature of decision trees.
# The bias towards many-levelled factors won't go away either, and the overfitting problem can be difficult to gauge
summary(combi$Age)
Agefit <- rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked + Title + FamilySize,
data=combi[!is.na(combi$Age),],
method="anova")
fancyRpartPlot(Agefit)
combi$Age[is.na(combi$Age)] <- predict(Agefit, combi[is.na(combi$Age),])
summary(combi)
# Random forests in R can't use NA's and factor levels up to 32
which(combi$Embarked == '')
combi$Embarked[c(62,830)] = "S"
combi$Embarked <- factor(combi$Embarked)
which(is.na(combi$Fare))
combi$Fare[1044] <- median(combi$Fare, na.rm=TRUE)
combi$FamilyID2 <- combi$FamilyID
# Convert back to string
combi$FamilyID2 <- as.character(combi$FamilyID2)
combi$FamilyID2[combi$FamilySize <= 3] <- 'Small'
# And convert back to factor
combi$FamilyID2 <- factor(combi$FamilyID2)
install.packages('randomForest')
library(randomForest)
# Split back into test and train sets
train <- combi[1:891,]
test <- combi[892:1309,]
# Build Random Forest Ensemble
set.seed(415)
fit <- randomForest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID2,
data=train, importance=TRUE, ntree=2000)
varImpPlot(fit)
# Now let's make a prediction and write a submission file
Prediction <- predict(fit, test)
submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
write.csv(submit, file = "firstforest.csv", row.names = FALSE)
# Conditional inference trees, using statistical tests rather than purity tests party package
# Build condition inference tree Random Forest, 5 is default variables too high. generally square root of total
set.seed(415)
fit <- cforest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID,
data = train, controls=cforest_unbiased(ntree=2000, mtry=3))
# Now let's make a prediction and write a submission file
Prediction <- predict(fit, test, OOB=TRUE, type = "response")
submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
write.csv(submit, file = "ciforest.csv", row.names = FALSE)
|
0b2f55b9a5fcabf2ef8a071dcd05f9dae54e0e6a
|
086995334358642ce29c3c17f07367a646dde5d7
|
/man/cutna_headtail_df.Rd
|
41434b5b881c7b9c61154b0fd8699083375b7d35
|
[] |
no_license
|
yunkepeng/rbeni
|
29dca6fcb66a8dbe1310e59095a94d8d1a14252d
|
3c01006c16f76e078284ab3bb4fb74f06f9895ac
|
refs/heads/master
| 2022-12-23T18:24:50.822522
| 2020-09-25T09:15:26
| 2020-09-25T09:15:26
| 298,519,375
| 0
| 0
| null | 2020-09-25T08:52:14
| 2020-09-25T08:52:13
| null |
UTF-8
|
R
| false
| true
| 1,212
|
rd
|
cutna_headtail_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cutna_headtail_df.R
\name{cutna_headtail_df}
\alias{cutna_headtail_df}
\title{Reduces the rows of a data frame based on the removal of NA values}
\usage{
cutna_headtail_df(df, col, extend = FALSE)
}
\arguments{
\item{df}{A data frame}
\item{col}{A character string specifying the column based on which the
removal of consecutive NA values from head and tail is done.}
\item{extend}{A logical specifying whether NAs at the head or tail of a
data frame's column (\code{col}) should be filled with the first (last)
non-NA value of the respective column. Defaults to \code{FALSE}.}
}
\value{
A reduced data frame, shortened after removing consecutive
NAs from the head and tail of the column (argument \code{col}) of the
initial data frame (argument \code{df}).
}
\description{
Reduces the rows of a data frame based on the removal of NA values from
head and tail of a given column. Considers all consecutive NA values
starting from the head and tail of a vector up to the first non-NA value.
}
\examples{
df <- data.frame( columnname = c(rep(NA, 5), seq(1,10), NA, seq(12,20), rep(NA,10)))
print(cutna_headtail_df(df, "columnname"))
}
|
5e68837cf5f6338fe8d8056e2b67ac003e04711f
|
f70cd5fb5a43710abc68abb12769fc742aa35322
|
/5. Regresion Lineal/RegresionLinealMultiple - avanzado.R
|
62956244a49c782597d2c5189dde43be2dc92498
|
[] |
no_license
|
yukochperdok/examples-stadistics
|
8cf21d6ee55ba8cdcef2c260d60b6e9052d3d159
|
b2d3f28f70ab0c6c14cbe5f9cc6b0114dc0167fc
|
refs/heads/master
| 2020-07-06T03:28:37.400761
| 2019-02-24T12:17:25
| 2019-02-24T12:17:25
| 74,061,871
| 1
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 31,495
|
r
|
RegresionLinealMultiple - avanzado.R
|
# Limpiar el workspace
rm(list = ls(all = TRUE))
library(scatterplot3d)
library(Rcmdr)
##################################EJEMPLO 2###################################
#Datos de R sobre coches: mtcars
# Explicar el consumo (mpg) en función de la potencia (hp) y del peso (wt):
data(mtcars)
attach(mtcars)
# Mostramos datos
head(mtcars)
str(mtcars)
summary(mtcars)
# Con el summary podemos ver los factores, si hay nulos, tipos de valores.
# Si son discretos o continuos, a parte de como estan distribuidos mas o menos
# Vemos que am, gear, carb, cyl son valores discretos. Pero habria que factorizarlos
# Vemos sin embargo que mpg, drat, wt y qsec son valores continuos
# Y vemos que disp y hp son valores discretos no factorizables
# Revisamos las variables que nos han pedido:
# mpg - Var. continua - consumo
head(mpg)
summary(mpg)
boxplot(mpg) # Sin valores atipicos: entre 10 y 30. Mediana 20
hist(mpg)
plot(density(mpg))# Claramente una normal
shapiro.test(mpg) # Distribucion normal
mean(mpg) # 20.09062
sd(mpg) # 6.026948
# hp - Var discreta - Potencia
head(hp)
summary(hp)
boxplot(hp) # Pocos val. atipicos.
hist(hp)
plot(density(hp))# Parece una lognormal
shapiro.test(hp) # No es normal
shapiro.test(log(hp))# Es log normal
boxplot(log(hp)) # Eliminados val. atipicos, bigotes perfectos
hist(log(hp))
plot(density(log(hp)))# Confirmado Distribucion lognormal
# wt - Var discreta - Peso
head(wt)
summary(wt)
boxplot(wt) # Pocos val. atipicos,cercanos al bigote superior
hist(wt)
plot(density(wt))# Parece una lognormal
shapiro.test(wt) # Podria tratarse como una normal
shapiro.test(log(wt))# Pero es LogNormal
boxplot(log(wt)) # Eliminados val. atipicos, bigotes perfectos
hist(log(wt))
plot(density(log(wt)))# Confirmado Distribucion lognormal
# Hacemos un plot de las variables para ver que correlaciones nos podemos encontrar:
mtcars.minimo<-mtcars[,c("mpg","wt","hp")]
plot(mtcars.minimo)
# Buscamos correlaciones
cor(mtcars.minimo)
parcor(cor(mtcars.minimo))
# Vemos que hay bastante correlacion parcial entre mpg, hp y wt
######MODELO(s)
# Se deberia ir incluyendo uno a uno. De hecho nosotros ya hemos visto que se
# va a comportar mejor con los log.
# De todas formas aqui nos han fijado ambas variables y nos han pedido un modelo
# por lo tanto montamos el modelo directamente.
cars.lm = lm(mpg~hp+wt)
summary(cars.lm)
# Bastante buen modelo: 81% y p-values bajos.
#modelo de regresión
# millas recorridas por galón = 37.22 - 0.03 potencia - 3.87 peso
# Analisis de residuos
# Residuos VS consumo estimado
# para ver si hay algún coche que se comporta de modo muy distinto a los demás:
plot(predict(cars.lm), cars.lm$residuals)
abline(h=0)
#o:
plot(cars.lm$fitted.values,cars.lm$residuals)
abline(h=0)
# No hay una tendencia clara pero hay algun valor que tiene una varianza muy alta: val. atipicos
# Si normalizamos lo veremos mejor
mean.res<-mean(cars.lm$residuals)
sd.res<-sd(cars.lm$residuals)
plot(cars.lm$fitted.values,(cars.lm$residuals-mean.res)/sd.res)
abline(h=0)
abline(h=-1,col="blue")
abline(h=1,col="blue")
# Hay valores extremos que se salen. Podemos confirmarlo con Levene
grupo.residuos<-factor(c(rep("g1",8),rep("g2",8),rep("g3",8),rep("g4",8)))
identical(length(grupo.residuos),length(cars.lm$residuals))
# H0: varianzas iguales
leveneTest(cars.lm$residuals, group = grupo.residuos) # => Aceptamos H1-->Varianzas desiguales
############ NO VALE NUESTRO MODELO
############TRANSFORMACIONES
# Averiguamos poder de transformacion:
spreadLevelPlot(cars.lm) # 0.5853955
# Poder de transformacion 0.5, es decir es equivalente a log
curve(x**0.5,0:20, col = "green",xlim = c(0,4), ylim = c(-2,2))
curve(log,0:20, add = T ,col = "violet")
curve(x**2,0:20, add = T ,col = "blue")
curve(log(x,5),0:20, add = T ,col = "red")
abline(h=0, v=0)
# TRANSFORMACION EXPONENCIAL 0.5
hp.trans<-hp**0.5
wt.trans<-wt**0.5
cars.lm2 = lm(mpg~hp.trans+wt.trans)
summary(cars.lm2)
#Analizamos residuos
mean.res<-mean(cars.lm2$residuals)
sd.res<-sd(cars.lm2$residuals)
plot(cars.lm2$fitted.values,(cars.lm2$residuals-mean.res)/sd.res)
abline(h=0)
abline(h=-1,col="blue")
abline(h=1,col="blue")
# Mucho mejor los residuos
# TRANSFORMACION LOGARITMICA
cars.lmlog = lm(mpg~log(hp)+log(wt))
summary(cars.lmlog)
#Analizamos residuos
mean.res<-mean(cars.lmlog$residuals)
sd.res<-sd(cars.lmlog$residuals)
plot(cars.lmlog$fitted.values,(cars.lmlog$residuals-mean.res)/sd.res)
abline(h=0)
abline(h=-1,col="blue")
abline(h=1,col="blue")
# Varianzas iguales??
grupo.residuos<-factor(c(rep("g1",8),rep("g2",8),rep("g3",8),rep("g4",8)))
identical(length(grupo.residuos),length(cars.lmlog$residuals))
# H0: varianzas iguales
leveneTest(cars.lmlog$residuals, group = grupo.residuos) # => Varianzas iguales por poco
# Residuos normales???
shapiro.test(cars.lmlog$residuals) # Poca normalidad en los residuos
#Residuos Independientes
#H0 no ha correlacion entre los residuos
durbinWatsonTest(cars.lmlog, alternative = "two") # Aceptamos H0 -> Residuosindependientes
# Cerramos nuestro modelo con 88%, bastante explicativo pero los residuos tienen aun varianzas altas
# y poca normalidad.
# CONCLUSION: Se deberian tomar mas datos o incluir mas variables para explicar mejor el consumo
#Si queremos predecir las millas recorridas por galón por un coche con 150 caballos y peso 2.5:
# Consumo real VS estimado
pred<-predict(cars.lmlog)
qqplot(mpg,pred)
abline(c(0,1))
#o:
plot(mpg,58.231-11.400*log(wt)-5.193*log(hp), xlim = c(0,40), ylim = c(0,50))
abline(c(0,1))
# Representacion VD VS VI's
plot(log(wt), mpg)
abline(lm(mpg~log(wt)),col="red")
plot(log(hp), mpg)
abline(lm(mpg~log(hp)),col="red")
# Con todas las VI's
s3d<-scatterplot3d(log(wt),log(hp),mpg, main="3D Scatterplot", xlim = c(0,5),zlim = c(-10,60),ylim = c(0,5))
s3d$plane3d(cars.lmlog)
#o
s3d<-scatterplot3d(log(wt),log(hp),mpg, pch=16, highlight.3d=TRUE,
type="h", main="3D Scatterplot",xlim = c(0,5),zlim = c(-10,60),ylim = c(0,5))
s3d$plane3d(cars.lmlog)
# o tambien con graficos RGL
scatter3d(wt, hp, mpg)
# Para predecir lo mejor es usar los intervalos de prediccion:
interval.wt<-runif(10,min(wt),max(wt))
interval.hp <- sample(min(hp):max(hp), 10, replace=T)
mpg.predict<-predict(cars.lmlog,newdata = data.frame(wt=interval.wt,hp=interval.hp))
# Intervalos de prediccion
mpg.PI<-predict(cars.lmlog,newdata = data.frame(wt=interval.wt,hp=interval.hp), interval = "prediction", level = 0.9)
# Dibujamos el intervalo de prediccion (90%) en negro y azul
# Y la prediccion exacta en rojo
s<-scatterplot3d(interval.wt,interval.hp,mpg.predict, color = "red", xlim = c(0,10),zlim = c(0,50),ylim = c(10,350))
s$points3d(interval.wt,interval.hp,mpg.PI[,"lwr"], col = "black")
s$points3d(interval.wt,interval.hp,mpg.PI[,"upr"], col = "blue")
##################################EJEMPLO 3###################################
# Trabajar con un modelo explocativo de mpg para todas las variables de mtcars
# Imaginemos que ya tenemos toda la exploracion de las variables hecha.
# Ajustamos un posible modelo:
fit <- lm(mpg ~ hp + wt + disp, data=mtcars)
summary(fit)
############ FUNCIONES INTERESANTES:
# Coeficientes del modelo
coefficients(fit)
coefficients(fit)[1]# Intercepto
coefficients(fit)["hp"]# coef. hp
coefficients(fit)["wt"]# coef. wt
coefficients(fit)["disp"]# coef. disp
#o
fit$coefficients
# Residuos
residuals(fit)
#o
fit$residuals
#Valores estimados
fitted(fit)
#o
fit$fitted.values
fit$fitted.values["Fiat X1-9"] # Valor estimado para el Fiat X1-9
# Intervalos de confianza para cada para coeficiente
confint(fit, level=0.95) # Dejando 2.5 y 97.5 a cada lado
# Tabla ANOVA
anova(fit)
# Matriz de covarianza para los coeficientes
vcov(fit)
# Diagnostico de regresion:
# Te devuelve los coefficientes para cada tipo de coche.
# Las desviaciones por tipo de coche.
# Pesos de los residuos por tipo de coche.
# Todo esto para poder diagnosticar que influye mas en cada coeficiente, residuo, etc...
influence(fit)
influence(fit)$coefficients # Coefficientes
influence(fit)$sigma # Desviaciones
influence(fit)$wt.res # Pesos
# Por ejemplo: El coche con mayor residuo:
max(influence(fit)$wt.res) # Toyota Corolla
# Coche con minimo coeficiente para disp
min(influence(fit)$coefficients[,"disp"]) # Maserati Bora
######### PLOTS INTERESANTES:
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
# Simplemente haciendo esto ya vemos los 4 plots mas interesantes:
# 1. Residuos VS Estimados --> Checkeamos heteroscedasticidad de las varianzas
# 2. Residuos tipificados VS Estimados --> Checkeamos heteroscedasticidad de las varianzas
# 3. Quantiles de residuos VS quantiles de dist.normal --> Normalidad de residuos
# 4. Residuos VS Leverage --> Detectar observaciones influyentes. (aquellas que sobrepasan la distacia de Cook)
# IMP: en todos marca una linea roja indicando tendencia.Y te marca los residuos
# mas atipicos: en nuestro caso Corolla, Chrysler y Maserati Bora.
layout(matrix(1,2)) # optional 4 graphs/page
################COMPARAR MODELOS - CON ANOVA
fit1 <- lm(mpg ~ hp + wt + disp, data=mtcars)
fit2 <- lm(mpg ~ hp + wt, data=mtcars)
anova(fit1, fit2)
# Te compara entre el modelo fit1 y fit2 y te dice que el modelo fit2 es mejor porque
# su suma de cuadrados de los residuos tiene 0.05 MAS que la del fit1
# Se puede comprobar aqui:
# Df Sum Sq Mean Sq
anova(fit1) # Residuals 28 194.99 6.96
anova(fit2) # Residuals 29 195.05 6.73
# IMP: Cuanto mayor sea la SC, menor sera la varianza residual.
################# CROSS - VALIDATION
fit <- lm(mpg ~ hp + wt + disp, data=mtcars)
summary(fit)
library(DAAG)
cv.lm(data = mtcars, fit, m=3)
# Para las K variables de un modelo, cv.lm, te calcula el error estandar. Es decir
# los estimados por VD y los dibuja en lineas de regresion de diferentes colores
# y formas
# Asi puedes enfrentar: valores reales de mpg VS estimados de mpg (con respecto a cada VD)
# Tambien se puede ver como se va reduciendo la R2:
library(bootstrap)
# Definimos funciones theta
theta.fit <- function(x,y){lsfit(x,y)}
theta.predict <- function(fit,x){cbind(1,x)%*%fit$coef}
# Matriz de VI
X <- as.matrix(mtcars[c("hp","wt","disp")])
# Vector de VD
y <- as.matrix(mtcars[c("mpg")])
results <- crossval(X,y,theta.fit,theta.predict,ngroup=10)
cor(y, fit$fitted.values)**2 # R2 en bruto
cor(y,results$cv.fit)**2 # R2 cross validada
############ METODOS PARA SELECCIONAR EL MEJOR MODELO
#####STEPWISE
# Stepwise Regression
# Este metodo se basa en la combinacion de
# 1. forward: partiendo de un modelo base(normalmente modelo vacio) ir incorporando
# ordenadamente por coeficiente de correlacion parcial todas las variables mientras
# no superen un p-value de barrera
# 2. backward: partiendo de un modelo completo (normalmente todas las variables a estudiar)
# se val eliminando ordenandamente segun su p-value mayor. Hasta que no hay un p-value que
# supere un p-value min.
# Para cada step se evalua de nuevo el modelo, pues los p-value han podido cambiar.
# both: conjuga ambas y va añadiendo (forward) por orden de correlacion parcial, y eliminando
# por p-value maximo.
library(MASS)
fit <- lm(mpg ~ hp + wt + disp, data=mtcars)
step <- stepAIC(fit, direction="backward")
step$anova
# Para la seleccion del tipo de metodo utilizamos "direction".
# En step(modelo, direction) o stepAIC(modelo, direction) comprueba el ajuste al modelo
# de la variable, no con p-value o con R2-ajust, sino con AIC (que depende de la suma de cuadrados de los residuos)
# A mayor suma de cuadrados=> Mayor AIC => Menor varianza de errorres => MEJOR MODELO
# step$anova te muestra el modelo inicial y el modelo final.
# Y el porque de las variables eliminadas en funcion a AIC.
# En nuestro caso ha eliminado disp.
# Si queremos partiendo de un modelo base coger el mejor modelo explicativo de mpg:
####### Planteamos todo el modelo fit0
# mpg ~ con todas las variables de mtcars
fit_max <- lm(mpg ~ ., data=mtcars)
summary(fit_max)
anova(fit_max)
# Solo tendria sentido incluir wt
####### Planteamos el modelo minimo
# mpg ~ con ninguna variable
fit_min <- lm(mpg ~ 1, data=mtcars)
summary(fit_min)
anova(fit_min)
# Solo intercepto, esto significaria que no hay modelo
####### Planteamos soluciones para Forward, Backward y Both
step.backward <- stepAIC(fit_max, direction="backward")
step.backward$anova
# Inicial modelo el maximo: mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + gear + carb
# Final modelo ajustado por AIC: mpg ~ wt + qsec + am
# FIjaros que al principio qsec y am practicamente ni se tenian en cuenta, y sin embargo
# cyl o disp si parecian entrar. Multicolinealidad seguramente.
step.forward <- stepAIC(fit_min, direction="forward",
scope = list(lower=fit_min, upper=fit_max))
step.forward$anova
# Inicial modelo el minimo: mpg ~ 1
# Final modelo ajustado por AIC: mpg ~ wt + cyl + hp
# Fijaros que no coincide para nada con el backward. Ahora cyl y hp si han entrado.
# CONCLUSION: Los metodos forward y backward son incompletos.
# Hay que utilizar both
step.both <- stepAIC(fit_min, direction="both",
scope = list(lower=fit_min, upper=fit_max))
step.both$anova
# Inicial modelo el minimo: mpg ~ 1
# Final modelo ajustado por AIC: mpg ~ wt + cyl + hp
# Este si coincide con step.backward, porque no se ha tenido que quitar ninguna
# variable del modelo una vez incluida.
# El modelo explicativo MINIMO para mpg es:
# mpg ~ wt + cyl + hp
fit.optimo<-lm(mpg ~ wt + cyl + hp, data=mtcars)
summary(fit.optimo)
anova(fit.optimo)
# Como vereis hemos encontrado un modelo explicado al 82%
# las variables wt y cly si se ajustan perfectamente pero la hp no. ¿Entonces?
# Aqui depende mucho de las variables que le incluyamos y las transformaciones:
log.hp<-log(mtcars$hp)
log.wt<-log(mtcars$wt)
fit_max <- lm(mpg ~ .+log.hp+log.wt, data=mtcars)
summary(fit_max)
anova(fit_max)
step.both <- stepAIC(fit_min, direction="both",
scope = list(lower=fit_min, upper=fit_max))
step.both$anova
# Initial Model: mpg ~ 1
# Final Model: mpg ~ log.wt + log.hp
fit.optimo<-lm(mpg ~ log.wt + log.hp, data=mtcars)
summary(fit.optimo)
anova(fit.optimo)
# MODELO OPTIMO: explicado al 88% y con SE residuos bajisimo: 2.08
# Por lo tanto el mejor modelo es:
# mpg ~ log.wt + log.hp
# OBS: Tambien podriamos hacer transformaciones para el resto de variables.
# Y para la variable dependiente.
########MEJOR SUBCONJUNTO
# Este metodo se basa en encontrar para un modelo completo de partida, la mejor combinacion
# de variables de tamaño n. Para elejirlo se puede basar en R2 o CP o R2-ajustado
# Si por ejemplo estamos determinados por un nuemro de variables maximo es interesante
# Hacerlo por este metodo. Sino es asi, compensa computacionalmente Stepwise
library(leaps)
attach(mtcars)
# OBS: No vamos a incluir todas las variables para que se vean mejor las visualizaciones
# pero se podrian incluir todas perfectamente
leaps<-regsubsets(mpg~log.wt+log.hp+cyl+disp+qsec,data=mtcars,nbest=3)
# Se ha indicado nbest=3, queremos ver para cada n combinacion, 3 posibilidades.
summary(leaps)
# Devuelve una tabla de resultados ordenado por n y dentro de cada n el orden de eleccion (nbest)
# de tal forma que si nosotros queremos elegir el mejor subconjunto de 3 variables:
# log.wt log.hp cyl disp qsec
# 3 ( 1 ) "*" "*" " " " " "*"
# Elegiriamos: log.wt + log.hp + qsec + (Intercepto)
# Y lo podemos ver en grafico, ordenados por "Cp" o "adjr2" o "r2".
plot(leaps,scale="adjr2")
plot(leaps,scale="Cp")
# O con el grafico de subconjuntos ordenados por: adjr2 o rsq o cp, o incluso rss o bic
library(car)
subsets(leaps, statistic="adjr2")
subsets(leaps, statistic="cp")
fit.optimo.3.var<-lm(mpg ~ log.wt + log.hp + qsec, data=mtcars)
summary(fit.optimo.3.var)
anova(fit.optimo.3.var)
######### RELATIVA IMPORTANCIA DE LAS VI
# Se puede calcular por diversos algoritmos la relativa importancia de cada variable.
# Esto te dice que variable es mas importante en el modelo. NO la que aporta mas
# que seria tipificando como la encontrariamos, sino la que mas aporta en efectos
# de ajuste al modelo.
# El algoritmo mas utilizado y recomendado es lmg: se basa en R2
library(relaimpo)
calc.relimp(fit.optimo.3.var,type=c("lmg","last","first","pratt"),rela=TRUE)
# Devuelve muchas cosas pero lo mas importante es:
# lmg last first pratt
# log.wt 0.5111 0.8839 0.475 0.6736
# log.hp 0.4003 0.0951 0.422 0.2813
# qsec 0.0887 0.0209 0.103 0.0451
# Te dice para los 4 algoritmos que la mas relevante es log.wt.
# Se puede ver con otro paquete:
boot <- boot.relimp(fit.optimo.3.var, b = 1000, type = c("lmg","last", "first", "pratt")
, rank = TRUE, diff = TRUE, rela = TRUE)
booteval.relimp(boot) # print result
plot(booteval.relimp(boot,sort=TRUE)) # plot result
# Aqui te lo explican graficamente con las diferencias de relavancia entre
# las variables, la que esta primera con respecto a las demas es log.wt
# para los 4 algoritmos.
############### TRATAMIENTOS IMPORTANTES
### 1. Tratamiento Outlayers
# Imaginemos que tenemos esta regresion:
fit <- lm(mpg~disp+hp+wt+drat, data=mtcars)
summary(fit)
# Identificar outlayers sobre recta de regresion (con quantiles)
qqPlot(fit, main="QQ Plot")
# Identificar outlayers
leveragePlots(fit)
# 3 Plots para la identificacion de los outlayers con su distancia Cook
par(mfrow=c(2,2))
plot(fit,which = 4:6)
par(mfrow=c(1,1))
# Test de Bonferonni para las observaciones mas extremas:
# Coge el valor con un residuo estandarizado mas alto y utiliza una t de Student
# para recoger los puntos mas "extremos", que no tienen porque ser los que visualmente
# veamos en la grafica mas extremos.
outlierTest(fit, cutoff=3)
# En nuestro caso Los 3 primeros son:
# Toyota Corolla 2.515970 0.018380 0.58816
# Fiat 128 2.495829 0.019238 0.61562
# Chrysler Imperial 2.244689 0.033518 NA
# Sin embargo el Maserati Bora no aparece. Porque su residuo standarizado es menor.
### 2. Tratamiento Outlayers INFLUYENTES
# Cuando ya tenemos los outlayers identificados como en el ùnto anterior
# tenemos que ver cuales de ellos son influyentes.
# Para ello tenemos que calcular un numero de corte cutoff. Que sera el minimo numero
# entre 0 y 1 para que sea influyente ese punto.
# Hay gente que aplica: 4/(n-k-1),
# para k dimensiones (variables)
# para n valores de cada variable
# Otros prefieren aplicar: 2(k+1)/n incluso 3(k+1)/n para los mas restrictivos
# Primero vemos un plot de la VD con cada VI para ver los outlayers
# OJO DEPRECATED FUNCTION
library(car)
av.plots(fit)
# Segundo calculamos el minimo cutoff y vemos el plot de distancia de Cook para
# ese cutoff minimo
# cutoff > 4/(n-k-1)
k<-length(fit$coefficients)-1 # Le quito el intercepto
n<-nrow(mtcars) # Numero de filas (num.valores)
cutoff <- 4/((n-k)-1) # 0.1481481
cutoff <- 4/((n-k)-1) # 0.1481481
plot(fit, which=4, cook.levels=cutoff)
# Nos aparecen marcados como influyentes por su distancia:
# Maserati Bora, Chryler Imperial y Toyota Corolla
# Por ultimo nos ayudamos del plot de influencia para ver realmente
# los outlayers que mas influyen:
# Influence Plot
influencePlot(fit, id.method="identify", main="Influence Plot",
sub="Circle size is proportial to Cook's Distance" )
# En el grafico te muestra la influencia que tienen en funcion al diametro de su circulo
# Te los deja marcar para identificarlos.
# En el eje de las x tienes los que cometen mas error y en el eje de las y los que tienen
# mas distancia de cook (calculada antes).
# CONCLUSION: Aunque Toyota corolla tiene mas residuos standard, tiene menos influencia
# que Chrysler y Maserati. IMP: Estos dos en ningun caso han de quitarse.
####### 3. Tratamiento Normalidad
# Para revisar si los residuos cumplen normalidad:
# QQplot para residuos estandarizados
qqPlot(fit, main="QQ Plot")
# Y tambien ver el plot de residuos estandarizados sobre los quantiles de la normal:
plot(fit, which=2)
# Distribucion de residuos estandarizados
library(MASS)
sresid <- studres(fit)
hist(sresid, freq=FALSE,
main="Distribucion de residuos estandarizados")
# Devuelve una secuencia de 40 obs. desde el minimo al maximo
xfit<-seq(min(sresid),max(sresid),length=40)
# Probabilidad de una normal para cada valor de xfit
yfit<-dnorm(xfit)
# Las enfrento y reviso la normalidad
lines(xfit, yfit)
# Puedo incluso aplicar algun test:
shapiro.test(sresid) # Veo la NO NORMALIDAD
###### 4. Tratamiento Varianzas Iguales
# Para revisar si la varianza de los residuos es constante
# Una de las formas mas comunes es crear grupos para los residuos y aplicar un levene.test
# Una forma mas facil de aplicar lo mismo es con un ncvTest(non-constant error variance test)
# H0: varianzas constantes
ncvTest(fit) #p-value=0.231818>0.05 => Aceptamos H0=> Varianzas constantes
# Otro test analogo seria:
# H0: varianzas constantes
library(lmtest)
bptest(fit) #p-value=0.8371>0.05 => Aceptamos H0 => Varianzas constantes
# Cuando las varianzas no son constantes tenemos que transformar las variables.
# En este plot vemos los residuos VS estimados y vemos como van cambiando
# las varianzas(linea verde) a traves de la regression(linea roja).
spreadLevelPlot(fit)
# Y te sugiere un poder de transformacion: 0.6616338
# Cuya p siempre sera el multiplo de 0.5 mas cercano. En nuestro caso p=0.5 => Transformar variables en X^p
# Si p=0 => Transformar en ln(X)
# Otra opcion es hacer una transformacion BoxCOx para la VD: mpg en nuestro caso
library(caret)
mpg.trans<-BoxCoxTrans(mtcars$mpg)
print(mpg.trans)
# Hemos creado una transformacion ahora tenemos que cargar los valores
mpg.new<-predict(mpg.trans,mtcars$mpg)
new.mtcars<-cbind(mtcars,mpg.new)
head(new.mtcars)
# Ahora creamos una regresion transformada:
fit.trans.boxcox<-lm(mpg.new~disp+hp+wt+drat, data=new.mtcars)
summary(fit.trans.boxcox)
summary(fit)
# Vemos de nuevo las varianzas
ncvTest(fit.trans.boxcox)
spreadLevelPlot(fit.trans.boxcox)
ncvTest(fit)
spreadLevelPlot(fit)
# Hemos reducido la desviacion, se ve claramente en las graficas y en el
# Residual standard error del summary de los fit.
####### 5. Independencia de residuos
# Para que sea valida una regresion lineal se debe cumplir que cada residuo
# sea independiente del siguiente. Esto se demuestra con el test de Durbin-Watson:
# H0 no ha correlacion entre los residuos
durbinWatsonTest(fit, alternative = "two") # p-value= 0.272>0.05=> Independencia de los residuos
####### 6. Multicolinealidad
# Aunque ya lo tenemos bastante pillado, dos variables VD pueden estar correlacionadas
# entre ellas. Se ve claramente cuando tenemos un R^2 alto y algun p-value muy alto.
# Esa variable se explica por otra.
# Una forma facil de revisarlo es utilizando el coeficiente VIF:
# Se define como 1/1-R^2(de la variable)
vif(fit) # variance inflation factors
sqrt(vif(fit)) > 2 # Si la raiz de la influencia es mayor que 2 => Se explica por otra
# En nuestro caso tenemos multicolinealidad de las variables disp y wt.
# Tendriamos que ir quitando 1º disp, y luego volver a mirar que tal wt
# Ejemplo:
sqrt(vif(lm(mpg~hp+wt+drat, data=mtcars)))>2
# Quitando disp, la inflaccion de wt baja muchisimo
# CONCLUSION: Habia multicolinealidad entre wt y disp. Quitando disp solucionamos el problema
######## 7. Evaluar Linealidad
# Cuanto mas lineal son los puntos mas se ajustara un modelo lineal
# Plot del componente + residuo
crPlots(fit) # Te indica si la relacion es muy o poco lineal para cada VD
# Ceres plots indica lo mismo que el de antes
ceresPlots(fit)
# Vemos que para wt la linea verde se ajusta bastante a la regresion
# Por contra disp o drat no se ajustan mucho.
# Esto al final es equivalente a cor o parcor
######################EJEMPLO 4: VARIABLES CATEGORICAS#########################
# Para tratar las variables categoricas tenemos que identificar de que tipo estamos hablando:
# 1. Var. con 2 categorias: Lo suyo es representarla con 0 y 1.
# Ausencia y existencia. Y se puede estudiar toda ella.
# Si es ordinal se puede expresar como un factor y tratarla como una discreta mas.
# Por ejemplo la variable am de mtcars: Automatico/Manual
# 0: Automatico / 1: Manual
# Siempre trabajariamos con am o con factor (am)
am.f<-factor(am)
fit.am<-lm(mpg~am)
summary(fit.am)
#o
fit.am<-lm(mpg~am.f)
summary(fit.am)
# En am.f1 es igual que am porque solo tiene las categorias 0 y 1.
# Lo que esta diciendo es que un coche manual incrementa 7.245 veces el consumo frente
# a un automatico.
# 2. Var. con N categorias:
# a) Ordinal: Se puede trabajar con ella de forma unica: Haciendo factor y
# tratandola como una discreta
# Por ejemplo el corte en los diamantes:
library(ggplot2)
data(diamonds)
attach(diamonds)
# Mostramos datos
head(diamonds)
str(cut)
# Ord.factor w/ 5 levels "Fair"<"Good"<Very Good < Premium < Ideal
levels(cut)
# [1] "Fair" "Good" "Very Good" "Premium" "Ideal"
# Los levels no son numericos eso nos devuelve algo inteligible:
fit.cut<-lm(price~cut)
summary(fit.cut)
#cut.L,cut.Q,cut.C,cut^4????
# SOlucion tratarla como una variable discreta:
cut.num<-as.numeric(cut)
tapply(price,cut.num,summary)
boxplot(price~cut.num)
fit.cut<-lm(price~cut.num)
summary(fit.cut)
# Otra opcion es dicotomizarla y tratarla por separado
contrasts(cut) <- contr.treatment(5)
# Defines una tabla de contrastes par los valores:
#Fair 0 0 0 0
#Good 1 0 0 0
#Very Good 0 1 0 0
#Premium 0 0 1 0
#Ideal 0 0 0 1
# La categoria referencia es Fair y comprobaremos la relacion de los demas
# con price teniendo como referencia Fair.
fit.cut<-lm(price~cut)
summary(fit.cut)
# cut2 -429.89 113.85 -3.776 0.000160 *** --> Good con ref a Fair
# cut3 -377.00 105.16 -3.585 0.000338 *** --> V.Good con ref a Fair
# cut4 225.50 104.40 2.160 0.030772 * --> Premium con ref a Fair
# cut5 -901.22 102.41 -8.800 < 2e-16 *** --> Ideal con ref a Fair
# Por ejemplo aqui sacamos como conclusion que el precio aumenta en 225 de un corte
# Premium con respecto al Fair.
# b) No ordinal: Se tiene que dicotomizar. Creando N-1 variables dummies (0,0,0,...,1)
# y dejando una categoria (0,0,0...,0) como referencia.
# Se tratan siempre por separado, y se analizan con respecto a la categoria de referencia.
# Recogemos csv publicado y vamos a analizar su variable race.
# La cual puede tener 4 categorias posibles: (1 = Hispanic, 2 = Asian, 3 = African American and 4 = Caucasian)
hsb2 = read.table('http://www.ats.ucla.edu/stat/data/hsb2.csv', header=T, sep=",")
head(hsb2)
str(hsb2$race) # Es un numerico de valores 1 a 4
# 1. Factorizamos:
hsb2$race.f = factor(hsb2$race, labels=c("Hispanic", "Asian", "African-Am", "Caucasian"))
hsb2$race.f
# Vemos la media de write por nuestro nuevo factor
tapply(hsb2$write, hsb2$race.f, mean)
# Podriamos explorar mucho mas la variable, bloxplot, plot, cor, etc...
#2. Tenemos 4 categorias, no ordinales: Creamos una matriz de categorias y
# se la asignamos como contraste al factor
contrasts(hsb2$race.f) = contr.treatment(4)
print(hsb2$race.f)
# Planteamos la regression
summary(lm(write ~ race.f, hsb2))
# race.f2 11.542 3.286 3.512 0.000552 ***
# race.f3 1.742 2.732 0.637 0.524613
# race.f4 7.597 1.989 3.820 0.000179 ***
# CONCLUSIONES:
# La categoria African-Am, no es significativa. No podemos sacar conclusiones de ella
# Los escritores Asiaticos escriben 11,5 veces mas que los Hispanos
# Los escritores Caucasicos escriben 7.5 veces mas que los Hispanos
# No se puede sacar una relacion de la variable total raza, porque no es Ordinal
# Existen otros tipos de codificacion en vez de dummies:
# 1.Codificacion simple: Es practicamente igual que la codificacion dummy, puesto
# que cada categoria se compara con una categoria de referencia. La diferencia es el intercepto
# de la regresion en este caso es la media de las medias de las celdas. En la de dummy
# el intercepto corresponde a la celda media del grupo de referencia.
# Creamos la matriz de contrastes:
c<-contr.treatment(4)
# Repartimos las 4 categorias, generando una matriz de 1/4
my.coding<-matrix(rep(1/4, 12), ncol=3)
# Restamos a la matriz de contrastes la matriz de reparto
my.simple<-c-my.coding
my.simple
# Tendramos 1/k como referencia y -(k-1)/k como valor "1"
#Aplicamos esos contrastes al factor y miramos la regresion
contrasts(hsb2$race.f)<-my.simple
summary(lm(write~race.f, hsb2))
# Si nos fijamos tenemos los mismos coeficientes excepto el Intercepto por lo que comentabamos
# al principio. En este caso el intercepto es: 51.6784 = (46.45833 + 58 + 48.2 + 54.05517)/4
# Es decir la media de las medias de cada una.
# 2. Codificacion dummy ad-hoc: Si queremos hacer una codificacion dummy manualmente
# porque nos interesa otra dummy de referencia no Hispanico, por ejemplo Asiatico:
contrasts(hsb2$race.f) = matrix(byrow = T,data=c(c(0,0,1),c(0,0,0),c(0,1,0),c(1,0,0)), ncol = 3, nrow = 4)
hsb2$race.f<-relevel(hsb2$race.f, ref="Asian")
contrasts(hsb2$race.f)
summary(lm(write ~ race.f, hsb2))
# race.fHispanic -11.542 3.286 -3.512 0.000552 *** --> Hispanico con ref Asiatico
# race.fAfrican-Am -9.800 3.388 -2.893 0.004251 ** --> Afri.Amer con ref Asiatico
# race.fCaucasian -3.945 2.823 -1.398 0.163803 --> Caucasico con ref Asiatico
# 3. Codificacion ortogonal: Para variables que son numericas se puede usar este metodo
# que realmente te dice si un factor esta relacionado linealmente, cuadraticamente o cubicamente
# con la veriable dependiente:
hsb2$readcat<-cut(hsb2$read, 4, ordered = TRUE)
# Te divide en 4 grupos la variable discreta read y la agrupa por levels:
# Levels: (28,40] < (40,52] < (52,64] < (64,76]
table(hsb2$readcat)
tapply(hsb2$write, hsb2$readcat, mean)
# (28,40] (40,52] (52,64] (64,76]
# 42.77273 49.97849 56.56364 61.83333
# El contraste polinomial se define como contr.poly(4) para 4 categorias:
contr.poly(4)
# Donde L es lineal, Q cuadratico y C cubico
#Asignamos a la variable la matriz poligonal
contrasts(hsb2$readcat) = contr.poly(4)
summary(lm(write ~ readcat, hsb2))
# readcat.L 14.2587 1.4841 9.607 <2e-16 ***
# readcat.Q -0.9680 1.2679 -0.764 0.446
# readcat.C -0.1554 1.0062 -0.154 0.877
# CONCLUSION: El factor numerico y ordinal readcat se relaciona linealmente con
# write, ni cuadraticamente ni cubicamente.
# Por cada readcat hay 14,25 write.
|
b6b2c8dbb4fc90ad1f452c3633c44f07abd17449
|
29f4de62d217c3ed153e8ce95b49b730b1cb3053
|
/ui.R
|
aa5257d079aa1846aa7577e579e64b10afe05d58
|
[] |
no_license
|
ceparman/Imaging-App2
|
9b773821694c522e8c8bb1ad2c632e43f6eb98dd
|
8d76cfe0f9a5acc31bcd7bcac29d73d489a73319
|
refs/heads/main
| 2021-06-11T23:09:54.294835
| 2021-03-22T20:17:27
| 2021-03-22T20:17:27
| 158,266,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,653
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com
#
library(shiny)
library(shinyjs)

shinyUI(fluidPage(

  # Enable shinyjs so server code can toggle the disabled() inputs below.
  useShinyjs(),

  # Application title: page banner is the logo image; the browser window
  # title is set separately.
  titlePanel(title = div(img(src = "logo-frequency-therapeutics.png")),
             windowTitle = "Imaging App 2"),

  sidebarLayout(
    sidebarPanel(
      tags$h4("Cell Imaging Analysis"),
      tags$hr(),

      # --- Credentials ---------------------------------------------------
      fluidRow(
        column(4, textInput("user", "enter user name", "")),
        column(4, passwordInput("password", "enter password", ""))
      ),
      fluidRow(
        column(4, actionButton("validate", "validate credentials")),
        column(4, textOutput(outputId = "logmessage"))
      ),

      # --- Tenant selection ----------------------------------------------
      # choiceValues name the credential files consumed server-side.
      # (Fixed: removed a stray trailing comma after this fluidRow, and
      # use TRUE rather than the reassignable shortcut T.)
      fluidRow(
        column(8,
               radioButtons("target", "Select Tenant",
                            choiceNames  = c("Production", "Testing"),
                            choiceValues = c("Credentialsfreq_prod.txt",
                                             "Credentialsfreq_test.txt"),
                            inline = TRUE))
      ),

      tags$hr(),
      textOutput("status"),
      tags$hr(),

      # --- Plate selection -----------------------------------------------
      fluidRow(
        column(4, textInput("plate_barcode", "enter plate barcode", value = "WCP31")),
        column(4, checkboxInput("useplatefile", "Use Plate File", value = FALSE))
      ),
      # Plate file upload; starts disabled, enabled server-side via shinyjs
      # when "Use Plate File" is checked.
      disabled(fileInput("plate_file", 'Choose Plate Data File',
                         accept = c('text/csv',
                                    'text/comma-separated-values,text/plain',
                                    '.csv', 'application/zip'))),
      tags$hr(),

      # --- Data file inputs ----------------------------------------------
      radioButtons("file_type", "Single or Dual Files", choices = NULL, selected = NULL,
                   inline = TRUE, width = NULL,
                   choiceNames  = c("Single File", "Two Files"),
                   choiceValues = c("single", "dual")),
      # File-input widgets are rendered server-side depending on file_type.
      uiOutput("fileinput"),
      tags$hr(),

      # textInput("path", "Directory for Saved Data"),
      tags$hr(),
      # Enabled server-side once inputs are valid.
      disabled(actionButton("load", "Load Data and Sample Info"))
    ),

    # Main panel: dynamically rendered controls and data-review tabs.
    mainPanel(
      tabsetPanel(id = "inTabset",
        tabPanel("Controls",
                 uiOutput("controls")  # ,
                 # actionButton("process","Process Data")
        ),
        tabPanel("Review Data",
                 uiOutput("reject")  # ,
                 # textOutput("click_info"),
                 # actionButton("write","Write Results"),
                 # downloadButton("downloadData", "Download")
        )
      )
    )
  )
))
|
c8e53be42bd3ee1af1a1d053cfb9e8ff50b0d2e4
|
c16dc6dd3465dc3922623302d1882915d01d3f1b
|
/stan_tutorial.R
|
850945d21c0ecac35c077e57e4a20a7528bd3ccc
|
[] |
no_license
|
pstafford/StanGMMTutorial
|
a7b0237da90c62daf0bcb054a2c3ce35aa515165
|
79610ae75873919ac833985c30c8465cc97ca6f2
|
refs/heads/master
| 2023-07-06T02:32:03.336139
| 2021-08-13T10:57:29
| 2021-08-13T10:57:29
| 170,670,701
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,847
|
r
|
stan_tutorial.R
|
# stan_tutorial.R -- fits a simple ground-motion model to an NGA-West2 PGA
# subset three ways (lme4, hand-written Stan, brms), then sets up two Stan
# variants whose within-event residuals are spatially correlated between
# stations (free vs. fixed correlation length).
# load required packages
library(lme4)
library(rstan)
library(brms)
library(rstanarm)
library(bayesplot)
# run the MCMC chains in parallel on all available cores
options(mc.cores = parallel::detectCores())
# read data
# NOTE(review): hard-coded personal working directory -- the script only
# runs as-is on the author's machine; confirm before reuse.
setwd('/Users/nico/Dropbox/WORK/STAN_Tutorial/')
data <- read.csv('DATA/NGA_West2_Flatfile_RotD50_d050_public_version_subsetASK.csv', header=TRUE)
dim(data)
# pseudo-depth (h) and reference velocity (vref) for the functional form
# ln(PGA) ~ 1 + M^2 + ln(sqrt(R^2+h^2)) + M*ln(sqrt(R^2+h^2)) + ln(Vs30/vref)
h <- 6;
vref = 760;
M = data$Earthquake.Magnitude;
M_sq = M^2;
R = data$ClstD..km.;
lnR = log(sqrt(R^2 + h^2));
MlnR = M * log(sqrt(R^2 + h^2));
lnVS = log(data$Vs30..m.s..selected.for.analysis/vref);
EQID = data$EQID;
STATID = data$Station.Sequence.Number;
Y = log(data$PGA..g.);
data_regression = data.frame(M,M_sq,R,lnR,MlnR,lnVS,Y,EQID);
# classical mixed-effects reference fit: random intercept per earthquake
fit_lmer = lmer(Y ~ 1 + M_sq + lnR + M * lnR + lnVS
+ (1|EQID),data=data_regression);
# map event IDs onto consecutive integer indices for Stan
eq_idx_factor <- factor(data$EQID)
eq_idx <- as.numeric(eq_idx_factor)
# data list for the basic (non-spatial) Stan model
full_d <- list(
N = length(data[,1]),
NEQ = max(eq_idx),
idx_eq = eq_idx,
M = data$Earthquake.Magnitude,
R = data$ClstD..km.,
VS = data$Vs30..m.s..selected.for.analysis,
Y =log(data$PGA..g.)
);
# short chains for tutorial purposes
niter = 400;
wp = 200;
nchains = 4;
fit_model1 <- stan('STAN/gmm_model1_vectorized.stan', data = full_d,
iter = niter, chains = nchains, warmup = wp, verbose = FALSE)
# per-chain warmup/sampling wall time
print(get_elapsed_time(fit_model1))
### check sampler diagnostics
check_treedepth(fit_model1)
check_divergences(fit_model1)
### print and trace plot
print(fit_model1, pars = c('lp__','theta','phi','tau'))
traceplot(fit_model1,pars = c('lp__','theta','phi','tau'))
fit_summary <- summary(fit_model1)
posterior <- extract(fit_model1)
# posterior-mean event terms, plotted against event magnitude
deltaB_mean <- colMeans(posterior$deltaB)
M_eq <- unique(data.frame(eq_idx,data$Earthquake.Magnitude[eq_idx]))[,2]
par(mfrow = c(1,2))
plot(M_eq,deltaB_mean)
hist(posterior$theta[,2])
help("brm")
# same model via brms: formula interface on top of Stan
fit_brm <- brm(Y ~ 1 + M_sq + lnR + M * lnR + lnVS
+ (1|EQID),data = data_regression)
summary(fit_brm)
### check spatial correlation code ----
library(geosphere)
# sort the data by event to obtain a block diagonal structure
sdata <- data[order(EQID), ]
# compute the number of observations per event
uEQID <- unique(sdata$EQID)
numEvents <- length(uEQID)
obsPerEvent <- rep(0, numEvents)
for ( i in 1:numEvents ) {
obsPerEvent[i] <- length(which(sdata$EQID == uEQID[i]))
}
# get station longitude and latitudes
slon <- sdata$Station.Longitude
slat <- sdata$Station.Latitude
# compute the full inter-station distances
spos <- as.matrix(cbind(slon, slat))
# there is a co-located station within event 1030, so add some noise to one station RecID 11605
rid <- which(sdata$Record.Sequence.Number==11605)
spos[rid,] <- spos[rid,] + rnorm(2,0,1e-4)
# haversine great-circle distances, converted from metres to km
dij <- distm(spos, fun = distHaversine)/1e3
# event and station indices (recomputed on the event-sorted data)
eq_idx_factor <- factor(sdata$EQID)
eq_idx <- as.numeric(eq_idx_factor)
stat_idx_factor <- factor(sdata$Station.Sequence.Number)
stat_idx <- as.numeric(stat_idx_factor)
# input data structures (free and fixed correlation length versions)
full_d_sp_free <- list(
N = nrow(sdata),
NEQ = max(eq_idx),
NSTAT = max(stat_idx),
M = sdata$Earthquake.Magnitude,
R = sdata$ClstD..km.,
VS = sdata$Vs30..m.s..selected.for.analysis,
Y = log(sdata$PGA..g.),
idx_eq = eq_idx,
idx_stat = stat_idx,
distanceMatrix = dij,
ObsPerEvent = obsPerEvent
)
# Note that Jayaram & Baker give:
# 40.7 - 15.0*T for clustered Vs30 regions, and
# 8.5 + 17.2*T otherwise
# but these are within the context of a model exp(-3*dx/h)
# As we're using a simple model here we should expect apparent clustering from model bias,
# so use a relatively high value (h=30 --> correlationLength=10)
correlationLength <- 10.0
# create a blocked diagonal correlation matrix:
# EQID_i/EQID_j == 1 only within the same event, so oij masks cross-event pairs
oij <- outer(sdata$EQID, sdata$EQID, FUN="/")
oij[oij != 1] <- 0
rhoij <- oij * exp(-dij/correlationLength)
full_d_sp_fixed <- list(
N = nrow(sdata),
NEQ = max(eq_idx),
NSTAT = max(stat_idx),
M = sdata$Earthquake.Magnitude,
R = sdata$ClstD..km.,
VS = sdata$Vs30..m.s..selected.for.analysis,
Y = log(sdata$PGA..g.),
idx_eq = eq_idx,
idx_stat = stat_idx,
spatialCorrelationMatrix = rhoij,
ObsPerEvent = obsPerEvent
)
# NOTE(review): deliberately tiny iteration counts -- this is a smoke test of
# the spatial models, not a real fit.
niter = 4;
wp = 2;
nchains = 4;
fit_model1_spatial_free <- stan('STAN/gmm_model1_spatial_free.stan', data = full_d_sp_free,
iter = niter, chains = nchains, warmup = wp, verbose = FALSE)
print(fit_model1_spatial_free, pars = c('lp__','phiSS','tau','phiS2S','correlationLength'))
traceplot(fit_model1_spatial_free, pars = c('lp__','phiSS','tau','phiS2S','correlationLength'))
fit_model1_spatial_fixed <- stan('STAN/gmm_model1_spatial_fixed.stan', data = full_d_sp_fixed,
iter = niter, chains = nchains, warmup = wp, verbose = FALSE)
print(fit_model1_spatial_fixed, pars = c('lp__','phiSS','tau','phiS2S'))
traceplot(fit_model1_spatial_fixed, pars = c('lp__','phiSS','tau','phiS2S'))
|
a7a1c0fe7ef691fb5e1caf0f6eeda40a383c5764
|
c5de5d072f5099e7f13b94bf2c81975582788459
|
/R Extension/RMG/Energy/VaR/rmsys/make.class.Factors.R
|
c8a7dadc083121b809bfdc02ccbfa954bd03ebee
|
[] |
no_license
|
uhasan1/QLExtension-backup
|
e125ad6e3f20451dfa593284507c493a6fd66bb8
|
2bea9262841b07c2fb3c3495395e66e66a092035
|
refs/heads/master
| 2020-05-31T06:08:40.523979
| 2015-03-16T03:09:28
| 2015-03-16T03:09:28
| 190,136,053
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,672
|
r
|
make.class.Factors.R
|
# Make classification variables for one run.
#
#
# Written by Adrian Dragulescu on 12-Dec-2006
make.class.Factors <- function(IVaR, run, options){
IVaR <- IVaR$Changes
curve.split <- strsplit(IVaR$curve.name, " ")
mkt <- sapply(curve.split, function(x){x[2]})
location <- sapply(curve.split, function(x){x[3]})
type <- rep("Vega", nrow(IVaR))
type[which(is.na(IVaR$vol.type))] <- "Delta"
class.Factors <- data.frame(year = format(IVaR$contract.dt, "%Y"),
month = format(IVaR$contract.dt, "%Y-%m"),
market = mkt,
location = location,
type = type,
curve.name = IVaR$curve.name)
class.Factors$curve.name <- as.character(class.Factors$curve.name)
if ("in.20072008" %in% run$classification){
ind <- class.Factors$year %in% c("2007","2008")
class.Factors$in.20072008 <- "not.in.2007-2008"
class.Factors$in.20072008[ind] <- "in.2007-2008"
}
if ("gasseason" %in% run$classification){
gasseason <- gas.season(IVaR$contract.dt)
cash.month <- format(options$asOfDate, "%Y-%m")
prompt.month <- format(seq(as.Date(paste(cash.month,"-01",sep="")),
by="month", length.out=2)[2], "%Y-%m")
class.Factors$gasseason <- ifelse(as.character(class.Factors$month)==cash.month,
"cash", ifelse(as.character(class.Factors$month)==prompt.month, "prompt",
gasseason))
}
return(class.Factors)
}
## if ("gasseason" %in% run$classification){
## class.Factors$gasseason <- gas.season(IVaR$contract.dt)
## }
|
182b0f19d2fa83be34d6c7bbf90ad547250c9b96
|
e9e374930f4e25b4749a01baca169bf552d968f0
|
/plot2.R
|
2f74f497f6888d48f3f5e91cd0022efe2ac33edc
|
[] |
no_license
|
jasonrodgers/ExData_Plotting1
|
891875c462a91f871e923ec252d3872a72e20ad6
|
45ab60dbafed3e72a660fe8f4a711b26d52e1a39
|
refs/heads/master
| 2021-01-23T16:31:58.108076
| 2016-04-24T22:03:17
| 2016-04-24T22:03:17
| 56,937,120
| 0
| 0
| null | 2016-04-23T19:29:57
| 2016-04-23T19:29:56
| null |
UTF-8
|
R
| false
| false
| 1,057
|
r
|
plot2.R
|
## Exploratory Data Analysis course, plot 2: global active power for
## 2007-02-01 and 2007-02-02, written to Plot2.png.
## download and unzip the source data
path <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(path, "hpc.zip")
unzip("hpc.zip")
## load household power comsumption data
hpc <- read.table("household_power_consumption.txt", header=TRUE, sep=";")
## convert Date field to a date
hpc$Date <- as.Date(as.character(hpc$Date), format="%d/%m/%Y")
## get a subset of date for only the dates we care about
hpc_s <- subset(hpc, Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
## set global active power to a numeric
## (the raw column holds "?" for missing values, so it was not read as
## numeric; as.numeric(as.character(.)) coerces "?" to NA with a warning)
hpc_s$Global_active_power <- as.numeric(as.character(hpc_s$Global_active_power))
## create a combined date and time column
hpc_s$DT <- strptime(paste(as.character(hpc_s$Date), as.character(hpc_s$Time)), format="%Y-%m-%d %H:%M:%S")
## create the histogram to png device
## (type="n" draws the empty frame; lines() then overlays the time series)
png(filename = "Plot2.png")
with(hpc_s, plot(DT, Global_active_power, type="n", xlab="", ylab="Global Active Power (kilowatts)"))
lines(hpc_s$DT, hpc_s$Global_active_power)
dev.off()
|
fab4a8a028d16f93eca460d982b7c5010354b2df
|
53edece5ffdf7690bf068fa0b929bbca2f602465
|
/ggplot and shiny/ggplot.R
|
964c803f2715e04977325be19cbad628e0805919
|
[] |
no_license
|
sbjena/R-Basics
|
5f45ac5d6dd516ced171bda6da503cedfab2103b
|
10637da6396c5db6b2d1e08d9969b3a969c06725
|
refs/heads/master
| 2020-07-13T03:51:18.363193
| 2019-11-13T12:55:31
| 2019-11-13T12:55:31
| 204,982,021
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,992
|
r
|
ggplot.R
|
# ggplot2 walkthrough on the faraway::worldcup data set: scatter plots,
# histograms, bar charts, aesthetic mappings, faceting, themes, grouped
# summaries and a dcast() cross-tab.  Intended to be run top to bottom
# interactively.
#install.packages("faraway")
#install.packages("ggplot2")
#install.packages("corrplot")
#install.packages('Amelia')
#install.packages('caret')
#install.packages('caTools')
# install.packages("dplyr")
# install.packages("ggthemes")
# install.packages("reshape2")
library(faraway)
library(ggplot2)
library(dplyr)
library(ggthemes)
library(reshape2)
data(package='faraway')
data('worldcup')
dim(worldcup)
View(worldcup)
# below image means that with increse in time there is chance of getting more shots
plot(worldcup$Time,worldcup$Shots)
# implement with ggplot
p = ggplot(worldcup,aes(x = Time,y = Shots))
p+geom_point()+ ggtitle("Time vs shots")
class(p)
summary(p)
# Same plot, built with the pipe instead of a stored ggplot object.
worldcup %>% ggplot(aes(x = Time,y = Shots))+ geom_point()
# could not find function "%>%"
# install.packages("dplyr")
worldcup %>%
  ggplot(aes(Shots)) +
  geom_histogram(bins = 13)
levels(worldcup$Position)
#Position is a discrete value , search for discrete sectio on chitsheet for plotting
worldcup %>%
  ggplot(aes(Position))+
  geom_bar()
# Map extra variables onto color/size/shape aesthetics at once.
worldcup %>% ggplot(aes(x = Time,y = Shots,color = Position,size = Passes,shape = Team))+
  geom_point()
# facet_wrap(~variable) will return a symmetrical matrix of plots for the number of levels of variable .
# facet_grid(.~variable) will return facets equal to the levels of variable distributed horizontally.
# facet_grid(variable~.) will return facets equal to the levels of variable distributed vertically.
worldcup %>% ggplot(aes(x = Time,y = Shots,color = Position,size = Passes,shape = Team))+
  geom_point()+
  facet_wrap(~Position)
# facet_grid(~Position)
# ? # separate by team now as all team are part of Position
worldcup %>%
  filter(Team %in% c('Brazil','Spain')) %>%
  ggplot(aes(x = Time,y = Shots,color = Position,size = Passes,shape = Team))+
  geom_point()+
  facet_wrap(~Position)
worldcup %>%
  filter(Team %in% c('Brazil','Spain')) %>%
  ggplot(aes(x = Time,y = Shots,color = Position))+
  geom_point()+
  facet_grid(~Position)
# Two-way faceting: Team rows by Position columns.
worldcup %>%
  filter(Team %in% c('Brazil','Spain')) %>%
  ggplot(aes(x = Time,y = Shots,color = Position))+
  geom_point()+
  facet_grid(Team~ Position)
# to Support excel
# install.packages('ggthemes')
library(ggthemes)
# get theme pkg mostly used for better representation by economist
# (the later theme_economist() call overrides theme_excel())
worldcup %>%
  filter(Team %in% c('Brazil','Spain')) %>%
  ggplot(aes(x=Time, y=Passes,color = Position))+
  geom_point()+
  facet_grid(Team~Position) +
  theme_excel()+
  theme_economist()
# find avg no of shots played by brazil vs Spain
worldcup %>%
  filter(Team %in% c('Brazil','Spain')) %>%
  group_by(Team,Position) %>%
  summarise(Avg.Shots = mean(Shots)) %>%
  ggplot(aes(x=Position, y=Avg.Shots,fill = Team))+
  geom_bar(stat = 'identity',position = 'dodge')
#install.packages("reshape2")
library(reshape2)
# Reshape the grouped means into a Position x Team cross-tab.
worldcup %>%
  filter(Team %in% c('Brazil','Spain')) %>%
  group_by(Team, Position) %>%
  summarise(Avg.Shots = mean(Shots)) %>%
  dcast(Position ~ Team,value.var = "Avg.Shots")
|
0669fbfa5fe50aa8b462739e1be98244b21bceb2
|
e24d806f0b2cf7f223f68cc0c7d153af55d101c0
|
/R/iBMA.surv.R
|
c00004047fbf3465d46c74f072c78e5c24f5d119
|
[] |
no_license
|
hanase/BMA
|
935656507dc46cd72c861ae057b3a341f79c2096
|
49f27b5e1b3ac79fd1010437977098a0fbd99965
|
refs/heads/master
| 2022-08-19T00:17:30.186781
| 2022-07-22T19:15:45
| 2022-07-22T19:15:45
| 45,632,665
| 31
| 12
| null | 2019-12-09T03:39:49
| 2015-11-05T19:00:38
|
R
|
UTF-8
|
R
| false
| false
| 8,660
|
r
|
iBMA.surv.R
|
# S3 generic for iterated Bayesian Model Averaging (iBMA) on survival data;
# dispatches on the class of the predictor container (data.frame / matrix /
# iBMA.intermediate.surv).
iBMA.surv<- function(x, ...)
    UseMethod("iBMA.surv")

# Set up iterated BMA for Cox survival models.  Candidate predictors are
# ranked by univariate significance, the top maxNvar enter a bic.surv()
# window, and weak variables (probne0 below thresProbne0) are iteratively
# swapped out for the next candidates.  The assembled state is handed to the
# iBMA.intermediate.surv method, which performs the actual iterations.
#
# Args:
#   x           data frame of candidate predictors.
#   surv.t      survival times; cens: censoring indicator.
#   wt          case weights (default all 1).
#   thresProbne0  probne0 (%) below which a variable is dropped.
#   maxNvar     maximum variables held in the bic.surv window at once.
#   nIter       number of swap iterations to run before returning.
#   sorted      TRUE if x is already ordered by univariate significance.
#   factor.type FALSE expands factors into dummy columns via model.matrix.
iBMA.surv.data.frame <-
function (x, surv.t, cens, wt = rep(1, nrow(X)), thresProbne0 = 5,
    maxNvar = 30, nIter = 100, verbose = FALSE, sorted = FALSE,
    factor.type = TRUE, ...)
{
    # Factory for a cat()-based logger that is a no-op unless printYN is TRUE.
    printCGen <- function(printYN) {
        printYN <- printYN
        return(function(x) if (printYN) cat(paste(paste(x, sep = "",
            collapse = " "), "\n", sep = "")))
    }
    # CF namespcae unlock from https://gist.github.com/wch/3280369
    # NOTE(review): the block below compiles C at run time (cfunction is from
    # the 'inline' package, which must be available) to unlock the BMA
    # namespace and inject a 'glob' helper.  This pokes R internals
    # (FRAME_LOCK_MASK) and is fragile across R versions -- confirm it is
    # still required before modifying.
    #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    inc <- '
/* This is taken from envir.c in the R 2.15.1 source
https://github.com/SurajGupta/r-source/blob/master/src/main/envir.c
*/
#define FRAME_LOCK_MASK (1<<14)
#define FRAME_IS_LOCKED(e) (ENVFLAGS(e) & FRAME_LOCK_MASK)
#define UNLOCK_FRAME(e) SET_ENVFLAGS(e, ENVFLAGS(e) & (~ FRAME_LOCK_MASK))
'
    src <- '
if (TYPEOF(env) == NILSXP)
error("use of NULL environment is defunct");
if (TYPEOF(env) != ENVSXP)
error("not an environment");
UNLOCK_FRAME(env);
// Return TRUE if unlocked; FALSE otherwise
SEXP result = PROTECT( Rf_allocVector(LGLSXP, 1) );
LOGICAL(result)[0] = FRAME_IS_LOCKED(env) == 0;
UNPROTECT(1);
return result;
'
    unlockEnvironment <- cfunction(signature(env = "environment"),
        includes = inc,
        body = src)
    nsEnv <- asNamespace('BMA')
    unlockEnvironment(nsEnv)
    nsEnv$glob <- function() {
        utils::globalVariables(parent.env(environment()))
    }
    environment(nsEnv$glob) <- nsEnv
    pkgEnv <- as.environment('package:BMA')
    unlockEnvironment(pkgEnv)
    pkgEnv$glob <- nsEnv$glob
    exportEnv <- nsEnv$.__NAMESPACE__.$exports
    exportEnv$glob <- c(glob="glob")
    #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    # Silence R CMD check NOTE about the non-standard-eval data frame below.
    utils::globalVariables("nastyHack_x.df")
    # Rank candidates by the Chisq p-value of adding each variable alone to a
    # null Cox model (breslow ties); most significant first.
    sortX <- function(surv.t, cens, X, wt) {
        fitvec <- rep(NA, times = ncol(X))
        nastyHack_x.df <- data.frame(X)
        scp <- formula(paste("~", paste(colnames(X), sep = "",
            collapse = " + ")))
        cox.out <- coxph(Surv(surv.t, cens) ~ 1, weights = wt,
            method = "breslow", iter.max = 30, data = nastyHack_x.df)
        addcox <- add1(cox.out, scope = scp, test = "Chisq",
            data = nastyHack_x.df)
        fitvec <- addcox[-1, grep("^P.*Chi", names(addcox))]
        initial.order <- order(fitvec, decreasing = FALSE)
        sortedX <- X[, initial.order]
        return(list(sortedX = sortedX, initial.order = initial.order))
    }
    X <- x
    cl <- match.call()
    printC <- printCGen(verbose)
    # Expand factors into dummy columns when factor.type is FALSE.
    if (factor.type == FALSE) {
        x.df <- data.frame(X)
        X <- model.matrix(terms.formula(~., data = x.df), data = x.df)[,
            -1]
    }
    if (!sorted) {
        printC("sorting X")
        sorted <- sortX(surv.t, cens, X, wt = wt)
        sortedX <- sorted$sortedX
        initial.order <- sorted$initial.order
    }
    else {
        sortedX <- X
        initial.order <- 1:ncol(sortedX)
    }
    # Initialize sliding-window bookkeeping: the first maxNvar variables
    # enter; per-variable vectors record best probne0 seen, number of windows
    # visited, and the iterations at which each variable entered / dropped.
    nVar <- ncol(sortedX)
    maxNvar <- min(maxNvar, nVar)
    stopVar <- 0
    nextVar <- maxNvar + 1
    current.probne0 <- rep(0, maxNvar)
    maxProbne0 <- rep(0, times = nVar)
    nTimes <- rep(0, times = nVar)
    currIter <- 0
    first.in.model <- rep(NA, times = nVar)
    new.vars <- 1:maxNvar
    first.in.model[new.vars] <- currIter + 1
    iter.dropped <- rep(NA, times = nVar)
    currentSet <- NULL
    current_state <- list(surv.t = surv.t, cens = cens, sortedX = sortedX,
        wt = wt, call = cl, initial.order = initial.order, thresProbne0 = thresProbne0,
        maxNvar = maxNvar, nIter = nIter, verbose = verbose,
        nVar = nVar, currentSet = currentSet, new.vars = new.vars,
        stopVar = stopVar, nextVar = nextVar, current.probne0 = current.probne0,
        maxProbne0 = maxProbne0, nTimes = nTimes, currIter = currIter,
        first.in.model = first.in.model, iter.dropped = iter.dropped)
    class(current_state) <- "iBMA.intermediate.surv"
    # Delegate the iteration loop to the intermediate-state method.
    result <- iBMA.surv.iBMA.intermediate.surv(current_state,
        ...)
    result
}
### this function does a set number of iterations of iBMA, returning an intermediate result unless it is finished,
### in which case it returns a final result
# Run up to nIter more swap iterations of iBMA on an intermediate state `x`.
# Each iteration: (1) append the pending new variables to the current window,
# (2) fit bic.surv on the window, (3) drop variables whose probne0 falls
# below the threshold (raising the threshold adaptively if nothing would
# drop), (4) refill the freed slots with the next-ranked candidates.
# Returns an object of class "iBMA.surv" when all candidates have been
# examined, otherwise "iBMA.intermediate.surv" so the caller can resume.
iBMA.surv.iBMA.intermediate.surv<- function (x, nIter = NULL, verbose = NULL, ...)
{
    # Same no-op-unless-verbose logger as in the data.frame method.
    printCGen<- function(printYN)
    {
        printYN<- printYN
        return(function(x) if (printYN) cat(paste(paste(x,sep="", collapse = " "),"\n", sep="")))
    }
    cs<- x
    # check if nIter has been redefined
    if (!is.null(nIter)) cs$nIter<- nIter
    if (!is.null(verbose)) cs$verbose<- verbose
    printC<- printCGen(cs$verbose)
    finalIter<- cs$currIter + cs$nIter
    ### iterate until a final result is produced (cs$stopVar == 1) or nIter more iterations have been done
    while (cs$stopVar == 0 && cs$currIter < finalIter)
    {
        # add in the new variables
        nextSet<- c(cs$currentSet, cs$new.vars)
        cs$currIter<- cs$currIter + 1
        printC(paste("\n\n starting iteration ",cs$currIter," nextVar =",cs$nextVar))
        printC("applying bic.surv now")
        currentX<- cs$sortedX[,nextSet]
        colnames(currentX)<- colnames(cs$sortedX)[nextSet]
        ret.bic<- bic.surv(x = currentX, surv.t = cs$surv.t, cens = cs$cens, maxCol = cs$maxNvar + 1, ...)
        printC(ret.bic$probne0)
        # Track the best probne0 each variable has achieved and how many
        # windows it has appeared in.
        cs$maxProbne0[nextSet]<- pmax(ret.bic$probne0, cs$maxProbne0[nextSet])
        cs$nTimes[nextSet]<- cs$nTimes[nextSet] + 1
        cs$rmVector <- ret.bic$probne0 < cs$thresProbne0
        # adaptive threshold
        if (any(cs$rmVector) == FALSE)
        {
            # no var to swap in!!, increase threshold
            currMin <- min (ret.bic$probne0)
            printC (paste("no var to swap! Min probne0 = ", currMin, sep=""))
            newThresProbne0 <- currMin + 1
            printC(paste("new probne0 threshold = ", newThresProbne0, sep=""))
            cs$rmVector <- ret.bic$probne0 < newThresProbne0
            # check that we do not drop everything!
            if (all(cs$rmVector))
                cs$rmVector<- c(rep(FALSE, times = length(cs$rmVector)-1), TRUE)
        }
        # drop the bad ones...
        cs$iter.dropped[nextSet[cs$rmVector]]<- cs$currIter
        cs$currentSet<- nextSet[!cs$rmVector]
        # now if there are more variables to examine add the new set of variables to the current set
        if ( cs$nextVar <= cs$nVar)
        {
            # set up new X
            printC ("generating next set of variables")
            lastVar<- sum(cs$rmVector) + cs$nextVar - 1
            # add in bulk if we are not close to the end of the variables,
            if (lastVar <= cs$nVar)
            {
                cs$new.vars<- cs$nextVar:lastVar
                cs$first.in.model[cs$new.vars]<- cs$currIter + 1
                cs$nextVar <- lastVar + 1
            }
            # add in one by one until no variables left
            else
            {
                cs$new.vars<- NULL
                for (i in length(cs$rmVector):1)
                {
                    if (cs$rmVector[i] == TRUE && cs$nextVar <= cs$nVar)
                    {
                        cs$new.vars<- c(cs$new.vars, cs$nextVar)
                        cs$first.in.model[cs$nextVar]<- cs$currIter + 1
                        cs$nextVar <- cs$nextVar + 1
                    }
                }
            }
        }
        else
        {
            # exhausted all data
            cs$stopVar <- 1
            cs$new.vars = NULL
        }
    }
    # if we have finished (all variables) do some wrap-up and generate output values
    if (cs$stopVar == 1)
    {
        printC("finished iterating")
        # Final bic.surv fit on the surviving variable set.
        currentX<- cs$sortedX[,cs$currentSet]
        colnames(currentX)<- colnames(cs$sortedX)[cs$currentSet]
        ret.bic<- bic.surv(x = currentX, surv.t = cs$surv.t, cens = cs$cens, maxCol = cs$maxNvar + 1, ...)
        output<- cs
        output$bma<- ret.bic
        output$selected<- cs$currentSet
        output$nIterations<- cs$currIter
        class(output)<- "iBMA.surv"
    }
    else
    {
        output<- cs
        class(output)<- "iBMA.intermediate.surv"
    }
    output
}
# A matrix of predictors is handled identically to a data frame.
iBMA.surv.matrix<- iBMA.surv.data.frame
|
20fb2b974b4839d64b38a93ee9c48903d39c42b6
|
9f6d74b6a3ff6dabd1690cf9421f24a7272ac101
|
/archivo y email.R
|
d01b613c93a85692755cd720c7adddc7ebc50f8f
|
[] |
no_license
|
Chema40/R-bolsa
|
63336e62087ed2b0e3461002c17b19319c938cf8
|
c4bc1a1c5fe1e4188d4eb37828de245b9dd6d414
|
refs/heads/main
| 2023-08-26T01:49:36.080662
| 2021-10-05T21:09:58
| 2021-10-05T21:09:58
| 413,979,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
archivo y email.R
|
# One-off export: write per-ticker history data frames (already present in
# the global workspace, created by an earlier script) into a single
# multi-sheet Excel workbook, one sheet per IBEX-35 ticker.
# NOTE(review): install.packages() inside a script reinstalls on every run --
# better done once interactively.
install.packages("gmailr")
library(openxlsx)
# NOTE(review): gmailr is loaded but never used below; presumably an
# emailing step was planned -- confirm before removing.
library(gmailr)
# Sheet name -> data frame for that ticker (IBEX index plus its members).
l = list(IBEX = IBEX, ACS = ACS, ACX = ACX, ALM = ALM , AMS = AMS, ANA = ANA, BBVA = BBVA, BKIA = BKIA, BKT = BKT,
         CABK = CABK, CIE = CIE, CLNX = CLNX, COL = COL, ELE = ELE, ENC = ENC, ENG = ENG, FER = FER,
         GRF = GRF, IAG = IAG, IBE = IBE, IDR = IDR, ITX = ITX, MAP = MAP, MAS = MAS, MEL = MEL, MRL = MRL,
         MTS = MTS, NTGY = NTGY, REE = REE, REP = REP, SAB = SAB, SAN = SAN, SGRE = SGRE, TEF = TEF,
         VIS = VIS)
openxlsx::write.xlsx(l,"historico.xlsx")
|
852979a3a40d9b317abec55de763d6f6269b2ba2
|
7acb3ac4487256200e8dcf5a810212df69bb22c8
|
/Bootcamp/Maths/plots.R
|
ab31c7f0e2d63d55dd085af792b2285bdad7ab36
|
[] |
no_license
|
rougerbaptiste/Master
|
e14bf3f6b149af24f3b0bdcbe2b6a7cad7c0f58d
|
06628d4d1789922317152385941f5aea5de791c7
|
refs/heads/master
| 2020-07-15T14:07:23.481828
| 2016-11-03T15:05:21
| 2016-11-03T15:05:21
| 67,498,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
plots.R
|
# Scratch plotting exercise: plot the points (x, 2^x) with axes through the
# origin, then the natural log of the abscissae; both pages go to plot.pdf.
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- avoid in
# scripts that may be source()'d from an interactive session.
rm(list=ls())
absc <- c(-1,0,1,2)
ord <- c(1/2, 1, 2,4)
pdf("plot.pdf")
plot(ord~ absc, ylim=c(-1,5))
# Draw the coordinate axes through the origin.
abline(h=0)
abline(v=0)
# log base exp(1) is the natural log; log(-1) is NaN and log(0) is -Inf,
# so expect a warning and missing/infinite points on this page.
plot(log(absc, exp(1)))
dev.off()
|
86fac854e447c6bf588aa4317736751b2483e689
|
31f8efb0c6dbdcaa8bf2716b85ba5da1f43d3e74
|
/W3/week3CLTnotes.R
|
c418904264e3763cf9e9cc1ebc2803cc3b4c79c7
|
[] |
no_license
|
patiljeevanr/PH525.1x
|
9729b770de519b46cd834a507a6ce421f3028a8d
|
216781e642ff5b3f79eadd613a49c88a9ba97965
|
refs/heads/master
| 2020-06-05T02:52:30.735203
| 2015-09-09T17:30:53
| 2015-09-09T17:30:53
| 42,193,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
week3CLTnotes.R
|
# Course notes: illustrate the Central Limit Theorem on the mice phenotype
# data by simulating sampling distributions of a difference in means.
pops<-read.csv("mice_pheno.csv")
head(pops)
# Population body weights (column 3) for female mice on each diet.
hf<-pops[pops$Diet=="hf" & pops$Sex=="F",3]
chow<-pops[pops$Diet=="chow" & pops$Sex=="F",3]
# True population difference in means.
mean(hf)-mean(chow)
# One random sample of 12 per group: the estimate varies around the truth.
x<-sample(hf,12)
y<-sample(chow,12)
mean(x)-mean(y)
# For each sample size in Ns, simulate B differences of sample means.
Ns<-c(3,5,10,25)
B<-10000
res<-sapply(Ns,function(n){
  sapply(1:B,function(j){
    mean(sample(hf,n))-mean(sample(chow,n))
  })
})
# QQ-plots: the sampling distribution looks increasingly normal as n grows.
library(rafalib)
mypar2(2,2)
for(i in seq(along=Ns)){
  title<-paste("Avg=",signif(mean(res[,i]),3))
  title<-paste(title,"SD=",signif(sd(res[,i]),3))
  qqnorm(res[,i],main=title)
  qqline(res[,i])
}
|
ce5c584891c1482257e888a5fa4843536ecf422d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/prnsamplr/examples/samp.Rd.R
|
d838e7936382875ecf3cae9360a6063fbcf21aa1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
samp.Rd.R
|
# Extracted examples from the prnsamplr::samp help page (stratified permanent
# random number sampling); requires the package's ExampleData set.
library(prnsamplr)
### Name: samp
### Title: Stratified permanent random number sampling
### Aliases: samp
### ** Examples
# Probability-proportional-to-size draw within each stratum.
samp(method=pps, df=ExampleData, nsamp="nsamp", stratid="stratum", prn="prn", size="size")
# Simple random sampling variant (no size variable needed).
samp(method=srs, df=ExampleData, nsamp="nsamp", stratid="stratum", prn="prn")
|
4e832cf603c1dace611c0e3c722b03f79e8ca648
|
5d03057e8366bab4018f79f0f7ebdb809e77c595
|
/R/cv.mosaic.R
|
9e8285ce58a8746ca52cfdf3c3586a5a33b92c16
|
[
"MIT"
] |
permissive
|
arorarshi/MOSAIC
|
9e4e732cb9910067960b0e7c1c52541e7ad76da9
|
6ffaaf995267cf495cd1e91fd3f8b15edbd71a10
|
refs/heads/master
| 2021-07-19T12:38:53.264713
| 2021-04-01T16:24:21
| 2021-04-01T16:24:21
| 245,756,168
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,346
|
r
|
cv.mosaic.R
|
#Runs cross validation on mosaic fit to attain best k
#calculate sum of squares of each simulated dataset
# Ratio of within-cluster to total sum of pairwise (dis)similarities.
#
# Args:
#   mm:     square symmetric matrix of pairwise values.
#   labels: cluster label per row/column of mm.
#
# Returns: sum over clusters of the upper-triangle submatrix sums, divided
#   by the upper-triangle sum of the whole matrix (each pair counted once).
do.ss.stats <- function(mm, labels) {
  # Mask the diagonal and lower triangle so every pair is counted once.
  mm[lower.tri(mm, diag = TRUE)] <- NA
  total.ss <- sum(mm, na.rm = TRUE)
  within.ss <- vapply(unique(labels), function(lab) {
    in.grp <- labels == lab
    sum(mm[in.grp, in.grp], na.rm = TRUE)
  }, numeric(1))
  sum(within.ss) / total.ss
}
# Compute per-cluster centroids from an embedding matrix.
#
# Args:
#   mat:    numeric matrix with sample names as rownames.
#   labels: named cluster labels; names(labels) index rows of mat.
#   f:      fold identifier used to tag the centroid rownames.
#
# Returns: matrix with one centroid row per cluster, rownames "f<f>_k<label>".
get.centroid <- function(mat, labels, f) {
  cluster.ids <- unique(labels)
  if (ncol(mat) == 1) {
    warning("cmd reduces matrix to one eigen value! Noisy data?")
  }
  centroids <- matrix(NA, nrow = length(cluster.ids), ncol = ncol(mat))
  for (i in seq_along(cluster.ids)) {
    members <- mat[names(labels)[labels == cluster.ids[i]], ]
    if (is.vector(members)) {
      # Single-member cluster: row subsetting dropped to a plain vector.
      centroids[i, ] <- members
    } else {
      centroids[i, ] <- colMeans(members)
    }
  }
  rownames(centroids) <- paste0("f", f, "_k", cluster.ids)
  centroids
}
# Map one fold's cluster labels onto the consensus centroid-cluster labels.
#
# Args:
#   pattern:          rowname prefix for this fold, e.g. "f2_k".
#   olabel:           named original labels (values 1..kk) for the fold.
#   centroid.cluster: named vector of consensus labels, names like "f2_k1".
#   kk:               number of clusters.
#
# Returns: named vector like olabel but with consensus labels substituted;
#   warns if any sample could not be relabeled.
get.relabel <- function(pattern, olabel, centroid.cluster, kk) {
  relabel <- setNames(rep(NA, length(olabel)), names(olabel))
  for (k in seq_len(kk)) {
    key <- paste0(pattern, k)
    hit <- which(names(centroid.cluster) == key)
    if (length(hit) != 0) {
      new.label <- centroid.cluster[hit]
      members <- which(olabel == k)
      if (length(members) != 0) {
        relabel[members] <- new.label
      }
    }
  }
  if (any(is.na(relabel))) {
    warning("there is a NA in relabel, something is wrong with clustering, noisy data or pick lower k?")
  }
  relabel
}
#this is done to provide meaning to cluster labels across folds.
#we run kmeans on centroid vector of all the folds to determine their closeness.
#random start -10
# Harmonize cluster labels across CV folds.  Labels from independent folds
# are arbitrary (fold 1's "cluster 2" need not match fold 2's), so the fold
# centroids are themselves clustered with kmeans and each fold's test labels
# are mapped onto the resulting consensus labels via get.relabel().
# NOTE: kmeans with random starts makes this stochastic (depends on RNG state).
cv.relabel<-function(mat, train.labels,cv.test.labels, k,fold){
  # One centroid matrix per fold; rows are named "f<fold>_k<cluster>".
  centroids<-list()
  for(i in 1:length(train.labels)){
    centroids[[i]]<-get.centroid(mat, train.labels[[i]],i)
  }
  centroids.all<-do.call(rbind.data.frame, lapply(centroids, function(x) x))
  #do kmeans on the centroids
  centroids.kmeans<-kmeans(centroids.all,k,nstart=20)
  #print(centroids.kmeans$cluster)
  #centroids cluster labels
  all.cluster<-centroids.kmeans$cluster
  # Fill consensus labels sample by sample, fold by fold.
  relabel<-rep(NA,nrow(mat))
  names(relabel) = rownames(mat)
  for(i in 1:fold){
    pattern = paste0("f",i,"_k")
    rr<-get.relabel(pattern, cv.test.labels[[i]], all.cluster,k)
    relabel[names(rr)] = rr
  }
  return(relabel)
}
#############################
# perform cross validation
#############################
# k-fold cross-validation of the mosaic clustering for a candidate k.
# Each fold: recompute distances weighted by training-set Cox hazard ratios
# (getDist), fit mosaic() on the training samples, predict labels for all
# samples, and keep the held-out labels.  Per-fold labels are then harmonized
# with cv.relabel() and scored by the within/total sum-of-squares ratio.
# NOTE(review): getDist, combineDist, mosaic and predict.test.label are
# package internals not shown in this file; behavior notes below are read
# from their usage here.
# Returns: list(cv.labels = harmonized CV labels, cv.ss = within/total SS).
cv.mosaic<-function(x, out,k,fold, cmd.k=NULL, type=NULL){
  my.k <- as.numeric(k)
  fold <- as.numeric(fold)
  #To get an idea of total samples
  dist.dat<-getDist(x,out, type=type)
  #this for calculating ss on test labels
  combine.dist<-combineDist(dist.dat)
  inter <- intersect(names(out), rownames(combine.dist))
  out = out[inter]
  ll <- seq(1,length(inter))
  #we will use this for cluster relabeling of test
  if(is.null(cmd.k)){this.k = nrow(combine.dist)-1}
  if(!(is.null(cmd.k))){this.k = as.numeric(cmd.k)}
  combine.dist.cmd<-cmdscale(combine.dist, k=this.k)
  # Randomly partition sample indices into `fold` roughly equal folds.
  folds <- cut(seq(1,length(ll)),breaks=fold,labels=FALSE)
  ll.rand<- sample(ll,length(ll))
  #Perform n fold cross validation
  cv.test.labels<-list()
  #cv.test.rand.index =NA
  survfit<-list()
  for(i in 1:fold){
    #Segement your data by fold using the which() function
    test.idx <- ll.rand[which(folds==i)]
    train.idx <- setdiff(ll,test.idx)
    train.snames = names(out)[train.idx]
    out.train<- out[train.snames]
    #multiply by coxph abs(log(HR))
    distwt<-getDist(x,out,cv=TRUE,train.snames, type=type)
    train.dist.dat<-distwt$train
    all.dist.dat<-distwt$all
    #combine dist
    train.combine.dist<-combineDist(train.dist.dat)
    all.combine.dist<-combineDist(all.dist.dat)
    inter <- intersect(names(out), rownames(all.combine.dist))
    all.combine.dist = all.combine.dist[inter,inter]
    if(is.null(cmd.k)){cmd.k.all =nrow(all.combine.dist)-1 }
    if(!(is.null(cmd.k))){cmd.k.all =as.numeric(cmd.k) }
    #as cmd is on dist, and nrow is different for all and training set
    #but multiplied with training HR
    cmd.whole = cmdscale(all.combine.dist,k=cmd.k.all)
    #get training fit labels
    fit=mosaic(train.combine.dist,out,my.k,cmd.k)
    #calculate test logrank and concordance
    #we basically predict on whole
    test =predict.test.label(cmd.whole,fit,my.k)
    cv.test.labels[[i]] = test$test.labels
    survfit[[i]]<-fit
  }
  # Harmonize labels across folds, then sanity-check the solution.
  train.labels = lapply(survfit, function(x) x$cluster)
  cv.test.relabels<-cv.relabel(combine.dist.cmd, train.labels,cv.test.labels,my.k,fold)
  min.labels = min(table(cv.test.relabels))
  idx = which(min.labels <=5)
  if (length(idx)!=0){message(paste0("k= ", my.k, " has 5 or few samples in cluster solution"))}
  message(paste0("finished ", fold, " cross validation, total samples-", length(cv.test.relabels)))
  cv.test.relabels = cv.test.relabels[names(out)]
  if (length(unique(cv.test.relabels)) != my.k){warning(paste0("Test labels not equal to chosen k ",my.k)) }
  #if everything collapses after test relabeling
  if (length(unique(cv.test.relabels)) ==1){
    cv.all.logrank = NA
    warning("Everything collapsed after test relabel, logrank test is NA")
  }
  cv.test.ss<-do.ss.stats(combine.dist, cv.test.relabels)
  cv.fit = list(cv.labels = cv.test.relabels, cv.ss = cv.test.ss)
  return(cv.fit)
}
|
3747e146653172f667798e8047afa058f8f5a0e0
|
dae1a23e5b55d7f4e8089fcd58b425585b09d346
|
/scripts/scratch/testingVizualizePSD.R
|
0086614cf37c2a9752b2cda41f61561413d68199
|
[] |
no_license
|
canghel/kaggle-seizure-prediction
|
61855fdc42e82f78b4bd2fc28fb037388a39c6f5
|
61c1e3179495eccff2e976ab523f23f0c4dce824
|
refs/heads/master
| 2021-05-01T16:38:02.740072
| 2017-10-02T07:30:03
| 2017-10-02T07:30:03
| 71,008,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,409
|
r
|
testingVizualizePSD.R
|
# Scratch script: collapse a per-file PSD matrix into 25-row bands and plot
# several summaries (heatmap, band sums / medians / means) as PNGs.
# NOTE(review): 'fileToLoad' must be defined by the caller before sourcing.
setwd("C:/Users/cvanghel/Documents/personal-projects/kaggle-seizure-prediction/scripts/dataProcessing/");
outputPath <- "C:/Users/cvanghel/Documents/personal-projects/kaggle-seizure-prediction/output/dataProcessing";
# heatmap.2() below comes from gplots, so the library must actually be loaded
# (it was commented out, which made the heatmap.2 call fail).
library("gplots");
pxx <- read.csv(
	file = file.path(outputPath, paste0('20161008-test-pxx-',fileToLoad,'.txt')),
	sep = ",",
	header = FALSE,
	quote = "",
	stringsAsFactors = FALSE
)
#png(file.path(outputPath, "testpxx.png")); heatmap(as.matrix(pxx), Rowv=NA, Colv=NA); dev.off();
# Collapse each consecutive block of 25 rows into one row of column sums.
# BUG FIX: the original index '(25*j+1):25*j' parsed as '((25*j+1):25)*j'
# because ':' binds tighter than '*', selecting a garbled, mostly-descending
# set of rows.  Consecutive 25-row chunks appear to be the intent -- confirm.
pxxCollapsed <- NULL;
for(j in 1:floor(nrow(pxx)/25)){
	chunk <- (25*(j-1)+1):(25*j);
	pxxCollapsed <- rbind(pxxCollapsed, apply(pxx[chunk,], 2, sum, na.rm=TRUE));
}
# BUG FIX: 'tracecol=NAR' referenced an undefined object; NA disables the
# trace lines in heatmap.2.
png(file.path(outputPath, paste0("testpxx-",fileToLoad,".png"))); heatmap.2(as.matrix(pxxCollapsed)[15:20,], tracecol=NA, Rowv=NA, Colv=NA); dev.off();
# Log-scale bands 2:41 and normalize each band by its own maximum.
m <- log10(as.matrix(pxxCollapsed)[2:41,]+1);
v <- apply(m, 1, max);
w <- m;
for (j in 1:length(v)){
	w[j,] <- w[j,]/v[j];
}
png(file.path(outputPath, paste0("rowsums-",fileToLoad,".png"))); plot(2:41, rowSums(m)/ncol(m), type="l"); dev.off();
png(file.path(outputPath, paste0("med-m-",fileToLoad,".png"))); plot(2:41, apply(m,1,median), type="l"); dev.off();
png(file.path(outputPath, paste0("mean-w-",fileToLoad,".png"))); plot(2:41, apply(w,1,mean), type="l"); dev.off();
#png(file.path(outputPath, paste0("median-",fileToLoad,".png"))); plot(2:41, apply(w, 1, median), type="l"); dev.off();
|
9713e90e6a96dc26028dce5ef0587a33d79c086d
|
04097b27234fda66f6da57820670f6c81cdef5e9
|
/kicad/ntc.R
|
6a434a7c472386b8497bf890de35bba72cf48feb
|
[] |
no_license
|
sternlabs/pulsar
|
394b055855ddb2a8315e0ffbce9149749f755351
|
c4617251797275ae90374f13afbf62bd23be1e70
|
refs/heads/master
| 2016-09-06T10:46:02.410563
| 2015-05-19T12:11:26
| 2015-05-19T12:12:00
| 23,312,188
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,073
|
r
|
ntc.R
|
library(ggplot2)
library(plyr)
# Relative resistance R(t)/R(T0) of an NTC thermistor under the
# beta-parameter model; t and T0 are in degrees Celsius.
ntc <- function(t, B = 4000, T0 = 25) {
  kelvin <- 273.15
  exp(B * (1 / (kelvin + t) - 1 / (kelvin + T0)))
}

# Build a voltage-divider transfer function for a fixed resistor r1val on
# top and a thermistor leg with nominal resistance r2val (at 25 degC) on the
# bottom.  The returned closure maps temperature to Vout/Vin = r2/(r1+r2).
custom_divider <- function(r1val, r2val, B) {
  function(t) {
    top.leg <- r1val
    thermistor.leg <- r2val * ntc(t, B, 25)
    thermistor.leg / (top.leg + thermistor.leg)
  }
}
# Convert a fraction-of-full-scale value to a 10-bit ADC code, clamped to
# [0, 1023].  Note round() uses round-half-to-even, matching the original.
quantize <- function(x) {
  code <- round(1023 * x)
  pmin(pmax(code, 0), 1023)
}
# Tabulate divider outputs over temperatures t for one resistor pair
# r.choice (a list/row with $r1, $r2): v is the divider output voltage from
# a 3.3 V supply, and q/q2/q3 are 10-bit ADC codes at full, 2/3 and 1/3
# scaling against the reference aref = 3.3/3 V.
# NOTE: within() appends computed columns after t in reverse definition
# order (q3, q2, q, v, aref) -- callers index by name, so this is benign.
calc_div_value <- function(r.choice, t) {
  within(data.frame(t=t), {
    aref = 3.3 / 3
    # XXX B is approximate
    v <- 3.3 * custom_divider(r.choice$r1, r.choice$r2, B=4000)(t)
    q <- quantize(v / aref)
    q2 <- quantize(v * 2/3 / aref)
    q3 <- quantize(v * 1/3 / aref)
  })
}
# Sweep candidate resistor pairs over 0-100 degC and plot the 10-bit ADC
# code q3 (the 1/3-scaled reading) for every (r1, r2) combination.
r1.choice <- c(10e3,22e3,47e3,68e3,100e3)
r2.choice <- c(10e3,47e3,100e3)
r.choice <- expand.grid(r1 = r1.choice, r2 = r2.choice)
t <- seq(0, 100, 0.1)
# plyr::adply applies calc_div_value to each resistor-pair row and row-binds.
div.vals <- adply(r.choice, 1, calc_div_value, t=t)
div.plot <- ggplot(div.vals, aes(x = t)) + geom_step(aes(y = q3)) + facet_grid(r2 ~ r1, labeller=label_both) + labs(x="Temperature/°C", y="10-bit value")
div.plot
|
8bb2e5bb05c886845fce7ebd9ca10c983bf3c44c
|
e246f4b83e8fc68348d1c66edd9bfbb5575354a2
|
/hmm/hmm4.R
|
eb1cc51da65cebac5e6c478bd3bd2c839d7e0175
|
[] |
no_license
|
ellbur/little-stuff
|
7450f4dd3efb9f46f9bc3a9253a9081afb7b4b3c
|
36e01b78f2f9f4f6c1213735f65d30a767078139
|
refs/heads/master
| 2021-06-19T10:07:54.856370
| 2017-03-04T15:33:32
| 2017-03-04T15:33:32
| 167,698
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,844
|
r
|
hmm4.R
|
# Hidden-Markov-model playground built on RHmm.
# NOTE(review): 'item', the 'λ(...) %=% body' lambda shorthand and the
# xapply/xsapply/ysapply helpers are not base R or RHmm -- they come from a
# personal utility library that must be loaded first.  Their semantics below
# are inferred from usage; confirm against that library before relying on them.
library(RHmm)
# A 'Thing' is a true generating process: a transition matrix plus one
# observation generator per state; gen.data() simulates num.points
# observations, starting from a uniformly random state.
Thing = item(
  trans.matrix =,
  obs.gens =,
  num.states = nrow(trans.matrix),
  states = 1:num.states,
  gen.data = λ(num.points) %=% {
    start.state = sample(states, 1)
    state = start.state
    obs = vector(num.points, mode='list')
    for (i in 1:num.points) {
      obs[[i]] = obs.gens[[state]]()
      # Next state drawn from the column of the current state.
      state = sample(states, 1, prob=trans.matrix[,state])
    }
    obs
  }
)
# A 'Sample' is num.points observations drawn from a Thing.
Sample = item(thing=, num.points=,
  data = thing$gen.data(num.points)
)
# A 'Model' discretizes a sample's outputs into 10 k-means symbols, fits a
# 3-state discrete HMM, and scores new samples by their forward-backward
# likelihood under that HMM.
Model = item(sample=,
  # (1) Discretize the output space
  num.centers = 10,
  num.states = 3,
  data.matrix = do.call(rbind, sample$data),
  k.fit = kmeans(data.matrix, num.centers),
  centers = k.fit$centers,
  discretize = λ(points) %=% {
    as.character(sapply(points, closest))
  },
  # Index of the k-means center nearest (squared distance) to a point.
  closest = λ(point) %=% {
    which.min(sapply(1:num.centers, λ(k) %=% {
      sum((centers[[k]] - point)**2)
    }))
  },
  sample.obs = discretize(sample$data),
  symbols = as.character(1:num.centers),
  states = as.character(1:num.states),
  # (2) Create a Hidden Markov Model
  hmm = HMMFit(sample.obs, dis='DISCRETE', nStates=num.states, Levels=symbols),
  likeliness = λ(next.sample) %=% {
    next.obs = discretize(next.sample$data)
    Rho = forwardBackward(hmm, next.obs, keep.log=T)$Rho
    Rho[[length(Rho)]]
  }
)
# Two contrasting true processes: overlapping emission means with mixing
# transitions vs. near-diagonal transitions with a better-separated state.
things = list(
  Thing(
    rbind(
      c(0.1, 0.2, 0.7),
      c(0.2, 0.5, 0.2),
      c(0.7, 0.3, 0.1)
    ),
    list(
      λ() %=% rnorm(1, 0, 1),
      λ() %=% rnorm(1, 1, 1),
      λ() %=% rnorm(1, 2, 1)
    )
  ),
  Thing(
    rbind(
      c(0.9, 0.1, 0.2),
      c(0.1, 0.8, 0.2),
      c(0.0, 0.1, 0.6)
    ),
    list(
      λ() %=% rnorm(1, 4, 1),
      λ() %=% rnorm(1, 4, 1),
      λ() %=% rnorm(1, 6, 1)
    )
  )
)
# Fit a Model on a long sample of each Thing, then cross-score every model
# on every short sample: the resulting table shows which fitted model best
# explains which true process.
samples = xapply(things, Sample(x, 1000))
short.samples = xapply(things, Sample(x, 200))
models = xapply(samples, Model(x))
tests = xsapply(short.samples,
  ysapply(models, y$likeliness(x))
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.