blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ba7b21fd143273d044a5a1f9e3098cfe7339887
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/eha/R/check.surv.R
|
718376239394e040fcde36f60f8ce743c029a83b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,263
|
r
|
check.surv.R
|
check.surv <- function(enter, exit, event, id = NULL, eps = 1.e-8){
    ## Sanity-check survival data (left-truncated / right-censored spells).
    ## With no id, or one record per id, returns TRUE/FALSE for
    ## "every interval is non-degenerate"; otherwise delegates the
    ## per-individual consistency check to the compiled Fortran routine
    ## 'chek' and returns the ids of the offending individuals.
    n <- length(enter)
    if (length(exit) != n) stop("Length mismatch (enter/exit)")
    if (length(event) != n) stop("Length mismatch (enter/event)")
    if (!is.null(id) && (length(id) != n)) stop("Length mismatch (enter/id)")
    ## Fast path: no id given, or every record belongs to its own individual.
    if (is.null(id) || (length(unique(id)) == n)) {
        return(all(enter < exit))
    }
    ## Multi-record individuals: sort records by (id, enter) first.
    n.ind <- length(unique(id))
    ord <- order(id, enter)
    id <- factor(id[ord])
    enter <- enter[ord]
    exit <- exit[ord]
    event <- as.logical(event[ord])
    id.size <- table(id)                      ## records per individual, length n.ind
    res <- .Fortran("chek",
                    as.integer(n),
                    as.integer(n.ind),
                    as.integer(id.size),
                    as.double(enter),         ## length = n
                    as.double(exit),          ## length = n
                    as.integer(event),        ## length = n
                    as.double(eps),
                    sane = integer(n.ind)     ## boolean; TRUE: good individual
                    )
    ## Ids whose records fail the consistency check.
    levels(id)[res$sane == 0]
}
|
3a698f4fb0e1c1e12819c3e737ee1ebd05b087d0
|
f27a99810f7f0ff5796c66d5704af358d8d6cf1d
|
/plot3.R
|
712746da06c19b5655e5c748fc63053a98f38a3e
|
[] |
no_license
|
allynlea/ExData_Plotting1
|
840c9d01cbced62bd984fc22bc64f9bafd34a4f7
|
e2dab02617769badaa8727b5ade38a70e1b47cde
|
refs/heads/master
| 2021-01-22T15:51:08.752769
| 2016-01-11T12:13:32
| 2016-01-11T12:13:32
| 49,390,087
| 0
| 0
| null | 2016-01-10T23:16:09
| 2016-01-10T23:16:08
| null |
UTF-8
|
R
| false
| false
| 807
|
r
|
plot3.R
|
## Plot 3 (Exploratory Data Analysis course): energy sub-metering over the
## two target days, written to plot3.png (480 x 480).
power_data <- read.csv("household_power_consumption.csv", sep = ";", na.strings = "?")
power_data$Date <- as.Date(power_data$Date, "%d/%m/%Y")
## Restrict to 2007-02-01 and 2007-02-02.
two_days <- subset(power_data, power_data$Date == "2007-02-01" | power_data$Date == "2007-02-02")
## Combine date and time into a single POSIXlt timestamp column.
two_days$DateTime <- paste(two_days$Date, two_days$Time)
two_days$DateTime <- strptime(two_days$DateTime, "%Y-%m-%d %H:%M:%S" )
png( file="plot3.png", height=480, width=480)
plot(two_days$DateTime, two_days$Sub_metering_1, type="l", xlab="",
     ylab="Energy sub metering", main="")
lines(two_days$DateTime, two_days$Sub_metering_2, type="l", col="red")
lines(two_days$DateTime, two_days$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=c(1,1,1), lwd = c(2.5,2.5,2.5), col=c("black", "red", "blue"))
dev.off()
|
3d8798f8ab9941a6531595e2416a9400b95c55ee
|
eb3215ea01fc670cd0e705f8414ffad9dc51ff46
|
/R/PerformanceTesters.R
|
e2996b22e8fb0fdfc623797fc6bf28ea0b09b018
|
[] |
no_license
|
ireyoner/tmParallel
|
e68204fe47c0760fa4b1f3a4e902bbbab7b5a1be
|
24a2ebdbc110111e2f615fec11c6f53278e81104
|
refs/heads/master
| 2020-09-20T03:54:36.397638
| 2016-09-12T20:30:30
| 2016-09-12T20:30:30
| 66,380,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,540
|
r
|
PerformanceTesters.R
|
#' tmParallel functions for performance testing
#'
#' Some functions useful for performance testing - with different implementations
#' for creating DocumentTermMatrix or TermDocumentMatrix
#'
#' For parameter lists see \link{ParallelDocumentTermMatrix}
#'
#' @rdname tmParallelPerformanceTesting
DocumentTermMatrixOriginal <- function(texts, control = list(), docsIDs = as.character(1:length(texts))){
  # Build a DocumentTermMatrix with the original (sequential) C++ backend;
  # all real work happens in the shared `process` driver.
  process (
    texts = texts
    , control = control
    , docsIDs = docsIDs
    , func = tmParallel:::Cpp_dtm_original
    , matrixKind = 'DocumentTermMatrix'
  )
}
#' @rdname tmParallelPerformanceTesting
TermDocumentMatrixOriginal <- function(texts, control = list(), docsIDs = as.character(1:length(texts))) {
  # Same sequential backend as DocumentTermMatrixOriginal; only the output
  # orientation (term-document instead of document-term) differs.
  process (
    texts = texts
    , control = control
    , docsIDs = docsIDs
    , func = tmParallel:::Cpp_dtm_original
    , matrixKind = 'TermDocumentMatrix'
  )
}
#' @rdname tmParallelPerformanceTesting
DocumentTermMatrixParallelSlower <- function(texts, control = list(), docsIDs = as.character(1:length(texts))) {
  # Parallel (list-based) backend variant, kept for benchmarking; the name
  # suggests it is the slower of the parallel implementations.
  process (
    texts = texts
    , control = control
    , docsIDs = docsIDs
    , func = tmParallel:::Cpp_dtm_parallel_Lists
    , matrixKind = 'DocumentTermMatrix'
  )
}
#' @rdname tmParallelPerformanceTesting
TermDocumentMatrixParallelSlower <- function(texts, control = list(), docsIDs = as.character(1:length(texts))) {
  # Term-document twin of DocumentTermMatrixParallelSlower, using the same
  # list-based parallel backend.
  process (
    texts = texts
    , control = control
    , docsIDs = docsIDs
    , func = tmParallel:::Cpp_dtm_parallel_Lists
    , matrixKind = 'TermDocumentMatrix'
  )
}
|
cfc44fe1bd8ecd6b0a131955b93cda5174ed6e7a
|
c58a1595115fea554db8cd6578279f574eabfa0e
|
/man/chk_omitted.Rd
|
b817a29f71325ca21f73d4b49079d788f832385f
|
[
"MIT"
] |
permissive
|
bayesiandemography/demcheck
|
129aca86fecda02be83bea73e639fb45d366c651
|
c52c3e4201e54ead631e587ebf94f97f9c7a05a0
|
refs/heads/master
| 2021-12-28T15:40:54.771894
| 2021-12-17T03:10:50
| 2021-12-17T03:10:50
| 200,993,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,385
|
rd
|
chk_omitted.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk-composite.R, R/err-composite.R
\name{chk_omitted}
\alias{chk_omitted}
\alias{chk_not_omitted}
\alias{err_omitted}
\alias{err_not_omitted}
\title{Check whether dimension(s) identified by an index are omitted,
or not omitted, according to 'map_dim'}
\usage{
chk_omitted(index, map_dim, name_index, name_dim)
chk_not_omitted(index, map_dim, name_index, name_dim)
err_omitted(index, map_dim, name_index, name_dim)
err_not_omitted(index, map_dim, name_index, name_dim)
}
\arguments{
\item{index}{An integer vector, identifying dimensions of array \code{self}}
\item{map_dim}{Integer vector mapping dimensions of array \code{self} to
dimensions of array \code{oth}.}
\item{name_index}{Name of \code{index}.}
\item{name_dim}{Name of the dimension that should or should not be omitted.}
}
\description{
\code{chk_omitted} checks that a dimension \emph{is} omitted,
and \code{chk_not_omitted} checks that a dimension \emph{is not}
omitted.
}
\examples{
chk_omitted(index = 2L,
map_dim = c(1L, 0L, 2L),
name_index = "indices_orig_self",
name_dim = "origin")
chk_not_omitted(index = 3L,
map_dim = c(1L, 0L, 2L),
name_index = "indices_dest_self",
name_dim = "destination")
}
\seealso{
\code{\link{chk_map_dim}}
}
|
5d34caf3de33534b43103ddbbd1bd87f1333959c
|
c0f1ad567a5f8ab8fb376242dc1a990d2ab6b3e8
|
/Propensión/MatrizOD.R
|
6a50538267cc041587c51ca029c140e34bee1263
|
[] |
no_license
|
RAS-WB-Uniandes-Urban-Cycling/proBikePolicies
|
edda6596b693f68b22c4ad50d6746833cef167e3
|
5c82094420a38421748bbb1f997550df4852fd17
|
refs/heads/master
| 2021-06-06T17:44:25.098109
| 2021-05-09T18:06:08
| 2021-05-09T18:06:08
| 135,209,976
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,560
|
r
|
MatrizOD.R
|
###### Analysis of the OD (origin-destination) matrix, 2015 Mobility Survey
# Load libraries for data transformation and visualisation
library(tidyverse)
library(readxl)
library(sf)
library(tmap)
library(tmaptools)
library(stplanr)
library(circlize)
######## Demand heatmaps #######
# Load the geodatabase layer with the ZATs (transport analysis zones),
# projected to SRID 4326 (long/lat, WGS84)
ZATs_sf <- st_read("Bases de datos/Bases.gdb",layer = "ZATs",stringsAsFactors = FALSE) %>% st_transform(4326) %>%
  mutate(id = as.numeric(id),zona_num_n = as.numeric(zona_num_n)) %>% select(zona_num_n ) %>% filter(!st_is_empty(.))
# Bogota boundary polygon according to IDECA, excluding the Sumapaz locality
Loc <- st_read("Bases de datos/IDECA.gdb",layer = "Loca") %>% filter(LocNombre != "SUMAPAZ") %>% st_transform(4326)
# Select the ZATs whose centroid falls inside the Bogota region
sel <- st_within(x = st_centroid(ZATs_sf), y = st_union(Loc), sparse = FALSE)
ZATBOG <- ZATs_sf %>% filter(sel)
# Attach locality code and name to each ZAT centroid, then drop geometry
ZATLoc <- st_join(x = st_centroid(ZATBOG),y = select(Loc,LocCodigo,LocNombre)) %>% `st_geometry<-`(NULL)
ZATBOG_df <- ZATBOG %>% left_join(ZATLoc) %>% `st_geometry<-`(NULL)
# Multimodal OD matrix (peak hour of a working day)
ODZATMM <- read_xlsx("Bases de datos/EncuestaMovilidad2015/Matrices EODH/matriz_medio_pico_habil.xlsx",na = "0")
ODZATMM[is.na(ODZATMM)] <- 0
# Keep trips whose origin and destination are both Bogota ZATs;
# f_Total sums the trips over every mode column (those starting with "f_")
ODZATMM <- ODZATMM %>% filter(zat_origen %in% unique(ZATBOG$zona_num_n) & zat_destino %in% unique(ZATBOG$zona_num_n)) %>%
  mutate(f_Total = select(.,starts_with("f_")) %>% rowSums())
## Trips originating in each ZAT
ODZATMM_O <- ODZATMM %>% select(zona_num_n=zat_origen,starts_with("f_")) %>%
  group_by(zona_num_n) %>% summarise_if(is.numeric,sum)
## Trips ending in each ZAT
ODZATMM_D <- ODZATMM %>% select(zona_num_n=zat_destino,starts_with("f_")) %>%
  group_by(zona_num_n) %>% summarise_if(is.numeric,sum)
# Join origin (suffix .x) and destination (suffix .y) totals onto the ZAT layer
ZAT_OD <- ZATBOG %>% left_join(ODZATMM_O, by=c("zona_num_n")) %>% left_join(ODZATMM_D, by=c("zona_num_n"))
# Map: total bicycle trips per ZAT (origin vs destination facets)
tm_shape(Loc,bbox = bb(ZAT_OD)) + tm_borders(alpha = 0.5)+
  tm_shape(ZAT_OD) + tm_fill(col = c("f_bicicleta.x", "f_bicicleta.y"),
                             palette = get_brewer_pal("YlGnBu",5,plot = F), style = "jenks", n = 5,
                             title = "Total viajes bicicleta",colorNA = "grey") + tm_borders() +
  tm_facets(free.scales = FALSE) +
  tm_layout(panel.labels = c("ZAT Origen", "ZAT Destino"),panel.label.bg.color = "white") +
  tm_scale_bar(position = c("left","top"))+ tm_compass()
# Map: total trips (all modes) per ZAT (origin vs destination facets)
tm_shape(Loc,bbox = bb(ZAT_OD)) + tm_borders(alpha = 0.5)+
  tm_shape(ZAT_OD) + tm_fill(col = c("f_Total.x", "f_Total.y"),
                             palette = get_brewer_pal("YlGnBu",5,plot = F), style = "jenks", n = 5,
                             title = "Total viajes",colorNA = "grey") + tm_borders() +
  tm_facets(free.scales = FALSE) +
  tm_layout(panel.labels = c("ZAT Origen", "ZAT Destino"),panel.label.bg.color = "white") +
  tm_scale_bar(position = c("left","top"))+ tm_compass()
############ Chord Diagram
# Desire lines for the chord diagram
# NOTE(review): units::as.units is the legacy spelling; current releases of
# the 'units' package use units::as_units -- confirm against the installed version.
flowLines <- od2line(flow = ODZATMM,zones = as_Spatial(ZATBOG)) %>% st_as_sf() %>% mutate(length = st_length(.))
# Aggregate short trips (<= 5 km here) to locality pairs
shortTravels <- flowLines %>% filter(length <= units::as.units(5,value = "km")) %>% `st_geometry<-`(NULL) %>%
  left_join(ZATBOG_df,by = c("zat_origen" = "zona_num_n")) %>% left_join(ZATBOG_df,by = c("zat_destino" = "zona_num_n")) %>%
  rename(CodOrigen = LocCodigo.x, NombreOrigen = LocNombre.x, CodDestino = LocCodigo.y,NombreDestino = LocNombre.y) %>%
  group_by(CodOrigen,NombreOrigen,CodDestino,NombreDestino) %>% summarise_if(is.numeric,sum) %>% rownames_to_column(var = "Pair") %>%
  ungroup()
## Reshape the OD matrix for the chord diagram
percentil = 90
# Flows below the chosen percentile are zeroed so that, for each origin
# locality, only the dominant destinations remain visible.
# NOTE(review): the original comment claimed "90% of intermodal trips with
# distances under 10 km", but the filter above uses 5 km -- confirm cutoff.
ODChord <- shortTravels %>% select(c(2,3,5,13)) %>% filter(f_Total > 0) %>%
  mutate(CodOrigen = as.character(CodOrigen),Total = f_Total) %>%
  mutate(Total = if_else(Total<quantile(Total,percentil/100),0,Total)) %>%
  ungroup()
# Per-origin totals, ordering, colours and legend labels for the diagram
ChordOrd <- ODChord %>% group_by(CodOrigen,NombreOrigen) %>% summarise(Total=sum(Total)) %>%
  arrange(desc(Total)) %>% ungroup() %>% mutate(Color = rainbow(n(),s = 0.75,v = 0.7),legend = glue::glue("[{CodOrigen}] {NombreOrigen}"))
ChordDF <- ODChord %>% select(-1)
# Build the chord diagram and write it to a PDF
circos.clear()
circos.par(start.degree = 90, gap.degree = 3, track.margin = c(-0.1, 0.1), points.overflow.warning = FALSE)
par(mar = rep(0, 4),family = "Open Sans")
file <- paste0("Figures/ChordBike",percentil,".pdf")
pdf(file,width=8,height=5.5)
chordDiagram(x = ChordDF, grid.col = ChordOrd$Color, transparency = 0.25,
             order = ChordOrd$NombreOrigen, directional = 1,
             direction.type = c("arrows", "diffHeight"), diffHeight = -0.04,
             annotationTrack = "grid", annotationTrackHeight = c(0.15, 0.1),
             link.arr.type = "big.arrow", link.sort = TRUE, link.largest.ontop = TRUE)
# Label each sector with its locality code
circos.trackPlotRegion(
  track.index = 1,
  bg.border = NA,
  panel.fun = function(x, y) {
    xlim = get.cell.meta.data("xlim")
    sector.index = get.cell.meta.data("sector.index")
    reg1 = ChordOrd$CodOrigen[ChordOrd$NombreOrigen == sector.index]
    circos.text(x = mean(xlim), y = 0.5, labels = reg1, facing = "downward", cex = 0.9)
  }
)
legend("left", pch = 15, col = ChordOrd$Color,
       legend = ChordOrd$legend,cex = 0.7,bty = "n",
       pt.cex = 1.5,ncol = 1,text.width = 0.3)
dev.off()
|
7671fcb5f65406cfe2001f1b9225bfe49a047cdf
|
1e098b440afdb0993b2805eec21f0d25afb0a0e8
|
/man/tissue_scale.Rd
|
581224128bff0be144b786b0e57d5c413caf13f8
|
[] |
no_license
|
HQData/CompTox-ExpoCast-httk
|
e9f13135042e804975cd538a4580b33b6ae22021
|
020314daa05af42979a205c395302377db2f9332
|
refs/heads/master
| 2020-04-20T04:27:43.094941
| 2018-01-24T15:45:39
| 2018-01-24T15:45:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 759
|
rd
|
tissue_scale.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tissue_scale.R
\name{tissue_scale}
\alias{tissue_scale}
\title{Allometric scaling.}
\usage{
tissue_scale(height_ref, height_indiv, tissue_mean_ref)
}
\arguments{
\item{height_ref}{Reference height in cm.}
\item{height_indiv}{Individual height in cm.}
\item{tissue_mean_ref}{Reference tissue mass or flow.}
}
\value{
Allometrically scaled tissue mass or flow, in the same units as
\code{tissue_mean_ref}.
}
\description{
Allometrically scale a tissue mass or flow based on height^{3/4}.
}
\references{Ring, Caroline L., et al. "Identifying populations sensitive to environmental chemicals by simulating toxicokinetic variability." Environment International 106 (2017): 105-118.}
|
13d4298ddcb001381abef9c1bf48b0f1ddd5ae85
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fslr/examples/fslrange.Rd.R
|
7cefdcf8ca7ca3e38505b957ca185776665b1a20
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
fslrange.Rd.R
|
# Auto-extracted example for fslr::fslrange (prints the intensity range of an image).
library(fslr)
### Name: fslrange
### Title: Get range of an image
### Aliases: fslrange
### ** Examples
# Guard: only run when a local FSL installation can be located
if (have.fsl()){
  # Standard 2 mm MNI template shipped with FSL
  mnifile = file.path(fsldir(), "data", "standard",
                      "MNI152_T1_2mm.nii.gz")
  fslrange(mnifile)
}
|
ca9a3197af9995c91ba0b6a059c3f6e249e680fc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EHRtemporalVariability/examples/plotIGTProjection-methods.Rd.R
|
454c87f3e72bf368b6a4bf84c1425fa106b7a68d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 841
|
r
|
plotIGTProjection-methods.Rd.R
|
# Auto-extracted example for EHRtemporalVariability::plotIGTProjection
# (Information Geometric Temporal plot of an IGTProjection object).
library(EHRtemporalVariability)
### Name: plotIGTProjection
### Title: Information Geometric Temporal plot
### Aliases: plotIGTProjection plotIGTProjection,IGTProjection-method
### ** Examples
# Load a demo IGTProjection bundled with the package
load(system.file("extdata",
                 "variabilityDemoNHDSdiagcode1-phewascode.RData",
                 package="EHRtemporalVariability"))
# 2-D projection drawn with the Spectral colour palette
p <- plotIGTProjection( igtProjection = igtProjs[[1]],
                        colorPalette = "Spectral",
                        dimensions = 2)
p
## Not run:
##D
##D # For additional and larger examples download the following .Rdata file:
##D
##D githubURL <- "https://github.com/hms-dbmi/EHRtemporalVariability-DataExamples/raw/master/variabilityDemoNHDS.RData"
##D load(url(githubURL))
##D plotIGTProjection(igtProjs$`diagcode1-phewascode`, dimensions = 3)
## End(Not run)
|
123f65477ed2bd87b5eed4516fc90037bfc25125
|
c192c8e32af24c49132befbe8bf613767b60ae66
|
/R/my_t.test.R
|
4970912c12d55db6110a64f80d608f2956a7ec15
|
[] |
no_license
|
Ali-Jahan/Project2
|
f549f931a1fa628342b1040034e6997cf8d9f3c7
|
b72b906b39e3b79830173c2f333e7103bdbaf1ca
|
refs/heads/master
| 2023-01-30T14:29:19.986134
| 2020-12-17T16:17:23
| 2020-12-17T16:17:23
| 322,231,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,204
|
r
|
my_t.test.R
|
#' T hypothesis test.
#'
#' This function implements one sided and two sided T hypothesis tests.
#'
#' @param x Numeric input of the data set.
#' @param alternative String input indicating whether the hypothesis is
#'   "less", "greater", or "two.sided".
#' @param mu Numeric of length 1 indicating the mean (mu) that is being tested against.
#' @keywords inference
#'
#' @return List including \code{test_stat}, \code{df} for degree of freedom, \code{alternative},
#' and \code{p_val} of T hypothesis test.
#'
#' @examples
#' set.seed(1234)
#' # some test data
#' test_data <- rnorm(100, 0, 1)
#' # one sided less
#' my_t.test(test_data, alternative = "less", mu = 0)
#' # one sided greater
#' my_t.test(test_data, alternative = "greater", mu = 0)
#' # two sided
#' my_t.test(test_data, alternative = "two.sided", mu = 0)
#'
#' @export
my_t.test <- function(x, alternative, mu) {
  # input checking for x to be a numeric vector
  if(!is.numeric(x)) {
    stop('Input data must be a numeric vector.')
  }
  # input check for mu - must be numeric of length exactly 1.
  # (The original check only rejected length > 1, which let a
  # zero-length mu slip through and silently produce empty results.)
  if(!is.numeric(mu) || length(mu) != 1) {
    stop('the field \"mu\" must be numeric of length 1.')
  }
  # size of data set and degrees of freedom
  n <- length(x)
  degree_freedom <- n - 1
  # standard error of the sample mean
  std_err <- sd(x) / sqrt(n)
  # observed t statistic
  test_stat <- (mean(x) - mu) / std_err
  # p-value according to the requested alternative
  if(alternative == "two.sided") { # two sided t test
    p_val <- 2 * pt(abs(test_stat), degree_freedom, lower.tail = FALSE)
  } else if(alternative == "less") { # less t test
    p_val <- pt(test_stat, degree_freedom, lower.tail = TRUE)
  } else if(alternative == "greater") { # greater t test
    p_val <- pt(test_stat, degree_freedom, lower.tail = FALSE)
  } else {
    # wrong input ---> terminate and give error message
    stop('field \"alternative\" must be two.sided, less, or greater')
  }
  # creating the output list
  list("test_stat" = test_stat,
       "df" = degree_freedom,
       "alternative" = alternative,
       "p_val" = p_val)
}
|
bc85c87d114e50406b35776c3b62270b5fb1b2c6
|
2d88b1d2e158b33e1acdb1117d259b0fb110f378
|
/R/mix_model_help.R
|
d73b937654b711be4fa8650840433df2a4c5c4dd
|
[] |
no_license
|
shahcompbio/xseq
|
3634b1a628844286b4f2c905e6b7eb74f2b14736
|
4290a6a1f0b533f1a514616b5a781a916c5f3993
|
refs/heads/master
| 2021-08-14T14:24:45.396870
| 2017-11-16T00:41:11
| 2017-11-16T00:41:11
| 103,323,499
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,116
|
r
|
mix_model_help.R
|
# Some helper functions for mixture model analysis
#
# Date:
# Revised: February 15, 2015
#
# Author: Jiarui Ding <jiaruid@cs.ubc.ca>
# Department of Computer Science, UBC
# Department of Molecular Oncology, BC Cancer Agency
#
#==============================================================================
MixGaussPriorLoglik <- function(lambda, mu, sigma, prior, sigma.equal) {
  # Log-likelihood contribution of the conjugate priors in a Gaussian
  # mixture model (helper for the EM routines).
  #
  # When sigma.equal is TRUE a single shared variance (sigma[1]) is
  # penalised; otherwise each component has its own variance term.
  weighted.sq.dev <- prior$kapp * (mu - prior$mu)^2
  if (sigma.equal == TRUE) {
    loglik <- -(prior$dof + 2 + length(mu)) * log(sigma[1]) -
      ((prior$sigma[1])^2 + sum(weighted.sq.dev)) / (2 * sigma[1]^2)
  } else {
    loglik <- -(prior$dof + 3) * log(sigma) -
      ((prior$sigma)^2 + weighted.sq.dev) / (2 * sigma^2)
  }
  # Add the Dirichlet prior term on the mixing proportions.
  sum(loglik) + sum(log(lambda) * (prior$alpha - 1))
}
MixStudentPriorLoglik <- function(lambda, mu, sigma, prior, sigma.equal) {
  # The Student-t mixture uses exactly the same priors on lambda, mu and
  # sigma as the Gaussian mixture, so simply delegate.
  MixGaussPriorLoglik(lambda, mu, sigma, prior, sigma.equal)
}
#==============================================================================
MixGaussCheckConverge <- function(lambda, mu, sigma, loglik.hist, iter,
                                  max.iter, conv.tol, lambda.tol, verbose) {
  # Decide whether the EM iterations for the Gaussian mixture should stop.
  # Returns TRUE on convergence, or on any of the warning conditions below.
  delta <- loglik.hist[iter] - loglik.hist[iter - 1]
  if (verbose) {
    # Report raw (un-normalised) increase plus current parameters.
    cat("iteration = ", iter, " loglik increasing = ", delta,
        " loglik = ", loglik.hist[iter], "\n" )
    print(rbind(lambda, mu, sigma))
  }
  # Relative improvement of the log-likelihood.
  rel.incr <- delta / abs(loglik.hist[iter])
  if (abs(rel.incr) < conv.tol) {
    return(TRUE)
  }
  stop.now <- FALSE
  # Numeric issues: EM should never decrease the likelihood.
  if (rel.incr < 0) {
    cat("WARNING! The likelihood decreases", "\n")
    stop.now <- TRUE
  }
  # A vanishing mixing proportion signals a degenerate component.
  if (any(lambda < lambda.tol)) {
    cat(paste("WARNING! Mixture components < ", lambda.tol, sep=""), "\n")
    stop.now <- TRUE
  }
  # Give up once the iteration budget is exhausted.
  if (iter == max.iter) {
    cat("WARNING! NOT CONVERGE!", "\n")
    stop.now <- TRUE
  }
  return(stop.now)
}
#==============================================================================
MixStudentCheckConverge <- function(lambda, mu, sigma, nu, loglik.hist, iter,
                                    max.iter, conv.tol, lambda.tol, verbose) {
  # Decide whether the EM iterations for the Student-t mixture should stop.
  # Identical logic to MixGaussCheckConverge, but the verbose report also
  # includes the degrees-of-freedom parameters nu.
  delta <- loglik.hist[iter] - loglik.hist[iter - 1]
  if (verbose) {
    cat("iteration = ", iter, " loglik increasing = ", delta,
        " loglik = ", loglik.hist[iter], "\n" )
    print(rbind(lambda, mu, sigma, nu))
  }
  # Relative improvement of the log-likelihood.
  rel.incr <- delta / abs(loglik.hist[iter])
  if (abs(rel.incr) < conv.tol) {
    return(TRUE)
  }
  stop.now <- FALSE
  # Numeric issues: EM should never decrease the likelihood.
  if (rel.incr < 0) {
    cat("WARNING! The likelihood decreases", "\n")
    stop.now <- TRUE
  }
  # A vanishing mixing proportion signals a degenerate component.
  if (any(lambda < lambda.tol)) {
    cat(paste("WARNING! Mixture components < ", lambda.tol, sep=""), "\n")
    stop.now <- TRUE
  }
  # Give up once the iteration budget is exhausted.
  if (iter == max.iter) {
    cat("WARNING! NOT CONVERGE!", "\n")
    stop.now <- TRUE
  }
  return(stop.now)
}
|
cc0e0d19f1758ae13eea87709825147e0697719c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SpatialExtremes/examples/condmap.Rd.R
|
c78d3af51686f726be467ce5a1c9a459d5580a2f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,377
|
r
|
condmap.Rd.R
|
# Auto-extracted example for SpatialExtremes::condmap -- simulates a
# max-stable process, fits it, and draws a conditional 2-D return-level map.
library(SpatialExtremes)
### Name: condmap
### Title: Produces a conditional 2D map from a fitted max-stable process
### Aliases: condmap
### Keywords: hplot
### ** Examples
##Define the coordinate of each location
n.site <- 30
locations <- matrix(runif(2*n.site, 0, 10), ncol = 2)
colnames(locations) <- c("lon", "lat")
##Simulate a max-stable process - with unit Frechet margins
data <- rmaxstab(50, locations, cov.mod = "whitmat", nugget = 0, range =
                 2, smooth = 1)
##Now define the spatial model for the GEV parameters
param.loc <- -10 - 4 * locations[,1] + locations[,2]^2
param.scale <- 5 + locations[,2] + locations[,1]^2 / 10
param.shape <- rep(.2, n.site)
##Transform the unit Frechet margins to GEV
for (i in 1:n.site)
  data[,i] <- frech2gev(data[,i], param.loc[i], param.scale[i],
                        param.shape[i])
##Define a model for the GEV margins to be fitted
##shape ~ 1 stands for the GEV shape parameter is constant
##over the region
loc.form <- loc ~ lon + I(lat^2)
scale.form <- scale ~ lat + I(lon^2)
shape.form <- shape ~ 1
## 1- Fit a max-stable process
fitted <- fitmaxstab(data, locations, "whitmat", loc.form, scale.form,
                     shape.form, nugget = 0)
## 2- Conditional map over a 25 x 25 grid, conditioning on cond.coord
cond.coord <- c(5.1, 5.1)
condmap(fitted, cond.coord, seq(0, 10, length = 25), seq(0,10, length
                                                         =25), ret.per1 = 100, ret.per2 = 1.5)
# Mark the conditioning location on the map
points(t(cond.coord), pch = "*", col = 2, cex = 2)
|
f0e64614d344de6bd5875ce035ef1c1dfb84d1a7
|
81ef20863b388397a6158289141ec90109109bc3
|
/man/theme_myriad_new.Rd
|
08c7e0c35f7efe29a3de99999ecf533b5fd7621b
|
[] |
no_license
|
kathryntsai/myriad
|
ceb8ff67f22efafc90f40f24e8fb2d24813ae181
|
49d2abc0c12bbef2991ff2678c11fd3d845caff2
|
refs/heads/master
| 2022-12-22T03:49:17.901761
| 2020-09-26T19:49:25
| 2020-09-26T19:49:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,414
|
rd
|
theme_myriad_new.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myriad.r
\name{theme_myriad_new}
\alias{theme_myriad_new}
\title{theme_myriad_semi}
\usage{
theme_myriad_new(base_family = "Myriad Pro SemiCondensed", base_size = 11)
}
\arguments{
\item{base_family, base_size}{base font family and size}
}
\description{
A [ggplot2] theme using semibold variants of Adobe Myriad Pro, adapted from [hrbrthemes]'s roboto condensed.
}
\details{
You should [import_myriad_semi]() first and also install the fonts on your
system before trying to use this theme.
}
\examples{
\dontrun{
library(ggplot2)
library(dplyr)
# seminal scatterplot
ggplot(mtcars, aes(mpg, wt)) +
geom_point() +
labs(x="Fuel efficiency (mpg)", y="Weight (tons)",
title="Seminal ggplot2 scatterplot example",
subtitle="A plot that is only useful for demonstration purposes",
caption="Brought to you by the letter 'g'") +
theme_myriad_new()
# seminal bar chart
count(mpg, class) \%>\%
ggplot(aes(class, n)) +
geom_col() +
geom_text(aes(label=n), nudge_y=3) +
labs(x="Fuel efficiency (mpg)", y="Weight (tons)",
title="Seminal ggplot2 bar chart example",
subtitle="A plot that is only useful for demonstration purposes",
caption="Brought to you by the letter 'g'") +
theme_myriad_semi(grid="Y") +
ggplot2::theme(axis.text.y=ggplot2::element_blank())
}
}
\author{
Kieran Healy
}
|
df1d0aab8754267d2fe014110fe811c3b0659410
|
e03799f962db79cbf504f8fa368111f70aa00434
|
/cachematrix.R
|
1c744b57ff5e3b81f9d4049401b8e05e621b5c40
|
[] |
no_license
|
bogdanrus/ProgrammingAssignment2
|
19430c9a66a8ed66574a1c55289ef78b251326d7
|
1ffc51edda530aa5050bd8ae0fb5b759b3bef1da
|
refs/heads/master
| 2020-12-29T01:54:49.657733
| 2014-11-23T19:24:14
| 2014-11-23T19:24:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
r
|
cachematrix.R
|
## The following functions cache the inverse of a matrix (assuming the
## matrix provided is invertible) rather than computing it repeatedly.
## This saves time because the inverse is computed at most once, however
## often it is needed.
## (Fix: the original multi-line comments were missing the leading '#'
## on their continuation lines, which made the file fail to parse.)

## makeCacheMatrix creates a "matrix" object that can cache its inverse.
## The cached inverse starts as NULL and is filled in later (by
## cacheSolve, which uses solve()).
makeCacheMatrix <- function(x = matrix()) { ## input "x" will be an invertible matrix
    m <- NULL                  ## m caches the inverse of "x"; NULL until computed
    set <- function(y) {       ## replace the stored matrix and invalidate the cache
        x <<- y
        m <<- NULL
    }
    get <- function() x        ## returns the value of the original matrix
    setinverse <- function(solve) m <<- solve ## called by cacheSolve() to store the inverse
    getinverse <- function() m ## returns the cached inverse (or NULL)
    list(set = set, get = get, ## expose the four accessors as a list
         setinverse = setinverse,
         getinverse = getinverse)
}
## cacheSolve returns the inverse of the "matrix" object created by
## makeCacheMatrix, recomputing it only when no cached value exists.
## (Fix: the original multi-line comment was missing the leading '#'
## on its continuation lines, which made the file fail to parse.)
cacheSolve <- function(x, ...) { ## input "x" is an object created by makeCacheMatrix()
    m <- x$getinverse()          ## fetch the cached inverse, if any
    if(!is.null(m)) {            ## cache hit: announce and return immediately
        message("getting cached data")
        return(m)
    }
    data <- x$get()              ## cache miss: fetch the underlying matrix...
    m <- solve(data, ...)        ## ...compute its inverse...
    x$setinverse(m)              ## ...and store it for subsequent calls
    m                            ## Return a matrix that is the inverse of 'x'
}
|
cb915818071df7ba25ff8f0be72bece94c05baa2
|
62e9ec62c910bac5eeb77c6cf9719a1fb30f566e
|
/R/MatricesTrasnformations/ImportandMatrixP2.R
|
89a19f97cc7a3da333ebf80fed3c9bb36d07bb55
|
[
"MIT"
] |
permissive
|
bhupendpatil/Practice
|
25dd22ccca706359aabe135a3cbfb813e2611cef
|
cae96a3636527100e4b98880e5d3e9ce9399335a
|
refs/heads/master
| 2023-09-01T13:58:53.857928
| 2023-08-21T06:16:41
| 2023-08-21T06:16:41
| 112,185,402
| 5
| 1
|
MIT
| 2023-08-21T06:16:43
| 2017-11-27T11:02:33
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 634
|
r
|
ImportandMatrixP2.R
|
library(igraph)
library(Matrix)
# i) View the collected edge-list data ('onemode' is expected in the workspace)
View(onemode)
# Plot the directed network
netgraph = graph.data.frame(onemode, directed = T)
plot(netgraph,edge.arrow.size=.7)
# ii) Basic network matrix transformations
# adjacency matrix (6 x 6)
mode1 = matrix(netgraph[],6,6)
#get.adjacency(netgraph) #optional
# transpose of the adjacency matrix
# (fix: original line read "t(mode1))" -- an unbalanced parenthesis that
# was a syntax error)
t(mode1)
# Sum
# to get out degree
rowSums(mode1)
# to get in degree
colSums(mode1)
# Multiply Matrix
# for walk of distance 2 (to itself)
mode_square = mode1%*%mode1
mode_square
# for walk of distance 3 (to previous)
mode_cube = mode_square%*%mode1
mode_cube
# boolean 'AND' for distance 2
booleanAnd = mode1%&%mode1
booleanAnd
|
bdca832eb5707d789ca22e1ca30ce1977e5d7634
|
8caeff2957ae777eabbb17e92ac49a5f51f1937c
|
/Jeremie_Rehak_6:11:18.R
|
573c0c351165b873d8ee7a71d88e9e81d4b4bacb
|
[] |
no_license
|
ayusharora99/2018_Umpires
|
fbe382e7c3d1b6fbafeb2503cb9a9bffc26103db
|
ea344d33ad55e732a22c33ab155842834ded4422
|
refs/heads/master
| 2020-04-23T18:22:49.316852
| 2019-03-04T00:43:47
| 2019-03-04T00:43:47
| 171,364,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,711
|
r
|
Jeremie_Rehak_6:11:18.R
|
# 6/11/18 : Giants vs __OPPONENT__ : Jeremie Rehak
# 148 pitches were called strikes/balls
# The robot-ump called 38 of those pitches as called strikes & 110 as balls
# Jeremie Rehak called 37 of those pitches as called strikes & 111 as balls
# Accuracy: 95%
Jeremie_Rehak <- read.csv("~/Desktop/Analyzing Baseball Data with R/2018 Giants Umpires/Jeremie_Rehak_6:11:18.csv")
# Packages needed for Analysis
install.packages(c("e1071","caret","rpart"))
library(e1071)
library(caret)
library(rpart)
# Getting Familiar With Dataset & removing any NULL values
dim(Jeremie_Rehak)
names(Jeremie_Rehak)
is.na(Jeremie_Rehak)
colSums(is.na(Jeremie_Rehak))
Jeremie_Rehak = Jeremie_Rehak[,colSums(is.na(Jeremie_Rehak)) == 0]
dim(Jeremie_Rehak)
# Subsetting Relevant Info
drops = c("event","des","hit_location","bb_type","on_3b","on_2b","on_1b","hc_x","hc_y","hit_distance_sc","launch_speed","launch_angle","estimated_ba_using_speedangle","estimated_woba_using_speedangle","woba_value","woba_denom","launch_speed_angle","iso_value","babip_value")
Jeremie_Rehak = Jeremie_Rehak[ , !(names(Jeremie_Rehak) %in% drops)]
dim(Jeremie_Rehak)
# Splitting data into Training (80% of data) & Testing (20% of data) sets.
# BUG FIX: the original ranges overlapped -- row floor(0.8 * n) appeared in
# BOTH the training and test sets (data leakage), and the training index
# started at 0. The ranges below are disjoint and together cover every row.
n_train <- floor(0.8 * nrow(Jeremie_Rehak))
Jeremie_Rehak_train <- Jeremie_Rehak[seq_len(n_train), ]
dim(Jeremie_Rehak_train)
prop.table(table(Jeremie_Rehak_train$type))
Jeremie_Rehak_test <- Jeremie_Rehak[(n_train + 1):nrow(Jeremie_Rehak), ]
dim(Jeremie_Rehak_test)
prop.table(table(Jeremie_Rehak_test$type))
# Creating Decision Tree to Predict Umpire's Call
tree_model <-rpart(type~., data = Jeremie_Rehak_train)
plot(tree_model)
text(tree_model, use.n = T)
# Testing Decision Tree with Test Data
Prediction_UMP<-predict(tree_model, newdata = Jeremie_Rehak_test, type = 'class')
# Accuracy of Decision Tree created for specific Umpire
confusionMatrix(table(Prediction_UMP, Jeremie_Rehak_test$type))
# Copy Pitch Calls into another data set and adjust type to the electronic strike zone calls
# Seperate Ball & Strike Types
Jeremie_Rehak_Strikes = subset(Jeremie_Rehak, Jeremie_Rehak$type == "S")
Jeremie_Rehak_Balls = subset(Jeremie_Rehak, Jeremie_Rehak$type == "B")
# Create new column for adjusted call based on electronic strike zone on Umpire's called strikes
# (plate_x < 0.833 & $plate_x > -0.833) & ($plate_z > sz_bot & plate_z < sz_top) == S
Jeremie_Rehak_Strikes$AdjustedCall = ifelse((Jeremie_Rehak_Strikes$plate_x < 0.833 & Jeremie_Rehak_Strikes$plate_x > -0.833) & (Jeremie_Rehak_Strikes$plate_z > Jeremie_Rehak_Strikes$sz_bot & Jeremie_Rehak_Strikes$plate_z < Jeremie_Rehak_Strikes$sz_top), 'S', 'B')
table(Jeremie_Rehak_Strikes$AdjustedCall)
# Create new column for adjusted call based on electronic strike zone on Umpire's called balls
# (plate_x > 0.833 | $plate_x < -0.833) | ($plate_z < sz_bot | plate_z > sz_top) == B
Jeremie_Rehak_Balls$AdjustedCall = ifelse((Jeremie_Rehak_Balls$plate_x > 0.833 | Jeremie_Rehak_Balls$plate_x < -0.833)|(Jeremie_Rehak_Balls$plate_z < Jeremie_Rehak_Balls$sz_bot | Jeremie_Rehak_Balls$plate_z > Jeremie_Rehak_Balls$sz_top),'B','S')
table(Jeremie_Rehak_Balls$AdjustedCall)
# Merge to create new dataset
Jeremie_Rehak_AdjustedCalls = rbind(Jeremie_Rehak_Strikes,Jeremie_Rehak_Balls)
# Re-create Decision Tree but this time with whole Data rather than just training set.
tree_model <-rpart(type~., data = Jeremie_Rehak)
plot(tree_model)
text(tree_model, use.n = T)
# Predict using Umpire's Decision Tree on the AdjustedCalls dataset & compare calls with adjusted_call to find Accuracy
Prediction_UMP<-predict(tree_model, newdata = Jeremie_Rehak_AdjustedCalls, type = 'class')
confusionMatrix(table(Prediction_UMP,Jeremie_Rehak_AdjustedCalls$AdjustedCall))
|
274a168635bb495cf55b70fcfe745ae59e462b3c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/aire.zmvm/examples/download_meteorological.Rd.R
|
febb8c8aecc8ae8eccf8c0e62c7fb2e26c51d03c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
download_meteorological.Rd.R
|
library(aire.zmvm)
### Name: download_meteorological
### Title: Download Meteorological Data Archives
### Aliases: download_meteorological
### ** Examples
## Not run:
##D head(download_meteorological(2017))
## End(Not run)
|
f78d6d2c9ac202da1f5a6a6600bbdf9963622639
|
8869198b3f0a395a2d60c0d49e58dc02b9d161d3
|
/R/stack_files.R
|
09ba2e9a4082550078fabf97942c5fe8031da5c6
|
[
"MIT"
] |
permissive
|
ahasverus/argostools
|
a62c1c80e4e331f5468d0b3e4f691f99d00584a7
|
beabb3bda0157ca4d0f5391cf39e616776c5ae13
|
refs/heads/master
| 2020-06-05T11:26:21.454534
| 2020-02-09T12:18:37
| 2020-02-09T12:18:37
| 33,409,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,027
|
r
|
stack_files.R
|
#' @title Import and clean raw Argos locations data (multiple files)
#' @description Import and clean raw Argos locations data(multiple files).
#'
#' @param files Vector of filenames to read
#'
#' @return A data frame with 7 variables
#'
#' @export
#'
#' @examples
#' ## See vignette
#'
stack_files <- function(files) {
  # Read and clean each file, collecting the per-file data frames in a
  # preallocated list so they can be bound once at the end.
  # PERF FIX: the original grew `locs` with rbind() inside the loop, which
  # copies the accumulated frame on every iteration (O(n^2)).
  pieces <- vector("list", length(files))
  for (k in seq_along(files)) {
    data <- read_argos(files[[k]])
    # Extract PTTs: unique platform ids, dropping any containing letters
    ptt <- sort(unique(as.character(data[ , "platform"])))
    if (length(grep("[[:alpha:]]", ptt)) > 0) {
      ptt <- ptt[-grep("[[:alpha:]]", ptt)]
    }
    # Remove rows whose platform is not a recognised PTT
    pos <- which(!(data[ , "platform"] %in% ptt))
    if (length(pos) > 0) {
      data <- data[-pos, ]
    }
    # Format fields
    data <- format_date(data)
    data <- format_coords(data)
    # Remove NAs
    data <- delete_nas(data)
    pieces[[k]] <- data
  }
  # Bind all cleaned pieces; keep the original empty-data.frame result
  # when no files were supplied.
  locs <- if (length(pieces) > 0) do.call(rbind, pieces) else data.frame()
  # Type conversion: store every column as character
  for (i in seq_len(ncol(locs))) {
    locs[ , i] <- as.character(locs[ , i])
  }
  # Remove duplicates
  locs <- rm_duplicates(locs)
  return(locs)
}
|
c94d40ca67cb09cdaaada99c8fbd684f53a5ea2d
|
9123ac4623942513233434f8709f18803e13e9e2
|
/Common/apk.R
|
0ba4051ec454b515f2d72e2a1e82e43e1de589f5
|
[] |
no_license
|
ttvand/Facebook-V
|
fc41c0c2801feeeb25163d07237404882d5f722e
|
0bb716574fcfca6c4fe3113a210649a9213b0c38
|
refs/heads/master
| 2020-12-25T06:02:36.159672
| 2019-06-07T16:14:05
| 2019-06-07T16:14:05
| 62,836,018
| 137
| 81
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,664
|
r
|
apk.R
|
# Adapted from: https://github.com/benhamner/Metrics/blob/master/R/R/metrics.r#L181
# Compute the mean average precision at 3
# Mean average precision at 3. Each observation scores 1, 1/2, or 1/3
# depending on which column of `predicted` matches `actual` (a later
# column overrides an earlier one, matching the reference implementation).
# Set returnScores=TRUE to get the per-observation scores instead of the mean.
map3 <- function(actual, predicted, returnScores=FALSE)
{
  scores <- numeric(length(actual))
  for (col in 1:3) {
    hit <- predicted[, col] == actual
    scores[hit] <- 1 / col
  }
  if (returnScores) {
    scores
  } else {
    mean(scores)
  }
}
#' Compute the average precision at k
#'
#' This function computes the average precision at k
#' between two sequences
#'
#' @param k max length of predicted sequence
#' @param actual ground truth set (vector)
#' @param predicted predicted sequence (vector)
#' @export
apk <- function(k, actual, predicted)
{
  # Average precision at k: sum the running precision at each position
  # where a new (non-duplicate) correct prediction appears, then normalise
  # by min(length(actual), k).
  limit <- min(k, length(predicted))
  hits <- 0
  total <- 0
  for (pos in 1:limit)
  {
    candidate <- predicted[pos]
    is_repeat <- pos > 1 && candidate %in% predicted[0:(pos - 1)]
    if (candidate %in% actual && !is_repeat)
    {
      hits <- hits + 1
      total <- total + hits / pos
    }
  }
  total / min(length(actual), k)
}
#' Compute the mean average precision at k
#'
#' This function computes the mean average precision at k
#' of two lists of sequences.
#'
#' @param k max length of predicted sequence
#' @param actual list of ground truth sets (vectors)
#' @param predicted list of predicted sequences (vectors)
#' @export
mapk <- function (k, actual, predicted, returnScores=FALSE)
{
  # Mean average precision at k over a collection of observations.
  # `actual` is indexed element-wise; `predicted` is indexed row-wise,
  # so it is expected to be a matrix with one row per observation.
  if (length(actual) == 0 || length(predicted) == 0)
  {
    return(0.0)
  }
  scores <- numeric(length(actual))
  for (idx in seq_along(scores))
  {
    scores[idx] <- apk(k, actual[idx], predicted[idx, ])
  }
  if (returnScores) {
    scores
  } else {
    mean(scores)
  }
}
|
e49c36743999dfcb9afc4d2f8a2d76bf7186525f
|
3703992a3018bb64ba40a7aa01c6f06ff7158266
|
/plot3.R
|
fb3a8dbb574cbc2cc8ffb059a5651d0560cd507b
|
[] |
no_license
|
abhilash-r/All-R-Programming
|
c55899e0389bf2f2d18df1101389f8af8326d706
|
574e8ed520b0dd08dea0c1ca9b908321b7e3d272
|
refs/heads/master
| 2016-09-10T12:20:32.609471
| 2015-07-12T22:17:36
| 2015-07-12T22:17:36
| 38,980,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
r
|
plot3.R
|
## Fetches all the dataset
data <- read.csv("household_power_consumption.txt", na.string="?", sep=";")
## Take data between "2007-02-01" & "2007-02-02"
data <- data[(data$Date=="1/2/2007" | data$Date=="2/2/2007"),]
# Combine Date and Time
data$DateTime <- strptime(paste(data$Date, data$Time, sep=" "),
format="%d/%m/%Y %H:%M:%S")
# Plot the graph
plot(data$DateTime, data$Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
lines(data$DateTime, data$Sub_metering_2, col="red")
lines(data$DateTime, data$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
7fd3954ed7b1be6230996332c514bda2a89f9b7e
|
1d9b89efbe08226b1598ce86e3e95d603d4552f7
|
/man/renderSubNetSimple.Rd
|
a9a54d4f09e2f120670e37329aee4816f198d667
|
[
"Apache-2.0"
] |
permissive
|
laderast/surrogateMutation
|
1e84f05c29369387c40c4a1bb86d49c06d7c4e34
|
50987f20ae56a72495996acfb8185b3b9c215c84
|
refs/heads/master
| 2021-01-22T05:06:47.613979
| 2018-01-06T05:32:31
| 2018-01-06T05:32:31
| 34,889,234
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,463
|
rd
|
renderSubNetSimple.Rd
|
\name{renderSubNetSimple}
\alias{renderSubNetSimple}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
renderSubNetSimple
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
renderSubNetSimple(NodeName, sampleName, GeneName, intome = intome, gisticCopyCalls = NULL, resultObj, fileOut = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{NodeName}{
%% ~~Describe \code{NodeName} here~~
}
\item{sampleName}{
%% ~~Describe \code{sampleName} here~~
}
\item{GeneName}{
%% ~~Describe \code{GeneName} here~~
}
\item{intome}{
%% ~~Describe \code{intome} here~~
}
\item{gisticCopyCalls}{
%% ~~Describe \code{gisticCopyCalls} here~~
}
\item{resultObj}{
%% ~~Describe \code{resultObj} here~~
}
\item{fileOut}{
%% ~~Describe \code{fileOut} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (NodeName, sampleName, GeneName, intome = intome, gisticCopyCalls = NULL,
resultObj, fileOut = NULL)
{
library(Rgraphviz)
library(BioNet)
if (!is.null(gisticCopyCalls)) {
cellFrame <- gisticCopyCalls[, c("Gene.Symbol", "NodeName",
sampleName)]
}
mutCopyFrame <- resultObj$mutCopyFrames[[sampleName]]
cellResults <- resultObj$cellResults
nodenet <- c(NodeName, intersect(as.character(inEdges(NodeName,
intome)[[1]]), as.character(mutCopyFrame$NodeName)))
brcamut <- intersect(nodenet, as.character(mutCopyFrame[mutCopyFrame$mutations !=
0, "NodeName"]))
labeltab <- geneIntTable[geneIntTable$NodeName \%in\% nodenet,
]
labels <- as.character(labeltab$Gene)
names(labels) <- labeltab$NodeName
nodenetwork <- subNetwork(nodenet, intome)
nodeRenderInfo(nodenetwork) <- list(shape = "circle", iheight = 0.5,
iwidth = 0.5, fixedsize = FALSE, label = as.list(labels))
RPPAshape <- list()
RPPAshape[[NodeName]] <- "diamond"
nodeRenderInfo(nodenetwork) <- list(shape = RPPAshape)
if (!is.null(gisticCopyCalls)) {
nodecopyframe <- cellFrame[as.character(cellFrame$NodeName) \%in\%
nodenet, ]
rownames(nodecopyframe) <- nodecopyframe$NodeName
colnames(nodecopyframe) <- c("GeneName", "NodeName",
"copyStatus")
}
else {
nodecopyframe <- mutCopyFrame[, c("GeneName", "NodeName",
"copyStatus")]
}
nodecolors <- list()
for (nd in rownames(nodecopyframe)) {
ndcol <- "white"
if (nodecopyframe[nd, "copyStatus"] > 0) {
ndcol <- "pink"
}
if (nodecopyframe[nd, "copyStatus"] < 0) {
ndcol <- "lightgreen"
}
nodecolors[[nd]] <- ndcol
}
for (nd in brcamut) {
nodecolors[[nd]] <- "lightblue"
}
cellRes <- cellResults[[sampleName]]
NodDegree <- cellRes[cellRes$NodeName == NodeName, "degree"]
NeighborMuts <- cellRes[cellRes$NodeName == NodeName, "neighborVec"]
pval <- cellRes[cellRes$NodeName == NodeName, "pvalue"]
nodenetwork <- layoutGraph(nodenetwork)
if (NeighborMuts > 0 & length(nodes(nodenetwork)) > 1) {
nodeRenderInfo(nodenetwork) <- list(fill = nodecolors)
if (is.null(fileOut)) {
fileOut <- paste(sampleName, "-", GeneName, ".svg",
sep = "")
}
svg(height = 7, width = 7, filename = fileOut)
renderGraph(nodenetwork)
plotTitle <- paste(NodeName, " (", sampleName, " ", ", p=",
pval, ", n=", NeighborMuts, ", d=", NodDegree, ")",
sep = "")
title(plotTitle)
dev.off()
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
26a1456103f8bf493f5a9ad4976cd16ce3e42dbe
|
c0594b6c8ad34662469cb3369cda7bbbf959ae69
|
/man/convert_to_numeric.Rd
|
371401712ba8f16e4d3637445e2bb606023014b8
|
[
"CC-BY-4.0"
] |
permissive
|
bgctw/cosore
|
289902beaf105f91faf2428c3869ac6bba64007f
|
444f7c5ae50750ec7f91564d6ab573a8dc2ed692
|
refs/heads/master
| 2022-10-22T13:48:37.449033
| 2020-06-17T05:46:19
| 2020-06-17T05:46:19
| 269,271,512
| 0
| 0
|
CC-BY-4.0
| 2020-06-17T05:46:20
| 2020-06-04T05:47:05
| null |
UTF-8
|
R
| false
| true
| 498
|
rd
|
convert_to_numeric.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{convert_to_numeric}
\alias{convert_to_numeric}
\title{Convert a vector to numeric, giving an informative warning.}
\usage{
convert_to_numeric(x, name, warn = TRUE)
}
\arguments{
\item{x}{Vector}
\item{name}{Name of object (for warning)}
\item{warn}{Warn if non-numeric? Logical}
}
\value{
A numeric vector.
}
\description{
Convert a vector to numeric, giving an informative warning.
}
\keyword{internal}
|
8dd983f166a226e70a1ac4e4088a9643750c655f
|
9fbd7cafab56b8cb58ca7385a726a0070d9e050d
|
/man/sen_senator.Rd
|
c101bf43e503acd9d2a8c1213bf438bba2824dca
|
[] |
no_license
|
duarteguilherme/congressbr
|
6f343935a7734dfac70c6794a031db614dafd248
|
e9f05428e877f56d31966b14ca00b4ec825fabf5
|
refs/heads/master
| 2022-11-22T05:40:27.177434
| 2020-07-14T23:20:22
| 2020-07-14T23:20:22
| 83,827,931
| 17
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,397
|
rd
|
sen_senator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sen_senator.R
\name{sen_senator}
\alias{sen_senator}
\title{Downloads and tidies personal information on senators from the
Federal Senate}
\usage{
sen_senator(
id = NULL,
affiliations = TRUE,
mandates = TRUE,
absences = FALSE,
ascii = TRUE
)
}
\arguments{
\item{id}{\code{integer}. Unique id for a senator. A dataframe of these
is available from \code{sen_senator_list()}.}
\item{affiliations}{\code{logical}. If TRUE, the default, returns information
on party affiliation.}
\item{mandates}{\code{logical}. If TRUE, the default, returns information on
terms served by the senator.}
\item{absences}{\code{logical}. If TRUE, returns information on leaves of
absence taken by the senator.}
\item{ascii}{\code{logical}. If TRUE, certain strings are converted to ascii
format.}
}
\value{
A tibble, of classes \code{tbl_df}, \code{tbl} and \code{data.frame}.
}
\description{
Downloads and tidies personal information on senators from the
Federal Senate.
}
\note{
Setting \code{affiliations}, \code{mandates} and particularly
\code{absences} to \code{TRUE} will result in a rather bloated data frame.
}
\examples{
\donttest{
benedito <- sen_senator(id = 3823)
aecio <- sen_senator(id = 391, absences = TRUE)
juc <- sen_senator(73)
}
}
\author{
Robert Myles McDonnell, Guilherme Jardim Duarte & Danilo Freire.
}
|
96279dfc630496b49271a8a0396b0bb1bc6c78ed
|
0767f577fc78e13f2bad7c0566f877898214c161
|
/Source_docker_code/CIBERSORTxCode_fractions/R_modules/run_adjust_w_pseudo.R
|
0e1fac74d94c937bfa8381cb372988fcaf3809cd
|
[] |
no_license
|
toeric/Parallel_CIBERSORTx
|
174f88a505a2af4507d38ac724b3f74a82585c1b
|
7047ff68a6ca7cee8d7c3322ff1088e3193b7cf3
|
refs/heads/main
| 2023-05-27T07:22:48.454618
| 2021-06-15T11:09:38
| 2021-06-15T11:09:38
| 366,675,061
| 4
| 1
| null | 2021-06-15T10:50:45
| 2021-05-12T10:18:07
|
R
|
UTF-8
|
R
| false
| false
| 4,193
|
r
|
run_adjust_w_pseudo.R
|
suppressWarnings(suppressMessages(library(e1071)))
suppressWarnings(suppressMessages(library(parallel)))
suppressWarnings(suppressMessages(library(preprocessCore)))
suppressWarnings(suppressMessages(library(colorRamps)))
suppressWarnings(suppressMessages(library(bapred)))
suppressWarnings(suppressMessages(library(data.table)))
suppressWarnings(suppressMessages(library(gplots)))
suppressWarnings(suppressMessages(library(nnls)))
args <- commandArgs(trailingOnly = TRUE)
filenum <- type.convert(args[1])
fracs_null <- type.convert(args[2])
fractions_file <- args[3]
xl_file <- args[4]
x_file <- args[5]
rnames_file <- args[6]
non_adj_file <- args[7]
statefile <- args[8]
seed <- type.convert(args[9])
RNGtype <- type.convert(args[10])
out_mixtures_adjusted <- args[11]
out_simfracs <- args[12]
out_adj_save <- args[13]
# Load existing RNG state or create a new one so repeated runs are
# reproducible. L'Ecuyer-CMRG supports reproducible parallel streams;
# the plain set.seed() path is used when RNGtype is 0.
if(RNGtype == "0") {
set.seed(seed)
} else {
set.seed(seed, "L'Ecuyer-CMRG")
}
# If a saved state file exists, restore .Random.seed from it; the state is
# written back to the same file at the end of this script so consecutive
# invocations continue a single random stream.
if(file.exists(statefile))
.Random.seed <- scan(statefile, what=integer(), quiet=TRUE)
fractions <- read.table(fractions_file,sep="\t",header=T,check.names=F)
xl <- scan(xl_file, character(), quote = "", sep="\t", quiet=T)
x <- data.table(read.table(x_file,sep="\t",header=T, check.names=F))
rnames <- scan(rnames_file, character(), quote = "", sep="\t", quiet=T)
non.adj <- read.table(non_adj_file,sep="\t",header=T, row.names=1, check.names=F)
# Standard deviation used when sampling pseudo-mixture fractions:
# twice the supplied mean fraction.
st.dev <- function(x) {
  x * 2
}
# Create same number of pseudomixtures as number of mixtures
for(i in 1:filenum){
if (fracs_null){
simfrac <- fractions
# Create pseudomixtures by sampling each cell type from a distribution with the cell type fractions as mean and standard deviation set to 2*fractions
simfrac <- pmax(0,sapply(1:length(unique(xl)), function(i) rnorm(mean=as.numeric(fractions[i]),sd=st.dev(as.numeric(fractions[i])),1)))
while (identical(simfrac, rep(0, length(simfrac)))) {
simfrac <- pmax(0,sapply(1:length(unique(xl)), function(i) rnorm(mean=as.numeric(fractions[i]),sd=st.dev(as.numeric(fractions[i])),1)))
}
} else {
simfrac <- fractions[i,]
}
simfrac <- pmax(2,round(length(xl)*simfrac/sum(simfrac)))
if(i==1) simfracs <- simfrac
else{simfracs <- rbind(simfracs,simfrac)}
samples <- sort(as.numeric(unlist(sapply(1:length(unique(xl)), function(i) sample(which(xl==names(table(xl))[i]),simfrac[i],replace=T)))))
xsum <- x[,Reduce('+',.SD),.SDcols=samples]
names(xsum) <- rnames
inter <- intersect(rnames,rownames(non.adj))
xsum <- xsum[inter]
if(i==1) {
m <- xsum
names(m) <- inter
}
else{
if(i==2) {
inter2 <- intersect(names(m), inter)
m <- cbind(m[inter2],xsum[inter2])
}
if(i>2) {
inter2 <- intersect(rownames(m), inter)
m <- cbind(m[inter2,],xsum[inter2])
}
}
}
colnames(simfracs) = names(table(xl))
m[is.na(m)] <- 1
inter <- intersect(rownames(m),rownames(non.adj))
m <- cbind(m[inter,], non.adj[inter,])
tmpcol <- colnames(m)
rs<-colSums(m)
rs_med<-median(rs)
tmp <- rownames(m)
m <- sapply(1:ncol(m), function(i) m[,i]*(1e6/rs[i]))
rownames(m) <- tmp
colnames(m) <- tmpcol
vars <- apply(m,1,var)
m<- m[which(vars>0),]
#learn parameters
dd<-log(m[,c(1:c(filenum+filenum))]+1,2)
dd[is.na(dd)] = 0
cbat=combatba(x=t(dd),batch=as.factor(c(rep(1,filenum),rep(2,filenum))))
#apply parameters to rest of mixture
dd<-log(m[,c(1:c(filenum+filenum))]+1,2)
dd[is.na(dd)] = 0
sm_adjust <- t(combatbaaddon(params=cbat,x=t(dd),batch=as.factor(c(rep(1,filenum),rep(2,filenum)))))
#write to disk
adjusted <- adj.save <- (2^sm_adjust)-1
adjusted <- cbind(rownames(adjusted),adjusted)
colnames(adjusted) = c("GeneSymbol", c(1:filenum), colnames(non.adj))
write.table(adjusted[,c(1, (filenum+2):ncol(adjusted))], out_mixtures_adjusted, sep="\t", quote=F, row.names=F,col.names=T)
write.table(simfracs, out_simfracs, sep="\t", quote=F, row.names=T,col.names=T)
write.table(adj.save, out_adj_save, sep="\t", quote=F, row.names=T,col.names=T)
# preserve RNG state for the next script call
cat(paste(.Random.seed, collapse='\t'), file=statefile)
|
7eb5f656391af7d3250220802c610370857b290e
|
c68a68122d541ae95052a21f9abbc8dc9f551a8d
|
/dataprep/dataprep-template.R
|
c72df1d17301676eee8675fa8414e76142ee99c2
|
[] |
no_license
|
iwensu0313/us-aquaculture
|
11ed8c76e2df8f81166b610b83ad23199542772b
|
b92eaac9bf9d58961b978d609a91313b2bf603f4
|
refs/heads/master
| 2021-07-20T18:21:55.758307
| 2020-05-10T00:07:05
| 2020-05-10T00:07:05
| 154,531,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
dataprep-template.R
|
## DATA PREP TITLE
## Data Source
# Source Name
# Link:
# Downloaded:
# Timeseries:
# Format:
# Notes here
## Summary
library(tidyverse)
library(plotly)
library(viridis)
library(validate)
## Read in Data
## Tidy: Select subset, clean up data tables, fix strings, fix class
## Wrangle: Join, combine,
## Summarize
## Plotting
|
6a1ecb00edf9b0a39e9dd022922d35c867c721b7
|
69e60c5f2db28cda637d98e39ce90c97d9aeb6d8
|
/src/roc.R
|
9c7e216ba50b1d335b4b233b0d4cb48b0b93a672
|
[] |
no_license
|
fivebillionmph/m223b-project1a
|
d7abdf3fa58e07aa24a6146dd650f647afc4633d
|
34cd825910540a5e78b526205ab4600e7dc4cd6f
|
refs/heads/master
| 2022-12-03T08:02:44.056176
| 2019-06-21T04:01:48
| 2019-06-21T04:01:48
| 193,025,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
roc.R
|
# roc.R -- plot an ROC curve and print the AUC for a predictions file.
# Usage: Rscript roc.R <file>, where <file> is tab-delimited (header row
# expected by read.delim) with the true label in column 1 and the
# predicted score in column 2.
args = commandArgs(trailingOnly=TRUE)
if(length(args) != 1) {
stop("need file name")
}
library(ROCR)
f = args[1]
d = read.delim(f)
# Column 2 = predicted score, column 1 = true class label
pred = prediction(d[,2], d[,1])
perf = performance(pred, "tpr", "fpr")
# Area under the ROC curve
perf.auc = performance(pred, "auc")
print(perf.auc)
plot(perf)
|
af8b3e71cbc5904b284139b2f3d5e4450dcd3d29
|
8eb4b0e89610dc7fdcc68463bdc57e5adbf10849
|
/R/program_on_path.R
|
4ab42c3730ca4ba030aaa2bb0fd10fd7f9937c56
|
[] |
no_license
|
cmcouto-silva/snpsel
|
f0b1fa1675fb10e15329cae874441f241f3a5e15
|
e3898308f5b0b2ae071cefdc111f5334a0319cf7
|
refs/heads/master
| 2023-05-07T22:19:08.444440
| 2021-06-06T15:55:12
| 2021-06-06T15:55:12
| 127,948,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168
|
r
|
program_on_path.R
|
#' Check that an external program is available on the system path.
#'
#' Throws an error when \code{Sys.which()} cannot locate the executable;
#' otherwise returns invisibly (called for its side effect).
#'
#' @param program Name of the executable to look up on the PATH.
#' @export
program_on_path <- function(program) {
  # Sys.which() returns "" when the executable cannot be located.
  # (The original wrapped this scalar test in ifelse(), which is both
  # unnecessary and fragile for a single-program check.)
  if (!nzchar(Sys.which(program))) {
    stop(program, " not installed on system path!")
  }
}
|
8e2412f7cb0e7f0aedfcb5a6ac2fdcc39a16a937
|
02617474275bde6a99bfee3d1e899056a806aba4
|
/amylose_oct_2021.R
|
04e45b9976121c64185df8e17a04736fa256a231
|
[] |
no_license
|
quadram-institute-bioscience/SSIIIa-Rscripts
|
97f850c6e9f4d31b39ad32222e3015924f05c3d2
|
5bc3df58787a8476d39348aa401edd29dff0da74
|
refs/heads/master
| 2022-02-25T13:12:48.242669
| 2022-02-10T00:20:24
| 2022-02-10T00:20:24
| 254,044,889
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,371
|
r
|
amylose_oct_2021.R
|
library(readxl)
library(data.table)
library(ggplot2)
library(tidyr)
datAmylose <- read_excel("data/2021-09-23AmyloseContent.xlsx",
sheet="Gene Average",
range="A5:C161")
names(datAmylose) <- c("plant", "id", "amylose")
datAmylose <- separate(datAmylose, plant, c("Genotype", "Cross1", "Cross2"),sep = " ")
datAmylose <- separate(datAmylose, Genotype, c("strain", "mutant"),sep = "-")
setDT(datAmylose)
datAmylose[, CrossID := as.numeric(factor(Cross1)), by=.(strain, mutant)]
View(datAmylose)
(gAMy <- ggplot(datAmylose,aes(mutant, amylose)) +
geom_point(aes(shape=factor(CrossID), group=factor(CrossID)), position=position_dodge(width=.3)) +
coord_flip() + scale_y_log10() +
facet_grid(rows=vars(strain)) +
theme_bw())
## NOTE(review): plot_layout() comes from the patchwork package, which is
## never loaded in this script, and gRS / datRS are not defined here --
## presumably they come from a companion resistant-starch script run in the
## same session. Confirm both before running this file standalone.
gRS + gAMy + plot_layout(guides="collect")
datMerged <- merge(datAmylose, datRS)
datMergedAverage <- datMerged[, .(meanAmylose=mean(amylose), meanRS=mean(RSper100g)),by=.(mutant, CrossID, strain)
]
## There is no correlation between amylose and starch at the individual sample level.
ggplot(datMerged, aes(amylose, RSper100g)) + geom_point(aes(shape=mutant, color=mutant)) +
scale_shape_manual(values=1:10) + scale_x_log10() + scale_y_log10() + theme_bw() +
scale_color_manual(values=c("red", "black"))+ facet_wrap(~paste(strain,Cross1))
## Within plant there is no evidence that amylose predicts resistant starch.
summary(lm(data=datMerged , log(RSper100g) ~ log(amylose) + mutant*Cross1))
## Similarly among the wild types.. but there is no variation in amylose at the WT level so..
library(ggrepel)
ggplot(datMergedAverage, aes(meanAmylose, meanRS)) +
geom_point(aes(color=mutant), size=2) +
scale_shape_manual(values=10:19) + scale_x_log10() + scale_y_log10() + theme_bw() +
scale_color_manual(values=c("red", "black")) +
geom_line(aes(group=paste(strain,CrossID)))
ggplot(datMergedAverage, aes(meanAmylose, meanRS)) +
geom_point(aes(color=mutant)) +
scale_shape_manual(values=10:19) + scale_x_log10() + scale_y_log10() + theme_bw() +
scale_color_manual(values=c("red", "black")) +
geom_label_repel(aes(label=paste(strain,mutant)))
## Does the difference in amylose caused by the treatment predict the difference in starch?
datMerged$amyloseAVG <- ave(datMerged$amylose, datMerged$CrossID, datMerged$strain, FUN=mean)
## Mixed models: does amylose (per-sample or per-cross average) predict
## resistant starch within the abd mutants, with a random intercept per cross?
## NOTE(review): lmer() is from lme4, which this script never loads -- it must
## already be attached in the session. Also, "dat=" relies on partial matching
## of the "data" argument; spelling it out would be safer.
summary(lmer(dat=datMerged[mutant=="abd"], log(RSper100g) ~ log(amylose) + (1|strain:CrossID)))
summary(lmer(dat=datMerged[mutant=="abd"], log(RSper100g) ~ log(amyloseAVG) + (1|strain:CrossID)))
summary(lm(dat=datMergedAverage[mutant=="abd"], log(meanRS) ~ log(meanAmylose) ))
##### WT ONLY.
summary(lmer( data=datMerged[mutant=="wt"], log(amylose) ~ (1|strain/CrossID) ))
summary(lmer( data=datMerged[mutant=="wt"], amylose ~ (1|strain/CrossID) ))
## Amylose does not vary across WT strains
summary(lm( data=datMerged[mutant=="wt"], log(amylose) ~ paste0(strain,CrossID) ))
## RS does vary across WT strains
summary(lm( data=datMerged[mutant=="wt"], log(RSper100g) ~ paste0(strain,CrossID) ))
ggplot(datRS[mutant=="wt"], aes(gsub(pattern = "x","\n",Cross1), RSper100g)) + geom_point() +
facet_grid(cols=vars(strain), space="free", scale="free") + scale_y_log10() +
labs(x="Strain", y="Resistant starch") + theme_bw()
ggplot(datMerged[mutant=="wt"], aes(gsub(pattern = "x","\n",Cross1), amylose)) + geom_point() +
facet_grid(cols=vars(strain), space="free", scale="free") + scale_y_log10() +
labs(x="Strain", y="Resistant starch") + theme_bw()
## There is no evidence that amylose varies across WT strains,
## but a lot of evidence that resistant starch varies across WT strains.
#### Is there a correlation between the increase in amylose and the increase in RS?
dmalong <- melt(datMergedAverage, id.vars=c("strain", "mutant", "CrossID"))
dmawide <- dcast(dmalong, CrossID+strain ~ mutant + variable)
ggplot(dmawide , aes(wt_meanAmylose, abd_meanAmylose)) + geom_point() +
coord_fixed() + geom_abline(slope=1, intercept=0)+
scale_y_log10() + scale_x_log10()
ggplot(dmawide , aes(wt_meanRS, abd_meanRS)) + geom_point() +
coord_fixed() + geom_abline(slope=1, intercept=0) +
scale_y_log10() + scale_x_log10()
summary(lm(data=dmawide , log(abd_meanRS) ~ log(abd_meanAmylose) + log(wt_meanRS) + log(wt_meanAmylose)))
dmawide
dmawide2 <- dmawide[, .(abd_meanAmylose=mean(abd_meanAmylose),
wt_meanAmylose=mean(wt_meanAmylose),
abd_meanRS=mean(abd_meanRS),
wt_meanRS=mean(wt_meanRS)), by=strain]
dmawide2
summary(lm(data=dmawide2 , log(abd_meanRS) ~ log(abd_meanAmylose) + log(wt_meanRS) + log(wt_meanAmylose)))
summary(lm(data=dmawide2 , log(abd_meanRS) ~ log(wt_meanRS) + log(abd_meanAmylose) ))
summary(lm(data=dmawide2 , log(abd_meanRS) ~ log(abd_meanAmylose) ))
dmawide2$difRS=dmawide2$abd_meanRS - dmawide2$wt_meanRS
dmawide2$difAmy=dmawide2$abd_meanAmylose - dmawide2$wt_meanAmylose
## For every increase in log amylose there is an increase in log RS.
summary(lm(data=dmawide2 , log(difRS) ~ log(abd_meanAmylose) ))
## If we try to correlate the average change in RS with the change in amylose at the strain level..
summary(lm(data=dmawide2 , log(difRS) ~ log(difAmy) ))
summary(lm(data=dmawide2 , difRS ~ difAmy ))
ggplot(dmawide2, aes(difRS, difAmy)) + geom_point()
|
55994903552cc0c9801a96d4c11ee6d96675030e
|
67af11952ff7ef35d9cdbf490351abfb020b34da
|
/man/pm_gh_config.Rd
|
dc14f88746623009ca71e3c9950feac26c5637d0
|
[] |
no_license
|
quietsnooze/pmpackage
|
1c68d1f1aa70c53a81fc1abc2e0182dec6ce30b0
|
45bf5a3694cfb2c162f65855a2b8a827649198c4
|
refs/heads/master
| 2021-04-15T09:26:15.434185
| 2021-02-28T17:57:34
| 2021-02-28T17:57:34
| 126,635,991
| 1
| 1
| null | 2018-12-29T13:57:30
| 2018-03-24T20:05:07
|
R
|
UTF-8
|
R
| false
| true
| 449
|
rd
|
pm_gh_config.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/githubFunctions.R
\name{pm_gh_config}
\alias{pm_gh_config}
\title{pm_gh_config - configure github with username and email}
\usage{
pm_gh_config(github_username, github_email)
}
\arguments{
\item{github_username}{Your username for github}
\item{github_email}{Your github email address}
}
\value{
}
\description{
pm_gh_config - configure github with username and email
}
|
73ccc477c5f7d69ada1e0154e2f40eef85ade999
|
4e1c85bc81fcc02a6b197fc44ab53376acba7084
|
/scripts/Sector_Bar_Chart.R
|
5fa507c33a30193b82ad1eb56ba6fe30d6063a51
|
[] |
no_license
|
darshils2001/info201-project
|
24e38278c0ed08c1f7fe37b4a7448b5bb5e3d49b
|
6d10cc82f09cc45d6f4f57c93eebb9ea783a46bf
|
refs/heads/main
| 2023-01-28T19:21:31.070568
| 2020-12-10T21:21:57
| 2020-12-10T21:21:57
| 305,523,884
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,059
|
r
|
Sector_Bar_Chart.R
|
# Load Packages
library("readxl")
library("ggplot2")
library("tidyverse")

# Load Dataset
sectors <- read_excel("datasets/National_Sector_Dataset.xls")

# Convert NAICS sector codes to human-readable names.
# BUG FIX: the original chained 19 gsub() calls. gsub() performs substring
# replacement, so a code embedded inside a longer value could be corrupted
# and the outcome depended on replacement order. A named lookup maps each
# code exactly once and leaves unrecognised values unchanged.
sector_names <- c(
  "11" = "Agriculture",
  "21" = "Mining",
  "22" = "Utilities",
  "23" = "Construction",
  "31" = "Manufacturing",
  "42" = "Wholesale Trade",
  "44" = "Retail Trade",
  "48" = "Transportation",
  "51" = "Information",
  "52" = "Finance",
  "53" = "Real Estate",
  "54" = "Professional Services",
  "55" = "Management",
  "56" = "Waste Management",
  "61" = "Educational Services",
  "62" = "Health Care",
  "71" = "Arts",
  "72" = "Food Services",
  "81" = "Other Services"
)
codes <- as.character(sectors$NAICS_SECTOR)
mapped <- unname(sector_names[codes])
sectors$NAICS_SECTOR <- ifelse(is.na(mapped), codes, mapped)

# Filter to 5 randomly selected sectors
random_sectors <- sectors %>%
  filter(NAICS_SECTOR %in% c("Health Care", "Arts", "Management",
                             "Utilities", "Other Services"))

# Strip "%" signs, then find the average percentage in each sector
random_sectors$ESTIMATE_PERCENTAGE <- as.numeric(
  gsub("[\\%,]", "", random_sectors$ESTIMATE_PERCENTAGE)
)
average_random_sectors <- random_sectors %>%
  group_by(NAICS_SECTOR) %>%
  summarize(
    AVERAGE_PERCENT = mean(ESTIMATE_PERCENTAGE)
  )
|
90211f301b0e9791a8142b6abf2fbe8ee54da16e
|
688148891812cefeadebfd0340ead1a04b6e8a00
|
/R/incdist.R
|
d25095e859162354a8b6eb225d05cef29e0fdf32
|
[] |
no_license
|
mjantti/incdist
|
47a59545a59666e0f343f8cd4b5267a84bb5eba2
|
836fa9c6b74d0f475db686ee1a49e4e52cddaebe
|
refs/heads/master
| 2023-08-16T13:40:23.974958
| 2023-08-09T07:21:59
| 2023-08-09T07:21:59
| 244,979,962
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,585
|
r
|
incdist.R
|
## test to see if both make it to NAMESPACE
# income -- R functions for income distribution analysis
# v 0.1 1998/01/07 mjantti@abo.fi
# Goals:
# This library will supply standard methods for income distribution
# research, including
# - (relative, absolute and generalized) inequality and poverty indices
# - (relative, absolute and generalized) inequality and poverty curves
# - statistical inference for the above
# - for microdata, grouped data and using kernel density objects
# - fitting distribution functions to grouped and microdata
# - robust methods
# v 0.2 2003/07/15 mjantti@abo.fi
## move to use new style methods
## .. abandoned
## v. 0.3 2012-12-03 markus.jantti@sofi.su.se
### an income distribution object
#' An income distribution object
#'
#' This function creates an object of class incdist that can be used to
#' facilitate income distribution analysis.
#'
#' The incdist object can be summarized, and the summary can be printed and
#' plotted (plotting is currently broken).
#'
#' @aliases incdist incdist.default incdist.formula summary.incdist
#' print.summary.incdist plot.summary.incdist
#' @param formula a one-or two-sided formula that defines the resource
#' variables of interest.
#' @param weights sampling weights.
#' @param data a data frame that holds the data.
#' @param subset defines a subset of the data to be used.
#' @param eqsale defines a transformation of each resource variable. Defaults
#' to NULL, that is, no transformation.
#' @param group a one-sided formula that defines the grouping structure.
#' @param part a one-sided formula that defines breaks in the data, for which
#' analyses are done separately.
#' @param idvar a one-sided formula that auxiliary variables to be kept in the
#' resulting object, for instance, variables needed for equivalence scale
#' transformation or deflating the analysis variables.
#' @param na.rm a logical indicating if only complete cases should be used.
#' @param func a character vector giving which income distribution functionals
#' to calculate.
#' @return An object of class incdist.
#' @author Markus Jantti \email{markus.jantti@@iki.fi}
#' @seealso
#' @references
#' \insertRef{lambert1993}{incdist}
#'
#' @examples
#'
#' ## generate some data
#' ## income components
#' n <- 1000
#' x1 <- runif(n)
#' x2 <- rexp(n)
#' a <- ifelse(a <- rpois(n, 1), a, 1)
#' c <- rpois(n, 2)
#' ## sum to total income
#' y1 <- x1 + x2
#' ## generate a grouping
#' g <- factor(rbinom(n, 1, .5), labels = letters[1:2])
#' ## generate a partitioning variable
#' p <- factor(sample(c(1, 2, 3), n, replace = TRUE),
#' labels = paste("t", 1:3, sep = ""))
#' ## generate some weights
#' w <- rpois(n, 5)
#' ## put it all into a data frame
#' test.d <- data.frame(x1, x2, y1, g, p, a, c, w)
#'
#' summary(incdist(y1 ~ x1 + x2, data=test.d, group = ~ g))
#' summary(incdist(y1 ~ x1 + x2, data=test.d, part = ~ p))
#' id.0 <- incdist(y1 ~ x1 + x2, data=test.d, part = ~ p, group = ~ g,
#' idvar = ~ a + c, weights = w)
#' id.0$eqscale <- list(formula = ~ (a + k*c)^d, coef=list(k=.8, d=.75),
#' type="citro-michael")
#'
#' print(summary(id.0, equivalise = FALSE), all = TRUE)
#' print(summary(id.0, equivalise = TRUE), all = TRUE) # the default
#' print(id.0)
#' ## there is not such function: plot(id.0)
#' @importFrom stats approxfun as.formula complete.cases cov.wt formula is.empty.model
#' model.extract model.weights na.omit predict terms var weighted.mean
#'
## the formula method, currently only this exists
## will this need to be modified
#' @export
incdist <- function(x, ...)
{
    ## Generic constructor for incdist objects: dispatch on the class of
    ## `x`, falling back to data.class() when `x` carries no class at all.
    if (is.null(class(x))) {
        class(x) <- data.class(x)
    }
    UseMethod("incdist", x)
}
#' @export
incdist.formula <-
    function(formula, weights, data = sys.frame(sys.parent()), subset,
             eqscale = NULL,
             group = ~ 1, part = ~ 1, idvar = ~ 0,
             na.rm = TRUE, ...)
{
    ## Formula method: build the model frame holding the total income
    ## (response), income components (rhs terms), grouping, partitioning
    ## and auxiliary (idvar) variables, and wrap everything in a list of
    ## class "incdist".  No statistics are computed here; see
    ## summary.incdist for the actual calculations.
    if (!inherits(formula, "formula")){
        stop("First argument must be the income formula!")
    }
    # 1. Set up the data.
    # figure out the income part, the partitioning and the group structure
    # code copied from locfit{locfit} & sem{tsls.formula}
    income <- terms(formula, data = data)
    ## if there are no components, let the intercept be
    if(length(attr(income, "term.labels")))
        attr(income, "intercept") <- 0
    ## allow for no response (in the future...)
    grterms <- terms(group, data = data)
    if(length(attr(grterms, "term.labels")))
        attr(grterms, "intercept") <- 0
    paterms <- terms(part, data = data)
    ## here also, if paterms is ~1 (which should be the default),
    ## keep the intercept
    if(length(attr(paterms, "term.labels")))
        attr(paterms, "intercept") <- 0
    idterms <- terms(idvar, data = data)
    attr(idterms, "intercept") <- 0
    ## Paste the four formulas together into one big model-frame formula:
    ## response ~ components + group + part + idvar.
    c1 <- as.character(formula)
    c2 <- as.character(group)
    c3 <- as.character(part)
    c4 <- as.character(idvar)
    ## how to get weights named correctly
    formula <- as.formula(paste(c1[2], c1[1], c1[3], "+", c2[2], "+", c3[2],
                                "+", c4[2]))
    ## Rewrite this call into a model.frame() call and evaluate it in the
    ## caller's frame so `data`, `subset` and `weights` resolve there.
    m <- match.call()
    m$formula <- formula
    m$eqscale <- m$group <- m$part <- m$method <- m$idvar <- NULL
    m[[1]] <- as.name("model.frame")
    frm <- eval(m, sys.frame(sys.parent()))
    ## NOTE(review): `m` here is the (unevaluated) call, not the evaluated
    ## model frame, and `w` is never used afterwards — looks vestigial;
    ## confirm before removing.
    w <- model.weights(m)
    incnames <- as.character(attributes(income)$variables)[-1]
    ## initialize at NULL
    ## should panames and grnames be coerced to be factors here?
    yname <- panames <- grnames <- idvarnames <- NULL
    if (attr(income, "response"))
    {
        ## drop the response from the component names; keep its own name
        incnames <- incnames[-1]
        yname <- deparse(formula[[2]])
    }
    if(!is.empty.model(part))
    {
        panames <- as.character(attributes(paterms)$variables)[-1]
        for(i in panames) frm[[i]] <- as.factor(frm[[i]])
    }
    if(!is.empty.model(group))
    {
        grnames <- as.character(attributes(grterms)$variables)[-1]
        for(i in grnames) frm[[i]] <- as.factor(frm[[i]])
    }
    if(!is.empty.model(idvar))
    {
        idvarnames <- as.character(attributes(idterms)$variables)[-1]
    }
    ## start constructing the return object.
    ## return a structure with class incdist
    ## which contains the data etc
    ## data, formula, yname, xnames, part names, groupnames
    ret <- list(frm, formula, eqscale,
                yname, incnames, panames, grnames, idvarnames,
                group, part, idvar, m)
    names(ret) <- c("frm", "formula", "eqscale", "yname",
                    "incnames", "panames", "grnames",
                    "idvarnames", "group",
                    "part", "idvar", "m")
    class(ret) <- "incdist"
    ret
}
#' @export
print.incdist <- function(x, ...)
{
    ## Print a short description of an incdist object: the overall income
    ## variable, its components, and the grouping/partitioning structure.
    ## The first argument is named `x` to match the print() generic, as
    ## required for S3 method consistency (R CMD check flags a mismatch).
    ## inherits() is used directly so the method does not depend on the
    ## sibling helper is.incdist().
    if(!inherits(x, "incdist")) stop("Not an incdist object!")
    cat("An incdist object\n")
    cat("Overall variable: \t", x$yname, "\n")
    cat("Components: \t", x$incnames, "\n")
    cat("Grouping (within part): \t", x$grnames, "\n")
    cat("Partitioned by: \t", x$panames, "\n")
    ## return the object invisibly, as print methods should
    invisible(x)
}
## summary.incdist here
## part is the partitioning factor
## eqscale should be a formula? that is applied to every element in income
## method should specify an index
## is the first argument a formula?
## group if the grouping factor(s)
## NB: problem with Gini coefficient use
#' @export
## this function needs quite a bit of work
summary.incdist <- function(object,
                            equivalise = FALSE,
                            func = c("weighted_mean", "weighted_std",
                                     "gini.default"),
                            poverty=FALSE, concentration = FALSE,
                            povertyline=NULL,
                            povertyline.function="weighted_median",
                            povertyline.fraction=0.5,
                            frame=TRUE, ...)
{
    ## Compute, for every function named in `func`, statistics for the
    ## overall income variable and each income component, within every
    ## partition level and group level (plus an "all" group).  Results are
    ## stored in nested lists: ret.y[[group]][[partition]] for the overall
    ## variable and ret.x[[component]][[group]][[partition]] for the
    ## components.  Statistic functions are looked up by name via do.call()
    ## and must accept the data vector and (optionally) weights `w`.
    if (!inherits(object, "incdist")){
        stop("First argument must be the incdist object!")
    }
    ## 2. this is where the income inequality code should start
    ## y contains the income variable
    ## check if y == x (to some tolerance)
    ## i. for each part
    ##    a. inequality & poverty indices, lorenz & tip objects
    ##    b. concentration curves & indices
    ##    c. figure out the group decompositions (i.e, do a-b for these)
    ## ii. for the full set of partitions
    ##    a-c
    ## NOTE(review): attach()ing the object puts frm/panames/grnames etc on
    ## the search path for the rest of the function; detached on exit.
    attach(object)
    on.exit(detach(object))
    ## see lorenz
    ##on.exit(## detach(object))
    income <- terms(object$formula, data = frm)
    if(length(panames))
        pal <- levels(frm[[panames]])
    if(length(grnames))
        grl <- levels(frm[[grnames]])
    ## need to make some stuff explicitly null
    if(!length(grnames)) grl <- NULL
    if(!length(panames)) pal <- NULL
    eqscl <- object$eqscale
    ## save the old frame, will be needed if we want another e.s.
    if(frame)
        old.frm <- frm
    else
        old.frm <- ""
    ## apply the equivalence scale transformation when requested
    if(!is.null(eqscl) & equivalise)
        frm <- eqscale(object)
    # 0. the top level statistics
    ## split the data by partition; with no partition, a 1-element list
    if(length(panames)) frm.list <- split(frm, frm[[panames]])
    else
    {
        frm.list <- list()
        frm.list[[1]] <- frm
    }
    ## create the lists (structures)
    ## ret.x and ret.y to hold the statistics
    ## n to hold sample sizes
    ## sumw to hold sum of weights
    ## must be done here
    ret.y <- n <- sumw <- list() ##ret.y{i:group}{l:partition}
    for(i in 1:(1+length(grl)))
    {
        ret.y[[i]] <- n[[i]] <- sumw[[i]] <- list()
        for(l in 1:length(frm.list))
        {
            ret.y[[i]][[l]] <- n[[i]][[l]] <- sumw[[i]][[l]] <- NA
        }
        names(ret.y[[i]]) <- names(n[[i]]) <- names(sumw[[i]]) <- pal
    }
    names(ret.y) <- names(n) <- names(sumw) <- c("all", grl)
    ret.x <- list() ##ret.y{j:incomecomps}{i:group}{l:partition}
    ## skip the first incnames, which is in "ret.y"
    if(length(incnames))
    {
        for(j in 1:length(incnames))
        {
            ret.x[[j]] <- list()
            for(i in 1:(1+length(grl)))
            {
                ret.x[[j]][[i]] <- list()
                for(l in 1:length(frm.list))
                {
                    ret.x[[j]][[i]][[l]] <- NA
                }
                names(ret.x[[j]][[i]]) <- pal
            }
            names(ret.x[[j]]) <- c("all", grl)
        }
        names(ret.x) <- incnames
    }
    ## start doing the work
    for(l in 1:length(frm.list)) ## l indexes partitions
    {
        if(length(grnames))
        {
            ## group counts within this partition; first entry is "all"
            count.grnames <- c(dim(frm.list[[l]])[1],
                               table(frm.list[[l]][[grnames]]))
            frm.list.l <- split(frm.list[[l]],
                                ## should I give a levels argument
                                ## to ensure that all partitions have
                                ## the same structure (no. of levels)?
                                as.factor(frm.list[[l]][[grnames]]))
            frm.list.l <- c(list(frm.list[[l]]), frm.list.l)
        }
        else
        {
            frm.list.l <- list(frm.list[[l]])
            count.grnames <- dim(frm.list[[l]])[1]
        }
        ## this used to be
        for(i in 1:(1+length(grl))) ## i indexes groups present. The first is all.
        {
            doing.name <- grnames[i]
            ## must add som code here to
            ## a. check if every i has a dataframe in frm.list.l
            ## b. if not, create an empty data frame for those
            if(count.grnames[i] == 0)
            {
                frm.list.l[[i]] <- subset(frm.list[[l]], T)
                if(attr(income, "response"))
                    y <- 0
                n[[i]][[l]] <- 0
            }
            if (count.grnames[i] != 0 && attr(income, "response")) {
                y <- model.extract(frm.list.l[[i]], response)
                n[[i]][[l]] <- length(y)
                ##yname <- deparse(formula[[2]])
            }
            else {
                y <- NULL
            }
            if(length(incnames))
            {
                x <- as.matrix(frm.list.l[[i]][, incnames])
                if (length(incnames) == dim(x)[2])
                    dimnames(x) <- list(NULL, incnames)
            }
            weights <- model.extract(frm.list.l[[i]], weights)
            sumw[[i]][[l]] <- sum(weights)
            ## figure out the poverty line if any of the functions is poverty
            ## only for group 1 ("all")
            ## this and the next place it is used needs to be revisited!
            if(poverty & i==1)
            {
                if(is.null(povertyline))
                {
                    ## poverty line = fraction of a location statistic
                    ## (default: half the weighted median) of overall income
                    if(!is.null(weights))
                        poverty.line <-
                            povertyline.fraction*
                                do.call(povertyline.function,
                                        list(x=as.numeric(y), w=weights))
                    else
                        poverty.line <-
                            povertyline.fraction*
                                do.call(povertyline.function, list(x=as.numeric(y)))
                }
                else
                    poverty.line <- frm.list.l[[i]][[povertyline]]
            }
            ## "func" holds the functions to be calculated
            ## these must accept two (an exactly two) arguments
            ## the data and the weights
            tmp.res <- list()
            for(k in 1:length(func))
            {
                ## initialize to 0 for the case the group is empty
                tmp.res[[k]] <- 0
                if (!count.grnames[i]) next
                if(!is.null(weights))
                {
                    if(length(grep("poverty", func[k]))>0 & poverty)
                    {
                        if(length(grep("absolute", func[k]))>0)
                            tmp.res[[k]] <-
                                do.call(func[k], list(as.vector(y), w = weights,
                                                      ...))
                        else
                            tmp.res[[k]] <-
                                do.call(func[k], list(as.vector(y), w = weights,
                                                      z = as.numeric(poverty.line),
                                                      ...))
                    }
                    else
                        tmp.res[[k]] <-
                            do.call(func[k], list(as.vector(y), w = weights, ...))
                }
                else
                    tmp.res[[k]] <- do.call(func[k], list(as.vector(y)), ...)
            }
            names(tmp.res) <- func
            ret.y[[i]][[l]] <- tmp.res
            if(length(incnames))
            {
                ## repeat the same calculations for every income component
                for(j in 1:length(incnames))
                {
                    ## "func" holds the functions to be calculated
                    ## these must accept two (an exactly two) arguments
                    ## the data and the weights
                    tmp.res <- list()
                    for(k in 1:length(func))
                    {
                        ## initialize to zero and check if groups is empty
                        tmp.res[[k]] <- 0
                        if (!count.grnames[i]) next
                        if(!is.null(weights))
                        {
                            if(func[k] == "concentration_coef" || concentration)
                            {
                                ## concentration measures rank by overall income
                                tmp.res[[k]] <-
                                    do.call(func[k],
                                            list(x[,j], w = weights,
                                                 ranked = as.vector(y), ...))
                            }
                            else
                            {
                                if(func[k] == "relative.poverty" || poverty)
                                    tmp.res[[k]] <-
                                        do.call(func[k],
                                                list(x[,j],
                                                     w=weights,
                                                     z = as.numeric(poverty.line),
                                                     ...))
                                else
                                    tmp.res[[k]] <-
                                        do.call(func[k],
                                                list(x[,j], w=weights, ...))
                            }
                        }
                        else
                        {
                            ## NOTE(review): unweighted branch tests
                            ## "concentration.coef" (dot) while the weighted
                            ## branch tests "concentration_coef" (underscore)
                            ## — confirm which spelling is intended.
                            if(func[k] == "concentration.coef" || concentration)
                                tmp.res[[k]] <-
                                    do.call(func[k],
                                            list(x[,j], ranked = as.vector(y)),
                                            ...)
                            else
                            {
                                if(func[k] == "relative.poverty" || poverty)
                                    tmp.res[[k]] <-
                                        do.call(func[k], list(x[,j], ...))
                                else
                                    tmp.res[[k]] <-
                                        do.call(func[k], list(x[,j], ...))
                            }
                        }
                    }
                    names(tmp.res) <- func
                    ret.x[[j]][[i]][[l]] <- tmp.res
                }
            }
        }
    }
    ## start constructing the return object.
    ## detach(object)
    ## With frame=TRUE the (untransformed) data frames are carried along so
    ## that print/as.data.frame methods can recover level names.
    if(frame)
    {
        ret <- list(ret.y, ret.x,
                    n, sumw,
                    object$yname, object$incnames,
                    object$grnames, object$panames,
                    func,
                    object$frm, old.frm)
        names(ret) <- c(object$yname, "components",
                        "sample.size", "sum.weights",
                        "overall", "comp",
                        "group", "partition",
                        "func", "frm", "old.frm")
    }
    else
    {
        ret <- list(ret.y, ret.x,
                    n, sumw,
                    object$yname, object$incnames,
                    object$grnames, object$panames,
                    func)
        names(ret) <- c(object$yname, "components",
                        "sample.size", "sum.weights",
                        "overall", "comp",
                        "group", "partition",
                        "func")
    }
    structure(ret, class = c("summary.incdist", class(object)))
}
## do a print.summary function for incdist
#' @export
print.summary.incdist <- function(object, all = FALSE, what = TRUE,
                                  relative = FALSE, ...)
{
    ## Rearrange the nested result lists of a summary.incdist object into
    ## one partition-by-group matrix per (statistic, variable) pair, print
    ## a short header (and, with all=TRUE, every matrix), and invisibly
    ## return the matrices together with their index names.
    ## relative=TRUE expresses each cell relative to the "all" group.
    if(!inherits(object, "summary.incdist")) stop("Not an incdist summary object!")
    ##
    comps <- object$comp
    ## what=TRUE means "all statistics computed in the summary"
    if(what == TRUE)
        funcs <- object$func
    else
        funcs <- what
    if(length(object$partition))
        parts <- levels(object$frm[[object$partition]])
    else parts <- 1
    if(length(object$group))
        groups <- c("all", levels(object$frm[[object$group]]))
    else groups <- 1
    stats <- list()
    for(k in 1:length(funcs))
    {
        ## this assumes that the funcs have returned scalars for each part, comps
        stats[[k]] <- list()
        ##
        for(j in 1:(1+length(comps)))
        {
            stats[[k]][[j]] <- matrix(0, nrow = length(parts),
                                      ncol = length(groups))
            rownames(stats[[k]][[j]]) <- parts
            colnames(stats[[k]][[j]]) <- groups
            for(l in 1:length(parts))
            {
                for(i in 1:length(groups))
                {
                    ## j==1 is the overall variable (object[[1]]);
                    ## j>1 indexes components (object[[2]])
                    if(j==1)
                        stats[[k]][[j]][l, i] <- object[[1]][[i]][[l]][[k]]
                    else
                        stats[[k]][[j]][l, i] <- object[[2]][[j-1]][[i]][[l]][[k]]
                    if(relative == TRUE)
                    {
                        if(j==1)
                            stats[[k]][[j]][l, i] <-object[[1]][[i]][[l]][[k]]/
                                object[[1]][[1]][[l]][[k]]
                        else
                            stats[[k]][[j]][l, i] <- object[[2]][[j-1]][[i]][[l]][[k]]/
                                object[[2]][[j-1]][[1]][[l]][[k]]
                    }
                }
            }
        }
        names(stats[[k]]) <- c(object$overall, comps)
    }
    ## this is not very elegant, but not doing it leads to problems
    ## NOTE: `k` deliberately retains its final loop value here, so slots
    ## k+1 and k+2 append sample sizes and weight sums after the statistics.
    stats[[k+1]] <- list()
    stats[[k+1]][[1]] <- matrix(0, nrow = length(parts),
                                ncol = length(groups))
    rownames(stats[[k+1]][[1]]) <- parts
    colnames(stats[[k+1]][[1]]) <- groups
    stats[[k+2]] <- list()
    stats[[k+2]][[1]] <- matrix(0, nrow = length(parts),
                                ncol = length(groups))
    rownames(stats[[k+2]][[1]]) <- parts
    colnames(stats[[k+2]][[1]]) <- groups
    for(l in 1:length(parts))
    {
        for(i in 1:length(groups))
        {
            stats[[k+1]][[1]][l, i] <- object[["sample.size"]][[i]][[l]]
            stats[[k+2]][[1]][l, i] <- object[["sum.weights"]][[i]][[l]]
            if(relative == TRUE)
                stats[[k+2]][[1]][l, i] <- object[["sum.weights"]][[i]][[l]]/
                    object[["sum.weights"]][[1]][[l]]
        }
    }
    names(stats) <- c(funcs, "sample.size", "sum.weights")
    retval <- list(stats, funcs, parts, c(object$overall, comps), groups)
    names(retval) <-
        c("result.matrices", "functions", "partitions", "variables", "groups")
    #structure(retval, class = c("summary.incdist", class(x)))
    cat("An incdist.summary object\n")
    cat("Variables: ", retval$variables, "\n")
    cat("Partitions: ", retval$partitions, "\n")
    cat("Statistics: ", retval$functions, "\n")
    if(all)
    {
        cat("The matrices: \n")
        for(k in 1:(length(retval$result.matrices)-2)) ## indexes functions
        {
            for(j in 1:(1+length(comps)))
            {
                cat("Statistic: ", retval$functions[k],
                    "Income component: ", retval$variables[j], "\n")
                print(retval$result.matrices[[k]][[j]])
            }
        }
        ## again relying on the post-loop value of k for the two appended slots
        cat("Observations: \n")
        print(retval$result.matrices[[k+1]][[1]])
        cat("Sum of weights: \n")
        print(retval$result.matrices[[k+2]][[1]])
        cat("\n")
    }
    invisible(retval)
}
## I need better methods to display my results
## the summary object has results for the top-level variables in [[1]]
## and all components in [[2]]
#' @export
incdist.as.array <- function(object, ...)
{
    ## Collapse the nested result matrices of a (printed) incdist summary
    ## into a single 4-d array indexed by partition, group, variable and
    ## statistic.  Filling relies on unlist() visiting the matrices in
    ## column-major order, matching the requested dimensions.
    part_names <- object$partitions
    group_names <- object$groups
    var_names <- object$variables
    fun_names <- object$functions
    array(unlist(object[["result.matrices"]]),
          dim = c(length(part_names),
                  length(group_names),
                  length(var_names),
                  length(fun_names)),
          dimnames = list(partitions = part_names,
                          groups = group_names,
                          variables = var_names,
                          functions = fun_names))
}
## the predicate function
#' @export
is.incdist <- function(object) inherits(object, "incdist")
#' Transform an incdist object with an equivalence scale
#'
#' A utility function used by summary.incdist to transform "raw" income
#' variables by an equivalence scale
#'
#' Presuposes the present in object of a list called eqscale, with (at least)
#' components formula, coef. Variables in formula must be found in the
#' object\$frm.
#'
#' @param object an incdist object.
#' @return Returns an object where the income variables in frm have been
#' replaced the equivalised version and the old unequavalised form has been
#' copied old.frm.
#' @author Markus Jantti \email{markus.jantti@@iki.fi}
#' @seealso
#' @references
#' @examples
#'
#' x1 <- runif(50)
#' x2 <- rexp(50)
#' ad <- ifelse(ad <- rpois(50, 1), ad, 1)
#' ch <- rpois(50, 2)
#' ## sum to total income
#' y1 <- x1 + x2
#' ## generate a grouping
#' g <- factor(c(rep(1, 10), rep(2, 10), rep(1, 15), rep(2, 15)),
#' labels = letters[1:2]) # 2 groups
#' ## generate a partitioning variable
#' p <- factor(c(rep(1, 20), rep(2, 30)), labels = LETTERS[1:2])
#' ## generate some weights
#' w <- rpois(50, 5)
#' ## put it all into a data frame
#' test.d <- data.frame(x1, x2, y1, g, p, ad, ch, w)
#'
#' id.0 <- incdist(y1 ~ x1 + x2, part = ~ p, group = ~ g, weights = w,
#' data = test.d)
#' id.0$idvarnames <- c("a", "c")
#' id.0$eqscale <- list(formula = ~ (ad + k*ch)^d, coef=list(k=.8, d=.75),
#' type="citro-michael")
#'
#' eqscale(id.0)
#'
#' @export
eqscale <- function(object)
{
    ## Divide the overall income variable and every income component in
    ## object$frm by the equivalence scale defined by object$eqscale
    ## (a list with elements `formula` and `coef`), and return the
    ## transformed data frame.  The original object is not modified.
    .tmp.eqscale <- function (object)
    {
        ## Evaluate the rhs of the equivalence-scale formula in an
        ## environment that contains every column of the data frame plus
        ## every named scale coefficient, yielding one scale value per row.
        if(!is.incdist(object)) stop("Not an incdist objectect!")
        if(is.null(object$eqscale)) stop("No equivalence scale present in objectect!")
        ## NOTE(review): `$form` resolves to the `formula` element via
        ## partial matching — confirm; renaming the element would break this.
        form <- object$eqscale$form
        coef <- object$eqscale$coef
        data <- object$frm
        thisEnv <- environment()
        env <- new.env()
        ## expose each data column by name in the evaluation environment
        for (i in names(data)) {
            assign(i, data[[i]], envir = env)
        }
        if(!is.null(coef))
            ind <- as.list(coef)
        else ind <- NULL
        parLength <- 0
        ## expose each scale coefficient (e.g. k, d) by name as well
        for (i in names(ind)) {
            temp <- coef[[i]]
            assign(i, temp, envir = env)
        }
        ## form[[2]] is the rhs expression of the one-sided formula
        eval(form[[2]], envir = env)
    }
    if(!is.incdist(object)) stop("Not an incdist objectect!")
    ## this is probably not needed?
    ##attach(object)
    data <- object$frm
    tmp.es <- .tmp.eqscale(object)
    ##env <- new.env() # not needed, I think
    ## this gives a waring on R CMD check. Need to define the explicitly
    for(i in c(object$yname, object$incnames)) data[[i]] <- data[[i]]/tmp.es
    data
}
## functions that "bind" to income distribution functionals
## an inequality index
#'
#' Estimate inequality indices
#'
#' This function estimates inequality indices.
#'
#'
#' @aliases inequality inequality.incdist inequality.default
#' @param x a numerical vector or incdist object
#' @param w an optional vector of non-negative integer values weights.
#' @param indices to estimate
#' @return the estimated indices
#' @author Markus Jantti \email{markus.jantti@@iki.fi}
#' @seealso \code{\link{gini}}, \code{\link{ge}}, \code{\link{atkinson}}
#' @references Lambert, P. (1993). \emph{The distribution and
#' redistribution of income. A mathematical analysis.} Manchester
#' University Press, Manchester.
#' @export
inequality <- function(x, ...)
{
    ## Generic for inequality indices: dispatch on the class of `x`,
    ## falling back to data.class() when `x` carries no class.
    if (is.null(class(x))) {
        class(x) <- data.class(x)
    }
    UseMethod("inequality", x)
}
#' @export
inequality.default <- function(x, type = c("gini", "ge", "atkinson", "cv2"), ...)
{
    ## Evaluate every requested inequality index by calling the function of
    ## that name on `x`, forwarding any extra arguments in `...` to each.
    ## Returns a named numeric vector, one entry per index.
    if (!is.numeric(x)) {
        stop("argument x is non-numeric!")
    }
    extra_args <- list(...)
    result <- setNames(numeric(length(type)), type)
    for (index_name in type) {
        result[index_name] <- do.call(index_name, c(list(x), extra_args))
    }
    result
}
#' @export
inequality.incdist <-
    function(x, what = 1, type = c("gini", "ge", "atkinson", "cv2"),
             frame=FALSE,
             equivalise=FALSE, ...)
{
    ## Method for incdist objects: delegate to summary.incdist(), asking it
    ## to evaluate the functions named in `type`, then extract the nested
    ## result list for element `what` of the summary (1 = the overall
    ## income variable).
    object <- x
    if(!is.incdist(object)) stop("Not an incdist object!")
    ## for what
    ## what to calculate (change this so multiple measures can be used.)
    ## type <- match.arg(type)
    obj <- summary(object, func = type, frame=frame, equivalise=equivalise,...)
    ##print(obj, all = TRUE)
    obj[[what]][[1]]
}
## a poverty index
#'
#' Estimate poverty indices
#'
#' This function estimates poverty indices.
#'
#'
#' @aliases povery poverty.incdist poverty.default
#' @param x a numerical vector or incdist object
#' @param w an optional vector of non-negative integer values weights.
#' @param indices to estimate
#' @return the estimated indices
#' @author Markus Jantti \email{markus.jantti@@iki.fi}
#' @seealso \code{\link{inequality}}, \code{\link{fgt}}
#' @references Lambert, P. (1993). \emph{The distribution and
#' redistribution of income. A mathematical analysis.} Manchester
#' University Press, Manchester.
#' @export
poverty <- function(x, ...)
{
    ## Generic for poverty indices: dispatch on the class of `x`,
    ## falling back to data.class() when `x` carries no class.
    if (is.null(class(x))) {
        class(x) <- data.class(x)
    }
    UseMethod("poverty", x)
}
#' @export
poverty.default <- function(x, type = "fgt", ...)
{
    ## Evaluate every requested poverty index by calling the function of
    ## that name on `x`, forwarding extra arguments (e.g. the poverty line)
    ## in `...` to each.  Returns a named numeric vector.
    if (!is.numeric(x)) {
        stop("argument x is non-numeric!")
    }
    extra_args <- list(...)
    result <- setNames(numeric(length(type)), type)
    for (index_name in type) {
        result[index_name] <- do.call(index_name, c(list(x), extra_args))
    }
    result
}
#' @export
poverty.incdist <-
    function(x, what = 1, type = "fgt", frame=FALSE, ...)
{
    ## Method for incdist objects: delegate to summary.incdist() with
    ## poverty=TRUE (so a poverty line is derived there), then extract the
    ## nested result list for element `what` of the summary (1 = the
    ## overall income variable).
    object <- x
    if(!is.incdist(object)) stop("Not an incdist object!")
    ## for what
    ## what to calculate
    ## type <- match.arg(type)
    obj <- summary(object, func = type, poverty=TRUE, frame=frame, ...)
    ##print(obj, all = TRUE)
    obj[[what]][[1]]
}
## make a "as.data.frame" method
#' @export
as.data.frame.incdist <- function(object, all=TRUE, what=TRUE,
                                  relative=FALSE, ...)
{
    ## Flatten the nested result lists of a summary.incdist object into a
    ## long data frame with columns val, Variable, Group, Partition and
    ## Function.  The index columns are recovered by splitting the names
    ## produced by unlist() on ".", so this is fragile if any variable,
    ## group or partition name itself contains a dot.
    if(!inherits(object, "summary.incdist")) stop("Not an incdist summary object!")
    vars <- c(object$overall, object$comp)
    groupdim <- object$group
    grouplevs <- names(table(object$frm[[groupdim]]))
    ngroup <- length(grouplevs)
    partdim <- object$partition
    partlevs <- names(table(object$frm[[partdim]]))
    npart <- length(partlevs)
    func <- object$func
    nfunc <- length(func)
    ## which approach?
    ## constructing "by hand" may be unreliable
    #ret1 <- as.numeric(unlist(object[1:2]))
    #nstat <- length(ret1)
    #Variable <- rep(vars, each=ngroup*npart*nfunc)
    #Group <- rep()
    #Partition <- rep()
    #Function <- rep()
    #ret <-
    #    data.frame(val=ret1,
    #               Variable=rep(vars, each=ngroup*npart*nfunc),
    #               Group=rep(),
    #               Partition=rep(),
    #               Function=rep())
    ## or create a dataset by unlisting the 1st and 2nd compont
    ## Care needs to be taken to deal with different kind of objects
    ## 1. z ~ y + x (works)
    ## 2. z ~ 1 (works)
    ## 3. ~ x + y
    if(length(object[[1]])>0)
    {
        ## overall-variable results: names look like group.part.func...
        tx1 <- unlist(object[1])
        ## ne = number of dot-separated name components per entry
        ne <- length(strsplit(names(tx1[1]), "\\.")[[1]])
        td1 <- data.frame(val=as.numeric(tx1))
        ## a temporary fix
        tn <- unlist(strsplit(names(tx1), "\\.", perl=TRUE))
        ##tn <- tn[-grep("%", tn)]
        td2 <-
            as.data.frame(matrix(tn, ncol=ne, byrow=T), stringsAsFactors = FALSE)
    }
    if(length(object[[2]])>0)
    {
        ## component results carry one extra leading name part ("components")
        tx3 <- unlist(object[2])
        td3 <- data.frame(val=as.numeric(tx3))
        ## a temporary fix
        tn <- unlist(strsplit(names(tx3), "\\.", perl=TRUE))
        ##tn <- tn[-grep("%", tn)]
        td4 <-
            as.data.frame(matrix(tn, ncol=ne+1, byrow=T)[,-1], stringsAsFactors = FALSE)
    }
    ## stack whichever of the two pieces exist
    if(length(object[[1]])>0 && length(object[[2]])>0)
        ret <- rbind(cbind(td1, td2), cbind(td3, td4))
    if(length(object[[1]])>0 && !length(object[[2]])>0)
        ret <- cbind(td1, td2)
    if(!length(object[[1]])>0 && length(object[[2]])>0)
        ret <- cbind(td3, td4)
    ## this *must* be generalised at some point
    ## rename the positional V1..V5 columns to meaningful names
    ret$Variable <- ret$V1
    ret$Group <- ret$V2
    ret$Partition <- ret$V3
    ret$Function <- paste(ret$V4, ret$V5, sep=".")
    if(ne>5) {ret$Value <- ret$V6; ret$V6 <- NULL}
    ret$V1 <- ret$V2 <- ret$V3 <- ret$V4 <- ret$V5 <- NULL
    ret
}
## design and write a "c()" method for two incdist objects
|
45b9e453f1be40149eb850115f4ef34ae811d505
|
00035781ff4e3bcc409b5b1178373e4f59a68d27
|
/plot3.R
|
390932932c7234e90f28afbeb50497cd975f8277
|
[] |
no_license
|
fmalvicino/ExData_Plotting1
|
467e71699819d193d4e5687ce37f7ffe9301dc52
|
97d71dfd6c2acdee2726d1daeaf15044474c4bff
|
refs/heads/master
| 2021-01-17T21:28:07.895509
| 2015-08-08T01:39:09
| 2015-08-08T01:39:09
| 40,381,540
| 0
| 0
| null | 2015-08-07T21:26:23
| 2015-08-07T21:26:22
| null |
UTF-8
|
R
| false
| false
| 747
|
r
|
plot3.R
|
# Plot 3 — energy sub-metering for 1-2 Feb 2007 from the UCI household
# power-consumption data, saved to plot3.png (480x480).
hpc <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
library(dplyr)
library(lubridate)

# Combine the separate Date and Time columns into one POSIXct column
hpc$DateTime <- dmy_hms(paste(hpc$Date, hpc$Time))

# Restrict to the two days of interest
feb_days <- filter(hpc, DateTime %within%
                     new_interval(ymd("2007-02-01"), ymd_hm("2007-02-02 23:59")))

# Set up an empty frame, add the legend, then draw one line per sub-meter
plot(feb_days$DateTime, feb_days$Sub_metering_1, xlab = "",
     ylab = "Energy sub metering", type = "n")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
lines(feb_days$DateTime, feb_days$Sub_metering_1, type = "l")
lines(feb_days$DateTime, feb_days$Sub_metering_2, type = "l", col = "red")
lines(feb_days$DateTime, feb_days$Sub_metering_3, type = "l", col = "blue")

# Copy the screen device to a PNG file, then close that device
dev.copy(device = png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
dfe2e6f87427624530c00914e6684338672d0d45
|
50066dae4216d17bd6f0dcb9a11d872e73246eb6
|
/man/pk.calc.clast.obs.Rd
|
f4f173534b7b27cacb06c360472993afa33ac7a2
|
[] |
no_license
|
cran/PKNCA
|
11de9db2cb98279c79d06022415b8772e7c1f5ea
|
8f580da3e3c594e4e1be747cb2d8e35216784ed2
|
refs/heads/master
| 2023-05-10T16:54:19.131987
| 2023-04-29T18:30:02
| 2023-04-29T18:30:02
| 48,085,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 859
|
rd
|
pk.calc.clast.obs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pk.calc.simple.R
\name{pk.calc.clast.obs}
\alias{pk.calc.clast.obs}
\title{Determine the last observed concentration above the limit of
quantification (LOQ).}
\usage{
pk.calc.clast.obs(conc, time, check = TRUE)
}
\arguments{
\item{conc}{Concentration measured}
\item{time}{Time of concentration measurement}
\item{check}{Run \code{\link{check.conc.time}}?}
}
\value{
The last observed concentration above the LOQ
}
\description{
If Tlast is NA (due to no non-missing above LOQ measurements), this
will return NA.
}
\seealso{
Other NCA parameters for concentrations during the intervals:
\code{\link{pk.calc.cmax}()},
\code{\link{pk.calc.cstart}()},
\code{\link{pk.calc.ctrough}()}
}
\concept{NCA parameters for concentrations during the intervals}
|
fe03af73fe7b0d04a6efc966fdb1af570047fc8d
|
b2532b65845b352083b9c27e2a29c1237fd640e4
|
/R/snp_asso_fig.R
|
262bfa1a7aa5e61c61c1b56fa848b9425ada57d5
|
[
"CC0-1.0"
] |
permissive
|
kbroman/Talk_MAGIC2021
|
dfadeb4956e7a1ea184b329c6c3b68510462931a
|
deedea16df3ac92e20480d8b02634e70aca98373
|
refs/heads/master
| 2023-05-05T11:19:29.981646
| 2021-05-22T01:53:40
| 2021-05-22T01:53:40
| 360,722,882
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,193
|
r
|
snp_asso_fig.R
|
# SNP association scan
# Produces the SNP-association figures (LOD and -log10(p) scales) for the
# ArabMAGIC data using qtl2; genotype probabilities are cached on disk.
library(qtl2)
url <- "https://raw.githubusercontent.com/rqtl/qtl2data/master/ArabMAGIC/"
# NOTE(review): the line below overrides the remote URL with a local path,
# so the script only works on a machine with this checkout — confirm.
url <- "~/Code/Rqtl2/qtl2data/ArabMAGIC/"
arab <- read_cross2(paste0(url, "arabmagic_tair9.zip"))
arab <- drop_nullmarkers(arab)
# Cache the (slow) SNP scan; recompute only when the cache file is absent
file <- "_cache/snp_asso.rds"
if(file.exists(file)) {
    out_snps <- readRDS(file)
} else {
    pr <- calc_genoprob(arab, error_prob=0.002, cores=0)
    snp_pr <- genoprob_to_snpprob(pr, arab)
    out_snps <- scan1(snp_pr, arab$pheno, cores=0)
    saveRDS(out_snps, file)
}
# Loads out_hk (haplotype-based scan results) used below
load("_cache/scans.RData")
# Plot the SNP association results for one phenotype column.
# version: 1 = SNP scan only, 2 = overlay the haplotype scan (out_hk).
# logp: TRUE converts LOD scores to -log10 p-values (1 df for the SNP
# scan, 18 df for the haplotype scan) before plotting.
# Relies on globals out_snps, out_hk and arab defined above.
plot_snp_asso <-
    function(version=1, lodcolumn="fruit_length", title="fruit length",
             ymx=max(out_hk[,"fruit_length"]), logp=FALSE)
{
    par(mar=c(4.1,4.1,0.6,0.6), fg="white", col.lab="white", col.axis="white")
    green <- "#49A56E"
    if(logp) {
        ylab <- expression(paste(-log[10], " p-value"))
        out_snps[,lodcolumn] <- -pchisq(out_snps[,lodcolumn]*2*log(10),
                                        1, log=TRUE, lower=FALSE)/log(10)
        out_hk[,lodcolumn] <- -pchisq(out_hk[,lodcolumn]*2*log(10),
                                      18, log=TRUE, lower=FALSE)/log(10)
        ymx <- max(c(out_snps[,lodcolumn], out_hk[,lodcolumn]))
    } else {
        ylab <- "LOD score"
    }
    plot(out_snps, arab$pmap, lod=lodcolumn, type="p", pch=16, altcol=green, gap=0,
         ylim=c(0, ymx*1.05), cex=0.6, ylab=ylab)
    if(version==2) {
        plot(out_hk, arab$pmap, lod=lodcolumn, gap=0, altcol=green, add=TRUE)
    }
    # panel label in the top-right corner, placed in user coordinates
    u <- par("usr")
    text(u[2]-diff(u[1:2])*0.015, u[4]-diff(u[3:4])*0.02, title, col="black", adj=c(1, 1))
}
# Write each figure variant to its own PDF
pdf("../Figs/snp_asso.pdf", height=5.5, width=11, pointsize=16)
plot_snp_asso()
dev.off()
pdf("../Figs/snp_asso_B.pdf", height=5.5, width=11, pointsize=16)
plot_snp_asso(2)
dev.off()
pdf("../Figs/snp_asso_C.pdf", height=5.5, width=11, pointsize=16)
plot_snp_asso(2, "seed_weight", "seed weight")
dev.off()
pdf("../Figs/snp_asso_B_logp.pdf", height=5.5, width=11, pointsize=16)
plot_snp_asso(2, logp=TRUE)
dev.off()
pdf("../Figs/snp_asso_C_logp.pdf", height=5.5, width=11, pointsize=16)
plot_snp_asso(2, "seed_weight", "seed weight", logp=TRUE)
dev.off()
|
87231ec159ef74c90f23472f46a39ee183405112
|
af493dbfe092b00363d7b03dd6f01399a83841f5
|
/man/canonical_name_create.Rd
|
90ade912b7ec183df69c415660f27d5441240931
|
[] |
no_license
|
antaldaniel/eurobarometer_old2
|
7665b252b20e950db427bd43956efc9d3e88f90b
|
042e13e447f1a43c898f9e8887f7ed37aa3983c2
|
refs/heads/master
| 2021-10-09T15:33:20.307410
| 2018-12-30T19:06:55
| 2018-12-30T19:06:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 469
|
rd
|
canonical_name_create.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/canonical_name_create.R
\name{canonical_name_create}
\alias{canonical_name_create}
\title{Create a canonical variable name for the R objects}
\usage{
canonical_name_create(x)
}
\arguments{
\item{x}{A vector of the GESIS variable names}
}
\description{
Create a canonical variable name for the R objects
}
\examples{
canonical_name_create ( c("UPPER CASE VAR", "VAR NAME WITH \% SYMBOL") )
}
|
2e228c704706e7fd4ddd47c435d9596f5b08a1ac
|
cd1e004315e9de253d4a02d928a72ff585de54c5
|
/R/converters.R
|
2db9c43c4058e9afac4b0dac4ce6266f508355be
|
[] |
no_license
|
thefooj/cassandrasimple
|
8e6898439bf7ad4fea587cf7bbd642437de60848
|
e87092f52d8a7e2fe4b323f702d164cf05fa4e98
|
refs/heads/master
| 2021-05-23T04:09:46.075233
| 2020-10-06T15:32:14
| 2020-10-06T15:32:14
| 81,841,891
| 1
| 0
| null | 2018-06-19T20:48:19
| 2017-02-13T15:48:44
|
R
|
UTF-8
|
R
| false
| false
| 618
|
r
|
converters.R
|
as_date_from_cql_date <- function(localdate) {
  ## Render the Java LocalDate as its ISO "YYYY-MM-DD" string via
  ## toString(), then parse that string into an R Date.
  iso_string <- rJava::.jcall(localdate, 'S', 'toString')
  as.Date(iso_string, tz="UTC")
}
as_posixct_from_cql_get_timestamp <- function(javaDate) {
  ## java.util.Date#getTime() returns milliseconds since the 1970-01-01
  ## epoch as a Java long (64-bit signed); convert to seconds before
  ## building the POSIXct value in UTC.
  epoch_millis <- rJava::.jcall(javaDate, 'J', 'getTime')
  as.POSIXct(epoch_millis / 1000, origin="1970-01-01", tz="UTC")
}
as_date_from_1970_epoch_days <- function(data) {
  ## Interpret `data` as whole days elapsed since the 1970-01-01 epoch.
  ## NOTE(review): `tz` is not a formal of as.Date.numeric and appears to
  ## be ignored; kept so the call is byte-compatible with the original.
  epoch_origin <- "1970-01-01"
  as.Date(data, origin=epoch_origin, tz="UTC")
}
as_posixct_from_1970_epoch_seconds <- function(data) {
  ## Interpret `data` as seconds elapsed since the 1970-01-01 UTC epoch.
  epoch_origin <- "1970-01-01"
  as.POSIXct(data, origin=epoch_origin, tz="UTC")
}
|
a784ff68944d42cf9c7a787e5deacf743dcc89d6
|
ebcaca53d888b5b6ecc2d281937eab35df236554
|
/code/bias/editorial_rejections_by_journal.R
|
bf94dcaa8d004392ef636db45a76a9773e66ee7f
|
[
"MIT"
] |
permissive
|
SchlossLab/Hagan_Gender_mBio_2020
|
4c4eb3dd8b4f030dbea2eca29ae4ebf1fa8da2b2
|
118e082fcb1d6a347e8e0f16def7a1ec1ca4907d
|
refs/heads/master
| 2023-01-01T03:12:26.188371
| 2020-10-15T17:55:14
| 2020-10-15T17:55:14
| 155,252,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,275
|
r
|
editorial_rejections_by_journal.R
|
# Editorial decisions after first review, broken down by journal and gender,
# visualised as the per-journal male-female gap in decision rates.
# NOTE(review): this script depends on objects defined elsewhere in the
# project and not visible here: `bias_data` (the manuscript table),
# `get_percent()`, `gen_gradient`, and `my_theme_horiz` — confirm they are
# loaded before sourcing this file.
#break decisions after review down by journal
j_ed_dec_data <- bias_data %>%
  # filter(!(country %in% c("Japan", "Taiwan, Province of China",
  #                        "China", "Singapore", "Hong Kong",
  #                        "Korea, Republic of"))) %>%
  # Keep first-version manuscripts only, restricted to three decision types.
  filter(version.reviewed == 0) %>%
  filter(grouped.vers == 1) %>%
  #filter(US.inst == "yes") %>%
  select(gender, journal, grouped.random, EJP.decision, version) %>%
  filter(EJP.decision %in% c("Accept, no revision",
                             "Reject", "Revise only")) %>%
  distinct()
# Denominators: distinct first-version records per gender across all journals.
ASM_summary_dec <- bias_data %>%
  filter(version.reviewed == 0) %>%
  filter(grouped.vers == 1) %>%
  select(gender, grouped.random, EJP.decision) %>% distinct() %>%
  group_by(gender) %>% summarise(total = n())
# All-journal percentage of each decision by gender, and the male-minus-female
# gap ("performance"); used later for the reference line in the plot.
ASM_dec <- j_ed_dec_data %>%
  group_by(gender, EJP.decision) %>%
  summarise(n = n()) %>%
  left_join(., ASM_summary_dec, by = "gender") %>%
  mutate(prop_dec = get_percent(n, total)) %>%
  select(-n, -total) %>% distinct() %>%
  spread(key = gender, value = prop_dec) %>%
  mutate(performance = male - female)
# Per-journal denominators (by gender) and per-journal decision counts.
journal_summary <- j_ed_dec_data %>%
  group_by(journal, gender) %>% summarise(total = n())
journal_dec_summary <- j_ed_dec_data %>%
  group_by(journal, EJP.decision) %>%
  summarise(n = n())
# Per-journal male-minus-female gap for each decision type, plotted as
# horizontal bars faceted by decision, annotated with decision counts.
ed_rejections_E <- j_ed_dec_data %>%
  group_by(journal, gender, EJP.decision) %>%
  summarise(n = n()) %>%
  left_join(., journal_summary,
            by = c("journal", "gender")) %>%
  distinct() %>%
  mutate(prop_rej = get_percent(n, total)) %>%
  select(-n, -total) %>%
  spread(key = gender, value = prop_rej) %>%
  mutate(performance = male - female) %>%
  left_join(., journal_dec_summary,
            by = c("journal", "EJP.decision")) %>%
  ggplot() +
  geom_col(aes(x = fct_reorder(journal, performance),
               y = performance, fill = performance)) +
  facet_wrap(~EJP.decision)+
  coord_flip()+
  gen_gradient+
  geom_hline(data = ASM_dec, aes(yintercept = performance))+
  geom_text(aes(x = journal, y = 1.5, label = n))+
  labs(x = "Journal",
       y = "\nDifference in Decision after First Review",
       caption = "Vertical line indicates value for
       all journals combined")+
  my_theme_horiz
#ggsave("results/first_decision_j.png")
|
c351098e27312d33ade6f2f0d53b35609c266069
|
117bdbc2b2380aeacec87cf6c8b24b18ab8c5bee
|
/man/list.gene.snp.Rd
|
0fd081ca10f05e8b46b5b25e8d5d99d276002f8d
|
[] |
no_license
|
cran/PIGE
|
1cc3f6aa9bfd47408be850188b1e3b7dfad90557
|
682c43bd19a050c6b5eb055f7184e5849e60cf94
|
refs/heads/master
| 2021-06-11T21:06:21.948691
| 2017-08-30T07:23:09
| 2017-08-30T07:23:09
| 17,681,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
rd
|
list.gene.snp.Rd
|
\docType{data}
\name{list.gene.snp}
\alias{list.gene.snp}
\title{Fictive list for the case-control study example containing the names of the snp for each gene included in the studied pathways}
\format{A list containing the names of the SNPs belonging to each gene analysed.}
\description{
A fictive list for the case-control study example, containing the names of
the SNPs belonging to each gene included in the studied pathways.
}
\keyword{datasets}
|
c191a5d0ce76b43f2340b849f5f521cf2e213946
|
91969900434366f888c9e424e9078b95aa9cf9e4
|
/pairwise_comparison_script.R
|
de63101b7bf139b5df1eecaa5d804b1da7d493d3
|
[
"MIT"
] |
permissive
|
evelienadri/herelleviridae
|
138c98b5cb8c11c090c218cb4ad434f3cd25f290
|
f7e9a3d99c046244ea99f5766e734fde926df7d1
|
refs/heads/master
| 2021-07-07T14:15:23.190995
| 2017-10-02T13:23:59
| 2017-10-02T13:23:59
| 105,536,381
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 628
|
r
|
pairwise_comparison_script.R
|
# Pairwise comparison of shared gene content between genomes.
# NOTE(review): assumes the input is a binary (0/1) gene presence/absence
# matrix with genes in rows and genomes in columns — confirm against the
# upstream pipeline that produced the table.
# Import table
gene_presence_absence <- read.delim("~/path_to_file/filename", row.names=1)
m <- as.matrix(gene_presence_absence)
# Make a vector with the column sums (number of genes present per genome)
s <- as.vector(colSums(m))
# Pairwise sums between all columns into out matrix: expand.grid enumerates
# every ordered pair of columns; a row contributes to the count when both
# entries are 1 (their sum equals 2 — `+` binds tighter than `==`), i.e.
# out[i,j] is the number of genes shared by genomes i and j.
n <- seq_len(ncol(m))
id <- expand.grid(n,n)
out <- matrix(colSums(m[ , id[,1]] + m[ , id[,2]] == 2), ncol = length(n))
# Self-comparisons are forced to 1 rather than the actual gene count.
diag(out) <- 1
# Add headers to out matrix
headers <- as.list(colnames(m))
colnames(out) <- headers
rownames(out) <- headers
# Divide out matrix by sum vector to get percentages.
# NOTE(review): `out/s` recycles s column-major, so row i is divided by
# s[i]; each entry is |shared(i,j)| / |genes in genome i| (asymmetric) —
# verify this row-wise normalisation is the intended one.
p <- out/s
diag(p) <- 1
write.table(p, file = "pairwise_comp.txt", sep = "\t")
|
7d4f57b4c87c19d5ac0804014349b7e12f1530a9
|
4c0394633c8ceb95fc525a3594211636b1c1981b
|
/tests/testthat/test-rename.R
|
ea757bb7892de989de19d8e90771e1788ee3e2fd
|
[
"MIT"
] |
permissive
|
markfairbanks/tidytable
|
8401b92a412fdd8b37ff7d4fa54ee6e9b0939cdc
|
205c8432bcb3e14e7ac7daba1f4916d95a4aba78
|
refs/heads/main
| 2023-09-02T10:46:35.003118
| 2023-08-31T19:16:36
| 2023-08-31T19:16:36
| 221,988,616
| 357
| 33
|
NOASSERTION
| 2023-09-12T20:07:14
| 2019-11-15T19:20:49
|
R
|
UTF-8
|
R
| false
| false
| 3,130
|
r
|
test-rename.R
|
# Unit tests (testthat) for tidytable's rename() and rename_with().
# Covers: basic renames, copy semantics (no data.table modify-by-reference),
# data.frame inputs, backticked/spaced names, positional renames, formula vs
# anonymous-function helpers, tidy-eval embracing, and grouped tidytables.
test_that("rename() works for one column", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2), z = c("a", "a", "b"))
  df <- df %>%
    rename(new_x = x)
  expect_named(df, c("new_x", "y", "z"))
})
# rename() must copy: the original data.table keeps its names.
test_that("rename() doesn't modify-by-reference", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2), z = c("a", "a", "b"))
  df %>%
    rename(new_x = x)
  expect_named(df, c("x", "y", "z"))
})
test_that("rename() works for one column w/ data.frame", {
  df <- data.frame(x = c(1,1,1), y = c(2,2,2), z = c("a", "a", "b"))
  df <- df %>%
    rename(new_x = x)
  expect_named(df, c("new_x", "y", "z"))
})
test_that("rename() works for spaced column names", {
  df <- data.table(`test spaced column` = 1:3, y = c(2,2,2), z = c("a", "a", "b"))
  df <- df %>%
    rename(new_name = `test spaced column`)
  expect_named(df, c("new_name", "y", "z"))
})
test_that("rename() works for multiple columns", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2), z = c("a", "a", "b"))
  df <- df %>%
    rename(new_x = x,
           new_y = y)
  expect_named(df, c("new_x", "new_y", "z"))
})
test_that("rename() works by column position", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2), z = c("a", "a", "b"))
  df <- df %>%
    rename(new_x = 1,
           new_y = 2)
  expect_named(df, c("new_x", "new_y", "z"))
})
# rename_with(): apply a function to column names.
test_that("rename_with() works for all variables", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2))
  df <- df %>%
    rename_with(~ paste0(.x, "_append"))
  expect_named(df, c("x_append", "y_append"))
})
test_that("rename_with() doesn't modify by reference", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2))
  df %>%
    rename_with(~ paste0(.x, "_append"))
  expect_named(df, c("x", "y"))
})
test_that("rename_with() works for all variables w/ data.frame", {
  df <- data.frame(x = c(1,1,1), y = c(2,2,2))
  df <- df %>%
    rename_with(~ paste0(.x, "_append"))
  expect_named(df, c("x_append", "y_append"))
})
# Column selection via tidyselect predicate (where()).
test_that("rename_with() works with predicate", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2), z = c("a", "a", "b"))
  df <- df %>%
    rename_with(~ paste0(.x, "_character"), where(is.character))
  expect_named(df, c("x","y","z_character"))
})
# Formula (~) and anonymous-function helpers must be equivalent.
test_that("rename_with() works with twiddle", {
  df <- data.table(x_start = c(1,1,1), end_x = c(2,2,2), z = c("a", "a", "b"))
  anon_df <- df %>%
    rename_with(function(.x) paste0(.x, "_append"), c(starts_with("x")))
  twiddle_df <- df %>%
    rename_with(~ paste0(.x, "_append"), c(starts_with("x")))
  expect_equal(anon_df, twiddle_df)
})
# {{ }} embracing lets rename() be wrapped inside user-defined functions.
test_that("can make a custom function with quosures", {
  df <- data.table(x = c(1,1,1), y = c(2,2,2), z = c("a", "a", "b"))
  rename_fn <- function(data, new_name, old_name) {
    data %>%
      rename({{ new_name }} := {{ old_name }})
  }
  df <- df %>%
    rename_fn(new_x, x)
  expect_named(df, c("new_x", "y", "z"))
})
# Renaming a grouping column must update the stored group variables too.
test_that("works with a grouped_tt", {
  df <- tidytable(x = 1, y = 2, z = 3) %>%
    group_by(x)
  res <- df %>%
    rename(new_x = x,
           new_y = y)
  expect_named(res, c("new_x", "new_y", "z"))
  expect_equal(group_vars(res), "new_x")
  expect_true(is_grouped_df(res))
})
|
452ade1e4e75907735b50deeb97a3f14621fce54
|
76660ce4a227e51225c521432b924dbb23082a4a
|
/eda_graphs_and_plotting_course_project_1/plot3.R
|
5a83a1e22a82dc7b704ea8a0ee9be57a1f61ee5d
|
[] |
no_license
|
esgarg/datasciencecoursera
|
862ae29e5b883399e47b97797db39e27686b85bc
|
5bfec82c1e9855fac87857aadfd061302dce0f77
|
refs/heads/master
| 2021-01-15T11:04:16.137661
| 2017-11-17T17:18:08
| 2017-11-17T17:18:08
| 99,611,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,240
|
r
|
plot3.R
|
# Exploratory plot 3: household energy sub-metering for 1-2 Feb 2007.
# Pre-filters the large raw file with grep so R only parses the two days of
# interest, then plots the three sub-metering series and saves plot3.png.
# NOTE(review): the grep/pipe approach requires a Unix-like shell with grep
# on PATH; it will not work on plain Windows.
# data source, assuming it is downloaded to the working directory
fileURL <- "household_power_consumption.txt"
# create the grep command; dates in the raw file are d/m/Y, so ^[12]/2/2007
# matches 1/2/2007 and 2/2/2007 only
grepCMD <- paste("grep", "^[12]/2/2007", fileURL, "2>/dev/null")
# get the colNames by only reading one line (grep output carries no header)
colNames <- colnames(read.table(fileURL, nrows=1, header=TRUE, sep=";", na.strings="?"))
# read only selected dates from the DB and assign colNames
epcData <- read.table(pipe(grepCMD), sep=";", na.strings="?", col.names=colNames)
# convert dates
epcData$Date <- as.Date(epcData$Date, format="%d/%m/%Y")
# convert time, assuming timezone is Paris/France since data is contributed
# from Clamart, France
tz <- "Europe/Paris"
# Fix: use the tz variable instead of repeating the hard-coded zone string
# (the original defined tz but never used it).
epcData$Time <- strptime(paste(epcData$Date, epcData$Time), format="%Y-%m-%d %H:%M:%S", tz=tz)
# plot3.png: draw on the screen device, then copy that device to a PNG file
plotFile <- "plot3.png"
par(bg="white")
plot(epcData$Time, epcData$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering")
lines(epcData$Time, epcData$Sub_metering_2, type="l", col="red")
lines(epcData$Time, epcData$Sub_metering_3, type="l", col="blue")
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=c(1,1,1))
dev.copy(png, height=480, width=480, pointsize=8, file=plotFile)
dev.off()
|
db0f85b4d39162b2c6a46547f1a67fa373bcee0a
|
622684fa19669ed8746dade3583c2207ad1b3d70
|
/R/genoDist.R
|
0ad29711f3aea0f3b87d1b2c8ac4757ff61fcf46
|
[
"MIT"
] |
permissive
|
CMWbio/geaR
|
b3ef3299467e07b026ece8cb755f4b4e0c0a7ab3
|
fe837b8d19342deaa24fb733af563af35992f346
|
refs/heads/master
| 2023-04-29T11:17:33.254755
| 2023-04-21T07:23:12
| 2023-04-21T07:23:12
| 145,640,886
| 5
| 0
| null | 2020-08-10T10:07:52
| 2018-08-22T01:46:26
|
R
|
UTF-8
|
R
| false
| false
| 2,747
|
r
|
genoDist.R
|
#' Calculates the hamming distance between alleles
#'
#' @description Used in the calculation of diversity statistics
#'
#' @details Authors: Chris Ward
#' Calculates the hamming distance using matrix multiplication
#'
#'
#' @param genoMat A \code{matrix} \cr
#' Allele genotypes for each individual (sites in rows, individuals in columns)
#' @param pairwiseDeletion \code{logical}
#' If \code{TRUE} missing data ("N") will be removed from distance
#' calculations in a pairwise manner.
#'
#' @return A \code{matrix} of hamming distance between individuals
#'
#' @useDynLib geaR
#'
#' @import Rcpp
#'
#' @rdname genoDist
#' @export
genoDist <- function(genoMat, pairwiseDeletion){
## Distinct genotype codes present in the matrix.
## (Replaces union(genoMat, genoMat), which is just unique() on the
## flattened matrix.)
geno <- unique(as.vector(genoMat))
## For each code, build a 0/1 indicator of which samples carry it at each
## site; t(ind) %*% ind then counts, for every pair of samples, the sites
## where both carry that code.
dat <- lapply(geno, function(f){
mat <- apply(genoMat == f, 2, as.double)
multiMat <- geaR:::eigenMapMatMult(t(mat), mat)
colnames(multiMat) <- colnames(mat)
rownames(multiMat) <- colnames(mat)
multiMat
})
names(dat) <- geno
## Summing the per-code match counts gives the number of identical sites
## for each sample pair; the remainder of the S sites are differences.
sim <- Reduce("+", dat)
S <- nrow(genoMat)
dif <- S - sim
## Fix: scalar condition uses && (short-circuit) rather than elementwise &.
if(pairwiseDeletion && any(genoMat == "N")){
## Pairwise deletion: drop sites where either sample is missing ("N").
notNmat <- genoMat != "N"
## get number of non-N sites for each pairwise comparison
nonNsites <- t(notNmat) %*% notNmat
## total number of Ns between each pair of individuals
Ntotal <- S - nonNsites
## N same between pairwise indv (double-N sites were counted as matches)
Nsim <- dat[["N"]]
## N different between pairwise indv
Ndif <- Ntotal - Nsim
## non-N differences, normalised by the number of jointly-called sites
nonNdif <- dif - Ndif
distMat <- nonNdif / nonNsites
}else{
distMat <- dif / nrow(genoMat)
}
## (Dead commented-out Rcpp duplicate of this function removed.)
return(distMat)
}
|
d912c46c9f0977756eec91a71db67219c4646a09
|
90e772dfeb9fc441424dcf5e5beaa545af606f1c
|
/inst/shiny/ui.R
|
89f20d29ec2a7ec31e1bf0f5a34791a9574f689c
|
[
"GPL-3.0-only"
] |
permissive
|
chenjy327/MesKit
|
97d356c8c8ac73493ba6f60488d5a0c6aae23092
|
c9eb589fca6471e30e45cb9e03030af5ade69f83
|
refs/heads/master
| 2021-08-17T07:48:53.618404
| 2021-06-24T06:19:08
| 2021-06-24T06:19:08
| 304,196,319
| 0
| 0
|
MIT
| 2020-10-15T03:10:38
| 2020-10-15T03:10:37
| null |
UTF-8
|
R
| false
| false
| 148,596
|
r
|
ui.R
|
#required packages
suppressMessages(library(shiny))
suppressMessages(library(DT))
suppressMessages(library(shinydashboard))
suppressMessages(library(shinyWidgets))
suppressMessages(library(shinycssloaders))
suppressMessages(library(shinyBS))
suppressMessages(library(MesKit))
suppressMessages(library(BSgenome.Hsapiens.UCSC.hg19))
# Sidebar ----
# Dashboard navigation: each menuItem's tabName selects the matching tabItem
# of the dashboard body (home, input, ITH are defined in this file; AL,
# clone, tree presumably further down — confirm).
sidebar <- dashboardSidebar(
  width = 300,
  sidebarMenu(id="sidername",selected='home',
              menuItem(strong("Home"), tabName = "home", icon = icon("home")),
              menuItem(strong("Input data"), tabName = "input", icon = icon("gear")),
              menuItem(strong("Mutational landscape"), tabName = "AL", icon = icon("bar-chart")),
              menuItem(strong("ITH evaluation"), tabName = "ITH", icon = icon("bar-chart")),
              menuItem(strong("Metastatic routes inference"), tabName = "clone", icon = icon("bar-chart")),
              menuItem(strong("PhyloTree-based analysis"), tabName = "tree", icon = icon("tree"))
  )
)
# Home tab ---- : introduction text plus a workflow image and feature list.
# NOTE(review): the intro paragraph contains a typo ("ITH is involves in");
# it is a runtime string, so it is left unchanged here.
bodyHome <- tabItem("home",
                    fluidRow(
                      box(
                        width = 12,
                        status = "info",
                        solidHeader = TRUE,
                        title = div(strong("Introduction"),style = "font-size:2em; font-weight:500;"),
                        p("Cancer develops as a result of the accumulation of genetic aberrations, which promotes the generation of distinct subpopulations of tumor cells and shapes intra-tumor heterogeneity (ITH). ITH is involves in tumor growth, progression, invasion, and metastasis, presenting one of the most significant barriers to accurate diagnoses and effective treatments of cancers. Therefore, dissecting and interpreting ITH of tumor dynamics is one of the major tasks in cancer research. Here, we present MesKit, an R/Bioconductor package that provides commonly used analysis and visualization modules for MRS studies.",
                          style = "font-size:18px; font-weight:500;line-height:40px;"),
                        br()
                      )
                    ),
                    fluidRow(
                      box(
                        width = 12,
                        status = "info",
                        solidHeader = TRUE,
                        title = div(strong("Overview of MesKit"),style = "font-size:2em; font-weight:500;"),
                        fluidRow(
                          column(
                            width = 6,
                            # Workflow figure served from the app's www/ image directory.
                            div(img(src = "image/MesKit_workflow.png", width = "90%",height = "72%"),
                                style="text-align: center;float:left;margin:0;padding:0")
                          ),
                          column(
                            width = 5,
                            div(
                              br(),
                              br(),
                              br(),
                              br(),
                              h3(strong("With this MesKit Shiny APP:")),
                              p("- Visualize mutational landscape",br(),
                                "- Quantify heterogeneity within or between tumors from the same patient",br(),
                                "- Infer metastatic routes",br(),
                                "- Perform mutational signature analysis at different levels ",br(),
                                "- Construct and visualize phylogenetic trees",
                                style = "font-size:18px; font-weight:500;line-height:50px;"),
                              style = "text-align: left;float:left;padding-left:0px;margin:0px"
                            )
                          )
                        )
                      )
                    )
)
# Input tab ---- : upload the MAF, clinical and (optional) CCF files, choose
# the genome reference, and preview the parsed tables on the right.
# NOTE(review): the tags$button attribute "Id" (capital I) is not the HTML
# "id" attribute — confirm whether these help buttons are meant to have an
# element id for their action-button bindings.
bodyIP <- tabItem("input",
                  fluidRow(
                    column(
                      width = 12,
                      # Left column: all input controls.
                      column(
                        width = 3,
                        box(
                          div(shiny::icon("gear"), strong("Input Section"), inline =TRUE,style = "font-size:27px; font-weight:500;"),
                          br(),
                          width = NULL,
                          # Mutation Annotation Format file (required input).
                          fileInput(inputId = 'mafFile',
                                    label = div(style = "font-size:1.5em; font-weight:600;",'MAF file',
                                                tags$button(
                                                  Id = "iecontrol01",
                                                  type = "button",
                                                  class = "bttn-material-circle",
                                                  class = "btn action-button",
                                                  list(tags$img(src = "image/button.png",width = "22px",height = "22px")),
                                                  style = " background-position: center;padding:0;margin-bottom:7px;"
                                                )
                                    ),
                                    placeholder = "Example: CRC_HZ.maf",
                                    width = 400),
                          # Clinical annotation file.
                          fileInput(inputId = 'clinFile',
                                    label = div(style = "font-size:1.5em; font-weight:600;",'Clinical file',
                                                tags$button(
                                                  Id = "iecontrol_clin",
                                                  type = "button",
                                                  class = "bttn-material-circle",
                                                  class = "btn action-button",
                                                  list(tags$img(src = "image/button.png",width = "22px",height = "22px")),
                                                  style = " background-position: center;padding:0;margin-bottom:7px;"
                                                )
                                    ),
                                    placeholder = "Example: CRC_HZ.clin.txt",
                                    width = 400),
                          # Optional cancer-cell-fraction file, revealed by the checkbox below.
                          checkboxInput('useccffile', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px",
                                                                  'CCF file',
                                                                  tags$button(
                                                                    Id = "iecontrol02",
                                                                    type = "button",
                                                                    class = "bttn-material-circle",
                                                                    class = "btn action-button",
                                                                    list(tags$img(src = "image/button.png",width = "22px",height = "22px")),
                                                                    style = " background-position: center;padding:0;margin-bottom:7px;"
                                                                  )),
                                        value = FALSE,width = 400),
                          bsTooltip(id = "useccffile",
                                    title = "CCF file of somatic mutations. Default NULL.",
                                    placement = "top",
                                    trigger = "hover"),
                          # Shown only when the CCF checkbox is ticked.
                          conditionalPanel(
                            condition = "input.useccffile == true",
                            fileInput('ccfFile',label = '',placeholder = "Example: CRC_HZ.ccf.tsv", width = 400),
                            checkboxInput('ccfFile_use_indel_ccf',label = div(style = "font-size:1.5em; font-weight:600;padding-left:15px ", 'Use indel ccf'),value = FALSE),
                            bsTooltip(id = "ccfFile_use_indel_ccf",
                                      title = "Whether use indel in ccfFile. Default FALSE.",
                                      placement = "top",
                                      trigger = "hover"),
                          ),
                          # UCSC human genome build used downstream.
                          selectInput('ref', label = div(style = "font-size:1.5em; font-weight:600; ", 'Genome reference'),
                                      choices = c('hg18','hg19','hg38'),selected = "hg19", width = 400),
                          bsTooltip(id = "ref",
                                    title = "Human reference genome versions of hg18,hg19 or hg38 by UCSC",
                                    placement = "top",
                                    trigger = "hover"),
                          actionBttn('submit1',div(
                            strong("Upload data"),align = 'center'))
                        )
                      ),
                      # Right column: data preview and input-error placeholders,
                      # rendered server-side via uiOutput.
                      column(
                        width = 9,
                        box(
                          width = NULL,
                          uiOutput("datapreview"),
                          # DT::dataTableOutput('maftable', width = '100%')
                          uiOutput("ie1"),
                          uiOutput("ie2"),
                          # NOTE(review): trailing comma after the next argument
                          # creates an empty argument to box() — verify box()
                          # tolerates it.
                          uiOutput("ie_clin"),
                        )
                      )
                    )
                  )
)
bodyITH <- tabItem("ITH",
fluidRow(
column(
width = 3,
box(
width = NULL,
conditionalPanel(
condition = "input.tith == 'ith_mathscore'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("mathscore_patientid_ui"),
checkboxInput('mathscore_withintumor',
value = FALSE,
label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Within tumor'),
width = 500),
bsTooltip(id = "mathscore_withintumor",
title = "Calculate MATH score within tumors in each patients",
placement = "top",
trigger = "hover"),
checkboxInput('mathscore_useadjvaf',
label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use adjusted vaf'),
value = FALSE,
width = 400),
bsTooltip(id = "mathscore_useadjvaf",
title = "Use adjusted VAF in analysis when adjusted VAF or CCF is available. Default FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('mathscore_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "mathscore_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "mathscore_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "mathscore_minvaf",
title = "The minimum VAF for filtering variants. Default: 0. ",
placement = "top",
trigger = "hover"),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_mathscore", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.tith == 'ith_vafcluster'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("vafcluster_patientid_ui"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "vafcluster_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "vafcluster_minvaf",
title = "The minimum value of VAF. Default: 0. Option: on the scale of 0 to 1.",
placement = "top",
trigger = "hover"),
# br(),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Max vaf")),
# tags$td(width = "60%", textInput(inputId = "vafcluster_maxvaf", value = 1, label = NULL)))
# ),
# bsTooltip(id = "vafcluster_maxvaf",
# title = "The maximum value of VAF. Default: 0. Option: on the scale of 0 to 1.",
# placement = "top",
# trigger = "hover"),
br(),
checkboxInput('vafcluster_withintumor',label = div(style = "font-size:1.5em; font-weight:600;padding-left:15px ", 'Within tumor'),value = FALSE),
bsTooltip(id = "vafcluster_withintumor",
title = "Cluster VAF within tumors in each patients,default is FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('vafcluster_useadjvaf', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use adjusted vaf'),value = FALSE, width = 400),
bsTooltip(id = "vafcluster_useadjvaf",
title = "Use adjusted VAF in analysis when adjusted VAF or CCF is available. Default FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('vafcluster_useccf', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use ccf'),value = FALSE, width = 400),
bsTooltip(id = "vafcluster_useccf",
title = "Cluster CCF. Default FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('vafcluster_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "vafcluster_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
fileInput(inputId = 'vafcluster_segfile',
label = div(style = "font-size:1.5em; font-weight:600; ", 'Segment file'),
width = 400),
sliderInput(inputId='vafcluster_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 400,max = 1000, value = 470, width = 500),
sliderInput(inputId='vafcluster_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 470, width = 500),
br(),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit3", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.tith == 'ith_ccfauc'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("ccfauc_patientid_ui"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
tags$td(width = "60%", textInput(inputId = "ccfauc_minccf", value = 0, label = NULL)))
),
bsTooltip(id = "ccfauc_minccf",
title = "The minimum value of CCF. Default: 0.",
placement = "top",
trigger = "hover"),
br(),
checkboxInput('ccfauc_withintumor',
value = FALSE,
label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Within tumor'),
width = 500),
bsTooltip(id = "ccfauc_withintumor",
title = "Calculate AUC within tumors in each patients, default is FALSE.",
placement = "top",
trigger = "hover"),
# checkboxInput('ccfauc_useadjvaf', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use adjusted vaf'),value = FALSE, width = 400),
# bsTooltip(id = "ccfauc_useadjvaf",
# title = "Use adjusted VAF in analysis when adjusted VAF or CCF is available. Default FALSE.",
# placement = "top",
# trigger = "hover"),
checkboxInput('ccfauc_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "ccfauc_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
sliderInput(inputId='ccfauc_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 400,max = 1000, value = 747, width = 500),
sliderInput(inputId='ccfauc_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 560, width = 500),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_ccfauc", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.tith == 'ith_calfst'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("calfst_patientid_ui"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "calfst_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "calfst_minvaf",
title = "Specify the minimum VAF_adj, default is 0.02.",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Min total depth")),
tags$td(width = "40%", textInput(inputId = "calfst_mintotaldepth", value = 2, label = NULL)))
),
bsTooltip(id = "calfst_mintotaldepth",
title = "The minimum total allele depth for filtering variants. Default: 2.",
placement = "top",
trigger = "hover"),
checkboxInput('calfst_withinTumor',
value = FALSE,
label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Within Tumor'),
width = 500),
bsTooltip(id = "calfst_withinTumor",
title = "Calculate fst within types in each patients,default is FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('calfst_useadjvaf', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use adjusted vaf'),value = FALSE, width = 400),
bsTooltip(id = "calfst_useadjvaf",
title = "Use adjusted VAF in analysis when adjusted VAF or CCF is available. Default FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('calfst_usecircle',
value = TRUE,
label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Use circle'),
width = 500),
checkboxInput('calfst_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "calfst_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
bsTooltip(id = "calfst_usecircle",
title = "Logical (Default:TRUE). Whether to use circle as visualization method of correlation matrix",
placement = "top",
trigger = "hover"),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Title: ")),
# tags$td(width = "70%", textInput(inputId = "calfst_title", value = NULL, label = NULL)))
# ),
# bsTooltip(id = "calfst_title",
# title = "The title of the plot.",
# placement = "top",
# trigger = "hover"),
# br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "number.cex")),
tags$td(width = "50%", textInput(inputId = "calfst_numbercex", value = 8, label = NULL)))
),
bsTooltip(id = "calfst_numbercex",
title = "The size of text shown in correlation plot. Default 8.",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "number.col")),
tags$td(width = "50%", textInput(inputId = "calfst_numbercol", value = "#C77960", label = NULL)))
),
bsTooltip(id = "calfst_numbercol",
title = "The color of text shown in correlation plot. Default #C77960.",
placement = "top",
trigger = "hover"),
br(),
sliderInput(inputId='calfst_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 400,max = 1000, value = 560, width = 500),
sliderInput(inputId='calfst_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 560, width = 500),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_calfst", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.tith == 'ith_mutheatmap'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("mutheatmap_patientid_ui"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "mutheatmap_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "mutheatmap_minvaf",
title = "The minimum value of VAF. Default: 0. Option: on the scale of 0 to 1.",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
tags$td(width = "60%", textInput(inputId = "mutheatmap_minccf", value = 0, label = NULL)))
),
bsTooltip(id = "mutheatmap_minccf",
title = "The minimum value of CCF. Default: 0. Option: on the scale of 0 to 1.",
placement = "top",
trigger = "hover"),
br(),
checkboxInput('mutheatmap_useadjvaf', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use adjusted vaf'),value = FALSE, width = 400),
bsTooltip(id = "mutheatmap_useadjvaf",
title = "Use adjusted VAF in analysis when adjusted VAF or CCF is available. Default FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('mutheatmap_useccf',
value = FALSE,
label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Use ccf'),
width = 500),
bsTooltip(id = "mutheatmap_useccf",
title = "Logical. If FALSE (default), print a binary heatmap of mutations. Otherwise, print a cancer cell frequency (CCF) heatmap.",
placement = "top",
trigger = "hover"),
checkboxInput('mutheatmap_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "mutheatmap_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
checkboxInput('mutheatmap_classByTumor', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Class by tumor'),value = FALSE,width = 400),
bsTooltip(id = "mutheatmap_classByTumor",
title = "FALSE(Default). Define shared pattern of mutations based on tumor types (TRUE) or samples (FALSE)",
placement = "top",
trigger = "hover"),
# bsTooltip(id = "mutheatmap_useccf",
# title = "Logical. If FALSE (default), print a binary heatmap of mutations. Otherwise, print a cancer cell frequency (CCF) heatmap.",
# placement = "top",
# trigger = "hover"),
# fileInput(inputId = 'mutheatmap_genelist',
# label = div(style = "font-size:1.5em; font-weight:600; ", 'Gene list file'),
# placeholder = "Default: IntOGen-DriverGenes_HC.tsv",
# width = 400),
uiOutput("mutheatmap_parameters_ui"),
# checkboxInput('mutheatmap_plotgenelist',
# value = FALSE,
# label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Plot gene list'),
# width = 500),
# bsTooltip(id = "mutheatmap_plotgenelist",
# title = "If TRUE, plot heatmap with genes on geneList when geneList is not NULL.Default FALSE.",
# placement = "top",
# trigger = "hover"),
#
# checkboxInput('mutheatmap_showgene',
# value = FALSE,
# label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Show gene'),
# width = 500),
# bsTooltip(id = "mutheatmap_showgene",
# title = "Show the name of genes next to the heatmap.Default FALSE.",
# placement = "top",
# trigger = "hover"),
#
# checkboxInput('mutheatmap_showgenelist',
# value = TRUE,
# label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Show gene list'),
# width = 500),
# bsTooltip(id = "mutheatmap_showgenelist",
# title = "Show the names of gene on the geneList.Default TRUE.",
# placement = "top",
# trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "60%", div(style = "font-size:1.5em; font-weight:600; ", "Mutation threshold")),
tags$td(width = "30%", textInput(inputId = "mutheatmap_mutthreshold", value = 150, label = NULL)))
),
bsTooltip(id = "mutheatmap_mutthreshold",
title = "show.gene and show.geneList will be FALSE when patient have more mutations than threshold.Default is 150.",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Sample text size")),
tags$td(width = "40%", textInput(inputId = "mutheatmap_sampletextsize", value = 9, label = NULL)))
),
bsTooltip(id = "mutheatmap_sampletextsize",
title = "Size of sample name.Default 9.",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Legend title size")),
tags$td(width = "40%", textInput(inputId = "mutheatmap_legendtitlesize", value = 10, label = NULL)))
),
bsTooltip(id = "mutheatmap_legendtitlesize",
title = "Size of legend title.Default 9.",
placement = "top",
trigger = "hover"),
br(),
sliderInput(inputId='mutheatmap_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 400,max = 1000, value = 700, width = 500),
sliderInput(inputId='mutheatmap_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 560, width = 500),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_mutheatmap", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.tith == 'caInput_calneidist'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("calneidist_patientid_ui"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min CCF")),
tags$td(width = "60%", textInput(inputId = "calneidist_minccf", value = 0, label = NULL)))
),
br(),
bsTooltip(id = "calneidist_minccf",
title = "Specify the minimum CCF, default is 0.08.",
placement = "top",
trigger = "hover"),
checkboxInput('calneidist_withintumor',
value = FALSE,
label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Within Tumor'),
width = 500),
bsTooltip(id = "calneidist_withintumor",
title = "Calculate fst within tumors in each patients,default is FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('calneidist_useadjvaf', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use adjusted vaf'),value = FALSE, width = 400),
bsTooltip(id = "calneidist_useadjvaf",
title = "Use adjusted VAF in analysis when adjusted VAF or CCF is available. Default FALSE.",
placement = "top",
trigger = "hover"),
checkboxInput('calneidist_usecircle',
value = TRUE,
label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Use circle'),
width = 500),
bsTooltip(id = "calneidist_usecircle",
title = "Logical (Default:TRUE). Whether to use circle as visualization method of correlation matrix",
placement = "top",
trigger = "hover"),
checkboxInput('calneidist_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "calneidist_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Title: ")),
# tags$td(width = "70%", textInput(inputId = "calneidist_title", value = NULL, label = NULL)))
# ),
# bsTooltip(id = "calneidist_title",
# title = "The title of the plot.",
# placement = "top",
# trigger = "hover"),
# br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "number.cex")),
tags$td(width = "50%", textInput(inputId = "calneidist_numbercex", value = 8, label = NULL)))
),
bsTooltip(id = "calneidist_numbercex",
title = "The size of text shown in correlation plot. Default 8.",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "number.col")),
tags$td(width = "50%", textInput(inputId = "calneidist_numbercol", value = "#C77960", label = NULL)))
),
bsTooltip(id = "calneidist_numbercol",
title = "The color of text shown in correlation plot. Default #C77960.",
placement = "top",
trigger = "hover"),
br(),
sliderInput(inputId='calneidist_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 400,max = 1000, value = 560, width = 500),
sliderInput(inputId='calneidist_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 560, width = 500),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_calneidist", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
)
)
),
column(
width = 9,
box(
width = NULL,
div(strong("ITH evaluation"),style = "font-size:27px; font-weight:500;"),
p("Understanding the origin and development of intra-tumor heterogeneity is clinically important, which has the potential to yield insights to guide therapeutic strategies. MesKit has integrated several metrics to estimate ITH within region or between regions borrowed from published research and population genetics.",
style = "font-size:20px; font-weight:500;line-height:40px;"),
tabBox(
id = 'tith',
height = "100%",
width = "100%",
selected = "ith_mathscore",
side = "left",
tabPanel(
title = div(icon("chart-bar"), "MATH score", style = "font-size:1.5em; font-weight:600; "),
value = "ith_mathscore",
uiOutput("mathscore.patientlist"),
DT::dataTableOutput('mathScore'),
br(),
br(),
uiOutput("msdb")
),
tabPanel(
title = div(icon("image"), "Cluster mutations", style = "font-size:1.5em; font-weight:600; "),
value = "ith_vafcluster",
uiOutput("vafcluster.patientlist"),
uiOutput("vafcluster_table_ui"),
uiOutput("vafcluster.samplelist"),
div(plotOutput("vaf",height = "100%", width = "100%"),align = "left"),
br(),
uiOutput("vcdb")
),
tabPanel(
title = div(icon("image"), "AUC of CCF", style = "font-size:1.5em; font-weight:600; "),
value = "ith_ccfauc",
uiOutput('ccfauc.patientlist'),
uiOutput("ccfauc_table_ui"),
div(plotOutput("ccfauc_plot",height = "100%", width = "100%"),align = "left") ,
uiOutput("ccfauc_db_ui")
),
tabPanel(
title = div(icon("image"), "Fixation index", style = "font-size:1.5em; font-weight:600; "),
value = "ith_calfst",
uiOutput('calfst.patientlist'),
uiOutput("calfst_pair_table_ui"),
div(plotOutput("calfst_plot",height = "100%", width = "100%"),align = "left") ,
uiOutput("calfst_db_ui")
# uiOutput("calfst_avg_table_ui"),
),
tabPanel(
title = div(icon("image"), "Nei's distance", style = "font-size:1.5em; font-weight:600; "),
value = "caInput_calneidist",
uiOutput('calneidist.patientlist'),
uiOutput("calneidist_pair_table_ui"),
div(plotOutput("calneidist_plot",height = "100%", width = "100%"),align = "left") ,
uiOutput("calneidist_db_ui")
# uiOutput("calneidist_avg_table_ui"),
),
tabPanel(
title = div(icon("image"), "Heatmap", style = "font-size:1.5em; font-weight:600; "),
value = "ith_mutheatmap",
uiOutput('mutheatmap.patientlist'),
div(plotOutput("mutheatmap_plot",height = "100%", width = "100%"),align = "left") ,
uiOutput("mutheatmap_db_ui")
)
)
)
)
)
)
# UI body for the "AL" (mutational landscape) tab.
# Layout: the left column (width 3) holds parameter panels that are shown
# conditionally according to which tab of the right-hand tabBox ('al_tabbox')
# is active; the right column (width 9) holds the result panels for the
# mutational profile and the CNA profile.
# NOTE(review): several inputs of the CNA panel keep a "plotmutprofile_"
# prefix (e.g. plotmutprofile_usegisticAmpGenes) and 'plotcna_removeempytchr'
# contains a spelling slip; those ids are deliberately left untouched because
# the server code must reference exactly the same strings.
bodyAL <- tabItem(
  "AL",
  fluidRow(
    column(
      width = 3,
      box(
        width = NULL,
        # ---- parameters for the "Mutational profile" tab ----
        conditionalPanel(
          condition = "input.al_tabbox == 'pannel_plotmutprofile'",
          div(strong("Parameter"), style = "font-size:2em; font-weight:600;"),
          br(),
          uiOutput("plotmutprofile.patientlist"),
          selectInput("plotmutprofile_class",
                      label = div(style = "font-size:1.5em; font-weight:600; ", "Class"),
                      choices = c("SP", "CS", "SPCS"),
                      selected = "SP"),
          # FIX: typo "Subclonl" -> "Subclonal" in the user-facing tooltip
          bsTooltip(id = "plotmutprofile_class",
                    title = 'The class which would be represented, default is "SP" (Shared pattern: Public/Shared/Private),other options: "CS" (Clonal status: Clonal/Subclonal) and "SPCS".',
                    placement = "top",
                    trigger = "hover"),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Top genes count")),
                    tags$td(width = "40%", textInput(inputId = "plotmutprofile_topGenesCount", value = 10, label = NULL)))
          ),
          bsTooltip(id = "plotmutprofile_topGenesCount",
                    title = "The number of genes print, default is 10",
                    placement = "top",
                    trigger = "hover"),
          br(),
          checkboxInput('plotmutprofile_usegenelist', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Gene list'), value = FALSE, width = 400),
          bsTooltip(id = "plotmutprofile_usegenelist",
                    title = "A list of genes to restrict the analysis. Default NULL.",
                    placement = "top",
                    trigger = "hover"),
          # File chooser only shown when the user opts into a gene list.
          conditionalPanel(
            condition = "input.plotmutprofile_usegenelist == true",
            fileInput(inputId = 'plotmutprofile_genelist',
                      label = div(style = "font-size:1.5em; font-weight:600; ", 'Gene list file'),
                      placeholder = "Example: IntOGen-DriverGenes_COREAD.tsv",
                      width = 400)
          ),
          checkboxInput('plotmutprofile_classByTumor', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Class by tumor'), value = FALSE, width = 400),
          bsTooltip(id = "plotmutprofile_classByTumor",
                    title = "FALSE(Default). Define shared pattern of mutations based on tumor types (TRUE) or samples (FALSE)",
                    placement = "top",
                    trigger = "hover"),
          checkboxInput('plotmutprofile_remove_empty_columns', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Remove empty columns'), value = TRUE, width = 400),
          bsTooltip(id = "plotmutprofile_remove_empty_columns",
                    title = "Whether remove the samples without alterations",
                    placement = "top",
                    trigger = "hover"),
          checkboxInput('plotmutprofile_remove_empty_rows', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Remove empty rows'), value = TRUE, width = 400),
          bsTooltip(id = "plotmutprofile_remove_empty_rows",
                    title = "Whether remove the genes without alterations.",
                    placement = "top",
                    trigger = "hover"),
          checkboxInput('plotmutprofile_showColnames', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Show column names'), value = TRUE, width = 400),
          bsTooltip(id = "plotmutprofile_showColnames",
                    title = "TRUE(Default). Show sample names of columns.",
                    placement = "top",
                    trigger = "hover"),
          checkboxInput('plotmutprofile_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'), value = FALSE, width = 400),
          # BUG FIX: this tooltip previously targeted the non-existent id
          # "plotmutprofile_useadjvaf", so it never attached to the checkbox.
          bsTooltip(id = "plotmutprofile_usetumorsamplelabel",
                    title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
                    placement = "top",
                    trigger = "hover"),
          sliderInput('plotmutprofile_width', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'), min = 400, max = 1200, value = 900, width = 500),
          sliderInput('plotmutprofile_height', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'), min = 400, max = 1200, value = 900, width = 500),
          fluidRow(
            column(
              width = 9,
              div(
                tags$button(
                  id = "submit_plotmutprofile", type = "button", class = "action-button bttn",
                  class = "bttn-unite", class = paste0("bttn-md"),
                  class = paste0("bttn-default"),
                  list(strong("Start analysis"), icon("hand-right", lib = "glyphicon")),
                  style = "margin-bottom:0px;margin-right:0px;"
                )
              )
            )
          )
        ),
        # ---- parameters for the "CNA profile" tab ----
        conditionalPanel(
          condition = "input.al_tabbox == 'pannel_plotcna'",
          div(strong("Parameter"), style = "font-size:2em; font-weight:600;"),
          br(),
          fileInput(inputId = 'plotcna_segfile',
                    label = div(style = "font-size:1.5em; font-weight:600;", 'Segment file',
                                tags$button(
                                  # FIX: attribute was written "Id"; lowercase "id"
                                  # matches every other tags$button in this file and
                                  # is what the Shiny action-button binding expects.
                                  id = "iecontrol_seg",
                                  type = "button",
                                  class = "bttn-material-circle",
                                  class = "btn action-button",
                                  list(tags$img(src = "image/button.png", width = "22px", height = "22px")),
                                  style = " background-position: center;padding:0;margin-bottom:7px;"
                                )
                    ),
                    placeholder = "Example: CRC_HZ.seg.txt",
                    width = 400),
          # fileInput(inputId = 'plotcna_segfile',
          #           label = div(style = "font-size:1.5em; font-weight:600; ",
          #                       'Segment file'),
          #           width = 400),
          checkboxInput('plotmutprofile_usegisticAmpGenes', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Gistic amplification genes'), value = FALSE, width = 400),
          bsTooltip(id = "plotmutprofile_usegisticAmpGenes",
                    title = "Amplification Genes file generated by GISTIC.",
                    placement = "top",
                    trigger = "hover"),
          conditionalPanel(
            condition = "input.plotmutprofile_usegisticAmpGenes == true",
            fileInput(inputId = 'plotcna_gisticAmpGenesFile',
                      label = '',
                      placeholder = "Example: LIHC_amp_genes.conf_99.txt",
                      width = 400)
          ),
          checkboxInput('plotmutprofile_usegisticDelGenes', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Gistic deletion genes'), value = FALSE, width = 400),
          bsTooltip(id = "plotmutprofile_usegisticDelGenes",
                    title = "Deletion Genes file generated by GISTIC.",
                    placement = "top",
                    trigger = "hover"),
          conditionalPanel(
            condition = "input.plotmutprofile_usegisticDelGenes == true",
            fileInput(inputId = 'plotcna_gisticDelGenesFile',
                      label = '',
                      placeholder = "Example: LIHC_del_genes.conf_99.txt",
                      width = 400)
          ),
          checkboxInput('plotmutprofile_usegisticAllLesions', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Gistic all lesions'), value = FALSE, width = 400),
          bsTooltip(id = "plotmutprofile_usegisticAllLesions",
                    title = "Information of all lesions generated by GISTIC.",
                    placement = "top",
                    trigger = "hover"),
          conditionalPanel(
            condition = "input.plotmutprofile_usegisticAllLesions == true",
            fileInput(inputId = 'plotcna_gisticAllLesionsFile',
                      label = '',
                      placeholder = "Example: LIHC_all_lesions.conf_99.txt",
                      width = 400)
          ),
          selectInput('plotcna_refBuild', label = div(style = "font-size:1.5em; font-weight:600; ", 'Genome reference'),
                      choices = c('hg18', 'hg19', 'hg38'), selected = "hg19", width = 400),
          bsTooltip(id = "plotcna_refBuild",
                    title = "Human reference genome versions of hg18, hg19 or hg38 by UCSC. Default: hg19.",
                    placement = "top",
                    trigger = "hover"),
          uiOutput("plotcna_gistic_parameters_ui"),
          br(),
          uiOutput("plotcna.patientlist"),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Sample text size")),
                    tags$td(width = "40%", textInput(inputId = "plotcna_sampletextsize", value = 11, label = NULL)))
          ),
          bsTooltip(id = "plotcna_sampletextsize",
                    title = "Size of sample name.Default 11.",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Legend text size")),
                    tags$td(width = "40%", textInput(inputId = "plotcna_legendtextsize", value = 9, label = NULL)))
          ),
          bsTooltip(id = "plotcna_legendtextsize",
                    title = "Size of legend text.Default 9.",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Legend title size")),
                    tags$td(width = "40%", textInput(inputId = "plotcna_legendtitlesize", value = 11, label = NULL)))
          ),
          bsTooltip(id = "plotcna_legendtitlesize",
                    title = "Size of legend title.Default 11.",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Sample bar size")),
                    tags$td(width = "40%", textInput(inputId = "plotcna_samplebarheight", value = 0.5, label = NULL)))
          ),
          bsTooltip(id = "plotcna_samplebarheight",
                    title = "Bar height of each sample.Default 0.5.",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "70%", div(style = "font-size:1.5em; font-weight:600; ", "Chromosome bar size")),
                    tags$td(width = "30%", textInput(inputId = "plotcna_chrombarheight", value = 0.5, label = NULL)))
          ),
          bsTooltip(id = "plotcna_chrombarheight",
                    title = "Bar height of each chromosome.Default 0.5.",
                    placement = "top",
                    trigger = "hover"),
          br(),
          checkboxInput('plotcna_showrownames', label = div(style = "font-size:1.5em; font-weight:600;padding-left:15px ", 'Show row names'), value = TRUE),
          checkboxInput('plotcna_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'), value = FALSE, width = 400),
          bsTooltip(id = "plotcna_usetumorsamplelabel",
                    title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
                    placement = "top",
                    trigger = "hover"),
          checkboxInput('plotcna_removeempytchr', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Remove empty chromosome'), value = FALSE, width = 400),
          # FIX: tooltip said "Default TRUE" but the checkbox default above is FALSE.
          bsTooltip(id = "plotcna_removeempytchr",
                    title = "Remove empty chromosomes that do not exist in samples. Default FALSE. ",
                    placement = "top",
                    trigger = "hover"),
          sliderInput('plotcna_width', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'), min = 400, max = 1200, value = 800, width = 500),
          sliderInput('plotcna_height', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'), min = 400, max = 1200, value = 800, width = 500),
          br(),
          fluidRow(
            column(
              width = 9,
              div(
                tags$button(
                  id = "submit_plotcna", type = "button", class = "action-button bttn",
                  class = "bttn-unite", class = paste0("bttn-md"),
                  class = paste0("bttn-default"),
                  list(strong("Start analysis"), icon("hand-right", lib = "glyphicon")),
                  style = "margin-bottom:0px;margin-right:0px;"
                )
              )
            )
          )
        )
        # FIX: removed a trailing comma here that left an empty argument in box()
      )
    ),
    column(
      width = 9,
      box(
        width = NULL,
        div(strong("Mutational landscape"), style = "font-size:27px; font-weight:500;"),
        p("",
          style = "font-size:20px; font-weight:500;line-height:40px;"),
        tabBox(
          id = 'al_tabbox',
          selected = 'pannel_plotmutprofile',
          side = 'left',
          height = "100%",
          width = "100%",
          tabPanel(
            value = 'pannel_plotmutprofile',
            title = div(icon("image"), "Mutational profile", style = "font-size:1.5em; font-weight:600; "),
            div(plotOutput('plotmutprofile_plot', height = "100%", width = "100%"), align = "left"),
            br(),
            uiOutput("plotmutprofile_download_button_ui")
          ),
          tabPanel(
            value = 'pannel_plotcna',
            title = div(icon("image"), "CNA profile", style = "font-size:1.5em; font-weight:600; "),
            uiOutput("ie_seg"),
            uiOutput("plotcna_table_ui"),
            div(plotOutput('plotcna_plot', height = "100%", width = "100%"), align = "left"),
            br(),
            uiOutput("plotcna_download_button_ui")
          )
        )
      )
    )
  )
)
# UI body for the "clone" tab (metastatic routes inference).
# Layout: the left column (width 3) holds parameter panels shown conditionally
# for the active tab of the 'clt' tabBox (CCF comparison / Jaccard similarity
# index / neutrality test); the right column (width 9) holds the result panels.
# NOTE(review): the "testNeutral" result tabPanel is commented out below while
# its parameter panel is still emitted; presumably the feature is temporarily
# disabled — confirm before removing either half.
bodyclone <- tabItem(
  'clone',
  fluidRow(
    column(
      width = 3,
      box(
        width = NULL,
        # ---- parameters for the "CCF comparison" tab ----
        conditionalPanel(
          condition = "input.clt == 'clone_compareccf'",
          div(strong("Parameter"), style = "font-size:2em; font-weight:600;"),
          br(),
          uiOutput("compareccf_patientid_ui"),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
                    tags$td(width = "60%", textInput(inputId = "compareccf_minccf", value = 0, label = NULL)))
          ),
          br(),
          bsTooltip(id = "compareccf_minccf",
                    title = "The minimum value of ccf",
                    placement = "top",
                    trigger = "hover"),
          checkboxInput('compareccf_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'), value = FALSE, width = 400),
          bsTooltip(id = "compareccf_usetumorsamplelabel",
                    title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
                    placement = "top",
                    trigger = "hover"),
          # "pair by tumor" checkbox is rendered server-side; the static
          # version is kept commented for reference.
          uiOutput("compareccf_pairbytumor_ui"),
          # checkboxInput('compareccf_pairbytumor',
          #               value = FALSE,
          #               label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Pair by tumor'),
          #               width = 500),
          # bsTooltip(id = "compareccf_pairbytumor",
          #           title = "Compare CCF by tumor",
          #           placement = "top",
          #           trigger = "hover"),
          fluidRow(
            column(
              width = 9,
              div(
                tags$button(
                  id = "submit_compareccf", type = "button", class = "action-button bttn",
                  class = "bttn-unite", class = paste0("bttn-md"),
                  class = paste0("bttn-default"),
                  list(strong("Start analysis"), icon("hand-right", lib = "glyphicon")),
                  style = "margin-bottom:0px;margin-right:0px;"
                )
              )
            )
          )
        ),
        # ---- parameters for the "Jaccard similarity index" tab ----
        conditionalPanel(
          condition = "input.clt == 'clone_caljsi'",
          div(strong("Parameter"), style = "font-size:2em; font-weight:600;"),
          br(),
          uiOutput("caljsi_patientid_ui"),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
                    tags$td(width = "60%", textInput(inputId = "caljsi_minccf", value = 0, label = NULL)))
          ),
          bsTooltip(id = "caljsi_minccf",
                    title = "The minimum value of ccf",
                    placement = "top",
                    trigger = "hover"),
          checkboxInput('caljsi_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'), value = FALSE, width = 400),
          bsTooltip(id = "caljsi_usetumorsamplelabel",
                    title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
                    placement = "top",
                    trigger = "hover"),
          uiOutput("caljsi_pairbytumor_ui"),
          # checkboxInput('caljsi_pairbytumor',
          #               value = FALSE,
          #               label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Pair by tumor'),
          #               width = 500),
          # bsTooltip(id = "caljsi_pairbytumor",
          #           title = "calculate JSI by tumor",
          #           placement = "top",
          #           trigger = "hover"),
          checkboxInput('caljsi_usecircle',
                        value = TRUE,
                        label = div(style = "font-size:1.5em; font-weight:600; padding-left:12px", 'Use circle'),
                        width = 500),
          bsTooltip(id = "caljsi_usecircle",
                    title = "Logical (Default:TRUE). Whether to use circle as visualization method of correlation matrix",
                    placement = "top",
                    trigger = "hover"),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "number.cex")),
                    tags$td(width = "40%", textInput(inputId = "caljsi_numbercex", value = 8, label = NULL)))
          ),
          bsTooltip(id = "caljsi_numbercex",
                    title = "The size of text shown in correlation plot. Default 8.",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "number.col")),
                    tags$td(width = "40%", textInput(inputId = "caljsi_numbercol", value = "#C77960", label = NULL)))
          ),
          bsTooltip(id = "caljsi_numbercol",
                    title = "The color of text shown in correlation plot. Default #C77960.",
                    placement = "top",
                    trigger = "hover"),
          br(),
          sliderInput(inputId = 'caljsi_width', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'), min = 400, max = 1000, value = 560, width = 500),
          sliderInput(inputId = 'caljsi_height', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'), min = 400, max = 1000, value = 560, width = 500),
          br(),
          fluidRow(
            column(
              width = 9,
              div(
                tags$button(
                  id = "submit_caljsi", type = "button", class = "action-button bttn",
                  class = "bttn-unite", class = paste0("bttn-md"),
                  class = paste0("bttn-default"),
                  list(strong("Start analysis"), icon("hand-right", lib = "glyphicon")),
                  style = "margin-bottom:0px;margin-right:0px;"
                )
              )
            )
          )
        ),
        # ---- parameters for the (currently hidden) "testNeutral" tab ----
        conditionalPanel(
          condition = "input.clt == 'clone_testneutral'",
          div(strong("Parameter"), style = "font-size:2em; font-weight:600;"),
          br(),
          uiOutput("testneutral_patientid_ui"),
          checkboxInput('testneutral_withintumor', div(style = "font-size:1.5em; font-weight:600; padding-left:15px ", 'Within Tumor'), value = FALSE),
          # FIX: typo "tumros" -> "tumors" in the user-facing tooltip
          bsTooltip(id = "testneutral_withintumor",
                    title = 'Test neutral within tumors in each patients,default is FALSE.',
                    placement = "top",
                    trigger = "hover"),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Min total depth: ")),
                    tags$td(width = "50%", textInput(inputId = "testneutral_mintotaldepth", value = 2, label = NULL)))
          ),
          # FIX: typos "minimun"/"Defalut" corrected in the tooltip
          bsTooltip(id = "testneutral_mintotaldepth",
                    title = "The minimum total depth of coverage. Default: 2",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf: ")),
                    tags$td(width = "70%", textInput(inputId = "testneutral_minvaf", value = 0.1, label = NULL)))
          ),
          bsTooltip(id = "testneutral_minvaf",
                    title = "The minimum value of vaf",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Max vaf: ")),
                    tags$td(width = "70%", textInput(inputId = "testneutral_maxvaf", value = 0.3, label = NULL)))
          ),
          bsTooltip(id = "testneutral_maxvaf",
                    title = "The maximum value of vaf",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "R2 threshold: ")),
                    tags$td(width = "50%", textInput(inputId = "testneutral_R2threshold", value = 0.98, label = NULL)))
          ),
          # FIX: typo "threshod" -> "threshold" in the tooltip
          bsTooltip(id = "testneutral_R2threshold",
                    title = "The threshold of R2 to decide whether a tumor follows neutral evolution. Default: 0.98",
                    placement = "top",
                    trigger = "hover"),
          br(),
          tags$table(
            tags$tr(id = "inline",
                    width = "100%",
                    tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Min mut count: ")),
                    tags$td(width = "50%", textInput(inputId = "testneutral_minmutcount", value = 20, label = NULL)))
          ),
          # FIX: typo "minimun" -> "minimum" in the tooltip
          bsTooltip(id = "testneutral_minmutcount",
                    title = "The minimum number of subclonal mutations used to fit model. Default: 20",
                    placement = "top",
                    trigger = "hover"),
          br(),
          sliderInput(inputId = 'testneutral_width', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'), min = 400, max = 1000, value = 560, width = 500),
          sliderInput(inputId = 'testneutral_height', label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'), min = 400, max = 1000, value = 560, width = 500),
          br(),
          fluidRow(
            column(
              width = 9,
              div(
                tags$button(
                  id = "submit_testneutral", type = "button", class = "action-button bttn",
                  class = "bttn-unite", class = paste0("bttn-md"),
                  class = paste0("bttn-default"),
                  list(strong("Start analysis"), icon("hand-right", lib = "glyphicon")),
                  style = "margin-bottom:0px;margin-right:0px;"
                )
              )
            )
          )
        )
      )
    ),
    column(
      width = 9,
      box(
        width = NULL,
        div(strong("Metastatic routes inference"), style = "font-size:27px; font-weight:500;"),
        p("Since metastasis is the ultimate cause of death for most patients, it is particularly important to gain a systematic understanding of how tumor disseminates and the scale of ongoing parallel evolution in metastatic and primary site. Here, we provide two functions to help distinguish monoclonal from polyclonal seeding. ",
          style = "font-size:20px; font-weight:500;line-height:40px;"),
        tabBox(
          id = 'clt',
          selected = 'clone_compareccf',
          side = 'left',
          height = "100%",
          width = "100%",
          tabPanel(
            value = 'clone_compareccf',
            title = div(icon("chart-bar"), "CCF comparison", style = "font-size:1.5em; font-weight:600; "),
            uiOutput('compareccf.patientlist'),
            uiOutput('compareccf.samplelist'),
            br(),
            uiOutput("compareccf_table_ui")
          ),
          tabPanel(
            title = div(icon("image"), "Jaccard similarity index", style = "font-size:1.5em; font-weight:600; "),
            value = "clone_caljsi",
            uiOutput('caljsi.patientlist'),
            uiOutput("caljsi_pair_table_ui"),
            br(),
            div(plotOutput("caljsi_plot", height = "100%", width = "100%"), align = "left"),
            uiOutput("caljsi_db_ui")
          )
          # The "testNeutral" result panel is intentionally disabled; its
          # parameter panel above is kept so it can be re-enabled easily.
          # tabPanel(
          #   title = div(icon("box"), "testNeutral"),
          #   value = "clone_testneutral",
          #   uiOutput('testneutral.patientlist'),
          #   uiOutput("testneutral_table_ui"),
          #   br(),
          #   uiOutput('testneutral.samplelist'),
          #   uiOutput('warningMessage_testneutral'),
          #   div(plotOutput("testneutral_plot",height = "100%", width = "100%"),align = "center") ,
          #   uiOutput("testneutral_db_ui")
          # )
        )
      )
    )
  )
)
bodytree <- tabItem('tree',
fluidRow(
column(
width = 3,
box(
width = NULL,
# conditionalPanel(
# condition = "input.sgt == 'S_getphylotree'",
# div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
# br(),
# selectInput("getphylotree_method", label = div(style = "font-size:1.5em; font-weight:600; ", "Tree construct method"),
# choices = c("NJ","MP","ML","FASTME.ols","FASTME.bal"),
# selected = "NJ"),
# bsTooltip(id = "getphylotree_method",
# title = "Approach to construct phylogenetic trees",
# placement = "top",
# trigger = "hover"),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf: ")),
# tags$td(width = "70%", textInput(inputId = "getphylotree_minvaf", value = 0.02, label = NULL)))
# ),
# bsTooltip(id = "getphylotree_minvaf",
# title = "The minimum value of vaf",
# placement = "top",
# trigger = "hover"),
# br(),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf: ")),
# tags$td(width = "70%", textInput(inputId = "getphylotree_minccf", value = 0, label = NULL)))
# ),
# bsTooltip(id = "getphylotree_minccf",
# title = "The minimum value of ccf",
# placement = "top",
# trigger = "hover"),
# br(),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "60%", div(style = "font-size:1.5em; font-weight:600; ", "Boostrap repetitions: ")),
# tags$td(width = "40%", textInput(inputId = "getphylotree_bootstraprepnum", value = 100, label = NULL)))
# ),
# bsTooltip(id = "getphylotree_bootstraprepnum",
# title = "Bootstrap iterations. Default 100.",
# placement = "top",
# trigger = "hover"),
# br(),
# actionBttn('submit_getphylotree',div(
# strong("Get phylotree"),align = 'center'))
# ),
conditionalPanel(
condition = "input.sgt == 'S_plotphylotree'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("plotphylotree_patientid_ui"),
selectInput("plotphylotree_getphylotree_method", label = div(style = "font-size:1.5em; font-weight:600; ", "Tree construct method"),
choices = c("NJ","MP","ML","FASTME.ols","FASTME.bal"),
selected = "NJ"),
bsTooltip(id = "plotphylotree_getphylotree_method",
title = "Approach to construct phylogenetic trees",
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "plotphylotree_getphylotree_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "plotphylotree_getphylotree_minvaf",
title = "The minimum value of vaf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
tags$td(width = "60%", textInput(inputId = "plotphylotree_getphylotree_minccf", value = 0, label = NULL)))
),
bsTooltip(id = "plotphylotree_getphylotree_minccf",
title = "The minimum value of ccf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Boostrap repetitions")),
tags$td(width = "30%", textInput(inputId = "plotphylotree_getphylotree_bootstraprepnum", value = 100, label = NULL)))
),
bsTooltip(id = "plotphylotree_getphylotree_bootstraprepnum",
title = "Bootstrap iterations. Default 100.",
placement = "top",
trigger = "hover"),
br(),
selectInput("plotphylotree_branchcol", label = div(style = "font-size:1.5em; font-weight:600; ", "Branch color"),
choices = c("mutType",
"mutSig",
"NULL"),
selected = "mutType"),
bsTooltip(id = "plotphylotree_branchcol",
title = "Specify the colors of branches (Default: mutType). Other options: 'mutSig' for coloring branches by branch mutation signature;",
placement = "top",
trigger = "hover"),
conditionalPanel(
condition = "input.plotphylotree_branchcol == 'mutSig'",
selectInput("plotphylotree_signatureref", label = div(style = "font-size:1.5em; font-weight:600; ", "Signautre reference"),
choices = c("cosmic_v2",
"nature2013",
"exome_cosmic_v3"),
selected = "cosmic_v2"),
bsTooltip(id = "plotphylotree_signatureref",
title = 'signature reference',
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "60%", div(style = "font-size:1.5em; font-weight:600; ", "Minimal mutation number")),
tags$td(width = "20%", textInput(inputId = "plotphylotree_minmutcount", value = 15, label = NULL)))
),
bsTooltip(id = "plotphylotree_minmutcount",
title = 'The threshold for the variants in a branch. Default 15.',
placement = "top",
trigger = "hover"),
),
checkboxInput('plotphylotree_showbootstrap',div(style = "font-size:1.5em; font-weight:600; padding-left:15px ", 'Show bootstrap value'),value = TRUE),
bsTooltip(id = "plotphylotree_showbootstrap",
title = 'Whether to add bootstrap value on internal nodes.',
placement = "top",
trigger = "hover"),
checkboxInput('plotphylotree_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "plotphylotree_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
checkboxInput('plotphylotree_show_scale_bar', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Show scale bar'),value = FALSE, width = 400),
bsTooltip(id = "plotphylotree_show_scale_bar",
title = "Logical (Default: FALSE). Whether to show scale bar.This function adds a horizontal bar giving the scale of the branch lengths to a plot on the current graphical device.",
placement = "top",
trigger = "hover"),
conditionalPanel(
condition = "input.plotphylotree_show_scale_bar == true",
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Scale bar x")),
tags$td(width = "60%", textInput(inputId = "plotphylotree_scale_bar_x", value = NULL, label = NULL)))
),
bsTooltip(id = "plotphylotree_scale_bar_x",
title = "The x location of scale bar.",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Scale bar y")),
tags$td(width = "60%", textInput(inputId = "plotphylotree_scale_bar_y", value = NULL, label = NULL)))
),
bsTooltip(id = "plotphylotree_scale_bar_y",
title = "The y location of scale bar.",
placement = "top",
trigger = "hover"),
br(),
),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ratio")),
tags$td(width = "60%", textInput(inputId = "plotphylotree_minratio", value = 0.05, label = NULL)))
),
bsTooltip(id = "plotphylotree_minratio",
title = "Double (Default:1/20). If min.ratio is not NULL,all edge length of a phylogenetic tree should be greater than min.ratio*the longest edge length.If not, the edge length will be reset as min.ratio*longest edge length.",
placement = "top",
trigger = "hover"),
br(),
sliderInput(inputId='plotphylotree_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 300,max = 1200, value = 500, width = 500),
sliderInput(inputId='plotphylotree_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 300,max = 1200, value = 500, width = 500),
#
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_plotphylotree", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.sgt == 'S_comparetree'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("comparetree.patientlist"),
selectInput("comparetree_getphylotree_method1",
label = div(style = "font-size:1.5em; font-weight:600; ",
"Tree1 construct method"),
choices = c("NJ","MP","ML","FASTME.ols","FASTME.bal"),
selected = "NJ"),
bsTooltip(id = "comparetree_getphylotree_method1",
title = "Approach to construct phylogenetic trees",
placement = "top",
trigger = "hover"),
selectInput("comparetree_getphylotree_method2",
label = div(style = "font-size:1.5em; font-weight:600; ",
"Tree2 construct method"),
choices = c("NJ","MP","ML","FASTME.ols","FASTME.bal"),
selected = "MP"),
bsTooltip(id = "comparetree_getphylotree_method2",
title = "Approach to construct phylogenetic trees",
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "comparetree_getphylotree_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "comparetree_getphylotree_minvaf",
title = "The minimum value of vaf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
tags$td(width = "60%", textInput(inputId = "comparetree_getphylotree_minccf", value = 0, label = NULL)))
),
bsTooltip(id = "comparetree_getphylotree_minccf",
title = "The minimum value of ccf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%",
div(style = "font-size:1.5em; font-weight:600; ",
"Boostrap repetitions")),
tags$td(width = "30%",
textInput(inputId = "comparetree_getphylotree_bootstraprepnum",
value = 100, label = NULL)))
),
bsTooltip(id = "comparetree_getphylotree_bootstraprepnum",
title = "Bootstrap iterations. Default 100.",
placement = "top",
trigger = "hover"),
br(),
# div(strong("Parameter(phyloTree2)"),style = "font-size:1.6em; font-weight:600;"),
# selectInput("comparetree_getphylotree_method2", label = div(style = "font-size:1.5em; font-weight:600; ", "Tree construct method"),
# choices = c("NJ","MP","ML","FASTME.ols","FASTME.bal"),
# selected = "MP"),
# bsTooltip(id = "comparetree_getphylotree_method2",
# title = "Approach to construct phylogenetic trees",
# placement = "top",
# trigger = "hover"),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf: ")),
# tags$td(width = "70%", textInput(inputId = "comparetree_getphylotree_minvaf2", value = 0.02, label = NULL)))
# ),
# bsTooltip(id = "comparetree_getphylotree_minvaf2",
# title = "The minimum value of vaf",
# placement = "top",
# trigger = "hover"),
# br(),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf: ")),
# tags$td(width = "70%", textInput(inputId = "comparetree_getphylotree_minccf2", value = 0, label = NULL)))
# ),
# bsTooltip(id = "comparetree_getphylotree_minccf2",
# title = "The minimum value of ccf",
# placement = "top",
# trigger = "hover"),
# br(),
# tags$table(
# tags$tr(id = "inline",
# width = "100%",
# tags$td(width = "60%", div(style = "font-size:1.5em; font-weight:600; ", "Boostrap repetitions: ")),
# tags$td(width = "40%", textInput(inputId = "comparetree_getphylotree_bootstraprepnum2", value = 100, label = NULL)))
# ),
# bsTooltip(id = "comparetree_getphylotree_bootstraprepnum2",
# title = "Bootstrap iterations. Default 100.",
# placement = "top",
# trigger = "hover"),
# br(),
# div(strong("Parameter(compareTree)"),style = "font-size:1.6em; font-weight:600;"),
checkboxInput('comparetree_showbootstrap',
div(style = "font-size:1.5em; font-weight:600; padding-left:15px ",
'Show bootstrap value'),value = FALSE),
bsTooltip(id = "comparetree_showbootstrap",
title = 'Whether to add bootstrap value on internal nodes.',
placement = "top",
trigger = "hover"),
checkboxInput('comparetree_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "comparetree_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ratio")),
tags$td(width = "60%", textInput(inputId = "comparetree_minratio", value = 0.05, label = NULL)))
),
bsTooltip(id = "comparetree_minratio",
title = "Double (Default:1/20). If min.ratio is not NULL,all edge length of a phylogenetic tree should be greater than min.ratio*the longest edge length.If not, the edge length will be reset as min.ratio*longest edge length.",
placement = "top",
trigger = "hover"),
br(),
# textInput(inputId = "comparetree_commoncol",
# label = div(style = "font-size:1.5em; font-weight:600; ", 'Common color'),
# value = "red"),
# bsTooltip(id = "comparetree_commoncol",
# title = "Color of common branches.",
# placement = "right",
# trigger = "hover"),
sliderInput(inputId='comparetree_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 700,max = 1400, value = 1100, width = 500),
sliderInput(inputId='comparetree_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 700, width = 500),
#
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_comparetree", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.sgt == 'S_treemutsig'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("treemutsig_patientid_ui"),
selectInput("treemutsig_getphylotree_method", label = div(style = "font-size:1.5em; font-weight:600; ", "Tree construct method"),
choices = c("NJ","MP","ML","FASTME.ols","FASTME.bal"),
selected = "NJ"),
bsTooltip(id = "treemutsig_getphylotree_method",
title = "Approach to construct phylogenetic trees",
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "treemutsig_getphylotree_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "treemutsig_getphylotree_minvaf",
title = "The minimum value of vaf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
tags$td(width = "60%", textInput(inputId = "treemutsig_getphylotree_minccf", value = 0, label = NULL)))
),
bsTooltip(id = "treemutsig_getphylotree_minccf",
title = "The minimum value of ccf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Boostrap repetitions")),
tags$td(width = "30%", textInput(inputId = "treemutsig_getphylotree_bootstraprepnum", value = 100, label = NULL)))
),
bsTooltip(id = "treemutsig_getphylotree_bootstraprepnum",
title = "Bootstrap iterations. Default 100.",
placement = "top",
trigger = "hover"),
br(),
selectInput('treemutsig_level', label = div(style = "font-size:1.5em; font-weight:600; ", 'Level'),
choices = c('1','2','3', '4', '5', '6'),selected = "2", width = 400),
bsTooltip(id = "treemutsig_level",
title = " Calculate mutation in different levels.'1': patient,'2':tumor,'3':sample,'4':branch,'5':mutation type.Default level '2'.",
placement = "top",
trigger = "hover"),
# checkboxInput('treemutsig_withintumor',label = div(style = "font-size:1.5em; font-weight:600;padding-left:15px ", 'Within tumor'),value = FALSE),
# bsTooltip(id = "treemutsig_withintumor",
# title = 'Exploring signatures within tumor. Default: FALSE.',
# placement = "top",
# trigger = "hover"),
selectInput("treemutsig_signatureref", label = div(style = "font-size:1.5em; font-weight:600; ", "Signautre reference"),
choices = c("cosmic_v2",
"nature2013",
"exome_cosmic_v3"),
selected = "cosmic"),
bsTooltip(id = "treemutsig_signatureref",
title = 'signature reference',
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "85%", div(style = "font-size:1.5em; font-weight:600; ", "Minimal mutation number")),
tags$td(width = "15%", textInput(inputId = "treemutsig_minmutcount", value = 15, label = NULL)))
),
bsTooltip(id = "treemutsig_minmutcount",
title = 'The threshold for the variants in a branch. Default 15.',
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Signature cutoff")),
tags$td(width = "30%", textInput(inputId = "treemutsig_signaturecutoff", value = 0.1, label = NULL)))
),
bsTooltip(id = "treemutsig_signaturecutoff",
title = 'Discard any signature contributions with a weight less than this amount.Default: 0.1.',
placement = "top",
trigger = "hover"),
br(),
selectInput("treemutsig_mode", label = div(style = "font-size:1.5em; font-weight:600; ", "Mode"),
choices = c('NULL',
'Original',
'Reconstructed',
'Difference'),
selected = "NULL"),
bsTooltip(id = "treemutsig_mode",
title = "type of mutation spectrum.Default: NULL. Options:'Original','Reconstructed' or 'Difference'",
placement = "top",
trigger = "hover"),
checkboxInput('treemutsig_usetumorsamplelabel', label = div(style = "font-size:1.5em; font-weight:600; padding-left:15px", 'Use tumor sample label'),value = FALSE, width = 400),
bsTooltip(id = "treemutsig_usetumorsamplelabel",
title = "Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' with 'Tumor_Label'.",
placement = "top",
trigger = "hover"),
sliderInput(inputId='treemutsig_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 400,max = 1000, value = 800, width = 500),
sliderInput(inputId='treemutsig_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 560, width = 500),
br(),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_treemutsig", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
),
conditionalPanel(
condition = "input.sgt == 'S_muttrunkbranch'",
div(strong("Parameter"),style = "font-size:2em; font-weight:600;"),
br(),
uiOutput("muttrunkbranch_patientid_ui"),
selectInput("muttrunkbranch_getphylotree_method", label = div(style = "font-size:1.5em; font-weight:600; ", "Tree construct method"),
choices = c("NJ","MP","ML","FASTME.ols","FASTME.bal"),
selected = "NJ"),
bsTooltip(id = "muttrunkbranch_getphylotree_method",
title = "Approach to construct phylogenetic trees",
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min vaf")),
tags$td(width = "60%", textInput(inputId = "muttrunkbranch_getphylotree_minvaf", value = 0, label = NULL)))
),
bsTooltip(id = "muttrunkbranch_getphylotree_minvaf",
title = "The minimum value of vaf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "Min ccf")),
tags$td(width = "60%", textInput(inputId = "muttrunkbranch_getphylotree_minccf", value = 0, label = NULL)))
),
bsTooltip(id = "muttrunkbranch_getphylotree_minccf",
title = "The minimum value of ccf",
placement = "top",
trigger = "hover"),
br(),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "50%", div(style = "font-size:1.5em; font-weight:600; ", "Boostrap repetitions")),
tags$td(width = "30%", textInput(inputId = "muttrunkbranch_getphylotree_bootstraprepnum", value = 100, label = NULL)))
),
bsTooltip(id = "muttrunkbranch_getphylotree_bootstraprepnum",
title = "Bootstrap iterations. Default 100.",
placement = "top",
trigger = "hover"),
checkboxInput('muttrunkbranch_ct',div(style = "font-size:1.5em; font-weight:600; padding-left:15px ", 'CT'),value = FALSE),
bsTooltip(id = "muttrunkbranch_ct",
title = 'Distinction between C>T at CpG and C>T at other sites, Default FALSE',
placement = "top",
trigger = "hover"),
tags$table(
tags$tr(id = "inline",
width = "100%",
tags$td(width = "30%", div(style = "font-size:1.5em; font-weight:600; ", "P-value")),
tags$td(width = "60%", textInput(inputId = "muttrunkbranch_pvalue", value = 0.05, label = NULL)))
),
br(),
bsTooltip(id = "muttrunkbranch_pvalue",
title = "Confidence level of the interval for Fisher test. Default: 0.05.",
placement = "right",
trigger = "hover"),
sliderInput(inputId='muttrunkbranch_width',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image width'),min = 400,max = 1000, value = 470, width = 500),
sliderInput(inputId='muttrunkbranch_height',label = div(style = "font-size:1.5em; font-weight:600; ", 'Image height'),min = 400,max = 1000, value = 470, width = 500),
br(),
br(),
fluidRow(
column(
width = 9,
div(
tags$button(
id = "submit_muttrunkbranch", type = "button", class = "action-button bttn",
class = "bttn-unite", class = paste0("bttn-md"),
class = paste0("bttn-default"),
list(strong("Start analysis"),icon("hand-right", lib = "glyphicon")),
style = "margin-bottom:0px;margin-right:0px;"
)
)
)
)
)
)
),
column(
width = 9,
box(
width = NULL,
div(strong("PhyloTree-based analysis"),style = "font-size:27px; font-weight:500;"),
p("Systematic understanding of evolutionary relationship among regions plays a fundamental role in MRS study, where phylogenetic tree is the primary tool for describing these associations and interpreting ITH. MesKit is capable of constructing and comparing phylogenetic trees based on different methods, visualizing the rooted phylogenetic trees with annotation, as well as charactering mutational patterns based on phylogenetic trees.",
style = "font-size:20px; font-weight:500;line-height:40px;"),
tabBox(
id = 'sgt',
side = 'left',
selected = 'S_plotphylotree',
width = "100%",
height = "100%",
tabPanel(
title = div(icon("tree"), "Plot phylotree", style = "font-size:1.5em; font-weight:600; "),
value = 'S_plotphylotree',
uiOutput("phylotree.patientlist"),
div(plotOutput("phylotree_plot",height = "100%",width = "100%"),align = "left"),
br(),
uiOutput("phylotree_downloadbutton_ui")
),
tabPanel(
title = div(icon("tree"), "Compare trees", style = "font-size:1.5em; font-weight:600; "),
value = 'S_comparetree',
verbatimTextOutput("comparetree_dist"),
br(),
div(plotOutput('comparetree_plot', height = "100%", width = "100%"),align = "left"),
uiOutput('comparetree_db_ui')
# br(),
# uiOutput('treemutsig_table_ui')
),
tabPanel(
title = div(icon("image"), "Mutational signature", style = "font-size:1.5em; font-weight:600; "),
value = 'S_treemutsig',
# uiOutput('warningMessage_treemutsig'),
uiOutput("treemutsig.patientlist"),
uiOutput("treemutsig.samplelist"),
div(plotOutput('treemutsig_plot', height = "100%", width = "100%"),align = "left"),
uiOutput("treemutsig_download_button_ui"),
# br(),
# uiOutput('treemutsig_table_ui')
),
tabPanel(
title = div(icon("image"), "Trunk vs Branch", style = "font-size:1.5em; font-weight:600; "),
value = 'S_muttrunkbranch',
uiOutput("muttrunkbranch.patientlist"),
br(),
uiOutput('muttrunkbranch_table_ui'),
div(plotOutput('muttrunkbranch_plot', height = "100%", width = "100%"),align = "left"),
uiOutput("muttrunkbranch_download_button_ui")
)
)
)
)
)
)
#Main function----
# Top navigation bar of the dashboard: the "MesKit" title slot (replaced by a
# logo image just below) plus two right-hand dropdown menus -- a help menu
# linking to the GitHub page and a contact menu listing maintainer e-mails.
dbHeader <- dashboardHeader(title = "MesKit", titleWidth = 300,
                            # tags$li(class = "dropdown", actionLink(inputId = "help", label = div(style = "font-size:15px; font-weight:400; ", "Help"))),
                            # tags$li(class = "dropdown", actionLink(inputId = "contact", label = div(style = "font-size:15px; font-weight:400; ", "Contact"))),
                            # Help dropdown: a single notification item linking to the repo.
                            dropdownMenu(
                              type = "notifications",
                              icon = icon("question-circle"),
                              badgeStatus = NULL,
                              headerText = "Help:",
                              notificationItem("MesKit github page", icon = icon("file"),
                                               href = "https://github.com/Niinleslie/MesKit")
                            ),
                            # Contact dropdown: plain list items (no links) with maintainer e-mails.
                            dropdownMenu(
                              type = "notifications",
                              icon = icon("envelope"),
                              badgeStatus = NULL,
                              headerText = "",
                              tags$li(p("Mengni Liu, liumn5@mail2.sysu.edu.cn")),
                              tags$li(p("Jianyu Chen, chenjy327@mail2.sysu.edu.cn")),
                              tags$li(p("Xin Wang, wangx555@mail2.sysu.edu.cn")),
                              tags$li(p("Chengwei Wang, wangchw8@outlook.com"))
                              # notificationItem("Mengni Liu, liumn5@mail2.sysu.edu.cn, Sun Yat-sen university", icon = icon("user"),href = "liumn5@mail2.sysu.edu.cn"),
                              # notificationItem("Chengwei Wang, wangchw8@outlook.com, Sun Yat-sen university", icon = icon("user"),href = "wangchw8@outlook.com"),
                              # notificationItem("Jianyu Chen, chenjy327@mail2.sysu.edu.cn, Sun Yat-sen university", icon = icon("user"),href = "chenjy327@mail2.sysu.edu.cn"),
                              # notificationItem("Xin Wang, wangx555@mail2.sysu.edu.cn, Sun Yat-sen university", icon = icon("user"),href = "wangx555@mail2.sysu.edu.cn")
                            )
)
# Swap the plain-text title element of the header (children[[2]]) for a
# clickable logo image linking to the MesKit GitHub repository.
# NOTE(review): relies on shinydashboard's internal tag structure, where the
# second child of the header is the title slot -- confirm if shinydashboard
# is upgraded.
dbHeader$children[[2]]$children <- tags$a(href='https://github.com/Niinleslie/MesKit',
                                          tags$img(src='image/logo.jpg',height='65',width='250'))
# Top-level UI assembly: combines the custom header (dbHeader, defined above),
# the sidebar, and a dashboard body that carries all global CSS/JS tweaks plus
# the per-module tab bodies (bodyHome, bodyIP, ... defined elsewhere in this
# file).
shinyUI(
  dashboardPage(
    skin = "blue",
    header=dbHeader ,
    sidebar=sidebar,
    body=dashboardBody(
      ## add text behind the sidebar (design error)
      # Global CSS: fixed logo colours, the .textnvbar banner class used by the
      # JS snippet below, and enlarged checkboxes.
      tags$head(tags$style(HTML(
        "/* logo */
        .skin-blue .main-header .logo {
        background-color: #3c8dbc;
        }
        /* logo when hovered */
        .skin-blue .main-header .logo:hover {
        background-color: #3c8dbc;
        }
        .textnvbar {
        font-size: 20px;
        line-height: 50px;
        text-align: left;
        font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif;
        padding: 0 15px;
        overflow: hidden;
        color: white;
        }
        .checkbox { /* checkbox is a div class*/
        line-height: 25px;}
        input[type='checkbox']{
        width: 23px; /*Desired width*/
        height: 23px; /*Desired height*/
        line-height: 25px;
        }
        span {
        line-height: 30px;
        }
        "))),
      ## change the style of the progress bar.
      # tags$head(tags$style(
      #   type="text/css",
      #   ".progress.shiny-file-input-progress {
      #      height:5px;
      #   }
      # 
      #   .progress-bar {
      #     background-image: linear-gradient(to right, #77C7FF, #3c8dbc ) !important;
      #     background-size: auto !important;
      #     font-size:0px;
      #     height:5px;
      #   }"
      # )),
      # JS: once the document is ready, append the app tagline into the navbar
      # (styled by the .textnvbar CSS class defined above).
      tags$script(HTML('
      $(document).ready(function() {
        $("header").find("nav").append(\'<span class="textnvbar"> MesKit: A Tool Kit for Dissecting Cancer Evolution of Multi-region Tumor Biopsies through Somatic Alterations</span>\');
      })
      ')),
      # Additional global CSS: inline form layout for #inline tables,
      # validation-message styling, enlarged notifications/tooltips, and
      # DataTables cell padding.
      tags$head(
        tags$style(type="text/css", "#inline label{ display: table-cell; text-align: centers; vertical-align: middle; width=400; } 
                   #inline .form-group { display: table-row; width=400; }"),
        tags$style(HTML("
        .shiny-output-error-validation {
        color: brown;
        }
        .shiny-notification {
              height: 200px;
              width: 600px;
              position:fixed;
              font-size: 30px;
              top: calc(50% - 100px);
              left: calc(50% + 100px);
            }
        .dt-right {
            text-align: justify !important;
        }
        .shiny-output-error-validation {
            color: green;
            font-size:27px;
            font-weight:500;
        }
        .main-sidebar { font-size: 20px; }
        table.dataTable tbody th, table.dataTable tbody td {
          padding: 10px 1.5em !important;
        }
        .tooltip {
          min-width: 15em !important;
        }
        .progress-message, .progress-detail {
          display: block !important;
        }
        # .shiny-notification-close {
        #  float: right;/*image size adjust */
        #  font-weight: bold;
        #  font-size: 30px;
        #  bottom: 9px;
        #  position: relative;
        #  padding-left: 4px;
        #  color: #444;
        #  cursor: default;
        # }
        # .pipediv{
        #   width:900px;
        #   height:500px;
        # }
        # .pipediv .pipe{
        #   float:left;
        # }
        # .pipediv .pipe img{
        #   width:500px;
        #   height:500px;
        # }
        # .pipetext{
        # 
        # }
        ")),
        tags$link(rel = "stylesheet", type = "text/css", href = "main.css")
      ),
      # One tabItem body per sidebar entry; each body* object is built earlier
      # in this file.
      tabItems(
        bodyHome,
        bodyIP,
        bodyAL,
        bodyITH,
        bodyclone,
        # bodyfunction,
        bodytree
        # bodySurvival
      )
    )
  )
)
|
4b65085c40b51dc27b0ed141d98b6de737dd095b
|
b49fc180baf1545f8a7188aeebd59304a5ea3bb2
|
/man/ukFlex.Rd
|
d51f7104688ac83add3b0f004f8df713c1067d38
|
[
"MIT"
] |
permissive
|
dickoa/shinyuikit
|
fd7dc94571fe68036dca1e41896a2f7e81a820e7
|
10e1159a316d6b01c7063ce52dc867fd3e09b614
|
refs/heads/master
| 2020-03-12T11:58:29.988212
| 2018-06-15T19:58:06
| 2018-06-15T19:58:06
| 130,608,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 810
|
rd
|
ukFlex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uikit-flex.R
\name{ukFlex}
\alias{ukFlex}
\title{UIkit Flexbox layout for shiny}
\usage{
ukFlex(..., inline = FALSE, wrap = NULL, horizontal_align = NULL,
vertical_align = NULL, direction = NULL, height = NULL, width = NULL,
muted_background = FALSE, dimension = NULL)
}
\arguments{
\item{...}{The UI elements to place in the Flex element}
\item{inline}{logical.}
\item{wrap}{character.}
\item{horizontal_align}{logical.}
\item{vertical_align}{logical.}
\item{direction}{character. Defines the axis along which flex items are placed and their direction; by default items run horizontally from left to right, as with the 'row' option.}
\item{width}{character. Define Flex item width}
}
\description{
UIkit Flexbox layout for shiny
}
|
d4ec9c2251b7b2a4198d3e197e9192a894853e82
|
a4cb5eb0e937b43293c0a5e6a778c5c3227517b5
|
/03-Rscripts/shiny-cores/app-add_delete.R
|
12e530808bdab5e54d45373e9f90d5a58c00a1ce
|
[] |
no_license
|
Joacala/trini
|
1e5c1c87ebc40155ff72b0057f960c00703dd572
|
0bf3fa8bf95590355a83e32c8acba08d3ef62c53
|
refs/heads/main
| 2023-04-19T05:28:08.299320
| 2022-10-13T08:27:29
| 2022-10-13T08:27:29
| 331,259,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,764
|
r
|
app-add_delete.R
|
### add and delete points from a scaterplot
library(shiny)
# Interactive point editor: launches a Shiny app overlaying the points in
# `res.s` on top of image `imc`.  Single click adds a point, double click
# deletes the nearest point, "Reset" restores the original set, and "Save"
# writes the current point set to `path` as CSV.
#
# @param res.s object with numeric `x` and `y` components (the initial points).
# @param imc   image object plotted as the background; NOTE(review): exact
#              class not visible here (must support plot() with xlim/ylim/asp)
#              -- confirm against the caller.
# @param path  file path the "Save" button writes the CSV to.
visual.cor <- function(res.s,imc, path){
  ui <- fluidPage(
    fluidRow(
      # Two absolutely-positioned plots stacked inside one relative container:
      # plot2 (drawn first, underneath) shows the background image, plot1
      # (transparent background, on top) shows the points and receives the
      # click / double-click events.
      div(id="container",
          height = 500,
          width = 500,
          actionButton("reset", "Reset"),
          actionButton("save", "Save"),
          style="position:relative;",
          div(plotOutput("plot2",
                         height = 500,
                         width = 500),
              style="position:absolute; top:10; left:0;"),
          div(plotOutput("plot1",
                         height = 500,
                         width = 500,
                         dblclick = dblclickOpts(id = "plot_click2"),
                         click = "plot_click"),
              style="position:absolute; top:10; left:0;")
      )
    )
  )
  server <- function(input, output) {
    # rv$m holds the editable copy of the points; all observers mutate it.
    rv=reactiveValues(m=data.frame(x=res.s$x,y=res.s$y))
    output$plot1 <- renderPlot({
      # Transparent background so the image plot below stays visible.
      # Note: both plots share identical ylim/xlim (y axis reversed) so that
      # point and image coordinates line up.
      par(bg="transparent")
      plot(rv$m$y~rv$m$x,col=4,pch=3,cex=1.5,ylim=c(1000,0),xlim=c(-600,600),axes=T, yaxs="i", xaxs="i")
    })
    output$plot2 <- renderPlot({
      plot(imc,ylim=c(1000,0),xlim=c(-600,600),asp="varying")
    })
    # Double click: drop the single nearest point (maxpoints=1).
    observeEvent(input$plot_click2, {
      np <- nearPoints(rv$m, input$plot_click2, xvar = "x", yvar = "y", allRows = TRUE, maxpoints=1)
      rv$m <- rv$m[!np$selected_,]
    })
    # Single click: append the clicked coordinates as a new point.
    observeEvent(input$plot_click, {
      rv$m <- rbind(rv$m,unlist(input$plot_click))
    })
    # Reset: restore the original point set from res.s.
    observeEvent(input$reset, {
      rv$m <- data.frame(x=res.s$x,y=res.s$y)
    })
    # Save: persist the current points to disk.
    observeEvent(input$save, {
      write.csv(rv$m,path)
    })
  }
  shinyApp(ui, server)
}
# Hard-coded output location for the edited point set; `res.s` and `imc` are
# expected to already exist in the global environment -- TODO confirm where
# they are created before sourcing this script.
path <- "C:\\Users\\F541U\\Desktop\\proyectos\\Julen\\data_shiny.csv"
visual.cor(res.s,imc, path)  # launch the interactive editor
|
ae59045071e5099f4f711f3b94fd79718fdf2e5e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/openair/examples/timePlot.Rd.R
|
4de1ba332e420801e50305f339f7a503476ce3d2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,649
|
r
|
timePlot.Rd.R
|
library(openair)
### Name: timePlot
### Title: Plot time series
### Aliases: timePlot
### Keywords: methods
### ** Examples
# basic use, single pollutant
timePlot(mydata, pollutant = "nox")
# two pollutants in separate panels
## Not run: timePlot(mydata, pollutant = c("nox", "no2"))
# two pollutants in the same panel with the same scale
## Not run: timePlot(mydata, pollutant = c("nox", "no2"), group = TRUE)
# alternative by normalising concentrations and plotting on the same scale
# (fix: the trailing word "scale" of this comment previously sat on its own
# uncommented line, so sourcing the script evaluated and printed base::scale)
## Not run: 
##D timePlot(mydata, pollutant = c("nox", "co", "pm10", "so2"), group = TRUE, avg.time =
##D "year", normalise = "1/1/1998", lwd = 3, lty = 1)
## End(Not run)
# examples of selecting by date
# plot for nox in 1999
## Not run: timePlot(selectByDate(mydata, year = 1999), pollutant = "nox")
# select specific date range for two pollutants
## Not run: 
##D timePlot(selectByDate(mydata, start = "6/8/2003", end = "13/8/2003"),
##D pollutant = c("no2", "o3"))
## End(Not run)
# choose different line styles etc
## Not run: timePlot(mydata, pollutant = c("nox", "no2"), lty = 1)
# choose different line styles etc
## Not run: 
##D timePlot(selectByDate(mydata, year = 2004, month = 6), pollutant =
##D c("nox", "no2"), lwd = c(1, 2), col = "black")
## End(Not run)
# different averaging times
#daily mean O3
## Not run: timePlot(mydata, pollutant = "o3", avg.time = "day")
# daily mean O3 ensuring each day has data capture of at least 75%
## Not run: timePlot(mydata, pollutant = "o3", avg.time = "day", data.thresh = 75)
# 2-week average of O3 concentrations
## Not run: timePlot(mydata, pollutant = "o3", avg.time = "2 week")
|
83957eba033e57967e223eae6447e240d4eba285
|
db0e244c6d3c1aa0bef5d5906750d8f94c388387
|
/analyses/informacio_interes_indepe/prepare_data.R
|
ae646a2bd1e3ff42028568ef1b95629ad9143ace
|
[
"MIT"
] |
permissive
|
joebrew/vilaweb
|
ea9796aa7a5d4f0676608618ba975dac95346000
|
f0b028c07484c750d75a101308c3937d81b40d80
|
refs/heads/master
| 2021-06-09T02:00:21.502677
| 2020-09-07T22:20:03
| 2020-09-07T22:20:03
| 159,472,849
| 23
| 9
|
NOASSERTION
| 2021-06-01T23:59:21
| 2018-11-28T09:01:42
|
HTML
|
UTF-8
|
R
| false
| false
| 26,787
|
r
|
prepare_data.R
|
# Libraries
library(vilaweb)
library(tidyverse)
library(databrew)
library(pageviews)
library(lubridate)
# Read ceo data
# Functions
# Round `x` to the nearest multiple of `base` (vectorised; uses round()'s
# banker's rounding for exact half-multiples).
mround <- function(x,base){
  n_multiples <- round(x / base)
  base * n_multiples
}
# Round a vector of shares to integer percentages summing to exactly 100,
# using the largest-remainder method; exact ties are broken at random.
round_percent <- function(x) {
  pct <- x / sum(x) * 100          # normalise so the total is 100
  out <- floor(pct)                # integer part of each percentage
  shortfall <- 100 - sum(out)      # percentage points still to allocate
  if (shortfall > 0) {
    # Rank entries by fractional remainder, largest first; a random
    # permutation acts as the tie breaker (same as the original).
    ranking <- order(pct %% 1, sample(length(pct)), decreasing = TRUE)
    winners <- ranking[seq_len(shortfall)]
    out[winners] <- out[winners] + 1
  }
  out
}
# Format a number with thousands separators, swapping scales::comma()'s
# "," for "." (continental European / Catalan style).
numberfy <- function(x){
  with_commas <- scales::comma(x)
  gsub(",", ".", with_commas, fixed = TRUE)
}
# # Get valoracio
# Coerce every "Valoració:" (politician-rating) column of `df` to numeric,
# recoding the CEO's 98/99 don't-know / no-answer codes to NA.
#
# @param df data frame (or tibble) of CEO barometer responses.
# @return `df` with each "Valoració:" column as a numeric vector.
transform_valoracions <- function(df){
  # grep(value = TRUE) + direct iteration fixes the original
  # for(j in 1:length(val_vars)) loop, which indexed val_vars[1] == NA
  # (and errored) when no column matched.
  val_vars <- grep('Valoració:', names(df), fixed = TRUE, value = TRUE)
  for (this_var in val_vars) {
    # unlist() flattens the one-column tibble that df[, this_var] returns
    # when df is a tibble; on a base data.frame it is a no-op.
    vals <- as.numeric(as.character(unlist(df[, this_var])))
    vals[vals %in% 98:99] <- NA   # 98/99 are DK / no-answer codes
    df[, this_var] <- vals
  }
  return(df)
}
# Coerce every "Coneixement:" (politician-recognition) column of `df` to
# character (they arrive as factors in some waves).
#
# @param df data frame (or tibble) of CEO barometer responses.
# @return `df` with each "Coneixement:" column as a character vector.
transform_coneixements <- function(df){
  # grep(value = TRUE) + direct iteration fixes the original
  # for(j in 1:length(val_vars)) loop, which broke (val_vars[1] == NA)
  # when no column matched.
  val_vars <- grep('Coneixement:', names(df), fixed = TRUE, value = TRUE)
  for (this_var in val_vars) {
    # unlist() flattens the one-column tibble returned when df is a tibble.
    df[, this_var] <- as.character(unlist(df[, this_var]))
  }
  return(df)
}
# Most recent CEO wave (June 2019), shipped as a dataset in the vilaweb package
ceo_june_2019 <- vilaweb::ceo_june_2019
# Transform data to combine
# Normalise one CEO barometer wave into a common analysis schema.
#
# @param df one wave of CEO barometer microdata, with the CEO's original
#   Catalan question texts as column names.
# @return a data frame with harmonised columns: age, partit, referendum,
#   identificacio, indepe, language variables, parental-origin flags, date,
#   etc., plus all "Valoració:" / "Coneixement:" columns coerced via the
#   helpers above.
transform_data <- function(df){
  # Lookup table collapsing the CEO's language answers into four categories
  # (plus "NS/NC" / "No answer" for don't-know / refused).
  language_dict <- tibble(input = c('Català (valencià / balear)', 'Castellà', 'Totes dues igual: català (valencià / balear) i castellà', 'Altres llengües o altres combinacions', 'Aranès', 'No ho sap', 'No contesta'),
                          output_ca = c('Català',
                                        'Castellà',
                                        'Cat+Cast',
                                        'Altres',
                                        'Català',
                                        'NS/NC',
                                        'NS/NC'),
                          output_en = c('Catalan',
                                        'Spanish',
                                        'Cat+Spa',
                                        'Others',
                                        'Catalan',
                                        'No answer',
                                        'No answer'))
  # Map raw language answers onto the collapsed categories; ca = TRUE
  # returns the Catalan labels, otherwise the English ones.
  convert_language <- function(x, ca = TRUE){
    z <- tibble(input = x)
    joined <- left_join(z, language_dict)
    if(ca){
      as.character(joined$output_ca)
    } else {
      as.character(joined$en) # NOTE(review): column is named output_en; joined$en is NULL on a tibble -- confirm (branch only reached when ca = FALSE, never in visible code)
    }
  }
  # The national-identification question wording differs across waves;
  # whichever variant is present becomes `identificacio`.
  v1 <- "Amb quina de les següents frases se sent més identificat: em sento només espanyol, més espanyol que català, tan espanyol com català, més català que espanyol o només català?"
  v2 <- 'Amb quina de les següents frases,em sento només espanyol, més espanyol que català, tan espanyol com català, més català que espanyol o només català, se sent més identificat?'
  if(v1 %in% names(df)){
    df$identificacio <- unlist(df[,v1])
  } else {
    df$identificacio <- unlist(df[,v2])
  }
  # The referendum question also changed wording (and polarity) across
  # waves; the three blocks below try each variant in turn.
  ref_var <- "Fins a quin punt està d’acord o en desacord amb cadascuna de les següents afirmacions: Catalunya té el dret de celebrar un referèndum d'autodeterminació"
  if(ref_var %in% names(df)){
    vals <- unlist(df[,ref_var])
    if(!all(is.na(vals))){
      df$referendum <- vals
    }
  }
  ref_var <- "Fins a quin punt està d’acord o en desacord amb l’afirmació següent: “Els catalans i les catalanes tenen dret a decidir el seu futur com a país votant en un referèndum”?"
  if(ref_var %in% names(df)){
    # This variant is stored as numeric codes 1-5 / 98 / 99; map to labels.
    levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
    vals <- c(1:5, 98, 99)
    dict <- tibble(vals, referendum = levs)
    dict$referendum <- factor(dict$referendum, levels = levs)
    new_vals <- tibble(vals = unlist(df[,ref_var]))
    new_vals <- left_join(new_vals, dict)
    if(!all(is.na(new_vals$referendum))){
      df$referendum <- new_vals$referendum
    }
  }
  ref_var <- "Fins a quin punt està d’acord o en desacord amb cadascuna de les següents afirmacions: Catalunya no té el dret de celebrar un referèndum d'autodeterminació"
  if(ref_var %in% names(df)){
    vals <- as.character(unlist(df[,ref_var]))
    # Reverse
    vals2 <- ifelse(vals == "D'acord", "En desacord",
                    ifelse(vals == "Molt d'acord", "Molt en desacord",
                           ifelse(vals == "En desacord", "D'acord",
                                  ifelse(vals == "Molt en desacord", "Molt d'acord", vals))))
    levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
    vals <- factor(vals2, levels = levs)
    if(!all(is.na(vals))){
      df$referendum <- vals
    }
  }
  # Waves without any referendum question get an all-NA column.
  if(!'referendum' %in% names(df)){
    df$referendum <- NA
  }
  # Main harmonisation pipe: rename CEO question-text columns, correct
  # known month-coding quirks per wave, recode grandparents/parents'
  # birthplace, languages, independence support and party sympathy.
  df <- df %>%
    mutate(partit = `Em podria dir per quin partit sent més simpatia?`) %>%
    mutate(any = `Any de realització del baròmetre`,
           mes = `Mes de realització del baròmetre`) %>%
    mutate(mes = ifelse(mes == 3 & any == 2014, 4, mes),
           mes = ifelse(mes == 10 & any == 2014, 11, mes),
           mes = ifelse(mes == 3 & any == 2015, 2, mes),
           mes = ifelse(mes == 7 & any == 2017, 6, mes),
           mes = ifelse(mes == 7 & any == 2018, 6, mes),
           mes = ifelse(mes == 11 & any == 2018, 10, mes),
           mes = ifelse(mes == 7 & any == 2019, 6, mes)) %>%
    mutate(date = as.Date(paste0(any, '-', mes, '-15'))) %>%
    mutate(avis = as.character(`Quants dels seus avis/àvies van néixer a Catalunya?`)) %>%
    mutate(avis = ifelse(avis == 'Cap', '0',
                         ifelse(avis == 'Un', '1',
                                ifelse(avis == 'Dos', '2',
                                       ifelse(avis == 'Tres', '3',
                                              ifelse(avis == 'Quatre', '4', NA)))))) %>%
    mutate(avis = as.numeric(avis)) %>%
    mutate(pare_cat = `Em podria dir el lloc de naixement del seu pare?` == 'Catalunya',
           pare_esp = `Em podria dir el lloc de naixement del seu pare?` == 'Altres comunitats autònomes',
           mare_cat = `Em podria dir el lloc de naixement de la seva mare?` == 'Catalunya',
           mare_esp = `Em podria dir el lloc de naixement de la seva mare?` == 'Altres comunitats autònomes') %>%
    mutate(pare_cat = as.numeric(pare_cat),
           pare_esp = as.numeric(pare_esp),
           mare_cat = as.numeric(mare_cat),
           mare_esp = as.numeric(mare_esp)) %>%
    mutate(llengua_primera = `Quina llengua va parlar primer vostè, a casa, quan era petit?`) %>%
    mutate(llengua_primera = convert_language(llengua_primera),
           llengua_habitual = convert_language(`Quina és la seva llengua habitual, ens referim a la llengua que parla més sovint?`),
           llengua_propia = convert_language(`Quina és la seva llengua, ens referim a quina és la llengua que vostè considera com a pròpia?`)) %>%
    mutate(indepe = `Vol que Catalunya esdevingui un Estat independent?`) %>%
    # mutate(llengua_preferiex = `Prefereix que li faci les preguntes en català o en castellà?`),
    mutate(neixer = `Em podria dir on va néixer?`,
           informat = `Es considera vostè molt, bastant, poc o gens informat/ada del que passa en política?`,
           interessat = `A vostè la política li interessa molt, bastant, poc o gens?`,
           partit = `Em podria dir per quin partit sent més simpatia?`,
           axis = `Quan es parla de política, normalment s’utilitzen les expressions esquerra i dreta, indiqui on s’ubicaria vostè?`,
           telefon_fix = `Té telèfon fix a la seva llar?`,
           ingressos = `Quins són els ingressos familiars que entren cada mes a casa seva?`) %>%
    mutate(indepe = as.character(indepe)) %>%
    mutate(indepe =
             ifelse(indepe %in% c('No ho sap', 'No contesta'),
                    'NS/NC', indepe)) %>%
    mutate(municipi = `Grandària del municipi`) %>%
    mutate(provincia = `Província`) %>%
    mutate(age = Edat) %>%
    dplyr::select(
      age,
      partit,
      referendum,
      identificacio,
      municipi,
      provincia,
      date,
      avis,
      pare_cat, pare_esp,
      mare_cat, mare_esp,
      llengua_primera, llengua_habitual, llengua_propia, #llengua_prefereix,
      neixer,
      informat,
      interessat,
      partit,
      axis,
      telefon_fix,
      ingressos,
      indepe,
      contains("Valoració:"),
      contains('Coneixement: ')
    ) %>%
    mutate(pares = ifelse(pare_cat + mare_cat == 2,
                          '2 pares nascuts a Cat',
                          ifelse(pare_cat + mare_cat == 1 &
                                   pare_esp + mare_esp == 1,
                                 '1 pare nascut a Cat, l\'altre a Esp',
                                 ifelse(pare_esp + mare_esp == 2,
                                        '2 pares nascuts a Esp',
                                        'Altres combinacions')
                          ))) %>%
    mutate(partit = as.character(partit)) %>%
    mutate(partit = ifelse(partit %in% c('ERC', 'PSC', 'CUP',
                                         "PPC"),
                           partit,
                           ifelse(partit %in% c('Podemos','En Comú Podem', 'Catalunya en Comú Podem', 'Barcelona en Comú', 'Catalunya sí que es pot'), 'Podem',
                                  ifelse(partit == "C's", "Cs",
                                         ifelse(partit %in% c('CiU', 'Junts pel Sí', 'CDC', 'PDeCAT', 'Junts per Catalunya'), 'JxCat/PDeCat', 'Cap o altre partit')))))
  # Finally normalise the rating / recognition columns.
  df <- transform_valoracions(df)
  df <- transform_coneixements(df)
  return(df)
}
# Combine
# Transform every wave of `new_ceo` (presumably a multi-wave dataset lazy-
# loaded by library(vilaweb) -- not defined in this file; TODO confirm)
# and stack them with the June 2019 wave into `combined`.
bop_numbers <- sort(unique(new_ceo$`Número d'ordre del baròmetre`))
bop_list <- list()
for(i in 1:length(bop_numbers)){
  message(i)
  this_bop_number <- bop_numbers[i]
  this_bop <- new_ceo %>% filter(`Número d'ordre del baròmetre` == this_bop_number)
  out <- transform_data(this_bop)
  bop_list[[i]] <- out
}
bop <- bind_rows(bop_list)
combined <-
  bop %>%
  bind_rows(
    transform_data(ceo_june_2019)
  )
# Create an estimated birth date
# (interview date minus age in days, shifted half a year to the midpoint)
combined <-
  combined %>%
  mutate(dob = date - round((182.5 + (age * 365.25)))) %>%
  # Define the oct1 generation
  mutate(oct1 =
           # 16 years old
           (dob <= '2002-10-01' &
              # 20 years old
              dob >= '1997-10-01'
           ))
# Bar chart of agreement with "Catalonia has the right to hold a
# self-determination referendum", pooling `combined` waves from 2018 on.
#
# @param ca TRUE for Catalan labels/captions, FALSE for English.
# @param keep_simple drop the "neither", "don't know" and "no answer" bars.
# @return a ggplot object. Relies on globals: `combined`, `self_cite()`,
#   `theme_vilaweb()` (from the vilaweb package -- TODO confirm).
simple_plot <- function(ca = FALSE,
                        keep_simple = FALSE){
  levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
  levs_en <- c('Strongly agree',
               'Agree',
               'Neither agree\nnor disagree',
               'Disagree',
               'Strongly disagree',
               "Don't know",
               "No answer")
  # Tally answers over all post-2018 waves.
  pd <- combined %>%
    filter(date >= '2018-01-01') %>%
    filter(!is.na(referendum)) %>%
    group_by(referendum) %>%
    tally
  # Diverging palette; middle/neutral categories forced to grey.
  cols <- RColorBrewer::brewer.pal(n = 5, name = 'Spectral')
  cols <- rev(cols)
  cols[3] <- 'darkgrey'
  cols <- c(cols, rep('darkgrey', 2))
  if(keep_simple){
    pd <- pd %>%
      filter(!referendum %in% c(levs[c(3,6,7)],
                                levs_en[c(3,6,7)]))
    cols <- cols[!(1:length(cols) %in% c(3,6,7))]
  }
  pd <- pd %>%
    mutate(p = n / sum(n) * 100)
  if(ca){
    the_labs <- labs(x = '',
                     y = 'Percentatge',
                     title = "'Catalunya té dret a celebrar\nun referèndum d'autodeterminació'",
                     subtitle = "Grau d'acord amb la frase",
                     caption = paste0('Gràfic de Joe Brew | @joethebrew | www.vilaweb.cat. Dades del CEO.\n',
                                      'Frase exacta varia per data del qüestionari, detalls complets a:\n',
                                      self_cite(),
                                      '\nMida de mostra: ',
                                      numberfy(sum(pd$n)),
                                      ' residents de Catalunya amb ciutadania espanyola, 2018-2019.\n'))
    pd$referendum <- factor(pd$referendum,
                            levels = levs,
                            labels = gsub("Ni d'acord ni en desacord",
                                          "Ni d'acord ni\nen desacord", levs))
  } else {
    the_labs <- labs(x = '',
                     y = 'Percentage',
                     title = "'Catalonia has the right to hold\na self-determination referendum'",
                     subtitle = 'Extent of agreement with phrase',
                     caption = paste0('Chart by Joe Brew | @joethebrew | www.vilaweb.cat. Raw data from the CEO.\n',
                                      'Actual phrase varied by questionnaire date, full details at:\n',
                                      self_cite(),
                                      '\nSample size: ',
                                      numberfy(sum(pd$n)),
                                      ' residents of Catalonia with Spanish citenship, 2018-2019.\n')) # NOTE(review): "citenship" typo in user-facing caption
    pd$referendum <- factor(pd$referendum,
                            levels = levs,
                            labels = levs_en)
  }
  ggplot(data = pd,
         aes(x = referendum,
             y = p)) +
    geom_bar(stat = 'identity',
             aes(fill = referendum)) +
    scale_fill_manual(name = '',
                      values = cols) +
    theme_vilaweb() +
    theme(legend.position = 'none') +
    the_labs +
    theme(plot.caption = element_text(size = 9)) +
    geom_text(aes(label = round(p, digits = 1)),
              nudge_y = 5,
              alpha = 0.6)
}
# Dodged bar chart of referendum-right agreement, cross-tabulated by a
# second variable (default: interest in politics).
#
# @param ca TRUE for Catalan labels/captions, FALSE for English.
# @param keep_simple drop "neither" / "don't know" / "no answer" answers.
# @param cross_var name (string) of the column in `combined` to cross by;
#   assumed to use the Molt/Bastant/Poc/Gens levels -- TODO confirm for
#   other variables.
# @param legend_title legend title for the fill scale.
# @return a ggplot object. Relies on globals `combined`, `self_cite()`,
#   `theme_vilaweb()`.
simple_plot_cross <- function(ca = FALSE,
                              keep_simple = TRUE,
                              cross_var = 'interessat',
                              legend_title = ''){
  levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
  levs_en <- c('Strongly agree',
               'Agree',
               'Neither agree\nnor disagree',
               'Disagree',
               'Strongly disagree',
               "Don't know",
               "No answer")
  var_levs <- c('Molt', 'Bastant', 'Poc', 'Gens', 'No ho sap', 'No contesta')
  var_levs_en <- c('A lot', 'A fair amount', 'Little', 'Not at all', "Don't know", 'No answer')
  # Tally by answer x cross variable (string-based .dots grouping).
  pd <- combined %>%
    filter(date >= '2018-01-01') %>%
    filter(!is.na(referendum)) %>%
    group_by(.dots = list('referendum', cross_var)) %>%
    tally
  names(pd)[2] <- 'var'
  cols <- RColorBrewer::brewer.pal(n = 5, name = 'Spectral')
  cols <- rev(cols)
  cols[3] <- 'darkgrey'
  cols <- c(cols, rep('darkgrey', 2))
  if(keep_simple){
    pd <- pd %>%
      filter(!referendum %in% c(levs[c(3,6,7)],
                                levs_en[c(3,6,7)]))
    cols <- cols[!(1:length(cols) %in% c(3,6,7))]
  }
  # Percentages are within each level of the cross variable.
  pd <- pd %>%
    group_by(var) %>%
    mutate(p = n / sum(n) * 100)
  if(ca){
    the_labs <- labs(x = '',
                     y = 'Percentatge',
                     title = "'Catalunya té dret a celebrar\nun referèndum d'autodeterminació'",
                     subtitle = "Grau d'acord amb la frase",
                     caption = paste0('Gràfic de Joe Brew | @joethebrew | www.vilaweb.cat. Dades del CEO.\n',
                                      'Frase exacta varia per data del qüestionari, detalls complets a:\n',
                                      self_cite(),
                                      '\nMida de mostra: ',
                                      numberfy(sum(pd$n)),
                                      ' residents de Catalunya amb ciutadania espanyola, 2018-2019.\n'))
    pd$referendum <- factor(pd$referendum,
                            levels = levs,
                            labels = gsub("Ni d'acord ni en desacord",
                                          "Ni d'acord ni\nen desacord", levs))
    pd$var <- factor(pd$var,
                     levels = var_levs,
                     labels = var_levs)
  } else {
    the_labs <- labs(x = '',
                     y = 'Percentage',
                     title = "'Catalonia has the right to hold\na self-determination referendum'",
                     subtitle = 'Extent of agreement with phrase',
                     caption = paste0('Chart by Joe Brew | @joethebrew | www.vilaweb.cat. Raw data from the CEO.\n',
                                      'Actual phrase varied by questionnaire date, full details at:\n',
                                      self_cite(),
                                      '\nSample size: ',
                                      numberfy(sum(pd$n)),
                                      ' residents of Catalonia with Spanish citenship, 2018-2019.\n')) # NOTE(review): "citenship" typo in user-facing caption
    pd$referendum <- factor(pd$referendum,
                            levels = levs,
                            labels = levs_en)
    pd$var <- factor(pd$var,
                     levels = var_levs,
                     labels = var_levs_en)
  }
  pd <- pd %>% filter(!is.na(var),
                      !var %in% c('No ho sap', 'No contesta', "Don't know", 'No answer'))
  ggplot(data = pd,
         aes(x = var,
             y = p,
             group = referendum)) +
    geom_bar(stat = 'identity',
             aes(fill = referendum),
             position = position_dodge(width = 0.8)) +
    scale_fill_manual(name = legend_title,
                      values = cols) +
    theme_vilaweb() +
    # theme(legend.position = 'none') +
    the_labs +
    theme(plot.caption = element_text(size = 9),
          legend.box = 'horizontal') +
    geom_text(aes(label = round(p, digits = 1), y = p + 5),
              alpha = 0.6,
              position = position_dodge(width = 0.8)) +
    guides(fill = guide_legend(title.position = 'top', title.hjust = 0.5))
}
# Bar chart of support for independence ("Do you want Catalonia to become
# an independent State?"), pooling `combined` waves from 2018 on.
#
# @param ca TRUE for Catalan labels/captions, FALSE for English.
# @return a ggplot object. Relies on globals `combined`, `self_cite()`,
#   `theme_vilaweb()`.
simple_indy_plot <- function(ca = FALSE){
  levs <- c("Sí", "NS/NC", "No")
  levs_en <- c("Yes", "No\nanswer", "No")
  pd <- combined %>%
    filter(date >= '2018-01-01') %>%
    filter(!is.na(indepe)) %>%
    group_by(indepe) %>%
    tally
  # 3-colour diverging palette; middle (no answer) forced to grey.
  cols <- RColorBrewer::brewer.pal(n = 3, name = 'Spectral')
  cols <- rev(cols)
  cols[2] <- 'darkgrey'
  pd <- pd %>%
    mutate(p = n / sum(n) * 100)
  if(ca){
    the_labs <- labs(x = '',
                     y = 'Percentatge',
                     title = "'Vol que Catalunya esdevingui un Estat independent'",
                     caption = paste0('Gràfic de Joe Brew | @joethebrew | www.vilaweb.cat. Dades del CEO.\n',
                                      self_cite(),
                                      '\nMida de mostra: ',
                                      numberfy(sum(pd$n)),
                                      ' residents de Catalunya amb ciutadania espanyola, 2018-2019.\n'))
    pd$indepe <- factor(pd$indepe, levels = levs, labels = levs)
  } else {
    the_labs <- labs(x = '',
                     y = 'Percentage',
                     title = "'Do you want Catalonia to become an independent State'",
                     caption = paste0('Chart by Joe Brew | @joethebrew | www.vilaweb.cat. Raw data from the CEO.\n',
                                      self_cite(),
                                      '\nSample size: ',
                                      numberfy(sum(pd$n)),
                                      ' residents of Catalonia with Spanish citenship, 2018-2019.\n')) # NOTE(review): "citenship" typo in user-facing caption
    pd$indepe <- factor(pd$indepe, levels = levs, labels = levs_en)
  }
  ggplot(data = pd,
         aes(x = indepe,
             y = p)) +
    geom_bar(stat = 'identity',
             aes(fill = indepe)) +
    scale_fill_manual(name = '',
                      values = cols) +
    theme_vilaweb() +
    theme(legend.position = 'none') +
    the_labs +
    theme(plot.caption = element_text(size = 9)) +
    geom_text(aes(label = round(p, digits = 1)),
              nudge_y = 5,
              alpha = 0.6)
}
# Dodged bar chart of independence support, cross-tabulated by a second
# variable (default: interest in politics).
#
# @param ca TRUE for Catalan labels/captions, FALSE for English.
# @param cross_var name (string) of the column in `combined` to cross by;
#   assumed to use the Molt/Bastant/Poc/Gens levels -- TODO confirm.
# @param legend_title legend title for the fill scale.
# @return a ggplot object. Relies on globals `combined`, `self_cite()`,
#   `theme_vilaweb()`.
simple_indy_plot_cross <- function(ca = FALSE,
                                   cross_var = 'interessat',
                                   legend_title = ''){
  levs <- c("Sí", "NS/NC", "No")
  levs_en <- c("Yes", "No\nanswer", "No")
  var_levs <- c('Molt', 'Bastant', 'Poc', 'Gens', 'No ho sap', 'No contesta')
  var_levs_en <- c('A lot', 'A fair amount', 'Little', 'Not at all', "Don't know", 'No answer')
  pd <- combined %>%
    filter(date >= '2018-01-01') %>%
    filter(!is.na(indepe)) %>%
    group_by(.dots = list('indepe', cross_var)) %>%
    tally
  names(pd)[2] <- 'var'
  cols <- RColorBrewer::brewer.pal(n = 3, name = 'Spectral')
  cols <- rev(cols)
  cols[2] <- 'darkgrey'
  # Percentages are within each level of the cross variable.
  pd <- pd %>%
    group_by(var) %>%
    mutate(p = n / sum(n) * 100)
  if(ca){
    the_labs <- labs(x = '',
                     y = 'Percentatge',
                     title = "'Vol que Catalunya esdevingui un Estat independent'",
                     caption = paste0('Gràfic de Joe Brew | @joethebrew | www.vilaweb.cat. Dades del CEO.\n',
                                      self_cite(),
                                      '\nMida de mostra: ',
                                      numberfy(sum(pd$n)),
                                      ' residents de Catalunya amb ciutadania espanyola, 2018-2019.\n'))
    pd$indepe <- factor(pd$indepe,
                        levels = levs,
                        labels = levs)
    pd$var <- factor(pd$var,
                     levels = var_levs,
                     labels = var_levs)
  } else {
    the_labs <- labs(x = '',
                     y = 'Percentage',
                     title = "'Do you want Catalonia to become an independent State'",
                     caption = paste0('Chart by Joe Brew | @joethebrew | www.vilaweb.cat. Raw data from the CEO.\n',
                                      self_cite(),
                                      '\nSample size: ',
                                      numberfy(sum(pd$n)),
                                      ' residents of Catalonia with Spanish citenship, 2018-2019.\n')) # NOTE(review): "citenship" typo in user-facing caption
    pd$indepe <- factor(pd$indepe,
                        levels = levs,
                        labels = levs_en)
    pd$var <- factor(pd$var,
                     levels = var_levs,
                     labels = var_levs_en)
  }
  pd <- pd %>% filter(!is.na(var),
                      !var %in% c('No ho sap', 'No contesta', "Don't know", 'No answer'))
  ggplot(data = pd,
         aes(x = var,
             y = p,
             group = indepe)) +
    geom_bar(stat = 'identity',
             aes(fill = indepe),
             position = position_dodge(width = 0.8)) +
    scale_fill_manual(name = legend_title,
                      values = cols) +
    theme_vilaweb() +
    # theme(legend.position = 'none') +
    the_labs +
    theme(plot.caption = element_text(size = 9),
          legend.box = 'horizontal') +
    geom_text(aes(label = round(p, digits = 1), y = p + 5),
              alpha = 0.6,
              position = position_dodge(width = 0.8)) +
    guides(fill = guide_legend(title.position = 'top', title.hjust = 0.5))
}
# Stacked bar chart of a 4-level cross variable (default: interest in
# politics) within each political party's sympathisers.
#
# @param ca TRUE for Catalan labels/captions, FALSE for English.
# @param cross_var name (string) of the column in `combined` to cross by;
#   assumed to use the Molt/Bastant/Poc/Gens levels -- TODO confirm.
# @param legend_title legend title for the fill scale.
# @return a ggplot object. Relies on globals `combined`, `self_cite()`,
#   `theme_vilaweb()`.
simple_party_plot_cross <- function(ca = FALSE,
                                    cross_var = 'interessat',
                                    legend_title = ''){
  levs <- c("CUP", "ERC", "JxCat/PDeCat", "Podem", "PSC", "Cs", "PPC", "Cap o altre partit")
  levs_en <- c("CUP", "ERC", "JxCat/PDeCat", "Podem", "PSC", "Cs", "PPC", "No or other party")
  var_levs <- c('Molt', 'Bastant', 'Poc', 'Gens', 'No ho sap', 'No contesta')
  var_levs_en <- c('A lot', 'A fair amount', 'Little', 'Not at all', "Don't know", 'No answer')
  pd <- combined %>%
    filter(date >= '2018-01-01') %>%
    filter(!is.na(partit)) %>%
    group_by(.dots = list('partit', cross_var)) %>%
    tally
  names(pd)[2] <- 'var'
  cols <- RColorBrewer::brewer.pal(n = 4, name = 'Spectral')
  # cols <- rev(cols)
  # cols[8] <- 'darkgrey'
  # Percentages are within each party.
  pd <- pd %>%
    group_by(partit) %>%
    mutate(p = n / sum(n) * 100)
  if(ca){
    the_labs <- labs(x = '',
                     y = 'Percentatge',
                     subtitle = 'Per partit polític',
                     caption = paste0('Gràfic de Joe Brew | @joethebrew | www.vilaweb.cat. Dades del CEO.\n',
                                      self_cite(),
                                      '\nMida de mostra: ',
                                      numberfy(sum(pd$n)),
                                      ' residents de Catalunya amb ciutadania espanyola, 2018-2019.\n'))
    pd$partit <- factor(pd$partit,
                        levels = (levs),
                        labels = (levs_en)) # NOTE(review): Catalan branch relabels with the ENGLISH party labels; harmless only because the differing level is filtered out below -- confirm intended
    pd$var <- factor(pd$var,
                     levels = rev(var_levs),
                     labels = rev(var_levs))
  } else {
    the_labs <- labs(x = '',
                     y = 'Percentage',
                     subtitle = 'By preferred political party',
                     caption = paste0('Chart by Joe Brew | @joethebrew | www.vilaweb.cat. Raw data from the CEO.\n',
                                      self_cite(),
                                      '\nSample size: ',
                                      numberfy(sum(pd$n)),
                                      ' residents of Catalonia with Spanish citenship, 2018-2019.\n')) # NOTE(review): "citenship" typo in user-facing caption
    pd$partit <- factor(pd$partit,
                        levels = (levs),
                        labels = (levs_en))
    pd$var <- factor(pd$var,
                     levels = rev(var_levs),
                     labels = rev(var_levs_en))
  }
  pd <- pd %>% filter(!is.na(var),
                      !var %in% c('No ho sap', 'No contesta', "Don't know", 'No answer'),
                      !partit %in% c('No or other party', 'Cap o altre partit'))
  # NOTE(review): `xp` is computed but never used by the plot below.
  pd$xp <- as.numeric(pd$var)
  pd$xp <- pd$xp + (dplyr::percent_rank(pd$xp) - 0.5)
  ggplot(data = pd,
         aes(x = partit,
             y = p,
             group = var)) +
    geom_bar(stat = 'identity',
             aes(fill = var),
             position = position_stack()) +
    scale_fill_manual(name = legend_title,
                      values = cols) +
    theme_vilaweb() +
    # theme(legend.position = 'none') +
    the_labs +
    theme(plot.caption = element_text(size = 9),
          legend.box = 'horizontal') +
    geom_text(aes(label = round(p, digits = 1)),
              alpha = 0.6,
              position = position_stack(),
              vjust = 1) +
    guides(fill = guide_legend(title.position = 'top', title.hjust = 0.5, reverse = T)) +
    geom_hline(yintercept = 50, alpha = 0.3)
}
|
f82332dc123ba83fad5e7000d1ad61e9e7b6aa9a
|
7c66f35138cf55d4848f399911c89c1fbe730c53
|
/continuing-education-app/server.R
|
8e01a31fd81ae7dbae87226af7d4fd649f6d2288
|
[] |
no_license
|
stecaron/cas-continuing-education
|
6619e2cdd0b32d6766c0abee0029f851b64b1fd7
|
8e707b33ced34efc758c4fe1ab74e14f9c1aeb33
|
refs/heads/master
| 2021-11-09T22:05:22.254997
| 2021-11-08T21:35:02
| 2021-11-08T21:35:02
| 131,144,508
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,821
|
r
|
server.R
|
# Define server function --------------------------------------------------
# Shiny server for a continuing-education hour tracker: adds new log rows,
# persists them to data/data_logs.csv, and renders summary value boxes and
# a yearly bar chart. `import_data`, `update_data`, `export_data`,
# `number_of_calendar_years`, `min_number_of_combine_hours` and
# `min_number_of_structured_hours` are defined elsewhere (presumably
# global.R -- TODO confirm).
server <- function(input, output, session) {
  # One-row data.table built from the "new log" form inputs.
  input_new_log <- reactive(
    data.table(
      log_hours_type = input$new_log_type_hours,
      log_number_hours = input$new_log_number_hours,
      log_date = input$new_log_date,
      log_module = input$new_log_module,
      log_location = input$new_log_location,
      log_description = input$new_log_description
    )
  )
  # Mutable store of all logged entries, seeded from the CSV on startup.
  logs <- reactiveValues(data = import_data("data/data_logs.csv"))
  # On "add": append the new row, write it back to disk, confirm via modal.
  observeEvent(input$new_log_add_button, {
    logs$data <- update_data(logs$data, input_new_log())
    export_data(logs$data, "data/data_logs.csv")
    showModal(modalDialog(
      title = "Lezgo!",
      "Your log has been successfully added to your log file."
    ))
  })
  output$historical_logs_table <- renderDataTable({
    datatable(data = logs$data, colnames = c("Hours Type", "Number of hours", "Date", "Professional hours", "Location", "Description"), options = list(order = list(list(2, 'asc'))))
  })
  # Logs restricted to the rolling window of calendar years ending at the
  # year of the selected stats date.
  data_stats <- reactive({
    included_years <- c((year(input$stats_date) - number_of_calendar_years + 1): year(input$stats_date))
    data.table(logs$data)[year(log_date) %in% included_years,]
  })
  output$value_box_total_hours <- renderValueBox({
    valueBox(
      sum(data_stats()$log_number_hours), "Total hours", icon = icon("clock"),
      color = "orange"
    )
  })
  # Percent of the overall hours target, capped at 100.
  output$value_box_total_objective <- renderValueBox({
    valueBox(
      round(100 * min(sum(data_stats()$log_number_hours)/min_number_of_combine_hours, 1), 1), "% total hours target", icon = icon("bullseye"),
      color = "orange"
    )
  })
  # Percent of the structured-hours target, capped at 100.
  output$value_box_structured_objective <- renderValueBox({
    valueBox(
      round(100 * min(sum(data_stats()[log_hours_type == "Structured",]$log_number_hours)/min_number_of_structured_hours, 1), 1), "% structured hours target", icon = icon("bullseye"),
      color = "orange"
    )
  })
  output$value_box_last_module_date <- renderValueBox({
    valueBox(
      max(data_stats()[log_module == "Yes",]$log_date), "Last module completed", icon = icon("calendar"),
      color = "orange"
    )
  })
  # Hours per calendar year, split by hours type (structured vs not).
  output$graph_detailed_summary <- renderPlot({
    data_stats()[, .(number_hours = sum(log_number_hours)), by = .(year = year(log_date), hours_type = log_hours_type)] %>%
      ggplot(aes(x = as.factor(year), y = number_hours, fill = hours_type)) +
      geom_bar(stat = "identity", position = "dodge") +
      geom_text(aes(label = number_hours), vjust = -0.2, position = position_dodge(.9)) +
      scale_x_discrete("Calendar year") +
      scale_y_continuous("Number of hours completed (hours)") +
      scale_fill_discrete("Type of hours") +
      theme_classic() +
      theme(legend.position = "bottom")
  })
}
|
c0e34f8a8466f6803517e565efc608b04b6a2ebe
|
0ec13d52c92625114567f2abb57de965c45ea0fc
|
/code/day-1-puzzle.R
|
4b79f4af31e842e0f54e8bab99a8b134a256ca6a
|
[
"MIT"
] |
permissive
|
klevan/adventofcode2019
|
9fda1b1fcf0b09c38c3329581e1185f03cfd2e35
|
d18fe122a8dd8de5bb41ff3b08fa4fe87db0fcba
|
refs/heads/master
| 2020-09-22T11:51:18.759211
| 2019-12-01T17:15:16
| 2019-12-01T17:15:16
| 225,181,816
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,677
|
r
|
day-1-puzzle.R
|
#' ############################################################################
#' --- Day 1: The Tyranny of the Rocket Equation ---
#'
#' Santa has become stranded at the edge of the Solar System while delivering
#' presents to other planets! To accurately calculate his position in space,
#' safely align his warp drive, and return to Earth in time to save Christmas,
#' he needs you to bring him measurements from fifty stars.
#'
#' Collect stars by solving puzzles. Two puzzles will be made available on each
#' day in the Advent calendar; the second puzzle is unlocked when you complete
#' the first. Each puzzle grants one star. Good luck!
#'
#' The Elves quickly load you into a spacecraft and prepare to launch.
#'
#' At the first Go / No Go poll, every Elf is Go until the Fuel Counter-Upper.
#' They haven't determined the amount of fuel required yet.
#'
#' Fuel required to launch a given module is based on its mass. Specifically,
#' to find the fuel required for a module, take its mass, divide by three,
#' round down, and subtract 2.
#'
#' For example:
#'
#' * For a mass of 12, divide by 3 and round down to get 4, then subtract 2 to
#' get 2.
#' * For a mass of 14, dividing by 3 and rounding down still yields 4, so the
#' fuel required is also 2.
#' * For a mass of 1969, the fuel required is 654.
#' * For a mass of 100756, the fuel required is 33583.
#'
#' The Fuel Counter-Upper needs to know the total fuel requirement. To find it,
#' individually calculate the fuel needed for the mass of each module (your
#' puzzle input), then add together all the fuel values.
#'
#' --- Part Two ---
#' During the second Go / No Go poll, the Elf in charge of the Rocket Equation
#' Double-Checker stops the launch sequence. Apparently, you forgot to include
#' additional fuel for the fuel you just added.
#'
#' Fuel itself requires fuel just like a module - take its mass, divide by three,
#' round down, and subtract 2. However, that fuel also requires fuel, and that
#' fuel requires fuel, and so on. Any mass that would require negative fuel should
#' instead be treated as if it requires zero fuel; the remaining mass, if any,
#' is instead handled by wishing really hard, which has no mass and is outside
#' the scope of this calculation.
#'
#' So, for each module mass, calculate its fuel and add it to the total. Then,
#' treat the fuel amount you just calculated as the input mass and repeat the
#' process, continuing until a fuel requirement is zero or negative.
#'
#' For example:
#' A module of mass 14 requires 2 fuel. This fuel requires no further fuel
#' (2 divided by 3 and rounded down is 0, which would call for a negative fuel),
#' so the total fuel required is still just 2.
#'
#' At first, a module of mass 1969 requires 654 fuel.
#' Then, this fuel requires 216 more fuel (654 / 3 - 2).
#' 216 then requires 70 more fuel, which requires 21 fuel, which requires 5 fuel,
#' which requires no further fuel. So, the total fuel required for a module of
#' mass 1969 is 654 + 216 + 70 + 21 + 5 = 966.
#'
#' The fuel required by a module of mass 100756 and its fuel is:
#' 33583 + 11192 + 3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346.
#' ############################################################################
rm(list=ls())  # NOTE(review): wipes the whole workspace; avoid in sourced scripts
## Set Directory & Options
wd = '~/GitHub/adventofcode2019'
options(stringsAsFactors = F)
## Input Data For Puzzle
# One module mass per line; read as a single-column data frame named `mass`.
day_1_puzzle = read.delim(paste(wd,'data/day-1-input.txt',sep='/'), header = F,
                          col.names = 'mass', encoding = 'UTF-8')
## Functions
# Fuel needed to launch a module of the given mass:
# floor(mass / 3) - 2 (vectorised over `mass`).
FuelRequired <- function(mass){
  floor(mass / 3) - 2
}
### Part 1:
### What is the sum of the fuel requirements for all of the modules
### on your spacecraft?
# Apply the per-module formula to every mass and total the result.
fuel_required <- sapply(day_1_puzzle$mass, FuelRequired)
sum(fuel_required)
# 3273471
### Part 2:
### What is the sum of the fuel requirements for all of the modules on your
### spacecraft when also taking into account the mass of the added fuel?
### (Calculate the fuel requirements for each module separately, then add
### them all up at the end.)
## Functions
# Total fuel for a module, also counting the fuel needed to lift the fuel
# itself: repeatedly apply floor(m / 3) - 2 and accumulate until the next
# increment would be zero or negative. Scalar `mass` expected.
FuelRequired <- function(mass){
  total <- floor(mass / 3) - 2   # fuel for the module itself
  increment <- total
  repeat {
    increment <- floor(increment / 3) - 2
    if (increment <= 0) break
    total <- total + increment
  }
  total
}
# Recompute using the recursive FuelRequired defined just above.
fuel_required <- sapply(day_1_puzzle$mass, FuelRequired)
sum(fuel_required)
# 4907345
|
62f731c3bcdbc074995561dbc2bd70d92a43ea08
|
337deca529928a9036c8939cb47a39b7435d0f1a
|
/man/taf.library.Rd
|
53ff6736e18f879d4ea27b63fafa4e2cbb851242
|
[] |
no_license
|
alko989/icesTAF
|
883b29e78ee69a5ef2dd5e5ca5a680cb220789d8
|
a5beaaf64ed1cacc09ca7732e791e89373d1d044
|
refs/heads/master
| 2020-04-28T10:02:16.923481
| 2019-03-09T23:15:02
| 2019-03-09T23:15:02
| 175,188,781
| 0
| 0
| null | 2019-03-12T10:36:18
| 2019-03-12T10:36:18
| null |
UTF-8
|
R
| false
| true
| 1,390
|
rd
|
taf.library.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taf.library.R
\name{taf.library}
\alias{taf.library}
\title{Enable TAF Library}
\usage{
taf.library(create = TRUE, quiet = FALSE)
}
\arguments{
\item{create}{whether to create the directory \file{bootstrap/library} if it
does not already exist.}
\item{quiet}{whether to suppress messages in the case when a new directory
\file{bootstrap/library} is created.}
}
\value{
The names of packages currently installed in the TAF library.
}
\description{
Add local TAF library \file{bootstrap/library} to the search path, where
packages are stored.
}
\note{
This function inserts the directory entry \code{"bootstrap/library"} in front
of the existing library search path. The directory is created, if it does not
already exist.
The purpose of the TAF library is to retain R packages used in a TAF
analysis that are not archived on CRAN, to support long-term
reproducibility of TAF analyses.
}
\examples{
\dontrun{
# Enable TAF library
taf.library()
# Show updated path
.libPaths()
# Show packages in TAF library
print(taf.library())
# Load packages
library(this)
library(that)
# BibTeX references
library(bibtex)
write.bib(taf.library())
}
}
\seealso{
\code{\link{.libPaths}} is the underlying base function to get/set the
library search path.
\code{\link{icesTAF-package}} gives an overview of the package.
}
|
fe9d0951432f32fb211f2e67fe07887bc4163088
|
d58b47e2da19df8c3c6b10238e0283655ea4c124
|
/man/em38_pair.Rd
|
038cfe1385d0800135414c8a6affbf0675f116aa
|
[
"MIT"
] |
permissive
|
obrl-soil/em38
|
64d2c029f549553da582c2a473013e3cf53b0c9d
|
1fe8ae01713f39a5179bff6eeef84152ad4dc509
|
refs/heads/master
| 2023-01-12T04:45:55.139406
| 2023-01-05T21:08:03
| 2023-01-05T21:08:03
| 132,758,232
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,319
|
rd
|
em38_pair.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatialise.R
\name{em38_pair}
\alias{em38_pair}
\title{Reconcile locations of paired data}
\usage{
em38_pair(decode = NULL, time_filter = NULL)
}
\arguments{
\item{decode}{spatial point dataframe for a survey line produced by
\code{\link{em38_decode}} or \code{\link{em38_from_file}}.}
\item{time_filter}{removes point pairs that are close together but that were
sampled more than n seconds apart.}
}
\value{
An sf data frame with sfc_POINT geometry. WGS84 projection. Output
locations are averages of input horizontal/vertical paired locations.
}
\description{
Where paired horizontal and vertical readings have been taken during a
'manual' mode survey, the first and second readings at each station should
have the same location. The nature of the device logging generally precludes
this from happening by default, especially with high-frequency GPS recording.
This function reconciles the locations of such paired datasets after they
have been generated using \code{\link{em38_decode}} or
\code{\link{em38_from_file}}.
}
\note{
Input survey should be of survey type 'GPS' and record type 'manual'.
Both input datasets should ideally have the same number of rows, with row 1
of horizontal_data paired with row 1 of vertical_data.
}
|
70ae033c599ab62f4fffa65fc416e42d4cf2be78
|
1288617cb5321e9fe5549b957ec785cd677e9f0d
|
/ICU_FF_datacontrol/fileReader.R
|
f1f5b1fefa5b8b2ed5ff83aceb891c7af92516ac
|
[] |
no_license
|
joytywu/ICU_frequent_flyer
|
a1ccaceb96a923a8f5a193aeb70b36c47146716b
|
140a4875011b7066d4e9674047668b46f5895e8c
|
refs/heads/master
| 2020-12-24T10:57:03.041748
| 2016-11-07T22:26:26
| 2016-11-07T22:26:26
| 73,117,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,402
|
r
|
fileReader.R
|
#To run this file, save the file in the assets folder and type in R Console:
#setwd("~/Documents/ICU_Frequent_Flyers-addedfiles")
#source(paste(getwd(),"/assets/fileReader.R",sep=''))
#Should get two dataframes: summariesFinal and nsnFinal
op <- options(stringsAsFactors=F)
library(dplyr)
Resultsreader <- function(notepath){
setwd(paste(getwd(), notepath,sep = '/'))
files <- list.files() #[1:n]
#setwd("~/Documents/ICU_Frequent_Flyers-addedfiles")
#files <- list.files(path = paste(getwd(),"data/results", notetype, sep = "/"))
allfiles <- NULL
for (i in 1:length(files)){
file_i <- read.csv(files[i], sep = ",")
#Create an annotator column called operator
file_i$operator <- rep(substr(files[i], 1, 3), nrow(file_i))
file_i$batch.id <- rep(tolower(substr(files[i], 4, 13)), nrow(file_i))
colnames(file_i) <- c("Subject.ID", "Hospital.Admission.ID", "ICU.ID", "Note.Type", "Chart.time", "Category", "Real.time", "None", "Obesity", "Non.Adherence", "Developmental.Delay.Retardation", "Advanced.Heart.Disease", "Advanced.Lung.Disease", "Schizophrenia.and.other.Psychiatric.Disorders", "Alcohol.Abuse", "Other.Substance.Abuse", "Chronic.Pain.Fibromyalgia", "Chronic.Neurological.Dystrophies", "Advanced.Cancer", "Depression", "Dementia", "Unsure", "operator", "batch.id")
allfiles <- unique(rbind(allfiles, file_i))
}
setwd("~/Documents/ICU_Frequent_Flyers-addedfiles")
return(allfiles)
}
#Read and rbind all discharge results notes
disNotesRes <- Resultsreader("data/results/dis")
#Read and rbind all nursing results notes
nsnNotesRes <- Resultsreader("data/results/nsn")
#Need to merge with unannotated files to get cohort and text info
#Read and rbind all discharge unannotated notes
#Adapt Resultsreader function to:
Notesreader <- function(notepath){
setwd(paste("~/Documents/ICU_Frequent_Flyers-addedfiles", notepath,sep = '/'))
files <- list.files()#[c(1:2)]
allfiles <- NULL
for (i in 1:length(files)){
file_i <- read.csv(files[i], sep = ",")
#A few of the files have different column names and arrangement
#So I renamed some columns and got rid of some columns here
colnames(file_i)[1] <- "subject.id"
colnames(file_i)[2] <- "Hospital.Admission.ID"
file_i <- file_i[, c("subject.id", "Hospital.Admission.ID", "category", "text", "cohort")]
#Problem discharge files 3, 7, 8, 11, 15 (colnames different)
#Create a batch.id column that matches with result files' batch.id
file_i$batch.id <- rep(tolower(substr(files[i], 3, 12)), nrow(file_i))
allfiles <- unique(rbind(allfiles, file_i))
}
setwd("~/Documents/ICU_Frequent_Flyers-addedfiles")
return(allfiles)
}
disNotes <- Notesreader("data/notes/dis")
nsnNotes <- Notesreader("data/notes/nsn")
#Merge results file with orginal files that have the text and cohort info:
summariesFinal <- merge(disNotes, disNotesRes, by = c("Hospital.Admission.ID","batch.id"))
nsnFinal <- merge(nsnNotes, nsnNotesRes, by = c("Hospital.Admission.ID","batch.id"))
#Read miscellaneous missing notes
setwd("~/Documents/ICU_Frequent_Flyers-addedfiles/data/results")
missednotes <- read.csv("JTWDis05OCT16Results.csv")
missednotes$operator <- rep(substr("JTWDis05OCT16Results.csv", 1, 3), nrow(missednotes))
missednotes$batch.id <- rep(tolower(substr("JTWDis05OCT16Results.csv", 4, 13)), nrow(missednotes))
colnames(missednotes) <- c("Subject.ID", "Hospital.Admission.ID", "ICU.ID", "Note.Type", "Chart.time", "Category", "Real.time", "None", "Obesity", "Non.Adherence", "Developmental.Delay.Retardation", "Advanced.Heart.Disease", "Advanced.Lung.Disease", "Schizophrenia.and.other.Psychiatric.Disorders", "Alcohol.Abuse", "Other.Substance.Abuse", "Chronic.Pain.Fibromyalgia", "Chronic.Neurological.Dystrophies", "Advanced.Cancer", "Depression", "Dementia", "Unsure", "operator", "batch.id")
missednotes <- merge(disNotes[, 1:5], missednotes, by = "Hospital.Admission.ID")
missednotes <- missednotes %>% select(Hospital.Admission.ID, batch.id, subject.id:operator)
summariesFinal <- rbind(summariesFinal, missednotes)
#Write the Final files into new .csv files in the main directory
setwd("~/Documents/ICU_Frequent_Flyers-addedfiles")
write.csv(summariesFinal, file = "AllDischargeFinal.csv", row.names = F)
write.csv(nsnNotesRes, file = "AllnursingFinal.csv", row.names = F)
options(op)
#http://stackoverflow.com/questions/25102966/why-rbind-throws-a-warning
|
3cef5424bc8f16130b2d6c20db82cbab00fd6eee
|
d0d061329421401283a3db1f8e7aa016e61888d7
|
/man/heatmapSpp.Rd
|
e93fc9d25ae81436f356b8bda0e354df822d4e8a
|
[
"MIT"
] |
permissive
|
boopsboops/spider
|
87885b53570a98aece6e7ca1ce600330d9b95d25
|
e93c5b4bc7f50168b8a155a6dca7c87dfbdef134
|
refs/heads/master
| 2021-05-12T07:38:37.413486
| 2019-03-07T21:43:43
| 2019-03-07T21:43:43
| 117,250,046
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,185
|
rd
|
heatmapSpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmapSpp.R
\name{heatmapSpp}
\alias{heatmapSpp}
\title{Visualise a distance matrix using a heatmap}
\usage{
heatmapSpp(distObj, sppVector, col = NULL, axisLabels = NULL,
triangle = "both", showData = FALSE, dataRound = 3, dataCEX = 1)
}
\arguments{
\item{distObj}{A matrix or object of class \code{dist}.}
\item{sppVector}{The species vector. See \code{\link{sppVector}}.}
\item{col}{A vector giving the colours for the heatmap.}
\item{axisLabels}{A character vector that provides the axis labels for the
heatmap. By default the species vector is used.}
\item{triangle}{Which triangle of the heatmap should be plotted. Possible
values of "both", "upper" and "lower". Default of "both".}
\item{showData}{Logical. Should the data be shown on the heatmap? Default of
FALSE.}
\item{dataRound}{The number of significant figures the printed data will
show. Default of 3.}
\item{dataCEX}{Size of text for printed data. Default of 1.}
}
\value{
Plots a heatmap of the distance matrix. Darker colours indicate
shorter distances, lighter colours indicate greater distances.
}
\description{
This function plots a heatmap of the distance matrix, with shorter distances
indicated by darker colours.
}
\details{
The default palette has been taken from the \code{colorspace} package.
}
\examples{
data(dolomedes)
doloDist <- ape::dist.dna(dolomedes, model = "raw")
doloSpp <- substr(dimnames(dolomedes)[[1]], 1, 5)
heatmapSpp(doloDist, doloSpp)
heatmapSpp(doloDist, doloSpp, axisLabels = dimnames(dolomedes)[[1]])
data(anoteropsis)
anoDist <- ape::dist.dna(anoteropsis, model = "raw")
anoSpp <- sapply(strsplit(dimnames(anoteropsis)[[1]], split="_"),
function(x) paste(x[1], x[2], sep="_"))
heatmapSpp(anoDist, anoSpp)
heatmapSpp(anoDist, anoSpp, showData = TRUE)
heatmapSpp(anoDist, anoSpp, showData = TRUE, dataRound = 1, dataCEX = 0.4)
heatmapSpp(anoDist, anoSpp, triangle = "upper")
heatmapSpp(anoDist, anoSpp, triangle = "lower")
heatmapSpp(anoDist, anoSpp, triangle = "lower", showData = TRUE, dataRound = 1, dataCEX = 0.4)
}
\author{
Samuel Brown <s_d_j_brown@hotmail.com>
}
\keyword{Utilities}
|
126d94726c7de102809f6eaa40404aef172ba7bf
|
950030f19c1368f889700299bc36ecf7104f56b8
|
/man/list_recent_uploads.Rd
|
b38de2536cb81d7884cbfbc222414d1cec6caf09
|
[
"MIT"
] |
permissive
|
ropensci/EDIutils
|
0cadce6b8139417fcfa65194e7caf8c77ea087af
|
b1f59cccee3791a04d7702bcb37f76995ae2fcbe
|
refs/heads/main
| 2023-05-22T09:49:03.633710
| 2022-09-09T16:12:30
| 2022-09-09T16:12:30
| 159,572,464
| 2
| 1
|
NOASSERTION
| 2022-11-21T16:22:08
| 2018-11-28T22:13:59
|
R
|
UTF-8
|
R
| false
| true
| 1,254
|
rd
|
list_recent_uploads.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_recent_uploads.R
\name{list_recent_uploads}
\alias{list_recent_uploads}
\title{List recent uploads}
\usage{
list_recent_uploads(type, limit = 5, as = "data.frame", env = "production")
}
\arguments{
\item{type}{(character) Upload type. Can be: "insert" or "update".}
\item{limit}{(numeric) Maximum number of results to return}
\item{as}{(character) Format of the returned object. Can be: "data.frame"
or "xml".}
\item{env}{(character) Repository environment. Can be: "production",
"staging", or "development".}
}
\value{
(data.frame or xml_document) Data package uploads
}
\description{
List recent uploads
}
\examples{
\dontrun{
# Get the 3 newest revisions
dataPackageUploads <- list_recent_uploads("update", 3)
}
}
\seealso{
Other Listing:
\code{\link{list_data_descendants}()},
\code{\link{list_data_entities}()},
\code{\link{list_data_package_identifiers}()},
\code{\link{list_data_package_revisions}()},
\code{\link{list_data_package_scopes}()},
\code{\link{list_data_sources}()},
\code{\link{list_deleted_data_packages}()},
\code{\link{list_recent_changes}()},
\code{\link{list_service_methods}()},
\code{\link{list_user_data_packages}()}
}
\concept{Listing}
|
5cbb0efc449d6161f8abd4972d504bf3d9ca812b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TOSTER/examples/powerTOSTone.Rd.R
|
574560e2418ddd08d3ee15e175a4547db794134d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 769
|
r
|
powerTOSTone.Rd.R
|
library(TOSTER)
### Name: powerTOSTone
### Title: Power analysis for TOST for one-sample t-test (Cohen's d).
### Aliases: powerTOSTone
### ** Examples
## Sample size for alpha = 0.05, 90% power, equivalence bounds of
## Cohen's d = -0.3 and Cohen's d = 0.3, and assuming true effect = 0
powerTOSTone(alpha=0.05, statistical_power=0.9, low_eqbound_d=-0.3, high_eqbound_d=0.3)
## Power for sample size of 121, alpha = 0.05, equivalence bounds of
## Cohen's d = -0.3 and Cohen's d = 0.3, and assuming true effect = 0
powerTOSTone(alpha=0.05, N=121, low_eqbound_d=-0.3, high_eqbound_d=0.3)
## Equivalence bounds for sample size of 121, alpha = 0.05, statistical power of
## 0.9, and assuming true effect = 0
powerTOSTone(alpha=0.05, N=121, statistical_power=.9)
|
2ea8cf81076713fbb2ff72c4478c77d92b32ddf8
|
9b93f997e005af01c03bbe2a2f1e7cb9adeb5596
|
/R_scripts/Advanced/Session#6.R
|
4f3735ddddf4049d7e2b72678fcbc1e0373eeb97
|
[] |
no_license
|
brunobellisario/Rcourse
|
aca0e5b7b01333dc7849d78930c62d7c7d9bf269
|
9c2fd209e025cb7e26dafcd5dc21d2bb8a6500b5
|
refs/heads/main
| 2023-05-19T04:07:37.191537
| 2021-06-01T06:49:14
| 2021-06-01T06:49:14
| 359,836,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,873
|
r
|
Session#6.R
|
#DATA ANALYSIS WITH R
#SESSION#6 - Multivariate analysis#1
#Dissimilarity/similarity matrix
#Broadly speaking, multivariate patterns amongst objects can either
#be quantified on the basis of the associations (correlation or covariance)
#between variables (species) on the basis of similarities between objects.
#The former are known as R-mode analyses and the later Q-mode analyses.
#Consider the following fabricated data matrices.
#The Y matrix on the left consists of four species abundances from five sites.
#The X matrix on the right represents five environmental measurements
#(concentrations in mg/L) from five sites.
Y <- matrix(c(
+ 2,0,0,5,
+ 13,7,10,5,
+ 9,5,55,93,
+ 10,6,76,81,
+ 0,2,6,0)
,5,4,byrow=TRUE)
colnames(Y) <- paste("Sp",1:4,sep="")
rownames(Y) <- paste("Site",1:5,sep="")
E <- matrix(c(
+ 0.2,0.5,0.7,1.1,
+ 0.1,0.6,0.7,1.3,
+ 0.5,0.6,0.6,0.7,
+ 0.7,0.4,0.3,0.1,
+ 0.1,0.4,0.5,0.1
),5,4,byrow=TRUE)
colnames(E) <- paste("Conc",1:4,sep="")
rownames(E) <- paste("Site",1:5,sep="")
#Measures of association
#sums-of-squares-and-cross-products (SSCP) matrix is a symmetrical diagonal
#matrix with sums of squares of each variable on the diagonals and sums of
#cross products on the off-diagonals. Alternatively, the SSCP values can be
#calculated as the cross-products of centered variables.
crossprod(scale(Y,scale=FALSE))
#variance covariance matrix. The SSCP values can be converted to average differences
#through division by independent sample size (df).
#The variance-covariance matrix is a symmetrical diagonal matrix with variance of
#each variable on the diagonals and covariances on the off-diagonals.
#A variance covariance matrix is calculated by dividing the
#sums-of-squares-and-cross-products by the degrees of freedom
#(number of observations n minus 1).
var(Y)
#correlation matrix.
#The variance-covariance matrix can be standardized
#(values expressed on a scale independent of the scale of the original data)
#into a correlation matrix by dividing the matrix elements by the standard deviations
#of the constituting variables.
cor(Y)
#Measures of distance
#Measures of distance (or resemblance) between objects reflect the degree of
#similarity between pairs of objects. Intuitively, small values convey small
#degrees of difference between things. Hence distances are usually expressed
#as dissimilarity rather than similarity. A small value of dissimilarity
#(large degree of similarity) indicates a high degree of resemblance between two objects.
#There are a wide range of distance measures, each of which is suited to
#different circumstances and data. Most of these dissimilarities are supported
#via the vegdist() function of the vegan package.
#In the following j and k are the two objects (rows) being compared and i refers
#to the variables (columns).
#Euclidean distance represents the geometric distance between two points in
#multidimensional space. Euclidean distance is bounded by zero when two objects
#have identical variable values. However, there is no upper bound and the magnitude
#of the values depends on the scale of the observations as well as the sample size.
#Euclidean distance is useful for representing differences of purely measured variables
#(of similar scale), for which the simple geometric distances do have real meaning.
#However it is not well suited to data such as species abundances
#(without prior standardizations) due to its lack of a maximum and its high
#susceptibility to large differences (due to being based on squared differences).
if(!require(vegan)){install.packages("vegan")}
vegdist(Y,method="euclidean")
vegdist(E,method="euclidean")
#Note:
#counter intuitively, sites 1 and 5 of the species abundances are considered the most similar - not desirable as they have nothing in common
#sites 1 and 5 have low species counts and therefore low distances - not desirable for abundance data
#sites 1 and 2 in the environmental data are considered the most similar and are separated by 0.245 units (mg/L)
#χ2 distance is essentially the euclidean distances of relative abundances
#(frequencies rather than raw values) weighted (standardized) by the square root
#of the inverse of column sums and multiplied by the square root of the total abundances.
#Since χ2 distance works on frequencies, it is only relevant for abundance data
#for which it is arguably more appropriate than euclidean distances
#(due to the non-linearity of species abundances).
#As a result of working with relative abundances (frequencies), all sites and
#species are treated equally - that is, unlike the related euclidean distance,
#the distance values are not dependent on absolute magnitudes.
dist(decostand(Y,method="chi"))
#Note:
#sites 3 and 4 are considered the most similar and sites 1 and 5 the most dissimiliar
#(consistent with expectations).
#the units of the distances don't have any real interpretation
#Hellinger distance is essentially the euclidean distances of square root
#relative abundances (frequencies rather than raw values).
#Square rooting the frequencies reduces the impacts of relatively abundant species.
#Like χ2 distance, the Hellinger distance works on frequencies and therefore is only
#relevant for abundance data. A Hellinger transformation can be a useful preparation
#of species abundance data where the abundances are expected to by unimodal.
dist(decostand(Y,method="hellinger"))
#Note:
#sites 3 and 4 are considered the most similar and sites 1 and 5 the most dissimiliar
#(consistent with expectations).
#the units of the distances don't have any real interpretation
#Bray-Curtis dissimilarities are considered most appropriate for species abundance
#data as they:
#reach a maximum value of 1 when two objects have nothing in common
#ignores joint absences (0's)
#Nevertheless, it is predominantly influenced by large values, and therefore
#standardizations are recommended prior to generating a Bray-Curtis dissimilarity.
vegdist(Y,method="bray")
vegdist(E,method="bray")
#As a rule of thumb, when working with species presence/abyndance data, Euclidean distance
#should be avoided. Better using the bray curtis one.
#Conversely, Euclidean distance works well with environmental data.
#When species data are given as presence/absence (which is the norm in many studies)
#The use of Jaccard distance is reccomended
vegdist(Y,method="jaccard")
#The dist object in R is a particular type of data. It is not a matrix, nor a data frame
#object.
#Sometimes we need to convert the dist object in a matrix to use specific functions.
#Let's get back to the iris dataset
data(iris)#with this command we will recall the dataset from scratch.
iris
#Let's calculate the distance between individuals of different species based on the
#sepal and petal length
#We will use a the gowdis measures (Gower 1971) of dissimilarity for mixed variables,
#which is able to handle both quantitative and qualitative variables.
#This function is particularly efficient when working with species traits data
#The function gowdis is in the FD package
if(!require(FD)){install.packages("FD")}
?gowdis#take a look to the help
GD=gowdis(data.frame(iris$Sepal.Length,iris$Petal.Length))
#We can visualize our result by using the function heatmap(), which uses distance matrices
#to create a false color image with a dendrogram added to the left side and to the top
heatmap(GD)
heatmap(as.matrix(GD))
#This introduces the:
#Cluster analysis
#Clustering algorithms group a set of data points into subsets or clusters.
#The algorithms' goal is to create clusters that are coherent internally,
#but clearly different from each other externally. In other words,
#entities within a cluster should be as similar as possible and entities in one cluster
#should be as dissimilar as possible from entities in another.
#Broadly speaking there are two ways of clustering data points based on
#the algorithmic structure and operation, namely agglomerative and divisive.
#Agglomerative : An agglomerative approach begins with each observation
#in a distinct (singleton) cluster, and successively merges clusters together
#until a stopping criterion is satisfied.
#Divisive : A divisive method begins with all patterns in a single cluster
#and performs splitting until a stopping criterion is met.
#Agglomerative or bottom-up approach starts with each data point as its own cluster
#and then combine clusters based on some similarity measure.
#The idea can be easily adapted for divisive methods as well.
#The similarity between the clusters is often calculated from the dissimilarity
#measures like the euclidean distance between two clusters.
#So the larger the distance between two clusters, the better it is.
#There are many distance metrics that you can consider to calculate the dissimilarity
#measure, and the choice depends on the type of data in the dataset.
#For example if you have continuous numerical values in your dataset you can use
#euclidean distance, if the data is binary you may consider the Jaccard distance
#(helpful when you are dealing with categorical data for clustering after you have
#applied one-hot encoding).
#Pre-processing operations for Clustering
#There are a couple of things you should take care of before starting.
#Scaling
#It is imperative that you normalize your scale of feature values in order
#to begin with the clustering process. This is because each observations'
#feature values are represented as coordinates in n-dimensional space
#(n is the number of features) and then the distances between these coordinates
#are calculated. If these coordinates are not normalized, then it may lead to false
#results.
#Dendrograms
#In hierarchical clustering, you categorize the objects into a hierarchy similar
#to a tree-like diagram which is called a dendrogram. The distance of split or merge
#(called height) is shown on the y-axis.
#One question that might have intrigued you by now is how do you decide when to stop
#merging the clusters? Well, that depends on the domain knowledge you have about the data.
#But sometimes you don't have that information too. In such cases, you can leverage
#the results from the dendrogram to approximate the number of clusters. You cut the
#dendrogram tree with a horizontal line at a height where the line can traverse the
#maximum distance up and down without intersecting the merging point.
#Measuring the goodness of Clusters
#Perhaps the most important part in any unsupervised learning task is the analysis
#of the results. After you have performed the clustering using any algorithm and any
#sets of parameters you need to make sure that you did it right. But how do you
#determine that?
#Well, there are many measures to do this, perhaps the most popular one is the
#Dunn's Index. Dunn's index is the ratio between the minimum inter-cluster distances
#to the maximum intra-cluster diameter. The diameter of a cluster is the distance between
#its two furthermost points. In order to have well separated and compact clusters you
#should aim for a higher Dunn's index
#We will apply hierarchical clustering on the seeds dataset.
#This dataset consists of measurements of geometrical properties of
#kernels belonging to three different varieties of wheat: Kama, Rosa and Canadian.
#It has variables which describe the properties of seeds like area, perimeter,
#asymmetry coefficient etc. There are 70 observations for each variety of wheat.
set.seed(786)
file_loc <- 'seeds_dataset.txt'
seeds_df <- read.csv(file_loc,sep = '\t',header = FALSE)
#Since the dataset doesn't have any column names you will give columns name yourself from the data description.
feature_name <- c('area','perimeter','compactness','length.of.kernel','width.of.kernal','asymmetry.coefficient','length.of.kernel.groove','type.of.seed')
colnames(seeds_df) <- feature_name
#It's advisable to gather some basic useful information about the dataset like
#its dimensions, data types and distribution, number of NAs etc.
#You will do so by using the str(), summary() and is.na() functions in R.
str(seeds_df)
summary(seeds_df)
any(is.na(seeds_df))
#You will now store the labels in a separate variable and exclude the type.of.seed column
#from your dataset in order to do clustering.
#Later you will use the true labels to check how good your clustering turned out to be.
seeds_label <- seeds_df$type.of.seed
seeds_df$type.of.seed <- NULL
str(seeds_df)
#Now we will use R's scale() function to scale all your column values.
seeds_df_sc <- as.data.frame(scale(seeds_df))
summary(seeds_df_sc)
#Since all the values here are continuous numerical values, we will use the
#euclidean distance method.
dist_mat <- dist(seeds_df_sc, method = 'euclidean')
hclust_avg <- hclust(dist_mat, method = 'average')
plot(hclust_avg)
#The problem here is that we have missin (Not Available, NA) values in our dataset.
#We therefore should substitute this values with something else, e.g., 0
dist_mat[is.na(dist_mat)]=0
hclust_avg <- hclust(dist_mat,
method = 'average')#with the method parameters we can change
#the type agglomeration method to be used
plot(hclust_avg)
hclust_sing <- hclust(dist_mat,
method = 'single')#use of single agglomerative method
hclust_comp <- hclust(dist_mat,
method = 'complete')#use of complete agglomerative method
#Let's plot all togheter to see (if any) the differences
par(mfrow=c(3,1))
plot(hclust_avg)
plot(hclust_sing)
plot(hclust_comp)
#Next, we can cut the dendrogram in order to create the desired number of clusters.
#Since in this case we already know that there could be only three types of wheat
#we will choose the number of clusters to be k = 3, or as we can see in the dendrogram
#h = 3 you get three clusters. We will use R's cutree() function to cut the tree with
#hclust_avg as one parameter and the other parameter as h = 3 or k = 3.
cut_avg <- cutree(hclust_avg, k = 3)
plot(hclust_avg)
rect.hclust(hclust_avg , k = 3, border = 2:6)
#Now we can see the three clusters enclosed in three different colored boxes.
#We can also use the color_branches() function from the dendextend library to visualize
#our tree with different colored branches.
if(!require(dendextend)){install.packages("dendextend")}
avg_dend_obj <- as.dendrogram(hclust_avg)
avg_col_dend <- color_branches(avg_dend_obj, h = 3)
plot(avg_col_dend)
#Now we will append the cluster results obtained back in the original dataframe
#under column name the cluster with mutate(), from the dplyr package and count how
#many observations were assigned to each cluster with the count() function.
if(!require(dplyr)){install.packages("dplyr")}
seeds_df_cl <- mutate(seeds_df, cluster = cut_avg)
count(seeds_df_cl,cluster)
#Non hierarchical clustering
#K-means
#K-means clustering is the most commonly used unsupervised machine learning
#algorithm for dividing a given dataset into k clusters. Here, k represents
#the number of clusters and must be provided by the user.
#Here, we will need to load the following packages:
if(!require(tidyverse)){install.packages("tidyverse")}
if(!require(cluster)){install.packages("cluster")}
if(!require(factoextra)){install.packages("factoextra")}
#Here, we’ll use the built-in R data set USArrests, which contains statistics
#in arrests per 100,000 residents for assault, murder, and rape in each of the
#50 US states in 1973. It includes also the percent of the population living in urban areas
df=USArrests
#To remove any missing value that might be present in the data, type this:
df <- na.omit(df)
#As we don’t want the clustering algorithm to depend to an arbitrary variable unit,
#we start by scaling/standardizing the data using the R function scale:
df <- scale(df)
head(df)
#Clustering Distance Measures
#The classification of observations into groups requires some methods for computing
#the distance or the (dis)similarity between each pair of observations.
#The result of this computation is known as a dissimilarity or distance matrix.
#The choice of distance measures is a critical step in clustering.
#It defines how the similarity of two elements (x, y) is calculated and it will
#influence the shape of the clusters.
#For most common clustering software, the default distancemeasure is the Euclidean
#distance. However, depending on the type of the data and the research questions,
#other dissimilarity measures might be preferred and you should be aware of the options.
#Within R it is simple to compute and visualize the distance matrix using the functions
#get_dist and fviz_dist from the factoextra R package.
#This starts to illustrate which states have large dissimilarities (red) versus those
#that appear to be fairly similar (teal).
distance <- get_dist(df)
fviz_dist(distance, gradient = list(low = "#00AFBB", mid = "white", high = "#FC4E07"))
#Computing k-means clustering in R
#We can compute k-means in R with the kmeans function. Here will group the data into
#two clusters (centers = 2). The kmeans function also has an nstart option that attempts
#multiple initial configurations and reports on the best one.
#For example, adding nstart = 25 will generate 25 initial configurations.
#This approach is often recommended.
k2 <- kmeans(df, centers = 2, nstart = 25)
str(k2)
k2
#We can also view our results by using fviz_cluster.
#This provides a nice illustration of the clusters.
#If there are more than two dimensions (variables) fviz_cluster
#will perform principal component analysis (PCA) and plot the data points
#according to the first two principal components that explain the
#majority of the variance.
fviz_cluster(k2, data = df)
#Because the number of clusters (k) must be set before we start the algorithm,
#it is often advantageous to use several different values of k and examine the
#differences in the results. We can execute the same process for 3, 4, and 5 clusters,
#and the results are shown in the figure:
k3 <- kmeans(df, centers = 3, nstart = 25)
k4 <- kmeans(df, centers = 4, nstart = 25)
k5 <- kmeans(df, centers = 5, nstart = 25)
# plots to compare
p1 <- fviz_cluster(k2, geom = "point", data = df) + ggtitle("k = 2")
p2 <- fviz_cluster(k3, geom = "point", data = df) + ggtitle("k = 3")
p3 <- fviz_cluster(k4, geom = "point", data = df) + ggtitle("k = 4")
p4 <- fviz_cluster(k5, geom = "point", data = df) + ggtitle("k = 5")
if(!require(gridExtra)){install.packages("gridExtra")}
grid.arrange(p1, p2, p3, p4, nrow = 2)
#Although this visual assessment tells us where true dilineations occur
#between clusters, it does not tell us what the optimal number of clusters is.
#Determining Optimal Clusters
#As you may recall the analyst specifies the number of clusters to use; preferably
#the analyst would like to use the optimal number of clusters. To aid the analyst,
#the following explains the three most popular methods for determining the optimal
#clusters, which includes:
#1.Elbow method
#2.Silhouette method
#3.Gap statistic
#In short, the average silhouette approach measures the quality of a clustering.
#That is, it determines how well each object lies within its cluster.
#A high average silhouette width indicates a good clustering. The average silhouette
#method computes the average silhouette of observations for different values of k.
#The optimal number of clusters k is the one that maximizes the average silhouette
#over a range of possible values for k.
silh=fviz_nbclust(df, kmeans, method = "silhouette")
gap=fviz_nbclust(df, kmeans, method = "gap_stat")
grid.arrange(silh, gap,nrow = 1)
#With most of these approaches suggesting 2 as the number of optimal clusters,
#we can perform the final analysis and extract the results using 2 clusters.
# Compute k-means clustering with k = 2
final <- kmeans(df, 2, nstart = 25)
print(final)
fviz_cluster(final, data = df)
|
53dec3abb5bda5a3465a3d87a206b43a486216a2
|
2ef11a80606df33e96aebea050eec8d740050192
|
/makeslides.R
|
5edf0df0ee8287589dcda0ee26988127cd54c546
|
[] |
no_license
|
jknowles/statsdc12-presentation
|
8d2985a611d31d923f920d7be5671bdb34af4043
|
539ca87a91d2c988be4b281085f3c59e5078c5ec
|
refs/heads/master
| 2020-05-19T14:16:12.948798
| 2012-07-11T16:32:46
| 2012-07-11T16:32:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 116
|
r
|
makeslides.R
|
# Makeslides
system("pandoc -s -S -i -t slidy statsDCslides.md -o KnowlesStatsDCslides2012.html --self-contained")
|
3d39c8b3b052a0507b5a64e88de8d113105eb4ea
|
0441a11b7d74d68d17c67a023268c97afcbd4627
|
/BAMSandAllen/rCode/ColourDivisionsHeatMapReused.r
|
c816e1a8503fbe48a4ad65a3a934e09d8026e34b
|
[] |
no_license
|
leonfrench/ABAMS
|
fbe847d8c4e21388ca9150ad55d2b4508883ebcf
|
a4d65e9c9a8a437db9326828ebe7bdaefd9a81ce
|
refs/heads/master
| 2020-08-05T21:09:21.769589
| 2017-07-04T21:36:16
| 2017-07-04T21:36:16
| 7,511,456
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,280
|
r
|
ColourDivisionsHeatMapReused.r
|
# run in data/correlation study
# Load the optimized connectivity/expression energy table (whitespace-delimited).
energyOptTable <- read.table("ConnectivityAndAllenExpressionMatrixPair.NewEnergies.out.topList.txt")
energy <- as.matrix(energyOptTable)
# Quick interactive sanity checks: matrix dimensions and count of exact zeros.
dim(energy)
length(energy[energy==0])
#region classification
regionClassification <- read.csv("RegionClassificationABAMSOut.csv", row.names=1)
#make it the variable name setup - a mapping from a region to its parent
row.names(regionClassification) <- make.names(row.names(regionClassification))
regionMatrix <- as.matrix(regionClassification)
#as.matrix(regionMatrix[colnames(energyMatrix),])
# First heatmap: pairwise correlations, clustered with 1 - |r| as the distance.
map <- heatmap(cor(energy, use="pairwise"), symm = TRUE, distfun = function(c) as.dist(1 - abs(c)), keep.dendro = TRUE, scale = "none")
# Build a colour lookup: one colour per parent region.
numColours <- length(unique(regionMatrix))
colour <- matrix(nrow=numColours)
rownames(colour) <- unique(regionMatrix)[,1]
colour[,1] <- rainbow(numColours)
#original scheme
# NOTE(review): the line below overwrites the data-driven row names above and
# assumes exactly four parent regions in this fixed order -- confirm before reuse.
rownames(colour) <- c("Hindbrain", "Endbrain", "Interbrain", "Midbrain")
#parents of all the regions
parents <- as.matrix(regionMatrix[colnames(energy),])
#convert to colours for all regions
parentCols <- colour[parents,]
# Second heatmap: same clustering, now with a parent-region colour side strip.
map <- heatmap(cor(energy, use="pairwise"), symm = TRUE, distfun = function(c) as.dist(1 - abs(c)), keep.dendro = TRUE, scale = "none", ColSideColors=parentCols, margins=c(10,10))
|
de1c28d5e9c2d521ead346658f06918774e02f55
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/easyPubMed/examples/trim_address.Rd.R
|
ba1d26b67355a43f8a730b717b2122109b105f2a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
trim_address.Rd.R
|
library(easyPubMed)
### Name: trim_address
### Title: Trim and Format Address Information
### Aliases: trim_address

### ** Examples

# Build a raw affiliation string from two fragments; `collapse = " "`
# joins them with a single space, matching paste()'s default separator.
addr_parts <- c(" 2 Dept of Urology, Feinberg School of Medicine,",
                "Chicago, US; Dept of Mol Bio as well...")
addr_string <- paste(addr_parts, collapse = " ")
addr_string
trim_address(addr = addr_string)
|
3cc7af191f401e18a81eb6fd9a12e33acfed2929
|
3af91945083aa604efc778ea52a17ad60766948b
|
/plot_bar_as_fig_condensed_genomicAve.r
|
126333c091f69160589ada317a4469eea259d2e6
|
[] |
no_license
|
cjieming/R_codes
|
fa08dd1f25b22e4d3dec91f4fb4e598827d7492f
|
5b2bcf78dc217bc606c22f341e1978b5a1246e0c
|
refs/heads/master
| 2020-04-06T03:53:50.030658
| 2019-06-30T07:31:35
| 2019-06-30T07:31:35
| 56,031,249
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,356
|
r
|
plot_bar_as_fig_condensed_genomicAve.r
|
# NOTE(review): absolute setwd() makes this script machine-specific; prefer
# relative paths or a project root. Left as-is (documentation-only pass).
# setwd("C:/Users/JM/thesis/mark_work/FIG/eQTLsnps")
setwd("C:/Users/JM/thesis/mark_work/FIG/allelicSNPs")
## data input
# Alternative input files kept for reference; exactly one filename1 is active.
# filename1 = "fishersresults_enrichment-tfm-sf-yaoorder.txt"
# filename1 = "fishersresults_enrichment-tfp-sf-yaoorder.txt"
filename1 = "proportions_AS_rare_lt005_no0bins_noCDS.txt";
# filename2 = "proportions_AS_rare_lt005_no0bins_CDSonly.txt";
# filename1 = "proportions_AS_rare_lt005.txt";
# Headerless tab-separated table; columns are auto-named V1, V2, ...
data1 = read.table(filename1,sep="\t");
# data2 = read.table(filename2,sep="\t");
## for CDS RNAseq only
# x11(20,7) # adjust this for aspect ratio! device
# par(mar=c(5,15,5,5), lty=1) # lty=0 invis border
# colors = c("darkblue","blue","lightblue") # no0bins darkgreen=chipseq, darkblue=rnaseq
# barplot(data2$V2,xlab="Fraction of rare SNPs", names.arg=c('Non-peak','AS','Non-AS'), horiz=TRUE,
# cex.lab=1.5,cex.axis=1.5,cex.names=1.5, col=colors,xpd=FALSE, xlim=c(0.01,0.032), width=0.5,
# las=1, # set hori y axis label
# space=c(0,0,0)) # space between bars
# # abline(v=0.01261034,col='red',lty=2) # pseudogenes
# # abline(v=0.012406049,col='red',lty=2) # pseudogenes no CDS
# # abline(v=0.013386563,col='red',lty=2) # genomic average no CDS pass1kgmask
# abline(v=0.013171341,col='red',lty=2) # genomic ave no CDS
# tiff(filename = "Rplot%03d.tif", width = 480, height = 480,
# units = "px", pointsize = 12,
# bg = "white", res = 300,
# restoreConsole = TRUE)
# # xlim=c(0.01,0.016),
## barplots
# Render a horizontal bar plot of rare-SNP fractions (column V2) with a
# genomic-average reference line and hand-computed 95% CI whiskers.
x11(20,7) # adjust this for aspect ratio! device
par(mar=c(5,10,5,5), lty=1) # this adjust the margins, btm,left,top,right; lty=0 invis border
# barplot(data1$V2,xlab="Fold change", names.arg=data1$V1, horiz=TRUE,
#         cex.names=1.5,
#         las=1) # set hori y axis labe
# abline(v=1,col='red',lty=2)
## for peaks
# colors = c("black","black","grey","black","black","grey","black","black","black","black","black","black","grey","grey","black");
# colors = c("darkslategray2","darkred","darkred","darkred","darkgreen","darkgreen","darkgreen") # no0bins
# colors = c("cyan","red","red","green","green","orange","orange") # 0bins
colors = c("darkblue","darkblue","darkgreen","darkgreen") # no0bins darkgreen=chipseq, darkblue=rnaseq
# barplot(data1$V2,xlab="Fraction of rare SNPs (DAF<0.005)", names.arg=c('Allele-Specific','Non-Allele-Specific','Allele-Specific','Non-Allele-Specific'), horiz=TRUE,
# barplot() returns the bar centers (barsy), reused below for the CI arrows.
barsy=barplot(data1$V2,xlab="Fraction of rare SNPs in a Personal Genome", names.arg=c('-','+','-','+'), horiz=TRUE,
cex.lab=1.5,cex.axis=1.5,cex.names=1.5, col=colors,xpd=FALSE, xlim=c(0.01,0.017), width=0.5,
las=1, # set hori y axis label
space=c(0,0,0.2,0)) # space between bars
# abline(v=0.01261034,col='red',lty=2) # pseudogenes
# abline(v=0.012406049,col='red',lty=2) # pseudogenes no CDS
# abline(v=0.013386563,col='red',lty=2) # genomic average no CDS pass1kgmask
abline(v=0.013171341,col='red',lty=2) # genomic ave no CDS
## 95% CI
# NOTE(review): CI bounds are hard-coded and must match data1$V2's row order;
# regenerate these whenever the input file changes.
barsx_lower=rbind(0.01509992,0.01291343,0.01290524,0.01220467)
barsx_upper=rbind(0.01639706,0.01518349,0.01469161,0.01412036)
arrows(barsx_lower,barsy,barsx_upper,barsy,length=0.1,angle=90,code=3)
# tiff(filename = "Rplot%03d.tif", width = 480, height = 480,
# units = "px", pointsize = 12,
# bg = "white", res = 300,
# restoreConsole = TRUE)
|
09172f38fadd00416fb3a516604215e10c7e46e6
|
f2780a473c32c24d4c96076134839538647dabe8
|
/fund-node/Rfile/get4433.R
|
a5fee79a49a3d40651d594498f09e3c5a70482ad
|
[] |
no_license
|
ben00401/Robo_FrontEnd
|
e201364e467715d50ec3d82fdd47f1568bcf4d04
|
9ae39ed1fbb4c7b5f739fc2a294a5e9ff4e8ebae
|
refs/heads/master
| 2021-06-08T06:50:10.464926
| 2016-11-01T07:44:53
| 2016-11-01T07:44:53
| 72,511,302
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,029
|
r
|
get4433.R
|
library(mongolite)
library(RJSONIO)
# Reference date for all 4433 screens and cache lookups below.
# NOTE(review): hard-coded to 2015-01-01 rather than Sys.Date(); cached
# results are compared against this exact date -- confirm this is intentional.
today <- as.Date("2015-01-01")
# Dispatch a 4433 fund-screen request arriving as a JSON-encoded type.
#
# type: JSON whose first element names the screen variant --
#       "4433" (quartile rule), "4433_50" (top 50) or "4433_100" (top 100).
# Returns the screen result serialized back to JSON, or NULL (invisibly)
# for an unrecognized variant, exactly as the original if/else chain did.
get_result4433 <- function(type){
  type <- fromJSON(type)
  variant <- type[[1]]
  # All three variants follow the same path: fetch (or compute) the cached
  # screen for `today` and serialize it. The original duplicated this call
  # verbatim in each branch; collapsing the branches removes the copy-paste.
  if (variant %in% c("4433", "4433_50", "4433_100")) {
    toJSON(get_4433data(variant, today))
  }
}
# if db have today's data just send to front-end
#if there are no data in db then count it
# Fetch a 4433 screen result, using MongoDB as a day-level cache.
#
# type:  "4433_100", "4433" or "4433_50" -- selects the cached collection.
# today: reference date; a cached document whose `date` equals it is reused,
#        otherwise count4433() recomputes the screen and persists it.
# Returns a data frame of screened funds (names re-encoded to UTF-8), or
# NULL (invisibly) for an unrecognized `type`.
#
# NOTE(review): the three branches are structurally identical apart from the
# collection name and some progress printing -- good candidates for a single
# parameterized helper.
get_4433data<-function(type,today){
if(type=="4433_100"){
result4433_100 <- mongo(collection = "result4433_100" ,db="fund20160414", url = "mongodb://140.119.19.21/?sockettimeoutms=1200000")
print("connecttodb")
#check mongodb have today's 4433result
if(result4433_100$count()>0){
print("checked 4433result100")
checkdate <-result4433_100$find(limit = 1)
if(checkdate$date==today){
# cache hit: reuse today's stored screen
data4433 <- result4433_100$find();
data4433$name <- iconv(data4433$name,from = "utf8", to = "utf-8")
return(data4433)
}else{
#got today's 4433result ,then output to front-end
print("no 4433100 data")
result_of_count <- count4433(type,today);
return(result_of_count);
}
}else{
# empty collection: compute from scratch
result_of_count<-count4433(type,today);
return(result_of_count);
}
}else if(type=="4433"){
result4433 <- mongo(collection = "result4433" ,db="fund20160414", url = "mongodb://140.119.19.21/?sockettimeoutms=1200000")
#check mongodb have today's 4433result
if(result4433$count()>0){
checkdate <-result4433$find(limit = 1)
if(checkdate$date==today){
#got today's 4433result ,then output to front-end
data4433 <- result4433$find();
data4433$name <- iconv(data4433$name,from = "utf8", to = "utf-8")
return(data4433)
}else{
result_of_count<-count4433(type,today);
return(result_of_count);
}
}else{
result_of_count<-count4433(type,today);
return(result_of_count);
}
}else if(type =="4433_50"){
result4433 <- mongo(collection = "result4433_50" ,db="fund20160414", url = "mongodb://140.119.19.21/?sockettimeoutms=1200000")
#check mongodb have today's 4433result
if(result4433$count()>0){
checkdate <-result4433$find(limit = 1)
if(checkdate$date==today){
#got today's 4433result ,then output to front-end
data4433 <- result4433$find();
data4433$name <- iconv(data4433$name,from = "utf8", to = "utf-8")
return(data4433)
}else{
result_of_count<-count4433(type,today);
return(result_of_count);
}
}else{
result_of_count<-count4433(type,today);
return(result_of_count);
}
}
}
# Recompute all three 4433 screens from scratch and persist each one to its
# MongoDB collection (drop-then-insert), returning the one matching `type`.
#
# type:  "4433_100", "4433" or "4433_50" -- which computed screen to return.
# today: reference date stamped onto every persisted row.
#
# NOTE(review): connection URLs mix "mongodb://localhost" and
# "mongodb://140.119.19.21" (see the result4433 insert below) -- confirm
# which database is authoritative.
count4433 <- function(type,today){
# no 4433result in mongo
#there are no today's 4433result in mongo, then count it and save it to mongodb
profitdata <-get_countdata(today);
profitdata$date <-rep(today, times = nrow(profitdata))
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("nrow of count 4433data")
print(nrow(profitdata))
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
#find the chinese name
fundprofile <- mongo(collection = "fundprofile" ,db="fund20160414", url = "mongodb://localhost")
fundprofiledata <- fundprofile$find()
#find 4433_100 chinese name
# NOTE(review): assumes subset() preserves row order aligned with profitdata
# ids and that fundprofile column 2 holds the display name -- verify schema.
fundprofile_4433_data <-subset(fundprofiledata,fundprofiledata$id %in% profitdata$id)
profitdata$name <- fundprofile_4433_data[,2]
######future work modify db
#insert 4433result to mongodb=================================
#mongo_result4433 <- mongo(collection = "result4433" ,db="fund20160414", url = "mongodb://localhost")
#mongo_result4433$drop()
#mongo_result4433$insert(profitdata)
#################
################
###############
#======================count 4433_100 and insert db=========================
# top-100 4433 data: every window rank must be within the top 100
result4433_100<-subset(profitdata,profitdata$rank_1y <= 100 & profitdata$rank_2y <= 100 & profitdata$rank_3y <= 100 & profitdata$rank_5y <= 100 & profitdata$rank_3m <= 100 & profitdata$rank_6m <= 100)
#give it today's date
result4433_100$date <-rep(today, times = nrow(result4433_100))
#find the chinese name
fundprofile <- mongo(collection = "fundprofile" ,db="fund20160414", url = "mongodb://localhost")
fundprofiledata <- fundprofile$find()
#find 4433_100 chinese name
fundprofile_4433_100_data <-subset(fundprofiledata,fundprofiledata$id %in% result4433_100$id)
result4433_100$name <- fundprofile_4433_100_data[,2]
#insert 4433_100result to mongodb
mongo_result4433_100 <- mongo(collection = "result4433_100" ,db="fund20160414", url = "mongodb://localhost")
mongo_result4433_100$drop()
mongo_result4433_100$insert(result4433_100)
#======================count 4433 and insert db======================
#4433data
# classic 4433 rule: 1y/2y/3y/5y ranks in the top quarter, 3m/6m in the top third
result4433<-subset(profitdata,profitdata$rank_1y <= nrow(profitdata)/4 & profitdata$rank_2y <= nrow(profitdata)/4 & profitdata$rank_3y <= nrow(profitdata)/4 & profitdata$rank_5y <= nrow(profitdata)/4 & profitdata$rank_3m <= nrow(profitdata)/3 & profitdata$rank_6m <= nrow(profitdata)/3)
#give it today's date
result4433$date <-rep(today, times = nrow(result4433))
#give it type
result4433$type <-rep("4433", times = nrow(result4433))
#find 4433 chinese name
fundprofile_4433_data <-subset(fundprofiledata,fundprofiledata$id %in% result4433$id)
result4433$name <- fundprofile_4433_data[,2]
#insert 4433result to mongodb=================================
mongo_result4433 <- mongo(collection = "result4433" ,db="fund20160414", url = "mongodb://140.119.19.21")
mongo_result4433$drop()
mongo_result4433$insert(result4433)
#======================count 4433_100 and insert db=========================
# top-50 4433 data: every window rank must be within the top 50
result4433_50<-subset(profitdata,profitdata$rank_1y <= 50 & profitdata$rank_2y <= 50 & profitdata$rank_3y <= 50 & profitdata$rank_5y <= 50 & profitdata$rank_3m <= 50 & profitdata$rank_6m <= 50)
#give it today's date
result4433_50$date <-rep(today, times = nrow(result4433_50))
#find the chinese name
fundprofile <- mongo(collection = "fundprofile" ,db="fund20160414", url = "mongodb://localhost")
fundprofiledata <- fundprofile$find()
#find 4433_100 chinese name
fundprofile_4433_50_data <-subset(fundprofiledata,fundprofiledata$id %in% result4433_50$id)
result4433_50$name <- fundprofile_4433_50_data[,2]
#insert 4433_100result to mongodb
mongo_result4433_50 <- mongo(collection = "result4433_50" ,db="fund20160414", url = "mongodb://localhost")
mongo_result4433_50$drop()
mongo_result4433_50$insert(result4433_50)
if(type=="4433_100"){
return(result4433_100);
}else if (type=="4433"){
return(result4433);
}else if(type=="4433_50"){
return(result4433_50)
}
}
#get counted data with ranking
# For every fund in the profile collection, pull its NAV history from
# MongoDB, keep only funds whose history spans the full [today - 5y, today]
# window, compute the six 4433 return windows via calculator4433(), and
# rank each window across funds (1 = best return).
#
# NOTE(review): profitdata grows one row per fund via
# `profitdata[nrow(profitdata)+1, ] <-`, which is quadratic; collect rows
# in a list and rbind once if this becomes a bottleneck.
# NOTE(review): `for (i in 1:nrow(data))` misbehaves when data has 0 rows;
# seq_len(nrow(data)) is the safe form.
get_countdata <-function(today){
#=========================================================================================
profitdata <- data.frame(id = character(0), profit_3m= numeric(0), profit_6m= numeric(0), profit_1y= numeric(0), profit_2y= numeric(0), profit_3y= numeric(0), profit_5y= numeric(0),stringsAsFactors=FALSE)
co = "fundprofile"
con <- mongo(collection = co ,db="fund20160414", url = "mongodb://localhost")
data <- con$find()
rm(con)
gc()
#get data for 5 years
start <- seq(today, length = 2, by = "-5 years")[2]
end <- today
for (i in 1:nrow(data)){
fundname = data[,1][i]
fundcollection <- mongo(collection = fundname ,db="fund20160414", url = "mongodb://localhost/?sockettimeoutms=1200000")
#find the fund which are over five years
if(fundcollection$count()>0){
checkdate <-fundcollection$find(limit = 1)
if(checkdate[,1][1]<=start){
x <-fundcollection$find()
# require the history to fully cover [start, end] before computing returns
if(x[,1][1]<=start & x[,1][length(x[,1])] >=end){
subdata<-subset(x,x[,1] > start & x[,1] < end)
#compute the performance figures needed by the 4433 rule
cal <-calculator4433(subdata,today)
profitdata[nrow(profitdata)+1,]<-c(data[,1][i],cal)
}else{
#print("no")
}
}
}
rm(fundcollection)
gc()
}
#rank 4433
# order() trick: assign 1..n to rows in descending-return order per window
profitdata$rank_3m[order(as.numeric(profitdata$profit_3m),decreasing = TRUE)] <- 1:nrow(profitdata)
profitdata$rank_6m[order(as.numeric(profitdata$profit_6m),decreasing = TRUE)] <- 1:nrow(profitdata)
profitdata$rank_1y[order(as.numeric(profitdata$profit_1y),decreasing = TRUE)] <- 1:nrow(profitdata)
profitdata$rank_2y[order(as.numeric(profitdata$profit_2y),decreasing = TRUE)] <- 1:nrow(profitdata)
profitdata$rank_3y[order(as.numeric(profitdata$profit_3y),decreasing = TRUE)] <- 1:nrow(profitdata)
profitdata$rank_5y[order(as.numeric(profitdata$profit_5y),decreasing = TRUE)] <- 1:nrow(profitdata)
return(profitdata)
}
# Compute the trailing percentage returns needed by the 4433 fund rule.
#
# data:  two-column table of one fund's history, ordered by date --
#        column 1 = date (Date), column 2 = net asset value (numeric, or a
#        character representation of one). The caller is expected to have
#        pre-filtered the rows to the last five years before `today`.
# today: reference date the trailing windows are anchored to.
#
# Returns a numeric vector of percentage gains in the original order:
# c(3 months, 6 months, 1 year, 2 years, 3 years, 5 years).
#
# The original repeated the same subset/first/last/percent computation six
# times; it is factored into small helpers with identical semantics.
calculator4433 <- function(data, today) {
  # NAV at row i of table d, coerced exactly as the original code did.
  nav_at <- function(d, i) as.double(d[i, ][2])
  # Percentage gain from the first to the last row of d.
  pct_gain <- function(d) {
    first <- nav_at(d, 1)
    last <- nav_at(d, length(d[, 1]))
    (last - first) / first * 100
  }
  # Gain over the trailing window starting `by` before `today`
  # (strict `>` on the start date, matching the original subset() calls).
  window_gain <- function(by) {
    start <- seq(today, length = 2, by = by)[2]
    pct_gain(subset(data, data[, 1] > start))
  }
  c(window_gain("-3 months"),
    window_gain("-6 months"),
    window_gain("-1 years"),
    window_gain("-2 years"),
    window_gain("-3 years"),
    # 5-year gain uses the whole (caller-pre-filtered) history, as before.
    pct_gain(data))
}
|
981645cde8cb405501b9613895059f5ca2b57797
|
26c2b3f71d983d53ce984ea86fd9cf6c66194058
|
/man/response.Rd
|
8292b081058e7ff9d6ac7122a67f948518ae876b
|
[
"MIT"
] |
permissive
|
r-lib/httr2
|
34088cf7d86cde8a5cf02cf7dd28d1a32f4f1008
|
9b85a8467d88d577b68899b52658fa5b6d4b1a1d
|
refs/heads/main
| 2023-09-01T13:57:25.415243
| 2023-09-01T08:12:39
| 2023-09-01T08:12:39
| 158,719,104
| 182
| 44
|
NOASSERTION
| 2023-09-08T12:08:07
| 2018-11-22T15:32:29
|
R
|
UTF-8
|
R
| false
| true
| 1,238
|
rd
|
response.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resp.R
\name{response}
\alias{response}
\title{Create a new HTTP response}
\usage{
response(
status_code = 200,
url = "https://example.com",
method = "GET",
headers = list(),
body = raw()
)
}
\arguments{
\item{status_code}{HTTP status code. Must be a single integer.}
\item{url}{URL response came from; might not be the same as the URL in
the request if there were any redirects.}
\item{method}{HTTP method used to retrieve the response.}
\item{headers}{HTTP headers. Can be supplied as a raw or character vector
which will be parsed using the standard rules, or a named list.}
\item{body}{Response, if any, contained in the response body.}
}
\value{
An HTTP response: an S3 list with class \code{httr2_response}.
}
\description{
Generally, you should not need to call this function directly; you'll
get a real HTTP response by calling \code{\link[=req_perform]{req_perform()}} and friends. This
function is provided primarily for testing, and a place to describe
the key components of a response.
}
\examples{
response()
response(404, method = "POST")
response(headers = c("Content-Type: text/html", "Content-Length: 300"))
}
\keyword{internal}
|
42b59456f69f1a57850dcee2864baa1abb30a53b
|
657c68ca30dc1054f44d524a905cc7e8a9bc0abf
|
/R/max_mis.R
|
4010345ff180f82f5d2f15e447fbeb27ee4476cb
|
[] |
no_license
|
DataEdLinks/eeptools
|
cc97110ebdd1313e016f592ad6a9c17bdea9fcb9
|
29941d3cf35ed2fce5b8b51f2f2aa3adf8d17b0f
|
refs/heads/master
| 2020-12-27T01:57:08.803401
| 2015-04-10T18:35:12
| 2015-04-10T18:35:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 245
|
r
|
max_mis.R
|
# Maximum with missing-value semantics preserved.
#
# Returns max(x, na.rm = TRUE); when that maximum is not finite (x empty or
# all-NA, where max() returns -Inf with a warning), a typed NA is returned
# instead so the result can be stored back into an integer or numeric column.
#
# Fixes over the original:
#  * classes other than integer/numeric (e.g. character, Date) previously
#    fell off the end of the if/else chain and returned invisible NULL;
#    they now return max()'s result unchanged.
#  * class(x) can have length > 1 (e.g. POSIXct), which breaks
#    `if (varclass == ...)`; is.integer()/is.numeric() are robust.
#  * scalar ifelse() replaced by a plain if/else (ifelse strips attributes).
max_mis <- function(x) {
  # max() warns on zero-length/all-NA input; the -Inf it returns is mapped
  # to a typed NA below, so the warning is intentionally muted.
  m <- suppressWarnings(max(x, na.rm = TRUE))
  if (is.integer(x)) {
    if (is.finite(m)) m else NA_integer_
  } else if (is.numeric(x)) {
    if (is.finite(m)) m else NA_real_
  } else {
    m
  }
}
|
21c8d098f5643fdd1e884d87476b3350bf076385
|
0461931c3c89d572ab3233a193731edb4621186a
|
/logical_vector_operator.R
|
e743c705411421c266f1ea61c7c4f893d3ecac23
|
[] |
no_license
|
rsharma11/RCoding
|
acc80816a668fcd583326aa8d93de0ed505ab620
|
565487a7720db055b75d7fbe5bea22613463eb60
|
refs/heads/master
| 2020-03-21T00:59:46.394764
| 2019-07-31T14:54:44
| 2019-07-31T14:54:44
| 137,919,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,015
|
r
|
logical_vector_operator.R
|
# Logical-vector exercises on the built-in mtcars data set.
# Several steps previously did not match their own stated intent; each fix
# is noted inline. (A verbatim duplicate of the cyl/am step was removed.)

# Loading the data
data <- mtcars

# Output rows where column mpg is between 15 and 20 (excluding 15 and 20)
data[(data$mpg > 15 & data$mpg < 20), ]

# Output rows where column cyl = 6 and column am = 0
data[(data$cyl == 6 & data$am == 0), ]

# Output rows where column gear or carb has the value 4
data[(data$gear == 4 | data$carb == 4), ]

# output only the even rows of data
even_index <- seq(2, nrow(data), 2)
data[even_index, ]

# change every fourth element in column mpg to 0
# (was `data$mpg <- 0`, which zeroed the entire column)
data$mpg[seq(4, nrow(data), 4)] <- 0

# rows where columns vs and am have the same value 1
# (was `data$vs & data$am > 0`: precedence made it compare only am to 0
# and coerce vs to logical, i.e. "vs nonzero AND am > 0")
data[(data$vs == 1 & data$am == 1), ]

# rows where at least vs or am have the value 1
# (was the scalar `||`, which inspects only the first element of each vector)
data[(data$vs == 1 | data$am == 1), ]

# Change all values that are 0 in the column am in data to 2
# (was `data$am <- 2`, which overwrote the entire column)
data$am[data$am == 0] <- 2

# Add 2 to every element in the column vs (printed, not stored back)
data$vs + 2

# Output only those rows of data where vs and am have different values
data[data$vs != data$am, ]
|
04055f870f92ab42dd39dc45318907b32a56d8da
|
86b7bf0357b9a7aac1075f67d418bc3c1f993ac0
|
/tests/testthat/test_page.R
|
f89abe9c3c434be4747535ad193584e4e514ceed
|
[
"MIT"
] |
permissive
|
kevinstadler/cultevo
|
6c7acc2a325416660680490bd5861e91b38902d1
|
94af9e19e38e80b7081de6fec55b745d98034124
|
refs/heads/master
| 2021-01-17T16:55:39.043165
| 2018-04-24T13:22:03
| 2018-04-24T13:22:03
| 26,164,855
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
test_page.R
|
context("Page test")
# Exact-p-value path: two reference values for k = 6 treatments, n = 4
# replications, plus a silent run on a well-formed rank matrix.
test_that("Exact p-value calculation", {
expect_equal(1, page.compute.exact(6, 4, 224))
expect_equal(0.03932265, page.compute.exact(6, 4, 322))
expect_silent(page.test(rbind(1:10, 1:10), verbose=FALSE))
})
# Inputs outside the supported exact range should error; the wide (k = 23)
# matrix is expected to emit a message (presumably the approximate-path
# notice, per the test name -- confirm against the package docs).
test_that("Approximate p-value calculation and border conditions", {
expect_error(page.test(t(1:12)))
expect_error(page.compute.exact(6, 4))
expect_error(page.compute.exact(6, 4, 223))
expect_error(page.compute.exact(6, 4, 365))
expect_message(page.test(rbind(1:23, 1:23)))
})
|
c9b0962535ce1e8a8ea0d08d929892a4903704d6
|
1cbdf941d1e17d772037746197a85fb4b313f4c8
|
/R/visualize_ottawa_police_releases.R
|
23e55fdf247cdcd736b9c0d58ea8c5c2ecae007f
|
[] |
no_license
|
monkeycycle/policing
|
55cdaa1af0a54ef9d9c692892318666c61ec5de1
|
a60c02b969770f413ba3855bcd3b1063dd3b701e
|
refs/heads/main
| 2023-08-17T19:19:10.841843
| 2021-09-24T15:09:13
| 2021-09-24T15:09:13
| 408,193,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,185
|
r
|
visualize_ottawa_police_releases.R
|
# Annual bar chart of Ottawa Police Service press releases, with the count
# printed above each bar. Relies on `ottawa_police_pressers_annual`,
# `wfp_blue` and `minimal_theme()` defined elsewhere in the project.
# NOTE(review): bar heights use `total_count` but labels use `count` --
# confirm both columns exist and agree.
p_ottawa_releases_2018_2021_annual <- ggplot(ottawa_police_pressers_annual,aes(x=year_date,y=total_count)) +
geom_bar(stat="identity", fill=wfp_blue) +
geom_text(
aes(
x=year_date,
y=total_count,
label = paste(count, sep = "")
),
vjust=-1,
# fontface="bold",
size=4
) +
# x axis padded to 2017--2021 so the first/last bars are not clipped
scale_x_date(expand = c(0, 0),
date_breaks = "1 year",
labels = date_format("%Y"),
limits=as.Date(c("2017-01-31", "2021-12-31"))) +
scale_y_continuous(expand = c(0, 0),
limits = c(0, 300)
) +
labs(
title="Press releases issued by the Ottawa Police Service",
subtitle="Number of press releases",
x="",
y="",
caption = paste("Note: The 2018 releases are limited to October through December.",
"\n",
"WINNIPEG FREE PRESS — SOURCE: OTTAWA POLICE SERVICE", sep="")
) +
minimal_theme() +
# strip all grid lines on top of the project theme
theme(
# axis.line = ggplot2::element_blank(),
# axis.line.x = ggplot2::element_blank(), # ggplot2::element_line(color="#777777"),
# axis.line.y = ggplot2::element_blank(),
panel.grid.major.x = ggplot2::element_blank(),
panel.grid.major.y = ggplot2::element_blank(),
panel.grid.minor.x = ggplot2::element_blank(),
panel.grid.minor.y = ggplot2::element_blank()
)
#
# p_releases_2018_2021_monthly <- ggplot(wps_releases_monthly,aes(x=year_month,y=count)) +
# geom_bar(stat="identity", fill=wfp_blue) +
# geom_smooth(fill=NA, colour="#ffffff", size=1.4) +
# geom_smooth(fill=nominalMuted_shade_1, colour=nominalBold_shade_1, size=1) +
#
# scale_x_date(expand = c(0, 0),
# date_breaks = "1 year",
# labels = date_format("%Y"),
# limits=as.Date(c("2018-01-01", "2021-12-31"))) +
# scale_y_continuous(expand = c(0, 0),
# # limits = c(0, 15)
# ) +
# labs(
# title="Monthly press releases issued by the Winnipeg Police Service",
# subtitle="Number of press releases",
# x="",
# y="",
# caption = paste("Note: The 2018 releases are limited to October through December.",
# "\n",
# "WINNIPEG FREE PRESS — SOURCE: WINNIPEG POLICE SERVICE", sep="")
# ) +
# minimal_theme() +
# theme(
# # axis.line = ggplot2::element_blank(),
# # axis.line.x = ggplot2::element_blank(), # ggplot2::element_line(color="#777777"),
# # axis.line.y = ggplot2::element_blank(),
# panel.grid.major.x = ggplot2::element_blank(),
# panel.grid.major.y = ggplot2::element_blank(),
# panel.grid.minor.x = ggplot2::element_blank(),
# panel.grid.minor.y = ggplot2::element_blank()
# )
# wfp_releases_2018_2021_monthly <- prepare_plot(p_releases_2018_2021_monthly)
#
# ggsave_pngpdf(wfp_releases_2018_2021_monthly, "wfp_releases_2018_2021_monthly", width_var=8.66, height_var=6, dpi_var=96, scale_var=1, units_var="in")
# wfp_ottawa_releases_2018_2021_annual <- prepare_plot(p_ottawa_releases_2018_2021_annual)
# ggsave_pngpdf(wfp_ottawa_releases_2018_2021_annual, "wfp_ottawa_releases_2018_2021_annual", width_var=8.66, height_var=6, dpi_var=96, scale_var=1, units_var="in")
|
48924e028fd2e2736b000a6ff7fe1af74caecff1
|
d23dfbe4083d046b4104bf43b9a09e82f5b8bcdf
|
/评分卡模型.R
|
72fb3be03451a5f8d5466d25d9d03913783b9d08
|
[] |
no_license
|
GuitarHero-Eric/IDL-script
|
4b663f74415f3faef68876bcdbb485524c386396
|
1167e37571ce51d592c0c488289c175dfbc825b7
|
refs/heads/master
| 2020-04-06T17:41:34.062141
| 2018-11-15T07:33:20
| 2018-11-15T07:33:20
| 157,669,682
| 1
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,908
|
r
|
评分卡模型.R
|
# Credit-scorecard prototyping on the GermanCredit data: train/test split,
# optimal binning with smbinning, and WoE inspection.
# klaR: Classification and Visualization
library(klaR)
# InformationValue: Performance Analysis and Companion Functions for Binary Classification Models
library(InformationValue)
data(GermanCredit)
# 800-row training sample drawn without replacement.
# NOTE(review): prefer TRUE/FALSE over T/F, and set.seed() for reproducibility.
train_kfold<-sample(nrow(GermanCredit),800,replace = F)
train_kfolddata<-GermanCredit[train_kfold,] # extract the training data set
test_kfolddata<-GermanCredit[-train_kfold,] # extract the test data set
# Recode the target: "good" -> 0, everything else -> 1 (bad).
credit_risk<-ifelse(train_kfolddata[,"credit_risk"]=="good",0,1)
train_kfolddata$credit_risk<-credit_risk
# EDA:Exploratory Data Analysis
# require(caret)
# data(GermanCredit)
# ggplot(GermanCredit, aes(x = duration,y = ..count..,)) + geom_histogram(fill = "blue", colour = "grey60", size = 0.2, alpha = 0.2,binwidth = 5)
# ggplot(GermanCredit, aes(x = amount,y = ..count..,)) + geom_histogram(fill = "blue", colour = "grey60", size = 0.2, alpha = 0.2,binwidth = 500)
# ggplot(GermanCredit, aes(x =credit_risk,y = ..count..,)) + geom_histogram(fill = "blue", colour = "grey60" , alpha = 0.2,stat="count")
# Binning strategies considered:
# Equal-width binning (equal length intervals)
# Equal-frequency binning (equal depth intervals)
# Optimal binning
# smbinning: package for optimal binning
library(smbinning)
# Optimal bins for three continuous predictors (p = 0.05 minimum bin share).
Durationresult=smbinning(df=train_kfolddata,y="credit_risk",x="duration",p=0.05)
CreditAmountresult=smbinning(df=train_kfolddata,y="credit_risk",x="amount",p=0.05)
Ageresult=smbinning(df=train_kfolddata,y="credit_risk",x="age",p=0.05)
smbinning.plot(CreditAmountresult,option="WoE",sub="CreditAmount")
smbinning.plot(Durationresult,option="WoE",sub="Durationresult")
smbinning.plot(Ageresult,option="WoE",sub="Ageresult")
# NOTE(review): `train` is not defined anywhere in this script -- this was
# probably meant to be `train_kfolddata`; confirm before running.
AccountBalancewoe=woe(train, "AccountBalance",Continuous = F, "credit_risk",C_Bin = 4,Good = "1",Bad = "0")
ggplot(AccountBalancewoe, aes(x = BIN, y = -WOE)) + geom_bar(stat = "identity",fill = "blue", colour = "grey60",size = 0.2, alpha = 0.2)+labs(title = "AccountBalance")
|
96682ed5208f551099282b6422286b582cfb3b43
|
d6a9c61fac38d849112e663a97f255b2203212b7
|
/plot1.r
|
5a52725584625b49fc5f27787d507d1b465a8557
|
[] |
no_license
|
ronteo/ExData_Plotting1
|
5f395c266c1d527b9ef267554377159080993926
|
59ee69cb85830de58ca25b4b43714e1d19425506
|
refs/heads/master
| 2021-01-10T04:28:14.217471
| 2016-03-18T21:48:57
| 2016-03-18T21:48:57
| 54,151,747
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 565
|
r
|
plot1.r
|
# Load the full household power data set; "?" marks missing values and every
# column is read as character so types can be parsed explicitly below.
power <- read.csv("household_power_consumption.txt", sep = ";",
                  na.strings = "?", colClasses = "character")
power$Date <- as.Date(power$Date, "%d/%m/%Y")

# Keep only the two target days (1-2 February 2007).
target_days <- as.Date(c("1/2/2007", "2/2/2007"), "%d/%m/%Y")
power_sub <- subset(power, power$Date %in% target_days)

# Merge date and time into a single timestamp, then parse the measurement.
power_sub$Time <- strptime(paste(power_sub$Date, power_sub$Time), "%Y-%m-%d %H:%M:%S")
power_sub$Global_active_power <- as.numeric(power_sub$Global_active_power)

# Histogram of global active power over the two-day window.
hist(power_sub$Global_active_power, col = "red", ylim = c(0, 1201),
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
|
2201c25dbb9b395ea5f9d10725070351ad226561
|
2eecc5fa6296798134a59ca708e32c774207fb72
|
/Scripts/Bridge Conditions/1) Data Import.R
|
bef69c961f46d96c7081a6397d0bc434b1d0cbcf
|
[] |
no_license
|
kevinanderson26/Tracking-Progess
|
66a56810f6d4c35fb87fe80e53321cff4312c33e
|
2633bb8f2d728f9e1ff1756761194c2c746b442f
|
refs/heads/master
| 2020-07-05T05:21:56.003673
| 2019-08-16T00:22:26
| 2019-08-17T12:40:50
| 202,533,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,885
|
r
|
1) Data Import.R
|
#load packages
# pack(): install any packages in `pkg` that are not yet installed, then
# attach them all.
# NOTE(review): attaching via require() inside sapply() returns FALSE
# silently for a package that fails to load rather than raising an error.
pack <- function(pkg){
newpkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
if (length(newpkg))
install.packages(newpkg, dependencies = TRUE)
sapply(pkg, require, character.only = TRUE)
}
packages <- c("tidyverse", "magrittr", "sf", "tidycensus",
"readxl", "units", "scales", "measurements")
pack(packages)
# helper and its argument are no longer needed once packages are attached
rm(pack, packages)
# print numbers in fixed notation (effectively disable scientific notation)
options(scipen=999)
#download deliminated data for 2010-2018 to disk (only uncomment if need to redownload)
#update years_1 range to add new year of data
#years_1 <- seq(2010, 2018, 1) %>% as.character()
#
#for(i in 1:length(years_1)){
# file_name_PA <- paste("https://www.fhwa.dot.gov/bridge/nbi/", years_1[i], "/delimited/PA",
# str_sub(years_1[i], 3, 4), ".txt", sep = "")
# destination_PA <- paste("../Data/Pennsylvania/PA", str_sub(years_1[i], 3, 4), ".txt", sep = "")
# variable_PA <- paste("bridges", "PA", years_1[i], sep ="_")
#
# file_name_NJ <- paste("https://www.fhwa.dot.gov/bridge/nbi/", years_1[i], "/delimited/NJ",
# str_sub(years_1[i], 3, 4), ".txt", sep = "")
# destination_NJ <- paste("../Data/New Jersey/NJ", str_sub(years_1[i], 3, 4), ".txt", sep = "")
# variable_NJ <- paste("bridges", "NJ", years_1[i], sep ="_")
#}
#download zipped deliminated data for 2000-2009, extra PA and NJ and save to disk (only uncomment if need to redownload)
#years_2 <- seq(2000, 2009, 1) %>% as.character()
#for(i in 1:length(years_2)){
# download .zip file to temp location, extract data
# temp <- tempfile()
# file_name <- paste("https://www.fhwa.dot.gov/bridge/nbi/", years_2[i], "hwybronlyonefile.zip", sep = "")
# download.file(file_name, temp)
# data <- read_delim(unzip(temp), delim = ",")
# unlink(temp)
#filter extracted data for only Pennsylvania and New Jersey
# pa_data <- data %>%
# filter(STATE_CODE_001 == "42")
# nj_data <- data %>%
# filter(STATE_CODE_001 == "34")
#
#save pa and nj data to disk
# destination_PA <- paste("../Data/Pennsylvania/PA", str_sub(years_2[i], 3, 4), ".txt", sep = "")
# destination_NJ <- paste("../Data/New Jersey/NJ", str_sub(years_2[i], 3, 4), ".txt", sep = "")
# write_delim(pa_data, destination_PA, delim = ",")
# write_delim(nj_data, destination_NJ, delim = ",")
#}
###import data into R and bind
#define years range
#update years_3 range to add new years of data
# One iteration per year: read both states' delimited NBI extracts, keep only
# the columns needed downstream (all read as character), tag the year, and
# stack into bridges_all.
years_3 <- seq(2000, 2018, 1) %>% as.character()
#create empty data frame
bridges_all <- data.frame()
#loop over years_3 to import each year of data
#loop selects only required variables for each state, merges the selected vehicle, and then adds to the empty data frame
# NOTE(review): growing bridges_all with bind_rows() inside the loop is
# quadratic; accumulate yearly frames in a list and bind once if slow.
# NOTE(review): `1:length(years_3)` -> seq_along(years_3) is the safe idiom.
for(i in 1:length(years_3)){
filename_PA <- paste("Raw Data/Bridge Conditions/Pennsylvania/PA", str_sub(years_3[i], 3, 4), ".txt", sep = "")
filename_NJ <- paste("Raw Data/Bridge Conditions/New Jersey/NJ", str_sub(years_3[i], 3, 4), ".txt", sep = "")
pa_temp <- read_delim(filename_PA, delim = ",", quote = "\'", escape_double = FALSE,
col_types = cols(.default = "c")) %>%
select(facility_carried = FACILITY_CARRIED_007,
features = FEATURES_DESC_006A,
location = LOCATION_009,
state = STATE_CODE_001,
county = COUNTY_CODE_003,
latitude = LAT_016,
longitude = LONG_017,
maintainer_code = MAINTENANCE_021,
owner_code = OWNER_022,
structure_number = STRUCTURE_NUMBER_008,
length_m = STRUCTURE_LEN_MT_049,
width_m = DECK_WIDTH_MT_052,
approach_width_m = APPR_WIDTH_MT_032,
rating_deck = DECK_COND_058,
rating_super = SUPERSTRUCTURE_COND_059,
rating_sub = SUBSTRUCTURE_COND_060,
rating_culvert = CULVERT_COND_062
) %>%
mutate(year = as.numeric(years_3[i]))
nj_temp <- read_delim(filename_NJ, delim = ",", quote = "\'", escape_double = FALSE,
col_types = cols(.default = "c")) %>%
select(facility_carried = FACILITY_CARRIED_007,
features = FEATURES_DESC_006A,
location = LOCATION_009,
state = STATE_CODE_001,
county = COUNTY_CODE_003,
latitude = LAT_016,
longitude = LONG_017,
maintainer_code = MAINTENANCE_021,
owner_code = OWNER_022,
structure_number = STRUCTURE_NUMBER_008,
length_m = STRUCTURE_LEN_MT_049,
width_m = DECK_WIDTH_MT_052,
approach_width_m = APPR_WIDTH_MT_032,
rating_deck = DECK_COND_058,
rating_super = SUPERSTRUCTURE_COND_059,
rating_sub = SUBSTRUCTURE_COND_060,
rating_culvert = CULVERT_COND_062) %>%
mutate(year = as.numeric(years_3[i]))
bridges_all %<>% bind_rows(pa_temp, nj_temp)}
#remove unneeded temp files
rm(pa_temp, nj_temp, i, filename_PA, filename_NJ, years_3)
# Recode "N" (Not Applicable) condition ratings to 100 and make the columns
# numeric, all in one mutate. NA still means "missing"; 100 sits above every
# deficiency threshold used later, so Not-Applicable components never register
# as deficient when filtering.
bridges_all %<>%
  mutate(rating_deck    = as.numeric(replace(rating_deck,    rating_deck    == "N", 100)),
         rating_super   = as.numeric(replace(rating_super,   rating_super   == "N", 100)),
         rating_sub     = as.numeric(replace(rating_sub,     rating_sub     == "N", 100)),
         rating_culvert = as.numeric(replace(rating_culvert, rating_culvert == "N", 100)))
# Build a combined state+county FIPS id, then keep only bridges located in the
# nine counties of the DVRPC region.
bridges_all$state_county <- paste0(bridges_all$state, bridges_all$county)
county_list <- c("34005", "34007", "34015", "34021",
                 "42017", "42029", "42045", "42091", "42101")
bridges_region <- filter(bridges_all, state_county %in% county_list)
rm(county_list)
# add subregion field
# Vectorized: the original filled the column with a row-by-row for-loop; the
# nested ifelse over the whole column produces the same values at once.
pa_suburbs <- c("42017", "42029", "42045", "42091")
nj_suburbs <- c("34005", "34007", "34015", "34021")
bridges_region$subregion <-
  ifelse(bridges_region$state_county %in% pa_suburbs, "Pennsylvania Suburbs",
         ifelse(bridges_region$state_county %in% nj_suburbs, "New Jersey Suburbs",
                "Philadelphia"))
rm(pa_suburbs, nj_suburbs)
# add state/local maintainer and owner categories
# all codes not in state_main or local_main get grouped into "Other"
# NBI data dictionary with codes is at http://nationalbridges.com/nbiDesc.html#ITEM_58
state_main <- c("01", "11", "21")
local_main <- c("02", "03", "04")
# One classifier applied to both code columns (instead of two copies of the
# nested ifelse).
classify_agency <- function(code) {
  ifelse(code %in% state_main, "State",
         ifelse(code %in% local_main, "Local", "Other"))
}
bridges_region %<>%
  mutate(maintainer_class = classify_agency(maintainer_code),
         owner_class = classify_agency(owner_code))
bridges_region$maintainer_class %<>% as_factor() %>% fct_relevel("Other", "Local", "State")
rm(state_main, local_main, classify_agency)
# change state and county numeric codes to text
# A lookup table replaces the original nine-deep nested ifelse chain. Any
# county code not in the table falls back to "Mercer" (code "021"), matching
# the original default branch; NA codes stay NA, as before.
county_names <- c("017" = "Bucks", "029" = "Chester", "045" = "Delaware",
                  "091" = "Montgomery", "101" = "Philadelphia", "007" = "Camden",
                  "005" = "Burlington", "015" = "Gloucester")
bridges_region$state <- ifelse(bridges_region$state == "34", "New Jersey", "Pennsylvania")
mapped_county <- unname(county_names[bridges_region$county])
mapped_county[is.na(mapped_county) & !is.na(bridges_region$county)] <- "Mercer"
bridges_region$county <- mapped_county
rm(county_names, mapped_county)
# convert lengths/widths to numeric feet and compute the deck area, in one
# mutate (mutate evaluates terms sequentially, so the *_ft columns can use the
# *_m columns converted just above). Area uses the deck width when it is
# nonzero; otherwise the approach width is used instead.
m_to_ft <- 3.28084
bridges_region %<>%
  mutate(length_m = as.numeric(length_m),
         width_m = as.numeric(width_m),
         approach_width_m = as.numeric(approach_width_m),
         length_ft = length_m * m_to_ft,
         width_ft = width_m * m_to_ft,
         approach_width_ft = approach_width_m * m_to_ft,
         area_sqft = ifelse(width_ft != 0, length_ft * width_ft, length_ft * approach_width_ft))
rm(m_to_ft)
#reorder variables, drop measurements in meters
# (the *_m columns are intentionally omitted: only the feet-based measures plus
#  the identifiers, classifications, and condition ratings used downstream are kept)
bridges_region %<>%
  select(facility_carried, features, location, state, county, state_county, subregion, latitude, longitude,
         maintainer_code, maintainer_class,
         owner_code, owner_class,
         structure_number, year, length_ft, width_ft, approach_width_ft, area_sqft,
         rating_deck, rating_super, rating_sub, rating_culvert)
|
582caa47ede881721832baae86f87c5327cfce9b
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125612-test.R
|
28f972f8014860df2e08bdba34489ad4c88732dc
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
1613125612-test.R
|
# Auto-generated AFL/valgrind regression input for multivariance:::match_rows.
# A is a 6x4 matrix of extreme double values (denormals, huge magnitudes) and
# B is a 1x1 zero matrix; the call checks the routine survives such input.
testlist <- list(A = structure(c(-7.29112201956225e-304, 2.02678106771417e+301, 2.04216934546089e+301, 2.04216934546089e+301, 2.04216934553044e+301, 3.85341333796043e-255, 5.84340601505517e-310, 7.29112432744838e-304, 2.26926929822129e-309, 1.39128504575632e-309, 991208021054980096, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 4L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
# Print the structure of whatever came back (the exact value is not asserted).
str(result)
|
90e2d83e7398ba689566c2259c64f297678c3083
|
e23b89d06af510b289d28797f7480e47014da6a8
|
/Code/classification_final.R
|
e67f4be864aa7a5b39202577908e84ced4b6cbf6
|
[] |
no_license
|
Saurabh23/Sentiment-Analysis-for-Predicting-Elections
|
0da1581a66d85f74e18baf3cc4b4a31725515ee9
|
8842d18208a456f05e6a6ce9986c221f711b7676
|
refs/heads/master
| 2021-06-08T03:55:21.435750
| 2016-10-28T21:12:21
| 2016-10-28T21:12:21
| 72,241,411
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,883
|
r
|
classification_final.R
|
# Sentiment-classification experiment script for the SemEval tweet data.
# NOTE(review): clears the entire workspace on source -- hostile when this file
# is sourced from another script; consider removing.
rm(list = ls(all = TRUE))
#LOAD TRAINING SET
# NOTE(review): the second read.csv immediately overwrites the first, so only
# the SemEvalWithoutNeutral file is actually used -- confirm which is intended.
rawTrainingData <- read.csv("C:/Users/LENOVO/Desktop/TUE Lectures/Q1/WEB IR/WebIR-Full-master/Data/SemEvalProcessed.csv", sep = ",", quote = '\"')
rawTrainingData <- read.csv("~/TUE/Quartile1/IRandDM/SentimentAnalysis/WebIR-Full/Data/SemEvalWithoutNeutral.csv", sep = ",", quote = '\"')
# Work on the first 5000 rows only.
rawTrainingData <- rawTrainingData[1:5000,]
summary(rawTrainingData$Sentiment)
# Experiment grid over classifier / language model / feature set.
# `performClassification` must be defined or sourced elsewhere before running.
# NOTE(review): every call below overwrites `model`, so only the final run's
# model survives; these lines look intended for interactive, one-at-a-time use.
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = T, lm = "unigram", feature = "huliu")
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = T, lm = "unigram", feature = "MFT", isStem = F, isStopWord = T)
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = T, lm = "unigram", feature = "MFT", isStem = T, isStopWord = T)
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = T, lm = "bigram", feature = "MFT", isStem = T, isStopWord = T)
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = F, lm = "mix", feature = "MFT", isStem = T, isStopWord = T)
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = T, lm = "mix", feature = "huliu", isStem = T, isStopWord = T)
model <- performClassification(rawTrainingData = rawTrainingData, method = "SVM", isCrossValidation = T, lm = "bigram", feature = "MFT")
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = T, lm = "bigram", feature = "huliu")
model <- performClassification(rawTrainingData = rawTrainingData, method = "NB", isCrossValidation = F, lm = "unigram", feature = "mix", isStem = T, isStopWord = T)
summary(rawTrainingData$Sentiment)
|
94a56217a8b6423c18b32a3719bf4ed66330ec34
|
bc9f92707d1ee12a17c44e2a3db46334dbf1bfa7
|
/spotProb.r
|
7f2289d6a28efd2d9ead9c12bd4e5d95a69c3fae
|
[
"BSD-2-Clause"
] |
permissive
|
lparsons/TigerFISH
|
6f841db80ed4b199d8d07c7b29e8bd4bfea4df55
|
20bcb520e5f59af7ce35d607d1f556754d64252d
|
refs/heads/master
| 2022-11-11T10:39:25.143767
| 2013-12-18T02:17:19
| 2013-12-18T02:17:19
| 275,873,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 624
|
r
|
spotProb.r
|
# Distribution of the number of mRNA spots detected, given independent
# per-spot detection probabilities `rna`.
#
# For k detected spots, sums prod(rna[S]) * prod(1 - rna[-S]) over every
# k-element subset S (Poisson-binomial by enumeration -- fine for a handful of
# spots, but the subset count grows as 2^n).
#
# @param rna numeric vector of per-spot detection probabilities in [0, 1].
# @return numeric vector prob of length length(rna) + 1 where prob[k + 1] is
#   P(exactly k spots detected); sums to 1.
spotProbs <- function(rna) {
  no_rna <- 1 - rna
  n <- length(rna)
  prob <- rep(0, n + 1)
  # Zero spots detected.
  prob[1] <- prod(no_rna)
  # k = 1..n spots detected.
  # BUG FIX: the original loop started at k = 2, so P(exactly 1 spot) was
  # never computed and sum(prob) fell short of 1.
  # utils::combn replaces the retired gregmisc combinations() dependency.
  for (k in seq_len(n)) {
    combos <- combn(n, k)
    for (j in seq_len(ncol(combos))) {
      on <- combos[, j]
      prob[k + 1] <- prob[k + 1] + prod(rna[on]) * prod(no_rna[-on])
    }
  }
  prob
}
rna <- c(0.9, 0.8, 0.7, 0.88, 0.65, 0.75)
prob <- spotProbs(rna)
# (also fixes the trailing-comma error in the original plot() call)
plot(prob, type = "b", col = "red")
print(sum(prob))  # sanity check: should print 1
|
5d92edf3535f46293455d5e0b38876044e7226a0
|
6b3ca6134352e6692069d0a7e57c3a7ba14e21ae
|
/man/GetGeoNames.GeoStrata.Rd
|
621690b956a75bdb5e14b3227733eb60c4192d89
|
[
"Apache-2.0"
] |
permissive
|
ovative-group/GeoexperimentsResearch
|
fe5baeec316ecc19c99fc621cd3f56e960ad2849
|
c50a1d6f4b21ea7624c27cec5374f4a3a3d76c0e
|
refs/heads/master
| 2020-07-05T08:07:57.434144
| 2019-08-15T17:29:42
| 2019-08-15T17:29:42
| 202,583,766
| 0
| 0
| null | 2019-08-15T17:27:12
| 2019-08-15T17:27:11
| null |
UTF-8
|
R
| false
| false
| 634
|
rd
|
GetGeoNames.GeoStrata.Rd
|
% Copyright (C) 2017 Google, Inc.
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getgeonames.R
\name{GetGeoNames.GeoStrata}
\alias{GetGeoNames.GeoStrata}
\title{Extracts the unique names of the geos from a GeoStrata object.}
\usage{
\method{GetGeoNames}{GeoStrata}(obj, groups = NULL)
}
\arguments{
\item{obj}{a GeoStrata object.}
\item{groups}{(NULL, integer vector) id number(s) of the groups whose geos
to obtain, or NULL for all geos. NA is allowed.}
}
\value{
A character vector of unique geo identifiers, sorted.
}
\description{
Extracts the unique names of the geos from a GeoStrata object.
}
|
f23a61ebb24cc15c242f908799f85e8885210d98
|
90118609911bb5a97941830cbc1f7020d239405d
|
/Smartphone/original/data/cleanData.R
|
06cafca145b209a9830d58205247b9633e78d2e6
|
[] |
no_license
|
shannonrush/Contests
|
6e0eba560413723a11d1de1ddb0e3b1f1f440c56
|
4b930624a5c6ea1030f1e82b1829d4fa0a1261df
|
refs/heads/master
| 2021-01-01T05:31:27.616173
| 2014-06-05T18:56:17
| 2014-06-05T18:56:17
| 2,697,116
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,630
|
r
|
cleanData.R
|
# Build a combined test+train activity dataset from the raw UCI HAR text files
# and previously processed .rda objects (`test` is used at the end to pick out
# the test subjects).
load("../../data/processed/test.rda")
load("../../data/processed/train_clean.rda")
# read test x, subject and y from txt files, attaching subject/activity
# directly as vectors (the original stored whole one-column data.frames inside
# the columns and unlist()ed them afterwards -- same final values)
test2 <- read.table("X_test.txt", row.names = NULL)
testy <- read.table("y_test.txt", row.names = NULL)
test2_subject <- read.table("subject_test.txt", row.names = NULL)
test2$subject <- test2_subject[[1]]
test2$activity <- testy[[1]]
# read train x, subject and y from txt files; same treatment
train2 <- read.table("X_train.txt", row.names = NULL)
trainy <- read.table("y_train.txt", row.names = NULL)
train2_subject <- read.table("subject_train.txt", row.names = NULL)
train2$subject <- train2_subject[[1]]
train2$activity <- trainy[[1]]
# combine test and train
data <- rbind(test2, train2)
# change numeric activity codes to strings
# BUG FIX: the code table below was pasted as bare text ("1 WALKING" etc.),
# which is a parse error; it is now a comment, and the nested if/else inside
# sapply is replaced by an equivalent vectorized lookup.
# 1 WALKING, 2 WALKING_UPSTAIRS, 3 WALKING_DOWNSTAIRS,
# 4 SITTING, 5 STANDING, 6 LAYING
activity_labels <- c("walking", "walking_upstairs", "walking_downstairs",
                     "sitting", "standing", "laying")
data$activity <- activity_labels[as.numeric(data$activity)]
# pull out subjects from test
test_subjects <- as.integer(levels(as.factor(test$subject)))
test_only <- data[data$subject %in% test_subjects, ]
test_with <- test_only
save(test_with, file = "test_with.rda")
|
077f45133ccffafc33443d1b38478a6c22a0e780
|
73606887f1a1d520a7fb4f17758e15a1aeff67cc
|
/man/euler_7.Rd
|
c58601f9dffcdc948355ca49cc90563010c17d02
|
[
"MIT"
] |
permissive
|
nathaneastwood/euler
|
a3dd8937fb0d21608dee2626c4fc8c9b574ba679
|
f5338c7e4fd38f0fb2e2692b807edec57e2cd8bd
|
refs/heads/master
| 2021-04-25T07:00:24.646923
| 2019-06-17T18:13:50
| 2019-06-17T18:13:50
| 122,224,945
| 21
| 4
|
MIT
| 2018-03-11T16:05:32
| 2018-02-20T16:36:20
|
R
|
UTF-8
|
R
| false
| true
| 353
|
rd
|
euler_7.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/euler_7.R
\name{euler_7}
\alias{euler_7}
\title{Project Euler 7: 10001st prime}
\usage{
euler_7(n)
}
\arguments{
\item{n}{An integer.}
}
\description{
This is a solution to
\href{https://projecteuler.net/problem=7}{Euler problem 7}.
}
\examples{
euler_7(6)
euler_7(10001)
}
|
dacbc2406db37da21c181d36603775055e51225d
|
1d1f20cb63cd72a226992006f2077d65f03ffbdf
|
/source/Models/pdPCA.R
|
aa6636e21d468039983a2491adfd7e06609e5f0c
|
[] |
no_license
|
vineet1992/Gene-Selection
|
5bbaf66fce760e0716db8254d16331fc0f50293d
|
7425c96918568ea10af3ba1211444cf609339cf6
|
refs/heads/master
| 2021-07-05T10:55:53.528116
| 2019-05-01T21:16:54
| 2019-05-01T21:16:54
| 150,757,224
| 2
| 1
| null | 2019-01-24T19:03:08
| 2018-09-28T15:04:12
|
R
|
UTF-8
|
R
| false
| false
| 1,718
|
r
|
pdPCA.R
|
### PiPrefDiv with PCA summarization.
### Fits a logistic regression on features selected/summarized by the external
### PrefDiv.jar tool.
### NOTE(review): relies on the global `sdCutoff` and on ./priors/, temp.txt,
### and PrefDiv.jar being present in the working directory -- confirm these
### are set up by the calling script.
pdPCA_wrapper = function(x_tr,y_tr)
{
  nc = ncol(x_tr)
  ### Subset features by standard deviation (drop=FALSE keeps a matrix even if
  ### only one column survives the cutoff)
  sds = apply(x_tr,2,sd)
  x_tr = x_tr[,sds>sdCutoff,drop=FALSE]
  ### Load prior information and create temp directory to store the
  ### correspondingly subsetted prior matrices
  dir = "./priors/"
  tempDir = "./tempPriors/"
  dir.create(tempDir, showWarnings = FALSE)  # don't warn if it already exists
  for(f in list.files(dir))
  {
    ### Each prior file is an (nc+1) x (nc+1) matrix; the extra row/column is
    ### the target variable and must always be kept, hence the trailing TRUE.
    curr = matrix(scan(paste0(dir,f),sep="\t"),ncol=nc+1,nrow=nc+1,byrow=T)
    curr = curr[c(sds>sdCutoff,TRUE),c(sds>sdCutoff,TRUE)]
    write.table(curr,paste0(tempDir,f),sep='\t',col.names=F,row.names=F,quote=F)
  }
  ### Write the training data (features + target column "y") for the jar
  trainData = cbind(x_tr,y_tr)
  colnames(trainData)[ncol(trainData)] = "y"
  write.table(trainData,file="temp.txt",sep='\t',quote=F,row.names=F)
  runName = "PD"
  ### Submit jar file command
  cmd = paste("java -jar -Xmx4g PrefDiv.jar -t y -data temp.txt -outData data_summary.txt -outCluster clusters.txt -cv 3 1,3,5,10 -priors tempPriors -disc -ctype pca -name ",runName,sep="")
  system(cmd)
  ### Read in selected/summarized features and train a logistic regression
  newData = read.table(paste(runName,"data_summary.txt",sep='/'),sep='\t',header=T)
  ### Strip the leading "X" that read.table prepends to non-syntactic column
  ### names (vectorized; replaces the original per-column loop)
  colnames(newData) <- sub("^X", "", colnames(newData))
  newData$y = as.factor(newData$y)
  levels(newData$y) = c("1","2")
  mdl = glm(y~.,data=newData,family=binomial(link="logit"))
  ### return the fitted logistic-regression model
  return(mdl)
}
|
8bbc6576768058adfa2ac35777e101b3379ac4ba
|
899101532272b283fd7a18d3a302482f36925ed1
|
/raw_job_description_structure.R
|
96450fcd171b68dfb0708c2dd5928cafa0f3f9c1
|
[] |
no_license
|
nstempn1/data_science_project
|
c1d4f84b74bcbb089f0a394db5d8aa4f7a01667e
|
a3f25b48309f07997422a69133e29e9bc9445eed
|
refs/heads/master
| 2021-01-23T20:23:55.054004
| 2017-10-25T00:53:23
| 2017-10-25T00:53:23
| 102,856,852
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 9,018
|
r
|
raw_job_description_structure.R
|
##################################################################################################################
##################################################################################################################
# Load glass door dataset
##################################################################################################################
##################################################################################################################
library(tidytext)
library(vcd)
library(xtable)
data<-readRDS("glassdoor_df")
n<-nrow(data)
##################################################################################################################
##################################################################################################################
# Create structured fields from raw job descriptions
##################################################################################################################
##################################################################################################################
# Characters to strip before keyword matching; "â" is a mojibake artifact from
# the scrape (WINDOWS-1252 encoding).
bad_txt<-c("â", "-", ",", "/")
data$job_desc1<-gsub(paste(bad_txt, collapse="|"), " ", data$job_desc_raw)
# Re-insert the spaces lost between "wordWord" and "ABBRWord" runs.
data$job_desc2<-gsub("([[:lower:]])([[:upper:]])", "\\1 \\2", data$job_desc1)
data$job_desc2<-gsub("([[:upper:]][[:upper:]][[:upper:]])([[:upper:]][[:lower:]])", "\\1 \\2", data$job_desc2)
data$job_desc3<-gsub("[(]", " ", data$job_desc2)
data$job_desc3<-gsub("[)]", " ", data$job_desc3)
# Alternative spellings/abbreviations for each credential keyword (note the
# unescaped dots in "b.s." etc. match any character -- kept as-is).
bachelor<-c("\\bbachelor\\b", "\\bbachelors\\b", "\\bundergraduate\\b", "\\bbs\\b", "\\bb.s.\\b", "\\bb.s\\b")
master<-c("\\bmaster\\b", "\\bmasters\\b", "graduate degree", "ms degree", "ms in", "m.s. in", "m.s in" )
phd<-c("phd", "doctorate", "\\bph\\b")
# BUG FIX: "\\mba\\b" lacked the leading word boundary ("\m" is treated as a
# literal "m" by R's regex engine), so it also matched words merely ending in
# "mba" (e.g. "samba").
mba<-c("m.b.a", "\\bmba\\b")
stats<-c("statistics", "statistical", "regression", "modelling")
# Flag credential / skill keywords in each job description.
# grepl() is vectorized over the whole column, so the original row-by-row
# for-loop (any(grepl(...)) per element) is replaced by direct column
# assignments with identical results.
data$python <- grepl("\\bpython\\b", data$job_desc3, ignore.case = TRUE)
data$ml <- grepl("machine learning", data$job_desc3, ignore.case = TRUE)
data$opt <- grepl("optimization", data$job_desc3, ignore.case = TRUE)
data$stats <- grepl("statistic", data$job_desc3, ignore.case = TRUE)  # overwritten below by the `stats` pattern set (as in the original)
data$risk <- grepl("risk", data$job_desc3, ignore.case = TRUE)
data$UX <- grepl("UX", data$job_desc3, ignore.case = FALSE)
data$bd <- grepl("big data", data$job_desc3, ignore.case = TRUE)
data$dm <- grepl("data management", data$job_desc3, ignore.case = TRUE)
data$pharma <- grepl("pharmaceutical", data$job_desc3, ignore.case = TRUE)
data$fs <- grepl("financial services", data$job_desc3, ignore.case = TRUE)
data$sd <- grepl("software development", data$job_desc3, ignore.case = TRUE)
data$program <- grepl("programming", data$job_desc3, ignore.case = TRUE)
data$research <- grepl("research", data$job_desc3, ignore.case = TRUE)
data$R <- grepl("\\bR\\b", data$job_desc3, ignore.case = TRUE)
data$SAS <- grepl("\\bSAS\\b", data$job_desc3, ignore.case = TRUE)
# NOTE(review): "\\bC+\\b" means "one or more C characters"; with ignore.case
# it also matches a bare "c" word -- confirm this is intended for C/C++.
data$C <- grepl("\\bC+\\b", data$job_desc3, ignore.case = TRUE)
data$stata <- grepl("\\bstata\\b", data$job_desc3, ignore.case = TRUE)
data$SQL <- grepl("\\bsql\\b", data$job_desc3, ignore.case = TRUE)
data$excel <- grepl("\\bexcel\\b", data$job_desc3, ignore.case = TRUE)
data$tableau <- grepl("\\btableau\\b", data$job_desc3, ignore.case = TRUE)
data$spss <- grepl("\\bspss\\b", data$job_desc3, ignore.case = TRUE)
data$java <- grepl("\\bjava\\b", data$job_desc3, ignore.case = TRUE)
data$linux <- grepl("\\blinux\\b", data$job_desc3, ignore.case = TRUE)
data$matlab <- grepl("\\bmatlab\\b", data$job_desc3, ignore.case = TRUE)
data$NLP <- grepl("\\bNLP\\b", data$job_desc3, ignore.case = TRUE)
data$hadoop <- grepl("\\bhadoop\\b", data$job_desc3, ignore.case = TRUE)
data$ruby <- grepl("\\bruby\\b", data$job_desc3, ignore.case = TRUE)
data$oracle <- grepl("\\boracle\\b", data$job_desc3, ignore.case = TRUE)
# NOTE(review): `sas` duplicates `SAS` exactly (ignore.case makes the two
# patterns identical) -- kept to preserve downstream column usage.
data$sas <- grepl("\\bsas\\b", data$job_desc3, ignore.case = TRUE)
data$bs <- grepl(paste(bachelor, collapse = "|"), data$job_desc3, ignore.case = TRUE)
data$bs2 <- grepl("bachelor", data$job_desc3, ignore.case = TRUE)
data$masters <- grepl(paste(master, collapse = "|"), data$job_desc3, ignore.case = TRUE)
data$phd <- grepl(paste(phd, collapse = "|"), data$job_desc3, ignore.case = TRUE)
data$mba <- grepl(paste(mba, collapse = "|"), data$job_desc3, ignore.case = TRUE)
data$stats <- grepl(paste(stats, collapse = "|"), data$job_desc3, ignore.case = TRUE)
data$degree <- grepl("\\bdegree\\b", data$job_desc3, ignore.case = TRUE)
# Summed skill / education indicator counts per posting.
#data$skills<- data$degree+data$phd+data$masters+data$phd+data$bs2+data$bs+data$sas+data$oracle+data$ruby+data$hadoop+data$NLP+data$matlab+data$linux+
#  data$java+ data$spss +data$spss +data$tableau+ data$excel+data$SQL+data$stata + data$C + data$SAS +data$R+ data$python
# NOTE(review): data$spss appears twice and data$sas/data$SAS are identical
# flags (case-insensitive matching upstream), so some skills are counted
# double; the trailing "++data$UX" is a harmless unary plus. Confirm whether
# the double-counting is intended before using skills2 analytically.
data$skills2<- data$sas+data$oracle+data$ruby+data$hadoop+data$NLP+data$matlab+data$linux+
  data$java+ data$spss +data$spss +data$tableau+ data$excel+data$SQL+data$stata + data$C + data$SAS +data$R+
  data$python+data$ml+data$opt+data$bd+data$research+data$stats+data$dm+data$risk+data$fs+data$program+data$pharma+data$sd++data$UX
# Number of degree levels mentioned (0-3).
data$education<- data$masters+data$phd+data$bs
#table(data$skills2)
#table(data$education)
#saveRDS(data, file="glassdoor_df_cleaned")
#data$job_id[data$skills2==0]
#strsplit(data$job_desc3[data$job_id=="2426165858"]," ")
##################################################################################################################
##################################################################################################################
# Create structured fields from raw employer descriptions
##################################################################################################################
##################################################################################################################
data$emp_desc1<-gsub(paste(bad_txt, collapse="|"), " ", data$emp_desc_raw)
# Re-insert spaces lost between word/number runs in the scraped text.
data$emp_desc2<-gsub("([[:lower:]])([[:upper:]])", "\\1 \\2", data$emp_desc1)
data$emp_desc2<-gsub("([[:lower:]])([[:digit:]])", "\\1 \\2", data$emp_desc2)
data$emp_desc2<-gsub("([[:digit:]])([[:upper:]])", "\\1 \\2", data$emp_desc2)
# Employer attributes (vectorized: grepl/sub operate on the whole column at
# once, replacing the original row-by-row loops with identical results; two
# leftover exploratory lines were removed).
data$founded <- grepl("\\bfounded\\b", data$emp_desc2, ignore.case = TRUE)
data$industry <- sub(".*Industry *(.*?) *Revenue.*", "\\1", data$emp_desc2, ignore.case = TRUE)
data$revenue <- grepl("\\brevenue\\b", data$emp_desc2, ignore.case = TRUE)
# Parse the "$<low>k-$<high>k" salary range. Non-matching strings pass through
# unchanged and are cleaned up downstream.
data$salary_low <- sub(".*[$] *(.*?) *k[-].*", "\\1", data$salaries, ignore.case = TRUE)
# BUG FIX: salary_high previously reused the salary_low regex, so both columns
# captured the low bound; this pattern captures the figure after the dash.
# TODO(review): confirm against the raw `salaries` format.
data$salary_high <- sub(".*[-][$] *(.*?) *k.*", "\\1", data$salaries, ignore.case = TRUE)
#strsplit(data$job_desc3[data$job_id=="2525091352"]," ")
# Treat the "Not listed" placeholder as missing; as.numeric("") below yields NA
# (with a coercion warning), which is the intended missing marker.
data$salary_low[data$salary_low=="Not listed"]<-""
data$salary_high[data$salary_high=="Not listed"]<-""
# NOTE(review): despite its name this is the SUM of the two bounds, not their
# mean (no division by 2) -- confirm intent before interpreting the values.
data$salary_average<-as.numeric(data$salary_low)+as.numeric(data$salary_high)
hist(data$salary_average, breaks=seq(0, 400, 25))
#summary(lm(data$salary_average~data$phd+data$masters+data$mba+data$bs))
#model1<-(lm(data$salary_average~ data$sas+data$oracle+data$ruby+data$hadoop+data$NLP+data$matlab+data$linux+
#             data$java+ data$spss +data$spss +data$tableau+ data$excel+data$SQL+data$stata + data$C + data$SAS +data$R+
#             data$python+data$ml+data$opt+data$bd+data$research+data$stats+data$dm+data$risk+data$fs+data$program+data$pharma+data$sd++data$UX))
#hist(data$salary_average, breaks=seq(0, 400, 25), xlab="Glassdoor Estimated Salary", main="Distribution of Estimated Salary for \nData Scientist Positions in the New York City Area")
#summary(data$salary_average)
saveRDS(data, file="glassdoor_df_cleaned")
#data<-readRDS("glassdoor_df_cleaned")
#data<-data[data$job_desc_raw!="NO DESCRIPTION LISTED",]
#data<-data[(is.na(data$salary_average)==FALSE),]
#table(data$education)
#myvars <- c("phd", "masters", "bs")
#educ_data <- data[myvars]
#data$phd2<- ifelse(educ_data$phd == FALSE, "", "PhD")
#data$ms2<- ifelse(educ_data$masters == FALSE, "", "MS")
#data$bs2<- ifelse(educ_data$bs == FALSE, "", "BS")
#data$educ_cat<-str_trim(paste(data$bs2, data$ms2, data$phd2, sep=" "))
#add bs and phd to bs ms and phd
#data$educ_cat<-ifelse(data$educ_cat == "BS PhD", "BS MS PhD" , data$educ_cat)
#add ms and phd to phd
#data$educ_cat<-ifelse(data$educ_cat == "MS PhD", "PhD" , data$educ_cat)
#educ_cleaned$educ_cat<-ifelse(educ_cleaned$educ_cat == "MS PhD", "PhD" , educ_cleaned$educ_cat)
#add bs and ms to ms
#data$educ_cat<-ifelse(data$educ_cat == "BS MS", "MS" , data$educ_cat)
#educ_cleaned$educ_cat<-ifelse(educ_cleaned$educ_cat == "BS MS", "MS" , educ_cleaned$educ_cat)
|
be2994f3353f0bb3c0cd810d565bb6befec3351c
|
3766dcabe5e3e63e2e0521fb01f644b4f8ce831e
|
/man/inf_M.Rd
|
58b9e116ef82d6c2006025623991e29080282fde
|
[] |
no_license
|
cran/pdR
|
c18c9733edb1d57006d78a83834fc84237812f61
|
595a61acf3a7196beca38d3f3f109f085bf1a1b9
|
refs/heads/master
| 2023-08-30T22:42:58.958803
| 2023-08-21T10:50:02
| 2023-08-21T11:31:04
| 23,536,178
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,371
|
rd
|
inf_M.Rd
|
\name{inf_M}
\alias{inf_M}
\docType{data}
\title{
Monthly inflation time series of 20 countries
}
\description{Monthly inflation time series of 20 countries, 1971.1~2011.12
}
\usage{data(inf_M)
}
\format{
A data frame with 20 countries
\describe{
    \item{\code{AUSTRALIA}}{inflation of Australia}
\item{\code{AUSTRIA}}{inflation of Austria}
\item{\code{BELGIUM}}{inflation of Belgium}
\item{\code{CANADA}}{inflation of Canada}
\item{\code{DENMARK}}{inflation of Denmark}
\item{\code{FINLAND}}{inflation of Finland}
\item{\code{FRANCE}}{inflation of France}
\item{\code{GREECE}}{inflation of Greece}
\item{\code{ICELAND}}{inflation of Iceland}
\item{\code{ITALY}}{inflation of Italy}
\item{\code{JAPAN}}{inflation of Japan}
\item{\code{LUXEMBOURG}}{inflation of Luxembourg}
\item{\code{NETHERLANDS}}{inflation of Netherlands}
\item{\code{NORWAY}}{inflation of Norway}
\item{\code{PORTUGAL}}{inflation of Portugal}
\item{\code{SPAIN}}{inflation of Spain}
\item{\code{SWEDEN}}{inflation of Sweden}
\item{\code{SWITZERLAND}}{inflation of Switzerland}
\item{\code{UK}}{inflation of UK}
\item{\code{USA}}{inflation of USA}
}
}
\details{
Monthly CPI inflation, computed as the seasonally differenced log CPI, for 20 countries
}
\examples{
data(inf_M)
head(inf_M)
}
|
a11be5a23498dfecae4db410fee8eb0da9cf99f8
|
1aeb06e04dbc9de4f7a2546184a7b9b7de47114b
|
/man/sbif_indicators.Rd
|
5d174df2381ef095efe1edfb0b24cb4124cb798f
|
[] |
no_license
|
gvegayon/sbifapi
|
2e12fac9e4f1150d181a761e9517bdd0724c2c88
|
1ecf860cf68e2c29de71317928e8f455f3b695b1
|
refs/heads/master
| 2021-01-13T02:36:49.238813
| 2016-02-23T06:05:18
| 2016-02-23T06:05:18
| 42,278,706
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 242
|
rd
|
sbif_indicators.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{sbif_indicators}
\alias{sbif_indicators}
\title{Returns available indicators}
\usage{
sbif_indicators()
}
\description{
Returns available indicators
}
|
3d8d0b4b94e71cc23ff21cf432a4eb88d78f6440
|
31362fdab2193f92b64f9a82b0fe1ca732fcf6df
|
/NonNormalEvidenceSynthesisSimulations/server.R
|
97d61af297f8a4b4db99c9f845eb3fade9822a17
|
[] |
no_license
|
OHDSI/ShinyDeploy
|
a5c8bbd5341c96001ebfbb1e42f3bc60eeceee7c
|
a9d6f598b10174ffa6a1073398565d108e4ccd3c
|
refs/heads/master
| 2023-08-30T17:59:17.033360
| 2023-08-26T12:07:22
| 2023-08-26T12:07:22
| 98,995,622
| 31
| 49
| null | 2023-06-26T21:07:33
| 2017-08-01T11:50:59
|
R
|
UTF-8
|
R
| false
| false
| 19,208
|
r
|
server.R
|
library(shiny)
library(ggplot2)
# Expand one (type, start, end) record into one row per occupied rank
# position; the unit weight is split evenly across the tied positions.
singleRankVector <- function(row) {
  nTied <- 1 + row$end - row$start
  data.frame(type = row$type,
             rank = row$start:row$end,
             weight = 1 / nTied,
             row.names = NULL)
}
# Turn one (type, value) subgroup into "rank vectors": one row per
# (type, rank position) with a weight, where tied values share their block of
# rank positions with equal weight (expanded via singleRankVector).
#
# @param subgroup    data.frame with columns `type` and `value` (numeric).
# @param descending  TRUE ranks the largest value first.
computeRankVectors <- function(subgroup, descending = TRUE) {
  totalLength <- nrow(subgroup)
  # Count how many rows share each distinct value.
  valueCounts <- aggregate(rep(1, totalLength) ~ value, data = subgroup, FUN = sum)
  if (nrow(valueCounts) == 1) {
    # One unique value: every row ties across all rank positions.
    valueCounts$start <- 1
    valueCounts$end <- totalLength
  } else if (nrow(valueCounts) == totalLength) {
    # All values unique: plain ranking with weight 1.
    # BUG FIX: the original used order(), which yields the permutation that
    # sorts the values ("which element is ranked k-th"), not each element's
    # rank; rank() assigns each row its actual rank position.
    sortKey <- if (descending) -subgroup$value else subgroup$value
    rankVectors <- data.frame(type = subgroup$type,
                              rank = rank(sortKey),
                              weight = 1,
                              row.names = NULL)
    return(rankVectors)
  } else {
    # Handle ties: sort the distinct values, then compute the block of rank
    # positions [start, end] that each value occupies.
    if (descending) {
      valueCounts <- valueCounts[order(-valueCounts$value), ]
    } else {
      valueCounts <- valueCounts[order(valueCounts$value), ]
    }
    valueCounts$end <- cumsum(valueCounts[, 2])
    valueCounts$start <- c(1, valueCounts$end[1:(nrow(valueCounts) - 1)] + 1 )
    valueCounts[, 2] <- NULL
  }
  # Attach each row's rank block and expand to one row per rank position.
  valueCounts <- merge(subgroup[, c("type", "value")], valueCounts)
  rankVectors <- lapply(split(valueCounts, 1:totalLength), singleRankVector)
  rankVectors <- do.call("rbind", rankVectors)
  rankVectors$start <- NULL
  rankVectors$end <- NULL
  return(rankVectors)
}
shinyServer(function(input, output, session) {
# Fixed effects ---------------------------------------------------
pivotDataFixed <- function(simParam, subset, dropIfUnique = TRUE) {
if (dropIfUnique && length(unique(subset[, simParam])) == 1) {
return(NULL)
} else {
temp <- subset
maxValue <- max(subset[simParam])
temp$parameterValue <- subset[, simParam]
temp$jitter <- temp$parameterValue + runif(nrow(subset), -0.02 * maxValue, 0.02 * maxValue)
temp$simParam <- simParam
temp[simParamsFixed] <- NULL
return(temp)
}
}
filteredResultsFixed <- reactive({
subset <- resultsFixed
subset <- subset[subset$metric %in% input$metricFixed, ]
subset <- subset[subset$type %in% input$typeFixed, ]
for (simParam in simParamsFixed) {
subset <- subset[subset[, simParam] %in% as.numeric(input[[paste0(simParam, "Fixed")]]), ]
}
return(subset)
})
filteredPivotedResultsFixed <- reactive({
subset <- filteredResultsFixed()
vizData <- lapply(simParamsFixed, pivotDataFixed, subset = subset)
vizData <- do.call(rbind, vizData)
return(vizData)
})
filteredViolinPivotedResultsFixed <- reactive({
subset <- filteredResultsFixed()
vizData <- pivotDataFixed(input$simParamFixedRadioButton, subset, dropIfUnique = FALSE)
return(vizData)
})
getReferenceValues <- function(metrics) {
ref <- data.frame()
if ("Bias" %in% metrics) {
ref <- rbind(ref, data.frame(value = 0,
metric = "Bias"))
}
if ("Coverage" %in% metrics) {
ref <- rbind(ref, data.frame(value = 0.95,
metric = "Coverage"))
}
if ("MSE" %in% metrics) {
ref <- rbind(ref, data.frame(value = 0,
metric = "MSE"))
}
if ("Non-Estimable" %in% metrics) {
ref <- rbind(ref, data.frame(value = 0,
metric = "Non-Estimable"))
}
return(ref)
}
  # Scatter plot: one dot per simulation scenario, faceted metric x parameter.
  # Returns NULL (blank plot) when the current filters leave no data.
  output$mainPlotFixed <- renderPlot({
    subset <- filteredPivotedResultsFixed()
    if (nrow(subset) == 0) {
      return(NULL)
    } else {
      # Dashed guide lines at each metric's ideal value.
      ref <- getReferenceValues(subset$metric)
      # Wrap multi-word algorithm names for a compact legend.
      subset$type <- gsub(" ", "\n", subset$type)
      plot <- ggplot2::ggplot(subset, ggplot2::aes(x = jitter, y = value, group = type, color = type))
      if (nrow(ref) > 0) {
        plot <- plot + geom_hline(aes(yintercept = value), data = ref, linetype = "dashed")
      }
      plot <- plot + ggplot2::geom_point(alpha = 0.4) +
        ggplot2::facet_grid(metric~simParam, scales = "free", switch = "both") +
        ggplot2::theme(legend.position = "top",
                       legend.title = ggplot2::element_blank(),
                       axis.title = ggplot2::element_blank(),
                       strip.placement = "outside",
                       strip.background = ggplot2::element_blank())
      return(plot)
    }
  },
  res = 125,
  height = 800)
  # Violin plot of each metric against the single radio-button-selected
  # parameter, one violin per algorithm type per parameter value.
  output$mainViolinPlotFixed <- renderPlot({
    subset <- filteredViolinPivotedResultsFixed()
    if (nrow(subset) == 0) {
      return(NULL)
    } else {
      # Dashed guide lines at each metric's ideal value.
      ref <- getReferenceValues(subset$metric)
      # Wrap multi-word algorithm names for a compact legend.
      subset$type <- gsub(" ", "\n", subset$type)
      # subset <- subset[subset$simParam == input$simParamFixedRadioButton, ]
      plot <- ggplot(subset, aes(x = factor(parameterValue), y = value, fill = type))
      if (nrow(ref) > 0) {
        plot <- plot + geom_hline(aes(yintercept = value), data = ref, linetype = "dashed")
      }
      plot <- plot + geom_violin(position = position_dodge(0.9), scale = "width", alpha = 0.4) +
        facet_grid(metric~., scales = "free", switch = "both") +
        theme(legend.position = "top",
              legend.title = element_blank(),
              axis.title = element_blank(),
              strip.placement = "outside",
              strip.background = element_blank())
      return(plot)
    }
  },
  res = 125,
  height = 800)
  # Tooltip panel for the main scatter plot: when the cursor is near a dot,
  # shows the scenario's simulation parameters and all of its metric values.
  output$hoverInfoPlotFixed <- renderUI({
    subset <- filteredPivotedResultsFixed()
    if (nrow(subset) == 0) {
      return(NULL)
    }
    hover <- input$plotHoverMainPlotFixed
    # Nearest plotted point within 5 px of the cursor (at most one).
    point <- nearPoints(subset, hover, threshold = 5, maxpoints = 1, addDist = TRUE)
    if (nrow(point) == 0) return(NULL)
    # Convert the hover position from data coordinates to pixel coordinates so
    # the tooltip can be absolutely positioned next to the cursor.
    left_pct <- (hover$x - hover$domain$left) / (hover$domain$right - hover$domain$left)
    top_pct <- (hover$domain$top - hover$y) / (hover$domain$top - hover$domain$bottom)
    left_px <- hover$range$left + left_pct * (hover$range$right - hover$range$left)
    top_px <- hover$range$top + top_pct * (hover$range$bottom - hover$range$top)
    style <- paste0("position:absolute; z-index:100; background-color: rgba(245, 245, 245, 0.85); ",
                    "left:", left_px - 361, "px; top:", top_px - 150, "px; width:250px;")
    # Unpivot: recover the original (un-pivoted) result row for this point,
    # then gather all metrics of the same scenario/type for display.
    unpivotedRow <- resultsFixed[resultsFixed[, point$simParam] == point$parameterValue &
                                   resultsFixed$type == point$type &
                                   resultsFixed$metric == point$metric &
                                   resultsFixed$value == point$value, ]
    unpivotedRow <- unpivotedRow[1, ]
    allMetrics <- merge(resultsFixed, unpivotedRow[, c(simParamsFixed, "type")])
    # Assemble the HTML body: type, parameter settings, then metric values.
    lines <- sprintf("<b> Type: </b>%s", point$type)
    lines <- c(lines, "")
    lines <- c(lines, sprintf("<b> %s: </b>%s", simParamsFixed, unpivotedRow[, simParamsFixed]))
    lines <- c(lines, "")
    lines <- c(lines, sprintf("<b> %s: </b>%.2f", allMetrics$metric, allMetrics$value))
    div(
      style = "position: relative; width: 0; height: 0",
      wellPanel(
        style = style,
        p(HTML(paste(lines, collapse = "<br/>")))))
  })
  # Caption under the fixed-effects scatter plot, reporting the number of
  # scenarios currently selected.
  output$mainCaptionFixed <- renderUI({
    subset <- filteredPivotedResultsFixed()
    if (nrow(subset) == 0) {
      return(NULL)
    }
    # Scenario count = rows belonging to a single (type, metric, simParam) cell.
    count <- sum(subset$type == subset$type[1] & subset$metric == subset$metric[1] & subset$simParam == subset$simParam[1])
    HTML(sprintf("<strong>Figure S1.2. </strong>Each dot represents one of the %s selected simulation scenarios. The y-axes represent the various metrics
                 as estimated over 1,000 iterations per scenario, and the x-axes represent the various simulation parameters. Color indicates the various tested
                 meta-analysis algorithms. Hover over a data point to reveal more details.", count))
  })
output$mainViolinCaptionFixed <- renderUI({
subset <- filteredPivotedResultsFixed()
if (nrow(subset) == 0) {
return(NULL)
}
count <- sum(subset$type == subset$type[1] & subset$metric == subset$metric[1] & subset$simParam == subset$simParam[1])
HTML(sprintf("<strong>Figure S1.1. </strong>Violin plots showing the performance accross the %s selected simulation scenarios. The y-axes represent the various metrics
as estimated over 1,000 iterations per scenario, and the x-axes represent %s. Color indicates the various tested
meta-analysis algorithms.", count, input$simParamFixedRadioButton))
})
output$rankPlotFixed <- renderPlot({
subset <- filteredResultsFixed()
subset <- subset[!grepl("Bias", subset$metric), ]
processMetric <- function(metricSubset) {
metric <- metricSubset$metric[1]
descending <- grepl("Precision", metric)
if (grepl("Coverage", metric)) {
metricSubset$value <- abs(0.95 - metricSubset$value)
}
subgroups <- split(metricSubset, apply(metricSubset[, c(simParamsFixed, "metric")],1,paste,collapse = " "))
names(subgroups) <- NULL
metricSubset <- lapply(subgroups, computeRankVectors, descending = descending)
metricSubset <- do.call(rbind, metricSubset)
results <- aggregate(weight ~ type + rank, data = metricSubset, sum)
metricSubset$metric <- metric
return(metricSubset)
}
rankedSubset <- lapply(split(subset, subset$metric, drop = TRUE), processMetric)
rankedSubset <- do.call(rbind, rankedSubset)
rankedSubset$type <- gsub(" ", "\n", rankedSubset$type)
plot <- ggplot2::ggplot(rankedSubset, ggplot2::aes(x = rank, y = weight)) +
ggplot2::geom_col(color = rgb(0, 0, 0.8, alpha = 0), fill = rgb(0, 0, 0.8), alpha = 0.6) +
ggplot2::scale_x_continuous("Rank (lower is better)", breaks = min(rankedSubset$rank):max(rankedSubset$rank)) +
ggplot2::scale_y_continuous("Count") +
ggplot2::facet_grid(type~metric) +
ggplot2::theme(panel.grid.major.x = ggplot2::element_blank(),
panel.grid.minor.x = ggplot2::element_blank())
return(plot)
},
res = 110,
height = 800)
output$rankCaptionFixed <- renderUI({
subset <- filteredPivotedResultsFixed()
if (nrow(subset) == 0) {
return(NULL)
}
text <- "<strong>Figure S1.3. </strong>Histograms of algorithm ranks. Each bar represents the number of simulation scenarios where the algorithm on the
right achieved that rank on the metric at the top, compared to the other selected algorithms."
if (any(grepl("coverage", subset$metric))) {
text <- paste(text, "For coverage, algorithms were ranked by absolute difference between the estimated coverage and 95 percent.")
}
HTML(text)
})
# Random Fx ------------------------------------------------------------------
  # Pivot the results so that one simulation parameter becomes an explicit
  # (parameterValue, simParam) pair, adding a jittered x position for plotting.
  # Returns NULL when the parameter takes a single value and dropIfUnique is
  # TRUE (such parameters carry no information for a scatter plot).
  pivotDataRandom <- function(simParam, subset, dropIfUnique = TRUE) {
    if (dropIfUnique && length(unique(subset[, simParam])) == 1) {
      return(NULL)
    } else {
      temp <- subset
      maxValue <- max(subset[simParam])
      temp$parameterValue <- subset[, simParam]
      # Jitter points by up to +/-2% of the parameter's maximum to reduce overplotting.
      temp$jitter <- temp$parameterValue + runif(nrow(subset), -0.02 * maxValue, 0.02 * maxValue)
      temp$simParam <- simParam
      # Drop the original wide parameter columns; they are now encoded in
      # parameterValue/simParam.
      temp[simParamsRandom] <- NULL
      return(temp)
    }
  }
filteredResultsRandom <- reactive({
subset <- resultsRandom
subset <- subset[subset$metric %in% input$metricRandom, ]
subset <- subset[subset$type %in% input$typeRandom, ]
for (simParam in simParamsRandom) {
subset <- subset[subset[, simParam] %in% as.numeric(input[[paste0(simParam, "Random")]]), ]
}
return(subset)
})
filteredPivotedResultsRandom <- reactive({
subset <- filteredResultsRandom()
vizData <- lapply(simParamsRandom, pivotDataRandom, subset = subset)
vizData <- do.call(rbind, vizData)
return(vizData)
})
filteredViolinPivotedResultsRandom <- reactive({
subset <- filteredResultsRandom()
vizData <- pivotDataRandom(input$simParamRandomRadioButton, subset, dropIfUnique = FALSE)
return(vizData)
})
output$mainPlotRandom <- renderPlot({
subset <- filteredPivotedResultsRandom()
if (nrow(subset) == 0) {
return(NULL)
} else {
subset$type <- gsub(" ", "\n", subset$type)
plot <- ggplot2::ggplot(subset, ggplot2::aes(x = jitter, y = value, group = type, color = type)) +
ggplot2::geom_point(alpha = 0.4) +
ggplot2::facet_grid(metric~simParam, scales = "free", switch = "both") +
ggplot2::theme(legend.position = "top",
legend.title = ggplot2::element_blank(),
axis.title = ggplot2::element_blank(),
strip.placement = "outside",
strip.background = ggplot2::element_blank())
return(plot)
}
},
res = 150,
height = 800)
  # Violin plot of metric distributions for the random-effects results, one
  # violin per algorithm at each value of the selected simulation parameter.
  output$mainViolinPlotRandom <- renderPlot({
    subset <- filteredViolinPivotedResultsRandom()
    if (nrow(subset) == 0) {
      return(NULL)
    } else {
      # Reference lines (e.g. nominal coverage) for the metrics on display.
      ref <- getReferenceValues(subset$metric)
      # Wrap long algorithm names so the legend stays readable.
      subset$type <- gsub(" ", "\n", subset$type)
      plot <- ggplot(subset, aes(x = factor(parameterValue), y = value, fill = type))
      if (nrow(ref) > 0) {
        plot <- plot + geom_hline(aes(yintercept = value), data = ref, linetype = "dashed")
      }
      plot <- plot + geom_violin(position = position_dodge(0.9), scale = "width", alpha = 0.4) +
        facet_grid(metric~., scales = "free", switch = "both") +
        theme(legend.position = "top",
              legend.title = element_blank(),
              axis.title = element_blank(),
              strip.placement = "outside",
              strip.background = element_blank())
      return(plot)
    }
  },
  res = 125,
  height = 800)
output$hoverInfoPlotRandom <- renderUI({
subset <- filteredPivotedResultsRandom()
if (nrow(subset) == 0) {
return(NULL)
}
hover <- input$plotHoverMainPlotRandom
point <- nearPoints(subset, hover, threshold = 5, maxpoints = 1, addDist = TRUE)
if (nrow(point) == 0) return(NULL)
left_pct <- (hover$x - hover$domain$left) / (hover$domain$right - hover$domain$left)
top_pct <- (hover$domain$top - hover$y) / (hover$domain$top - hover$domain$bottom)
left_px <- hover$range$left + left_pct * (hover$range$right - hover$range$left)
top_px <- hover$range$top + top_pct * (hover$range$bottom - hover$range$top)
style <- paste0("position:absolute; z-index:100; background-color: rgba(245, 245, 245, 0.85); ",
"left:", left_px - 361, "px; top:", top_px - 150, "px; width:250px;")
# Unpivot:
unpivotedRow <- resultsRandom[resultsRandom[, point$simParam] == point$parameterValue &
resultsRandom$type == point$type &
resultsRandom$metric == point$metric &
resultsRandom$value == point$value, ]
unpivotedRow <- unpivotedRow[1, ]
allMetrics <- merge(resultsRandom, unpivotedRow[, c(simParamsRandom, "type")])
lines <- sprintf("<b> Type: </b>%s", point$type)
lines <- c(lines, "")
lines <- c(lines, sprintf("<b> %s: </b>%s", simParamsRandom, unpivotedRow[, simParamsRandom]))
lines <- c(lines, "")
lines <- c(lines, sprintf("<b> %s: </b>%.2f", allMetrics$metric, allMetrics$value))
div(
style = "position: relative; width: 0; height: 0",
wellPanel(
style = style,
p(HTML(paste(lines, collapse = "<br/>")))))
})
output$mainCaptionRandom <- renderUI({
subset <- filteredPivotedResultsRandom()
if (nrow(subset) == 0) {
return(NULL)
}
count <- sum(subset$type == subset$type[1] & subset$metric == subset$metric[1] & subset$simParam == subset$simParam[1])
HTML(sprintf("<strong>Figure S2.2. </strong>Each dot represents one of the %s selected simulation scenarios. The y-axes represent the various metrics
as estimated over 1,000 iterations per scenario, and the x-axes represent the various simulation parameters. Color indicates the various tested
meta-analysis algorithms. Hover over a data point to reveal more details.", count))
})
output$mainViolinCaptionRandom <- renderUI({
subset <- filteredPivotedResultsRandom()
if (nrow(subset) == 0) {
return(NULL)
}
count <- sum(subset$type == subset$type[1] & subset$metric == subset$metric[1] & subset$simParam == subset$simParam[1])
HTML(sprintf("<strong>Figure S2.1. </strong>Violin plots showing the performance accross the %s selected simulation scenarios. The y-axes represent the various metrics
as estimated over 1,000 iterations per scenario, and the x-axes represent %s. Color indicates the various tested
meta-analysis algorithms.", count, input$simParamRandomRadioButton))
})
output$rankPlotRandom <- renderPlot({
subset <- filteredResultsRandom()
subset <- subset[!grepl("Bias", subset$metric), ]
processMetric <- function(metricSubset) {
metric <- metricSubset$metric[1]
descending <- grepl("Precision", metric)
if (grepl("Coverage", metric)) {
metricSubset$value <- abs(0.95 - metricSubset$value)
}
subgroups <- split(metricSubset, apply(metricSubset[, c(simParamsRandom, "metric")],1,paste,collapse = " "))
names(subgroups) <- NULL
metricSubset <- lapply(subgroups, computeRankVectors, descending = descending)
metricSubset <- do.call(rbind, metricSubset)
results <- aggregate(weight ~ type + rank, data = metricSubset, sum)
metricSubset$metric <- metric
return(metricSubset)
}
rankedSubset <- lapply(split(subset, subset$metric, drop = TRUE), processMetric)
rankedSubset <- do.call(rbind, rankedSubset)
rankedSubset$type <- gsub(" ", "\n", rankedSubset$type)
plot <- ggplot2::ggplot(rankedSubset, ggplot2::aes(x = rank, y = weight)) +
ggplot2::geom_col(color = rgb(0, 0, 0.8, alpha = 0), fill = rgb(0, 0, 0.8), alpha = 0.6) +
ggplot2::scale_x_continuous("Rank (lower is better)", breaks = min(rankedSubset$rank):max(rankedSubset$rank)) +
ggplot2::scale_y_continuous("Count") +
ggplot2::facet_grid(type~metric) +
ggplot2::theme(panel.grid.major.x = ggplot2::element_blank(),
panel.grid.minor.x = ggplot2::element_blank())
return(plot)
},
res = 110,
height = 800)
output$rankCaptionRandom <- renderUI({
subset <- filteredPivotedResultsRandom()
if (nrow(subset) == 0) {
return(NULL)
}
text <- "<strong>Figure S2.3. </strong>Histograms of algorithm ranks. Each bar represents the number of simulation scenarios where the algorithm on the
right achieved that rank on the metric at the top, compared to the other selected algorithms."
if (any(grepl("coverage", subset$metric))) {
text <- paste(text, "For coverage, algorithms were ranked by absolute difference between the estimated coverage and 95 percent.")
}
HTML(text)
})
})
|
cbb6436024c4dbb147784dbf5ad3cffc747d3790
|
6f42a7294cad97e68e591857e822652af9c7c750
|
/R/dual_scale_plot.R
|
a4a022c3c1ab9d00688869e458dab4deeb35a46a
|
[
"Apache-2.0"
] |
permissive
|
cgranell/symptoms-scheduler
|
0e199bf3bc5930c5df708f931a3a41b514129ac4
|
df152a07e669b4bccabe99a42278a46333a69813
|
refs/heads/master
| 2023-03-08T19:43:36.393076
| 2021-02-24T16:15:35
| 2021-02-24T16:15:35
| 249,478,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,096
|
r
|
dual_scale_plot.R
|
#' Plot scheduler delay (points) and battery level (line) on a dual y-axis.
#'
#' @param selection data.frame with columns plan_date (date-time), delay
#'   (seconds), battery (percent, 0-100) and scheduler (grouping variable).
#' @param plot_title character; title for the plot.
#' @return a ggplot object.
dual_scale_plot <- function(selection, plot_title) {
  time_start <- min(selection$plan_date)
  time_end <- max(selection$plan_date)
  # Left axis: observed delay range, with ~10 evenly spaced breaks.
  ylim_delay <- c(min(selection$delay), max(selection$delay))
  ystep <- round((ylim_delay[2] - ylim_delay[1]) / 10, 3)
  ybks_delay <- round(seq(ylim_delay[1], ylim_delay[2], ystep), 3)
  ylim_battery <- c(min(selection$battery), max(selection$battery))
  ybks_battery <- seq(0, 100, 10)
  # Dual-scale trick: draw battery multiplied by scalefactor on the delay
  # scale, then undo the factor on the secondary axis. See:
  # https://stackoverflow.com/questions/3099219/ggplot-with-2-y-axes-on-each-side-and-different-scales
  scalefactor <- ylim_delay[2] / ylim_battery[2]
  selection %>%
    ggplot(aes(x = plan_date, color = factor(scheduler))) +
    geom_point(aes(y = delay), alpha = 0.6, size = 0.5) +
    geom_line(aes(y = battery * scalefactor), color = "red") +
    labs(title = plot_title,
         subtitle = paste0("Start: ", time_start, " - End: ", time_end),
         x = "Date [2 breaks per day]") +
    scale_x_datetime(breaks = scales::date_breaks("12 hours"),
                     labels = scales::date_format("%d-%m %H:%M", tz = "CET"),
                     expand = c(0, 0)) +
    scale_y_continuous(name = "delay [seconds]", breaks = ybks_delay, limits = ylim_delay,
                       sec.axis = sec_axis(~ . / scalefactor, breaks = ybks_battery, name = "battery [%]")) +
    guides(color = "none") +
    # theme_bw() is a *complete* theme and must come BEFORE any theme()
    # tweaks; in the original the legend theme() call preceded theme_bw()
    # and was silently discarded.
    theme_bw() +
    theme(
      # Color the secondary (battery) axis red to match the battery line.
      axis.line.y.right = element_line(color = "red"),
      axis.ticks.y.right = element_line(color = "red"),
      axis.text.y.right = element_text(color = "red"),
      axis.title.y.right = element_text(color = "red"),
      axis.text.x = element_text(size = 7, angle = 45)
    ) -> p
  return(p)
}
#' Plot the per-step difference in scheduler delay on a single y-axis.
#'
#' @param selection data.frame with columns step, plan_date (date-time),
#'   plan_date_diff (seconds) and scheduler (grouping variable).
#' @param plot_title character; title for the plot.
#' @return a ggplot object.
one_scale_plot <- function(selection, plot_title) {
  time_start <- min(selection$plan_date)
  time_end <- max(selection$plan_date)
  selection %>%
    ggplot(aes(x = step, group = factor(scheduler), color = factor(scheduler))) +
    geom_line(aes(y = plan_date_diff)) +
    labs(title = plot_title,
         subtitle = paste0("Start: ", time_start, " - End: ", time_end),
         x = "Date [2 breaks per day]") +
    scale_y_continuous(name = "delay diff [seconds]") +
    guides(color = "none") +
    # theme_bw() is a *complete* theme and must come BEFORE any theme()
    # tweaks; in the original the legend theme() call preceded theme_bw()
    # and was silently discarded.
    theme_bw() +
    theme(
      axis.text.x = element_text(size = 7, angle = 45)
    ) -> p
  return(p)
}
|
549ccea1db5acab2523c40085361f9ecf73eb091
|
6ad68090db6626c3e1c648047d57437337fb75ae
|
/src/an1/08.r
|
6bf7ba487c908e86f9f5179d6c4162f818eed7b8
|
[] |
no_license
|
int28h/RTasks
|
8764ba7fb8f06eb1b7e09d1dc4dd3a26458d12d6
|
88c39bb8e6b34c8743e16182e33ec5935ef3598f
|
refs/heads/master
| 2022-06-17T18:06:26.464545
| 2022-06-03T22:40:50
| 2022-06-03T22:40:50
| 116,028,292
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,846
|
r
|
08.r
|
# Exercise (from stepik.org, original statement in Russian): build a bar chart
# of the eye-color distribution by hair color for the WOMEN in the built-in
# HairEyeColor table. X axis = hair color, bar fill = eye color, Y axis =
# number of observations; position = "dodge" places the eye colors side by
# side, and scale_fill_manual() applies the requested custom colors.
# Expected result: https://ucarecdn.com/9e39e35f-4a35-44fe-8d75-7e0639ce54b9/
library("ggplot2")
mydata <- as.data.frame(HairEyeColor)
obj <- ggplot(mydata[mydata$Sex %in% "Female", ],
              aes(x = Hair, y = Freq, fill = Eye)) +
  geom_bar(stat = "identity", position = "dodge") +
  scale_fill_manual(values = c("Brown", "Blue", "Darkgrey", "Darkgreen"))
|
35ab0f99f4d321b7304fbf1aefb58018b9f4025d
|
4067ff0db87ec536dd48b38dbb2de6e13536cd67
|
/Unit7/src/tweet.R
|
268c4d19ecb46996b2652aecbb8ecb88587117af
|
[] |
no_license
|
strgeon/rClassFiles
|
2b4267676e39878da7077716f508ae1aee0ea3c3
|
adeb452b82e4fac642f8e65b608d4843fee15e66
|
refs/heads/master
| 2020-12-02T19:41:00.846565
| 2017-08-22T03:00:02
| 2017-08-22T03:00:02
| 96,375,498
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,222
|
r
|
tweet.R
|
# Text analytics on tweets about Apple (Analytics Edge, Unit 7):
# build a tm corpus, a document-term matrix, and explore frequent terms.
# Author: Scott
###############################################################################
# NOTE(review): setwd() with an absolute, machine-specific path makes the
# script non-portable; prefer project-relative paths.
setwd("C:/Users/Scott/SkyDrive/Documents/Courses/AnalyticsEdge/Unit7/data")
getwd()
# [1] "C:/Users/Scott/SkyDrive/Documents/Courses/AnalyticsEdge/Unit7/data"
# Load tweets; Avg is the human-labeled average sentiment per tweet.
tweets = read.csv("tweets.csv", stringsAsFactors=FALSE )
str(tweets)
# 'data.frame': 1181 obs. of 2 variables:
# $ Tweet: chr "I have to say, Apple has by far the best customer care service I have ever received! @Apple @AppStore" "iOS 7 is so fricking smooth & beautiful!! #ThanxApple @Apple" "LOVE U @APPLE" "Thank you @apple, loving my new iPhone 5S!!!!! #apple #iphone5S pic.twitter.com/XmHJCU4pcb" ...
# $ Avg : num 2 2 1.8 1.8 1.8 1.8 1.8 1.6 1.6 1.6 ...
# Standard tm preprocessing pipeline: lower-case, strip punctuation, drop
# English stop words, then build the document-term matrix.
library("tm")
corpusTweet = VCorpus(VectorSource(tweets$Tweet))
corpusTweet = tm_map(corpusTweet , content_transformer(tolower))
corpusTweet = tm_map(corpusTweet , removePunctuation)
corpusTweet = tm_map(corpusTweet ,removeWords, stopwords("english"))
dtm = DocumentTermMatrix(corpusTweet)
# Additional exploration picked up from other materials:
findFreqTerms(dtm, 45)
# [1] "apple" "can" "dont" "get"
# [5] "ipad" "iphone" "ipod" "ipodplayerpromo"
# [9] "itunes" "just" "like" "new"
# [13] "now" "phone" "will"
findAssocs(dtm, "iphone", 0.15)
# $iphone
# thoughts 5same choose date httpowly22kxvv
# 0.19 0.17 0.17 0.17 0.17
# 5cheap nano new samssung vulgar
# 0.16 0.16 0.16 0.16 0.16
inspect(DocumentTermMatrix(corpusTweet,list(dictionary = c("apple", "iphone", "nano"))))
# <<DocumentTermMatrix (documents: 1181, terms: 3)>>
# Non-/sparse entries: 1387/2156
# Sparsity : 61%
# Maximal term length: 6
# Weighting : term frequency (tf)
# Sample :
# Terms
# Docs apple iphone nano
# 13 1 1 0
# 17 1 1 0
# 20 1 1 0
# 21 1 1 0
# 22 1 1 0
# 4 1 1 0
# 7 1 1 0
# 777 1 1 1
# 8 1 1 0
# 951 1 1 1
# Convert the DTM to a dense data frame (one column per term) for wordcloud.
allTweets = as.data.frame(as.matrix(dtm))
str(allTweets)
# 'data.frame': 1181 obs. of 3780 variables:
print(corpusTweet)
# <<VCorpus>>
# Metadata: corpus specific: 0, document level (indexed): 0
# Content: documents: 1181
inspect(corpusTweet[2:4])
# <<VCorpus>>
# Metadata: corpus specific: 0, document level (indexed): 0
# Content: documents: 3
#
# [[1]]
# <<PlainTextDocument>>
# Metadata: 7
# Content: chars: 51
#
# [[2]]
# <<PlainTextDocument>>
# Metadata: 7
# Content: chars: 12
#
# [[3]]
# <<PlainTextDocument>>
# Metadata: 7
# Content: chars: 74
# Error in print(corpusTweetRV) : object 'corpusTweetRV' not found
meta(corpusTweet[[4]])
# author : character(0)
# datetimestamp: 2017-08-04 20:45:37
# description : character(0)
# heading : character(0)
# id : 4
# language : en
# origin : character(0)
# <<PlainTextDocument>>
# Metadata: 7
# Content: chars: 74
inspect(corpusTweet[[4]])
# <<PlainTextDocument>>
# Metadata: 7
# Content: chars: 74
#
# thank apple loving new iphone 5s apple iphone5s pictwittercomxmhjcu4pcb
#install.packages("wordcloud")
library(wordcloud)
#my call to wordcloud was wrong
#wordcloud(allTweets,colSums(allTweets), scale=c(3, 0.3))
#table(colSums(allTweets))
#str(allTweets)
# Word cloud of all terms, sized by total frequency across tweets.
wordcloud(colnames(allTweets), colSums(allTweets))
# Attempt at a cloud of negative tweets only.
twt = subset(tweets,Avg < 0)
corpusTweet2 = VCorpus(VectorSource(twt$Tweet))
# NOTE(review): the following tm_map calls pass `corpusTweet` (the FULL
# corpus) instead of `corpusTweet2`, so the negative-only subset above is
# discarded and each call also resets the previous preprocessing step.
corpusTweet2 = tm_map(corpusTweet , content_transformer(tolower))
corpusTweet2 = tm_map(corpusTweet , removePunctuation)
corpusTweet2 = tm_map(corpusTweet ,removeWords, stopwords("english"))
corpusTweet2 = tm_map(corpusTweet ,removeWords, c("apple"))
corpusTweet2 = tm_map(corpusTweet ,removeWords, c("apple", stopwords("english")))
dtm2 = DocumentTermMatrix(corpusTweet2)
allTweets2 = as.data.frame(as.matrix(dtm2))
wordcloud(colnames(allTweets2), colSums(allTweets2))
str(allTweets)
str(corpusTweet)
# Rebuild the full corpus/DTM from scratch, then subset columns by the
# original tweets' sentiment labels instead of re-filtering the corpus.
corpusTweet = VCorpus(VectorSource(tweets$Tweet))
corpusTweet = tm_map(corpusTweet , content_transformer(tolower))
corpusTweet = tm_map(corpusTweet , removePunctuation)
corpusTweet = tm_map(corpusTweet ,removeWords, stopwords("english"))
dtm = DocumentTermMatrix(corpusTweet)
allTweets = as.data.frame(as.matrix(dtm))
negativeTweets = subset(allTweets, tweets$Avg <= -1)
wordcloud(colnames(negativeTweets), colSums(negativeTweets))
wordcloud(colnames(allTweets), colSums(allTweets),random.order=FALSE)
wordcloud(colnames(allTweets), colSums(allTweets),rot.per=.1)
install.packages("RColorBrewer")
library(RColorBrewer)
# NOTE(review): brewer.pal() requires (n, name) with a quoted palette name;
# `brewer.pal(Accent)` as written errors. The corrected form is used below.
brewer.pal(Accent)
display.brewer.all()
pal <- brewer.pal(6,"Set2")
wordcloud(colnames(allTweets), colSums(allTweets),pal)
|
10b098940a31a72034c9c037a0f9d24982aa51d7
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/SelvarMix/man/SortvarClust.Rd
|
309e88117206376965a3ed5b8be53a5eb56c62bb
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
rd
|
SortvarClust.Rd
|
\name{SortvarClust}
\alias{SortvarClust}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Variable ranking with LASSO in model-based clustering
}
\description{
This function implements variable ranking procedure in model-based clustering
using the penalized EM algorithm of Zhou et al (2009).
}
\usage{
SortvarClust(x, nbcluster, type, lambda, rho, nbcores)
}
\arguments{
\item{x}{
matrix containing quantitative data.
Rows correspond to observations and
columns correspond to variables
}
\item{nbcluster}{
numeric listing of the number of clusters (must be integers)
}
\item{type}{character defining the type of ranking procedure, must be "lasso" or "likelihood". Default is "lasso"}
\item{lambda}{
numeric listing of the tuning parameters for \eqn{\ell_1} mean penalty
}
\item{rho}{
numeric listing of the tuning parameters for \eqn{\ell_1} precision matrix penalty
}
\item{nbcores}{
number of CPUs to be used when parallel computing is utilized (default is 2)
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
matrix of variable rankings: each row gives the ranking of the
variables obtained for the corresponding value of nbcluster.
}
\references{
Zhou, H., Pan, W., and Shen, X., 2009. "Penalized model-based
clustering with unconstrained covariance matrices".
Electronic Journal of Statistics, vol. 3, pp.1473-1496.
Maugis, C., Celeux, G., and Martin-Magniette, M. L., 2009.
"Variable selection in model-based clustering:
A general variable role modeling". Computational
Statistics and Data Analysis, vol. 53/11, pp. 3872-3882.
Sedki, M., Celeux, G., Maugis-Rabusseau, C., 2014.
"SelvarMix: A R package for variable selection in
model-based clustering and discriminant analysis with
a regularization approach". Inria Research Report
available at \url{http://hal.inria.fr/hal-01053784}
}
\author{
Mohammed Sedki <mohammed.sedki@u-psud.fr>
}
%\note{
%%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link{SortvarLearn}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
## wine data set
## n = 178 observations, p = 27 variables
require(Rmixmod)
require(glasso)
data(wine)
set.seed(123)
obj <- SortvarClust(x=wine[,1:27], nbcluster=1:5, nbcores=4)
}
}
\keyword{Variable ranking}
\keyword{Penalized model-based clustering}
|
430085cdbe2b8a1b35420a75d9a406eca4df49e7
|
507ca86f326549c382bedc323ad61950956165fd
|
/algorithm.r
|
721a2623d3a52785fbe09df6b7ebfd80879da283
|
[] |
no_license
|
domsob/knapsack-strategies
|
d757f4a0a2772a743702f9c61084ecaa00b9ec53
|
e8b1e436a094986461b3e64bf7e763f1ff4d9c2e
|
refs/heads/master
| 2021-01-20T05:53:09.702650
| 2017-04-29T23:07:56
| 2017-04-29T23:07:56
| 89,817,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,102
|
r
|
algorithm.r
|
# Tutorial for GA library: https://cran.r-project.org/web/packages/GA/GA.pdf
library(GA)
# Fix the RNG so the random knapsack instance (and the GA run) is reproducible.
set.seed(42);
limit <- 10     # knapsack weight capacity
objects <- 100  # number of items
# Random item values (1..1000) and weights (1..15), sampled with replacement.
values <- sample(1:1000, objects, TRUE)
weights <- sample(1:15, objects, TRUE)
cat('\n\n+-----------------------------------+\n')
cat('| Settings |\n')
cat('+-----------------------------------+\n\n')
cat('limit = ')
cat(limit)
cat('\n')
cat('objects = ')
cat(objects)
cat('\n\n\n')
cat('+-----------------------------------+\n')
cat('| Values |\n')
cat('+-----------------------------------+\n\n')
print(values)
cat('\n\n')
cat('+-----------------------------------+\n')
cat('| Weights |\n')
cat('+-----------------------------------+\n\n')
print(weights)
cat('\n\n')
# First-fit greedy knapsack: walk the items in their given order and take
# every item that still fits within the remaining capacity.
#
# Generalized from the original zero-argument version: the problem instance
# can now be passed explicitly; the defaults preserve the old behavior of
# reading the global `values`, `weights` and `limit`.
#
# item_values  : numeric vector of item values.
# item_weights : numeric vector of item weights (same length).
# capacity     : maximum total weight allowed.
# Returns the total value of the packed items.
greedyStrategy <- function(item_values = values, item_weights = weights, capacity = limit) {
  total_value <- 0
  total_weight <- 0
  for (i in seq_along(item_weights)) {
    # Equivalent to the original `< limit + 1` for integer weights, and also
    # correct for non-integer weights.
    if (total_weight + item_weights[i] <= capacity) {
      total_value <- total_value + item_values[i]
      total_weight <- total_weight + item_weights[i]
    }
  }
  return(total_value)
}
# Report the fitness achieved by the first-fit greedy baseline.
cat('+-----------------------------------+\n')
cat('| Greedy Strategy |\n')
cat('+-----------------------------------+\n\n')
cat('Fitness function value = ')
cat(greedyStrategy())
cat('\n\n\n')
# Density-greedy knapsack: sort items by value density (value/weight) in
# descending order, then greedily take every item that still fits.
#
# Replaces the original hand-rolled O(n^2) bubble sort with order(), which is
# stable (as the strict-inequality bubble sort was), so tie-breaking and the
# final result are unchanged. Also generalized: the problem instance can be
# passed explicitly; the defaults preserve the old behavior of reading the
# global `values`, `weights` and `limit`.
#
# item_values  : numeric vector of item values.
# item_weights : numeric vector of item weights (same length).
# capacity     : maximum total weight allowed.
# Returns the total value of the packed items.
densityStrategy <- function(item_values = values, item_weights = weights, capacity = limit) {
  ord <- order(item_values / item_weights, decreasing = TRUE)
  total_value <- 0
  total_weight <- 0
  for (i in ord) {
    if (total_weight + item_weights[i] <= capacity) {
      total_weight <- total_weight + item_weights[i]
      total_value <- total_value + item_values[i]
    }
  }
  return(total_value)
}
# Report the fitness achieved by the density-greedy baseline.
cat('+-----------------------------------+\n')
cat('| Density Strategy |\n')
cat('+-----------------------------------+\n\n')
cat('Fitness function value = ')
cat(densityStrategy())
cat('\n\n\n')
# GA fitness: total value of the items selected by a binary chromosome, or 0
# when the selection exceeds the weight capacity (infeasible solution).
#
# Vectorized (the original iterated gene by gene) and generalized: the
# problem instance can be passed explicitly; the defaults preserve the old
# behavior of reading the global `values`, `weights` and `limit`. The GA
# still calls this with a single chromosome argument.
#
# chromosome   : 0/1 vector, one gene per item (1 = item selected).
# item_values  : numeric vector of item values.
# item_weights : numeric vector of item weights (same length).
# capacity     : maximum total weight allowed.
fitnessFunction <- function(chromosome, item_values = values,
                            item_weights = weights, capacity = limit) {
  selected <- chromosome == 1
  if (sum(item_weights[selected]) > capacity) {
    # Infeasible: hard-penalize to zero, exactly as the original did.
    return(0)
  }
  return(sum(item_values[selected]))
}
# Custom GA initial population: a popSize x objects 0/1 matrix in which each
# gene is drawn independently with P(1) = 0.04, biasing the initial
# population toward sparse (feasible) knapsack selections.
# `object` is the GA S4 object passed in by ga(); `objects` is the global
# item count.
populationFunction <- function(object) {
  m0 <- matrix(0, object@popSize, objects)
  m01 <- apply(m0, c(1,2), function(x) sample(c(0,1),1,TRUE,c(0.96,0.04))) # probability for 0 and 1
  return(m01)
}
# Run the genetic algorithm on the binary knapsack encoding and show results.
result <- ga(type="binary", fitness=fitnessFunction, nBits=objects, popSize=100, maxiter=1000, pcrossover=0.8, pmutation=0.1, elitism=20, run=1000, population=populationFunction)
summary(result)
#summary(result)$solution
plot(result)
|
02b529512b277a5db26755d50b35f489d1cddaeb
|
cb178cdf2f6701767ca839975d9e734f9ec41bc3
|
/R/a17.R
|
8a38fcd6daaccb6018964cbe97f0b4bca0a9827f
|
[] |
no_license
|
Kata-na/aoc_2020
|
a38b81d3f3386744eb1e71f7beee286e8b0305eb
|
45b7ca90b455bcb7141c870a03c704fede33310f
|
refs/heads/main
| 2023-02-25T00:52:44.480746
| 2021-01-31T23:04:28
| 2021-01-31T23:08:53
| 323,988,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,482
|
r
|
a17.R
|
rm(list = ls())
t1 <- Sys.time()
library(data.table)
library(magrittr)
library(stringr)
cycles <- 6
inp <- fread('../Input/a17.txt', header = FALSE, col.names = 'input') %>%
.[, input := gsub('#', 1, gsub('\\.', 0, input))]
inp_matrix <-
matrix(as.integer(unlist(strsplit(inp$input, ''))), ncol = nrow(inp)) %>% t()
inp_matrix<- stringr::str_split_fixed(inp$input, '', Inf)
storage.mode(inp_matrix) <- 'integer'
################################################################################
## --- PART 1 ---
################################################################################
# INITIALIZING ARRAY, WHICH Will represent 3D CUBE
arra_dims <- c(nrow(inp_matrix) + 2 * cycles,
ncol(inp_matrix) + 2 * cycles,
1 + 2 * cycles)
inp_cube <- array(0, dim = arra_dims)
inp_cube[(cycles + 1):(cycles + nrow(inp_matrix)),
(cycles + 1):(cycles + ncol(inp_matrix)),
cycles + 1] <- inp_matrix
for (it in 1:cycles) {
upd_cube <- inp_cube
d <- dim(inp_cube)
for (x in 1:d[1]) {
for (y in 1:d[2]) {
for (z in 1:d[3]) {
target <- upd_cube[x, y, z]
neighbours <- sum(inp_cube[x + (-1:1)[x + (-1:1) <= d[1]],
y + (-1:1)[y + (-1:1) <= d[2]],
z + (-1:1)[z + (-1:1) <= d[3]]]) - target
if(target == 1 && !(neighbours %in% c(2,3))){
upd_cube[x, y, z] <- 0L
} else if(target == 0 && neighbours == 3){
upd_cube[x, y, z] <- 1
}
}
}
}
inp_cube <- upd_cube
}
ans <- sum(inp_cube)
print(ans)
################################################################################
## --- PART 2 ---
################################################################################
# INITIALIZING ARRAY, WHICH Will represent 4D hypercuboid
arra_dims <- c(nrow(inp_matrix) + 2 * cycles,
ncol(inp_matrix) + 2 * cycles,
1 + 2 * cycles,
1 + 2 * cycles)
inp_hypercube <- array(0, dim = arra_dims)
inp_hypercube[(cycles + 1):(cycles + nrow(inp_matrix)),
(cycles + 1):(cycles + ncol(inp_matrix)),
cycles + 1,
cycles + 1] <- inp_matrix
ind_x <- (cycles + 1):(cycles + nrow(inp_matrix))
ind_y <- (cycles + 1):(cycles + nrow(inp_matrix))
ind_z <- cycles + 1
ind_w <- cycles + 1
for (it in 1:cycles) {
upd_cube <- inp_hypercube
d <- dim(inp_hypercube)
# Optimizing, looping only through potential region were might be chages
ind_x <- unique(c(ind_x -1, ind_x +1))
ind_y <- unique(c(ind_y -1, ind_y +1))
ind_z <- unique(c(ind_z -1, ind_z, ind_z +1))
ind_w <- unique(c(ind_w -1, ind_w, ind_w +1))
for (x in ind_x) {
for (y in ind_y) {
for (z in ind_z) {
for (w in ind_w) {
target <- upd_cube[x, y, z, w]
neighbours <- sum(inp_hypercube[x + (-1:1)[x + (-1:1) <= d[1]],
y + (-1:1)[y + (-1:1) <= d[2]],
z + (-1:1)[z + (-1:1) <= d[3]],
w + (-1:1)[w + (-1:1) <= d[4]]]) - target
if(target == 1 && !(neighbours %in% c(2,3))){
upd_cube[x, y, z, w] <- 0L
} else if(target == 0 && neighbours == 3){
upd_cube[x, y, z, w] <- 1
}
}
}
}
}
inp_hypercube <- upd_cube
}
ans <- sum(inp_hypercube)
print(ans)
print(Sys.time() - t1)
|
baed7efe82f9786bfddf272f2b032840511aee93
|
349648b07a7c0d490096b87c4cac0fa171155203
|
/bootstrap_cdf.R
|
f2726815724d06055788185ef060c2b3ca6ade35
|
[] |
no_license
|
SebastianKuzara/fit_distibution
|
1dd372c4c9eb3fc5b0d7cb696f025e3dcd92c4dd
|
f30edf73d5186e1f7ae7633fc553c4c02f87362e
|
refs/heads/master
| 2021-01-22T04:09:30.086768
| 2017-05-25T20:29:10
| 2017-05-25T20:29:10
| 92,432,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 820
|
r
|
bootstrap_cdf.R
|
## Bootstrap - fitting the data to a CDF
# Evaluation grid on [0, 1] for the beta CDF.
x <- seq(0, 1, by = 0.01)
# 1000 bootstrap resamples: refit a beta distribution (method of moments)
# to each resample and evaluate its CDF on the grid; result is a
# length(x) x 1000 matrix of bootstrap CDF curves.
# NOTE(review): depends on the session global `norm.rozwody` and on
# fitdist() (from fitdistrplus) being loaded -- confirm both before
# sourcing this script.
boot.beta.cdf <- sapply(1:1000, function(i) {
  s <- sample(norm.rozwody, length(norm.rozwody), replace = TRUE)
  mme <- fitdist(s, "beta", "mme")
  p <- pbeta(x, shape1 = mme$estimate["shape1"], shape2 = mme$estimate["shape2"])
  return(p)
})
# Draw every bootstrap CDF as a faint grey curve.
plot(x = x, y = boot.beta.cdf[, 1], type = "l", col = rgb(0.8, 0.8, 0.8, alpha = 0.1), ylim = c(0, max(boot.beta.cdf)))
for(i in 2:ncol(boot.beta.cdf)) {
  lines(x = x, y = boot.beta.cdf[, i], col = rgb(.6, .6, .6, .1))
}
# Pointwise 2.5% / 50% / 97.5% envelope across the bootstrap CDFs.
cdf.quantiles <- apply(boot.beta.cdf, 1, quantile, c(0.025, 0.5, 0.975))
lines(x = x, y = cdf.quantiles[1, ], col = "red", lty = 2)
lines(x = x, y = cdf.quantiles[3, ], col = "red", lty = 2)
lines(x = x, y = cdf.quantiles[2, ], col = "red")
# Overlay the empirical CDF of the original data for comparison.
lines(ecdf(norm.rozwody), pch=16)
|
370db4430381a164deba65a71bacec0d4472d2d6
|
cc2e368fe5fdfe8ed499c60ad15152a991b8a7cc
|
/man/gp_is_wd_geoplumber.Rd
|
fc31b14c2bbfada27d32301db97637dcae3b8787
|
[] |
no_license
|
ATFutures/geoplumber
|
c4a294b080eedec4f9b60a74fb56f21938d7da5a
|
9d80d53d55b1400c18318677f4a54e3dd1ff913a
|
refs/heads/master
| 2023-06-25T08:35:20.512274
| 2023-06-09T08:50:20
| 2023-06-09T08:50:20
| 133,939,396
| 60
| 6
| null | 2021-10-14T10:00:40
| 2018-05-18T10:21:47
|
R
|
UTF-8
|
R
| false
| true
| 587
|
rd
|
gp_is_wd_geoplumber.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_wd_geoplumber.R
\name{gp_is_wd_geoplumber}
\alias{gp_is_wd_geoplumber}
\title{Helper function to determine if the working directory is a geoplumber app.}
\usage{
gp_is_wd_geoplumber(path = ".")
}
\arguments{
\item{path}{check particular path}
}
\value{
\code{TRUE} or \code{FALSE}
}
\description{
Conditions for a geoplumber app (at least initiated with)
\enumerate{
\item An 'R' directory with R/plumber.R file
\item A 'src' directory
\item A 'package.json' file at root.
}
}
\examples{
{
gp_is_wd_geoplumber()
}
}
|
425780796c640ede5c79cd6144532287fee4a3d7
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/282_0/rinput.R
|
6f0de9f79f0af9ee3054424c99863e0fc66c8cd1
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
rinput.R
|
# Read a phylogenetic tree in Newick format, remove its root, and write
# the unrooted tree back out (file names are fixed for this batch job).
library(ape)
testtree <- read.tree("282_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="282_0_unrooted.txt")
|
7495876def94df636cc9827dc3ab0ce6927e0d18
|
6e32987e92e9074939fea0d76f103b6a29df7f1f
|
/googleaiplatformv1.auto/man/GoogleCloudAiplatformV1ModelEvaluationSliceSlice.Rd
|
48b47bc8325c2f9f2973153c072cf84c7ee9125b
|
[] |
no_license
|
justinjm/autoGoogleAPI
|
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
|
6a26a543271916329606e5dbd42d11d8a1602aca
|
refs/heads/master
| 2023-09-03T02:00:51.433755
| 2023-08-09T21:29:35
| 2023-08-09T21:29:35
| 183,957,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 647
|
rd
|
GoogleCloudAiplatformV1ModelEvaluationSliceSlice.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1ModelEvaluationSliceSlice}
\alias{GoogleCloudAiplatformV1ModelEvaluationSliceSlice}
\title{GoogleCloudAiplatformV1ModelEvaluationSliceSlice Object}
\usage{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice()
}
\value{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice object
}
\description{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Definition of a slice.
}
\concept{GoogleCloudAiplatformV1ModelEvaluationSliceSlice functions}
|
f922a37d4c988a7518a1c8d3cc6439b47c3a19a9
|
fd29d26a6dedcd7aa00ea4f913e43625aaf035c2
|
/code/pascal_code.R
|
37f804eec8c7b017a92cb86fb761cdcf2a792074
|
[] |
no_license
|
majimaken/econometrics-3-project
|
91d54c4e628712cdea5ea49fabd3d9e2d669a553
|
78bb813ed71f4c46fa4a0511d986af096b40489a
|
refs/heads/main
| 2023-04-24T16:35:30.655217
| 2021-05-17T11:23:16
| 2021-05-17T11:23:16
| 363,714,186
| 1
| 0
| null | 2021-05-17T11:23:17
| 2021-05-02T17:48:17
|
R
|
UTF-8
|
R
| false
| false
| 5,955
|
r
|
pascal_code.R
|
## FFN (1, 4)
## RNN (7)
## LSTM (9, 8, 2)
## GRU (8, 9)
rm(list = ls())
source("add/libraries.R")
source("add/Functions_RNN.R")
load("data/ETH_2021-05-05.rda")
head(ETH)
tail(ETH)
# Define log returns based on closing prices
logret <- diff(log(ETH$`ETH-USD.Close`))
logret <- na.omit(logret)
colnames(logret) <- "ETH Log Returns"
subi=logret["2020-10-01::2021-04-30"]
in_out_sample_separator <- "2020-10-01"
data_obj <- data_function(x=logret, lags=10, in_out_sep="2021-04-01", start="2020-10-01", end="2021-04-30")
# Prepare data with lags 1-7
########################################################fnn################################################################
head(data_obj$data_mat)
anz=10000
outtarget=data_obj$target_out
for(i in 1:anz)
{
if (i ==1 )
{
net=estimate_nn (train_set=data_obj$train_set,number_neurons=c(1,4),data_mat=data_obj$data_mat,test_set=data_obj$test_set,f=data_obj$f)
perfall=cumsum(sign(net$predicted_nn) *outtarget)
}else
{
net=estimate_nn (train_set=data_obj$train_set,number_neurons=c(1,4),data_mat=data_obj$data_mat,test_set=data_obj$test_set,f=data_obj$f)
perf=cumsum(sign(net$predicted_nn) *outtarget)
perfall=cbind(perfall,perf)
}
}
mean=apply(perfall,1,mean)
mean=reclass(mean,perfall)
tail(perfall)
perfnew=merge(mean,cumsum(outtarget),perfall)
##################################lstm################################################################
## LSTM (9, 8, 2)
anz=100
nl_comb= 2
epochs= 30
nn_type="lstm"
learningrate=0.05
for(i in 1:anz)
{
if (i ==1 )
{
perfall=cumsum(rnn_estim(data_obj, nl_comb, epochs, nn_type, learningrate)$perf_out)
}else
{
perf=cumsum(rnn_estim(data_obj, nl_comb, epochs, nn_type, learningrate)$perf_out)
perfall=cbind(perfall,perf)
cat("\014")
}
}
outtarget=data_obj$target_out
mean=apply(perfall,1,mean)
mean=reclass(mean,outtarget)
perfall=reclass(perfall,outtarget)
perfnew_lstm=merge(mean,cumsum(outtarget),perfall)
save(perfnew_lstm, file="C:/Users/buehl/OneDrive/Dokumente/ZHAW/BSc Wirtschaftsingenieur/SEM8/Oeko3/econometrics-3-project/data/perfnew_lstm.rda")
#################################RNN################################################################
anz=100
nl_comb= c(10,9)
epochs=10
nn_type="rnn" ## RNN (7)
learningrate=0.05
for(i in 1:anz)
{
if (i ==1 )
{
perfall=cumsum(rnn_estim(data_obj, nl_comb, epochs, nn_type, learningrate)$perf_out)
}else
{
perf=cumsum(rnn_estim(data_obj, nl_comb, epochs, nn_type, learningrate)$perf_out)
perfall=cbind(perfall,perf)
cat("\014")
}
}
outtarget=data_obj$target_out
mean=apply(perfall,1,mean)
mean=reclass(mean,outtarget)
perfall=reclass(perfall,outtarget)
perfnew_rnn=merge(mean,cumsum(outtarget),perfall)
save(perfnew_rnn, file="C:/Users/buehl/OneDrive/Dokumente/ZHAW/BSc Wirtschaftsingenieur/SEM8/Oeko3/econometrics-3-project/data/perfnew_rnn.rda")
#################################gru################################################################
anz=100
## GRU (8, 9)
nl_comb= c(6, 10)
epochs=30
nn_type="gru"
learningrate=0.05
for(i in 1:anz)
{
if (i ==1 )
{
perfall=cumsum(rnn_estim(data_obj, nl_comb, epochs, nn_type, learningrate)$perf_out)
}else
{
perf=cumsum(rnn_estim(data_obj, nl_comb, epochs, nn_type, learningrate)$perf_out)
perfall=cbind(perfall,perf)
cat("\014")
}
}
outtarget=data_obj$target_out
mean=apply(perfall,1,mean)
mean=reclass(mean,outtarget)
perfall=reclass(perfall,outtarget)
perfnew_gru=merge(mean,cumsum(outtarget),perfall)
save(perfnew_gru, file="C:/Users/buehl/OneDrive/Dokumente/ZHAW/BSc Wirtschaftsingenieur/SEM8/Oeko3/econometrics-3-project/data/perfnew_gru.rda")
######################plots #############################################
par(mfrow=c(2,2))
##nn
anz=10000
plot(perfnew,col=c("red","black",rep("grey",anz)),lwd=c(7,7,rep(1,anz)),main="Out of sample Perfomance Feed forward net")
name=c("Buy and Hold","Mean of Nets")
addLegend("topleft",
legend.names=name,
col=c("black","red"),
lty=rep(1,1),
lwd=rep(2,2),
ncol=1,
bg="white")
##lstm
anz=100
plot(perfnew_lstm,col=c("blue","black",rep("grey",anz)),lwd=c(7,7,rep(1,anz)),main="Out of sample Perfomance LSTM")
name=c("Buy and Hold","Mean of Nets")
addLegend("topleft",
legend.names=name,
col=c("black","blue"),
lty=rep(1,1),
lwd=rep(2,2),
ncol=1,
bg="white")
##rnn
anz=100
plot(perfnew_rnn,col=c("orange","black",rep("grey",anz)),lwd=c(7,7,rep(1,anz)),main="Out of sample Perfomance rnn")
name=c("Buy and Hold","Mean of Nets")
addLegend("topleft",
legend.names=name,
col=c("black","orange"),
lty=rep(1,1),
lwd=rep(2,2),
ncol=1,
bg="white")
##gru
anz=100
plot(perfnew_gru,col=c("green","black",rep("grey",anz)),lwd=c(7,7,rep(1,anz)),main="Out of sample Perfomance GRU")
name=c("Buy and Hold","Mean of Nets")
addLegend("topleft",
legend.names=name,
col=c("black","green"),
lty=rep(1,1),
lwd=rep(2,2),
ncol=1,
bg="white")
#al together
name=c("Buy and Hold","feed forward","rnn","lstm","gru")
colors=c("black","red","blue","orange","green")
plot(cbind(perfnew_rnn[,2],perfnew[,1],perfnew_rnn[,1],perfnew_lstm[,1],perfnew_gru[,1]),main="Performance comparison",col=colors)
addLegend("topleft",
legend.names=name,
col=colors,
lty=rep(1,1),
lwd=rep(2,2),
ncol=1,
bg="white")
|
708c361499f0fcee28372d6910fc8cbea3296574
|
be58f5f0012d70db570de0e3a745ceefc91f91e0
|
/Week4/Code/StatsWithSparrows13.R
|
f38c796babbedd0a18a130c790093e9a509dd765
|
[] |
no_license
|
amysolman/CMEECourseWork
|
caa2ad6ef11c819d2eb295cfe03f1571b7ad9cae
|
fb2a21bc7b625fed643eaad03bf51c458645abc4
|
refs/heads/master
| 2021-07-25T01:10:00.653892
| 2020-08-27T13:06:55
| 2020-08-27T13:06:55
| 212,303,861
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,030
|
r
|
StatsWithSparrows13.R
|
# Amy Solman amy.solman19@imperial.ac.uk
# 28th October 2019
# Lecture 13: ANOVA
##########LECTURE THIRTEEN##########
rm(list=ls())
d <- read.table("../Data/SparrowSize.txt", header = TRUE)
str(d)
d1 <- subset(d, d$Wing!="NA")
summary(d1$Wing)
model1 <- lm(Wing~Sex.1, data = d1)
summary(model1)
boxplot(d1$Wing~d1$Sex.1, ylab ="Wing length (mm)")
anova(model1)
b <- subset(d, d$Year!=2000) #exclude data for 2000 because it is outlier
t.test(d1$Wing~d1$Sex.1, var.equal=TRUE)
boxplot(d1$Wing~d1$BirdID, ylab="Wing length (mm)")
install.packages("dplyr")
require(dplyr)
tbl_df(d1)
glimpse(d1)
d$Mass %>% cor.test(d$Tarsus, na.rm=TRUE)
d1 %>%
group_by(BirdID) %>%
summarise (count=length(BirdID))
count(d1, BirdID)
d1 %>%
group_by(BirdID) %>%
summarise (count=length(BirdID)) %>%
count(count)
count(d1, d1$BirdID) %>%
count(count)
model3 <- lm(Wing~as.factor(BirdID), data = d1)
anova(model3)
boxplot(d$Mass~d$Year)
m2 <- lm(d$Mass~as.factor(d$Year))
anova(m2)
summary(m2)
t(model.matrix(m2))
|
0edc3a6441d59615efc6b06efbc456f86eebbc1a
|
4b52e06ea0908f8c7ca6fe102a95a890f642cac9
|
/plot1.R
|
9e5a6be8d698b460e3c8a30fa4fa52873ed7da31
|
[] |
no_license
|
shijieli123456/assignment-1
|
7d1f3040455ac9c7be14267bc234d68446366500
|
79f97543ea28dedd4ce614d7236ce9b7b555df4d
|
refs/heads/master
| 2020-03-23T23:43:10.935394
| 2018-07-25T05:24:55
| 2018-07-25T05:24:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power saved as plot1.png.
# Fields are semicolon-separated and "?" marks missing values; the
# skip/nrow pair presumably selects a fixed two-day window of the raw
# file (2880 minute-rows) -- TODO confirm against the source data.
df <- read.table("h.txt",sep = ";",skip = 66637, nrow = 2880, na.strings = "?")
png(filename = "plot1.png",width = 480,height = 480)
hist(df$V3,12,col="red",main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
# Close the PNG device so the file is flushed to disk (missing in the
# original, which could leave plot1.png incomplete).
dev.off()
|
e9c868bf4a471fbd25a1e00ef07b5f466ad2e4a0
|
8f2e2dd58fa00eb39e0a92ec5f84050ab5ac553b
|
/Background/compare_cluster.R
|
dbcbab9d0a4480678d93f4bbd7dd2afd2fdbe4b0
|
[] |
no_license
|
luisrei/explore-IIEEC
|
06dc148906be328bd573a35262cfa6b143f23daa
|
b07f609727d4de3d898c183279bc383ca119f1a2
|
refs/heads/master
| 2021-09-07T18:40:15.877299
| 2018-02-27T11:58:39
| 2018-02-27T11:58:39
| 116,312,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
compare_cluster.R
|
#######################################
#
# Compare different clustering algorithms
#
# Author: Luis B. Rei
# Created: 26/12/2017
#
#######################################
# Clear workspace
rm(list=ls())
graphics.off()
library(clValid)
library(data.table)
# Iris data set
df = scale(iris[, -5])
####################
# Internal measures
####################
# Compute clValid
clmethods <- c("hierarchical","kmeans","pam")
intern <- clValid(df, nClust = 2:5,
clMethods = clmethods, validation = "internal")
# Summary
summary(intern)
####################
# Stability measures
####################
stab <- clValid(df, nClust = 2:5, clMethods = clmethods,
validation = "stability")
# Display only optimal Scores
summary(stab)
|
dfbef73a30ffddd1c02076801fc541a10926f2b3
|
14a13ec082413638cd4012a5372039256d366c3d
|
/man/box_setwd.Rd
|
48a2bc6120dedd4bf3d4f59fc97a2f6216b6d1f1
|
[
"MIT"
] |
permissive
|
fxcebx/boxr
|
7ebbd6848b74af83af724fc694a4d365eb03b6c1
|
8bda9d00671df55ef1cbd39ff957dd0d47956c6e
|
refs/heads/master
| 2020-12-24T12:33:00.295757
| 2015-03-19T19:30:31
| 2015-03-19T19:30:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 375
|
rd
|
box_setwd.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/boxr_misc.R
\name{box_setwd}
\alias{box_getwd}
\alias{box_setwd}
\title{set directory}
\usage{
box_setwd(dir_id)
box_getwd()
}
\arguments{
\item{dir_id}{The box.com id for the folder that you'd like to query}
}
\value{
Nothing. Used for its side-effects.
}
\description{
set directory
}
|
53218d04f28867b79c7c99fd28239bbb8cc885ee
|
8c38c1f61f53bf4162cbfe81e29402dbdc7ed5fe
|
/R/getpars.R
|
6f26ff853d97d176f17709609bbb45695fa80cf9
|
[] |
no_license
|
cran/depmixS4
|
4d55759581c91cedd59cb842ec40c8f48f4170ab
|
e1a137a5e49b2322dad48b4118b36af6496e60e4
|
refs/heads/master
| 2021-07-08T09:59:10.761902
| 2021-05-12T11:12:20
| 2021-05-12T11:12:20
| 17,695,463
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
getpars.R
|
setMethod("getpars","mix",
function(object,which="pars",...) {
parameters <- getpars(object@prior,which=which)
for(i in 1:object@nstates) {
for(j in 1:object@nresp) {
parameters <- c(parameters,getpars(object@response[[i]][[j]],which=which))
}
}
return(parameters)
}
)
setMethod("getpars","depmix",
function(object,which="pars",...) {
parameters <- getpars(object@prior,which=which)
for(i in 1:object@nstates) {
parameters <- c(parameters,getpars(object@transition[[i]],which=which))
}
for(i in 1:object@nstates) {
for(j in 1:object@nresp) {
parameters <- c(parameters,getpars(object@response[[i]][[j]],which=which))
}
}
return(parameters)
}
)
|
bb8903aee65deae2f825f1edc9fe11aa7607025b
|
d0e56294b9557fa2283f457ec05e197a924e2bd9
|
/ACLS/man/GD.Rd
|
a5fa6389e2860c23c5ee89bb5da2e24e1c4d86a3
|
[] |
no_license
|
rruimao/ACLS
|
33d4bfe2678daee3d034fc713f54c5b722244008
|
35ccd9efa983416b9f7264d58d47028f56e0078a
|
refs/heads/main
| 2023-06-10T21:52:01.383348
| 2021-07-09T06:53:41
| 2021-07-09T06:53:41
| 305,150,547
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
rd
|
GD.Rd
|
\name{GD}
\alias{GD}
\title{Steepest Gradient Descent Approximation Method}
\usage{
GD<-function(beta_0,tau,X,Y)
}
\description{
The steepest gradient descent approximation method finds an approximation of the exact step size that minimizes the loss function, and uses gradient descent to obtain the estimate
}
\arguments{
\item{beta_0}{The initial of the coefficient.}
\item{X}{A data frame of explanatory variables. Intercept includes.}
\item{Y}{Response Variable.}
\item{tau}{Adaptive Robustification Parameter. Could be a constant or a function of other parameter, for example, a function of sample size.}
\item{eta_0}{The initial step size.}
\item{alpha}{Inflation factor.}
}
\examples{
n<-50
d<-5
a=matrix(data = rnorm(1, 0, 1), nrow = n, ncol = d)
x_0<-matrix(1L,nrow=n,ncol=1)
X=cbind(x_0,a)
beta_true<-c(0,3,4,1,2,0)
eps<-matrix(rnorm(n));
#Generate response Y using true coefficient beta_true
Y<-X %*% beta_true+eps
tau<-sqrt(n)/log(log(n))
beta_0<-c(0,rnorm(5))
beta<-GD(beta_0,tau,X,Y)
}
|
d2a0bc49cbf11203802be0d5b47a2647a1b28932
|
fb1d9037123f5557c05de959555756afa601305b
|
/R/helpers.R
|
65c1ba0a27c956d92c69f3a88c29b32260fa1833
|
[] |
no_license
|
cran/manymodelr
|
00b7d0b958c71d10182d9fa07989cfa9e277042f
|
c1512ef3ad8c304ba7e1faa848c50a952bfa5496
|
refs/heads/master
| 2021-11-24T09:21:00.420042
| 2021-11-15T08:20:09
| 2021-11-15T08:20:09
| 173,755,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
helpers.R
|
# 'These are helper functions that should never have been exported
#' @title Convenience functions for use with get_exponent
#' @param n Power to which a value should be raised
#' @details This is a function factory that creates an exponent from a given value of n
#' @return Returns n to the power of n
#' @keywords internal
#' @noRd
force_exponent <- function(n) {
  # Evaluate n eagerly so the returned closure is pinned to the value at
  # creation time (standard function-factory pattern).
  force(n)
  function(x) x^n
}
#' @param y A numeric value whose exponent is required
#' @param x The power to which y is to be raised
#' @return y to the power of x.
#' @keywords internal
#' @noRd
# Raise y to the power x, validating that both arguments are supplied and
# numeric. Equivalent to the original force_exponent(x)(y) round-trip but
# without the needless closure allocation.
# @param y numeric value to be raised
# @param x numeric power
# @return y^x (vectorized, like the original, via R's `^` operator)
make_exponent <- function(y = NULL, x = NULL) {
  if (any(is.null(x), is.null(y))) stop("Both x and y should be supplied. Please see the docs for details")
  if (any(!is.numeric(y), !is.numeric(x))) stop("Only numerics are supported")
  y^x
}
# skip tests on old releases
# Skip a testthat test when running on an R release older than `version`.
# Fixes from the original: a caller-supplied `msg` was silently
# overwritten; `<=` skipped on exactly `version` even though the message
# said that release was supported; the message hard-coded "3.6.3"
# instead of using the `version` argument.
skip_on_oldrel <- function(version = "3.6.3", msg = NULL) {
  current_version <- utils::packageVersion("base")
  if (current_version < version) {
    if (is.null(msg)) {
      msg <- paste("R version", current_version,
                   "not supported. Please upgrade to R>=", version)
    }
    testthat::skip(msg)
  }
}
#' Drop all non-numeric columns from a data.frame
#' @param df A data.frame; columns that are not numeric are removed
#' @examples
#' drop_non_numeric(data.frame(A=1:2, B=c("A", "B")))
#' @export
drop_non_numeric <- function(df){
  UseMethod("drop_non_numeric")
}
#' @export
drop_non_numeric.data.frame <- function(df){
  # Build a logical column mask (TRUE for numeric columns) and subset;
  # equivalent to Filter(is.numeric, df) but with an explicit typed check.
  df[vapply(df, is.numeric, logical(1))]
}
|
aeba3eb60da2067fc910d098d039c8cb75701e58
|
9e459436584ee6f94bc46d145f334de411f3debe
|
/R/readBootstrap.R
|
3de1f764903a32b838a1b0d0c90b113a3c86d7e9
|
[] |
no_license
|
NicWayand/IceCastV2
|
8d5ad041f7a10510b8be1dd745b1fd1e4bf3da24
|
8f91a2b2f56b8a131099dadbf6a930c293a7b238
|
refs/heads/master
| 2020-03-21T06:26:26.426120
| 2018-06-21T20:15:38
| 2018-06-21T20:15:38
| 138,219,718
| 1
| 0
| null | 2018-06-21T20:43:49
| 2018-06-21T20:43:49
| null |
UTF-8
|
R
| false
| false
| 3,772
|
r
|
readBootstrap.R
|
#' Read in individual binary files of monthly observation data. The observations are from the monthly sea ice concentration
#' obtained from the National Aeronautics and Space Administration (NASA) satellites Nimbus-7
#' SMMR and DMSP SSM/I-SSMIS and processed by the bootstrap algorithm. The results
#' are distributed by the National Snow and Ice Data Center (Comiso 2000, updated 2015).
#' Functions assume file name conventions are the same as used by NSIDC.
#' @title Read individual bootstrap binary file
#' @references
#' Bootstrap sea ice concentration:
#'
#' Comiso, J., 2000, updated 2015: Bootstrap sea ice concentrations from Nimbus-7 SMMR and
#' DMSP SSM/I-SSMIS. version 2. \url{http://nsidc.org/data/nsidc-0079}
#' @param fileName File name for binary bootstrap data
#' @param nX dimension in the x (defaults to value for Northern Polar stereographic grid: 304)
#' @param nY dimension in the y (defaults to value for Northern Polar stereographic grid: 448)
#' @return numeric vector of concentrations
#' @importFrom methods is
#' @export
#' @examples
#' \dontrun{
#' #fileName should be the binary file
#' rawData <- readBootstrap(fileName)
#'}
# Read one NSIDC bootstrap binary file of monthly sea ice concentrations.
# Values are stored as little-endian 2-byte integers in tenths, so the
# raw integers are divided by 10. Returns a numeric vector of length nX*nY.
readBootstrap <- function(fileName, nX = 304, nY = 448) {
  to.read <- file(fileName, "rb")
  # Guarantee the connection is closed even if readBin() errors part-way
  # (the original leaked the connection on error).
  on.exit(close(to.read), add = TRUE)
  readBin(to.read, integer(), n = nX * nY, size = 2, endian = "little") / 10
}
#' Function to process monthly bootstrap data over multiple years. The observations are from the monthly sea ice concentration
#' obtained from the National Aeronautics and Space Administration (NASA) satellites Nimbus-7
#' SMMR and DMSP SSM/I-SSMIS and processed by the bootstrap algorithm. The results
#' are distributed by the National Snow and Ice Data Center (Comiso 2000, updated 2015).
#' Functions assume file name conventions are the same as used by NSIDC.
#' @title Read in a set of bootstrap observations over a set of year
#' @references
#' Bootstrap sea ice concentration:
#'
#' Comiso, J., 2000, updated 2015: Bootstrap sea ice concentrations from Nimbus-7 SMMR and
#' DMSP SSM/I-SSMIS. version 2. \url{http://nsidc.org/data/nsidc-0079}
#' @param startYear first year to read in
#' @param endYear lastYear to read in
#' @param fileFolder Folder in which binary files are stored
#' @param version Either 2 or 3 indicating which version of the bootstrap data you are using
#' @param nX longitude dimension
#' @param nY latitude dimension
#' @details Raw binary files for 2012-2013 are included in the package
#' @export
#' @return Bootstrap observations sorted into array of dimension: year x month x lon x lat
#' @examples
#' \dontrun{
#' #myFilePath should be a file path where the 1983 binary files are stored
#' observedDemo <- readMonthlyBS(startYear = 1983, endYear = 1983, fileFolder = myFilePath)
#' }
readMonthlyBS <- function(startYear, endYear, fileFolder, version, nX = 304, nY = 448) {
  years <- startYear:endYear; nYears <- length(years)
  # Output array indexed as year x month x lon x lat.
  obs <- array(dim = c(nYears, 12, nX, nY))
  # Only bootstrap versions 2 and 3 are supported.
  stopifnot(version == 2 || version == 3)
  for (i in 1:nYears) {
    for (j in 1:12) {
      if (version == 2) { #no missing data in V2
        # Glob over the platform field of the NSIDC file name,
        # e.g. bt_YYYYMM_*_v02_n.bin.
        fileName <- Sys.glob(paste(fileFolder, sprintf('bt_%i%02d_*_v02_n.bin', years[i], j), sep = ""))
        # nY:1 reverses the latitude axis when storing (presumably to
        # flip the grid orientation -- confirm against the grid convention).
        obs[i, j, ,nY:1] <- readBootstrap(fileName)
      } else if (version == 3 & !(j == 12 & years[i] == 1987) & !(j == 1 & years[i] == 1988)) {
        #missing Dec 1987 and Jan 1988 in V3
        #"major data gap in the SSM/I data occurs from 03 December 1987 to 13 January 1988"
        fileName <- Sys.glob(paste(fileFolder, sprintf('bt_%i%02d_*_v03_n.bin', years[i], j), sep = ""))
        obs[i, j, ,nY:1] <- readBootstrap(fileName)
      } else {
        # Version-3 data-gap months: leave the slice as NA.
        stopifnot(version == 3)
        obs[i, j, ,nY:1] <- NA
      }
    }
  }
  return(obs)
}
|
a1b0b1d15587341b3cb0a9d2bf73307267208714
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Renext/examples/CV2.Rd.R
|
ad3776b61ff0c579ab6d68bb80abdce13f5a8843
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
CV2.Rd.R
|
library(Renext)
### Name: CV2
### Title: Squared Coefficient of Variation
### Aliases: CV2
### ** Examples
n <- 30; nSamp <- 500
X <- matrix(rexp(n * nSamp), nrow= nSamp, ncol = n)
W <- CV2(X)
plot(density(W), main = "CV2 of exponential samples")
|
a91bdbd78dc7d6adacaf56c397ffc5e896f2adf2
|
326b9a1c197ce6638ecdf9647e74efd4397e2fde
|
/app.R
|
fe96d19b69afa2a56466d0773dc8c75b22853d70
|
[] |
no_license
|
nobaldhruw/shinyModules
|
845a77a43d2f292c30a8af19f3eaff70e16b2b4c
|
d5dfd9a29245f9ad29d3d605ed482bf7aee87a99
|
refs/heads/master
| 2023-05-31T09:07:08.909974
| 2021-07-01T18:02:38
| 2021-07-01T18:02:38
| 382,261,008
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,129
|
r
|
app.R
|
library(shiny)
library(shinydashboard)
header <- dashboardHeader(title="Shiny modules")
sidebar <- dashboardSidebar(
sidebarMenu(
menuItem("Histogram", tabName = "tab_histogram")
)
)
body <- dashboardBody(
tabItems(
tabItem(
tabName = "tab_histogram",
fluidRow(
box(
title = "Input",
status = "primary",
width = 3,
solidHeader = TRUE,
sliderInput("num","Choose a number", min=10, max=100, value = 50, step = 5)
),
box(
title = "Output",
status = "primary",
width = 9,
solidHeader = TRUE,
plotOutput("plot_histogram")
)
)
)
)
)
ui <- dashboardPage(
header = header,
sidebar = sidebar,
body = body
)
server <- function(input, output){
data <- reactive({ rnorm(input$num)})
output$plot_histogram <- renderPlot({
hist(data())
})
}
shinyApp(ui, server)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.