blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
93ea88f5a3b86e5233956d332e1573832175fb2e
|
dc91ac28c59fb0b32dcc37b7eb9ad0793a3ad477
|
/man/linegraph.Rd
|
69588f014debb13f01b963863ac8d3d307660923
|
[] |
no_license
|
cran/RnavGraph
|
8b28b07c627898a52400614006aadb99ded99c7c
|
9fabc38aa5aa107a4b4f4e1aa8e16df12a8bb7f8
|
refs/heads/master
| 2021-01-25T06:05:38.377635
| 2014-10-21T00:00:00
| 2014-10-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,203
|
rd
|
linegraph.Rd
|
\name{linegraph}
\alias{linegraph}
\title{Linegraph of a graph}
\description{
(From Wikipedia) The line graph L(G) of an undirected graph G is another graph L(G) that represents the adjacencies between edges of G. By definition, each vertex of L(G) represents an edge of G, and two vertices of L(G) are adjacent if and only if their corresponding edges share a common endpoint ("are adjacent") in G.
}
\usage{
linegraph(graph, sep = ":")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{graph}{ Undirected graph of class \code{graph}.
}
\item{sep}{
Separates the node names of G in the node names of the new graph L(G).}
}
%\details{}
\value{
graphNEL object.
}
%\references{% ~put references to the literature/web site here ~}
\author{
Adrian Waddell and R. Wayne Oldford
}
%\note{%% ~~further notes~~}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{navGraph}}, \code{\link{completegraph}},
\code{\link{newgraph}}
}
\examples{
G <- completegraph(LETTERS[1:4])
LG <- linegraph(G, sep = "xx")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ graphs }
|
0fb0dc904cb2d5b1b7182270f5213d444aedeb90
|
282883571acb7e92e3c82034f1ddc61e1ca74b9a
|
/R/make_monthly_stack.R
|
50d0f36231d0114cc9a7ff129d7e274588f01048
|
[] |
no_license
|
Tomhigg/TamsatTools
|
69f767a9b7d5271ce2859a0afd154b22d4914b50
|
6a8e8cc17bf6b138d3b075f72e1fce95d826faed
|
refs/heads/master
| 2021-01-13T15:12:09.246735
| 2017-02-07T12:31:40
| 2017-02-07T12:31:40
| 76,252,562
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,253
|
r
|
make_monthly_stack.R
|
#' Make complete one-month time step raster time series
#'
#' Read and stack raster layers, fill missing months with predefined averages, will crop to extent of fill_with
#'
#' @param download_folder Folder containing the downloaded monthly estimate from tamsat_download_all()
#' @param fill_with raster brick output from monthly_summary() to fill missing months
#' @param years Years as integer(s) for which statistics will be calculated
#
monthly_stack <- function(download_folder, years, fill_with) {
  print("listing files")
  # NOTE: "." is a regex wildcard here, so this matches any file whose name
  # contains "nc" preceded by any character, not only *.nc files.
  all_files <- list.files(path = download_folder, pattern = ".nc", recursive = TRUE, full.names = TRUE)
  print("blank stack")
  all_months <- raster::stack()
  for (i in years) {
    print(i)
    # Files for this year; assumes the 4-digit year appears in each path.
    year_list <- grep(pattern = i, x = all_files, value = TRUE)
    # list.files() returns sorted paths, so layers are stacked in ascending
    # month order within the year.
    year_stack <- raster::stack(sapply(year_list, FUN = raster::raster))
    # Crop to the extent of the fill raster so fill layers align spatially.
    year_stack <- raster::crop(x = year_stack, y = fill_with)
    if (raster::nlayers(year_stack) < 12) {
      # Two-digit month code extracted from the file name
      # (characters 5 and 4 counting back from the end).
      present_months <- substr(x = year_list, start = nchar(year_list) - 4, stop = nchar(year_list) - 3)
      paths.df <- data.frame(Months = present_months)
      # Data frame of all twelve month codes
      months.df <- data.frame(Months = sprintf("%02d", 1:12))
      # dplyr::anti_join returns the months with no downloaded file
      missing_months <- dplyr::anti_join(months.df, paths.df, by = "Months")
      missing_months <- as.numeric(as.character(missing_months$Months))
      # Pull the long-term average layers for the missing months
      missing_months_raster <- raster::subset(x = fill_with, subset = missing_months)
      # Append the fill layers to the stack
      year_stack <- raster::stack(year_stack, missing_months_raster)
      # BUG FIX: the fill layers are appended AFTER the downloaded months, so
      # the positional month names assigned below would label the wrong
      # layers. Reorder the layers into calendar order first.
      layer_order <- order(c(as.numeric(present_months), missing_months))
      year_stack <- raster::subset(year_stack, layer_order)
    }
    # Label each layer "<year>_<month>"
    Months <- paste0(i, "_", sprintf("%02d", 1:12))
    names(year_stack) <- Months
    all_months <- raster::stack(all_months, year_stack)
  }
  return(all_months)
}
|
b38faef17cf4dc3a8d3276b3c34bf94e0060a824
|
208ee466968b34e689f6803ca5fdf26ddae15a78
|
/man/rpaired.contaminated.Rd
|
5d1843d1d8e9c4aad8480e9e8cb802c5c7f2b965
|
[] |
no_license
|
cran/PairedData
|
3a95b91071e7dd31a74171cedcbda9251889d715
|
7076f81a19dde00a613727ea2a6f7679b782f9eb
|
refs/heads/master
| 2020-05-16T17:05:29.893811
| 2018-06-02T21:57:15
| 2018-06-02T21:57:15
| 17,681,475
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,128
|
rd
|
rpaired.contaminated.Rd
|
\name{rpaired.contaminated}
\alias{rpaired.contaminated}
\title{Simulate paired samples}
\description{
Simulate paired data with a given correlation (Kendall's tau=(2/pi)arcsine(r)) and
marginals being contaminated normal distributions: (1-eps)*F(x)+eps*F(x/K) where F is the cumulative standard normal distribution, eps the percentage of contamination and
K a scale parameter. Moreover, this marginal can be multiplied by another scale parameter sigma but usually sigma=1.
}
\usage{
rpaired.contaminated(n, d1 = c(0.1, 10, 1), d2 = c(0.1, 10, 1), r = 0.5)
}
\arguments{
\item{n}{
sample size.
}
\item{d1}{
vector of 3 parameters for the first contaminated normal distribution (eps,K,sigma).
}
\item{d2}{
vector of 3 parameters for the second contaminated normal distribution.
}
\item{r}{
correlation.
}
}
\value{
An object of class paired.
}
\references{
Grambsch, P.M. (1994) Simple robust tests for scale differences in paired data. Biometrika, 81, 359-372.
}
\author{Stephane CHAMPELY}
\seealso{\code{\link{rpaired.gld}}}
\examples{
rpaired.contaminated(n=30,r=0.25)
}
|
d30c8e8278254bc2df70629ecd1f25c27228a2a1
|
9cf19f034db895c65f258f76da35cc4e5a9d5076
|
/percrptron/perceptron.R
|
81d5a89dd6eb51fdef534ae09b4ab55639f9b769
|
[] |
no_license
|
kurobaneHITOMI/exercise-in-university
|
454d918cb7c25ede3acba4f176b5ae2370d121dc
|
c7872a75a43144ecb5d65a04d75be61924b9c2c4
|
refs/heads/master
| 2020-03-13T04:50:18.617439
| 2018-07-13T16:17:26
| 2018-07-13T16:17:26
| 130,970,878
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
perceptron.R
|
# Fixed 4x2 projection matrix mapping the four iris measurements onto two
# discriminant axes -- presumably coefficients from a prior LDA fit on the
# iris data; TODO confirm their origin.
ld <- cbind(c(0.8293776,1.5344731,-2.2012117,-2.8104603),c(0.02410215,2.16452123,-0.93192121,2.83918785))
# Project the iris feature matrix onto the two LD axes.
iris.data <- as.matrix(subset(iris,select=c(1:4)))
iris.data <- iris.data%*%ld
# Binary labels: -1 for the first 50 rows (setosa), +1 for the other 100.
iris.data <- cbind(iris.data,c(rep(-1,50),rep(1,100)))
colnames(iris.data) <- c("LD1","LD2","Species")
# Shuffle all 150 rows; no seed is set, so results differ between runs.
iris.data = iris.data[sample(1:150,150,replace = F),1:3]
# First 100 shuffled rows train, remaining 50 test.
iris.train <- iris.data[1:100,]
iris.test <- iris.data[101:150,]
w <- c(0,0)
#train perceptron
# Single pass accumulating label * feature for every training row.
# NOTE(review): this updates on every sample rather than only on
# misclassified ones as the classic perceptron rule does -- confirm intended.
for(i in 1:100)
{
  w <- w + iris.train[i,3]*iris.train[i,1:2]
}
#train data
plot(0,xlim = c(-12,10),ylim = c(2,12))
points(iris.train[which(iris.train[,3]==-1),1:2],col="red")
points(iris.train[which(iris.train[,3]==1),1:2],col="blue")
x <- seq(-10,10,by=0.1)
# NOTE(review): the separating boundary of w.x = 0 is the straight line
# y = -w[1]*x/w[2]; the exp() here draws a curve instead -- verify intent.
y <- exp(w[1]*x/(-w[2]))
points(x,y,type = "l",col="black")
#test data
# Classify each test row by sign(w.x); s[i] is 1 for a correct prediction.
s <- c(1:50)
for(i in 1:50)
{
  s[i] <- as.numeric(sign(t(w)%*%iris.test[i,1:2]))
  s[i] <- ifelse(s[i]==iris.test[i,3],1,0)
}
# Test-set accuracy (printed when run interactively).
sum(s)/50
points(iris.test[which(iris.test[,3]==-1),1:2],col="orange")
points(iris.test[which(iris.test[,3]==1),1:2],col="purple")
|
1c924ff390f2c93bfe19baebdb7e244a54442256
|
686800c5ddb65505335f30ded6fcf96a6afe66e2
|
/R/Survdiff.R
|
d0385f5f20837eb48863e69bfa4e823d34a7df86
|
[] |
no_license
|
cran/RcmdrPlugin.survival
|
2b9e889c64e788a4fb0e6bb96b89293d0c8b7bbd
|
86f846e93563d94ceb23c89ac08eb8ae129f92a4
|
refs/heads/master
| 2022-09-25T09:24:11.554730
| 2022-09-20T16:00:02
| 2022-09-20T16:00:02
| 17,693,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,873
|
r
|
Survdiff.R
|
# last modified 2011-08-03 by J. Fox
# Rcmdr dialog that builds and runs a survival::survdiff() call comparing
# survival curves across one or more strata of the active data set.
# All helpers (getDialog, initializeDialog, tkframe, ...) come from the
# Rcmdr / tcltk environment this plugin runs inside.
Survdiff <- function(){
# require(survival)
# Remembered dialog state: time variable, event indicator, strata, rho, subset.
defaults <- list(time1=NULL, event=NULL, strata=NULL, rho="0", subset=NULL)
dialog.values <- getDialog("Survdiff", defaults)
if (!activeDataSetP()) return()
currentModel <- FALSE
initializeDialog(title=gettext("Compare Survival Functions", domain="R-RcmdrPlugin.survival"))
# OK-button callback: validate the three selections, persist the dialog
# state, then assemble and execute the survdiff() command string.
onOK <- function(){
time <- getSelection(timeBox)
if (length(time) == 1) time1 <- time
else {
errorCondition(recall=Survdiff, message=gettext("You must select a time-to-event variable.",
domain="R-RcmdrPlugin.survival"))
return()
}
event <- getSelection(eventBox)
if (length(event) == 0) {
errorCondition(recall=Survdiff, message=gettext("You must select an event indicator.",
domain="R-RcmdrPlugin.survival"))
return()
}
strata <- getSelection(strataBox)
if (length(strata) == 0) {
errorCondition(recall=Survdiff, message=gettext("You must select strata.",
domain="R-RcmdrPlugin.survival"))
return()
}
rho <- tclvalue(rhoValue)
subset <- tclvalue(subsetVariable)
# Save selections so the dialog reopens with the same values.
putDialog("Survdiff", list(
time1=time1,
event=event, strata=strata, rho=rho, subset=subset
))
closeDialog()
# Empty / placeholder subset text means no subset argument at all.
if (trim.blanks(subset) == gettext("<all valid cases>", domain="R-RcmdrPlugin.survival")
|| trim.blanks(subset) == ""){
subset <- ""
}
else{
subset <- paste(", subset=", subset, sep="")
}
# Build "survdiff(Surv(time, event) ~ s1 + s2, rho=..., data=..., subset=...)"
formula <- paste("Surv(", time1, ",", event, ")", sep="")
formula <- paste(formula, " ~ ", paste(strata, collapse=" + "), sep="")
command <- paste("survdiff(", formula, ", rho=", rho,
', data=', ActiveDataSet(), subset, ")", sep="")
doItAndPrint(command)
insertRmdSection(paste0("Compare Survival Functions: ", formula))
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="survdiff", reset="Survdiff")
# Build the widgets: variable list boxes for time, event, and strata,
# pre-selecting either the saved dialog values or data-set attributes.
survFrame <- tkframe(top)
.activeDataSet <- ActiveDataSet()
.numeric <- NumericOrDate()
.factors <- Factors()
# Initial selections are 0-based indices into the respective variable lists.
time1 <- if(!is.null(dialog.values$time1)) dialog.values$time1 else eval(parse(text=paste('attr(', .activeDataSet, ', "time1")', sep="")))
time1 <- if (!is.null(time1)) which(time1 == .numeric) - 1
event <- if(!is.null(dialog.values$event)) dialog.values$event else eval(parse(text=paste('attr(', .activeDataSet, ', "event")', sep="")))
event <- if (!is.null(event)) which(event == Numeric()) - 1
strata <- if(!is.null(dialog.values$strata)) dialog.values$strata else eval(parse(text=paste('attr(', .activeDataSet, ', "strata")', sep="")))
strata <- if (!is.null(strata)) which(is.element(.factors, strata)) - 1 else -1
timeBox <- variableListBox(survFrame, NumericOrDate(),
title=gettext("Time to event\n(select one)", domain="R-RcmdrPlugin.survival"),
initialSelection=time1)
eventBox <- variableListBox(survFrame, Numeric(),
title=gettext("Event indicator\n(select one)", domain="R-RcmdrPlugin.survival"),
initialSelection=event)
strataBox <- variableListBox(survFrame, Factors(),
title=gettext("Strata\n(select one or more)", domain="R-RcmdrPlugin.survival"),
selectmode="multiple", initialSelection=strata)
# Slider for survdiff's rho parameter (0 = log-rank, 1 = Peto-Peto).
rhoFrame <- tkframe(top)
rhoValue <- tclVar(dialog.values$rho)
rhoSlider <- tkscale(rhoFrame, from=0, to=1, showvalue=TRUE, variable=rhoValue,
resolution=0.1, orient="horizontal")
# modelFormula(hasLhs=FALSE)
subsetBox(subset.expression=dialog.values$subset)
# Lay out the widgets in the dialog grid.
tkgrid(getFrame(timeBox), labelRcmdr(survFrame, text=" "), getFrame(eventBox), sticky="sw")
tkgrid(labelRcmdr(survFrame, text=""))
tkgrid(getFrame(strataBox), sticky="nw")
tkgrid(survFrame, sticky="nw")
tkgrid(labelRcmdr(rhoFrame, text="rho", foreground="blue"), rhoSlider, sticky="sw")
tkgrid(rhoFrame, sticky="nw")
tkgrid(labelRcmdr(top, text=""))
tkgrid(subsetFrame, sticky="w")
tkgrid(labelRcmdr(top, text=""))
tkgrid(buttonsFrame, sticky="w")
dialogSuffix(rows=9, columns=1)
}
|
7b33001ad2602e54a5450bbe7a1391de17dfa4b2
|
b8d865f013c357dcef54e16168b08441f282c5f7
|
/R/svydbrepdesign.R
|
e89765600dcd4fec5c77c050f9aa3c8812b2bd9d
|
[] |
no_license
|
anhnguyendepocen/svydb
|
e8289dcf8879780dd6fa69628bdc018e8077a86d
|
06dc1a1fe42c0b4d030c6430316f46ab6226b49a
|
refs/heads/master
| 2022-02-24T16:25:54.908700
| 2019-10-05T05:17:41
| 2019-10-05T05:17:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,648
|
r
|
svydbrepdesign.R
|
# R6 class holding a replicate-weight survey design: the data (data frame or
# dbplyr table), the strata / cluster / weight column names, and the replicate
# weight columns. Instantiated via the exported svydbrepdesign() wrapper.
makesvydbrepdesign <- R6Class("svydb.repdesign",
public = list(
# Untouched copy of the full input data; addx() restores columns from it.
dataOg = NULL,
# Working data, possibly narrowed to selected columns by setx().
data = NULL,
# Variable name(s) most recently selected via setx().
vars = NULL,
# Strata column name ("st" when none was supplied).
st = NULL,
# Cluster id column name ("id" when none was supplied).
id = NULL,
# Sampling weight column name.
wt = NULL,
# Replicate weight column names (those matching the repwt regex).
repwt = NULL,
# Scaling constant for replicate variance estimation.
scale = NULL,
# Registry of column names in use; names$logged tracks claimed names.
names = list(
),
# Validate inputs and record design metadata. st/id/wt arrive as quosures
# (made by svydbrepdesign()); repwt is a regular expression matched against
# the data's column names.
initialize = function(vars = NA, st = NA, id = NA,
wt = NA, repwt = NULL, scale, data){
if(quo_is_null(wt)){
stop("Please provide sampling weights")
}else{
self$wt = as_label(wt)
}
if(is.null(repwt)){
stop("Please provide replicate weights")
}else{
self$repwt = grep(pattern = repwt, colnames(data), value = T)
}
# Missing strata / cluster ids get constant / row-number placeholder columns.
if(quo_is_null(st)){
data = data %>% mutate(st = 1)
self$st = "st"
}else{
self$st = as_label(st)
}
if(quo_is_null(id)){
data = data %>% mutate(id = row_number())
self$id = "id"
}else{
self$id = as_label(id)
}
self$scale = scale
self$data = data %>% select(everything())
# NOTE(review): `<<-` on an R6 field behaves like `<-` here -- confirm
# the superassignment is intentional.
self$dataOg <<- self$data
},
# Narrow the working data to the variable(s) of interest (formula or bare
# column name) plus the design columns, dropping rows with missing values.
setx = function(x){
tc = tryCatch(class(x), error = function(e) e)
if("formula" %in% tc){
x = all.vars(x)
self$data <<- self$data %>%
select(!!x, st = self$st, id = self$id, self$wt, self$repwt) %>%
filter_all(any_vars(!is.na(.)))
self$vars <<- x
}else{
x = enquo(x)
self$data <<- self$data %>%
select(!!x, st = self$st, id = self$id, self$wt, self$repwt) %>%
filter(!is.na(!!x))
self$vars <<- as.character(x)
}
self$names[["logged"]] = c(self$st, self$id, self$wt, self$repwt, "m_h")
},
# Re-attach a column from the original data alongside the current columns.
addx = function(x){
l = enquo(x)
r = syms(colnames(self$data))
self$data = self$dataOg %>%
select(!!l, !!!r)
},
# Total of the sampling weights over the working data.
getwt = function(){
self$data %>% select(self$wt) %>% summarise_all(sum) %>%
pull()
},
# Number of distinct clusters (m_h) per stratum.
getmh = function(){
self$data %>% group_by(!!sym(self$st)) %>%
summarise(m_h = n_distinct(!!sym(self$id)))
},
# Return a clone of the design filtered either by bare logical expressions
# (logical = TRUE) or by a string parsed as an expression.
subset = function(..., logical = T){
d = self$clone()
if(logical == T){
d$data = d$data %>% filter(...)
}else{
d$data = d$data %>% filter(!!parse_expr(...))
}
return(d)
},
# Keep only rows in [from, to] via the db_selectRows helper.
# NOTE(review): dataSub is not declared in the public field list above;
# R6 objects are locked by default, so this assignment may fail -- verify.
subset_rows = function(from, to){
self$dataSub = self$data %>% db_selectRows(., from = from, to = to)
},
# Record column names under `name`, skipping names already logged
# (force = TRUE un-logs them first so they can be re-registered).
storename = function(name, obj, force = FALSE){
if(force == TRUE){
self$names$logged =
self$names$logged[-which(self$names$logged %in% obj)]
}
if(!all(obj %in% self$names$logged)){
new = setdiff(obj, self$names$logged)
self$names[[name]] = c(new)
self$names$logged = c(self$names$logged, new)
}
},
# Remove column names from both the named registry entry and the log.
removename = function(name, obj){
self$names$logged =
self$names$logged[-which(self$names$logged %in% obj)]
self$names[[name]] =
(self$names[[name]])[-which(self$names[[name]] %in% obj)]
},
# One-line summary: row count, number of replicate weight sets, scale.
print = function(){
rows = self$data %>% db_nrow()
txt = sprintf("svydb.repdesign, %s observation(s), %s sets of replicate weights, scale = %s", rows, length(self$repwt), self$scale)
cat(txt)
}
)
)
#' Survey replicate design
#'
#' @param st Column name of the strata variable, or \code{NULL} when the
#'   design has no strata.
#' @param id Column name of the cluster variable, or \code{NULL} when the
#'   design has no clusters.
#' @param wt Column name of the sampling weights.
#' @param repwt A regular expression matching the replicate weight columns.
#' @param scale Scaling constant used for variance estimation.
#' @param data A data frame or sql table of the survey data set.
#' @description
#' Collects everything needed to compute replicate-based survey statistics
#' into a single design object. Currently \code{\link{svydbreptotal}} and
#' \code{\link{svydbrepmean}} accept this design.
#' @examples
#' data(ss16hde)
#' hde.dbrepsurv = svydbrepdesign(wt = WGTP, repwt="wgtp[0-9]+", scale = 4/80, data = ss16hde)
#' hde.dbrepsurv$subset(BATH == 1)
#' hde.dbrepsurv$clone()
#' # OR with a database connection
#' # library(MonetDBLite)
#' # library(DBI)
#' # library(dbplyr)
#' # con = dbConnect(MonetDBLite())
#' # dbWriteTable(con, "ss16hde", ss16hde)
#' # ss16hde.db = tbl(con, "ss16hde")
#' # hde.dbrepsurv = svydbrepdesign(wt = WGTP, repwt="wgtp[0-9]+", scale = 4/80, data = ss16hde.db)
#' @author Charco Hui
#' @export
svydbrepdesign = function(st = NULL, id = NULL, wt = NULL, repwt = NULL, scale, data){
  # Capture the bare column names as quosures and hand everything to the
  # R6 constructor, returning the new design object.
  makesvydbrepdesign$new(
    st = enquo(st),
    id = enquo(id),
    wt = enquo(wt),
    repwt = repwt,
    scale = scale,
    data = data
  )
}
|
73c0f5e66a654c08a8023e713d272502dc2d85b1
|
a364ac971591832545bd083de5f0054ec538b932
|
/man/find_series.Rd
|
f1fce4f9064e0e99c7e10ba15c96a94e944bd430
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
randall-romero/bccr-r
|
9bd4d5f5119ecbde91d1cd7c7d4a7f99804a4466
|
3dad451c9842ab3adc3eb88096c26aad24d6f75b
|
refs/heads/master
| 2021-05-31T10:33:04.737104
| 2016-05-07T01:03:11
| 2016-05-07T01:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 441
|
rd
|
find_series.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_data.R
\name{find_series}
\alias{find_series}
\title{Find indicators by name}
\usage{
find_series(name)
}
\arguments{
\item{name}{A string text with part of the indicator name}
}
\value{
A data.frame with indicators whose names match the requested value
}
\description{
Find indicators by name
}
\examples{
find_series("ipc")
find_series("agricultura")
}
|
bcfa6cad7e65ff34dc3a7cc8c8c6b157abce8e29
|
efc2cda5525312640065de5c8cd53b208342cbf8
|
/R/geospatial.R
|
eee2343f41e03d6fbafb1dde3b974bf59e10f4e2
|
[
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MLBartley/nabatr
|
68ad577f57d6d37adc2965fd099693df4ba092ee
|
c805012e54081ba45497e294863d4a5f26aaa7bd
|
refs/heads/master
| 2023-04-20T22:11:14.092346
| 2021-04-07T14:40:48
| 2021-04-07T14:40:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,916
|
r
|
geospatial.R
|
#############################################################################
# _ _____ ____ __ ____
# / | / / | / __ )____ _/ /_/ __ \
# / |/ / /| | / __ / __ `/ __/ /_/ /
# / /| / ___ |/ /_/ / /_/ / /_/ _, _/
# /_/ |_/_/ |_/_____/\__,_/\__/_/ |_|
#
# R Tools for accessing and manipulating North American Bat Monitoring data
#
# Written by: Kyle Enns
#
# FILE DESCRIPTION: This file contains functions that incorporate
# geospatial components
#
# USGS DISCLAIMER: This software is in the public domain because it contains
# materials that originally came from the U.S. Geological Survey, an agency
# of the United States Department of Interior. For more information, see the
# [official USGS copyright policy]
# (https://www.usgs.gov/visual-id/credit_usgs.html#copyright/
# "official USGS # copyright policy")
#
# Although this software program has been used by the U.S. Geological Survey
# (USGS), no warranty, expressed or implied, is made by the USGS or the U.S.
# Government as to the accuracy and functioning of the program and related
# program material nor shall the fact of distribution constitute any such
# warranty, and no responsibility is assumed by the USGS in connection
# therewith.
#
# This software is provided "AS IS."
#############################################################################
#' @title Build leaflet map for Acoustic Stationary report in CONUS
#'
#' @import rmarkdown
#' @import leaflet
#' @import htmlwidgets
#' @import htmltools
#'
#' @description
#' Builds a leaflet map using a vector list of grts cells to add to a leaflet map. Shows
#' where the project's grts cells live spatially. Keep in mind that the project_id
#' and all_grts must be in CONUS.
#'
#' @param all_grts Character Vector all grts cell ids found from the survey_df dataframe by running
#' unique(survey_df$grts_cell_id)
#' @param project_df Dataframe output from get_projects()
#' @param project_id Numeric or String a project id
#' @param grts_with_data (optional) Character Vector or NULL
#' @keywords bats, NABat, GQL
#'
#' @export
# Build a leaflet map showing the project's GRTS cells as colored polygons:
# green for cells with survey data, red for cells without.
get_grts_leaflet_map = function(
all_grts,
project_df,
project_id,
grts_with_data = NULL){
# Look up the project's sample frame name to pick the right GRTS lookup table.
project_id_ = project_id
grts_fname = as.character(subset(project_df,
project_df$project_id == project_id_)$sample_frame_short)
# Get grts_fname_df
# (grts_lookup_df is presumably package-internal data keyed by frame name --
# TODO confirm; each entry maps GRTS_ID to corner coordinates.)
grts_fname_df = grts_lookup_df[grts_fname][[1]]
# Create grts_template_df dataframe and merge with grts_fname_df
grts_template_df = data.frame(GRTS_ID = all_grts)
grts_df = plyr::join(grts_template_df, grts_fname_df, by = c('GRTS_ID'), type = "left")
# Creating map with an Imagery layer
m = leaflet() %>% addTiles()
# Loop through all all_grts, create a polygon for each, and add to the leaflet map m
# NOTE(review): `count` is never used after initialization.
count = 0
for (grts_cell in all_grts){
# Setting color to green or red based on if the grts cell has data or not
if (is.null(grts_with_data)){
color_ = '#ff0000'
color_2 = 'red'
}else {
if (grts_cell %in% grts_with_data){
color_ = '#198a00'
color_2 = '#198a00'
} else {
color_ = '#ff0000'
color_2 = 'red'
}
}
# Content for the hover
content = paste0(as.character(grts_cell))
# Content for the popup (on click)
content_popup = paste0('<b style = "color:',color_2,';" >GRTS cell</b>',
'<br> <div style = "color:', color_2, ';" >', content, '</div>')
# Creating lat/lon points for the grts polygon
# Corner fields are "lat,lon" strings; rev() after splitting yields
# c(lon, lat) for each corner.
this_row = subset(grts_df,grts_df$GRTS_ID == grts_cell)
ll1 = as.numeric(rev(as.character(strsplit(as.character(this_row$lowerleft),',')[[1]])))
ll2 = as.numeric(rev(as.character(strsplit(as.character(this_row$upperleft),',')[[1]])))
ll3 = as.numeric(rev(as.character(strsplit(as.character(this_row$upperright),',')[[1]])))
ll4 = as.numeric(rev(as.character(strsplit(as.character(this_row$lowerright),',')[[1]])))
# Close the ring by repeating the first corner.
lngs = c(ll1[1],ll2[1],ll3[1],ll4[1],ll1[1])
lats = c(ll1[2],ll2[2],ll3[2],ll4[2],ll1[2])
# Add this grts polygon to the leaflet map m
m = m %>% addPolygons(lat = lats, lng = lngs, popup = content_popup,
color = color_, weight = 1.5, opacity = 1, group = color_,
label = content,
labelOptions = labelOptions(style = list('font-size' = '14px',
'color' = color_2,
'box-shadow' = '3px 3px rgba(0,0,0,0.25)',
'border-color' = 'rgba(0,0,0,0.5)',
'border-radius' = '5px',
'padding' = '5px 5px 5px 5px')))# padding order is top, right, bottom, left
}
# Add legend to leaflet map
m = m %>% addLegend('bottomright',labels = c('Has survey data'),
colors = c('#198a00'), opacity =1)
# Return the leaflet map
return (m)
}
#' @title Build shapefile from GRTS IDs
#'
#' @import sp
#' @import raster
#' @import rgdal
#' @import plyr
#'
#' @description Builds a grts shapefile from the grts_ids parameter.
#' note: uses rgdal and spatial packages.
#'
#' @param grts_ids Character Vector GRTS Ids
#' @param project_df Dataframe output from get_projects()
#' @param project_id Numeric or String a project id
#' @keywords species, bats, NABat, grts, CONUS
#'
#' @export
# Convert a set of GRTS cell ids into a SpatialPolygonsDataFrame for the
# given project. Thin wrapper: build the polygon coordinate dataframe, then
# promote it to a spatial object.
get_grts_shp = function(
  grts_ids,
  project_id,
  project_df){
  # Polygon corner coordinates (lng/lat/GRTS) for every requested cell.
  polys_df = get_grts_shp_df(grts_ids = grts_ids,
                             project_id = project_id,
                             project_df = project_df)
  # Spatial polygons dataframe built from those coordinates.
  return (get_spdf_from_polys_df(polys_df))
}
#' @title Build polygons dataframe from GRTS IDs
#'
#' @import sp
#' @import raster
#' @import rgdal
#' @import plyr
#'
#' @param grts_ids Character Vector GRTS Ids
#' @param project_id Numeric or String a project id
#' @param project_df Dataframe output from get_projects()
#'
#' @export
# Build a long-format polygon dataframe (columns lng, lat, GRTS) with five
# corner rows (closed ring) per GRTS cell, looked up from the project's
# sample frame. Cells with no coordinates in the lookup table are skipped.
get_grts_shp_df = function(
  grts_ids,
  project_id,
  project_df){
  grts_template_df = data.frame(GRTS_ID = as.integer(grts_ids))
  project_id_ = project_id
  # Sample frame name picks which lookup table to use.
  grts_fname = as.character(subset(project_df,
    project_df$project_id == project_id_)$sample_frame_short)
  grts_fname_df = grts_lookup_df[grts_fname][[1]]
  grts_df = plyr::join(grts_template_df, grts_fname_df, by = c('GRTS_ID'), type = "left")
  # Collect one 5-row dataframe per cell and bind once at the end;
  # rbind-ing inside the loop is quadratic in the number of cells.
  polys_list = vector("list", length(grts_ids))
  for (k in seq_along(grts_ids)){
    grts_id = grts_ids[k]
    this_row = subset(grts_df, grts_df$GRTS_ID == grts_id)
    # Corner fields are "lat,lon" strings; rev() yields c(lon, lat).
    ll1 = as.numeric(rev(as.character(strsplit(as.character(this_row$lowerleft),',')[[1]])))
    ll2 = as.numeric(rev(as.character(strsplit(as.character(this_row$upperleft),',')[[1]])))
    ll3 = as.numeric(rev(as.character(strsplit(as.character(this_row$upperright),',')[[1]])))
    ll4 = as.numeric(rev(as.character(strsplit(as.character(this_row$lowerright),',')[[1]])))
    # Close the ring by repeating the first corner.
    lngs = c(ll1[1],ll2[1],ll3[1],ll4[1],ll1[1])
    lats = c(ll1[2],ll2[2],ll3[2],ll4[2],ll1[2])
    # Skip cells the lookup table has no coordinates for.
    if (!is.na(ll1)[1]){
      polys_list[[k]] = data.frame(lng = lngs, lat = lats, GRTS = grts_id)
    }
  }
  polys_df = do.call(rbind, polys_list)
  # Preserve the original empty-result behavior.
  if (is.null(polys_df)) polys_df = data.frame()
  rownames(polys_df) = NULL
  return(polys_df)
}
#' @title Build spatial polygons dataframe from GRTS shape dataframe
#'
#' @import sp
#' @import raster
#' @import rgdal
#' @import plyr
#'
#' @param grts_shp_df Dataframe
#'
#' @export
# Convert the long-format polygon dataframe (lng, lat, GRTS columns) produced
# by get_grts_shp_df() into an sp::SpatialPolygonsDataFrame in WGS84, with
# one polygon per GRTS id and the id stored in the attribute table.
get_spdf_from_polys_df = function(
grts_shp_df){
# Seperate polygons by grts id
polys_list = split(grts_shp_df, grts_shp_df$GRTS)
# Remove id column from split polygon dfs
# (Polygon() wants a plain two-column coordinate matrix/data frame.)
polys_list_ = lapply(polys_list, function(poly_) { poly_["GRTS"] = NULL; poly_ })
polys = lapply(polys_list_, Polygon)
# Add id into the Polygons before converting into a SpatialPolygons object
polys_ = lapply(seq_along(polys), function(i) Polygons(list(polys[[i]]),
ID = names(polys_list_)[i]))
# Create SpatialPolygons object
all_polys = SpatialPolygons(polys_, proj4string = CRS("+proj=longlat +datum=WGS84") )
# Create SpatialPolygonsDataFrame object (adds id)
# NOTE(review): relies on unique(grts_shp_df$GRTS) matching the order of the
# split() groups -- split() sorts by factor level; confirm ids align.
all_polys_spdf = SpatialPolygonsDataFrame(all_polys,
data.frame(id = unique(grts_shp_df$GRTS),
row.names = unique(grts_shp_df$GRTS)))
return (all_polys_spdf)
}
#' @title Get GRTS information from Lat / Lon
#'
#' @description
#' Takes a latitude and longitude in EPSG 4326 (WGS 84)
#' and returns a
#'
#' @param token List token created from get_nabat_gql_token() or
#' get_refresh_token()
#' @param latitude Numeric latitude in EPSG 4326 (WGS 84)
#' @param longitude Numeric latitude in EPSG 4326 (WGS 84)
#' @param branch (optional) String that defaults to 'prod' but can also be
#' 'dev'|'beta'|'local'
#' @param url (optional) String url to use for GQL
#' @param aws_gql (optional) String url to use in aws
#' @param aws_alb (optional) String url to use in aws
#' @param docker (optional) Boolean if being run in docker container or not
#'
#' @export
# Query the NABat GraphQL API for the GRTS cell(s) containing each
# latitude/longitude pair (EPSG:4326) and return the results as one
# dataframe with snake_case column names.
get_grts_from_ll = function(
  token,
  latitude,
  longitude,
  branch = 'prod',
  url = NULL,
  aws_gql = NULL,
  aws_alb = NULL,
  docker = FALSE){
  # Resolve headers / refreshed token / endpoint for the chosen environment.
  tkn_hdr = get_token_headers(token, branch, url, aws_gql, aws_alb, docker)
  headers = tkn_hdr$headers
  token = tkn_hdr$token
  url = tkn_hdr$url
  # GraphQL query: spatial search for GRTS selections containing a point.
  query = paste0('
query RRgrtsSelectionSearchQuery($geometry: JSON!) {
grtsSelectionSearch(geom: $geometry) {
nodes {
grtsId
grtsCellId
geom4326 {
geojson
}
location1Name
subLocation1Name
sampleFrameId
priorityState
priorityFrame
otherSelections
effort
}
}
}')
  if (length(latitude) != length(longitude)){
    stop('latitude and longitude need to be same lengths.')
  }
  # One API call per coordinate pair; results collected in a preallocated
  # list and bound once (avoids quadratic rbind growth). seq_along() also
  # fixes the 1:length() bug, which would iterate c(1, 0) on empty input.
  results = vector("list", length(latitude))
  for (pos in seq_along(latitude)){
    lat = latitude[pos]
    lon = longitude[pos]
    # Point geometry variable for the GQL query.
    pr_variables = paste0('{"geometry":
{"type":"Point","crs":
{"type":"name",
"properties":{"name":"EPSG:4326"}},
"coordinates":[',paste0(lon, ',' ,lat),']}}')
    # Create body to send to GQL
    pbody = list(query = query, operationName = 'RRgrtsSelectionSearchQuery', variables = pr_variables)
    # Post to nabat GQL
    res = httr::POST(url, headers, body = pbody, encode='json')
    content = httr::content(res, as = 'text')
    json = fromJSON(content, flatten = TRUE)
    # Convert GQL output JSON into dataframe
    df = as.data.frame(json$data$grtsSelectionSearch$nodes, stringsAsFactors = FALSE)
    # camelCase -> snake_case column names
    names(df) = tolower(gsub("(?<=[a-z0-9])(?=[A-Z])", "_", names(df), perl = TRUE))
    results[[pos]] = df
  }
  final_df = do.call(rbind, results)
  # Preserve the original empty-result behavior.
  if (is.null(final_df)) final_df = data.frame()
  return(final_df)
}
|
3c1ebfa7bc311bdfae485435f695f515cbe69d60
|
bc9b42cd4cb22cd3eccd5d27838f48fec54f9d1e
|
/Rcode/WaveMarkovCheck.R
|
432bcef7091bda0ff382144c1bff53761a924856
|
[] |
no_license
|
mbh038/Waves
|
61dd0a0a3334dae6314899fa1d6c08b58ca0411c
|
ec829db267f17bf2477748be3cd7cad2fef54287
|
refs/heads/master
| 2021-01-19T03:59:46.710943
| 2017-05-24T15:25:08
| 2017-05-24T15:25:08
| 84,421,038
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,654
|
r
|
WaveMarkovCheck.R
|
## Wave Markov
## Compare synthetic (Markov-simulated) significant wave height (SWH) series
## against the real reference series: fit Weibull distributions to both and
## plot histogram + time-series comparisons.
library(dplyr)
# BUG FIX: dmy_hms() below comes from lubridate, which was never loaded,
# so the script failed at the timestamp-parsing step.
library(lubridate)
##Get example real data
## read in tidy data
path<-"../data/cleaned/wave/"
filehandle<-"wave001.csv"
fullname<-paste0(path,filehandle)
data<-read.csv(fullname,stringsAsFactors=FALSE,sep=",")
#rename columns
colnames(data)<-c("date","time","SWH","wavePeriod")
# convert time to POSIXct
timeStamp<-as.character(paste(data$date,data$time,sep=" "))
# convert time to POSIXct
timeStamp<-dmy_hms(timeStamp)
reference<-data.frame(timeStamp,data$SWH,data$wavePeriod)
colnames(reference)<-c("timeStamp","SWH","wavePeriod")
# Bin SWH into 0.25 m classes labelled 0, 1, 2, ...
reference$bin <- cut(reference$SWH, breaks = c(seq(0., ceiling(max(reference$SWH)), by = .25)), labels = 0:as.integer(ceiling(max(reference$SWH))/0.25-1))
reference$bin <-as.integer(reference$bin)
table(reference$bin)
sum(table(reference$bin))
maxSWH=max(reference$SWH)
# Synthetic input files: wave1hr_001.csv, wave1hr_002.csv, ...
ipfilepathstem<-"../data/synthetic/wave/wave1hr_"
ipfilepathtail<-".csv"
ndata<-24*365
bin_size=0.25
library(rafalib)
library(MASS)
# Weibull fit to one year of the real series (small offset avoids zeros).
refFit<-fitdistr(reference$SWH[1:ndata]+.01, 'weibull')
print("ref fit")
print(refFit)
# Weibull shape/scale collected across simulated files.
shape <- vector(mode="numeric", length=0)
scale <- vector(mode="numeric", length=0)
for (file in 1:5){
# set up input file numbers
# Zero-pad the file number to three digits.
ipfilehandle<-as.character(file)
if (file < 10){
ipfilehandle<-paste0("00",ipfilehandle)
}
if (file >= 10 && file < 100){
ipfilehandle<-paste0("0",ipfilehandle)
}
ipfilename<-paste0(ipfilepathstem,ipfilehandle,ipfilepathtail)
newdata<-read.csv(ipfilename)
# print(paste("File",ipfilehandle,"in",sep=" "))
ndata<-nrow(newdata)
h<-newdata[,2]
# print(summary(v))
# summary ((reference$bin[1:ndata]))
# Convert bin index back to a wave height, jittered uniformly within the bin.
h=(h-1+runif(ndata))*bin_size
mean(h)-mean(reference$SWH[1:ndata])
sd(h)-sd(reference$SWH[1:ndata])
#print (summary(v))
#print(summary(reference$V[1:ndata]))
hFit<-fitdistr(h+.01, 'weibull')
# print(paste(hFit$estimate[1],hFit$estimate[2]))
shape[file]<-hFit$estimate[1]
scale[file]<-hFit$estimate[2]
# print (hFit)
# refFit<-fitdistr(reference$SWH[1:ndata]+.01, 'weibull')
# print("ref fit")
# print(refFit)
# mypar(2,1)
# print(paste(round(mean(h),2),round(mean(reference$SWH[1:ndata]),2),round(hFit$estimate[1],2),round(hFit$estimate[2],2),round(refFit$estimate[1],2),round(refFit$estimate[2],2),sep=" "))
# hist(h,xlim=c(0,10),breaks=20,prob=TRUE)
# d = dweibull(seq(0,10,.2),hFit$estimate[1],hFit$estimate[2])
# points(seq(0,10,.2),d,type='l',col=2)
#
# plot(h[1:1000],type="l",ylim=c(0,10),xlab="hour",ylab="SWH (m)")
# lines(reference$SWH[1:1000],type="l",col="red")
}
# Normal fits to the per-file Weibull parameters.
params<-data.frame(shape,scale)
names(params)=c("shape","scale")
shapefit<-fitdistr(params$shape, 'normal')
print("Shape fit")
print(shapefit)
scalefit<-fitdistr(params$scale, 'normal')
print("Scale fit")
print(scalefit)
#plot a single simulate file together with real data
ipfilehandle="021"
ipfilename<-paste0(ipfilepathstem,ipfilehandle,ipfilepathtail)
newdata<-read.csv(ipfilename)
print(paste("File",ipfilehandle,"in",sep=" "))
ndata<-nrow(newdata)
h<-newdata[,2]
# print(summary(v))
# summary ((reference$bin[1:ndata]))
h=(h-1+runif(ndata))*bin_size
mean(h)-mean(reference$SWH[1:ndata])
sd(h)-sd(reference$SWH[1:ndata])
#print (summary(v))
#print(summary(reference$V[1:ndata]))
hFit<-fitdistr(h+.01, 'weibull')
print("hfit")
print (hFit)
refFit<-fitdistr(reference$SWH[1:ndata]+.01, 'weibull')
print("ref fit")
print(refFit)
# Two-panel figure: overlaid histograms with Weibull curves, and a
# time-series comparison of simulated vs real SWH.
pdf("../figures/waveStats.pdf")
mypar(2,1)
# print(paste(round(mean(h),2),round(mean(reference$SWH[1:ndata]),2),round(hFit$estimate[1],2),round(hFit$estimate[2],2),round(refFit$estimate[1],2),round(refFit$estimate[2],2),sep=" "))
hist(h,xlim=c(0,8),breaks=20,prob=TRUE,xlab="Wave height (m)",ylab="probability",main="",col=rgb(0, 0, 1,0.1))
d = dweibull(seq(0,10,.2),hFit$estimate[1],hFit$estimate[2])
points(seq(0,10,.2),d,type='l',col="blue")
# print(paste(round(mean(h),2),round(mean(reference$SWH[1:ndata]),2),round(hFit$estimate[1],2),round(hFit$estimate[2],2),round(refFit$estimate[1],2),round(refFit$estimate[2],2),sep=" "))
hist(add=T,reference$SWH,xlim=c(0,8),breaks=20,prob=TRUE,xlab="Wave height (m)",ylab="probability",main="",col=rgb(1, 0, 0,0.1))
dreal = dweibull(seq(0,10,.2),refFit$estimate[1],refFit$estimate[2])
points(seq(0,10,.2),dreal,type='l',col="red")
legend("topright",c("Real","Simulated"),lty=c(1,1),col=c("red","blue"),bty="n")
plot(h[2000:4000],type="l",ylim=c(0,6),xlab="hour",ylab="SWH (m)",col="blue")
lines(reference$SWH[2000:4000],type="l",col="red")
legend("topright",c("Real","Simulated"),col=c("red","black"),lty=c(1,1),cex=0.8)
dev.off()
|
a6c799c6c75b2a19adb600cdaa4dfcefb383bf87
|
a69681cb68a5edb6195e6a3f219efa529d05b533
|
/Cálculo das Probabilidades/distribuicoes_de_probabilidade_discretas_bernoulli_binomial.R
|
0341a49df0064217676699cfd0e48f364f76a86d
|
[] |
no_license
|
brunatoloti/estatistica
|
1cb7735a7bc109d9ef0a008654c4957980f34cec
|
c33ff474cf9c7336bed527675e42c9b2d457e531
|
refs/heads/main
| 2023-04-02T13:01:37.858983
| 2021-04-17T13:06:04
| 2021-04-17T13:06:04
| 332,253,350
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,204
|
r
|
distribuicoes_de_probabilidade_discretas_bernoulli_binomial.R
|
# Discrete probability distributions - Bernoulli and Binomial
#
# Example: Bernoulli
# Let X = "the student answers the question correctly" on a test with a single
# true/false question, i.e. one favourable answer out of two possible, so a
# 50% chance of success.
# P(X = 1): use the density function directly rather than differencing the CDF
# (dbinom(k, ...) is the idiomatic equivalent of pbinom(k) - pbinom(k - 1)).
dbinom(1, size = 1, prob = 0.5)

# Example: Binomial
# Let X = "number of correct answers" on a test with three true/false
# questions, 50% chance of success per question.
# P(X = 1)
dbinom(1, size = 3, prob = 0.5)

# Example 2: Binomial
# Consider a multiple-choice exam with 20 questions and 5 alternatives per
# question. If the student guesses every answer, what is the probability of
# getting 30% of the exam right (6 of 20)? And what is the expected number of
# correct answers?
n <- 20
p <- 0.2  # = 1/5, probability of picking the correct alternative per question
# X ~ Binomial(20, 1/5); P(X = 6)
dbinom(6, size = n, prob = p)
# The expected value of a Binomial(n, p) is n * p.
valor_esperado <- n * p
valor_esperado
|
f76b07f4b02140cbbae1844cba5cb78e31e44318
|
c892b24af15e4ca31a72137b3c9ab1db056dfdcc
|
/man/dist_mat.Rd
|
1fd33184750c4447326c05a2f779f1641fe18b48
|
[] |
no_license
|
melimore86/nestR
|
4ac72ca0856aac03cfa5117aaf66fbfe23ee25c4
|
51b094927fbca2b63e357bb71c9d35501c3af835
|
refs/heads/master
| 2020-08-05T03:59:57.996812
| 2019-08-20T15:58:16
| 2019-08-20T15:58:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 812
|
rd
|
dist_mat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_funs.R
\name{dist_mat}
\alias{dist_mat}
\title{Calculate distance matrix between points in the data}
\usage{
dist_mat(dat)
}
\arguments{
\item{dat}{\code{data.frame} of movement data for a single burst. Needs to
include burst, date, long, lat}
}
\value{
Returns \code{data.table} with distance matrix.
}
\description{
\code{dist_mat} calculates pairwise distances between all points in the
data.
}
\details{
Distances are calculated using the function
\code{\link[geosphere]{distGeo}}. Takes advantage of \code{data.table}
for a fast implementation. Adapted from a post on \href{
https://stackoverflow.com/questions/36817423/how-to-efficiently-calculate-
distance-between-pair-of-coordinates-using-data-tab}{StackOverflow}.
}
|
cb5af07ce49765db9e204cf85ca1e330898ace7b
|
75c141688ae9c52cfe302ff04a00f0f89b89e3a1
|
/R/V2_T2.5.R
|
d1fb82026e8284fd03d3afcc5753f53980806df2
|
[
"MIT"
] |
permissive
|
johnmutiso/rKenyaCensus
|
84e9e6c90a1892b05910a27001a1b12dc3320dba
|
e0d2242edd63df6f9e9d9c3b34b389d1f8f993a3
|
refs/heads/master
| 2022-04-23T03:04:18.114831
| 2020-04-25T13:26:46
| 2020-04-25T13:26:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
V2_T2.5.R
|
#' Volume 2: Table 2.5
#'
#' Distribution of Population by Urban Centres, Sex* and County
#' @docType data
#'
#' @usage data(V2_T2.5)
#'
#' @format A data frame with 13 variables:
#' \describe{
#'   \item{\code{Urban Center}}{Urban Center}
#'   \item{\code{County}}{County}
#'   \item{\code{Total}}{Total number of individuals}
#'   \item{\code{Male}}{Number of Males}
#'   \item{\code{Female}}{Number of Females}
#'}
#' @keywords datasets
#'
"V2_T2.5"
# NOTE(review): @format claims 13 variables but only 5 are described above --
# confirm against the actual data and either document the remaining columns
# or correct the count.
|
35e49f48d701b90603bca90c5899ea2d6263778d
|
2bf497fe5f5d1e5baaba86d7934885106c6c6554
|
/Scripts/topGOEnrichment.R
|
d594e349e7deb54f7aada2ddd619f1326a436830
|
[] |
no_license
|
ericfournier2/EMAP
|
25b33cc001f6f5cb96e5009c879729d295ee7251
|
3bcfe1adbf1c7f618332856016362c89d4b72210
|
refs/heads/master
| 2022-09-30T20:14:33.518223
| 2020-06-04T18:42:35
| 2020-06-04T18:42:35
| 269,433,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,973
|
r
|
topGOEnrichment.R
|
# Functions used to perform GO enrichment using the topGO library.
library(GO.db)
library(topGO)
# Probe -> GO term mappings for the two platforms. The globals
# annotationFolder and speciesFolder must be defined before sourcing this file.
probe2goEpi <- readMappings(file.path(annotationFolder, "GO_Filtered.map"))
probe2goTrans <- readMappings(file.path(speciesFolder, "probe2go.map"))
# Probe -> gene-symbol table as written by generateEpiProbeSymbolMapping().
epiSymbols <- read.table(file.path(annotationFolder, "ProbeSymbol.mapping"), sep="\t", header=TRUE)
# Prefix every non-empty element of `x` with "<y>-" and join the results with
# commas. Leading whitespace is stripped before the empty check. Returns ""
# when no element survives the filtering.
conditionalPastePrefix <- function(x, y) {
  cleaned <- sub("^\\s+", "", x)
  cleaned <- cleaned[cleaned != ""]
  if (length(cleaned) == 0) {
    return("")
  }
  paste0(y, "-", cleaned, collapse = ",")
}
# Join the non-empty elements of `x` with commas; return "" if none remain.
conditionalPaste <- function(x) {
  nonEmpty <- x[x != ""]
  if (length(nonEmpty) == 0) {
    return("")
  }
  paste(nonEmpty, collapse = ",")
}
# Generates a direct mapping between probes and all symbols they are associated with.
# This can be saved, and used later when associating the significant DMR probes within
# a GO term to the genes they target.
# Build and persist a probe -> gene-symbol lookup for the epigenetic platform.
# Reads the global `annotation` data frame, prefixes each symbol with its
# annotation category (e.g. "Exon-GENE"), and writes the result to
# "ProbeSymbol.mapping" under the global `annotationFolder`. Called for its
# file side effect; the return value is not meaningful.
generateEpiProbeSymbolMapping <- function() {
# List all relevant annotation columns.
columnsOfInterest <- c("Proximal_Promoter", "Promoter", "Exon", "Intron")
# Prepare a matrix which can contain the processed content of each column,
# one output column per annotation category.
newMat <- matrix("", nrow=nrow(annotation), ncol=length(columnsOfInterest))
colnames(newMat) <- columnsOfInterest
# Add an appropriate prefix to all gene names. Each annotation cell may hold
# several space-separated symbols; conditionalPastePrefix() tags each with
# its category and comma-joins them.
for(i in columnsOfInterest) {
splitGenes = strsplit(as.character(annotation[,i]), " ")
newMat[,i] = unlist(lapply(splitGenes, conditionalPastePrefix, i))
}
# Concatenate all symbols of a probe (across categories) into a single string.
finalSymbols <- apply(newMat, 1, conditionalPaste)
finalDF <- data.frame(Probe=annotation$Probe, Gene_Symbol=finalSymbols, stringsAsFactors=FALSE)
# Output the generated data-frame.
write.table(finalDF, file.path(annotationFolder, "ProbeSymbol.mapping"), sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
}
# Performs the actual calls to topGO to perform the enrichment analysis.
# Parameters:
# ontology:
# The ontology to use for the enrichment analysis. One of "BP", "CC" or "MF".
# allGenes:
# A named vector containing either scores or a 0/1 two-level factor for each gene/probe.
# Used to determined ordering/inclusion/exclusion from the selected group.
# goSubset:
# The GO annotations for all probes in allGenes.
# statistic:
# The test statistic to use. See the documentation for runTest in the topGO package
# for a list of valid options.
# outputFilename:
# A prefix/suffix for the output files generated by this function. This should not
# contain any directories.
# ...:
# All remaining parameters are passed on to the constructor of the topGOdata object.
# Returns:
# A list containing three elements:
# Data: The topGOdata object.
# Test: The result of the runTest function.
# Table: The summary table provided by the GenTable method.
innerTopGo <- function(ontology, allGenes, goSubset, statistic, outputFilename, symbolAnnotation, ...) {
# Create the topGO data object, which contains the selected subset, the universe subset, gene scores, etc.
topGODataObj <- new("topGOdata", ontology=ontology, allGenes = allGenes, annotationFun=annFUN.gene2GO, gene2GO=goSubset, nodeSize=5, ...)
# Perform the enrichment analysis with both the weight01 and classic algorithms.
testResultWeight <- runTest(topGODataObj, algorithm="weight01", statistic=statistic)
testResultClassic <- runTest(topGODataObj, algorithm="classic", statistic=statistic)
# Generate graph of top enriched terms (written to a PDF by printGraph).
printGraph(topGODataObj, testResultClassic, firstSigNodes = 5, fn.prefix = paste(ontology, outputFilename), useInfo = "all", pdfSW = TRUE)
# Generate a summary table of the enriched terms and write it out.
# topNodes = number of scored terms, so every term appears in the table.
summaryTable <- GenTable(topGODataObj, Weight=testResultWeight, Classic=testResultClassic, orderBy="Classic", ranksOf="Classic", topNodes=length(score(testResultClassic)), numChar=2000)
write.table(summaryTable, file=paste(ontology, " ", outputFilename, ".txt", sep=""), row.names=FALSE, col.names=TRUE, quote=FALSE, sep="\t")
# Generate lists of "significant" probes for each GO term
allGenesInTerms <- genesInTerm(topGODataObj)
allSigGenes <- sigGenes(topGODataObj)
sigGenesInTerms <- lapply(allGenesInTerms, function(x) { x[x %in% allSigGenes] })
# One row per GO term: comma-joined probe IDs plus the gene symbols looked up
# from symbolAnnotation (columns Probe / Gene_Symbol).
sigGenesInTermsDF <- data.frame(GO=names(sigGenesInTerms), Probes="", Genes="", stringsAsFactors=FALSE)
for(i in 1:length(sigGenesInTerms)) {
goTerm = names(sigGenesInTerms)[i]
probes = paste(sigGenesInTerms[[i]], collapse=",")
genes = paste(symbolAnnotation$Gene_Symbol[match(sigGenesInTerms[[i]], symbolAnnotation$Probe)], collapse=",")
sigGenesInTermsDF[i,] <- data.frame(GO=goTerm, Probes= probes, Genes=genes, stringsAsFactors=FALSE)
}
write.table(sigGenesInTermsDF, file=paste(ontology, " - significant genes per term - ", outputFilename, ".txt", sep=""), row.names=FALSE, col.names=TRUE, quote=FALSE, sep="\t")
# Return the data object, the classic test result and the summary table.
return(list(Data=topGODataObj, Test=testResultClassic, Table=summaryTable))
}
# Selection helper for performTopGOEnrichment (probeSelectionFun argument):
# flags probes whose score (a p-value) falls below the global
# pValueSelectThreshold cutoff.
pValueSelectThreshold <- 0.05
pValueSelect <- function(x) {
  x < pValueSelectThreshold
}
# Selection helper for performTopGOEnrichment (probeSelectionFun argument):
# flags probes whose absolute module membership exceeds the global
# moduleMembershipSelectThreshold cutoff.
moduleMembershipSelectThreshold <- 0.8
moduleMembershipSelect <- function(x) {
  abs(x) > moduleMembershipSelectThreshold
}
# Perform GO enrichment using the topGO library.
# Parameters:
# chosenProbes:
# The name of the probes which are part of the subset whose enrichment should
# be assessed.
# subsetOfProbes:
# The "universe" set, IE all of the probes which could potentially have been
# part of the chosenProbes set.
# outputFilename:
# A name to be used as a suffix/prefix when naming output files. Should not contain
# directories.
# probeScores:
# If a relevant score can be attributed to all probes, this parameter can be used
# as an alternative to the chosenProbes parameter, and these scores will be used
# to perform a Kolmogorov-Smirnov statistical test.
# probeSelectionFun:
# If the probeScores argument is provided, this should be a function which
# uses the probe scores to determine if a probe should be included in the
# "chosen" subset.
# scoreOrder:
# Describes probe scores:
# "increasing" if the lowest score is the most significant (p-values, ranks0
# "decreasing" if the highest score is the most significant (membership percentage, etc.)
# Returns:
# A list of three elements, one for each ontology (BP, CC, MF). See innerTopGO for the structure
# of each individual element.
performTopGOEnrichment <- function(chosenProbes, subsetOfProbes, outputFilename, platform="Epigenetic",
probeScores=NULL, probeSelectionFun=NULL, scoreOrder="increasing") {
# NOTE(review): scoreOrder is accepted but never used inside this function --
# confirm whether it should be forwarded to the topGOdata constructor.
# Make sure the inputs are correct.
if(!(platform %in% c("Epigenetic", "Transcriptomic"))) {
stop("Error: Platform must be either 'Epigenetic' or 'Transcriptomic'")
}
if(!all(chosenProbes %in% subsetOfProbes)) {
stop("Error: Chosen probes are not all part of the given subset")
}
# The universe must be a subset of the platform's annotated probes
# (globals: `annotation` for epigenetic, `annotationTrans` for transcriptomic).
if(platform=="Epigenetic") {
if(!all(as.character(subsetOfProbes) %in% as.character(annotation$Probe))) {
stop("Error: Given probe subset is invalid.")
}
} else {
if(!all(as.character(subsetOfProbes) %in% as.character(annotationTrans$Probe))) {
stop("Error: Given probe subset is invalid.")
}
}
if(length(unique(chosenProbes)) != length(chosenProbes)) {
stop("Error: duplicated probes in chosenProbes")
}
if(length(unique(subsetOfProbes)) != length(subsetOfProbes)) {
stop("Error: duplicated probes in subsetOfProbes")
}
cat(paste("Selected ", length(chosenProbes), " out of ", length(subsetOfProbes), ".\n", sep=""))
# Switch annotations depending on the platform (probe2goEpi/probe2goTrans and
# epiSymbols are globals loaded at the top of this file).
if(platform=="Epigenetic") {
probe2go <- probe2goEpi
symbolAnnotation <- epiSymbols
} else {
probe2go <- probe2goTrans
symbolAnnotation <- annotationTrans
}
# Subset probes so that only those with GO annotations are used for the analysis.
goSubset <- probe2go[names(probe2go) %in% subsetOfProbes]
if(is.null(probeScores)) {
# When choosing a subset of genes, we must convert them to a 2-level (0,1) factor vector.
probeScores <- factor(as.integer(names(goSubset) %in% chosenProbes))
names(probeScores) <- names(goSubset)
# Without scores, we must use the count-based fisher statistical test for enrichment.
stat <- "fisher"
cat(paste("After removal of unannotated probes, ", sum(probeScores==1), " out of ", length(goSubset), " remains.\n", sep=""))
} else {
# If scores are available, use the Kolmogorov-Smirnov statistical test.
stat <- "ks"
cat(paste("After removal of unannotated probes, ", length(goSubset), " probes remain.\n", sep=""))
}
# Perform enrichment on all ontologies; one result list entry per ontology.
results <- list()
for(ontology in c("BP", "MF", "CC")) {
results[[ontology]] <- innerTopGo(ontology, probeScores, goSubset, stat, paste(ontology, outputFilename), symbolAnnotation, geneSelectionFun=probeSelectionFun)
}
return(results)
}
# Generate a heatmap which compares and clusters the GO enrichment results
# from a set of orthogonal modules.
# Parameters:
# enrichmentList:
# A named list containing the results of multiple calls to performTopGOEnrichment.
# All calls must have the same "universe" set of probes.
# inclusionThreshold:
# The p-value threshold for inclusion into the analysis. A GO term
# must have a p-value under the threshold in at least one module
# to be included in the clustering analysis.
compareTopGOSets <- function(enrichmentList, inclusionThreshold=0.001) {
# First, we need to fetch the enrichment values for all GO terms
# and concatenate them into three separate data.frames, one for each
# ontology. Values are stored as -log10(weight01 p-value), one column
# per module.
enrichmentDFList <- list()
# Loop over all modules in the input list:
firstLoop <- TRUE
for(module in names(enrichmentList)) {
# Loop over all ontologies, which are the top-level elements of the module lists.
for(ontology in c("BP", "CC", "MF")) {
# Fetch the enrichment results from the input list structure.
resTable <- enrichmentList[[module]][[ontology]]$Table
testResults <- -log10(as.numeric(resTable$Weight))
if(firstLoop) {
# On the first iteration of the loop, create a new data frame in the enrichmentDFList.
# NOTE(review): seq(1:length(testResults)) is equivalent to
# seq_along(testResults) here.
enrichmentDFList[[ontology]] <- data.frame(GO=resTable$GO.ID)
tableOrdering <- seq(1:length(testResults))
} else {
# On subsequent iterations, reorder the results to match the initial GO ordering.
tableOrdering <- match(as.character(enrichmentDFList[[ontology]][,1]), as.character(resTable$GO.ID))
}
# Add the results to the data.frame
enrichmentDFList[[ontology]][,module] <- as.numeric(testResults[tableOrdering])
}
firstLoop <- FALSE
}
# Now, for each ontology, generate a heatmap comparing the most relevant GO terms.
for(ontology in c("BP", "CC", "MF")) {
# Remove the GO ID column, and keep only the numerical data.
rawData <- enrichmentDFList[[ontology]][,-1]
# Associate terms to the GO IDs (via GO.db's GOTERM environment).
goTermMap <- toTable(GOTERM[as.character(enrichmentDFList[[ontology]][,1])])
goTermMap <- goTermMap[goTermMap$Ontology==ontology,]
goTerms <- goTermMap$Term[match(enrichmentDFList[[ontology]][,1], goTermMap$go_id)]
rownames(rawData) <- goTerms
# Filter out GO terms which are not significant in any module.
dataSubset <- rawData[apply(rawData>-log10(inclusionThreshold), 1, any),]
# Generate the annotated heat map (Heatplus::annHeatmap2, presumably --
# confirm the attached package) and write it to a TIFF file.
ahm <- annHeatmap2(as.matrix(dataSubset), legend=TRUE, scale="row",
labels=list(nrow=10,
Row=list(labels=substr(rownames(dataSubset), 1, 30))))
tiff(filename=paste(ontology, "GO Comparison.tiff"), width=9, height=9, units="in", res=600, compression="lzw")
plot(ahm)
dev.off()
}
}
|
e6a48f148ebc1dc151a7d51a8890b65e4cd42646
|
c52872637e281477f9a2916037e677594138df2f
|
/Model202003-Blog/code/load_cbr_icr_data.R
|
d3cccf0da6248f66390b8e38c85af552ae954fc7
|
[] |
no_license
|
scox3/RUBUSD
|
1dacd237b836048d52cff60063171f10a0c9ad01
|
ce23ec6e5f6f4e74658a4667428ceccdfbb4ed58
|
refs/heads/master
| 2022-08-29T08:26:54.506690
| 2020-05-26T15:41:51
| 2020-05-26T15:41:51
| 265,825,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,026
|
r
|
load_cbr_icr_data.R
|
# Transliterate Cyrillic strings into plain ASCII identifiers: apply BGN
# romanization, replace control characters and every non-alphanumeric with a
# space, then substitute "_" for any byte that still falls outside ASCII.
make_rus_colnames <- function(v1) {
  latin <- stri_trans_general(v1, "Russian-Latin/BGN")
  latin <- str_replace_all(latin, "[[:cntrl:]]", " ")
  latin <- str_replace_all(latin, "[^[:alnum:]]", " ")
  iconv(latin, "latin1", "ASCII", sub = "_")
}
data_folder = "Data/"
# Build the consolidated CBR international-reserves ("CR") table.
# Reads HTML exports for 2003-2007 and .xlsx exports for 2008..`last_year`
# from the global `data_folder`, transliterates component names via
# make_rus_colnames(), and returns a single data frame with a `Keys` label
# column ("Total"/"Proch"/"SDR"/"MVF"/"Zoloto"), DateSt/DateFn period
# boundaries and a Duration column (DateFn - DateSt, in days).
# Relies on readHTMLTable/xmlValue, read.xlsx, tibble::add_column and
# stringr being attached by the caller, plus the globals data_folder and
# last_year.
get_cbr_CR_table <- function() {
###Grab CR HTML data
#names( icr_dataset1)[5]<-"CR_DIFF_OP"
#2003-2007 data get HTMLs saved from CBR site
options(stringsAsFactors= FALSE)
df4 <- NULL
# Years 2003..2007: one HTML file per year, possibly several tables each.
for( iy in c(3:7) ){
print (iy)
fn <- paste0(data_folder, "iip_ira_0",iy,".htm")
print( fn)
df1 <- readHTMLTable(fn, elFun=function(node){
return( as.character( xmlValue(node))) })
n<-length(df1)
print(n)
for( i in 1:length(df1) ) {
print( paste0( "table#", i ))
df2 <- df1[[i]]
# s1<- colnames(df2)
s1 <- make_rus_colnames( trimws(colnames(df2)) )
ir1 <- c(1:dim(df2)[1])
ic1 <- c(2:7)
#remove white spaces and convert to numeric
df2[ir1,ic1] <- apply( df2[ir1,ic1], c(1,2), function( x) { as.numeric(str_replace_all(x, "\\s","")) } )
#remove dates from column names so that rbind works
# (the last 8 characters of the header hold the period date)
colnames(df2) <- c( s1[1], paste( str_sub(s1[2],end=-9), "DateSt", sep=""), s1[3:6],
paste( str_sub(s1[2],end=-9), "DateFn", sep=""))
#set dates columns
df2$DateSt <- as.Date(str_sub(s1[2],start=-8), format="%d %m %y")
df2$DateFn <- as.Date(str_sub(s1[7],start=-8), format="%d %m %y")
#merge with growing frame
if( is.null(df4) ) { df4 <- df2 } else {df4 <- rbind(df4, df2) }
}
}
#insert middle column to comply with latest CBR format
require(tibble)
df4 <- add_column(df4, X6 = 0, .before = 5)
# Transliterated component name becomes the lookup key.
df4$Keys <- make_rus_colnames(trimws(df4[,1]))
icr_dataset1 <- df4
# View( icr_dataset1)
#Collect data from files for 2008-2017
df5 <- NULL
ny2 <- last_year %% 100
for( i in 8:ny2) {
fn <- paste0( data_folder, "iip_ira_", formatC(i,width=2,flag="0",zero.print = TRUE), ".xlsx")
print(fn)
df1 <- read.xlsx(fn, stringsAsFactors = FALSE, header=FALSE,encoding = "UTF-8",sheetIndex=1, colIndex=c(1:8), startRow = 4)
if( is.null(df5) ) { df5 <- df1 } else { df5 <- rbind(df5, df1) }
}
# View(df5)
#setup colums (placeholder dates, overwritten per data block below)
df5$DateSt <- as.Date("1970-01-01")
df5$DateFn <- as.Date("1970-01-01")
#Make keys column
df5$Keys <- make_rus_colnames(trimws(df5[,1]))
#find rows in which dates are present ("Ostatok" = "balance" header rows)
i1 <- grep( "^Ostatok", make_rus_colnames(df5[,2]), perl=TRUE)
# i1<-c(i1, dim(df5)[1])
#i2 <- do.call( c, sapply( seq(1:(length(i1)-1)), function(i) { return( i1[i]:i1[i+1])}) )
# Propagate each header row's start/end dates to the 8 rows of its block.
for( i in 1:length(i1) ) {
# print( paste0(str_sub(df5[i1[i],2],start=-8), "/", str_sub(df5[i1[i],8],start=-8)))
n <- nchar(df5[i1[i],2])
d1 <- as.Date(str_sub(df5[i1[i],2],start=-8), format="%d.%m.%y")
# print(d1)
d2 <- as.Date(str_sub(df5[i1[i],8],start=-8), format="%d.%m.%y")
print( paste0(d1, "-", d2))
df5$DateSt[(i1[i]):(i1[i]+7)]<- d1
df5$DateFn[(i1[i]):(i1[i]+7)]<- d2
}
#Find end row for each data block
i1 <- grep( "^Mezhdunar", df5$Keys, perl=TRUE)
i2 <- grep( "^Proch", df5$Keys, perl=TRUE)
#Form vector of row indexes for extraction
ii <- unlist( sapply( c(1:length(i1)), function(i) { return( c(i1[i]:i2[i]))} , simplify=TRUE) )
#Select rows
df6 <- df5[ii, ]
# View(df6)
# Problem: df4 and df6 have a different number of columns because of the
# Keys column (translated from the original Russian comment).
colnames(df6)<- colnames(icr_dataset1)
#Join datasets
icr_dataset2 <- rbind(icr_dataset1, df6)
# set duration column (difftime in days)
icr_dataset2$Duration <- icr_dataset2$DateFn - icr_dataset2$DateSt
# View(icr_dataset2)
#Rename Keys: map transliterated Russian labels onto short component codes.
spatterns <- data.frame( pattern= c("(^Mezhdunar|Rezervnyye)", "^Proch", "SDR", "MVF", "zoloto"),
replacement= c("Total", "Proch", "SDR", "MVF", "Zoloto"),
stringsAsFactors=FALSE)
icr_dataset2$K <- ""
for( i in c(1:dim(spatterns)[1]) ) {
ii1 <- grep( spatterns[i,1], icr_dataset2$Keys, perl=TRUE)
print( length(ii1))
icr_dataset2[ii1, "K"] <- spatterns[i,2]
}
icr_dataset2$Keys <- icr_dataset2$K
icr_dataset2$K <- NULL
return( icr_dataset2 )
}
# Subset a reserves table to one component at one period length.
#
# crtable:      data frame with at least the columns Keys (component label),
#               Duration (period length in days) and DateSt (period start).
# cr_component: prefix of the Keys values to keep (e.g. "Proch", "Total").
# periodicity:  period length in months; rows whose Duration falls within
#               ((periodicity - 1) * 31, periodicity * 31] days are kept --
#               31-day buckets tolerate months of differing length.
#               A value <= 0 keeps all matching rows regardless of duration.
#
# Returns the matching rows ordered by period start date.
select_cbr_icr_type <- function(crtable, cr_component, periodicity) {
  # Fix: perl must be the logical TRUE, not the string "TRUE" (the original
  # only worked through if()'s character-to-logical coercion inside grep).
  ii <- grep(paste0("^", cr_component), crtable$Keys, perl = TRUE)
  cr1 <- crtable[ii, ]
  # A non-positive periodicity means "no duration filter".
  if (periodicity > 0) {
    keep <- (cr1[, "Duration"] <= periodicity * 31) &
      (cr1[, "Duration"] > (periodicity - 1) * 31)
    cr1 <- cr1[keep, ]
  }
  return(cr1[order(cr1[, "DateSt"]), ])
}
# Assemble a reserves series for one component at the requested periodicity.
#
# For periodicity 1 the monthly rows are returned as-is. For periodicity 3
# (quarterly) or 12 (annual) the rows natively reported at that periodicity
# are kept and extended with rows aggregated from consecutive monthly data:
# columns 3:7 are summed over the window, columns 1:2 and 9 are taken from
# the window's first month, columns 8 and 10:11 from its last month, and
# column 12 is recomputed as cr1[last, 10] - cr1[first, 9] (as in the
# original duplicated branches). Any other periodicity yields NULL, matching
# the original fall-through behaviour.
#
# Fix: the quarterly and annual branches were copy-pasted duplicates
# differing only in the constant 3 vs 12; they are merged here.
get_cbr_icr_component <- function(crtable, cr_component, periodicity) {
  cr1 <- select_cbr_icr_type(crtable, cr_component, 1)
  if (periodicity == 1) {
    return(cr1)
  }
  if (!(periodicity %in% c(3, 12))) {
    # Original if/else-if chain fell through silently for other values.
    return(NULL)
  }
  # Rows already reported at the requested periodicity.
  cr2 <- select_cbr_icr_type(crtable, cr_component, periodicity)
  n1 <- dim(cr1)[1]
  icnt <- dim(cr2)[1] + 1
  for (i in seq_len(n1 %/% periodicity)) {
    print(i)
    i1 <- (i - 1) * periodicity + 1   # first month of the window
    i2 <- i1 + periodicity - 1        # last month of the window
    cr2[icnt, ] <- NA
    cr2[icnt, 1:2] <- cr1[i1, 1:2]
    cr2[icnt, 3:7] <- unlist(sapply(c(3:7), function(j) {
      return(sum(as.numeric(cr1[i1:i2, j])))
    }))
    cr2[icnt, 8] <- cr1[i2, 8]
    cr2[icnt, 9] <- cr1[i1, 9]
    cr2[icnt, 10:11] <- cr1[i2, 10:11]
    cr2[icnt, 12] <- cr1[i2, 10] - cr1[i1, 9]
    icnt <- icnt + 1
  }
  return(cr2)
}
# Extract the "Proch" (other reserves) component at the given periodicity and
# reshape it to three columns: Date (end date shifted back one day --
# presumably to land on the period's last day, confirm), CR (closing balance)
# and dCR (change due to operations), both coerced to numeric.
select_icr_Proch <- function(cr1, period) {
  wanted <- c("DateFn", "Ostatok na DateFn", "Izmeneniya v rezul__tate operatsiy")
  res <- select_cbr_icr_type(cr1, "Proch", period)[, wanted]
  names(res) <- c("Date", "CR", "dCR")
  res$Date <- res$Date - 1
  res[, (2:3)] <- apply(res[, (2:3)], 2, as.numeric)
  return(res)
}
# ---- Script entry: build the dICR_CB series from CBR reserve data ----
# `last_year` bounds the .xlsx files read by get_cbr_CR_table().
last_year <- 2018
ICR_table <- get_cbr_CR_table()
# Monthly and quarterly "Proch" (other reserves) series.
ICR_M <- select_icr_Proch(ICR_table, 1)
ICR_Q <- select_icr_Proch(ICR_table, 3)
# Convert to xts, indexed by the Date column.
ICR_Q_xts <- xts( ICR_Q[,-1], order.by = ICR_Q[,1])
ICR_M_xts <- xts( ICR_M[,-1], order.by = ICR_M[,1])
# Combine monthly changes with earlier quarterly changes scaled by 1/3
# (presumably to approximate a per-month change -- confirm).
d1 <- rbind( ICR_M_xts[,"dCR"], ICR_Q_xts[paste0("/",index(first(ICR_M_xts))), "dCR"]/3 )
# Align onto the series from the global `ddatM` (defined elsewhere) starting
# in 2003, and back-fill gaps with the next observation.
d2 <- ddatM$ddatD["2003/"]
d2$dCR <- d1$dCR
d2 <- na.locf(d2, fromLast=TRUE)
# Store the result as the dICR_CB column of the global ddatM object.
ddatM$dICR_CB <- NULL
ddatM$dICR_CB <- d2$dCR
|
de8c938fb5d339f8a847179b1bfda2172c0b4ce9
|
5c551b43a32451d14254c5ccffef277a246d2709
|
/Week4/Code/rodents.R
|
79c97ce3ea31f009ff1833b48f45d6c434a78ccb
|
[
"Apache-2.0"
] |
permissive
|
ph-u/CMEE_ph-u
|
8e20a2b1269dc21a860a5827dbd1d84e03340dd3
|
8d52d4dcc3a643da7d55874e350c18f3bf377138
|
refs/heads/master
| 2023-01-22T07:29:46.118897
| 2020-12-02T10:31:51
| 2020-12-02T10:31:51
| 212,303,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,249
|
r
|
rodents.R
|
#!/bin/env Rscript
# Author: ph-u
# Script: rodents.R
# Desc: data analysis of rodent dataset
# Input: none -- run in R console line-by-line
# Output: R terminal output
# Arguments: 0
# Date: Oct 2019
## lib
library(car)
library(PMCMR)
library(dplyr)
{## data-scanning
# Load the raw survey data and coerce categorical columns to factors.
# NOTE(review): header = T / stringsAsFactors = F use the reassignable T/F
# shortcuts; TRUE/FALSE would be safer.
oo<-read.csv("../Data/rodents.csv", header = T, stringsAsFactors = F)
oo$yr<-as.factor(oo$yr)
oo$mo<-as.factor(oo$mo)
# Empty tag strings denote untagged animals.
oo$tag<-ifelse(oo$tag=="","No_Tag",oo$tag)
oo$tag<-as.factor(oo$tag)
oo$species<-as.factor(oo$species)
# Normalise sex to three levels: M / F / No_Data.
oo$sex<-ifelse(oo$sex!="M", ifelse(oo$sex !="F","No_Data","F"),"M")
oo$sex<-as.factor(oo$sex)
# Rename measurement columns with explicit units.
colnames(oo)[6]<-"hindfootLength.mm"
colnames(oo)[7]<-"weight.g"
colnames(oo)[8]<-"precipitation.mm"
# aa<-oo[which(oo$sex!="M" & oo$sex!="F" & oo$sex!=""),]
# aa<-oo[which(oo$sex=="M"),]
}
## data description oo[,6]
## data description oo[,7]
# Boxplots of weight (column 7) against columns 1-5, skipping column 3.
for(i in 1:5){
if(i!=3){
boxplot(oo[,7]~oo[,i])
}
};rm(i)
## data description oo[,8]
# Boxplots of precipitation (column 8) against columns 1-5, skipping column 3.
for(i in 1:5){
if(i!=3){
boxplot(oo[,8]~oo[,i])
}
};rm(i)
## stat (only tell how reliable is the test, no need to put into publication)
# Normality screening of the (log-transformed) continuous variables.
hist(log(oo$precipitation.mm))
hist(log(oo$weight.g))
hist(oo$hindfootLength.mm)
qqPlot(log(oo$precipitation.mm), ylim = c(0,10))
qqPlot(log(oo$weight.g), ylim = c(0,10))
qqPlot(log(oo$hindfootLength.mm))
## non-parametric
cor.test(oo$weight.g,oo$precipitation.mm, method = "spearman") ## is precipitation affecting weight?
# cor.test(log(oo$weight.g),log(oo$precipitation.mm), method = "spearman")
# Kruskal-Wallis tests with Nemenyi post-hoc comparisons (PMCMR package).
kruskal.test(oo$weight.g~interaction(oo$species,oo$sex))
posthoc.kruskal.nemenyi.test(oo$weight.g~interaction(oo$species,oo$sex))
# a<-posthoc.kn.test(oo$weight.g,oo$species,oo$sex) ## self-function
kruskal.test(oo$weight.g~oo$sex)
posthoc.kruskal.nemenyi.test(oo$weight.g~oo$sex)
# Drop animals with unknown sex before the weight-by-gender summary.
aa<-oo[which(oo$sex!="No_Data"),]
aa$sex<-as.factor(as.character(aa$sex))
boxplot(aa$weight.g~aa$sex, main="Boxplot of Rodent weight against Gender", ylab = "Weight (g)", xlab = "Gender")
cat(paste("Male weight median:",unname(summary(aa[which(aa$sex=="M"),7])[3]),"\nFemale weight median:",unname(summary(aa[which(aa$sex=="F"),7])[3]),"\nMedian Difference by rodent gender:",unname(summary(aa[which(aa$sex=="M"),7])[3])-unname(summary(aa[which(aa$sex=="F"),7])[3])))
|
62c33b93e1598808bc575a44b2e567e5fef9ea21
|
41138fdeb7ad88ff3f6977d0c34e43cc9de0b096
|
/R/plot.statcheck.R
|
65a64c0a8cc16895bd8fde3bd5261a0fdf550f9d
|
[] |
no_license
|
cran/statcheck
|
ec0c27682e12ec21b1d2c567787a783ca89fb0c1
|
9989710949d3fbb8237aacc5a8c5e85b16773dca
|
refs/heads/master
| 2023-01-25T04:22:21.640588
| 2023-01-23T12:30:02
| 2023-01-23T12:30:02
| 26,909,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,792
|
r
|
plot.statcheck.R
|
#' Plot method for statcheck
#'
#' Function for plotting of \code{statcheck} objects. Reported p values are
#' plotted against recalculated p values, which allows the user to easily spot
#' if articles contain miscalculations of statistical results.
#'
#' If APAstyle = FALSE, inconsistencies between the reported and the recalculated p value are indicated with an orange dot. Recalculations of the p value that render a previously non significant result (p >= .5) as significant (p < .05), and vice versa, are considered decision errors, and are indicated with a red dot. Exactly reported p values (i.e. p = ..., as opposed to p < ... or p > ...) are indicated with a diamond.
#'
#' @section Acknowledgements:
#' Many thanks to John Sakaluk who adapted the plot code to create graphs in
#' APA style.
#'
#' @seealso \code{\link{statcheck}}
#'
#' @param x A statcheck object. See \code{\link{statcheck}}.
#' @param alpha assumed level of significance in the scanned texts. Defaults to
#' .05.
#' @param APAstyle If TRUE, prints plot in APA style.
#' @param group Indicate grouping variable to facet plot. Only works when
#' \code{APAstyle==TRUE}
#' @param ... arguments to be passed to methods, such as graphical parameters
#' (see \code{\link{par}}).
#'
#' @examples
#' # First we need a statcheck object
#' # Here, we create one by running statcheck on some raw text
#'
#' txt <- "This test is consistent t(28) = 0.2, p = .84, but this one is
#' inconsistent: F(2, 28) = 4.2, p = .01. This final test is even a
#' gross/decision inconsistency: z = 1.23, p = .03"
#'
#' result <- statcheck(txt)
#'
#' # We can then plot the statcheck object 'result' by simply calling plot() on
#' # "result". R will know what kind of plot to make, because "result" is of
#' # class "statcheck"
#' plot(result)
#'
#' @importFrom ggplot2 theme theme_bw element_blank element_line ggplot aes
#' geom_point geom_vline geom_hline geom_abline annotate scale_x_continuous
#' scale_y_continuous scale_color_manual facet_grid
#' @importFrom rlang .data
#' @importFrom graphics plot.default points abline text par legend
#'
#' @export
plot.statcheck <- function(
x,
alpha = .05,
APAstyle = TRUE,
group = NULL,
...
){
# replace 'ns' for > alpha
ns <- x[[VAR_P_COMPARISON]] == "ns"
x[[VAR_P_COMPARISON]][ns] <- ">"
x[[VAR_REPORTED_P]][ns] <- alpha
if (APAstyle == TRUE) {
# Add vector "Type" to statcheck object, specifying whether observations are
# correctly reported, reporting inconsistencies, or decision errors.
# First create an empty variable for Type to avoid a NOTE in the R CMD Check
# that there is "no visible binding for global variable"
Type <- rep(NA, nrow(x))
x <- cbind(x, Type)
x$Type[x[[VAR_ERROR]] == "FALSE" &
x[[VAR_DEC_ERROR]] == "FALSE"] <- "Correctly Reported"
x$Type[x[[VAR_ERROR]] == "TRUE" &
x[[VAR_DEC_ERROR]] == "FALSE"] <- "Reporting Inconsistency"
x$Type[x[[VAR_ERROR]] == "TRUE" &
x[[VAR_DEC_ERROR]] == "TRUE"] <- "Decision Error"
#Create ggplot "APA format" theme
apatheme <- theme_bw() +
theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line()
)
#If no grouping variable is specified, don't use faceting
if (is.null(group)) {
#Create plot "p"; map computed p-values to x-axis, reported p-values to y-axis, and
#color to the Type variable created earlier. Environment command allows apatheme to
#be applied later because of bug when creating functions with ggplot2
p <- ggplot(x,
aes(y = .data[[VAR_COMPUTED_P]],
x = .data[[VAR_REPORTED_P]],
col = Type),
environment = environment())
#Add data points to plot
p + geom_point(size = 2.5) +
#Add vertical grey dashed line, located at specified alpha level
geom_vline(xintercept = alpha,
color = "grey60",
linetype = "dashed") +
#Add horizontal grey dashed line, located at specified alpha level
geom_hline(yintercept = alpha,
color = "grey60",
linetype = "dashed") +
#Add a line showing where accurately reported p-values should fall
geom_abline(intercept = 0,
slope = 1,
color = "grey60") +
#Add text annotations demarcating over-/under-estimated areas of the plot
annotate("text",
x = 0.5,
y = .10,
label = "overestimated") +
annotate("text",
x = 0.5,
y = .90,
label = "underestimated") +
#Rename the x- and y-axis, and manually specify breaks
scale_x_continuous(
name = "Reported p-values",
breaks = c(0.00, 0.05, 0.10, 0.25, 0.50, 0.75, 1.0),
limits = c(0, 1)
) +
scale_y_continuous(
name = "Computed p-values",
breaks = c(0.00, 0.05, 0.10, 0.25, 0.50, 0.75, 1.0),
limits = c(0, 1)
) +
#Manually specify greyscale colors for different levels of Type
scale_color_manual(
breaks = c(
"Correctly Reported",
"Reporting Inconsistency",
"Decision Error"
),
values = c("grey80", "black", "grey50")
) +
apatheme
} else {
#If grouping variable is specified, use for faceting
#Create plot "p"; map computed p-values to x-axis, reported p-values to y-axis, and
#color to the Type variable created earlier. Environment command allows apatheme to
#be applied later because of bug when creating functions with ggplot2
p <- ggplot(x,
aes(y = rlang::.data[[VAR_COMPUTED_P]],
x = rlang::.data[[VAR_REPORTED_P]],
col = Type),
environment = environment())
#Add data points to plot
p + geom_point(size = 2.5) +
#Add vertical grey dashed line, located at specified alpha level
geom_vline(xintercept = alpha,
color = "grey60",
linetype = "dashed") +
#Add horizontal grey dashed line, located at specified alpha level
geom_hline(yintercept = alpha,
color = "grey60",
linetype = "dashed") +
#Add a line showing where accurately reported p-values should fall
geom_abline(intercept = 0,
slope = 1,
color = "grey60") +
#Add text annotations demarcating over-/under-estimated areas of the plot
annotate("text",
x = 0.5,
y = .10,
label = "overestimated") +
annotate("text",
x = 0.5,
y = .90,
label = "underestimated") +
#Rename the x- and y-axis, and manually specify breaks
scale_x_continuous(name = "Reported p-values",
breaks = c(0.00, 0.05, 0.10, 0.25, 0.50, 0.75, 1.0)) +
scale_y_continuous(name = "Computed p-values",
breaks = c(0.00, 0.05, 0.10, 0.25, 0.50, 0.75, 1.0)) +
#Manually specify greyscale colors for different levels of Type
scale_color_manual(
breaks = c(
"Correctly Reported",
"Reporting Inconsistency",
"Decision Error"
),
values = c("grey80", "black", "grey50")
) +
facet_grid(stats::as.formula(paste(group, "~ ."))) +
apatheme
}
} else {
# Extract limit args:
args <- list(...)
if (is.null(args$xlim))
args$xlim <- c(0, 1)
if (is.null(args$ylim))
args$ylim <- c(0, 1)
reported <- x[[VAR_REPORTED_P]]
computed <- x[[VAR_COMPUTED_P]]
# replace 'ns' for > alpha
reported[x[[VAR_P_COMPARISON]] == "ns"] <- alpha
# scatterplot of reported and recalculated p values
do.call(plot.default, c(
list(
x = reported,
y = computed,
xlab = "reported p value",
ylab = "recalculated p value",
pch = 20
),
args
))
# orange dot for error
points(reported[x[[VAR_ERROR]]],
computed[x[[VAR_ERROR]]],
pch = 20, col = "orange")
# red dot for gross error (non-sig reported as sig and vice versa)
points(reported[x[[VAR_DEC_ERROR]]],
computed[x[[VAR_DEC_ERROR]]],
pch = 20, col = "red")
# indicate exact p values with diamond
points(x[[VAR_REPORTED_P]][x[[VAR_P_COMPARISON]] == "="],
computed[x[[VAR_P_COMPARISON]] == "="],
pch = 5)
# general layout of figure:
# lines & text to indicate under- and overestimates
abline(h = .05)
abline(v = .05)
abline(0, 1)
text(.8, .4, "overestimated")
text(.4, .8, "underestimated")
text(0, .53, "non-sig", cex = .7)
text(0, .50, "reported", cex = .7)
text(0, .47, "as sig", cex = .7)
text(.5, 0, "sig reported as non-sig", cex = .7)
par(xpd = TRUE)
legend(
.88,
-.15,
pch = c(20, 20, 5),
col = c("orange", "red", "black"),
legend = c("p inconsistency", "decision error", "exact (p = ...)"),
cex = .8
)
par(xpd = FALSE)
}
}
|
90cdb24504a675b1292565bf3989a241fecb5924
|
24d4b46bf87a1f49adb3af46f58c8d6c6fc7dcf7
|
/scripts/plotter_stacked_bar.R
|
1d8ca0f354d5c3192ba486060682e9574a98e08d
|
[] |
no_license
|
SunnyYShao/plotter
|
a0c17b60b6f3db6f413500d82f58edcf1d354493
|
d7865577b7d1f8ff1287fbf1f572962f85f313d3
|
refs/heads/master
| 2022-12-02T17:29:14.993101
| 2020-08-13T19:23:05
| 2020-08-13T19:23:05
| 169,467,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,345
|
r
|
plotter_stacked_bar.R
|
# plotter stacked bar -----------------------------------------------------
#percent
# Horizontal 100%-stacked bar chart of `estimate` by `group`, filled by `key`.
#
# Args:
#   data            long-format data with columns group, key, key_order,
#                   estimate; the chart title is read from data[1, 4]
#                   (column 4 of the first row -- TODO confirm with callers)
#   color_type      palette name passed to scale_fill_aapidata()
#   style_function  a ggplot theme/style object appended to the plot
#   logo_image_path path to the logo added by finalise_plot()
#   label_color, label_size  appearance of the in-bar percentage labels
#   subtitle, footnote       plot subtitle and caption text
#   directory, file_name     output location, concatenated with paste0()
#
# Side effect: writes the finished plot to disk via finalise_plot().
plotter_stacked_pct <- function(data, color_type, style_function, logo_image_path, label_color, label_size, subtitle, footnote, directory, file_name){
  title <- data[1,4]
  plot <- data %>%
    ggplot(aes(x = reorder(group, key_order), y = estimate, fill = key))+
    geom_bar(stat="identity",
             width=.7,
             position = position_stack(reverse = TRUE))+
    coord_flip()+
    scale_y_continuous(labels=scales::percent, limits = c(0, 1.05))+
    # hide labels on slices below 3.5% to avoid overplotting
    geom_text(aes(label = ifelse(estimate < 0.035, NA, scales::percent(estimate, 1))),
              position = position_stack(reverse = TRUE, vjust = .5),
              size=label_size, color = label_color)+
    style_function+
    scale_fill_aapidata(palette = color_type)+
    # fix: the original called theme(legend.position = "top") twice in a row;
    # the second call fully overrode the first, so the redundant one is removed
    theme(legend.position = 'top',
          legend.spacing.x = unit(0.2, 'cm'))+
    labs(title = title, subtitle = subtitle, fill="", x="", y="", caption=footnote)
  finalise_plot(plot_name = plot,
                width_pixels = 1040, height_pixels = 800,
                logo_image_path = logo_image_path,
                save_filepath = paste0(directory, file_name))
}
#percent vertical
# Vertical 100%-stacked bar chart of `estimate` by `group`, filled by `key`.
# Same contract as plotter_stacked_pct() but bars are vertical, labels use one
# decimal place, and the label cut-off is 0.8%.
#
# Side effect: writes the finished plot to disk via finalise_plot().
plotter_Vstacked_pct <- function(data, color_type, style_function, logo_image_path, label_color, label_size, subtitle, footnote, directory, file_name){
  # chart title is stored in the 4th column of the first row -- TODO confirm
  title <- data[1,4]
  plot <- data %>%
    ggplot(aes(x = reorder(group, key_order), y = estimate, fill = key))+
    geom_bar(stat="identity",
             width=.7,
             position = position_stack(reverse = FALSE))+
    scale_y_continuous(labels=scales::percent)+
    # hide labels on slices below 0.8% to avoid overplotting
    geom_text(aes(label = ifelse(estimate < .008, NA, scales::percent(estimate, 0.1))),
              position = position_stack(vjust = .5),
              size=label_size, color = label_color)+
    style_function+
    scale_fill_aapidata(palette = color_type)+
    # fix: dropped the redundant theme(legend.position = "top") call that was
    # immediately overridden by the theme() call below
    theme(legend.position = 'top',
          legend.spacing.x = unit(1.0, 'cm'))+
    labs(title = title, subtitle = subtitle, fill="", x="", y="", caption=footnote)
  finalise_plot(plot_name = plot,
                width_pixels = 1040, height_pixels = 800,
                logo_image_path = logo_image_path,
                save_filepath = paste0(directory, file_name))
}
|
6b4a04e74ed3590949055c2c988d9144d544f626
|
923876b86f83b74cf3f5c472e05ee05b41354b79
|
/cvičení 6/skript5.R
|
b8f12a9d4e3fb0d76b8c3a440d65511bfd2a7551
|
[] |
no_license
|
dreryos/biostatistika
|
afa3bbda36c17c940b3effcefc64b639c7d4d017
|
a744a900bbbae7305298d4d54b7d6e7000f68954
|
refs/heads/master
| 2023-04-29T20:33:07.387834
| 2021-05-17T11:40:57
| 2021-05-17T11:40:57
| 343,724,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,401
|
r
|
skript5.R
|
# Change the working directory to the 'biostat' folder
setwd("C:/biostat")
# or use the menu: File -> Change dir
# check where you are working now
getwd()
# start R Commander
library(Rcmdr)
# open the script named skript5.R via the menu:
# File -> Open script file
# (fix: this instruction was left as a bare, unparsable line of code
#  -- `file-open script file` -- which made the script fail with a syntax
#  error when sourced; it is now a comment)
# Load the 'Kojeni' (breastfeeding) data set
data(Kojeni)
# if the data are stored elsewhere, use the full path to the file
load("C:/home/monika/přf/statistika1314/Data/Kojeni.RData")
# make the variables of 'Kojeni' directly accessible by name
# (note: attach() is discouraged in scripts; kept for the teaching workflow)
attach(Kojeni)
# set 'Kojeni' as the active data set
#1. Confidence interval for the mean of a normal distribution
#1.A Examine the variable 'hmotnost' (weight at 24 weeks)
#Make a boxplot, a histogram and a normal Q-Q plot
#Could weight have a normal distribution?
par(mfrow=c(2,2))
boxplot(Kojeni$hmotnost, ylab="hmotnost",col="purple")
Hist(Kojeni$hmotnost, scale="frequency", breaks="Sturges", col="purple")
qqPlot(Kojeni$hmotnost)
qqnorm(hmotnost)
qqline(hmotnost,col="purple")
#Estimate the mean of the weight distribution
#(point and interval estimates)
mean(hmotnost)
#manual 95% CI from the t distribution (n = 99, df = 98)
mean(hmotnost)-qt(c(0.975), df=98, lower.tail=TRUE)*sd(hmotnost)/sqrt(99)
mean(hmotnost)+qt(c(0.975), df=98, lower.tail=TRUE)*sd(hmotnost)/sqrt(99)
#In R Commander choose Statistics -> Means -> Single-sample t-test
t.test(Kojeni$hmotnost, alternative='two.sided', mu=0.0, conf.level=.95)
#Store the confidence interval limits
tt<-t.test(Kojeni$hmotnost, alternative='two.sided', mu=0.0, conf.level=.95)
(isp<-tt$conf.int)
#Mark the confidence interval in the boxplot
boxplot(Kojeni$hmotnost, ylab="hmotnost",col="purple")
abline(h=isp)
#and in the histogram
Hist(Kojeni$hmotnost, scale="frequency", breaks="Sturges", col="purple")
abline(v=isp)
#How many observations lie inside the confidence interval for the mean?
sum(hmotnost>isp[1]&hmotnost<isp[2])
#1.B Could birth weight also have a normal distribution?
par(mfrow=c(2,2))
#Find point and interval estimates of its mean
#1.C Examine birth weight separately for girls and boys
boxplot(porHmotnost~Hoch, ylab="porHmotnost", xlab="Hoch", data=Kojeni)
par(mfrow=c(2,2))
Hist(Kojeni$porHmotnost[Hoch=="hoch"], scale="frequency", breaks="Sturges",
col="skyblue")
Hist(Kojeni$porHmotnost[Hoch=="dívka"], scale="frequency", breaks="Sturges",
col="pink")
qqPlot(Kojeni$porHmotnost[Hoch=="hoch"], col="blue")
qqPlot(Kojeni$porHmotnost[Hoch=="dívka"], col="red")
tapply(porHmotnost,Hoch,qqPlot)
#Find interval and point estimates of the mean for girls and
#boys separately
tapply(porHmotnost, Hoch,t.test)
#graphical display
plotMeans(Kojeni$porHmotnost, Kojeni$Hoch, error.bars="conf.int",
level=0.95)
#1.D Find point and interval estimates of the mother's age. Check the assumptions.
#2. Confidence intervals for a probability
#A sample of 19 Faculty of Science students; estimate the probability that
#a student knows (at least a little) German:
en<-19
yps<-12
(odh<-yps/en)
#Wald (normal-approximation) 95% confidence interval for the proportion
(isp1<-odh-sqrt(odh*(1-odh)/en)*qnorm(0.975))
(isp2<-odh+sqrt(odh*(1-odh)/en)*qnorm(0.975))
#3. Testing the probability of an event
#Null hypothesis p=1/2, two-sided alternative
binom.test(12,19,p=0.5,alternative="two.sided")
#4. One-sample t-test
#4.A Decide whether the mean weight is 7900 grams,
#against the alternative that it is not equal to this number;
#use significance level 0.05
t.test(Kojeni$hmotnost, alternative='two.sided', mu=7900, conf.level=.95)
#What if we change the level to 0.01 -- do we have to recompute everything?
#How does the test result relate to the confidence interval?
#Now test against the alternative that the mean is less than 7900
t.test(Kojeni$hmotnost, alternative='less', mu=7900, conf.level=.95)
#Checking the assumptions of the t-test
#Work with the subset of boys
#4.B Create a new data set KojeniH
detach(Kojeni)
#Use the menu: Data -> Active data set -> Subset
KojeniH <- subset(Kojeni, subset=Hoch=="hoch")
dim(KojeniH)
#Test whether the mean weight among boys is 7900, level 0.05
with(KojeniH, (t.test(hmotnost, alternative='two.sided', mu=7900,
conf.level=.95)))
#4.C Try the same for the subset of girls
#check the assumptions for both girls and boys
attach(Kojeni)
par(mfrow=c(2,2))
tapply(hmotnost,Hoch,qqPlot)
#4.D Does the father's height have a normal distribution?
#Make Kojeni the active data set again
#Test whether the mean father's height is 182 cm; check the assumptions
#4.E Test whether birth weight has mean 3500 g,
#against the alternative that the mean differs.
#4.F Test whether the mothers' age has mean 165 cm. Check the assumptions.
#(NOTE(review): '165 cm' for age looks like a typo in the original exercise)
#5. Power of the one-sample t-test
#Testing whether birth weight has mu0=3500
#What does the power depend on?
#How to choose the sample size so that the power against the alternative
#mu1-mu0=100g is at least 0.8?
#take the standard deviation to be 440
((qnorm(0.975)+qnorm(0.8))/100)^2*440^2
#What if we want power 0.9
((qnorm(0.975)+qnorm(0.9))/100)^2*440^2
#What if we reduce the difference between alternatives to 50?
((qnorm(0.975)+qnorm(0.8))/50)^2*440^2
#6. Two-sample t-test
#6.1 Does the mean birth weight depend on sex?
#Plot
plotMeans(Kojeni$porHmotnost, Kojeni$Hoch, error.bars="conf.int",
level=0.95)
#What are the assumptions of the two-sample t-test?
#Independence
#Normality
par(mfrow=c(2,2))
qqPlot(Kojeni$porHmotnost[Hoch=="hoch"], dist= "norm")
qqPlot(Kojeni$porHmotnost[Hoch=="dívka"], dist= "norm")
shapiro.test(Kojeni$porHmotnost[Hoch=="hoch"])
shapiro.test(Kojeni$porHmotnost[Hoch=="dívka"])
#or
tapply(porHmotnost,Hoch, qqPlot)
tapply(porHmotnost,Hoch,shapiro.test)
#Equal variances
boxplot(porHmotnost~Hoch, ylab="porHmotnost", xlab="Hoch", data=Kojeni)
#Test for equality of variances: Statistics -> Variances -> F-test
tapply(Kojeni$porHmotnost, Kojeni$Hoch, var, na.rm=TRUE)
var.test(porHmotnost ~ Hoch, alternative='two.sided', conf.level=.95,
data=Kojeni)
leveneTest(Kojeni$porHmotnost, Kojeni$Hoch)
bartlett.test(porHmotnost ~ Hoch, data=Kojeni)
#t-test assuming equal variances
t.test(porHmotnost~Hoch, alternative='two.sided', conf.level=.95,
var.equal=TRUE, data=Kojeni)
#What is the conclusion?
#What is the meaning of the confidence interval?
#t-test without assuming equal variances (Welch)
t.test(porHmotnost~Hoch, alternative='two.sided', conf.level=.95,
var.equal=FALSE, data=Kojeni)
#What changed?
#6.2 Does the mean weight depend on sex?
#Plot?
#Check the assumptions and test.
|
7454604dfc293fd2d8e1e75c30aba6637ea1fcf7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/soundecology/examples/sound_raster.Rd.R
|
ca10e60ce2dabd3c77413f53dee86c288e7a8cde
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 394
|
r
|
sound_raster.Rd.R
|
# Extracted example code for the soundecology 'sound_raster' help page
# (ASCII raster from a sound file). The calls are wrapped in "Not run"
# because they require .wav files on disk.
library(soundecology)
### Name: sound_raster
### Title: ASCII raster from sound file
### Aliases: sound_raster
### Keywords: soundecology sound_raster
### ** Examples
## Not run: 
##D sound_raster(wavfile = "file1.wav")
##D 
##D sound_raster(wav_directory = "/home/user/wavdirectory")
##D 
##D sound_raster(wav_directory = "/home/user/wavdirectory", no_cores = 4)
##D 
## End(Not run)
|
b7cf0fab06a982e63091d718114d7d80ead6951f
|
8847282f697f1bf9da75feabe758307b72712e54
|
/dataAnalysisCodes/watchERP/02-ter-p3Classification/createDataFrameCount.R
|
fffe50bd24e21d4c7b733011f4abd35ce04ab44d
|
[] |
no_license
|
adriencombaz/HybBciCode
|
a17b48a8a01fcf76e8c22d00732b8ceb1e642dce
|
755432a2a06c2abe2eb5adbca33d1348bcf9ac68
|
refs/heads/master
| 2020-05-19T17:49:50.102762
| 2013-10-02T13:54:34
| 2013-10-02T13:54:34
| 6,388,055
| 2
| 1
| null | 2016-01-20T10:42:13
| 2012-10-25T13:57:07
|
Matlab
|
UTF-8
|
R
| false
| false
| 5,167
|
r
|
createDataFrameCount.R
|
# Build a per-cell accuracy data frame (count and ratio of correct trials)
# from the long-format 'accData' produced by createDataFrame.R.
# NOTE(review): setwd() + rm(list = ls()) at the top of a script are fragile;
# kept unchanged because the relative source() calls depend on this directory.
setwd("d:/KULeuven/PhD/Work/Hybrid-BCI/HybBciCode/dataAnalysisCodes/watchERP/02-ter-p3Classification/")
rm(list = ls())
source("createDataFrame.R")
source("cleanPlot.R")
#################################################################################################################
# factor levels of the experimental design; accData is assumed to carry
# columns subject, frequency, nRep, classifier, correctness -- TODO confirm
# against createDataFrame.R
allSubs <- levels(accData$subject)
allFreqs <- levels(accData$frequency)
allNreps <- unique(accData$nRep)
allClassif<- levels(accData$classifier)
nSubs <- length(allSubs)
nFreqs <- length(allFreqs)
nReps <- length(allNreps)
nClassif<- length(allClassif)
# preallocate one slot per subject x frequency x nRep x classifier cell
subject <- rep(NA, nSubs*nFreqs*nReps*nClassif)
frequency <- rep(NA, nSubs*nFreqs*nReps*nClassif)
nRep <- rep(NA, nSubs*nFreqs*nReps*nClassif)
classifier <- rep(NA, nSubs*nFreqs*nReps*nClassif)
correctnessCount <- rep(NA, nSubs*nFreqs*nReps*nClassif)
correctnessRatio <- rep(NA, nSubs*nFreqs*nReps*nClassif)
count <- 1
for (iS in 1:nSubs){
for (iF in 1:nFreqs){
for (iR in 1:nReps){
for (iC in 1:nClassif){
subject[count] <- allSubs[iS]
frequency[count] <- allFreqs[iF]
nRep[count] <- allNreps[iR]
classifier[count] <- allClassif[iC]
# restrict accData to the current design cell
temp <- subset(accData, subject == allSubs[iS])
temp <- subset(temp, frequency == allFreqs[iF])
temp <- subset(temp, nRep == allNreps[iR])
temp <- subset(temp, classifier == allClassif[iC])
# number and proportion of correct trials in this cell
correctnessCount[count] <- sum(temp$correctness)
correctnessRatio[count] <- sum(temp$correctness) / length((temp$correctness))
count <- count+1
}
}
}
}
accDataCount <- data.frame(subject, classifier, frequency, nRep, correctnessCount, correctnessRatio)
#################################################################################################################
# Per-subject accuracy plots for the 'normal' classifier
library(ggplot2)
library(scales)
dataToPlot <- subset(accDataCount, classifier=="normal")
# raw accuracy vs number of repetitions, one panel per subject
pp <- ggplot( dataToPlot, aes(nRep, correctnessRatio, colour=frequency, shape=frequency) )
pp <- pp + geom_point(position = position_jitter(w = 0.2, h = 0), size = 3)
pp <- pp + facet_wrap( ~subject )
pp <- cleanPlot(pp)
pp <- pp + theme(legend.position=c(0.8334,0.1667))
pp
# mean accuracy across subjects, with a linear trend per frequency
pp3 <- ggplot( dataToPlot, aes(nRep, correctnessRatio, colour=frequency, shape=frequency) )
pp3 <- pp3 + stat_summary(fun.y = mean, geom="point", position = position_jitter(w = 0.2, h = 0), size = 3)
pp3 <- cleanPlot(pp3)
# NOTE(review): the second scale_y_continuous() below replaces the logit-scaled
# one above (ggplot2 keeps only the last y scale and emits a message) --
# confirm which axis transform is actually intended
pp3 <- pp3 + scale_y_continuous(limits=c(0, 3), trans=logit_trans())
pp3 <- pp3 + scale_y_continuous(limits=c(0, 3))
pp3 <- pp3 + geom_smooth(method="lm", se=F)
pp3 <- pp3 + theme(legend.position=c(0.8334,0.1667))
pp3
#################################################################################################################
# average over subjects and plot logit
allFreqs <- levels(accData$frequency)
allNreps <- unique(accData$nRep)
allClassif<- levels(accData$classifier)
nFreqs  <- length(allFreqs)
nReps   <- length(allNreps)
nClassif<- length(allClassif)
# preallocate one slot per frequency x nRep x classifier combination
frequency   <- rep(NA, nFreqs*nReps*nClassif)
nRep        <- rep(NA, nFreqs*nReps*nClassif)
classifier  <- rep(NA, nFreqs*nReps*nClassif)
correctnessRatio      <- rep(NA, nFreqs*nReps*nClassif)
correctnessRatioLogit <- rep(NA, nFreqs*nReps*nClassif)
# BUG FIX: reset the running index. Previously 'count' kept the value left
# over from the per-subject section above, so every assignment below landed
# past the end of the freshly created vectors, which were silently grown and
# left full of leading NAs in accDataGdMean.
count <- 1
for (iF in 1:nFreqs){
  for (iR in 1:nReps){
    for (iC in 1:nClassif){
      # grand mean over subjects for this design cell
      temp <- subset(accData, frequency == allFreqs[iF])
      temp <- subset(temp, nRep == allNreps[iR])
      temp <- subset(temp, classifier == allClassif[iC])
      frequency[count]  <- allFreqs[iF]
      nRep[count]       <- allNreps[iR]
      classifier[count] <- allClassif[iC]
      correctnessRatio[count]      <- mean(temp$correctness)
      # logit of the mean accuracy (infinite when the mean is exactly 0 or 1)
      correctnessRatioLogit[count] <- log( mean(temp$correctness) / (1-mean(temp$correctness)) )
      count <- count+1
    }
  }
}
accDataGdMean <- data.frame(classifier, frequency, nRep, correctnessRatio, correctnessRatioLogit)
# library(scales)
# Grand-mean accuracy vs repetitions on a logit-transformed axis...
dataToPlot <- subset(accDataGdMean, classifier=="normal")
pp1 <- ggplot( dataToPlot, aes(nRep, correctnessRatio, colour=frequency, shape=frequency) )
pp1 <- pp1 + geom_point(position = position_jitter(w = 0.2, h = 0), size = 3)
pp1 <- pp1 + scale_y_continuous(trans=logit_trans())
pp1 <- cleanPlot(pp1)
# pp1 + geom_smooth(method="lm", se=F)
pp1
# ...and the precomputed logit values with a linear fit per frequency
dataToPlot <- subset(accDataGdMean, classifier=="normal")
pp2 <- ggplot( dataToPlot, aes(nRep, correctnessRatioLogit, colour=frequency, shape=frequency) )
pp2 <- pp2 + geom_point(position = position_jitter(w = 0.2, h = 0), size = 3)
pp2 <- cleanPlot(pp2)
pp2 + geom_smooth(method="lm", se=F)
#################################################################################################################
# Mixed-model test of the frequency effect on the number of correct trials:
# compare nested models with/without 'frequency' via a likelihood-ratio anova.
library(lme4)
accDataCount1 <- subset(accDataCount, classifier=="normal")
accDataCount1 <- subset(accDataCount1, select = -c(classifier))
# accDataCount1$nRep <- as.factor(accDataCount1$nRep)
str(accDataCount1)
summary(accDataCount1)
lmH1 <- lmer( correctnessCount ~ frequency + ( 1 | subject/nRep ), data = accDataCount1 )
# BUG FIX: the null model must use the same response as lmH1; the original
# used 'correctness', which does not exist in accDataCount1 (its columns are
# correctnessCount / correctnessRatio), so lmer() failed and the anova below
# compared nothing.
lmH0 <- lmer( correctnessCount ~ ( 1 | subject/nRep ), data = accDataCount1 )
anova(lmH0, lmH1)
#################################################################################################################
|
d5dcbaef604fe821f2975bc7d862290b97e767ae
|
2fd6208ee163b1f959d9960c7fad4c64da96d649
|
/tests/testthat/test-bold_identify.R
|
f3ac4870ac092347c1cf2412c9b2cbc425fe0cc3
|
[
"MIT"
] |
permissive
|
ropensci/bold
|
c1d661c27e8a44e5ec5f8b123e9d8b8ab3b25fd0
|
dc46f3be5f0dc5404e9288a9836739d12d127207
|
refs/heads/master
| 2023-07-22T16:04:23.257812
| 2023-06-13T16:19:44
| 2023-06-13T16:19:44
| 1,950,836
| 17
| 16
|
NOASSERTION
| 2023-09-08T19:49:36
| 2011-06-25T03:23:46
|
R
|
UTF-8
|
R
| false
| false
| 2,485
|
r
|
test-bold_identify.R
|
# Unit tests for bold_identify(); HTTP interactions are replayed from the
# shared "bold_identify" vcr cassette, so no live network access is needed.
context("bold_identify")
test_that("bold_identify returns the correct object", {
  skip_on_cran()
  vcr::use_cassette("bold_identify", {
    test <- bold_identify(sequences = sequences$seq1)
  })
  # one data.frame of matches per input sequence
  expect_is(test, 'list')
  expect_is(test[[1]], 'data.frame')
  expect_is(test[[1]]$ID, 'character')
})
test_that("bold_identify returns the correct object (db)", {
  skip_on_cran()
  # same call routed through an alternative reference database
  vcr::use_cassette("bold_identify", {
    test <- bold_identify(sequences = sequences$seq1, db = 'COX1_SPECIES')
  })
  expect_is(test, 'list')
  expect_is(test[[1]], 'data.frame')
  expect_is(test[[1]]$ID, 'character')
})
test_that("bold_identify returns the correct object (response)", {
  skip_on_cran()
  # response = TRUE returns the raw HTTP response instead of parsed data
  vcr::use_cassette("bold_identify", {
    test <- bold_identify(sequences = sequences$seq1, response = TRUE)
  })
  expect_is(test, "list")
  test <- test[[1]]
  expect_is(test$response, "HttpResponse")
  expect_equal(test$response$status_code, 200)
  expect_equal(test$response$response_headers$`content-type`, "text/xml")
  expect_is(test$warning, "character")
  expect_equal(test$warning, "")
})
test_that("bold_identify works for XML that contains &", {
  skip_on_cran()
  # regression test: '&' in the returned XML must not break parsing
  test_seq <- "AACCCTATACTTTTTATTTGGAATTTGAGCGGGTATAGTAGGTACTAGCTTAAGTATATTAATTCGTCTAGAGCTAGGACAACCCGGTGTATTTTTAGAAGATGACCAAACCTATAACGTTATTGTAACAGCCCACGCTTTTATTATAATTTTCTTCATAATTATACCAATCATAATTGGA"
  vcr::use_cassette("bold_identify", {
    test <- bold_identify(test_seq)
    expect_is(test, 'list')
    expect_is(test[[1]], 'data.frame')
    expect_is(test[[1]]$ID, 'character')
  })
})
test_that("bold_identify skips the identification when the sequences has invalid characters or when the sequence is too short", {
  # invalid inputs yield NA entries with an explanatory "error" attribute
  test <- bold_identify(sequences = c(substr(sequences$seq1, 1, 50), gsub("N", "0", sequences$seq3)))
  expect_is(test, 'list')
  expect_true(all(is.na(test)))
  expect_length(attributes(test[[1]]), 2L)
  expect_equal(attr(test[[1]], "error"), "Sequence must be at least 80 bp")
  expect_equal(attr(test[[2]], "error"), "Sequence contains invalid characters")
})
test_that("bold_identify fails well", {
  # argument validation errors
  expect_error(bold_identify(),
               "argument 'sequences' is missing, with no default")
  expect_error(bold_identify(sequences = 1),
               "'sequences' must be of class character")
  expect_error(bold_identify(sequences = "", db = "test"),
               "'test' is not a valid db")
  expect_error(bold_identify(sequences = "", db = c("COX1", "COX1_SPECIES")),
               "'db' must be length 1")
})
|
76b2b3304d2b4da6e4b4013663ffc22846ec016b
|
cf62c1acac62cc4bf102447f0fc4285977782612
|
/R/summary.scam.R
|
3404839d76eea6b3fb7fd0ffde9746f960525f93
|
[] |
no_license
|
cran/scam
|
d6d00a723a0462416a79178038d61cd30fbe793e
|
cc080df129f3611eadf93cef7259324def3f83dd
|
refs/heads/master
| 2023-04-30T11:29:17.031975
| 2023-04-14T09:00:08
| 2023-04-14T09:00:08
| 17,699,475
| 4
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,315
|
r
|
summary.scam.R
|
############################################################
## summary functions for scam() (clone of summary.gam())...
## of mgcv version 1.7-22,
## but without p.type=0,1,...,5 as summary input variable,
## only with freq=T/F....
###########################################################
##### mgcv::: smoothTest
smoothTest <- function(b, X, V, eps = .Machine$double.eps^.5) {
  ## Cox/Koh-type test statistic for a smooth term.
  ## b: coefficient vector so that f = X b; V: cov(b).
  ## Returns list(stat, pval) with the p-value from the Liu et al. / Pearson
  ## chi-square-mixture approximation (liu2); 'eps' is kept for interface
  ## compatibility.
  dec <- qr(X)
  Rf <- qr.R(dec)
  ## project cov(b) through R (pivot-corrected) and symmetrize against
  ## rounding error
  Vp <- Rf %*% V[dec$pivot, dec$pivot] %*% t(Rf)
  Vp <- (Vp + t(Vp)) / 2
  eg <- eigen(Vp, symmetric = TRUE)
  ## statistic: squared norm of the coefficients rotated into the eigenbasis
  rot <- t(eg$vectors) %*% Rf %*% b
  stat <- sum(rot^2)
  ## chi-square mixture weights = eigenvalues of the projected covariance
  wts <- as.numeric(eg$values[seq_len(ncol(X))])
  list(stat = stat, pval = liu2(stat, wts))
}
###### mgcv::: liu2
liu2 <- function(x, lambda, h = rep(1, length(lambda)), lower.tail = FALSE) {
  ## Approximate Pr[sum_i lambda_i chi^2_{h_i} > x] (upper tail by default)
  ## using the moment-matched noncentral chi-square of Liu, Tang & Zhang
  ## (2009), which for central components reduces to Pearson (1959).
  ## An exact alternative is Davies' method (CompQuadForm::davies).
  ## Can be inaccurate deep in the lower tail.
  if (length(h) != length(lambda)) stop("lambda and h should have the same length!")
  ## running products lambda^k * h give the cumulant ingredients c1..c4
  w <- lambda * h
  c1 <- sum(w)              # mean of the quadratic form
  w <- w * lambda
  c2 <- sum(w)              # ~ variance/2
  w <- w * lambda
  c3 <- sum(w)              # skewness ingredient
  c4 <- sum(w * lambda)     # kurtosis ingredient
  s1 <- c3 / c2^1.5
  s2 <- c4 / c2^2
  ## standardize the evaluation point
  z <- (x - c1) / sqrt(2 * c2)
  if (s1^2 > s2) {
    ## match skewness and kurtosis with a noncentral chi-square
    a <- 1 / (s1 - sqrt(s1^2 - s2))
    delta <- s1 * a^3 - a^2
    df <- a^2 - 2 * delta
  } else {
    ## fall back to a central chi-square matching the first three moments
    a <- 1 / s1
    delta <- 0
    df <- c2^3 / c3^2
  }
  ## map back to the reference chi-square scale and evaluate
  pchisq(z * sqrt(2) * a + df + delta, df = df, ncp = delta,
         lower.tail = lower.tail)
}
#### mgcv::: simf
simf <- function(x, a, df, nq = 50) {
  ## Approximate Pr[T > x] for T = sum(a_i chi^2_1) / (chi^2_df / df),
  ## i.e. an F-like ratio with a chi-square-mixture numerator, by midpoint
  ## quadrature over the denominator chi-square (nq nodes).
  ## e.g. 1-pf(4/3,3,40); simf(4,rep(1,3),40); 1-pchisq(4,3)
  mid <- (seq_len(nq) - 0.5) / nq          # midpoint probabilities
  denom <- qchisq(mid, df)                 # denominator quantiles
  tail.p <- liu2(x * denom / df, a)        # Pearson/Liu approx at each node
  sum(tail.p) / nq
}
#### the same as mgcv::: recov.gam
recov.scam <- function(b,re=rep(0,0),m=0) {
## Frequentist covariance matrix for the coefficients of a fitted scam
## object 'b', but with the data distribution implied by treating the smooth
## terms indexed by 're' as fully random effects (the usual frequentist
## covariance results when 're' is empty).
## If m>0 it indexes a smooth term, not in 're', whose unpenalized
## covariance square-root factor 'Rm' is also required (with the 're'
## elements treated as random).
## Returns list(Ve.t, Rm).
  if (!inherits(b,"scam")) stop("recov works with fitted scam objects only")
  ## smoothing parameters: prefer the full vector when present
  if (is.null(b$full.sp)) sp <- b$sp else sp <- b$full.sp
  if (length(re)<1) {
    if (m>0) {
      ## no random terms: still need the total penalty matrix for term m's
      ## unpenalized cov factor
      np <- length(coef(b))
      k <- 1;S1 <- matrix(0,np,np)
      for (i in 1:length(b$smooth)) {
        ns <- length(b$smooth[[i]]$S)
        ind <- b$smooth[[i]]$first.para:b$smooth[[i]]$last.para
        if (ns>0) for (j in 1:ns) {
          S1[ind,ind] <- S1[ind,ind] + sp[k]*b$smooth[[i]]$S[[j]]
          k <- k + 1
        }
      }
      LRB <- rbind(b$R,t(mroot(S1)))
      ii <- b$smooth[[m]]$first.para:b$smooth[[m]]$last.para
      ## ii indexes the cols of LRB for smooth m, which need
      ## to be moved to the end before the QR...
      LRB <- cbind(LRB[,-ii],LRB[,ii])
      ii <- (ncol(LRB)-length(ii)+1):ncol(LRB)
      Rm <- qr.R(qr(LRB,tol=0,LAPACK=FALSE))[ii,ii] ## unpivoted QR
    } else Rm <- NULL
    ## symmetrized frequentist covariance, nothing treated as random
    return(list(Ve.t=(t(b$Ve.t)+b$Ve.t)*.5,Rm=Rm))
  }
  if (m%in%re) stop("m can't be in re")
  ## partition coefficients into "fixed" and "random" parts, with
  ## corresponding penalties S1 ("fixed") and S2 ("random")
  p <- length(b$coefficients)
  rind <- rep(FALSE,p) ## random coefficient index
  for (i in 1:length(re)) {
    rind[b$smooth[[re[i]]]$first.para:b$smooth[[re[i]]]$last.para] <- TRUE
  }
  p2 <- sum(rind) ## number random
  p1 <- p - p2 ## number fixed
  map <- rep(0,p) ## remaps param indices to indices in split version
  map[rind] <- 1:p2 ## random
  map[!rind] <- 1:p1 ## fixed
  ## split R...
  R1 <- b$R[,!rind] ## fixed effect columns
  R2 <- b$R[,rind] ## random effect columns
  ## assemble the penalty matrices S1 (fixed part) and S2 (random part)
  S1 <- matrix(0,p1,p1);S2 <- matrix(0,p2,p2)
  k <- 1
  for (i in 1:length(b$smooth)) {
    ns <- length(b$smooth[[i]]$S)
    ind <- map[b$smooth[[i]]$first.para:b$smooth[[i]]$last.para]
    is.random <- i%in%re
    if (ns>0) for (j in 1:ns) {
      if (is.random) S2[ind,ind] <- S2[ind,ind] + sp[k]*b$smooth[[i]]$S[[j]] else
      S1[ind,ind] <- S1[ind,ind] + sp[k]*b$smooth[[i]]$S[[j]]
      k <- k + 1
    }
  }
  ## pseudoinvert S2 (square-root of the pseudoinverse, in fact),
  ## with cheap special cases for 1x1 and diagonal S2
  if (nrow(S2)==1) {
    S2[1,1] <- 1/sqrt(S2[1,1])
  } else if (max(abs(diag(diag(S2))-S2))==0) {
    ds2 <- diag(S2)
    ind <- ds2 > max(ds2)*.Machine$double.eps^.8
    ds2[ind] <- 1/ds2[ind];ds2[!ind] <- 0
    diag(S2) <- sqrt(ds2)
  } else {
    ev <- eigen((S2+t(S2))/2,symmetric=TRUE)
    ind <- ev$values > max(ev$values)*.Machine$double.eps^.8
    ev$values[ind] <- 1/ev$values[ind];ev$values[!ind] <- 0
    ## S2 <- ev$vectors%*%(ev$values*t(ev$vectors))
    S2 <- sqrt(ev$values)*t(ev$vectors)
  }
  ## choleski factor of the inflated covariance:
  ## L <- chol(diag(p)+R2%*%S2%*%t(R2)) ## L'L = I + R2 S2^- R2'
  L <- chol(diag(p) + crossprod(S2%*%t(R2)))
  ## now the square root of the unpenalized cov matrix for term m
  if (m>0) {
    ## llr version
    LRB <- rbind(L%*%R1,t(mroot(S1)))
    ii <- map[b$smooth[[m]]$first.para:b$smooth[[m]]$last.para]
    ## ii indexes the cols of LRB for smooth m, which need
    ## to be moved to the end before the QR...
    LRB <- cbind(LRB[,-ii],LRB[,ii])
    ii <- (ncol(LRB)-length(ii)+1):ncol(LRB) ## need to pick up final block
    Rm <- qr.R(qr(LRB,tol=0,LAPACK=FALSE))[ii,ii,drop=FALSE] ## unpivoted QR
  } else Rm <- NULL
  list(Ve.t= crossprod(L%*%b$R%*%b$Vp.t)/b$sig2, ## Frequentist cov matrix
       Rm=Rm)
} ## end of recov
### same as mgcv::: reTest.scam
reTest.scam <- function(b, m) {
  ## Test the mth smooth term of a fitted scam for equality to zero,
  ## accounting for all other smooth terms flagged as random effects.
  ## Returns list(stat, pval, rank).
  ## collect indices of random-effect smooths other than m
  rind <- integer(0)
  for (i in seq_along(b$smooth)) {
    sm <- b$smooth[[i]]
    if (i != m && !is.null(sm$random) && sm$random) rind <- c(rind, i)
  }
  ## frequentist cov matrix with the terms in rind treated as random
  rc <- recov.scam(b, rind, m)
  ind <- b$smooth[[m]]$first.para:b$smooth[[m]]$last.para
  B <- mroot(rc$Ve.t[ind, ind, drop = FALSE])   # BB' = Ve for term m
  ## test statistic from the unpenalized cov factor Rm applied to the
  ## coefficients of term m
  d <- rc$Rm %*% coef(b)[ind]
  stat <- sum(d^2) / b$sig2
  ## chi-square mixture weights; clamp tiny negative eigenvalues to zero
  ev <- eigen(crossprod(rc$Rm %*% B) / b$sig2, symmetric = TRUE,
              only.values = TRUE)$values
  ev[ev < 0] <- 0
  rank <- sum(ev > max(ev) * .Machine$double.eps^.8)
  ## F-type reference dist when the scale was estimated, chi-square otherwise
  pval <- if (b$scale.estimated) simf(stat, ev, b$df.residual) else liu2(stat, ev)
  list(stat = stat, pval = pval, rank = rank)
} ## end reTest
########## mgcv::: testStat
## below is not the updated version of testStat(), not the one of the mgcv version 1.8-40
testStat <- function(p,X,V,rank=NULL,type=0,res.df= -1) {
## Forms the fractionally truncated pseudoinverse of XVX' and returns the
## statistic p'X'(XVX)^- Xp with its reference-distribution p-value.
## Truncates to the numerical rank if that is less than the supplied rank+1.
## On entry 'rank' should be an edf estimate; 'type' selects the truncation:
## 0. Default, fractionally truncated pinv.
## 1. Round down to k if k <= rank < k+0.05, otherwise up.
## 2. Naive rounding.
## 3. Round up.
## 4. Numerical rank estimation, tol=1e-3.
## (negative type falls back to a crude Cox & Koh style test via smoothTest)
## res.df is the residual dof used to estimate the scale; <=0 implies the
## scale is fixed (chi-square rather than F reference distribution).
  qrx <- qr(X,tol=0)
  R <- qr.R(qrx)
  ## project V through R (pivot-corrected) and symmetrize
  V <- R%*%V[qrx$pivot,qrx$pivot,drop=FALSE]%*%t(R)
  V <- (V + t(V))/2
  ed <- eigen(V,symmetric=TRUE)
  ## split the supplied edf into integer part k and fractional part nu
  k <- max(0,floor(rank))
  nu <- abs(rank - k)     ## fractional part of supplied edf
  if (type < -.5) { ## crude modification of Cox and Koh
    res <- smoothTest(p,X,V)
    res$rank <- rank
    return(res)
  } else if (type==1) { ## round up if more than .05 above the lower integer
    if (rank > k + .05||k==0) k <- k + 1
    nu <- 0;rank <- k
  } else if (type==2) { ## naive round
    nu <- 0;rank <- k <- max(1,round(rank))
    warning("p-values may give low power in some circumstances")
  } else if (type==3) { ## round up
    nu <- 0; rank <- k <- max(1,ceiling(rank))
    warning("p-values un-reliable")
  } else if (type==4) { ## numerical rank estimation
    rank <- k <- max(sum(ed$values>1e-3*max(ed$values)),1)
    nu <- 0
    warning("p-values may give very low power")
  }
  if (nu>0) k1 <- k+1 else k1 <- k
  ## check that the actual numerical rank is not below supplied rank+1
  r.est <- sum(ed$values > max(ed$values)*.Machine$double.eps^.9)
  if (r.est<k1) {k1 <- k <- r.est;nu <- 0;rank <- r.est}
  ## Get the eigenvectors...
  # vec <- qr.qy(qrx,rbind(ed$vectors,matrix(0,nrow(X)-ncol(X),ncol(X))))
  vec <- ed$vectors
  if (k1<ncol(vec)) vec <- vec[,1:k1,drop=FALSE]
  ## deal with the fractional part of the pinv: the last two retained
  ## eigen-directions get a 2x2 blend so the pinv has "edf" rank k+nu
  if (nu>0&&k>0) {
     if (k>1) vec[,1:(k-1)] <- t(t(vec[,1:(k-1)])/sqrt(ed$val[1:(k-1)]))
     b12 <- .5*nu*(1-nu)
     if (b12<0) b12 <- 0
     b12 <- sqrt(b12)
     B <- matrix(c(1,b12,b12,nu),2,2)
     ev <- diag(ed$values[k:k1]^-.5,nrow=k1-k+1)
     B <- ev%*%B%*%ev
     eb <- eigen(B,symmetric=TRUE)
     rB <- eb$vectors%*%diag(sqrt(eb$values))%*%t(eb$vectors)
     vec[,k:k1] <- t(rB%*%t(vec[,k:k1]))
  } else {
    ## integer rank: plain scaling by 1/sqrt(eigenvalue)
    if (k==0) vec <- t(t(vec)*sqrt(1/ed$val[1])) else
    vec <- t(t(vec)/sqrt(ed$val[1:k]))
    if (k==1) rank <- 1
  }
  ## the quadratic-form statistic
  d <- t(vec)%*%(R%*%p)
  d <- sum(d^2)
  rank1 <- rank ## rank for lower tail pval computation below
  ## note that for <1 edf, d is not weighted by EDF and is instead
  ## simply referred to a chi-squared 1
  if (nu>0) { ## mixture-of-chi^2 reference distribution
     if (k1==1) rank1 <- val <- 1 else {
       val <- rep(1,k1) ##ed$val[1:k1]
       rp <- nu+1
       val[k] <- (rp + sqrt(rp*(2-rp)))/2
       val[k1] <- (rp - val[k])
     }
     if (res.df <= 0) pval <- liu2(d,val) else ## pval <- davies(d,val)$Qq else
     pval <- simf(d,val,res.df)
  } else { pval <- 2 } ## sentinel: forces the plain chi^2/F computation below
  ## integer case still needs computing; also the Liu/Pearson approx is only
  ## good in the upper tail -- in the lower tail the 2-moment approximation
  ## (plain chi^2 / F) is better, so switch to it when pval > .5
  if (pval > .5) {
    if (res.df <= 0) pval <- pchisq(d,df=rank1,lower.tail=FALSE) else
    pval <- pf(d/rank1,rank1,res.df,lower.tail=FALSE)
  }
  list(stat=d,pval=min(1,pval),rank=rank)
} ## end of testStat
####################################################
##### function to get all the summary information....
#############################################
model.matrix.scam <- function(object,...) {
  ## Model-matrix method for fitted "scam" objects: the linear-predictor
  ## matrix is produced by the predict method with type = "lpmatrix";
  ## extra arguments are forwarded to predict().
  if (!inherits(object, "scam"))
    stop("`object' is not of class \"scam\"")
  predict(object, type = "lpmatrix", ...)
}
summary.scam <- function (object,dispersion = NULL,freq = FALSE,...)
{
## Summary method for fitted "scam" objects (a clone of mgcv's summary.gam).
## Builds: a coefficient table for individual parametric coefficients, a
## whole-term table for parametric terms, and an approximate significance
## table for smooth terms. With freq=TRUE the frequentist covariance
## matrix (object$Ve.t) is used, otherwise the Bayesian one (object$Vp.t).
## A user-supplied `dispersion` overrides object$sig2 and rescales both.
 pinv <- function(V, M, rank.tol = 1e-06) {
 ## a local pseudoinverse function
 ## Returns a rank-M pseudoinverse of symmetric V (rank reduced further
 ## if V has fewer than M usable eigenvalues); the achieved rank is
 ## attached as attribute "rank".
 D <- eigen(V,symmetric=TRUE)
 M1<-length(D$values[D$values>rank.tol*D$values[1]])
 if (M>M1) M<-M1 # avoid problems with zero eigen-values
 if (M+1<=length(D$values)) D$values[(M+1):length(D$values)]<-1
 D$values<- 1/D$values
 if (M+1<=length(D$values)) D$values[(M+1):length(D$values)]<-0
 res <- D$vectors%*%(D$values*t(D$vectors)) ##D$u%*%diag(D$d)%*%D$v
 attr(res,"rank") <- M
 res
 } ## end of pinv
 p.table <- pTerms.table <- s.table <- NULL
 ## pick the covariance matrix of the (transposed) coefficients
 if (freq) covmat <- object$Ve.t else covmat <- object$Vp.t
 name <- names(object$coefficients.t)
# name <- names(object$edf)
 dimnames(covmat) <- list(name, name)
 covmat.unscaled <- covmat/object$sig2
 est.disp <- object$scale.estimated
 ## a user-supplied dispersion replaces the estimated scale everywhere
 if (!is.null(dispersion)) {
 covmat <- dispersion * covmat.unscaled
 object$Ve.t <- object$Ve.t*dispersion/object$sig2 ## freq
 object$Vp.t <- object$Vp.t*dispersion/object$sig2 ## Bayes
 est.disp <- FALSE
 }
 else dispersion <- object$sig2
 ## Now the individual parameteric coefficient p-values...
 ## (copied from mgcv-1.8-34)============
 se <- diag(covmat)^0.5
 residual.df<-length(object$y)-sum(object$edf)
 if (sum(object$nsdf) > 0) { # individual parameters
 if (length(object$nsdf)>1) { ## several linear predictors (not used in scam!)
 pstart <- attr(object$nsdf,"pstart")
 ind <- rep(0,0)
 for (i in 1:length(object$nsdf)) if (object$nsdf[i]>0) ind <-
 c(ind,pstart[i]:(pstart[i]+object$nsdf[i]-1))
 } else { pstart <- 1;ind <- 1:object$nsdf} ## only one lp
 p.coeff <- object$coefficients[ind]
 p.se <- se[ind]
 p.t<-p.coeff/p.se
 ## z-statistics if the scale is known, t-statistics otherwise
 if (!est.disp) {
 p.pv <- 2*pnorm(abs(p.t),lower.tail=FALSE)
 p.table <- cbind(p.coeff, p.se, p.t, p.pv)
 dimnames(p.table) <- list(names(p.coeff), c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))
 } else {
 p.pv <- 2*pt(abs(p.t),df=residual.df,lower.tail=FALSE)
 p.table <- cbind(p.coeff, p.se, p.t, p.pv)
 dimnames(p.table) <- list(names(p.coeff), c("Estimate", "Std. Error", "t value", "Pr(>|t|)"))
 }
 } else {p.coeff <- p.t <- p.pv <- array(0,0)}
 ## Next the p-values for parametric terms, so that factors are treated whole...
 pterms <- if (is.list(object$pterms)) object$pterms else list(object$pterms)
 if (!is.list(object$assign)) object$assign <- list(object$assign)
 npt <- length(unlist(lapply(pterms,attr,"term.labels")))
 if (npt>0) pTerms.df <- pTerms.chi.sq <- pTerms.pv <- array(0,npt)
 term.labels <- rep("",0)
 k <- 0 ## total term counter
 for (j in 1:length(pterms)) {
 tlj <- attr(pterms[[j]],"term.labels")
 nt <- length(tlj)
 if (j>1 && nt>0) tlj <- paste(tlj,j-1,sep=".")
 term.labels <- c(term.labels,tlj)
 if (nt>0) { # individual parametric terms
 np <- length(object$assign[[j]])
 ind <- pstart[j] - 1 + 1:np
 Vb <- covmat[ind,ind,drop=FALSE]
 bp <- array(object$coefficients[ind],np)
 for (i in 1:nt) {
 k <- k + 1
 ind <- object$assign[[j]]==i
 b <- bp[ind];V <- Vb[ind,ind]
 ## pseudo-inverse needed in case of truncation of parametric space
 if (length(b)==1) {
 V <- 1/V
 pTerms.df[k] <- nb <- 1
 pTerms.chi.sq[k] <- V*b*b
 } else {
 V <- pinv(V,length(b),rank.tol=.Machine$double.eps^.5)
 pTerms.df[k] <- nb <- attr(V,"rank")
 pTerms.chi.sq[k] <- t(b)%*%V%*%b
 }
 if (!est.disp)
 pTerms.pv[k] <- pchisq(pTerms.chi.sq[k],df=nb,lower.tail=FALSE)
 else
 pTerms.pv[k] <- pf(pTerms.chi.sq[k]/nb,df1=nb,df2=residual.df,lower.tail=FALSE)
 } ## for (i in 1:nt)
 } ## if (nt>0)
 }
 if (npt) {
 attr(pTerms.pv,"names") <- term.labels
 if (!est.disp) {
 pTerms.table <- cbind(pTerms.df, pTerms.chi.sq, pTerms.pv)
 dimnames(pTerms.table) <- list(term.labels, c("df", "Chi.sq", "p-value"))
 } else {
 pTerms.table <- cbind(pTerms.df, pTerms.chi.sq/pTerms.df, pTerms.pv)
 dimnames(pTerms.table) <- list(term.labels, c("df", "F", "p-value"))
 }
 } else { pTerms.df<-pTerms.chi.sq<-pTerms.pv<-array(0,0)}
 ## ================================
 ## Now deal with the smooth terms....
 m <- length(object$smooth) # number of smooth terms
 df <- edf1 <-edf <- s.pv <- chi.sq <- array(0, m)
 if (m > 0) { # form test statistics for each smooth
 if (!freq) { ## Bayesian p-values required
 ## For the Bayesian p-values a model matrix X is needed; very large
 ## models are handled by deterministically subsampling rows, taking
 ## care to save and restore the user's RNG state afterwards.
 sub.samp <- max(1000,2*length(object$coefficients))
 if (nrow(object$model)>sub.samp) { ## subsample to get X for p-values calc.
 seed <- try(get(".Random.seed",envir=.GlobalEnv),silent=TRUE) ## store RNG seed
 if (inherits(seed,"try-error")) {
 runif(1)
 seed <- get(".Random.seed",envir=.GlobalEnv)
 }
 kind <- RNGkind(NULL)
 RNGkind("default","default")
 set.seed(11) ## ensure repeatability
 ind <- sample(1:nrow(object$model),sub.samp,replace=FALSE) ## sample these rows from X
 X <- predict(object,object$model[ind,],type="lpmatrix")
 RNGkind(kind[1],kind[2])
 assign(".Random.seed",seed,envir=.GlobalEnv) ## RNG behaves as if it had not been used
 } else { ## don't need to subsample
 X <- model.matrix(object)
 }
 X <- X[!is.na(rowSums(X)),] ## exclude NA's (possible under na.exclude)
 } ## end if (!freq)
 for (i in 1:m) { ## loop through smooths
 start <- object$smooth[[i]]$first.para
 stop <- object$smooth[[i]]$last.para
 if (freq) { ## use frequentist cov matrix
 V <- object$Ve.t[start:stop,start:stop,drop=FALSE]
 } else V <- object$Vp.t[start:stop,start:stop,drop=FALSE] ## Bayesian
 p <- object$coefficients.t[start:stop] # transposed parameters of a smooth
 edf1[i] <- edf[i] <- sum(object$edf[start:stop]) # edf for this smooth
 ## extract alternative edf estimate for this smooth, if possible...
 ## edf1 is not done for scam output value...
 if (!is.null(object$edf1)) edf1[i] <- sum(object$edf1[start:stop])
 if (freq) {
 M1 <- object$smooth[[i]]$df
 M <- min(M1, ceiling(2 * sum(object$edf[start:stop]))) ## upper limit of 2*edf on rank
 V <- pinv(V, M) # get rank M pseudoinverse of V
 chi.sq[i] <- t(p) %*% V %*% p
 df[i] <- attr(V, "rank")
 } else { ## Better founded alternatives...
 Xt <- X[, start:stop,drop=FALSE]
 if (object$smooth[[i]]$null.space.dim==0&&!is.null(object$R)) { ## random effect or fully penalized term
 res <- reTest.scam(object,i)
 } else { ## Inverted Nychka interval statistics
 ## df[i] <- min(ncol(Xt),edf1[i])
 if (est.disp) rdf <- residual.df else rdf <- -1
 res <- testStat(p,Xt,V,min(ncol(Xt),edf1[i]),type=0,res.df = rdf) ## was type=p.type
 }
 df[i] <- res$rank
 chi.sq[i] <- res$stat
 s.pv[i] <- res$pval
 }
 names(chi.sq)[i]<- object$smooth[[i]]$label
 if (freq) {
 if (!est.disp)
 s.pv[i] <- pchisq(chi.sq[i], df = df[i], lower.tail = FALSE)
 else
 s.pv[i] <- pf(chi.sq[i]/df[i], df1 = df[i], df2 = residual.df, lower.tail = FALSE)
# ## p-values are meaningless for very small edf. Need to set to NA
# if (df[i] < 0.1) s.pv[i] <- NA
 }
 ## p-values are meaningless for very small edf. Need to set to NA
 if (df[i] < 0.1) {
 s.pv[i] <- NA
 chi.sq[i] <- NA
 }
 }
 ## rounding output values of edf, df and chi.sq...
 edf <- round(edf,digits=4)
 df <- round(df,digits=4)
 chi.sq <- round(chi.sq,digits=4)
 ## assemble the smooth-term table; column names depend on whether the
 ## scale was known (Chi.sq) or estimated (F), and on freq (Est.rank/Ref.df)
 if (!est.disp) {
 if (freq) {
 s.table <- cbind(edf, df, chi.sq, s.pv)
 dimnames(s.table) <- list(names(chi.sq), c("edf", "Est.rank", "Chi.sq", "p-value"))
 } else {
 s.table <- cbind(edf, df, chi.sq, s.pv)
 dimnames(s.table) <- list(names(chi.sq), c("edf", "Ref.df", "Chi.sq", "p-value"))
 }
 } else {
 if (freq) {
 s.table <- cbind(edf, df, chi.sq/df, s.pv)
 dimnames(s.table) <- list(names(chi.sq), c("edf", "Est.rank", "F", "p-value"))
 } else {
 s.table <- cbind(edf, df, chi.sq/df, s.pv)
 dimnames(s.table) <- list(names(chi.sq), c("edf", "Ref.df", "F", "p-value"))
 }
 }
 }
 ## overall fit summaries: weighted adjusted r-squared and deviance explained
 w <- as.numeric(object$prior.weights)
 mean.y <- sum(w*object$y)/sum(w)
 w <- sqrt(w)
 nobs <- nrow(object$model)
 r.sq<- 1 - var(w*(as.numeric(object$y)-object$fitted.values))*(nobs-1)/(var(w*(as.numeric(object$y)-mean.y))*residual.df)
 dev.expl<-(object$null.deviance-object$deviance)/object$null.deviance
 ret<-list(p.coeff=p.coeff,se=se,p.t=p.t,p.pv=p.pv,residual.df=residual.df,m=m,chi.sq=chi.sq,
 s.pv=s.pv,scale=dispersion,r.sq=round(r.sq,digits=4),family=object$family,formula=object$formula,n=nobs,
 dev.expl=dev.expl,edf=edf,dispersion=dispersion,pTerms.pv=pTerms.pv,pTerms.chi.sq=pTerms.chi.sq,
 pTerms.df = pTerms.df, cov.unscaled = covmat.unscaled, cov.scaled = covmat, p.table = p.table,
 pTerms.table = pTerms.table, s.table = s.table,method=object$method,sp.criterion=object$gcv.ubre,
 sp=object$sp,dgcv.ubre=object$dgcv.ubre,termcode=object$termcode,
 gcv.ubre=object$gcv.ubre,optimizer=object$optimizer,rank=object$rank,np=length(object$coefficients))
 class(ret) <- "summary.scam"
 ret
} ## end summary.scam
################## mgcv::: pinvXVX
## (c) Simon N. Wood
pinvXVX <- function (X, V, rank = NULL)
{
## Copied from mgcv (Simon N. Wood). Returns transformed eigenvectors of
## X V X' (formed via the QR decomposition of X) scaled so that the sum of
## squares of the projected coefficients gives the test statistic at the
## supplied, possibly fractional, `rank`. The fractional part nu is
## absorbed by a 2x2 re-weighting (matrix B) of the last two retained
## eigen-directions.
k <- floor(rank) ## integer part of the requested rank
nu <- rank - k ## fractional part
if (nu > 0)
k1 <- k + 1 ## one extra eigen-direction carries the fraction
else k1 <- k
qrx <- qr(X)
R <- qr.R(qrx)
## form R V R' (== X V X' in the rotated basis) and symmetrize
V <- R %*% V[qrx$pivot, qrx$pivot] %*% t(R)
V <- (V + t(V))/2
ed <- eigen(V, symmetric = TRUE)
## rotate eigenvectors back to the original (n-row) space, zero-padded
vec <- qr.qy(qrx, rbind(ed$vectors, matrix(0, nrow(X) - ncol(X),
ncol(X))))
if (k1 < ncol(vec))
vec <- vec[, 1:k1, drop = FALSE]
if (k == 0) {
## rank entirely fractional: single direction weighted by nu
vec <- t(t(vec) * sqrt(nu/ed$val[1]))
return(vec)
}
if (nu > 0) {
## full-weight directions scaled by 1/sqrt(eigenvalue)...
if (k > 1)
vec[, 1:(k - 1)] <- t(t(vec[, 1:(k - 1)])/sqrt(ed$val[1:(k -
1)]))
## ...and the last two blended through the symmetric square root of B
b12 <- 0.5 * nu * (1 - nu)
if (b12 < 0)
b12 <- 0
b12 <- sqrt(b12)
B <- matrix(c(1, b12, b12, nu), 2, 2)
ev <- diag(ed$values[k:k1]^-0.5)
B <- ev %*% B %*% ev
eb <- eigen(B, symmetric = TRUE)
rB <- eb$vectors %*% diag(sqrt(eb$values)) %*% t(eb$vectors)
vec[, k:k1] <- t(rB %*% t(vec[, k:k1]))
}
else {
## integer rank: plain inverse square-root scaling
vec <- t(t(vec)/sqrt(ed$val[1:k]))
}
vec
}
################ mgcv::: eigXVX
## (c) Simon N. Wood
eigXVX <- function (X, V, rank = NULL, tol = .Machine$double.eps^0.5)
{
## Copied from mgcv (Simon N. Wood). Eigen-decomposes X V X' (formed via
## the QR decomposition of X) and returns the eigenvalues, the
## eigenvectors rotated back to the row space of X, and the rank actually
## used. If `rank` is NULL the numerical rank (eigenvalues above
## tol * largest) is used; a supplied rank is capped at that value.
qrx <- qr(X)
R <- qr.R(qrx)
V <- R %*% V[qrx$pivot, qrx$pivot] %*% t(R)
V <- (V + t(V))/2 ## symmetrize against rounding error
ed <- eigen(V, symmetric = TRUE)
ind <- abs(ed$values) > max(abs(ed$values)) * tol
erank <- sum(ind) ## numerical rank of X V X'
if (is.null(rank)) {
rank <- erank
}
else {
if (rank < erank)
ind <- 1:rank
else rank <- erank
}
## rotate eigenvectors back via Q, zero-padding to nrow(X)
vec <- qr.qy(qrx, rbind(ed$vectors, matrix(0, nrow(X) - ncol(X),
ncol(X))))
list(values = ed$values[ind], vectors = vec[, ind], rank = rank)
}
##### print.summary.scam .....
print.summary.scam <- function (x, digits = max(3, getOption("digits") - 3),
signif.stars = getOption("show.signif.stars"),...)
## print method for scam, a clone of print.summary.gam of mgcv()
## Prints family, formula, the parametric coefficient table, the
## smooth-term significance table, then fit summaries (rank, adjusted
## r-squared, deviance explained, smoothing criterion score, scale, n)
## and, for BFGS fits that did not converge cleanly, a scaled gradient
## of the GCV/UBRE score as the termination diagnostic.
{
print(x$family)
cat("Formula:\n")
print(x$formula)
if (length(x$p.coeff) > 0) {
cat("\nParametric coefficients:\n")
printCoefmat(x$p.table, digits = digits, signif.stars = signif.stars,
na.print = "NA", ...)
}
cat("\n")
if (x$m > 0) {
cat("Approximate significance of smooth terms:\n")
printCoefmat(x$s.table, digits = digits, signif.stars = signif.stars,
has.Pvalue = TRUE, na.print = "NA", cs.ind = 1, ...)
}
# cat("\n")
## report rank only when the model is rank deficient
if (!is.null(x$rank) && x$rank< x$np) cat("Rank: ",x$rank,"/",x$np,"\n",sep="")
cat("\nR-sq.(adj) = ", formatC(x$r.sq, digits = 4, width = 5))
if (length(x$dev.expl) > 0)
cat(" Deviance explained = ", formatC(x$dev.expl *
100, digits = 3, width = 4), "%\n", sep = "")
if (length(x$sp.criterion) > 0)
cat( x$method," score = ", formatC(x$sp.criterion, digits = 5),
sep = "")
cat(" Scale est. = ", formatC(x$scale, digits = 5, width = 8,
flag = "-"), " n = ", x$n, "\n", sep = "")
if ((x$optimizer[1] == "bfgs") && x$m>0){
if (x$termcode!= 1) {
## relative gradient criterion reported when BFGS stopped for a
## reason other than gradient convergence (termcode != 1)
dgcv.ubre <- max(abs(x$dgcv.ubre)*max(abs(log(x$sp)),1)/max(abs(x$gcv.ubre),1))
cat("\nBFGS termination condition:\n", dgcv.ubre,"\n",sep = "")
}
}
cat("\n")
invisible(x)
}
###############################################
## anova for scam models (clone of summary.gam())...
## of mgcv versions up to 1.8-11...
## (c) Simon N. Wood
###############################################
anova.scam <- function (object, ..., dispersion = NULL, test = NULL, freq=FALSE,p.type=0)
# clone of summary.gam(): mgcv package
{ # adapted from anova.glm: R stats package
## If further fitted glm-inheriting models are supplied in `...`, this
## dispatches to the multi-model glmlist anova; otherwise it returns the
## single-model summary re-classed as "anova.scam" (so print.anova.scam
## shows only the term tables).
dotargs <- list(...)
## named extra arguments are not model objects -> drop with a warning
named <- if (is.null(names(dotargs)))
rep(FALSE, length(dotargs))
else (names(dotargs) != "")
if (any(named))
warning("The following arguments to anova.glm(..) are invalid and dropped: ",
paste(deparse(dotargs[named]), collapse = ", "))
dotargs <- dotargs[!named]
is.glm <- unlist(lapply(dotargs, function(x) inherits(x,
"glm")))
dotargs <- dotargs[is.glm]
if (length(dotargs) > 0)
return(anova(structure(c(list(object), dotargs), class="glmlist"),
dispersion = dispersion, test = test))
# return(anova.glmlist(c(list(object), dotargs), dispersion = dispersion,
# test = test)) ## modified at BDR's suggestion 19/08/13
if (!is.null(test)) warning("test argument ignored")
if (!inherits(object,"scam")) stop("anova.scam called with non scam object")
## NOTE(review): summary.scam has no p.type argument, so p.type is
## absorbed by its `...` and currently has no effect -- confirm intended.
sg <- summary(object, dispersion = dispersion, freq = freq,p.type=p.type)
class(sg) <- "anova.scam"
sg
} ## anova.scam
print.anova.scam <- function(x, digits = max(3, getOption("digits") - 3), ...)
{ # print method for class anova.scam resulting from single
# scam model calls to anova. Clone of print.anova.gam(): mgcv package
## Prints family, formula(e), the whole-term parametric table and the
## smooth-term significance table (no significance stars).
print(x$family)
cat("Formula:\n")
## a model may have a list of formulae (one per linear predictor)
if (is.list(x$formula)) for (i in 1:length(x$formula)) print(x$formula[[i]]) else
print(x$formula)
if (length(x$pTerms.pv)>0)
{ cat("\nParametric Terms:\n")
printCoefmat(x$pTerms.table, digits = digits, signif.stars = FALSE, has.Pvalue = TRUE, na.print = "NA", ...)
}
cat("\n")
if(x$m>0)
{ cat("Approximate significance of smooth terms:\n")
printCoefmat(x$s.table, digits = digits, signif.stars = FALSE, has.Pvalue = TRUE, na.print = "NA", ...)
}
invisible(x)
} ## print.anova.scam
|
54b80ae0a54ff56c6855134320e39f458d930510
|
1207b078e3918e8b9378158f0aab180b847fb4c4
|
/tests/testthat/test-screen_aov.R
|
f84352d94be87007d0c6e334b3abe66c84e3e918
|
[] |
no_license
|
mimikwang/screenr
|
1ac82ad7c54e476ae5ea2020512ac84a6439f2bf
|
3484f586e89914e08575ee3e8e8c0f7655772ae0
|
refs/heads/master
| 2020-07-18T10:37:56.283779
| 2019-09-21T07:26:13
| 2019-09-21T07:26:13
| 206,230,803
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,019
|
r
|
test-screen_aov.R
|
## testthat suite for screen_aov(): checks normal operation and return
## structure, error signalling on invalid input, and the warning raised
## when responses and factors overlap.
# Normal
test_that("Normal", {
expect_silent(screen_aov(mtcars, c("cyl", "mpg"), c("disp")))
out <- screen_aov(mtcars, c("cyl"), c("disp", "mpg"))
expect_s3_class(out, "screen_aov")
## two responses -> two result rows; 12 summary columns expected
expect_equal(nrow(out$results), 2)
expect_equal(ncol(out$results), 12)
## empty data frame must not error (expect_error(..., NA) asserts no error)
expect_error(screen_aov(data.frame(x = as.numeric(), y = as.numeric()), "x", "y"), NA)
expect_error(print(screen_aov(mtcars, "cyl", "mpg")), NA)
})
# Test for throwing errors
test_that("Test for Throwing Errors", {
# Incorrect Input Type
expect_error(screen_aov(1, 1, 1))
expect_error(screen_aov(c("sdf"), mtcars, mtcars))
expect_error(screen_aov(mtcars, mtcars, "cyl"))
expect_error(screen_aov(mtcars, "cyl", mtcars))
# Responses and Factors not in DataFrame
expect_error(screen_aov(mtcars, c("cyl", "mpg"), "asdf"))
expect_error(screen_aov(mtcars, "asdf", c("cyl", "mpg")))
})
# Test for Warnings
test_that("Test for Warnings", {
# Shared responses and factors
expect_warning(screen_aov(mtcars, c("cyl"), c("cyl", "mpg")))
})
|
ba769aca1ab5e94ff734f7e1ce07e04bb3e1fc1c
|
83ce3b39e88c03e2c98ef2f05174195708ac3dbe
|
/inst/shotGroups_AnalyzeGroups_bs4Dash_05/app_ui_tab_group_accuracy.R
|
2066dac3478dd4686c9b1b8f9b4a9e047db4758e
|
[] |
no_license
|
cran/shotGroups
|
e02467ffb36b8e528fa1c230b2a718512159fc19
|
ae04a8371aa1cc18af598413d1bc41d389762acb
|
refs/heads/master
| 2022-10-01T18:19:20.943958
| 2022-09-17T18:06:04
| 2022-09-17T18:06:04
| 17,699,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,422
|
r
|
app_ui_tab_group_accuracy.R
|
## Shiny UI fragment for the "group accuracy" tab: a settings box (group
## selection, CI width, bootstrap CI type) next to an output box with
## documentation links, output-element selection, downloadable results
## and plot. NOTE(review): CItypes and locationOut are presumably choice
## vectors defined elsewhere in the app -- confirm against the caller.
fluidPage(
fluidRow(
bs4Box(
title="Settings for group accuracy",
width=4,
uiOutput("locGroups"),
sliderInput("locLevel", label=h5("Confidence interval width"),
min=0.5, max=1, value=0.95, step=0.01),
checkboxGroupInput("locCItype", label=h5("Bootstrap CI type"),
choices=CItypes, selected=NULL)
),
bs4Box(
title="Group accuracy",
width=8,
p("For details, see the documentation for",
a("groupLocation()",
href="https://www.rdocumentation.org/packages/shotGroups/functions/groupLocation"),
"and the",
a("shotGroups vignette",
href="https://cran.rstudio.com/web/packages/shotGroups/vignettes/shotGroups.pdf"),
"section 2.5"),
selectizeInput("locationOut", label=h5("Select the output elements you want to see"),
choices=locationOut, multiple=TRUE,
selected=c("1", "3", "5", "6", "7"), width="100%"),
downloadButton("saveLocation", "Save results as text file"),
verbatimTextOutput("location"),
downloadButton("saveLocationPDF", "Save diagram as pdf"),
plotOutput("locationPlot", height="500px")
)
)
)
|
476df843daa935b43cc91e7c857af60db1a67a2a
|
558b5af88e52057276d703452d73d31c0f40c77a
|
/man/tn.th.Rd
|
c66bd24100e49f858483a65b7f9999f25ab46d45
|
[] |
no_license
|
SantanderMetGroup/climate4R.indices
|
9096786d329675f493c5bc1df796d3ec222aa240
|
3d5b6ee1c62b2c4b9341742db5d0bf8255958cdd
|
refs/heads/master
| 2023-07-14T15:17:11.398428
| 2023-06-22T12:58:57
| 2023-06-22T12:58:57
| 183,474,178
| 5
| 6
| null | 2019-10-10T11:46:48
| 2019-04-25T16:45:35
|
R
|
UTF-8
|
R
| false
| true
| 419
|
rd
|
tn.th.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indices.R
\name{tn.th}
\alias{tn.th}
\title{Days with minimum temperature below a given threshold}
\usage{
tn.th(tn, th = 0)
}
\arguments{
\item{tn}{Vector with minimum temperature data}
\item{th}{Threshold value (Default is 0)}
}
\description{
Annual count of days with minimum temperature below a given threshold
}
\author{
M. Iturbide
}
|
f65f828a7b046c652d2c0836a0cfec756f3d600f
|
064ec5e18fe8fbd99823b08fd757a7ea2da33155
|
/Rlov/wordcloud/noh_wordcloud.R
|
485d1dbd58c645fb0a1611b50992fd11e5d5a7eb
|
[] |
no_license
|
korea7030/R
|
2a75aeffde4898b82a913e50652eb34dc5ec5a0c
|
d3319d041539d43b0a1f268a7b926aa51fe13eee
|
refs/heads/master
| 2020-12-12T12:06:10.161092
| 2017-07-08T10:13:22
| 2017-07-08T10:13:22
| 54,616,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,718
|
r
|
noh_wordcloud.R
|
## Word-frequency analysis of a speech by former president Roh Moo-hyun:
## read the speech, extract nouns (KoNLP's extractNoun), drop digits and
## uninformative stop words, then draw a word cloud plus pie and bar
## charts of the ten most frequent words.
## NOTE(review): relies on KoNLP, wordcloud and a colour palette `palete`
## being loaded/defined by the surrounding session.
getwd()
noh <- readLines("noh.txt")
noh
noh_sentence <- sapply(noh, extractNoun, USE.NAMES = FALSE)
noh_sentence
noh_sentence1 <- unlist(noh_sentence)
## keep only words of at least two characters
noh_sentence1 <- Filter(function(x) { nchar(x) >= 2 }, noh_sentence1)
noh_sentence1
## strip digits, then remove uninformative stop words in one pass
## (replaces a long chain of repeated gsub() calls; the stop words
## contain no regex metacharacters, so fixed = TRUE is equivalent)
res <- gsub("\\d+", "", noh_sentence1)
stop_words <- c("이번", "최초", "하게", "시작", "사상", "국민",
                "모두", "하기", "기대", "여러분", "대통령")
for (w in stop_words) res <- gsub(w, "", res, fixed = TRUE)
res
write(res, "noh_1.txt")
word <- read.table("noh_1.txt")
word
wordcount <- table(word)
head(sort(wordcount, decreasing = TRUE))
windowsFonts(malgun = windowsFont("맑은 고딕"))
wordcloud(names(wordcount), freq = wordcount, scale = c(5, 0.5), rot.per = 0.25,
          min.freq = 1, random.order = TRUE, random.color = TRUE,
          colors = palete, family = "malgun")
## pie charts of the 10 most frequent words, labelled with percentages
noh_top10 <- head(sort(wordcount, decreasing = TRUE), 10)
pie(noh_top10)
pie(noh_top10, radius = 1)
pct <- round(noh_top10 / sum(noh_top10) * 100, 1)
names(noh_top10)
lab <- paste(names(noh_top10), "\n", pct, "%")
pie(noh_top10, main = "노무현 전 대통령님 연설문 분석", cex = 0.8, labels = lab)
pie(noh_top10, main = "노무현 전 대통령님 연설문 분석", col = rainbow(10), cex = 0.8, labels = lab)
## bar chart annotated with percentage and raw-count labels
bplot <- barplot(noh_top10, main = "노무현 전 대통령님 연설문 분석", col = rainbow(10),
                 cex.names = 0.8, las = 2, ylim = c(0, 30))
pct <- round(noh_top10 / sum(noh_top10) * 100, 1)
pct
text(x = bplot, y = noh_top10 * 1.05, labels = paste("(", pct, "%", ")"), col = "black", cex = 0.7)
text(x = bplot, y = noh_top10 * 0.95, labels = paste(noh_top10, "건"), col = "black", cex = 0.7)
|
f6376d591f68d10063b756f9cab3add5d304d70d
|
c8674dc53aa778b3d8c0759f117b8872196d3009
|
/R/GenerateTumorCharacterCat.R
|
2bd6421af48e8bbf7f8f940fb2e5a4165ff8e0e6
|
[] |
no_license
|
andrewhaoyu/TOP
|
d8acae9cd8668d70f424997cc6c91b21bf5b5dce
|
0de8cd088754079b9b9a11ee785fc6a34f3bab29
|
refs/heads/master
| 2022-10-04T21:39:09.104998
| 2022-08-25T21:19:01
| 2022-08-25T21:19:01
| 148,667,732
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
GenerateTumorCharacterCat.R
|
### Generate the potential tumor characteristic category(binary or categorical)
#' Enumerate the observed categories of each tumor characteristic
#'
#' @param y.pheno.complete Matrix or data frame whose first column is the
#'   case/control status and whose remaining columns are tumor
#'   characteristics (binary or categorical); entries may be \code{NA}.
#' @return A list with one element per tumor characteristic, each holding
#'   the sorted unique non-missing category values of that characteristic.
#' @export
#'
#' @examples
#' y <- cbind(status = c(1, 0, 1), grade = c(1, 2, 1), er = c(0, NA, 1))
#' GenerateTumorCharacterCat(y)
GenerateTumorCharacterCat <- function(y.pheno.complete){
  tumor.number = ncol(y.pheno.complete)-1
  ## drop the status column, keeping the two-dimensional shape
  y.tumor.complete = y.pheno.complete[,2:(tumor.number+1),drop=FALSE]
  ## preallocate the result list instead of growing it in the loop
  tumor.character.cat = vector("list", tumor.number)
  for(i in seq_len(tumor.number)){
    obs = y.tumor.complete[,i]
    ## unique non-missing categories, returned in ascending order
    unique.tumor.cat = unique(obs[!is.na(obs)])
    tumor.character.cat[[i]] = unique.tumor.cat[order(unique.tumor.cat)]
  }
  return(tumor.character.cat)
}
|
5fb6b73ead0bde26b1e302df90303b42422aca4c
|
188fdc152f41fb470aade8a3d034830f5e241264
|
/man/post_model.Rd
|
fd04ce8fc994c630d25fdf7b9473aaafa810f162
|
[] |
no_license
|
navigate-cgalvao/trelloR
|
703796227570dd02bef66b56877aee92a1572569
|
21a039f7f7a8e611b9dfe828ca37b4ea2161794d
|
refs/heads/master
| 2020-08-26T20:40:01.233918
| 2019-10-13T16:35:56
| 2019-10-13T16:35:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,904
|
rd
|
post_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post_model.R
\name{post_model}
\alias{post_model}
\title{POST data to Trello API}
\usage{
post_model(model, id = NULL, path = NULL, body = list(name = "New"),
token = NULL, response = "content", on.error = "warning",
encode = "json", handle = NULL, verbose = FALSE)
}
\arguments{
\item{model}{Model}
\item{id}{Model id}
\item{path}{Path}
\item{body}{A named list of query parameters (will be passed as body)}
\item{token}{Secure token, see \code{\link{get_token}} (scope must include write permissions)}
\item{response}{Can return \code{"content"} (default), \code{"headers"}, \code{"status"} code or the complete \code{"response"}}
\item{on.error}{Issues either \code{\link[base]{warning}} (default), \code{\link[base]{message}} or error (and \code{\link[base]{stop}}s)}
\item{encode}{Passed to \code{\link[httr]{POST}}}
\item{handle}{Passed to \code{\link[httr]{POST}}}
\item{verbose}{Whether to pass \code{verbose()} to \code{\link[httr]{POST}}}
}
\description{
Issues \code{\link[httr]{POST}} requests for Trello API endpoints.
}
\details{
See \href{https://developers.trello.com/v1.0/reference}{Trello API reference}
for more info about what arguments can be passed to POST requests.
}
\examples{
\dontrun{
# Get token with write access
token = get_token(yourkey, yoursecret, scope = c("read", "write"))
# Get board ID
url = "Your board URL"
bid = get_id_board(url, token)
# Get lists on that board, extract ID of the first one
lid = get_board_lists(bid, token)$id[1]
# Content for the new card
payload = list(
idList = lid,
name = "A new card",
desc = "#This card has been created by trelloR",
pos = "bottom"
)
# Create card and store the response (to capture the ID
# of the newly created model)
r = post_model(model = "card", body = payload, token = token)
# Get ID of the new card
r$id
}
}
|
bd22f85cfe06c07e1abe6c1ba4bbba2197586483
|
7f083ac85aa3eedf47e159192907d0fc7a46b23d
|
/Shiny-Flights/app.R
|
b9ac6a71d384207ac310a204f7e857679be126f8
|
[] |
no_license
|
franciscodelval/Visualizacion
|
d2632ce40afeaad71067546c3973a914402268f4
|
0ff58aa443d9c7ee58b17a330d058991f4dba4ba
|
refs/heads/main
| 2023-02-05T16:05:24.715130
| 2020-12-23T17:12:19
| 2020-12-23T17:12:19
| 323,959,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,832
|
r
|
app.R
|
#------------------------------------------------------------------------------
# FLIGHT EXPLORER
#------------------------------------------------------------------------------
# GRUPO 9:
# Carlos Rodriguez
# Francisco del Val
# Jose Lopez
# Octavio del Sueldo
# Iñigo Martiarena
#------------------------------------------------------------------------------
# LIBRERIAS
# Shiny
library(shiny)
library(shinyWidgets)
library(shinydashboard)
# Base de datos
library(nycflights13)
library(tidyverse)
# Plots
library(ggplot2)
library(maps)
library(geosphere)
library(ggmap)
library(cowplot)
#------------------------------------------------------------------------------
# DATASET
# Load the flight and airport datasets and inspect them
data("flights")
head(flights)
data("airports")
head(airports)
# Join the datasets of interest; the join key is the destination
# airport code against faa
data_raw <- flights %>%
inner_join(airports, c("dest" = "faa"))
# Join again, this time keying the origin airport code against faa.
# After both joins we have coordinates for destination and origin.
data_raw <- data_raw %>%
inner_join(airports, c("origin" = "faa"))
# Keep only the columns we need
data_raw <- select(.data = data_raw, c(sched_dep_time, dep_delay, arr_delay,
origin, dest, distance,
name.x, lat.x, lon.x,
name.y, lat.y, lon.y))
# Lower-case the variable names and replace the .x suffix
# (first join) with _dest
data_raw <- rename_with(data_raw, ~ tolower(gsub(".x", "_dest",
.x, fixed = TRUE)))
# Lower-case the variable names and replace the .y suffix
# (second join) with _origin
data_raw <- rename_with(data_raw, ~ tolower(gsub(".y", "_origin",
.x, fixed = TRUE)))
# Drop rows with missing values
data <- drop_na(data_raw)
# Attach the data frame so columns can be referenced by bare name.
# NOTE(review): attach() is generally discouraged, but the UI below
# refers to name_origin/name_dest unqualified and so depends on it.
attach(data)
# With the dataset cleaned we can start working with shiny
#------------------------------------------------------------------------------
# UI
## Dashboard UI: a sidebar with origin/destination airport pickers and a
## point-size variable selector, plus a body with a brushable map and
## four histogram boxes.
ui <- dashboardPage(
dashboardHeader(
title = "Flight Explorer"
),
dashboardSidebar(
# Select the origin airports (bare name_origin relies on attach(data))
pickerInput(inputId = "origin",
label = "Origin",
choices = sort(unique(name_origin), decreasing = F),
options = list("actions-box" = TRUE),
multiple = TRUE,
selected = "John F Kennedy Intl"
),
# Select the destination airports
pickerInput(inputId = "dest",
label = "Destination",
choices = sort(unique(name_dest), decreasing = F),
options = list("actions-box" = TRUE),
multiple = TRUE,
selected = "Los Angeles Intl"
),
# Select the variable that sizes the map points
selectInput(inputId = "variable",
label = "Size by",
choices = list("Arrival delay" = "arr_delay",
"Departure delay" = "dep_delay",
"Distance" = "distance"),
multiple = FALSE,
selected = "distance"
)
),
dashboardBody(
# Map plot; "selection" is the brush id read back in the server
plotOutput("map",
brush = "selection"),
# Histogram plots, each wrapped in a box for nicer layout
fluidRow(
box(title = "Arrival delay in minutes",
status = "primary",
plotOutput("hist_arr_delay")
),
box(title = "Departure delay in minutes",
status = "primary",
plotOutput("hist_dep_delay")
),
box(title = "Distance flown in milles",
status = "primary",
plotOutput("hist_dist")),
box(title = "Scheduled departure time",
status = "primary",
plotOutput("hist_depart_time"))
)
)
)
#------------------------------------------------------------------------------
# SERVER
## Server: a reactive filtered dataset (brush selection on the map takes
## precedence over the sidebar pickers), four histograms of the filtered
## flights, and a US map with origin/destination points and route lines.
server <- function(input, output) {
#----------------------------------------------------------------------------
# HISTOGRAMS
# Define the filter the plots will use
data_filter <- reactive({
# If no airports are selected, validate() shows a message instead
validate(
need(input$dest != "", "Please select destination"),
need(input$origin != "", "Please select origin")
)
# Build the dataset of the brushed (map-selected) points.
# NOTE(review): input$selection is read here while the plotOutput brush
# id is "selection" -- brushedPoints expects the brush object
# (input$selection) so this matches, but confirm against shiny docs.
data_brushed <- brushedPoints(data,
input$selection,
xvar = "lon_dest",
yvar = "lat_dest")
# If the user brushed a region on the map, use that dataset;
# otherwise filter by the airports chosen in the UI
if (nrow(data_brushed) > 0) {
return(data_brushed)
} else {
return(filter(.data = data,
name_origin == input$origin,
name_dest == input$dest))
}
})
# Render the histograms
# Histogram of arrival delay
output$hist_arr_delay <- renderPlot({
# If there is nothing to plot, validate() shows a message
validate(
need(nrow(data_filter()) > 0, "No flights in this corridor!")
)
ggplot(data = data_filter(), aes(x = arr_delay)) +
geom_histogram(col = "cadetblue4", fill = "cadetblue3") +
xlab("Minutes")
})
# Histogram of departure delay
output$hist_dep_delay <- renderPlot({
# If there is nothing to plot, validate() shows a message
validate(
need(nrow(data_filter()) > 0, "No flights in this corridor!")
)
ggplot(data = data_filter(), aes(x = dep_delay)) +
geom_histogram(col = "cadetblue4", fill = "cadetblue3") +
xlab("Minutes")
})
# Histogram of distance
output$hist_dist <- renderPlot({
# If there is nothing to plot, validate() shows a message;
# a single distinct distance is reported as its value instead
validate(
need(nrow(data_filter()) > 0, "No flights in this corridor!"),
need(length(unique(data_filter()$distance)) != 1,
paste0(unique(data_filter()$distance)))
)
ggplot(data = data_filter(), aes(x = distance)) +
geom_histogram(col = "cadetblue4", fill = "cadetblue3") +
xlab("Milles")
})
# Histogram of scheduled departure time
output$hist_depart_time <- renderPlot({
# If there is nothing to plot, validate() shows a message
validate(
need(nrow(data_filter()) > 0, "No flights in this corridor!")
)
ggplot(data = data_filter(), aes(x = sched_dep_time)) +
geom_histogram(col = "cadetblue4", fill = "cadetblue3") +
xlab("Minutes")
})
#----------------------------------------------------------------------------
# MAP
output$map <- renderPlot({
usa <- map_data("usa")
states <- map_data("state")
# US outline
mapa <- ggplot() +
geom_polygon(data = usa,
aes(x = long, y = lat, group = group),
fill = NA,
color = "black") +
coord_fixed(1.3)
# State polygons
mapa <- mapa +
geom_polygon(data = states,
aes(x = long, y = lat, group = group),
color = "gray35", fill = "lemonchiffon1") +
guides(fill = FALSE)
# Map with the airports.
# To show the mean of the size-by variable we would use this dataset,
# but mean() of a column *name* string does not work as written, so
# data_size is computed and then unused; the ggplot below would need
# input$variable replaced by data_size$avg.
# NOTE(review): mean(input$variable) averages a string, yielding NA
# with a warning -- kept as in the original, flagged for follow-up.
data_size <- data_filter() %>%
group_by(name_dest, lon_dest, lat_dest) %>%
summarise(avg = mean(input$variable))
# Plot destination points sized by the selected variable
mapa <- mapa +
geom_point(data = data_filter(),
aes_string(x = "lon_dest", y = "lat_dest", size = input$variable),
color = "tomato1", fill = "tomato1", shape = 1)
mapa <- mapa +
geom_point(data = data_filter(),
aes(x = lon_origin, y = lat_origin),
color = "green", fill = "green", shape = 19, size = 3)
mapa <- mapa +
geom_segment(data = data_filter(),
aes(x = lon_origin, y = lat_origin,
xend = lon_dest, yend = lat_dest),
linetype = 2, lwd = 0.75)
mapa +
theme(panel.background = element_rect(fill = "lightblue",
colour = "lightblue"))
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
381ffedd710dd9c072549c24571cad1fba28332b
|
885d8760545b21f3b85ae05c8d0aaf710d699942
|
/R/rampFastQueryTab3.R
|
5fd0f6cbb4084f2797762bc41308f32e1f2c93f3
|
[] |
no_license
|
Mathelab/RaMP-DB
|
313e2c855db416b84ba1e62351a133ef27f9b676
|
5acaded46849b713acef8686b07d5fac2b56c8ba
|
refs/heads/master
| 2021-06-02T09:57:46.848784
| 2021-04-19T12:22:21
| 2021-04-19T12:22:21
| 114,416,614
| 15
| 4
| null | 2021-04-19T12:16:23
| 2017-12-15T22:34:46
|
R
|
UTF-8
|
R
| false
| false
| 35,444
|
r
|
rampFastQueryTab3.R
|
#' Do fisher test for only one pathway from search result
#' clicked on highchart
#'
#' For each pathway, builds a 2x2 contingency table (analyte in/out of
#' pathway x user/background analytes) and runs Fisher's exact test.
#' P-values are computed first for the pathways hit by the user's input
#' (from \code{pathwaydf}), then for every remaining non-HMDB pathway in
#' RaMP, so that multiple-testing correction downstream can span all
#' tested pathways.
#' @param pathwaydf a data frame resulting from getPathwayFromAnalyte
#' @param total_metabolites number of metabolites analyzed in the experiment (e.g. background) (default is 1000; set to 'NULL' to retrieve total number of metabolites that map to any pathway in RaMP). Assumption that analyte_type is "metabolite")
#' @param total_genes number of genes analyzed in the experiment (e.g. background) (default is 20000, with assumption that analyte_type is "genes")
#' @param analyte_type "metabolites" or "genes" (default is "metabolites")
#' @param conpass password for database access (string)
#' @param dbname name of the mysql database (default is "ramp")
#' @param username username for database access (default is "root")
#' @param host host name for database access (default is "localhost")
#' @return a dataframe with columns containing pathway ID, fisher's p value, user analytes in pathway, and total analytes in pathway
runFisherTest <- function(pathwaydf,total_metabolites=NULL,total_genes=20000,
                          analyte_type="metabolites",conpass=NULL,
                          dbname="ramp",username="root",
                          host = "localhost"){
  now <- proc.time()
  print("Fisher Testing ......")
  if(is.null(conpass)) {
    stop("Please define the password for the mysql connection")
  }
  # Pick the background size according to analyte type; for metabolites a
  # NULL total means "derive the background from the database" (below).
  if(analyte_type=="metabolites") {total_analytes=total_metabolites
  } else if (analyte_type=="genes") {
    total_analytes=total_genes
  } else {
    stop("Please define the analyte_type variable as 'metabolites' or 'genes'")
  }
  # Reusable 2x2 table; cells are overwritten per pathway in the loops below.
  # NOTE(review): row/col labels say "Metabolites" even when analyte_type is
  # "genes" -- labels are cosmetic only and do not affect the test.
  contingencyTb <- matrix(0,nrow = 2,ncol = 2)
  colnames(contingencyTb) <- c("In Pathway","Not In Pathway")
  rownames(contingencyTb) <- c("All Metabolites","User's Metabolites")
  # Get pathway ids that contain the user analytes
  pid <- unique(pathwaydf$pathwayRampId);
  list_pid <- sapply(pid,shQuote)
  list_pid <- paste(list_pid,collapse = ",")
  # Get the total number of metabolites that are mapped to pathways in RaMP (that's the default background)
  query <- "select * from analytehaspathway"
  con <- DBI::dbConnect(RMySQL::MySQL(), user = username,
                        password = conpass,
                        dbname = dbname,
                        host = host)
  allids <- DBI::dbGetQuery(con,query)
  DBI::dbDisconnect(con)
  allids <- allids[!duplicated(allids),]
  # Per-source backgrounds: RAMP_C ids are compounds; count distinct
  # compounds mapped to each pathway source (kegg/reactome/wiki).
  if((analyte_type == "metabolites") && (is.null(total_metabolites))) {
    wiki_totanalytes <- length(unique(allids$rampId[grep("RAMP_C",allids[which(allids$pathwaySource=="wiki"),"rampId"])]))
    react_totanalytes <- length(unique(allids$rampId[grep("RAMP_C",allids[which(allids$pathwaySource=="reactome"),"rampId"])]))
    kegg_totanalytes <- length(unique(allids$rampId[grep("RAMP_C",allids[which(allids$pathwaySource=="kegg"),"rampId"])]))
  }
  # For genes the same user-supplied background is used for every source.
  if(analyte_type=="genes") {
    wiki_totanalytes <- react_totanalytes <- kegg_totanalytes <- total_genes
  }
  print("Calculating p-values for pathways in input")
  # Retrieve the Ramp compound ids associated with the ramp pathway id and count them:
  query1 <- paste0("select rampId,pathwayRampId from analytehaspathway where pathwayRampId in (",
                   list_pid,")")
  con <- DBI::dbConnect(RMySQL::MySQL(), user = username,
                        password = conpass,
                        dbname = dbname,
                        host = host)
  cids <- DBI::dbGetQuery(con,query1)#[[1]]
  DBI::dbDisconnect(con)
  # Generate freq table based on input pathwayRampIds.
  query2 <- paste0("select * from analytehaspathway where pathwayRampId in (",
                   list_pid,")")
  con <- DBI::dbConnect(RMySQL::MySQL(), user = username,
                        password = conpass,
                        dbname = dbname,
                        host = host)
  input_RampIds <- DBI::dbGetQuery(con,query2)
  if(is.null(input_RampIds)) {
    stop("Data doesn't exist")
  } else {
    # data frames for metabolites with pathawayRampID, Freq based on Source(kegg, reactome, wiki)
    # "RAMP_C" prefix selects compound (metabolite) rows.
    input_RampId_C <- input_RampIds[grep("RAMP_C", input_RampIds$rampId), ]
    unique_input_RampId_C <- unique(input_RampId_C[,c("rampId", "pathwayRampId")])
    unique_pathwayRampId_source <- unique(input_RampId_C[,c("pathwayRampId", "pathwaySource")])
    # Freq = number of distinct compounds per pathway.
    freq_unique_input_RampId_C <- as.data.frame(table(unique_input_RampId_C[,"pathwayRampId"]))
    names(freq_unique_input_RampId_C)[1] = 'pathwayRampId'
    merge_Pathwayfreq_source <- merge(freq_unique_input_RampId_C, unique_pathwayRampId_source, by="pathwayRampId")
    # subset metabolite data based on source - kegg, reactome, wiki
    input_kegg_metab <- subset(merge_Pathwayfreq_source, merge_Pathwayfreq_source$pathwaySource == "kegg")
    input_reactome_metab <- subset(merge_Pathwayfreq_source, merge_Pathwayfreq_source$pathwaySource == "reactome")
    input_wiki_metab <- subset(merge_Pathwayfreq_source, merge_Pathwayfreq_source$pathwaySource == "wiki")
    # data frames for Genes with pathawayRampID, Freq based on Source(kegg, reactome, wiki, hmdb)
    # "RAMP_G" prefix selects gene rows.
    input_RampId_G <- input_RampIds[grep("RAMP_G", input_RampIds$rampId), ]
    unique_input_RampId_G <- unique(input_RampId_G[,c("rampId", "pathwayRampId")])
    unique_pathwayG_source <- unique(input_RampId_G[,c("pathwayRampId", "pathwaySource")])
    freq_unique_input_RampId_G <- as.data.frame(table(unique_input_RampId_G[,"pathwayRampId"]))
    names(freq_unique_input_RampId_G)[1] = 'pathwayRampId'
    merge_PathwayG_source <- merge(freq_unique_input_RampId_G, unique_pathwayG_source, by="pathwayRampId")
    # subset gene data based on source - kegg, reactome, wiki
    input_kegg_gene <- subset(merge_PathwayG_source, merge_PathwayG_source$pathwaySource == "kegg")
    input_reactome_gene <- subset(merge_PathwayG_source, merge_PathwayG_source$pathwaySource == "reactome")
    input_wiki_gene <- subset(merge_PathwayG_source, merge_PathwayG_source$pathwaySource == "wiki")
  }
  # Loop through each pathway, build the contingency table, and calculate Fisher's Exact
  # test p-value
  pval=totinpath=userinpath=pidused=c()
  for (i in pid) {
    # Look up the pathway's total analyte count in whichever source it came
    # from, and use that source's matching background size.
    if(analyte_type=="metabolites") {
      if ((!is.na(input_kegg_metab$pathwayRampId[1])) && i %in% input_kegg_metab$pathwayRampId) {
        tot_in_pathway <- input_kegg_metab[which(input_kegg_metab[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- kegg_totanalytes
      } else if ((!is.na(input_wiki_metab$pathwayRampId[1])) && i %in% input_wiki_metab$pathwayRampId) {
        tot_in_pathway <- input_wiki_metab[which(input_wiki_metab[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- wiki_totanalytes
      } else if ((!is.na(input_reactome_metab$pathwayRampId[1])) && i %in% input_reactome_metab$pathwayRampId) {
        tot_in_pathway <- input_reactome_metab[which(input_reactome_metab[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- react_totanalytes
      } else {
        tot_in_pathway = 0
      }
    } else {
      if ((!is.na(input_kegg_gene$pathwayRampId[1])) && i %in% input_kegg_gene$pathwayRampId) {
        tot_in_pathway <- input_kegg_gene[which(input_kegg_gene[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- kegg_totanalytes
      } else if ((!is.na(input_wiki_gene$pathwayRampId[1])) && i %in% input_wiki_gene$pathwayRampId) {
        tot_in_pathway <- input_wiki_gene[which(input_wiki_gene[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- wiki_totanalytes
      } else if ((!is.na(input_reactome_gene$pathwayRampId[1])) &&i %in% input_reactome_gene$pathwayRampId) {
        tot_in_pathway <- input_reactome_gene[which(input_reactome_gene[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- react_totanalytes
      } else {
        tot_in_pathway = 0
      }
    }
    tot_out_pathway <- total_analytes - tot_in_pathway
    # fill the rest of the table out
    user_in_pathway <- length(unique(pathwaydf[which(pathwaydf$pathwayRampId==i),"rampId"]))
    user_out_pathway <- length(unique(pathwaydf$rampId)) - user_in_pathway
    # Row 1: background analytes excluding the user's; row 2: user analytes.
    contingencyTb[1,1] <- tot_in_pathway - user_in_pathway
    contingencyTb[1,2] <- tot_out_pathway - user_out_pathway
    contingencyTb[2,1] <- user_in_pathway
    contingencyTb[2,2] <- user_out_pathway
    result <- stats::fisher.test(contingencyTb)
    pval <- c(pval,result$p.value )
    userinpath<-c(userinpath,user_in_pathway)
    totinpath<-c(totinpath,tot_in_pathway)
    pidused <- c(pidused,i)
  } # end for loop
  # Now run fisher's tests for all other pids
  # (pathways with none of the user's analytes; user_in_pathway is 0 there).
  query <- "select distinct(pathwayRampId) from analytehaspathway where pathwaySource != 'hmdb';"
  con <- DBI::dbConnect(RMySQL::MySQL(), user = username,
                        password = conpass,
                        dbname = dbname,
                        host = host)
  allpids <- DBI::dbGetQuery(con,query)
  DBI::dbDisconnect(con)
  pidstorun <- setdiff(allpids[,1],pid)
  pidstorunlist <- sapply(pidstorun,shQuote)
  pidstorunlist <- paste(pidstorunlist,collapse = ",")
  query2 <- paste0("select rampId,pathwayRampId from analytehaspathway where pathwayRampId in (",
                   pidstorunlist,")")
  con <- DBI::dbConnect(RMySQL::MySQL(), user = username,
                        password = conpass,
                        dbname = dbname,
                        host = host)
  restcids <- DBI::dbGetQuery(con,query2)#[[1]]
  DBI::dbDisconnect(con)
  query1 <- paste0("select rampId,pathwayRampId from analytehaspathway;")
  con <- DBI::dbConnect(RMySQL::MySQL(), user = username,
                        password = conpass,
                        dbname = dbname,
                        host = host)
  allcids <- DBI::dbGetQuery(con,query1)#[[1]]
  DBI::dbDisconnect(con)
  print("Calculating p-values for all other pathways")
  #print(paste0(length(pidstorun),"pathways"))
  # calculating p-values for all other pathways
  # NOTE(review): kegg_metab, wiki_metab, etc. are not defined in this
  # function -- the self-assignments below appear to force lazy-loaded
  # package data objects (precomputed per-source pathway frequency tables)
  # into this environment. Confirm they are shipped as package data.
  kegg_metab <- kegg_metab
  kegg_gene <- kegg_gene
  wiki_metab <- wiki_metab
  wiki_gene <- wiki_gene
  reactome_metab <- reactome_metab
  reactome_gene <- reactome_gene
  hmdb_metab <- hmdb_metab
  hmdb_gene <- hmdb_gene
  count=1;
  pval2=userinpath2=totinpath2=c()
  for (i in pidstorun) {
    if(( count %% 100) ==0) {print(paste0("Processed ",count))}
    count=count+1
    # None of the user's analytes are in these pathways by construction.
    user_in_pathway=0
    if(analyte_type=="metabolites") {
      if (i %in% kegg_metab$pathwayRampId) {
        tot_in_pathway <- kegg_metab[which(kegg_metab[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- kegg_totanalytes
      } else if (i %in% wiki_metab$pathwayRampId) {
        tot_in_pathway <- wiki_metab[which(wiki_metab[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- wiki_totanalytes
      } else if (i %in% reactome_metab$pathwayRampId) {
        tot_in_pathway <- reactome_metab[which(reactome_metab[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- react_totanalytes
      } else if (i %in% hmdb_metab$pathwayRampId) {
        tot_in_pathway <- hmdb_metab[which(hmdb_metab[,"pathwayRampId"]==i),"Freq"]
        # HMDB pathways have no background defined; skipped below via next.
        total_analytes <- NULL
      } else {
        tot_in_pathway=0
        total_analytes <- NULL
      }
    } else {
      if (i %in% kegg_gene$pathwayRampId) {
        tot_in_pathway <- kegg_gene[which(kegg_gene[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- kegg_totanalytes
      } else if (i %in% wiki_gene$pathwayRampId) {
        tot_in_pathway <- wiki_gene[which(wiki_gene[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- wiki_totanalytes
      } else if (i %in% reactome_gene$pathwayRampId) {
        tot_in_pathway <- reactome_gene[which(reactome_gene[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- react_totanalytes
      } else if (i %in% hmdb_gene$pathwayRampId) {
        tot_in_pathway <- hmdb_gene[which(hmdb_gene[,"pathwayRampId"]==i),"Freq"]
        total_analytes <- NULL
      } else {
        tot_in_pathway=0
        total_analytes <- NULL
      }
    }
    # Check that the pathway being considered has your analyte type, if not, move on
    if(is.null(total_analytes)) {next;}
    tot_out_pathway <- total_analytes - tot_in_pathway
    # fill the rest of the table out
    user_out_pathway <- length(unique(pathwaydf$rampId))
    #user_out_pathway <- total_analytes - user_in_pathway
    contingencyTb[1,1] <- tot_in_pathway - user_in_pathway
    contingencyTb[1,2] <- tot_out_pathway - user_out_pathway
    contingencyTb[2,1] <- user_in_pathway
    contingencyTb[2,2] <- user_out_pathway
    result <- stats::fisher.test(contingencyTb)
    pval2 <- c(pval2,result$p.value )
    userinpath2<-c(userinpath2,user_in_pathway)
    totinpath2<-c(totinpath2,tot_in_pathway)
    # pidused <- c(pidused,i)
  } # end for loop
  # only keep pathways that have > 8 or < 100 compounds
  # (i.e. 8 <= size < 100 -- filters out tiny and catch-all pathways)
  keepers <- intersect(which(c(totinpath,totinpath2)>=8),
                       which(c(totinpath,totinpath2)<100))
  #hist(totinpath,breaks=1000)
  print(paste0("Keeping ",length(keepers)," pathways"))
  #fdr <- stats::p.adjust(c(pval,pval2)[keepers],method="fdr")
  #holm <- stats::p.adjust(c(pval,pval2)[keepers],method="holm")
  print(paste0("Calculated p-values for ",length(c(pval,pval2))," pathways"))
  # format output (retrieve pathway name for each unique source id first
  # Multiple-testing adjustment is deliberately left to the caller
  # (runCombinedFisherTest) so it can span both analyte types.
  out <- data.frame(pathwayRampId=c(pidused,pidstorun)[keepers],
                    Pval=c(pval,pval2)[keepers], #FDR.Adjusted.Pval=fdr,
                    # Holm.Adjusted.Pval=holm,
                    Num_In_Path=c(userinpath,userinpath2)[keepers],
                    Total_In_Path=c(totinpath,totinpath2)[keepers])
  print(dim(out))
  #out2 <- merge(out,pathwaydf[,c("pathwayName","pathwayRampId","pathwaysourceId",
  #	"pathwaysource","pathwayRampId")],
  #	by="pathwayRampId",all.x=TRUE)
  #finout <- out[,c("pathwayName", "Pval", #"FDR.Adjusted.Pval",
  #	"Holm.Adjusted.Pval",
  #	"pathwaysourceId",
  #	"pathwaysource","Num_In_Path","Total_In_Path","pathwayRampId")]
  # finout=finout[!duplicated(finout),]
  out = out[!duplicated(out),]
  print(colnames(out))
  # foruser is the output needed, based on what user input
  return(out)
}
#' Do fisher test for only one pathway from search result
#' clicked on highchart
#'
#' Runs runFisherTest separately on the metabolite and gene subsets of the
#' input, then (when both are present) combines the two p-values per pathway
#' with Fisher's method and applies FDR and Holm adjustments across all
#' retained pathways.
#' @param pathwaydf a data frame resulting from getPathwayFromAnalyte
#' @param total_metabolites number of metabolites analyzed in the experiment (e.g. background) (default is 1000; set to 'NULL' to retrieve total number of metabolites that map to any pathway in RaMP). Assumption that analyte_type is "metabolite")
#' @param total_genes number of genes analyzed in the experiment (e.g. background) (default is 20000, with assumption that analyte_type is "genes")
#' @param min_analyte if the number of analytes (gene or metabolite) in a pathway is
#' < min_analyte, do not report
#' @param conpass password for database access (string)
#' @param dbname name of the mysql database (default is "ramp")
#' @param username username for database access (default is "root")
#' @param host host name for database access (default is "localhost")
#' @return a list containing two entries: [[1]] fishresults, a dataframe containing pathways with Fisher's p values (raw and with FDR and Holm adjustment), number of user analytes in pathway, total number of analytes in pathway, and pathway source ID/database. [[2]] analyte_type, a string specifying the type of analyte input into the function ("genes", "metabolites", or "both")
#'@examples
#'\dontrun{
#' pathwaydf<-getPathwayFromAnalyte(c("MDM2","TP53","glutamate","creatinine"),
#'	NameOrIds="names", conpass=conpass)
#' fisher.results <- runCombinedFisherTest(pathwaydf=pathwaydf,conpass=conpass)
#'}
#' @export
runCombinedFisherTest <- function(pathwaydf,total_metabolites=NULL,total_genes=20000,
                                  min_analyte=2,conpass=NULL,
                                  dbname="ramp",username="root",
                                  host = "localhost"){
  if(is.null(conpass)) {
    stop("Please define the password for the mysql connection")
  }
  # G/M flags record whether gene/metabolite tests were actually run;
  # used at the end to report the analyte type.
  G <- M <- 0
  # Grab pathways that contain metabolites to run Fisher on metabolites
  # This will return all pathways that have at 8-120 metabolites/genes in them
  # (RAMP_C_ prefix marks compound/metabolite ramp ids)
  fishmetab <- pathwaydf[grep("RAMP_C_",pathwaydf$rampId),]
  if(nrow(fishmetab) == 0) {outmetab=NULL} else{
    M=1
    print("Running Fisher's tests on metabolites")
    outmetab <- runFisherTest(pathwaydf=fishmetab,analyte_type="metabolites",
                              total_metabolites=total_metabolites,total_genes=total_genes,
                              conpass=conpass,dbname=dbname,
                              username=username,host=host)
  }
  # Grab pathways that contain genes to run Fisher on genes
  # (RAMP_G_ prefix marks gene ramp ids)
  fishgene <- pathwaydf[grep("RAMP_G_",pathwaydf$rampId),]
  if(nrow(fishgene) == 0) {outgene=NULL} else{
    G=1
    print("Running Fisher's tests on genes")
    outgene <- runFisherTest(pathwaydf=fishgene,analyte_type="genes",
                             total_metabolites=total_metabolites,total_genes=total_genes,
                             conpass=conpass,dbname=dbname,
                             username=username,host=host)
  }
  # Case 1: metabolites only -- adjust p-values and attach pathway metadata.
  if(is.null(outgene) & !is.null(outmetab)) {
    out <- outmetab
    fdr <- stats::p.adjust(out$Pval,method="fdr")
    out<-cbind(out,fdr);colnames(out)[ncol(out)]="Pval_FDR"
    holm <- stats::p.adjust(out$Pval,method="holm")
    out<-cbind(out,holm);colnames(out)[ncol(out)]="Pval_Holm"
    keepers <- which(out$Num_In_Path>=min_analyte)
    out2 <- merge(out[keepers,],
                  pathwaydf[,c("pathwayName","pathwayRampId","pathwaysourceId",
                               "pathwaysource")],by="pathwayRampId")
  # Case 2: genes only -- same treatment on the gene results.
  } else if (!is.null(outgene) & is.null(outmetab)) {
    out <- outgene
    fdr <- stats::p.adjust(out$Pval,method="fdr")
    out<-cbind(out,fdr);colnames(out)[ncol(out)]="Pval_FDR"
    holm <- stats::p.adjust(out$Pval,method="holm")
    out<-cbind(out,holm);colnames(out)[ncol(out)]="Pval_Holm"
    keepers <- which(out$Num_In_Path>=min_analyte)
    out2 <- merge(out[keepers,],
                  pathwaydf[,c("pathwayName","pathwayRampId","pathwaysourceId",
                               "pathwaysource")],by="pathwayRampId")
  } else {
    # Case 3: merge the results if both genes and metabolites were run
    G = M = 1
    allfish <- merge(outmetab,outgene,
                     by="pathwayRampId",all.x=T,all.y=T)
    colnames(allfish)[which(colnames(allfish)=="Pval.x")]="Pval.Metab"
    colnames(allfish)[which(colnames(allfish)=="Pval.y")]="Pval.Gene"
    colnames(allfish)[which(colnames(allfish)=="Total_In_Path.x")]="Total_In_Path.Metab"
    colnames(allfish)[which(colnames(allfish)=="Total_In_Path.y")]="Total_In_Path.Gene"
    colnames(allfish)[which(colnames(allfish)=="Num_In_Path.x")]="Num_In_Path.Metab"
    colnames(allfish)[which(colnames(allfish)=="Num_In_Path.y")]="Num_In_Path.Gene"
    # Calculate combined p-values for pathways that have both genes and metabolites
    # Fisher's method: -2*sum(log p) ~ chi-square with 2*k df (k=2 tests).
    gm <- intersect(which(!is.na(allfish$Pval.Metab)),which(!is.na(allfish$Pval.Gene)))
    combpval <- stats::pchisq(-2 * (log(allfish$Pval.Metab[gm])+log(allfish$Pval.Gene[gm])),
                              df=2,lower.tail=FALSE)
    # Pathways with only a gene or only a metabolite p-value keep it as-is.
    g <- which(is.na(allfish$Pval.Metab))
    gpval <- allfish$Pval.Gene[g]
    m <- which(is.na(allfish$Pval.Gene))
    mpval <- allfish$Pval.Metab[m]
    out <- rbind(allfish[gm,],allfish[g,],allfish[m,])
    out <- cbind(out,c(combpval,gpval,mpval))
    colnames(out)[ncol(out)]="Pval_combined"
    fdr <- stats::p.adjust(out$Pval_combined,method="fdr")
    out <- cbind(out,fdr)
    colnames(out)[ncol(out)]="Pval_combined_FDR"
    holm <- stats::p.adjust(out$Pval_combined,method="holm")
    out <- cbind(out,holm)
    colnames(out)[ncol(out)]="Pval_combined_Holm"
    # Keep pathways meeting min_analyte for each type, allowing NA (i.e.
    # the pathway was not tested for that analyte type at all).
    keepers <- intersect(c(which(out$Num_In_Path.Metab>=min_analyte),
                           which(is.na(out$Num_In_Path.Metab))),
                         c(which(out$Num_In_Path.Gene>=min_analyte),
                           which(is.na(out$Num_In_Path.Gene)))
    )
    # Now that p-values are calculated, only return pathways that are in the list
    # of pathways that contain user genes and metabolites
    out2 <- merge(out[keepers,],
                  pathwaydf[,c("pathwayName","pathwayRampId","pathwaysourceId",
                               "pathwaysource")],by="pathwayRampId")
  } # end merging when genes and metabolites were run
  out2 <- out2[!duplicated(out2),]
  analyte_type=c()
  if(G==1 && M==1) {
    analyte_type="both"
  } else if (G==1 && M==0) {
    analyte_type="genes"
  } else if (G==0 && M==1) {
    analyte_type="metabolites"
  }
  return(list(fishresults=out2,analyte_type=analyte_type))
}
#' Function that search analytes (gene or compounds) or a list of analytes and
#' returns associated pathways
#'
#' Maps the input names or source ids to RaMP ids, retrieves the pathways
#' those ids belong to, and returns one row per (analyte, pathway) pair.
#' HMDB pathways are excluded from the output (see note before the return).
#' @param analytes a vector of analytes (genes or metabolites) that need to be searched
#' @param find_synonym find all synonyms or just return same synonym (T/F)
#' @param conpass password for database access (string)
#' @param NameOrIds whether input is "names" or "ids" (default is "ids")
#' @param host host name for database access (default is "localhost")
#' @param dbname name of the mysql database (default is "ramp")
#' @param username username for database access (default is "root")
#' @return a list contains all metabolites as name and pathway inside.
#' @examples
#' \dontrun{
#' mypath <- getPathwayFromAnalyte(analytes=c("2-hydroxyglutarate","glutamate"), conpass="mypassword")
#' }
#' @export
getPathwayFromAnalyte<- function(analytes=NULL,
                                 find_synonym = FALSE,
                                 conpass=NULL,
                                 host = "localhost",
                                 dbname="ramp",
                                 username="root",
                                 NameOrIds = "ids"){
  if(is.null(conpass)) {
    stop("Please define the password for the mysql connection")
  }
  now <- proc.time()
  if(is.null(analytes)) {return(NULL)}
  # Step 1: resolve the input to RaMP ids, either via synonym lookup (names)
  # or via the source-id table (ids); build a quoted, comma-separated list
  # for interpolation into the SQL IN clause.
  if(NameOrIds == "names"){
    print(analytes)
    synonym <- rampFindSynonymFromSynonym(synonym=analytes,
                                          find_synonym=find_synonym,
                                          conpass=conpass, host=host, dbname=dbname,username=username)
    colnames(synonym)[1]="commonName"
    synonym$commonName <- tolower(synonym$commonName)
    if(nrow(synonym)==0) {
      stop("Could not find any matches to the analytes entered. If pasting, please make sure the names are delimited by end of line (not analyte per line)\nand that you are selecting 'names', not 'ids'");
    }
    # Get all unique RaMP ids and call it list_metabolite
    list_metabolite <- unique(synonym$rampId)
    list_metabolite <- sapply(list_metabolite,shQuote)
    list_metabolite <- paste(list_metabolite,collapse = ",")
  } else if (NameOrIds == "ids"){
    sourceramp <- rampFindSourceRampId(sourceId=analytes, conpass=conpass, host=host, dbname=dbname,username=username)
    if (nrow(sourceramp)==0) {
      stop("Make sure you are actually inputting ids and not names (you have NameOrIds set to 'ids'. If you are, then no ids were matched in the RaMP database.")
    }
    # get all unique RaMP ids and call it list_metabolite
    list_metabolite <- unique(sourceramp$rampId)
    #sourceIDTable <- list_metabolite
    #list_metabolite <- list_metabolite$rampId
    list_metabolite <- sapply(list_metabolite,shQuote)
    list_metabolite <- paste(list_metabolite,collapse = ",")
  } else {
    stop("Make sure NameOrIds is set to 'names' or 'ids'")
  }
  # Parse data to fit mysql
  # Can be simplified here
  if(list_metabolite=="") {
    warning("Unable to retrieve metabolites")
    return(NULL)
  }
  # Step 2: using the RaMP compound id, retrieve associated pathway ids
  query2 <- paste0("select pathwayRampId,rampId from analytehaspathway where
                   rampId in (",
                   list_metabolite,");")
  con <- RaMP::connectToRaMP(dbname=dbname,username=username,conpass=conpass,host = host)
  #print(query2)
  df2 <- DBI::dbGetQuery(con,query2)
  DBI::dbDisconnect(con)
  pathid_list <- df2$pathwayRampId
  pathid_list <- sapply(pathid_list,shQuote)
  pathid_list <- paste(pathid_list,collapse = ",")
  # Step 3: with pathway ids, retrieve pathway information
  if(pathid_list=="") {
    warning("The input list of analytes do not map to any pathways")
    return(NULL)
  }
  query3 <- paste0("select pathwayName,sourceId as pathwaysourceId,type as pathwaysource,pathwayRampId from pathway where pathwayRampId in (",
                   pathid_list,");")
  con <- RaMP::connectToRaMP(dbname=dbname,username=username,conpass=conpass,host = host)
  df3 <- DBI::dbGetQuery(con,query3)
  DBI::dbDisconnect(con)
  #Format output
  mdf <- merge(df3,df2,all.x = T)
  # Step 4: with rampIds (list_metabolite), get common names when Ids are input
  if(NameOrIds == "ids"){
    list_analytes <- sapply(analytes,shQuote)
    list_analytes <- paste(list_analytes,collapse = ",")
    query4 <-paste0("select sourceId,commonName,rampId from source where sourceId in (",list_analytes,");")
    con <- RaMP::connectToRaMP(dbname=dbname,username=username,conpass=conpass,host = host)
    df4 <- DBI::dbGetQuery(con,query4)
    DBI::dbDisconnect(con)
    #convert latin1 encoding to UTF-8 (some common names in the source table
    #are stored in latin1 and would otherwise print garbled)
    df4$commonName <- sapply(as.character(df4$commonName), function(x) if (stringi::stri_enc_mark(x)=="native") { x <- iconv(x,"latin1","UTF-8") } else {x})
    mdf <- merge(mdf,df4,all.x = T,by.y = "rampId")
    mdf$commonName=tolower(mdf$commonName)
  } else{ # Just take on the name
    mdf <- merge(mdf,synonym,all.x = T,by.y = "rampId")
  }
  out<-mdf[!duplicated(mdf),]
  # For now, not returning HMDB pathways because they include the 30K
  # new pathways that are mainly drug and lipid pathways (need more proper
  # structural resolution matching)
  return(out[which(out$pathwaysource!="hmdb"),c("rampId","pathwayRampId","pathwayName",
                                                "pathwaysourceId","pathwaysource","commonName")])
}
#' Perform fuzzy multiple linkage partitioning clustering on pathways identified by
#' Fisher's test
#'
#' Seeds a cluster (medoid) around every pathway whose analyte-overlap
#' similarity with at least \code{min_pathway_tocluster} other pathways is
#' >= \code{perc_analyte_overlap}, then iteratively merges the most similar
#' cluster pairs until no pair overlaps by >= \code{perc_pathway_overlap}.
#' A pathway may belong to several clusters (fuzzy assignment).
#'
#' @param fishers_df The data frame generated by runFisherTest
#' @param perc_analyte_overlap Minimum overlap for pathways to be considered similar
#' (Default = 0.5)
#' @param min_pathway_tocluster Minimum number of 'similar' pathways required to start
#' a cluster (medoid) (Default = 3)
#' @param perc_pathway_overlap Minimum overlap for clusters to merge (Default = 0.5)
#'
#' @return list:[[1]] Pathway enrichment result dataframe with cluster assignment column added
#' [[2]] analyte type
#' [[3]] cluster assignment in the list form
#'@examples
#'\dontrun{
#' pathwaydf<-getPathwayFromAnalyte(c("MDM2","TP53","glutamate","creatinine"),
#'	NameOrIds="names", conpass=conpass)
#' fisher.results <- runCombinedFisherTest(pathwaydf=pathwaydf,conpass=conpass)
#' filtered.fisher.results <- FilterFishersResults(fisher.results,p_holmadj_cutoff=0.05)
#' filteredclust.fisher.results <- findCluster(filtered.fisher.results)
#'}
#' @export
findCluster <- function(fishers_df,perc_analyte_overlap = 0.5,
                        min_pathway_tocluster = 2,perc_pathway_overlap = 0.5){
  # Both thresholds must be strictly between 0 and 1.
  if(perc_analyte_overlap <= 0 || perc_analyte_overlap >= 1 ||
     perc_pathway_overlap <= 0 || perc_pathway_overlap >= 1){
    return(NULL)
  }
  # Unpack the list produced by runCombinedFisherTest.
  analyte_type=fishers_df$analyte_type
  fishers_df=fishers_df$fishresults
  if(nrow(fishers_df)==0){
    return(NULL)
  }else if(nrow(fishers_df)==1){
    # A single pathway cannot form a cluster.
    fishers_df$cluster_assignment="Did not cluster"
    fishers_df$rampids<-fishers_df$pathwayRampId
    fishers_df$pathwayRampId<-NULL
    output<-list(fishresults=fishers_df,analyte_type=analyte_type,cluster_list="Did not cluster")
    return(output)
  } else {
    #similarity_matrix_list<-loadOverlapMatrices()
    # NOTE(review): genes_result / analyte_result / metabolites_result are
    # not defined in this function -- presumably precomputed pathway-by-pathway
    # overlap matrices shipped as package data; confirm.
    similarity_matrix_gene <- genes_result
    similarity_matrix_analyte <- analyte_result
    similarity_matrix_metab <- metabolites_result
    if(analyte_type=="both"){
      #similarity_matrix = similarity_matrix_list[["analyte"]]
      similarity_matrix = similarity_matrix_analyte
    }else if(analyte_type=="metabolites"){
      #similarity_matrix = similarity_matrix_list[["metab"]]
      similarity_matrix = similarity_matrix_metab
    } else if(analyte_type=="genes"){
      #similarity_matrix = similarity_matrix_list[["gene"]]
      similarity_matrix = similarity_matrix_gene
    } else {
      stop("analyte_type should be 'genes' or metabolites'")
    }
    # Restrict the similarity matrix to the pathways present in the input.
    pathway_list<-fishers_df[,"pathwayRampId"]
    pathway_indices<-match(pathway_list,rownames(similarity_matrix))
    if(length(which(is.na(pathway_indices)))>0){
      pathway_indices<-pathway_indices[-which(is.na(pathway_indices))]
    }
    pathway_matrix<-similarity_matrix[pathway_indices,pathway_indices]
    # Seed clusters: for each pathway, collect all pathways whose similarity
    # meets perc_analyte_overlap; only keep seeds with at least
    # min_pathway_tocluster members (diagonal self-match included).
    unmerged_clusters<-apply(pathway_matrix, 1, function(x){
      # if(length(which(x>=perc_analyte_overlap))>(min_pathway_tocluster+1)){
      if(length(which(x>=perc_analyte_overlap))>(min_pathway_tocluster-1)){
        return(colnames(pathway_matrix)[which(x>=perc_analyte_overlap)])
      } else {
        return(NA)
      }
    })
    # Remove the unmerged clusters
    if(length(which(is.na(unmerged_clusters)))>0){
      unmerged_clusters<-unmerged_clusters[-which(is.na(unmerged_clusters))]
    }
    if(length(unmerged_clusters)==0){
      #stop("No medoids found, make perc_analyte_overlap or min_pathway_tocluster smaller")
      cluster_list<-rep("Did not cluster",times = nrow(fishers_df))
    }else{
      # Evaluate similarity between clusters (Jaccard index of membership).
      cluster_similarity<-matrix(0,ncol = length(unmerged_clusters),nrow = length(unmerged_clusters))
      for(i in 1:length(unmerged_clusters)){
        for(j in 1:length(unmerged_clusters)){
          cluster_similarity[i,j]<-length(intersect(unmerged_clusters[[i]],unmerged_clusters[[j]]))/
            length(unique(c(unmerged_clusters[[i]],unmerged_clusters[[j]])))
        }
      }
      colnames(cluster_similarity)<-rownames(cluster_similarity)<-names(unmerged_clusters)
      unmerged_cluster_similarity<-cluster_similarity
      cluster_list<-unmerged_clusters
      # Merge Clusters: while any off-diagonal similarity meets
      # perc_pathway_overlap (the diagonal accounts for nrow entries equal
      # to 1), union the most similar pair, mark the absorbed cluster NA,
      # drop NAs, and recompute the similarity matrix.
      count = 1
      while(length(which(cluster_similarity >= perc_pathway_overlap)) > nrow(cluster_similarity)){
        cluster_similarity_mod<-cluster_similarity
        # Zero the diagonal so self-similarity never wins the max below.
        for(i in 1:nrow(cluster_similarity_mod)){
          cluster_similarity_mod[i,i]<-0
        }
        clusters_to_merge<-which(cluster_similarity_mod == max(cluster_similarity_mod), arr.ind = TRUE)
        # Symmetric matrix: sort each (row, col) pair and dedupe so each
        # pair is merged once.
        clusters_to_merge<-unique(t(apply(clusters_to_merge, 1, sort)))
        for(i in 1:nrow(clusters_to_merge)){
          if(!is.na(cluster_list[[clusters_to_merge[i,1]]])&&!is.na(cluster_list[[clusters_to_merge[i,2]]])){
            cluster_list[[clusters_to_merge[i,1]]]<-unique(unlist(cluster_list[c(clusters_to_merge[i,1],clusters_to_merge[i,2])]))
            cluster_list[[clusters_to_merge[i,2]]]<-NA
          }
        }
        if(length(which(is.na(cluster_list)))>0){
          cluster_list<-cluster_list[-which(is.na(cluster_list))]
        }
        cluster_similarity<-matrix(0,ncol = length(cluster_list),nrow = length(cluster_list))
        for(i in 1:length(cluster_list)){
          for(j in 1:length(cluster_list)){
            cluster_similarity[i,j]<-length(intersect(cluster_list[[i]],cluster_list[[j]]))/
              length(unique(c(cluster_list[[i]],cluster_list[[j]])))
          }
        }
        if(nrow(cluster_similarity)==1){
          #stop("Clusters converged, use larger perc_pathway_overlap")
          #return(rep(1,times = nrow(fishers_df)))
          cluster_list<-rep("Did not cluster",times = nrow(fishers_df))
        }
        # Safety guard: each iteration removes at least one cluster, so more
        # iterations than seed clusters means the loop is stuck.
        count = count + 1
        if(count == length(unmerged_clusters)+1){
          stop("ERROR: while loop failed to terminate")
          #return(rep(1,times = nrow(fishers_df)))
          #cluster_list<-rep("Did not cluster",times = nrow(fishers_df))
        }
      }
      if(length(unique(cluster_list))!=1){
        colnames(cluster_similarity) = rownames(cluster_similarity) = paste0("cluster_",c(1:length(cluster_list)))
      }
    }
    #return(cluster_list)
    # Reformat cluster list to embed into results file: for each pathway,
    # build a comma-separated string of the cluster numbers it belongs to.
    rampids<-as.vector(fishers_df$pathwayRampId)
    fishers_df$pathwayRampId<-NULL
    if(length(cluster_list)>1){
      cluster_assignment<-sapply(rampids,function(x){
        pathway<-x
        clusters<-""
        for(i in 1:length(cluster_list)){
          if(pathway %in% cluster_list[[i]]){
            clusters<-paste0(clusters,i,sep = ", ",collapse = ", ")
          }
        }
        if(clusters!=""){
          # Strip the trailing ", " separator.
          clusters=substr(clusters,1,nchar(clusters)-2)
        }else{
          clusters = "Did not cluster"
        }
        return(clusters)
      })
      fishers_df<-cbind(fishers_df,cluster_assignment)
    }else{
      fishers_df<-cbind(fishers_df,rep("Did not cluster",times=nrow(fishers_df)))
    }
    fishers_df$rampids<-rampids
    output<-list(fishresults=fishers_df,analyte_type=analyte_type,cluster_list=cluster_list)
    return(output)
  }
}
#' Filter pathways by p-value cutoff for display and clustering
#'
#' Keeps only the pathways whose adjusted p-value is at or below the
#' requested cutoff. Exactly one of the two cutoffs must be supplied.
#' When the input contains combined gene + metabolite p-values
#' (columns named \code{Pval_combined_*}), those columns are used;
#' otherwise the single-omic \code{Pval_Holm}/\code{Pval_FDR} columns are.
#'
#' @param fishers_df The list generated by runFisherTest (elements
#'   \code{fishresults} and \code{analyte_type})
#' @param p_holmadj_cutoff return pathways where Holm adjusted pvalues are < p_holmadj_cutoff
#' @param p_fdradj_cutoff return pathways where FDR adjusted pvalues are < p_fdradj_cutoff
#' @return list:[[1]]Dataframe with pathway enrichment results, only significant pathways
#' [[2]]analyte type
#'@examples
#'\dontrun{
#' pathwaydf<-getPathwayFromAnalyte(c("MDM2","TP53","glutamate","creatinine"),
#'	NameOrIds="names", conpass=conpass)
#' fisher.results <- runCombinedFisherTest(pathwaydf=pathwaydf,conpass=conpass)
#' filtered.fisher.results <- FilterFishersResults(fisher.results,p_holmadj_cutoff=0.05)
#'}
#' @export
FilterFishersResults <- function(fishers_df, p_holmadj_cutoff = NULL,
                                 p_fdradj_cutoff = NULL) {
  analyte_type <- fishers_df$analyte_type
  results <- fishers_df$fishresults

  # Combined columns are present only when ORA was run on both genes
  # and metabolites.
  combined <- length(grep("Pval_combined", colnames(results))) > 0

  # Pick the adjustment method (Holm takes precedence when both cutoffs
  # are given, matching the original branch order) and the matching column.
  if (!is.null(p_holmadj_cutoff)) {
    pval_col <- if (combined) "Pval_combined_Holm" else "Pval_Holm"
    cutoff <- p_holmadj_cutoff
  } else if (!is.null(p_fdradj_cutoff)) {
    pval_col <- if (combined) "Pval_combined_FDR" else "Pval_FDR"
    cutoff <- p_fdradj_cutoff
  } else {
    # Typo fix: original message said "paramter".
    stop("Please set a cutoff for Holm Adjusted pvalues (p_holmadj_cutoff parameter) or FDR Adjusted pvalues (p_fdradj_cutoff)")
  }

  # which() drops NA p-values as well as those above the cutoff.
  keep <- which(results[, pval_col] <= cutoff)
  return(list(fishresults = results[keep, ], analyte_type = analyte_type))
}
|
43dd96ef6f14e6c87cee358d5da72ed31370f871
|
cba292795ae57c1e058b1112af78c8fb10ec8269
|
/search_pubmed.R
|
31a1562abcfa315500fead482b5f9995c45cc5d4
|
[] |
no_license
|
joebrew/pubmed
|
9c23eeb1972f8cf4130252a12e7f119e36b91bb1
|
4f93cfa0426fdc90be4e09eafe056af8461a4c59
|
refs/heads/master
| 2021-01-20T19:15:38.152816
| 2016-07-18T12:12:50
| 2016-07-18T12:12:50
| 60,718,720
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,155
|
r
|
search_pubmed.R
|
library(RISmed)
library(dplyr)
library(ggplot2)
library(tidyr)
# http://amunategui.github.io/pubmed-query/
# Define function for getting results from pubmed
# Query PubMed through NCBI EUtils (RISmed package), one calendar year at
# a time, for `search_topic` between start_year and end_year.
#
# counts_only = TRUE  -> data.frame with columns year / n (hit count per year).
# counts_only = FALSE -> list(results   = article metadata data.frame,
#                             abstracts = id/abstract data.frame).
#
# NOTE(review): retmax = 5000 caps each yearly query, so years with more
# hits are silently truncated — confirm this is acceptable.
pubmed <- function(start_year = 2014,
                   end_year = 2016,
                   search_topic = 'malaria',
                   counts_only = TRUE){

  # Define a year range
  years <- start_year:end_year

  # If counts only, simply get the counts by year
  if(counts_only){
    # JUST COUNTING RESULTS
    # Create a placeholder for results; n is filled in per year below.
    return_object <- data.frame(year = years,
                                n = NA)
    for (year in 1:length(years)){
      # `year` is a loop index into `years`, not a calendar year.
      message(paste0('Fetching records for year ', years[year]))
      # Perform search; a failed network call is treated as zero hits.
      search_query <- try({
        EUtilsSummary(search_topic,
                      db="pubmed",
                      mindate=years[year],
                      maxdate=years[year],
                      retmax = 5000)})
      if(inherits(search_query, 'try-error')){
        n <- 0
      } else {
        n <- QueryCount(search_query)
      }
      # Populate results dataframe
      return_object$n[year] <- n
    }
  } else {
    # NOT JUST COUNTS. ACTUALLY RETRIEVE RESULTS
    # Create an empty list for sticking results
    results_list <- list()
    # Create another empty list for sticking abstracts
    abstracts_list <- list()
    # Loop through each year to get results. Any error within the try()
    # skips that whole year (its list slot stays empty).
    for (year in 1:length(years)){
      message(paste0('Fetching records for year ', years[year]))
      try({
        # Perform search
        search_query <-
          EUtilsSummary(search_topic,
                        db="pubmed",
                        mindate=years[year],
                        maxdate=years[year],
                        retmax = 5000)
        # See results summary
        # summary(search_query)
        # See number of results
        # QueryCount(search_query)
        # Get IDs of the articles returned in our query
        # Qids <- queryId(search_query)
        # Actually fetch data
        records <- EUtilsGet(search_query)
        # Turn into a data.frame of per-article metadata
        pubmed_data <-
          data.frame('title' = ArticleTitle(records),
                     # 'Abstract' = AbstractText(records),
                     'language' = Language(records),
                     'country' = Country(records),
                     'id' = ArticleId(records),
                     'year' = years[year],
                     'month' = MonthPubmed(records),
                     'day' = DayPubmed(records),
                     'affiliation' = Affiliation(records))
        # Cited(records)
        # RefSource(records)
        # Create separate dataframe for abstracts, keyed by article id
        abstracts <- data.frame('id' = ArticleId(records),
                                'abstract' = AbstractText(records))
        # Add authors separately: one author matrix per article; keep the
        # first and last row (first / last author).
        temp <- Author(records)
        first_authors <- lapply(temp,
                                function(x){
                                  x[1,]
                                })
        last_authors <- lapply(temp,
                               function(x){
                                 x[nrow(x),]
                               })
        # NOTE(review): paste() below uses the default sep = " ", so the
        # resulting column names contain a space (e.g. "first _author_last_name");
        # paste0() was probably intended — confirm before relying on names.
        for (i in c('first', 'last')){
          # Last name
          pubmed_data[,paste(i, '_author_last_name')] <-
            unlist(lapply(get(paste0(i, '_authors')), function(x){x['LastName']}))
          # First name
          pubmed_data[,paste(i, '_author_first_name')] <-
            unlist(lapply(get(paste0(i, '_authors')), function(x){x['ForeName']}))
          # Initials
          pubmed_data[,paste(i, '_author_initials')] <-
            unlist(lapply(get(paste0(i, '_authors')), function(x){x['Initials']}))
        }
        # All authors, comma-separated "Fore Last" per article
        pubmed_data$all_authors <-
          unlist(lapply(Author(records), function(x){
            paste0(x$ForeName,
                   ' ',
                   x$LastName,
                   collapse = ', ')}))
        # Add the year
        pubmed_data$year <- years[year]
        # Add results to results_list
        results_list[[year]] <- pubmed_data
        # Add abstract to abstracts list
        abstracts_list[[year]] <- abstracts
        # Remove unecessary objects
        rm(pubmed_data, abstracts)
        # Brief pause between requests to be polite to the EUtils API.
        Sys.sleep(0.3)
      })
    }
    # Bind together the results
    results <- do.call('rbind', results_list)
    # Bind together the abstracts
    abstracts <- do.call('rbind', abstracts_list)
    # Put into list
    return_object <-
      list(results = results,
           abstracts = abstracts)
  }
  # Return
  return(return_object)
}
# # Use pubmed to get results for malaria eradication
# malaria_eradication <-
# pubmed(start_year = 1945,
# end_year = 2015,
# search_topic = paste0('(malaria[Title/Abstract])',
# 'AND (elimination OR eradication)'))
#
# # Use pubmed to get results for malaria more generally
# malaria <-
# pubmed(start_year = 1945,
# end_year = 2015,
# search_topic = paste0('(malaria[Title/Abstract])'))
#
# # Horizontally bind
# combined <-
# left_join(malaria_eradication %>%
# rename(eradication = n),
# malaria %>%
# rename(malaria = n),
# by = 'year') %>%
# mutate(p = eradication / malaria * 100)
#
# # Rename to make more clear
# combined <-
# combined %>%
# rename(`Mentions eradication or elmination` = eradication,
# `General malaria` = malaria)
#
# # Gather to make long
# combined <- gather(combined,
# key,
# value, `Mentions eradication or elmination`:p)
#
# # Visualize
# ggplot(data = combined %>%
# filter(key != 'p'),
# aes(x = year,
# y = value,
# group = key,
# fill = key)) +
# geom_area() +
# xlab('Year') +
# ylab('Publications') +
# scale_fill_manual(values = c('darkgrey', 'red'),
# name = '') +
# theme_bw() +
# ggtitle(expression(atop('Papers containing "malaria" in title/abstract: 1945-present',
# atop(italic("Retrieved from PubMed"), "")))) +
# theme(legend.position = 'bottom')
# ggsave('pubmed.pdf')
#
# ggplot(data = combined %>%
# filter(key == 'p'),
# aes(x = year,
# y = value)) +
# geom_area(alpha = 0.6,
# color = 'black') +
# xlab('Year') +
# ylab('Percentage') +
# theme_bw() +
# ggtitle(expression(atop('Papers containing "eradication" or "elimination"',
# atop(italic('As % of all "malaria" papers, searching title/abstract only, retrieved from PubMed'), ""))))
# ggsave('pubmed2.pdf')
#
# # NTDs
#
# ntds <-
# pubmed(start_year = 1990,
# end_year = 2015,
# search_topic = paste0('(onchocerciasis[Title/Abstract])',
# ' OR (leishmaniasis[Title/Abstract])',
# ' OR (human african trypanosomiasis[Title/Abstract])',
# ' OR (lymphatic filariasis[Title/Abstract])',
# ' OR (chagas[Title/Abstract])',
# ' OR (schistosomiasis[Title/Abstract])',
# ' OR (oncocerchiasis[Title/Abstract])'))
#
# ntds_eradication <-
# pubmed(start_year = 1990,
# end_year = 2015,
# search_topic = paste0('(onchocerciasis[Title/Abstract])',
# ' OR (leishmaniasis[Title/Abstract])',
# ' OR (human african trypanosomiasis[Title/Abstract])',
# ' OR (lymphatic filariasis[Title/Abstract])',
# ' OR (chagas[Title/Abstract])',
# ' OR (schistosomiasis[Title/Abstract])',
# ' OR (oncocerchiasis[Title/Abstract])',
# ' (AND (public-private partnership)',
# ' OR (ppp)',
# ' OR (pdp)',
# ' OR (public private))'))
#
#
# # Horizontally bind
# combined <-
# left_join(ntds_eradication %>%
# rename(eradication = n),
# ntds %>%
# rename(ntds = n),
# by = 'year') %>%
# mutate(p = eradication / ntds * 100)
#
# # Rename to make more clear
# combined <-
# combined %>%
# rename(`Mentions eradication or elmination` = eradication,
# `General NTDs` = ntds)
#
# # Gather to make long
# combined <- gather(combined,
# key,
# value, `Mentions eradication or elmination`:p)
#
# # Visualize
# g1 <- ggplot(data = combined %>%
# filter(key != 'p'),
# aes(x = year,
# y = value,
# group = key,
# fill = key)) +
# geom_area() +
# xlab('Year') +
# ylab('Publications') +
# scale_fill_manual(values = c('darkgrey', 'red'),
# name = '') +
# theme_bw() +
# ggtitle(expression(atop('Papers containing NTDs in title/abstract: 1990-present',
# atop(italic("Retrieved from PubMed"), "")))) +
# theme(legend.position = 'bottom')
# ggsave('')
#
# g2 <- ggplot(data = combined %>%
# filter(key == 'p'),
# aes(x = year,
# y = value)) +
# geom_area(alpha = 0.6,
# color = 'black') +
# xlab('Year') +
# ylab('Percentage') +
# theme_bw() +
# ggtitle(expression(atop('Papers mentioning PPP in abstract',
# atop(italic('As % of all NTDs papers, searching title/abstract only, retrieved from PubMed'), ""))))
#
# source('multiplot.R')
# multiplot(g1, g2)
#
# # example for celine
#
# x <-
# pubmed(start_year = 2015,
# end_year = 2015,
# search_topic = paste0('(ntds[Title/Abstract])'),
# counts_only = FALSE)
|
15ba4982f4f13e742055ac2c62704bd05b0c208c
|
b696ca1a14b4e4f49d299298acdc5dbfc94063c7
|
/energy_consumption/currentcost_idle_1h.r
|
bf955ed9eb736b875ed41d396d1a9dcfa1ce59a0
|
[
"Apache-2.0"
] |
permissive
|
jvilaplana/openstack-greenc
|
7af677806b6c90eb42cfb3ba569f2d0bbef98dca
|
5f6fd37e80d60fa9a9bcc28d456fe48d1f8aafc5
|
refs/heads/master
| 2021-01-20T12:00:42.738231
| 2014-07-25T11:24:41
| 2014-07-25T11:24:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,691
|
r
|
currentcost_idle_1h.r
|
# Plot CurrentCost power readings (1 h idle trace) for four compute nodes.
# Produces one combined EPS plus one EPS per node. The input CSV has no
# header; V5 = node id, V3 = timestamp (factor), V8 = power reading.
data <- read.csv("currentcost_idle_1h.csv", header = FALSE)

# One data frame per compute node, in node order 1..4 (was compute01..04).
computes <- lapply(c("1", "2", "3", "4"), function(node) data[data$V5 == node, ])

# Shared x-axis annotation: a subset of node 1's timestamp levels.
# NOTE(review): the text() x positions do not equal the level indices used
# for the labels — kept exactly as in the original script; confirm intended.
time_labels <- levels(computes[[1]]$V3)[c(1, 409, 818, 1227, 1636, 2044)]
label_x <- c(0, 109, 218, 336, 445, 545)

# Draw the rotated time labels under the current plot (cex varies per figure).
draw_time_axis <- function(cex) {
  axis(1, labels = FALSE, tck = 0)
  text(label_x, par("usr")[3] - 1.80, srt = 45, adj = 1,
       labels = time_labels, xpd = TRUE, cex = cex)
}

node_colours <- c("blue", "red", "green", "orange")

# Combined plot: all four nodes on one set of axes.
postscript("currentcost_idle_1h.eps")
plot(computes[[1]]$V8, ylab = "", xlab = "", type = "l", col = node_colours[1],
     ylim = c(110, 180), lwd = 1, xaxt = "n", cex.axis = 1.4, cex.lab = 1.4)
draw_time_axis(1.4)
# Draw nodes 4, 3, 2 in that order so node 2 ends up on top, as originally.
lines(computes[[4]]$V8, col = node_colours[4], lwd = 2)
lines(computes[[3]]$V8, col = node_colours[3], lwd = 2)
lines(computes[[2]]$V8, col = node_colours[2], lwd = 2)
dev.off()

# One plot per node, replacing four copy-pasted blocks in the original.
for (node in 1:4) {
  postscript(sprintf("currentcost_idle_1h_compute%02d.eps", node))
  plot(computes[[node]]$V8, ylab = "", xlab = "", type = "l",
       col = node_colours[node], ylim = c(110, 180), lwd = 3,
       xaxt = "n", cex.axis = 1.8, cex.lab = 1.8)
  draw_time_axis(1.8)
  dev.off()
}
|
70d2e062cd55f9b12cb96414c6b284f90d749aa8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/castor/examples/get_tree_traversal_root_to_tips.Rd.R
|
b2207d2134551c4411fa846c8a911f8bec6dbaf1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 284
|
r
|
get_tree_traversal_root_to_tips.Rd.R
|
# Auto-extracted example script from the castor package's
# get_tree_traversal_root_to_tips.Rd help page. The example call itself is
# inside a "Not run" guard because it needs a pre-existing `tree` object
# (presumably a phylo tree — not constructed in this snippet).
library(castor)
### Name: get_tree_traversal_root_to_tips
### Title: Traverse tree from root to tips.
### Aliases: get_tree_traversal_root_to_tips
### Keywords: traversal
### ** Examples
## Not run:
##D get_tree_traversal_root_to_tips(tree, include_tips=TRUE)
## End(Not run)
|
72cb87ed42979f40df4847c7c877dfb2524c4e72
|
40f4cb44ab742a168ca3f82d36a3e38dcaa6f844
|
/R/getRelevantIds.R
|
7c62fabcf650bf3692f39000bf3081435d7a1651
|
[] |
no_license
|
sankleta/BED
|
34e3f91fceffbb1164e65ab8a4cb24e6431b898b
|
85c5c5ba4bbc927155d454dc6612512c7b197805
|
refs/heads/master
| 2021-04-30T05:55:28.535605
| 2018-02-06T11:18:59
| 2018-02-06T11:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,786
|
r
|
getRelevantIds.R
|
#' Get relevant IDs for a formerly identified BE in a context of interest
#'
#' This function is meant to be used with \code{\link{searchId}} in order
#' to implement a dictionary of identifiers of interest. First
#' the \code{\link{searchId}} function is used to search a term.
#' Then the \code{\link{getRelevantIds}} function
#' is used to find the corresponding IDs in a context of interest.
#'
#' @param d the data.frame returned by \code{\link{searchId}}.
#' @param selected the rows of interest in d
#' @param be the BE in the context of interest
#' @param source the source of the identifier in the context of interest
#' @param organism the organism in the context of interest
#' @param restricted boolean indicating if the results should be restricted to
#' the current version of the BEID db. If FALSE former BEID are also returned:
#' \strong{Depending on history it can take a very long time to return
#' a very large result!}
#' @param simplify if TRUE (default) duplicated IDs are removed from the output
#' @param verbose if TRUE, the CQL query is shown
#'
#' @return The d data.frame with a new column providing the relevant ID
#' in the context of interest and without the gene field.
#'
#' @export
#'
#' @seealso \code{\link{searchId}}
#'
getRelevantIds <- function(
    d, selected=1,
    be=c(listBe(), "Probe"), source, organism,
    restricted=TRUE,
    simplify=TRUE,
    verbose=FALSE
){
    # Keep only row indices that actually exist in d.
    selected <- intersect(selected, 1:nrow(d))
    if(length(selected)<1){
        # Message fix: the original text ("Only one row of d can be
        # selected") did not describe this condition, which is emptiness.
        stop("The 'selected' parameter does not identify any row of d")
    }
    dcols <- c("found", "entity", "be", "source", "organism", "gene")
    if(!all(dcols %in% colnames(d))){
        stop(
            sprintf(
                "d should be a data.frame with the following columns: %s.",
                paste(dcols, collapse=", ")
            ),
            " This data.frame is returned by the searchId function."
        )
    }
    # NOTE(review): the result of match.arg() is discarded, so it only
    # validates; calling with the default `be` (a vector) will error here.
    # Callers appear to always pass a single value — confirm.
    match.arg(be, c("Probe", listBe()), several.ok=FALSE)
    ## Resolve the requested organism to its scientific name.
    tax <- getTaxId(organism)
    organism <- getOrgNames(tax)
    organism <- organism$name[which(organism$nameClass=="scientific name")]
    ##
    toRet <- NULL
    for(sel in selected){
        # Context of the selected hit: its BE, entity id, source, organism
        # and associated gene entity ids.
        from <- d[sel, "ebe"]
        from.entity <- d[sel, "entity"]
        from.source <- d[sel, "source"]
        from.org <- d[sel, "organism"]
        from.tax <- getTaxId(from.org)
        from.gene <- d[sel, "gene"][[1]]
        ## Cross-species case: map the hit's genes to homologous genes of
        ## the target organism through GeneIDFamily, then work from genes.
        if(tax!=from.tax){
            hqs <- c(
                'MATCH (fg:Gene)<-[:identifies]-(:GeneID)',
                '-[:is_member_of]->(:GeneIDFamily)<-[:is_member_of]-',
                '(:GeneID)-[:identifies]->(tg:Gene)-[:belongs_to]->(tid:TaxID)',
                'WHERE id(fg) IN $fromGene AND tid.value=$tax',
                'RETURN id(tg) as gene'
            )
            targGene <- unique(bedCall(
                f=cypher,
                query=prepCql(hqs),
                parameters=list(fromGene=as.list(from.gene), tax=tax)
            ))[,"gene"]
            if(length(targGene)==0){
                # No homolog in the target organism: nothing to report.
                next()
            }
            from.entity <- targGene
            from <- "Gene"
        }
        ## Build the match pattern from the requested target ID node (t)
        ## to its identified BE node (tbe).
        if(be=="Probe"){
            qs <- genProbePath(platform=source)
            targBE <- attr(qs, "be")
            qs <- paste0(
                sprintf(
                    '(t:ProbeID {platform:"%s"})',
                    source
                ),
                qs,
                sprintf(
                    '(tbe:%s)',
                    targBE
                )
            )
        }else{
            targBE <- be
            if(source=="Symbol"){
                qs <- paste0(
                    '(t:BESymbol)<-[fika:is_known_as]-',
                    sprintf(
                        '(tid:%s)',
                        paste0(targBE, "ID")
                    ),
                    '-[:is_replaced_by|is_associated_to*0..]->()',
                    '-[:identifies]->',
                    sprintf(
                        '(tbe:%s)',
                        targBE
                    )
                )
            }else{
                qs <- paste0(
                    sprintf(
                        '(t:%s {database:"%s"})',
                        paste0(targBE, "ID"), source
                    ),
                    # restricted=TRUE excludes deprecated IDs by omitting
                    # the is_replaced_by relationship.
                    ifelse(
                        restricted,
                        '-[:is_associated_to*0..]->',
                        '-[:is_replaced_by|is_associated_to*0..]->'
                    ),
                    # '-[:is_replaced_by|is_associated_to*0..]->',
                    sprintf(
                        '(:%s)',
                        paste0(targBE, "ID")
                    ),
                    '-[:identifies]->',
                    sprintf(
                        '(tbe:%s)',
                        targBE
                    )
                )
            }
        }
        ##
        qs <- paste('MATCH', qs)
        ## Constrain the target BE by the genes found above, unless the
        ## hit itself is already expressed as genes.
        if(from!="Gene"){
            if(targBE=="Gene"){
                qs <- c(
                    qs,
                    'WHERE id(tbe) IN $fromGene'
                )
            }else{
                qs <- c(
                    qs,
                    paste0(
                        'MATCH (tbe)',
                        genBePath(targBE, "Gene"),
                        '(tGene)'
                    ),
                    'WHERE id(tGene) IN $fromGene'
                )
            }
        }
        ## Constrain by the originating entity (possibly through a
        ## BE-to-BE path when the BE types differ).
        if(targBE==from){
            qs <- c(
                qs,
                'MATCH (tbe) WHERE id(tbe) IN $fromEntity'
            )
        }else{
            qs <- c(
                qs,
                paste0(
                    'MATCH (fbe)',
                    genBePath(from, targBE),
                    '(tbe)'
                ),
                'WHERE id(fbe) IN $fromEntity'
            )
        }
        ##
        qs <- c(
            qs,
            'RETURN t.preferred as preferred, t.value as id'
        )
        if(verbose){
            message(prepCql(qs))
        }
        value <- unique(bedCall(
            f=cypher,
            query=prepCql(qs),
            parameters=list(
                fromGene=as.list(from.gene),
                fromEntity=as.list(from.entity)
            )
        ))#$id
        # One output row per matching ID, replicating the selected input row.
        if(!is.null(value) && nrow(value) > 0){
            toAdd <- d[rep(sel, nrow(value)),]
            toAdd$preferred <- value$preferred
            toAdd$id <- value$id
            rownames(toAdd) <- NULL
            toRet <- rbind(toRet, toAdd)
        }
    }
    if(!is.null(toRet) && ncol(toRet)>0){
        # Name the new ID column after the requested context.
        colnames(toRet)[ncol(toRet)] <- paste0(
            be, " from ", source,
            " (", organism, ")"
        )
        rownames(toRet) <- NULL
        toRet <- toRet[,which(colnames(toRet)!="gene")]
        if(simplify){
            # Stable sorts: the last ordering criterion applied dominates,
            # so rows from the requested organism/BE/source and preferred
            # IDs survive the final de-duplication.
            toRet <- toRet[order(toRet$canonical, decreasing=TRUE),]
            toRet <- toRet[
                order(toRet$found==toRet[,ncol(toRet)], decreasing=TRUE),
            ]
            toRet <- toRet[order(toRet$source==source, decreasing=TRUE),]
            toRet <- toRet[order(toRet$be==be, decreasing=TRUE),]
            toRet <- toRet[order(toRet$preferred, decreasing=TRUE),]
            toRet <- toRet[order(toRet$organism==organism, decreasing=TRUE),]
            toRet <- toRet[!duplicated(toRet[,ncol(toRet)]),]
        }
    }
    return(toRet)
}
|
631cff91b868bb6dbf5ca7b9867fd70b02251300
|
dcc6db13df6c4f0f22985b0380ac1d9e1c28d758
|
/man/sort_matrix.Rd
|
f36cbb378e806d969e55afe7f5a303776a6a6b2c
|
[
"MIT"
] |
permissive
|
MyersGroup/testisAtlas
|
b6f247756f2ec0a7c77665592c7f93378a9d285f
|
53ed0d70ee7d06fe739cbbf7135907ac8999bd79
|
refs/heads/master
| 2021-06-23T05:56:26.949219
| 2020-11-28T13:17:19
| 2020-11-28T13:17:19
| 140,632,831
| 13
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
sort_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_single_cell.R
\name{sort_matrix}
\alias{sort_matrix}
\title{Sort a matrix so that the highest correlations lie along the diagonal}
\usage{
sort_matrix(mat = cross_cor, order1 = max_cor_per_compoent)
}
\arguments{
\item{mat}{numeric matrix to be sorted}
\item{order1}{order of rows}
}
\value{
numeric matrix
}
\description{
Sorts a matrix so that the highest correlations lie along the diagonal.
}
\details{
For a given row order, sorts the columns so that each row
has the highest correlation (in a greedy fashion)
}
|
b051072d4a4107667019856488ab4f3785d8ce12
|
dbc6954f25fb1fa4f584eb3e6c7f6b90d1766fbb
|
/MVE_BASED/species_exposure_calculate.r
|
a0ce4217d935455b5ef1be60224dacf37e16b14b
|
[] |
no_license
|
RannieWan/diversity_in_e
|
05da7390903972e3574861d61fa3344f3c668611
|
72649009420c0ebc068ecd81c2f36a4dc6f3b6a6
|
refs/heads/master
| 2023-03-10T17:07:20.147042
| 2021-03-01T08:54:23
| 2021-03-01T08:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,957
|
r
|
species_exposure_calculate.r
|
library(raster)
library(rgdal)
library(rgeos)
library(MASS)
library(cluster)
library(dplyr)
source("functions.r")
# Decide whether a cell/scenario combination is "exposed": the focal year
# must be outside the niche (t_is_out == 1) and every record of the same
# cell, GCM and SSP within the following exposure window must be outside
# as well. Returns 1 when exposed, 0 otherwise.
fun_exposure <- function(t_mask_index, t_year, t_GCM, t_SSP, t_is_out, df, exposure_year){
  # A cell that is inside its niche this year cannot be exposed.
  if (t_is_out == 0) {
    return(0)
  }
  last_year <- t_year + exposure_year - 1
  window <- df %>%
    dplyr::filter(mask_index == t_mask_index) %>%
    dplyr::filter(GCM == t_GCM, SSP == t_SSP) %>%
    dplyr::filter(between(year, t_year, last_year))
  # Exposed only when every matched year is out of the niche.
  if (sum(window$is_out) == nrow(window)) {
    return(1)
  }
  return(0)
}
# Window test on a data frame already restricted to one GCM/SSP: returns 1
# when the focal cell is out of its niche in t_year and in every following
# year of the exposure window, 0 otherwise.
fun_exposure_sub <- function(t_mask_index, t_year, t_is_out, df_sub, exposure_year){
  # In-niche years are never exposed.
  if (t_is_out == 0) {
    return(0)
  }
  final_year <- t_year + exposure_year - 1
  window <- df_sub %>%
    dplyr::filter(mask_index == t_mask_index) %>%
    dplyr::filter(between(year, t_year, final_year))
  # Exposed only when every matched year is out of the niche.
  if (sum(window$is_out) == nrow(window)) {
    return(1)
  }
  return(0)
}
# Command-line driver: for every species of a taxon group, flag grid cells
# whose climate stays outside the species' niche for `exposure_year`
# consecutive years, per GCM x SSP scenario.
args <- commandArgs(trailingOnly = TRUE)
group <- args[1]
# NOTE(review): setwd() to a machine-specific path makes this script
# non-portable; kept because all downstream paths are relative to it.
setwd("/media/huijieqiao/Speciation_Extin/Sp_Richness_GCM/Script/diversity_in_e")
if (is.na(group)) {
  group <- "Amphibians"
}
GCMs <- c("EC-Earth3-Veg", "MRI-ESM2-0", "UKESM1")
SSPs <- c("SSP119", "SSP245", "SSP585")
Labels <- expand.grid(GCM = GCMs, SSP = SSPs)
df_list <- readRDS(sprintf("../../Objects/IUCN_List/%s.rda", group))
# Shuffle species so that concurrent jobs are unlikely to pick the same
# species (the exposure_end.rda marker below acts as a crude lock).
df_list <- df_list[sample(nrow(df_list), nrow(df_list)), ]
exposure_year <- 5
year_range <- c(2015:2100)
for (i in seq_len(nrow(df_list))) {
  item <- df_list[i, ]
  item$sp <- gsub(" ", "_", item$sp)
  # Skip species with no mapped range.
  if (item$area <= 0) {
    next()
  }
  target_folders <- c(sprintf("../../Objects/Niche_Models_Mean_GCM/%s/%s", group, item$sp))
  for (target_folder in target_folders) {
    target <- sprintf("%s/exposure", target_folder)
    target_rda <- sprintf("%s/exposure_end.rda", target)
    # A pre-existing result file marks the species as done or claimed.
    if (file.exists(target_rda)) {
      next()
    }
    saveRDS(NULL, target_rda)
    df <- readRDS(sprintf("%s/exposure.rda", target))
    print(paste(group, i, nrow(df_list), item$sp, target, nrow(df)))
    # range_type: 0 = in range; 1-4 = out of the niche for various reasons
    # (see the comment in the original: temp, prec, or both).
    df$is_out <- 1
    df[which(df$range_type == 0), "is_out"] <- 0
    print(system.time({
      final_df <- NULL
      for (label_i in seq_len(nrow(Labels))) {
        label <- Labels[label_i, ]
        df_item <- df %>% dplyr::filter((GCM == label$GCM) & (SSP == label$SSP))
        df_item <- df_item %>% dplyr::rowwise() %>%
          mutate(is_exposure = fun_exposure_sub(mask_index, year, is_out, df_item, exposure_year))
        # BUG FIX: the original called bind(), which exists in neither
        # dplyr nor base R and would error at runtime; bind_rows() is the
        # intended row-appending function.
        final_df <- dplyr::bind_rows(final_df, df_item)
      }
    }))
    saveRDS(final_df, target_rda)
  }
}
# Manual debugging snippet, intentionally never executed.
if (FALSE) {
  df <- readRDS(target_rda)
  test <- df[2, ]
  t_mask_index <- test$mask_index
  t_GCM <- test$GCM
  t_SSP <- test$SSP
  t_year <- test$year
  t_is_out <- test$is_out
}
|
a3543f15b62b648f91d21044264d2abfba898eb5
|
e5a1e0780d2b93689dbb153e5ab733c4049f8839
|
/R/V1_T2.1.R
|
deba2ad65def9a307e60e8cc875e8e74828974e4
|
[
"MIT"
] |
permissive
|
LucyNjoki/rKenyaCensus
|
5b86efcdb7604067500087be68c463587daf362d
|
6db00e5b1b71a781e6def15dd98a4828b6d960bc
|
refs/heads/master
| 2022-11-06T17:58:34.520255
| 2020-06-29T11:34:02
| 2020-06-29T11:34:02
| 276,578,774
| 0
| 1
|
NOASSERTION
| 2020-07-02T07:30:50
| 2020-07-02T07:30:49
| null |
UTF-8
|
R
| false
| false
| 276
|
r
|
V1_T2.1.R
|
#' Volume I: Table 2.1
#'
#' Census Indicators at a Glance, 2019
#'
#' @docType data
#'
#' @usage data(V1_T2.1)
#'
#' @format A data frame with 2 variables:
#' \describe{
#'   \item{\code{Indicator}}{Name of the census indicator}
#'   \item{\code{Value}}{Reported value for the indicator}
#'}
#' @keywords datasets
#'
"V1_T2.1"
|
27861656c8112fb97bcd79dda2b6bd995fcc8031
|
1b86aeca71da56029575093fb47a16cac881a04d
|
/plot3.R
|
c90dc91c3d78cadc36d06496e04f7ec1ed9bf050
|
[] |
no_license
|
edzai/ExData_Plotting1
|
ff470f6381feb7ac7c5bc0e3ee04d194836feeea
|
48fe2e3abaf586a459649a439569515bb819d837
|
refs/heads/master
| 2021-01-11T02:38:14.204419
| 2016-10-20T13:54:50
| 2016-10-20T13:54:50
| 70,948,498
| 0
| 0
| null | 2016-10-14T21:27:57
| 2016-10-14T21:27:56
| null |
UTF-8
|
R
| false
| false
| 682
|
r
|
plot3.R
|
# Plot 3: energy sub-metering (three channels) over two days of the
# household electric power consumption data.
# NOTE(review): the input path is hard-coded to a local Windows folder —
# as written the script only runs on that machine.
edata = read.table("C:/Users/Edzai/Downloads/electricity/power.txt", header=T, sep=";", na.strings="?")
# Keep only 1-2 Feb 2007 (dates are d/m/Y strings in the raw file).
edata <- edata[edata$Date %in% c("1/2/2007","2/2/2007"),]
# Build a POSIXlt timestamp from the separate Date and Time columns.
Time <-strptime(paste(edata$Date, edata$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
# cbind puts the new Time column first, so edata$Time now resolves to the
# parsed timestamps rather than the original character column.
edata <- cbind(Time, edata)
#Plot 3
columnlines <- c("black", "red", "blue")
labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# One line per sub-meter, all on the same axes, with a legend.
plot(edata$Time, edata$Sub_metering_1, type="l", col=columnlines[1], xlab="", ylab="Energy sub metering")
lines(edata$Time, edata$Sub_metering_2, col=columnlines[2])
lines(edata$Time, edata$Sub_metering_3, col=columnlines[3])
legend("topright", legend=labels, col=columnlines, lty="solid")
|
0d2e30f3d06ca96ced07148c60a9e19d7ffc1a9f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/spartan/R/efast_sd.R
|
e9edcad0ccdecd7921d172bcd396c88512270612
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,027
|
r
|
efast_sd.R
|
# Compute eFAST (extended Fourier Amplitude Sensitivity Test) first-order
# (Si) and total-order (STi) sensitivity indexes from simulation results.
#
# Arguments:
#   RESULTSARRAY - 3-D numeric array of model output, indexed as
#                  [sample, (parameter - 1) * OUTMEASURES + measure, curve].
#   OMi          - frequency assigned to the parameter of interest.
#   MI           - number of Fourier harmonics summed for the parameter.
#   OUTMEASURES  - number of output measures per simulation.
#   NUMPARAMS    - number of parameters examined.
#   NUMCURVES    - number of resample curves.
#
# Returns list(Si, STi, rangeSi, rangeSTi): Si/STi are indexes averaged
# over curves ([param, 1, measure]); rangeSi/rangeSTi hold the per-curve
# values ([param, curve, measure]). NaN results (zero total variance) are
# replaced by 0.
efast_sd <-
function(RESULTSARRAY,OMi,MI,OUTMEASURES,NUMPARAMS,NUMCURVES)
{
	# The commented-out lines below are remnants of the Matlab
	# implementation this function was ported from.
	# RESULTSMAT FORMAT - FIRST: PARAM SET NUMBER,2ND: TIMEPOINT (NOT USED BUT IS IN EXAMPLE),3RD: RESULT VALUE ARRAY,4TH:PARAMETER,5TH: CURVE
	#[a b c NUMPARAMS NUMCURVES]=size(RESULTSMAT);
	#if nargin<5
	#    display(['ERROR = Choose one or more outputs from var 1 and variable ',num2str(c),' of the model'])
	#    error('eFAST: the output components for the sensitivity is missing. Not enough input arguments.')

	# Output containers for mean and per-curve sensitivity indexes.
	Si<-array(0,dim=c(NUMPARAMS,1,OUTMEASURES))
	STi<-array(0,dim=c(NUMPARAMS,1,OUTMEASURES))
	rangeSi<-array(0,dim=c(NUMPARAMS,NUMCURVES,OUTMEASURES))
	rangeSTi<-array(0,dim=c(NUMPARAMS,NUMCURVES,OUTMEASURES))
	# Per-curve variance components: complementary (Vci), parameter (Vi)
	# and total (V).
	Vci<-array(0,dim=c(1,NUMCURVES,1))
	Vi<-array(0,dim=c(1,NUMCURVES,1))
	V<-array(0,dim=c(1,NUMCURVES,1))

	for(MEASURE in 1:OUTMEASURES)
	{
		for(PARAMNUM in 1:NUMPARAMS)	#loop through parameters
		{
			# Initialize AV,AVi,AVci to zero.
			# THOUGH THESE SEEM TO BE HIGHLIGHTED OUT OF THE ORIGINAL
			AV<-0;
			AVi<-0;
			AVci<-0;

			for(CURVENUM in 1:NUMCURVES)
			{
				# GET THE RESULTS FOR THIS CURVE, FOR THIS PARAMETER, FOR THIS MEASURE
				# THEN SUBTRACT THE MEAN OF THE COLUMN FROM THE OUTPUT VALUE
				#print(RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM])
				MEASURE_RESULTS_FOR_PARAM<-na.omit(RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM])
				MEASURE_RESULTS_FOR_PARAM<-MEASURE_RESULTS_FOR_PARAM-t(mean(MEASURE_RESULTS_FOR_PARAM))

				#RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]<-
				#	RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM] -
				#	t(mean(RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]))

				# Fourier coeff. at [1:OMi/2].
				# GET THE NUMBER OF SAMPLES FOR THIS OUTPUT
				N<-length(MEASURE_RESULTS_FOR_PARAM)
				#N<-length(RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM])
				# NQ GOES JUST BELOW THE MIDPOINT
				NQ<-(N-1)/2
				# NO GOES JUST ABOVE THE MIDPOINT
				N0<-NQ+1
				# Symmetric (P) and antisymmetric (M) combinations of the
				# series about its midpoint, used for cosine / sine terms.
				Y_VECP <- MEASURE_RESULTS_FOR_PARAM[N0+(1:NQ)] + MEASURE_RESULTS_FOR_PARAM[N0-(1:NQ)]
				Y_VECM <- MEASURE_RESULTS_FOR_PARAM[N0+(1:NQ)] - MEASURE_RESULTS_FOR_PARAM[N0-(1:NQ)]
				#Y_VECP = RESULTSARRAY[N0+(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM] +
				#	RESULTSARRAY[N0-(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]
				#Y_VECM = RESULTSARRAY[N0+(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM] -
				#	RESULTSARRAY[N0-(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]

				AC<-array(0,dim=c(1,4,1))
				BC<-array(0,dim=c(1,4,1))
				COMPL<-0
				# Accumulate squared Fourier coefficients at the low
				# frequencies 1..OMi/2 (complementary set).
				rangeJ<-OMi/2
				for(j in 1:rangeJ)
				{
					ANGLE<-(j*2*(1:NQ)*pi/N)
					C_VEC<-cos(ANGLE)
					S_VEC<-sin(ANGLE)
					AC[j]<-(MEASURE_RESULTS_FOR_PARAM[N0] + t(Y_VECP) %*% C_VEC)/N
					#AC[j]<-(RESULTSARRAY[N0,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]+t(Y_VECP) %*% C_VEC)/N
					BC[j] = t(Y_VECM) %*% S_VEC/N
					COMPL<-COMPL+AC[j]^2+BC[j]^2
				}
				# Computation of V_{(ci)}.
				Vci[CURVENUM]<-2*COMPL
				# Fourier coeff. at [P*OMi, for P=1:MI].
				COMPL = 0
				Y_VECP <- MEASURE_RESULTS_FOR_PARAM[N0+(1:NQ)] + MEASURE_RESULTS_FOR_PARAM[N0-(1:NQ)]
				Y_VECM <- MEASURE_RESULTS_FOR_PARAM[N0+(1:NQ)] - MEASURE_RESULTS_FOR_PARAM[N0-(1:NQ)]
				#Y_VECP = RESULTSARRAY[N0+(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM] +
				#RESULTSARRAY[N0-(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]
				#Y_VECM = RESULTSARRAY[N0+(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM] -
				#RESULTSARRAY[N0-(1:NQ),(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]
				# NOTE(review): this loop iterates over `i` (the harmonics
				# of OMi) but stores the coefficients at index `j`, the
				# value left over from the previous loop. The COMPL
				# accumulation is unaffected, but the AC/BC slots are
				# repeatedly overwritten — confirm `j` was intended.
				for(i in seq(OMi,OMi*MI,OMi))
				{
					ANGLE<-i*2*(1:NQ)*pi/N
					C_VEC<-cos(ANGLE)
					S_VEC<-sin(ANGLE)
					AC[j]<-(MEASURE_RESULTS_FOR_PARAM[N0] + t(Y_VECP) %*% C_VEC)/N
					#AC[j]<-(RESULTSARRAY[N0,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]+t(Y_VECP) %*% C_VEC)/N
					BC[j] = t(Y_VECM) %*% S_VEC/N
					COMPL<-COMPL+AC[j]^2+BC[j]^2
				}
				# Computation of V_i.
				Vi[CURVENUM]<-2*COMPL
				# AVi = AVi+Vi;
				# Computation of the total variance in the time domain.
				V[CURVENUM]<- t(MEASURE_RESULTS_FOR_PARAM) %*% MEASURE_RESULTS_FOR_PARAM/N
				#V[CURVENUM]<- t(RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]) %*%
				#	RESULTSARRAY[,(((PARAMNUM*OUTMEASURES)-OUTMEASURES)+MEASURE),CURVENUM]/N
			}	# END CURVE NUMBER LOOP

			# CALCULATE SENSITIVITY INDEXES (means over curves, plus the
			# per-curve values kept in rangeSi / rangeSTi).
			Si[PARAMNUM,1,MEASURE]<-mean(Vi)/mean(V)
			STi[PARAMNUM,1,MEASURE]<-1-mean(Vci)/mean(V)
			rangeSi[PARAMNUM,,MEASURE]<-Vi/V
			rangeSTi[PARAMNUM,,MEASURE]<-1-(Vci/V)

			# Replace NaN (division by zero variance) with 0 so downstream
			# summaries do not propagate NaN.
			if(is.nan(Si[PARAMNUM,1,MEASURE]))
			{
				Si[PARAMNUM,1,MEASURE]<-0
			}
			if(is.nan(STi[PARAMNUM,1,MEASURE]))
			{
				STi[PARAMNUM,1,MEASURE]<-0
			}

			for(i in seq(1:length(rangeSi[PARAMNUM,,MEASURE])))
			{
				if(is.nan(rangeSi[PARAMNUM,i,MEASURE]))
				{
					rangeSi[PARAMNUM,i,MEASURE]<-0
				}
			}
			for(i in seq(1:length(rangeSTi[PARAMNUM,,MEASURE])))
			{
				if(is.nan(rangeSTi[PARAMNUM,i,MEASURE]))
				{
					rangeSTi[PARAMNUM,i,MEASURE]<-0
				}
			}

			#if(is.nan(rangeSi[PARAMNUM,,MEASURE]))
			#{
			#	rangeSi[PARAMNUM,,MEASURE]<-0
			#}
			#if(is.nan(rangeSTi[PARAMNUM,,MEASURE]))
			#{
			#	rangeSTi[PARAMNUM,,MEASURE]<-0
			#}
		}	# END PARAMNUM
	}

	# THE Si, STi, RANGESi, AND RANGESTi ARRAYS ARE RETURNED AS A LIST
	return(list(Si=Si,STi=STi,rangeSi=rangeSi,rangeSTi=rangeSTi))
}
|
5f81b8e458418a5f08f5f6a458528067eff11308
|
799f2e7659b5d3e9957f724f9b22567f489884f6
|
/R/ContactWorker.R
|
d0fe21f347db33f252c2066f8cbc928aedbe26a5
|
[] |
no_license
|
cloudyr/pyMTurkR
|
1843d3e7b97c32e5249dd94c110e5a131b8637a2
|
17ec76724f019bf82d43537d8b19d3d210ed7e7e
|
refs/heads/master
| 2022-01-27T00:59:50.066624
| 2022-01-19T19:24:12
| 2022-01-19T19:24:12
| 186,131,921
| 15
| 10
| null | 2022-01-19T19:24:13
| 2019-05-11T12:45:41
|
R
|
UTF-8
|
R
| false
| false
| 9,827
|
r
|
ContactWorker.R
|
#' Contact Worker(s)
#'
#' Contact one or more workers. This sends an email with specified subject line
#' and body text to one or more workers. This can be used to recontact workers
#' in panel/longitudinal research or to send follow-up work.
#'
#' Send an email to one or more workers, either with a common subject and body
#' text or subject and body customized for each worker.
#'
#' In batch mode (when \code{batch=TRUE}), workers are contacted in batches of
#' 100 with a single identical email. If one email fails (e.g., for one worker)
#' the other emails should be sent successfully. That is to say, the request as
#' a whole will be valid but will return additional information about which
#' workers were not contacted. This information can be found in the MTurkR log
#' file and viewing the XML responses directly.
#'
#' Note: It is only possible to contact workers who have performed work for you
#' previously. When attempting to contact a worker who has not worked for you
#' before, this function will indicate that the request was successful even
#' though the email is not sent. The function will return a value of
#' \dQuote{HardFailure} for \code{Valid} when this occurs. The printed results
#' may therefore appear contradictory because MTurk reports that requests to
#' contact these workers are \code{Valid}, but they are not actually contacted.
#' In batch, this means that a batch will be valid but individual ineligible
#' workers will be reported as not contacted.
#'
#' \code{ContactWorkers()}, \code{contact()}, \code{NotifyWorkers},
#' \code{NotifyWorker()}, and \code{notify()} are aliases.
#'
#' @aliases ContactWorker ContactWorkers contact NotifyWorkers notify NotifyWorker
#' @param subjects A character string containing subject line of an email, or a
#' vector of character strings of length equal to the number of workers to
#' be contacted containing the subject line of the email for each worker.
#' Maximum of 200 characters.
#' @param msgs A character string containing body text of an email, or a vector
#' of character strings of length equal to the number of workers to be
#' contacted containing the body text of the email for each worker. Maximum of
#' 4096 characters.
#' @param workers A character string containing a WorkerId, or a vector of
#' character strings containing multiple WorkerIds.
#' @param batch A logical (default is \code{FALSE}), indicating whether workers
#' should be contacted in batches of 100 (the maximum allowed by the API). This
#' significantly reduces the time required to contact workers, but eliminates
#' the ability to send customized messages to each worker.
#' @param verbose Optionally print the results of the API request to the
#' standard output. Default is taken from \code{getOption('pyMTurkR.verbose',
#' TRUE)}.
#' @return A data frame containing the list of workers, subjects, and messages,
#' and whether the request to contact each of them was valid.
#' @author Tyler Burleigh, Thomas J. Leeper
#' @references
#' \href{https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_NotifyWorkersOperation.html}{API
#' Reference}
#' @keywords Workers
#' @examples
#'
#' \dontrun{
#' a <- "Complete a follow-up survey for $.50"
#' b <- "Thanks for completing my HIT!
#' I will pay a $.50 bonus if you complete a follow-up survey by Friday at 5:00pm.
#' The survey can be completed at
#' http://www.surveymonkey.com/s/pssurvey?c=A1RO9UEXAMPLE."
#'
#' # contact one worker
#' c1 <- "A1RO9UEXAMPLE"
#' d <- ContactWorker(subjects = a,
#' msgs = b,
#' workers = c1)
#'
#' # contact multiple workers in batch
#' c2 <- c("A1RO9EXAMPLE1","A1RO9EXAMPLE2","A1RO9EXAMPLE3")
#' e <- ContactWorker(subjects = a,
#' msgs = b,
#' workers = c2,
#' batch = TRUE)
#' }
#'
#' @export ContactWorker
#' @export contact
#' @export ContactWorkers
#' @export NotifyWorkers
#' @export NotifyWorker
#' @export notify
ContactWorker <-
contact <-
ContactWorkers <-
NotifyWorkers <-
NotifyWorker <-
notify <-
function (subjects,
          msgs,
          workers,
          batch = FALSE,
          verbose = getOption('pyMTurkR.verbose', TRUE)) {

    GetClient() # Boto3 client

    # Validate the format of every WorkerId before any API call is made.
    # (Shared by the batch and non-batch branches.)
    check_worker_ids <- function(ids) {
        for (id in ids) {
            if (nchar(id) > 64) {
                stop(paste("WorkerId ", id, " Too Long (64 char max)", sep = ""))
            } else if (nchar(id) < 1) {
                stop(paste("WorkerId ", id, " Too Short (1 char min)", sep = ""))
            } else if (!grepl("^A[A-Z0-9]+$", id)) {
                stop(paste("WorkerId ", id, " Invalid format", sep = ""))
            }
        }
    }

    # Coerce factor inputs to character so nchar() and the regex checks
    # behave on the underlying strings rather than factor codes.
    if (is.factor(subjects)) {
        subjects <- as.character(subjects)
    }
    if (is.factor(msgs)) {
        msgs <- as.character(msgs)
    }
    if (is.factor(workers)) {
        workers <- as.character(workers)
    }
    if (length(workers) > length(unique(workers))) {
        warning("Duplicated WorkerIds removed from 'workers'")
        workers <- unique(workers)
    }

    # Batch run
    if (batch) {
        # In batch mode only one subject and one message are allowed.
        # Check the counts *before* the nchar() length checks so that
        # nchar() is never handed a length-> 1 vector inside if() (the
        # original interleaved these checks the wrong way around).
        if (length(subjects) > 1) {
            stop("If 'batch'==TRUE, only one subject can be used")
        } else if (nchar(subjects) > 200) {
            stop("Subject Too Long (200 char max)")
        }
        if (length(msgs) > 1) {
            stop("If 'batch'==TRUE, only one message can be used")
        } else if (nchar(msgs) > 4096) {
            stop("Message Text Too Long (4096 char max)")
        }
        check_worker_ids(workers)

        # Prepare data frame for return
        Notifications <- emptydf(length(workers), 4, c("WorkerId", "Subject", "Message", "Valid"))
        Notifications$WorkerId <- workers
        Notifications$Subject <- subjects
        Notifications$Message <- msgs

        # Put workers into batches of 100 (the API maximum), then process.
        workerbatch <- split(workers, rep(1:((length(workers) %/% 100) + 1), each = 100)[1:length(workers)])

        for (i in seq_along(workerbatch)) {
            response <- try(pyMTurkR$Client$notify_workers(
                Subject = subjects,
                WorkerIds = as.list(workerbatch[[i]]),
                MessageText = msgs
            ), silent = !verbose)

            if (!inherits(response, "try-error")) {
                Notifications$Valid[Notifications$WorkerId %in% workerbatch[[i]]] <- TRUE
                if (verbose) {
                    message(i, ": Workers ", workerbatch[[i]][1], " to ", utils::tail(workerbatch[[i]], 1), " Notified")
                }
                # A batch request can be valid as a whole while individual
                # workers fail (e.g. workers who never worked for this
                # requester); record those as 'HardFailure'.
                if (length(response$NotifyWorkersFailureStatuses) > 0) {
                    for (k in seq_along(response$NotifyWorkersFailureStatuses)) {
                        fail <- response$NotifyWorkersFailureStatuses[[k]]
                        Notifications$Valid[Notifications$WorkerId == fail$WorkerId] <- 'HardFailure'
                        if (verbose) {
                            message(paste("Invalid Request for worker ", fail$WorkerId, ": ", fail$NotifyWorkersFailureMessage, sep=""))
                        }
                    }
                }
            } else {
                Notifications$Valid[Notifications$WorkerId %in% workerbatch[[i]]] <- FALSE
                if (verbose) {
                    warning(i, ": Invalid Request for workers ", workerbatch[[i]][1], " to ", utils::tail(workerbatch[[i]], 1))
                }
            }
        }
    } else { # Not running as a batch
        # Check validity of parameters
        check_worker_ids(workers)
        for (i in seq_along(subjects)) {
            if (nchar(subjects[i]) > 200) {
                stop(paste("Subject ", i, " Too Long (200 char max)", sep = ""))
            }
        }
        for (i in seq_along(msgs)) {
            if (nchar(msgs[i]) > 4096) {
                stop(paste("Message ", i, "Text Too Long (4096 char max)", sep = ""))
            }
        }
        # Recycle a single subject/message across all workers; otherwise
        # lengths must match exactly.
        if (length(subjects) == 1) {
            subjects <- rep(subjects[1], length(workers))
        } else if (!length(subjects) == length(workers)) {
            stop("Number of subjects is not 1 nor length(workers)")
        }
        if (length(msgs) == 1) {
            msgs <- rep(msgs[1], length(workers))
        } else if (!length(msgs) == length(workers)) {
            stop("Number of messages is not 1 nor length(workers)")
        }

        Notifications <- emptydf(length(workers), 4, c("WorkerId", "Subject", "Message", "Valid"))
        for (i in seq_along(workers)) {
            response <- try(pyMTurkR$Client$notify_workers(
                Subject = subjects[i],
                WorkerIds = as.list(workers[i]),
                MessageText = msgs[i]
            ), silent = !verbose)

            # Guard the try-error case before touching response$...:
            # a try-error is an atomic vector, so `$` on it would itself
            # error (a latent bug in the original).
            if (inherits(response, "try-error")) {
                valid <- FALSE
                if (verbose) {
                    warning(i, ": Invalid Request for worker ", workers[i])
                }
            } else if (length(response$NotifyWorkersFailureStatuses) > 0) {
                # Worker not contacted; keep the API failure code.
                valid <- response$NotifyWorkersFailureStatuses[[1]]$NotifyWorkersFailureCode
                if (verbose) {
                    message(i, ": Worker (", workers[i], ") not contacted: ",
                            response$NotifyWorkersFailureStatuses[[1]]$NotifyWorkersFailureMessage)
                }
            } else {
                valid <- TRUE
                if (verbose) {
                    message(i, ": Worker (", workers[i], ") Notified")
                }
            }
            Notifications[i, ] <- c(workers[i], subjects[i], msgs[i], valid)
        }
    }

    Notifications$Valid <- factor(Notifications$Valid, levels=c('TRUE','FALSE','HardFailure'))
    return(Notifications)
}
|
099ac3043d49b5783d4d97fbcb245abd32f6182f
|
c9e23795383f95c595785369c8e76b697d1c9358
|
/man/rci.Rd
|
a85ab036e1c9eab9312603791852be074eb2596a
|
[] |
no_license
|
zieglema/ClinicalSig
|
fdebfbaf709c9c56d3a8a543fe156ae8cb72e552
|
cf8441c134b5c634d9175b750dce547ead786e77
|
refs/heads/master
| 2021-01-20T18:16:06.840865
| 2019-05-14T10:17:57
| 2019-05-14T10:17:57
| 60,799,620
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 923
|
rd
|
rci.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Code_for_ClinicalSig.R
\name{rci}
\alias{rci}
\title{Function for the reliable change index}
\usage{
rci(pre, post, data, rtt, sdNorm)
}
\arguments{
\item{pre}{The variable containing the values from before the intervention}
\item{post}{The variable containing the values from after the intervention}
\item{data}{Data set containing the variables}
\item{rtt}{Test-retest correlation for the used measure. Should be from the relevant population and with no intervention in between}
\item{sdNorm}{Standard deviation of the used measure in the norm group.}
}
\value{
Returns the RCI for each person in the data set
}
\description{
Function for the reliable change index
}
\examples{
dataRci <- rci(pre="U1_GDS_G", post="U2_GDS_G", data=dat, rtt=.83, sdNorm=6.8)
hist(dataRci$rci, main="Histogram of RCI", xlab="RCI", breaks=30, col="blue")
}
|
a5fab4d98479be4b7b2dd1ba8acee19da903cdc7
|
e8b4e0b6c7d61d9eccc723280f05bd3765076b78
|
/man/ConservatismPermTest.Rd
|
7fe80a85e8127efa862715eaac518c9af45e0284
|
[
"MIT"
] |
permissive
|
SimmonsBI/RolePredict
|
f00630ef912ec49439a181bfe3c0bfa82d539708
|
f8f67510d51fc15b068b10cd26dd37524e864fdc
|
refs/heads/master
| 2023-01-22T09:27:12.563284
| 2020-12-01T17:22:51
| 2020-12-01T17:22:51
| 291,089,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 407
|
rd
|
ConservatismPermTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConservatismPermTest.R
\name{ConservatismPermTest}
\alias{ConservatismPermTest}
\title{placeholder}
\usage{
ConservatismPermTest(roles, n_it, species)
}
\arguments{
\item{roles}{A number.}
\item{n_it}{A number.}
\item{species}{A number.}
}
\value{
The result of the conservatism permutation test.
}
\description{
placeholder
}
\examples{
#
}
|
27bba6859e552ce365db5ebc188ced3a42f330e5
|
a41bd0086c12624bff1fd6661d140075f87d5229
|
/man/mosaic.Rd
|
007fd206d85f46582540ca0fb18e766e3a13bbf4
|
[] |
no_license
|
cran/regclass
|
e0e019d36dbc15d05fda4965e0dc26ff5cf1ffc1
|
548f2ed1dc66fbf1a58b86f9ada26f9da05483c4
|
refs/heads/master
| 2021-01-11T23:18:12.857264
| 2020-02-21T17:00:07
| 2020-02-21T17:00:07
| 78,563,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,740
|
rd
|
mosaic.Rd
|
\name{mosaic}
\alias{mosaic}
\title{
Mosaic plot
}
\description{
Provides a mosaic plot to visualize the association between two categorical variables }
\usage{
mosaic(formula,data,color=TRUE,labelat=c(),xlab=c(),ylab=c(),
magnification=1,equal=FALSE,inside=FALSE,ordered=FALSE)
}
\arguments{
\item{formula}{
A standard R formula written as y~x, where y is the name of the variable playing the role of y and x is the name of the variable playing the role of x.
}
\item{data}{
An optional argument giving the name of the data frame that contains x and y. If not specified, the function will use existing definitions in the parent environment.
}
\item{color}{
\code{TRUE} or \code{FALSE}. If \code{FALSE}, plots are presented in greyscale. If \code{TRUE}, an intelligent color scheme is chosen to shade the plot.
}
\item{labelat}{a vector of factor levels of \code{x} to be labeled (in the case that you want only certain levels to be labeled) }
\item{xlab}{Label of horizontal axis if you want something different from the name of the \code{x} variable}
\item{ylab}{Label of vertical axis if you want something different from the name of the \code{y} variable}
\item{magnification}{Magnification of the labels of the \code{x} variable. A number smaller than 1 shrinks everything. A number larger than 1 makes everything larger }
\item{equal}{If \code{FALSE}, the bar widths are proportional to the frequency of the corresponding level. If \code{TRUE}, the bar widths are all equal (useful if there are many levels or some are extremely rare).}
\item{inside}{If \code{FALSE}, labels are beneath the bars. If \code{TRUE}, labels are placed inside the bars and rotated (useful if the levels have long names) }
\item{ordered}{If \code{FALSE}, bars are in alphabetical order. If \code{TRUE}, the ordering of the bars reflects the ordering of the factor levels.}
}
\details{
This function shows a mosaic plot to visualize the conditional distributions of \code{y} for each level of \code{x}, along with the marginal distribution of \code{y} to the right of the plot. The widths of the segmented bar charts are proportional to the frequency of each level of \code{x}. These plots are the same that appear using \code{associate}.
}
\references{
Introduction to Regression and Modeling
}
\author{
Adam Petrie
}
\seealso{ \code{\link{associate}}}
\examples{
data(ACCOUNT)
mosaic(Area.Classification~Purchase,data=ACCOUNT,color=TRUE)
data(EX6.CLICK)
#Default presentation: not very useful
mosaic(Click~DeviceModel,data=EX6.CLICK)
#Better presentation
mosaic(Click~DeviceModel,data=EX6.CLICK,equal=TRUE,inside=TRUE,magnification=0.8)
}
|
b74d87f17a3e2df6f6b422e5ebfc6bf80fa79f25
|
1fa222b5df3214b040da125f8b19290eb355b940
|
/VennDiagram_CancerPanels/VennDiagramPackage_Script.R
|
9708dba43297b6431a16e324085d805e4da91644
|
[] |
no_license
|
bl24/R_scripts
|
6b520f6a7580312e3a8e601038fbf2f958f3848b
|
9591a202b39fa147686809e0f15790218c59d21b
|
refs/heads/master
| 2021-01-10T14:18:47.102101
| 2016-03-08T18:12:43
| 2016-03-08T18:12:43
| 53,434,318
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,399
|
r
|
VennDiagramPackage_Script.R
|
# NOTE(review): install.packages() inside an analysis script re-installs the
# package on every run; consider running it once interactively instead.
install.packages('VennDiagram')
library(VennDiagram)
# Alex's version
# Written for 3 datasets
# Three-set diagram of cancer gene-panel overlaps. Genes are encoded as
# integer IDs, so a range like 1:3 stands for a block of shared genes.
venn.diagram(
x = list(
'Foundation\nMedicine\n(315 genes)' = c(1:3,4:26,27:117,135:332),
'TruSight\nTumor\n(26 genes)' = c(1:3,4:26),
'NGS\nGateway\n(131 genes)' = c(4:26,27:117,118:134)
),
filename = "VennDiagTest",
col = "transparent",
fill = c("red", "blue", "green"),
alpha = 0.5,
# Seven label colours: one per non-empty region of a 3-set diagram.
label.col = c("darkred", "white", "darkblue", "white", "white", "white", "darkgreen"),
cex = 2.5,
fontfamily = "serif",
fontface = "bold",
cat.default.pos = "text",
cat.col = c("darkred", "darkblue", "darkgreen"),
cat.cex = 2.0,
cat.fontfamily = "serif",
cat.dist = c(0.36, -0.16, 0.16),
cat.pos = -10
)
# My version/redo
# Written for 4 datasets
# Calculating the overlaps
# FIXME(review): calculate.overlap() requires a named list of sets (its `x`
# argument) and its result is never assigned, so this call errors as written
# and `panel_overlaps` below is never defined. Presumably this was meant to
# be: panel_overlaps <- calculate.overlap(x = <named list of the 4 panels>).
calculate.overlap()
# To get a list of non-overlapping genes
data.frame(panel_overlaps$a9) # Change "a" coordinates where appropriate
# Drawing the diagram
grid.newpage()
# Region sizes were entered by hand; the "#aN" comments map each argument to
# the region labels returned by calculate.overlap().
venn.plot <- draw.quad.venn(area1 = 131, #a9
area2 = 315, #a14
area3 = 26, #a1
area4 = 35, #a3
n1234 = 15, #a6
n123 = 23, #a12
n124 = 33, #a11
n134 = 15, #a5
n234 = 15, #a7
n12 = 114, #a15
n13 = 23, #a4
n14 = 33, #a10
n23 = 26, #a13
n24 = 35, #a8
n34 = 15, #a2
category = c("NGS\nGateway\n(131 genes)", "Foundation\nMedicine\n(315 genes)", "TruSight Tumor\n(26 genes)", "Oncomine\n(35 genes)"),
cat.pos = c(-0.8, 10, 165, 1),
cat.dist = c(0.16, 0.15, -0.11, 0.11),
fill = c("blue", "red", "green", "yellow"),
alpha = 0.2,
scaled = T,
lty = "blank",
cex = 2,
cat.cex = 2,
cat.col = c("black", "black", "black", "black"))
grid.draw(venn.plot)
|
82bc3cfee8f3ca76ccef845c487d7c79d30f4df3
|
c3336c90567e9232ed60bd6bd6ea2a35195a0ef3
|
/data-raw/nextrain.R
|
25ce1764fd5fb8c2dc858723ee98180c1f561c0e
|
[] |
no_license
|
khvorov45/ab-landscape
|
f7c2005772273cfef8ee30fa284340e0f14c9e9e
|
92f49b37b5605452e01a1c8a88e0bb411e9eb05b
|
refs/heads/master
| 2023-06-28T09:38:29.403664
| 2021-07-30T04:40:06
| 2021-07-30T04:40:06
| 283,342,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
nextrain.R
|
# Pull the Nextstrain 12-year H3N2 HA tree and its tip frequencies, flatten
# both into tidy tables, join them by virus name, and save the result.
library(tidyverse)

# Tree JSON (includes clade membership for every node).
tree_json <- httr::GET(
  "https://nextstrain.org/charon/getDataset?prefix=/flu/seasonal/h3n2/ha/12y"
) %>%
  httr::content()

# Recursively flatten one tree node (and all of its descendants) into a
# two-column tibble: node name and clade membership.
flatten_tree_node <- function(node) {
  self <- tibble(
    name = node$name,
    clade = node$node_attrs$clade_membership$value
  )
  if (is.null(node$children)) {
    return(self)
  }
  bind_rows(self, map_dfr(node$children, flatten_tree_node))
}

# Keep only tips: internal nodes are named "NODE...".
virus_clades <- map_dfr(tree_json$tree$children, flatten_tree_node) %>%
  filter(!str_starts(name, "NODE"))

# Tip-frequency JSON for the same dataset.
freq_json <- httr::GET(
  "https://nextstrain.org/charon/getDataset?prefix=/flu/seasonal/h3n2/ha/12y&type=tip-frequencies"
) %>%
  httr::content()

# Turn one per-virus frequency entry into a long tibble; the metadata
# entries that sit alongside the viruses are skipped.
tidy_freq_entry <- function(entry, entry_name, pivots) {
  if (entry_name %in% c("generated_by", "pivots")) {
    return(tibble())
  }
  imap_dfr(
    entry$frequencies,
    ~ tibble(name = entry_name, n = .y, freq = .x, year = pivots[[.y]])
  )
}

freq_long <- imap_dfr(freq_json, tidy_freq_entry, freq_json$pivots)

# Diagnostics: names present in one table but missing from the other.
setdiff(freq_long$name, virus_clades$name)
setdiff(virus_clades$name, freq_long$name)

virus_frequencies <- freq_long %>%
  inner_join(virus_clades, "name")

write_csv(virus_frequencies, "data-raw/nexstrain-virus-frequencies.csv")
|
f7d6b977cda7993c512a4b1b5ba38793081adc35
|
19ca3425955df745d43deca7bf1b67801566a199
|
/run_analysis.R
|
2eb14de637e5ff3184b1ee9cf14e0601e8e33c1a
|
[] |
no_license
|
BillSeliger/gettingandcleaningdata
|
2311d328a92c7177fed102712763c3a974182857
|
ba36f82d94b6826ff1e13c37decdda919a754871
|
refs/heads/master
| 2016-09-10T00:46:20.753757
| 2015-05-03T12:54:08
| 2015-05-03T12:54:08
| 28,794,750
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,444
|
r
|
run_analysis.R
|
## R Script file for Coursera_Getting and Cleaning Data
## You should create one R script called run_analysis.R that does the following.
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each
## variable for each activity and each subject.
## Load required packages up front. plyr must be attached before dplyr so
## that dplyr's verbs mask plyr's where they overlap; the %>% pipe and the
## final grouped summarise come from dplyr, which the original script never
## loaded (it only ran require(plyr) mid-script, so the last step failed).
library(plyr)
library(dplyr)
## Read in all of the datasets - I kept it in the original file structure so I am changing working directories
## NOTE(review): these setwd() paths are machine-specific; adjust them (or
## use relative paths) before running elsewhere.
##
## Read in the subjects, features, and activities for the train data
setwd("C:/Users/rr046302/Documents/Bill's Stuff/Coursera/Getting and Cleaning Data/Getting and Cleaning Data Course Project/gettingandcleaningdata/UCI HAR Dataset/train")
train_subjects <- read.table("subject_train.txt")
train_features <- read.table("X_train.txt")
train_activity <- read.table("y_train.txt")
## Read in the subjects, features, and activities for the test data
setwd("C:/Users/rr046302/Documents/Bill's Stuff/Coursera/Getting and Cleaning Data/Getting and Cleaning Data Course Project/gettingandcleaningdata/UCI HAR Dataset/test")
test_features <- read.table("X_test.txt")
test_subjects <- read.table("subject_test.txt")
test_activity <- read.table("y_test.txt")
## rbind the test and train datasets, then cbind into one wide table:
## 561 feature columns + subject + activity.
features <- rbind(test_features, train_features)
subjects <- rbind(test_subjects, train_subjects)
activities <- rbind(test_activity, train_activity)
merged_data <- cbind(features, subjects, activities)
setwd("C:/Users/rr046302/Documents/Bill's Stuff/Coursera/Getting and Cleaning Data/Getting and Cleaning Data Course Project/gettingandcleaningdata/UCI HAR Dataset")
features_names <- read.table("features.txt")
activity_labels <- read.table("activity_labels.txt", col.names = c("activity", "activity_name"))
colnames(merged_data) <- features_names[,2]
colnames(merged_data)[562] <- "subject"
colnames(merged_data)[563] <- "activity"
## select only the variables that include mean or standard deviation in their variable name
## also select with the subject and activity columns
selected_data <- merged_data[,grepl("mean|std|subject|activity", names(merged_data))]
## add the activity names back to the activity variable (plyr::join keeps
## the original row order, unlike merge).
selected_data <- join(selected_data, activity_labels, by = "activity", match = "first")
## Clean up the variable names: drop "()", expand abbreviations, and give
## the t/f prefixes readable time/freq names.
names <- colnames(selected_data)
for (i in seq_along(names))
{
names[i] <- gsub("\\()","",names[i])
names[i] <- gsub("-std$","StdDev",names[i])
names[i] <- gsub("-mean","Mean",names[i])
names[i] <- gsub("^(t)","time",names[i])
names[i] <- gsub("^(f)","freq",names[i])
names[i] <- gsub("([Gg]ravity)","Gravity",names[i])
names[i] <- gsub("([Bb]ody[Bb]ody|[Bb]ody)","Body",names[i])
names[i] <- gsub("[Gg]yro","Gyro",names[i])
names[i] <- gsub("AccMag","AccMagnitude",names[i])
names[i] <- gsub("([Bb]odyaccjerkmag)","BodyAccJerkMagnitude",names[i])
names[i] <- gsub("JerkMag","JerkMagnitude",names[i])
names[i] <- gsub("GyroMag","GyroMagnitude",names[i])
}
colnames(selected_data) <- names
## Tidy output: mean of every measurement column per (subject, activity).
## summarise_all replaces the deprecated summarise_each(funs(mean)).
final_data <- selected_data %>% group_by(subject, activity_name) %>% summarise_all(mean)
write.table(final_data,file = "finalDataSet.txt",row.names = FALSE)
|
961af7a5e19fb19eed4e803909998175ea87c866
|
dd1102ed8f681e5dfb675075b870ee948f017ccc
|
/LjAt_SC_invasion.R
|
6e90e919cf92fc54c963f1456aaebc8f95eb7ceb
|
[] |
no_license
|
garridoo/ljsphere
|
a726ec88922bd967bcee1c44ff13f73de8e146dc
|
647a1bc7d6a8ae15f50a4f751c94baae89727771
|
refs/heads/master
| 2021-06-12T19:59:49.903325
| 2021-05-20T11:25:13
| 2021-05-20T11:25:13
| 254,386,539
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,197
|
r
|
LjAt_SC_invasion.R
|
# originally by Ruben Garrido-Oter
# garridoo@mpipz.mpg.de
# Loads the objects used below (otu_table, design, taxonomy, at_strains,
# lj_strains, main_theme, run, figures.dir, ...) — they are defined in the
# sourced file, not here.
source("LjAt_SC_invasion_load_data.R")
# Aggregate relative abundances over all At-SPHERE / Lj-SPHERE strains and
# append them to the OTU table as two extra "pseudo-strain" rows.
at_strains_ra <- colSums(otu_table[rownames(otu_table) %in% at_strains, ])
lj_strains_ra <- colSums(otu_table[rownames(otu_table) %in% lj_strains, ])
otu_table <- rbind(otu_table, at_strains_ra, lj_strains_ra)
# Long format: one row per (strain, sample) with its relative abundance.
df <- melt(otu_table)
colnames(df) <- c("strain", "SampleID", "RA")
# Attach sample metadata; drop the inoculum ("input") samples.
df$genotype <- design$genotype[match(df$SampleID, design$SampleID)]
df <- df[!df$genotype=="input", ]
idx <- match(df$SampleID, design$SampleID)
df$treatment <- paste(design$genotype[idx], design$treatment[idx])
# Attach taxonomy; the two aggregate rows have no family entry, so label
# them "All strains" and give them readable strain names.
df$family <- taxonomy$family[match(df$strain, taxonomy$strain)]
df$family <- as.character(df$family)
df$family[is.na(df$family)] <- "All strains"
df$strain <- as.character(df$strain)
df$strain[df$strain=="at_strains_ra"] <- "At-SPHERE strains"
df$strain[df$strain=="lj_strains_ra"] <- "Lj-SPHERE strains"
df$family_strain <- paste(df$family, " (", df$strain, ")", sep="")
df$RA_log <- log10(df$RA * 1000 + 1)
# One colour per treatment group; the same two host-genotype colours are
# repeated for every treatment (col0 = red, gifu = blue).
colors <- data.frame(group=c("col0", "gifu",
"col0 SC32 mock", "gifu SC32 mock",
"col0 AtSC LjSC", "gifu AtSC LjSC",
"col0 LjSC AtSC", "gifu LjSC AtSC"),
color=c("#f8756b", "#00b8e3",
"#f8756b", "#00b8e3",
"#f8756b", "#00b8e3",
"#f8756b", "#00b8e3"))
# Fixed-notation axis-label formatter.
# NOTE(review): fmt_dcimals is defined but never used in this script.
fmt_dcimals <- function(decimals=0) {
function(x) format(x, nsmall=decimals, scientific=F)
}
# Subset to the family of interest ("All strains" selects the two aggregate
# pseudo-strain rows added above).
family <- "All strains"
df_family <- df[which(df$family==family), ]
if (family!="All strains") df_family$strain <- factor(df_family$strain, levels=rev(sort(unique(df_family$strain))))
colors <- colors[colors$group %in% df_family$treatment, ]
df_family$treatment <- factor(df_family$treatment, levels=colors$group)
df_family <- unique(df_family)
# Split into At-SPHERE and Lj-SPHERE subsets (aggregate + member strains).
df_family_at <- df_family[df_family$strain %in% c("At-SPHERE strains", at_strains), ]
df_family_lj <- df_family[df_family$strain %in% c("Lj-SPHERE strains", lj_strains), ]
if (dim(df_family_at)[1] > 0 & dim(df_family_lj)[1] > 0) {
# Kruskal-Wallis test of group differences
pval <- kruskal.test(RA ~ genotype, data=df_family_at)$p.value
# Candidate y-limits centred on the mock-treatment mean...
lim_padding <- 0.2
idx <- df_family_at$treatment %in% c("col0 SC32 mock", "gifu SC32 mock")
lim <- mean(df_family_at$RA[idx])
h_lim <- min(1, lim+lim_padding)
l_lim <- max(0, lim-lim_padding)
# ...but immediately overridden with the full [0, 1] range, so the four
# lines above are effectively dead code.
h_lim <- 1
l_lim <- 0
# Left panel: At-SPHERE strains (y-axis on the left).
p1 <- ggplot(df_family_at, aes(x=strain, y=RA, color=treatment, fill="transparent")) +
geom_boxplot(alpha=1, outlier.size=0, size=0.5, width=.2*length(unique(df_family$treatment)), fill="transparent") +
geom_jitter(position=position_jitterdodge(jitter.width=.1*length(unique(df_family$treatment)),
dodge.width=.2*length(unique(df_family$treatment))), size=.3, alpha=0.7) +
scale_y_continuous(position="left", labels=percent, limits=c(l_lim, h_lim), breaks=seq(0, 1, by=.1)) +
scale_colour_manual(values=as.character(colors$color)) +
labs(x="", y="Relative abundance") +
theme(axis.text.x=element_text(size=7.5)) +
theme(axis.text.y=element_text(size=7.5)) +
theme(axis.title=element_text(size=9)) +
theme(plot.margin=unit(c(5, 0, 2, 5), "mm")) +
main_theme +
theme(legend.position="none")
# NOTE(review): idx is recomputed from df_family_at but applied to
# df_family_lj on the next line — presumably it should be computed from
# df_family_lj. Harmless here only because h_lim/l_lim are overridden
# to 1/0 just below.
idx <- df_family_at$treatment %in% c("col0 SC32 mock", "gifu SC32 mock")
lim <- mean(df_family_lj$RA[idx])
h_lim <- min(1, lim+lim_padding)
l_lim <- max(0, lim-lim_padding)
h_lim <- 1
l_lim <- 0
# Right panel: Lj-SPHERE strains (y-axis drawn on the right).
p2 <- ggplot(df_family_lj, aes(x=strain, y=RA, color=treatment, fill="transparent")) +
geom_boxplot(alpha=1, outlier.size=0, size=0.5, width=.2*length(unique(df_family$treatment)), fill="transparent") +
geom_jitter(position=position_jitterdodge(jitter.width=.1*length(unique(df_family$treatment)),
dodge.width=.2*length(unique(df_family$treatment))), size=.3, alpha=0.7) +
scale_y_continuous(position="right", labels=percent, limits=c(l_lim, h_lim), breaks=seq(0, 1, by=0.1)) +
scale_colour_manual(values=as.character(colors$color)) +
labs(x="", y="Relative abundance") +
theme(axis.text.x=element_text(size=7.5)) +
theme(axis.text.y=element_text(size=7.5)) +
theme(axis.title=element_text(size=9)) +
theme(plot.margin=unit(c(5, 0, 2, 5), "mm")) +
main_theme +
theme(legend.position="none")
# Align the two panels' heights and save them side by side, with the
# Kruskal-Wallis P value in the title.
gA <- ggplotGrob(p1)
gB <- ggplotGrob(p2)
maxHeight = grid::unit.pmax(gA$heights, gB$heights)
gA$heights <- as.list(maxHeight)
gB$heights <- as.list(maxHeight)
pg1 <- grid.arrange(gA, gB, ncol=2, top=paste(run, "; P=", format(pval, digits=2), sep=""))
ggsave(paste(figures.dir, run, "_host_preference.pdf", sep=""), pg1, width=6, height=5)
}
|
2462131e4ba9e89668a0b895d48080c6e294722b
|
344176ee0f52389607d968b228ef086745a2d349
|
/R/scz/create_gtex_adapt_results.R
|
3b46c492dd1cb30b93122e603646a5b5ac79dace
|
[] |
no_license
|
SitaZhou/AdaPT-GWAS-manuscript-code
|
a1aface75adf00c41050ebff990cbfce9f9ba611
|
1f789b51a93e33fad0ba3b2174ad5dd768883066
|
refs/heads/master
| 2022-04-13T12:24:09.087633
| 2020-03-29T01:23:31
| 2020-03-29T01:23:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,418
|
r
|
create_gtex_adapt_results.R
|
# PURPOSE: Generate the AdaPT CV results using the GTEx eSNPs with all variables
# and then the intercept-only results
# Author: Ron Yurko

# Access necessary packages:
library(tidyverse)
library(future)
# Use the development version of furrr:
# devtools::install_github("DavisVaughan/furrr")
library(furrr)
# Use the modified version of AdaPT:
# devtools::install_github("ryurko/adaptMT")
library(adaptMT)

# Load the data:
bip_scz_gtex_data <- read_csv("data/bip_schz_data/bip_scz_data_14_18_gtex_eqtls_cortical_wgcna.csv")

# BD z-scores plus the cortical eQTL / WGCNA covariates:
cortical_variables <- c("z_bip_14", "ave_abs_cortical_eqtl_slope",
                        "brain_anterior_cingulate_cortex_ba24_ave_abs_eqtl_slope",
                        "brain_frontal_cortex_ba9_ave_abs_eqtl_slope",
                        colnames(bip_scz_gtex_data)[which(str_detect(colnames(bip_scz_gtex_data), "cortical_any_"))])

# XGBoost settings shared by every candidate model in the tuning grid;
# only nrounds and max_depth vary across candidates.
xgb_args <- function(nrounds, max_depth) {
  list("nrounds" = nrounds,
       "max_depth" = max_depth,
       "min_child_weight" = 1,
       "verbose" = 0,
       "nthread" = 10)
}

# 2 x 2 search grid over boosting rounds (100 / 150) and tree depth (2 / 3):
args_search <- list("nrounds100md2" = xgb_args(100, 2),
                    "nrounds150md2" = xgb_args(150, 2),
                    "nrounds100md3" = xgb_args(100, 3),
                    "nrounds150md3" = xgb_args(150, 3))

# Generate the AdaPT CV results with only the cortical variables:
scz_cortical_adapt_cv_results <- adapt_xgboost_cv(as.matrix(
  bip_scz_gtex_data[,cortical_variables]),
  bip_scz_gtex_data$scz_14_P,
  verbose = list(print = FALSE,
                 fit = FALSE,
                 ms = FALSE),
  piargs = args_search,
  muargs = args_search,
  n_folds = 5,
  niter_ms = 10,
  nms = as.integer(2),
  s0 = rep(.05, nrow(bip_scz_gtex_data)))

# Save these results:
# saveRDS(scz_cortical_adapt_cv_results,
#         "data/bip_schz_data/gtex_results/cv_tune_results/scz_gtex_cortical_s05_2cv.rds")

# Next generate the intercept-only results:
adapt_intercept_results <- adapt_glm(x = bip_scz_gtex_data,
                                     pvals = bip_scz_gtex_data$scz_14_P,
                                     pi_formulas = "1",
                                     mu_formulas = "1",
                                     verbose = list(print = FALSE,
                                                    fit = FALSE,
                                                    ms = FALSE),
                                     s0 = rep(0.05, nrow(bip_scz_gtex_data)))

# saveRDS(adapt_intercept_results,
#         "data/bip_schz_data/gtex_results/scz_gtex_intercept_only_s05.rds")
|
d6bca2242bd44ba5c51e1007496eaeb2e46cdc1b
|
0ebc9231a21649753eb0546fbc3554570d1dfd17
|
/data-raw/SRP073808/SRP073808_generate_pheno.R
|
ab4e1dc18d7ec8c3134c8c296f373e9f215939e7
|
[
"MIT"
] |
permissive
|
markrobinsonuzh/conquer
|
917765c151751ae7ae539b0b2e320093ab25b99b
|
670872217b646c56e460616fc243ff30bbe32cf4
|
refs/heads/master
| 2020-12-26T00:26:46.908398
| 2018-05-26T14:53:28
| 2018-05-26T14:53:28
| 63,949,425
| 52
| 14
|
MIT
| 2018-04-11T06:00:04
| 2016-07-22T11:47:31
|
HTML
|
UTF-8
|
R
| false
| false
| 482
|
r
|
SRP073808_generate_pheno.R
|
# Build the SRP073808 phenotype table from the SRA run info: keep the run
# accession and a cleaned-up library name, then write it out tab-separated.
library(dplyr)

run_info <- read.delim("SRP073808_SraRunInfo.csv", header = TRUE, as.is = TRUE,
                       sep = ",")

# Keep only the run accession and the library name columns.
pheno <- run_info[, c("Run", "LibraryName"), drop = FALSE]

# Strip the long suffix shared by every library name so only the
# informative sample label remains.
pheno$LibraryName <- gsub("_single-cell: In vitro cultured H7 human embryonic stem cells \\(WiCell\\) and H7-derived downstream early mesoderm progenitors", "", pheno$LibraryName)

rownames(pheno) <- pheno$Run

write.table(pheno, file = "SRP073808_pheno.txt", row.names = TRUE, col.names = TRUE,
            sep = "\t", quote = FALSE)
|
84eb7d555f6e710be82257aab9387a6fd44d617f
|
788e8885cf3db8c340d2ad01e0b05e9f8eb011a5
|
/man/Total_Deaths_Country_Stats.Rd
|
c4a35bce88018ec8c97b4193f6734ffe377e3d67
|
[
"MIT"
] |
permissive
|
oonyechi123/CovidAPIWrapper
|
59cbdc2be752e3ca87fecdef93ef42054fb9b37f
|
ab19ede9ea6b47a576379540c641deaaef607458
|
refs/heads/main
| 2023-01-22T02:35:26.083929
| 2020-12-02T23:56:21
| 2020-12-02T23:56:21
| 318,020,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 991
|
rd
|
Total_Deaths_Country_Stats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Total_Deaths_Country_Stats.R
\name{Total_Deaths_Country_Stats}
\alias{Total_Deaths_Country_Stats}
\title{Total Deaths Country Statistics}
\usage{
Total_Deaths_Country_Stats(stat)
}
\arguments{
\item{stat}{This should be the statistic the end user wants to analyze.
It can be "maximum", "minimum", "average" or "sum".}
}
\value{
The output depends on the stat parameter input.
If the stat is the maximum or minimum, the function outputs a data frame of the name and total Covid-19 deaths for the country/countries with the most or fewest total deaths, respectively.
If the stat is the average, it outputs the average total Covid-19 deaths by a world country.
If the stat is the sum, it outputs the sum of all the world countries' total Covid-19 deaths.
}
\description{
Total Deaths Country Statistics
}
\examples{
Total_Deaths_Country_Stats("maximum")
## to find the country with the most total Covid-19 deaths.
}
|
afdf4deaeb8838943ebede667b5dd1638ae0b802
|
5264913a140ee3230721e06d399b9841a3053224
|
/Figure3_CES.R
|
473e5959fd3cbcfe577ce05de84e032e855e9a90
|
[] |
no_license
|
alexander-y-yang/PRAD
|
7d884d4bf37df11eb9299fde6a58b08b68721239
|
5e57943690c30380dd6b1e19e20863a3cf7869cb
|
refs/heads/main
| 2023-03-14T05:54:55.328311
| 2021-03-06T22:33:24
| 2021-03-06T22:33:24
| 342,624,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,307
|
r
|
Figure3_CES.R
|
library(cancereffectsizeR)
library(scales)
library(stringr)
library(dplyr)
library(ggplot2)
library(cowplot)
scientific <- function(x){
ifelse(x==0, "0", parse(text=gsub("[+]", "", gsub("e", " %*% 10^", label_scientific()(x)))))
}
# Load the saved cancereffectsizeR analysis and keep only amino-acid-change
# (aac) SNV selection results. Column subsetting below is positional and
# tied to this specific cesa output layout -- TODO confirm indices if the
# package version changes.
threestage_final <- load_cesa("threestage_final.rds")
threestage_results <- snv_results(threestage_final)
threestage_results <- threestage_results$selection.1
threestage_results$variant_name <- str_replace(threestage_results$variant_name, "_", " ")
aac <- threestage_results$variant_type == "aac"
threestage_results <- threestage_results[aac,]
threestage_results <- threestage_results[,c(1,3:5,7:12,19,37,39,41)]
# Split into per-stage tables (early / late / metastasis), keeping variants
# observed at least once in that stage, sorted by descending SI.
# NOTE(review): `all` shadows base::all here; harmless but worth renaming.
# NOTE(review): order(-si_1) etc. rely on data.table scoping (si_* resolved
# as columns of the table being indexed) -- would error on a plain
# data.frame; presumably the cesa results are data.tables. Confirm.
threestage_results_early <- threestage_results[,c(1:2,5:6,11,12)]
all <- threestage_results_early$maf_freq_in_Early >= 1
threestage_results_early <- threestage_results_early[all,]
threestage_results_early <- threestage_results_early[order(-si_1),]
colnames(threestage_results_early)[2] <- "si"
threestage_results_early$progression <- rep("Early", length(threestage_results_early$variant_name))
threestage_results_late <- threestage_results[,c(1,3,7:8,11,13)]
all <- threestage_results_late$maf_freq_in_Late >= 1
threestage_results_late <- threestage_results_late[all,]
threestage_results_late <- threestage_results_late[order(-si_2),]
colnames(threestage_results_late)[2] <- "si"
threestage_results_late$progression <- rep("Late", length(threestage_results_late$variant_name))
threestage_results_met <- threestage_results[,c(1,4,9:10,11,14)]
all <- threestage_results_met$maf_freq_in_Metastasis >= 1
threestage_results_met <- threestage_results_met[all,]
threestage_results_met <- threestage_results_met[order(-si_3),]
colnames(threestage_results_met)[2] <- "si"
threestage_results_met$progression <- rep("Metastasis", length(threestage_results_met$variant_name))
# Recurrent variants = seen more than once in the stage's MAF.
recurrent <- threestage_results_early$maf_freq_in_Early > 1
threestage_results_early_recur <- threestage_results_early[recurrent,]
recurrent <- threestage_results_late$maf_freq_in_Late > 1
threestage_results_late_recur <- threestage_results_late[recurrent,]
recurrent <- threestage_results_met$maf_freq_in_Metastasis > 1
threestage_results_met_recur <- threestage_results_met[recurrent,]
# Summarise per-gene selection intensity (SI) statistics.
#
# Keeps only variants under positive selection (si > 1), computes per-gene
# summary statistics, and attaches the fields of each gene's top (highest-SI)
# variant.
#
# Args:
#   data:         data frame with columns `variant_name`, `gene`, `si`.
#   min_variants: report only genes with strictly more than this many
#                 qualifying variants. Default 1 preserves the original
#                 behavior (multi-variant genes only); pass 0 to keep every
#                 gene (the behavior of summary_gene_recur).
# Returns: one row per retained gene, ordered by cumulative SI then variant
#   count, with cum_si, mean_si, median_si, sd, max_si, n_variant plus the
#   top variant's columns.
summary_gene <- function(data, min_variants = 1) {
  data_clean <- data %>%
    arrange(desc(si)) %>%
    filter(si > 1)
  # Per-gene summary of the selection coefficients.
  info1 <- data_clean %>%
    group_by(gene) %>%
    summarise(cum_si = sum(si),
              mean_si = mean(si),
              median_si = median(si),
              sd = sd(si),
              max_si = max(si),
              n_variant = n_distinct(variant_name)) %>%
    filter(n_variant > min_variants)
  # First row per gene is its highest-SI variant (data_clean is pre-sorted).
  top_variant <- data_clean %>%
    group_by(gene) %>%
    slice(1) %>%
    ungroup()
  # Drop the duplicated `si` column (position 3) before merging.
  merge(info1, top_variant[, -3], by.x = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
}
# Summarise per-gene SI statistics, keeping every gene that has at least one
# variant under positive selection (si > 1); used to find genes with at
# least one recurrent variant. Same output shape as summary_gene, but with
# no multi-variant requirement.
summary_gene_recur <- function(data) {
  selected <- data %>%
    arrange(desc(si)) %>%
    filter(si > 1)
  # Per-gene summary statistics of the selection coefficients.
  gene_stats <- selected %>%
    group_by(gene) %>%
    summarise(cum_si = sum(si),
              mean_si = mean(si),
              median_si = median(si),
              sd = sd(si),
              max_si = max(si),
              n_variant = n_distinct(variant_name)) %>%
    filter(n_variant > 0)
  # First row per gene is its highest-SI variant (selected is pre-sorted).
  best_variant <- selected %>%
    group_by(gene) %>%
    slice(1)
  merge(gene_stats, best_variant[, -3], by.x = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
}
###############################################################################################
# Gene-level SI: build minimal (variant, gene, si) frames per stage,
# summarise them, and keep only genes with >=1 recurrent variant.
################################################################################################
early_data <- data.frame(variant_name = threestage_results_early$variant_name,
                         gene = threestage_results_early$gene,
                         si = threestage_results_early$si)
late_data <- data.frame(variant_name = threestage_results_late$variant_name,
                        gene = threestage_results_late$gene,
                        si = threestage_results_late$si)
met_data <- data.frame(variant_name = threestage_results_met$variant_name,
                       gene = threestage_results_met$gene,
                       si = threestage_results_met$si)
early_info <- summary_gene(early_data)
late_info <- summary_gene(late_data)
met_info <- summary_gene(met_data)
# Same frames restricted to recurrent variants (maf freq > 1).
early_data_recur <- data.frame(variant_name = threestage_results_early_recur$variant_name,
                               gene = threestage_results_early_recur$gene,
                               si = threestage_results_early_recur$si)
late_data_recur <- data.frame(variant_name = threestage_results_late_recur$variant_name,
                              gene = threestage_results_late_recur$gene,
                              si = threestage_results_late_recur$si)
met_data_recur <- data.frame(variant_name = threestage_results_met_recur$variant_name,
                             gene = threestage_results_met_recur$gene,
                             si = threestage_results_met_recur$si)
early_info_recur <- summary_gene_recur(early_data_recur)
late_info_recur <- summary_gene_recur(late_data_recur)
met_info_recur <- summary_gene_recur(met_data_recur)
# Filter out all genes that have NO recurrent variants, i.e. keep genes with
# at least ONE recurrent variant.
early_info <- early_info[which(early_info$gene %in% early_info_recur$gene),]
late_info <- late_info[which(late_info$gene %in% late_info_recur$gene),]
met_info <- met_info[which(met_info$gene %in% met_info_recur$gene),]
# Replace NA entries in a vector with a fill value.
#
# Args:
#   x:    vector possibly containing NA.
#   fill: replacement for each NA (default 0).
# Returns: `x` with every NA substituted by `fill`. Uses ifelse(), so
#   non-name attributes (e.g. class) are not preserved; fine here because
#   it is applied to plain numeric columns.
fill_na <- function(x, fill = 0) {
  ifelse(is.na(x), fill, x)
}
# Outer-join the early and late gene summaries (suffixes .e/.l), zero-fill
# the NAs introduced by the join, and add combined primary-tumor stats.
prim_info <- merge(early_info, late_info, by = "gene", all = T,
                   suffixes = c(".e", ".l")) %>%
  mutate_at(c("cum_si.e", "cum_si.l",
              "mean_si.e", "mean_si.l",
              "sd.e", "sd.l",
              "n_variant.e", "n_variant.l"), fill_na) %>%
  mutate(n_variant_prim = n_variant.e + n_variant.l,
         mean_si_prim = (cum_si.e + cum_si.l) / n_variant_prim) %>%
  arrange(desc(n_variant_prim))
# Tag the metastasis summary columns with ".m" (restoring the key name),
# then join it onto the primary-tumor table and add overall stats.
colnames(met_info) <- paste(colnames(met_info), ".m", sep = "")
colnames(met_info)[1] <- "gene"
stage_merge <- merge(prim_info, met_info, by = "gene", all = T) %>%
  mutate_at(c("cum_si.e", "cum_si.l", "cum_si.m",
              "mean_si.e", "mean_si.l", "mean_si.m",
              "sd.e", "sd.l", "sd.m",
              "n_variant.e", "n_variant.l", "n_variant.m"), fill_na) %>%
  mutate(n_variant_total = n_variant.e + n_variant.l + n_variant.m,
         mean_si_total = (cum_si.e + cum_si.l + cum_si.m) / n_variant_total) %>%
  arrange(desc(n_variant_total))
########################################################################################
# Early (lower-risk): top 10 genes by mean early SI, plotted as jittered
# per-variant SI points, dodged by stage.
stage_merge_early_ordered <- stage_merge[order(-stage_merge$mean_si.e),]
selected_early_genes <- stage_merge_early_ordered$gene[1:10]
# Select all variants within the gene list (columns reordered positionally:
# variant, si, progression, stage maf freq, gene).
early_list.e <- threestage_results_early %>%
  filter(gene %in% selected_early_genes) %>%
  select(1,2,7,6,5)
early_list.l <- threestage_results_late %>%
  filter(gene %in% selected_early_genes) %>%
  select(1,2,7,6,5)
early_list.m <- threestage_results_met %>%
  filter(gene %in% selected_early_genes) %>%
  select(1,2,7,6,5)
# Unify the stage-specific maf column name so the frames can be stacked.
colnames(early_list.e)[4] <- "maf_freq"
colnames(early_list.l)[4] <- "maf_freq"
colnames(early_list.m)[4] <- "maf_freq"
early_list <- rbind(early_list.e, early_list.l)
early_list <- rbind(early_list, early_list.m)
# Fix the gene order on the x axis to the mean-SI ranking.
early_list$gene <- early_list$gene %>%
  factor(levels = selected_early_genes)
early_jitter <- ggplot(early_list, aes(x=gene, y=si, color=progression)) +
  geom_point(position = position_jitterdodge(jitter.width = 0.1))+
  xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(color = "black"),
        panel.border = element_blank(),
        legend.position = c(0.95, 0.85))+
  scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
  # Light separators between the gene columns.
  geom_vline(xintercept=seq(1.5, length(unique(early_list$gene))-0.5, 1),
             lwd=.5, colour="lightgrey") +
  scale_y_continuous(labels=scientific, limits = c(-0.8e4, 1e5))
early_jitter
########################################################################################
# Late (higher-risk): same construction as the Early panel, ranked by
# mean late SI.
stage_merge_late_ordered <- stage_merge[order(-stage_merge$mean_si.l),]
selected_late_genes <- stage_merge_late_ordered$gene[1:10]
# Select all variants within the gene list.
late_list.e <- threestage_results_early %>%
  filter(gene %in% selected_late_genes) %>%
  select(1,2,7,6,5)
late_list.l <- threestage_results_late %>%
  filter(gene %in% selected_late_genes) %>%
  select(1,2,7,6,5)
late_list.m <- threestage_results_met %>%
  filter(gene %in% selected_late_genes) %>%
  select(1,2,7,6,5)
colnames(late_list.e)[4] <- "maf_freq"
colnames(late_list.l)[4] <- "maf_freq"
colnames(late_list.m)[4] <- "maf_freq"
late_list <- rbind(late_list.e, late_list.l)
late_list <- rbind(late_list, late_list.m)
# Set order of genes for the plot.
late_list$gene <- late_list$gene %>%
  factor(levels = selected_late_genes)
# Dummy near-zero points for gene/stage combinations with no variants --
# presumably so position_jitterdodge reserves the slot and the dodge
# spacing stays aligned across genes (verify against the rendered figure).
dummy_OR4N4.e <- list("OR4N4 Variant.e", as.double(0.001), "Early", "1", "OR4N4")
dummy_CHRNA6.e <- list("CHRNA6 Variant.e", as.double(0.001), "Early", "1", "CHRNA6")
late_list <- late_list %>%
  rbind(dummy_OR4N4.e) %>%
  rbind(dummy_CHRNA6.e)
library(ggplot2)
late_jitter<- ggplot(late_list, aes(x=gene, y=si, color=progression)) +
  geom_point(position = position_jitterdodge(jitter.width = 0.1))+
  xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(color = "black"),
        panel.border = element_blank(),
        legend.position = c(0.95, 0.85))+
  scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
  geom_vline(xintercept=seq(1.5, length(unique(late_list$gene))-0.5, 1),
             lwd=.5, colour="lightgrey") +
  scale_y_continuous(labels=scientific, limits = c(-.24e4, 3e4), breaks = c(0, 1e4, 1.5e4, 2e4, 3e4))
late_jitter
########################################################################################
# Metastasis: same construction, ranked by mean metastatic SI.
stage_merge_met_ordered <- stage_merge[order(-stage_merge$mean_si.m),]
selected_met_genes <- stage_merge_met_ordered$gene[1:10]
# Select all variants within the gene list.
met_list.e <- threestage_results_early %>%
  filter(gene %in% selected_met_genes) %>%
  select(1,2,7,6,5)
met_list.l <- threestage_results_late %>%
  filter(gene %in% selected_met_genes) %>%
  select(1,2,7,6,5)
met_list.m <- threestage_results_met %>%
  filter(gene %in% selected_met_genes) %>%
  select(1,2,7,6,5)
colnames(met_list.e)[4] <- "maf_freq"
colnames(met_list.l)[4] <- "maf_freq"
colnames(met_list.m)[4] <- "maf_freq"
met_list <- rbind(met_list.e, met_list.l)
met_list <- rbind(met_list, met_list.m)
# Set order of genes for the plot.
met_list$gene <- met_list$gene %>%
  factor(levels = selected_met_genes)
# Dummy near-zero points for every gene/stage slot with no observed
# variants, keeping the dodge layout consistent (see Late panel note).
dummy_ORC3.e <- list("ORC3 Variant.e", as.double(0.001), "Early", "1", "ORC3")
dummy_ORC3.l <- list("ORC3 Variant.l", as.double(0.001), "Late", "1", "ORC3")
dummy_ZNF780B.l <- list("ZNF780B Variant.l", as.double(0.001), "Late", "1", "ZNF780B")
dummy_DIMT1.e <- list("DIMT1 Variant.e", as.double(0.001), "Early", "1", "DIMT1")
dummy_DIMT1.l <- list("DIMT1 Variant.l", as.double(0.001), "Late", "1", "DIMT1")
dummy_KRTAP13_3.l <- list("KRTAP13-3 Variant.l", as.double(0.001), "Late", "1", "KRTAP13-3")
dummy_ZNF714.e <- list("ZNF714 Variant.e", as.double(0.001), "Early", "1", "ZNF714")
dummy_GRB7.e <- list("GRB7 Variant.e", as.double(0.001), "Early", "1", "GRB7")
dummy_APCS.e <- list("APCS Variant.e", as.double(0.001), "Early", "1", "APCS")
met_list <- met_list %>%
  rbind(dummy_ORC3.e) %>%
  rbind(dummy_ORC3.l) %>%
  rbind(dummy_ZNF780B.l)%>%
  rbind(dummy_DIMT1.e)%>%
  rbind(dummy_DIMT1.l)%>%
  rbind(dummy_KRTAP13_3.l)%>%
  rbind(dummy_ZNF714.e)%>%
  rbind(dummy_GRB7.e)%>%
  rbind(dummy_APCS.e)
library(ggplot2)
met_jitter <- ggplot(met_list, aes(x=gene, y=si, color=progression)) +
  geom_point(position = position_jitterdodge(jitter.width = 0.1))+
  xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(color = "black"),
        panel.border = element_blank(),
        legend.position = c(0.95, 0.85))+
  scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
  geom_vline(xintercept=seq(1.5, length(unique(met_list$gene))-0.5, 1),
             lwd=.5, colour="lightgrey") +
  scale_y_continuous(labels=scientific, limits = c(-0.9e4, 1.051e5))
met_jitter
##############################################################################
# Wilcoxon test setup: for each top-10 gene set, collect the per-stage SI
# vectors into named lists (one element per gene). The suffix denotes the
# stage the SI values come from (.e early, .l late, .m metastasis).
# NOTE(review): the dummy 0.001 points added to late_list/met_list above are
# included in these vectors -- confirm that is intended for the tests.
early.e <- list()
early.l <- list()
early.m <- list()
for (x in unique(early_list$gene)) {
  values <- early_list %>%
    filter(gene == x) %>%
    filter(progression == "Early") %>%
    pull(si)
  early.e <- c(early.e, list(values))
}
names(early.e) <- unique(early_list$gene)
for (x in unique(early_list$gene)) {
  values <- early_list %>%
    filter(gene == x) %>%
    filter(progression == "Late") %>%
    pull(si)
  early.l <- c(early.l, list(values))
}
names(early.l) <- unique(early_list$gene)
for (x in unique(early_list$gene)) {
  values <- early_list %>%
    filter(gene == x) %>%
    filter(progression == "Metastasis") %>%
    pull(si)
  early.m <- c(early.m, list(values))
}
names(early.m) <- unique(early_list$gene)
# Same extraction for the higher-risk gene set.
late.e <- list()
late.l <- list()
late.m <- list()
for (x in unique(late_list$gene)) {
  values <- late_list %>%
    filter(gene == x) %>%
    filter(progression == "Early") %>%
    pull(si)
  late.e <- c(late.e, list(values))
}
names(late.e) <- unique(late_list$gene)
for (x in unique(late_list$gene)) {
  values <- late_list %>%
    filter(gene == x) %>%
    filter(progression == "Late") %>%
    pull(si)
  late.l <- c(late.l, list(values))
}
names(late.l) <- unique(late_list$gene)
for (x in unique(late_list$gene)) {
  values <- late_list %>%
    filter(gene == x) %>%
    filter(progression == "Metastasis") %>%
    pull(si)
  late.m <- c(late.m, list(values))
}
names(late.m) <- unique(late_list$gene)
# Same extraction for the metastasis gene set.
met.e <- list()
met.l <- list()
met.m <- list()
for (x in unique(met_list$gene)) {
  values <- met_list %>%
    filter(gene == x) %>%
    filter(progression == "Early") %>%
    pull(si)
  met.e <- c(met.e, list(values))
}
names(met.e) <- unique(met_list$gene)
for (x in unique(met_list$gene)) {
  values <- met_list %>%
    filter(gene == x) %>%
    filter(progression == "Late") %>%
    pull(si)
  met.l <- c(met.l, list(values))
}
names(met.l) <- unique(met_list$gene)
for (x in unique(met_list$gene)) {
  values <- met_list %>%
    filter(gene == x) %>%
    filter(progression == "Metastasis") %>%
    pull(si)
  met.m <- c(met.m, list(values))
}
names(met.m) <- unique(met_list$gene)
# Pairwise Wilcoxon rank-sum tests between stages, per gene.
# NOTE(review): the loops hard-code 1:10 and assume exactly 10 genes per set.
# Tests for the lower-risk gene set.
wilcox_early.e_l <- c()
for (x in 1:10){
  wilcox <- wilcox.test(early.e[[x]], early.l[[x]])
  wilcox_early.e_l <- c(wilcox_early.e_l, wilcox$p.value)
}
wilcox_early.e_m <- c()
for (x in 1:10){
  wilcox <- wilcox.test(early.e[[x]], early.m[[x]])
  wilcox_early.e_m <- c(wilcox_early.e_m, wilcox$p.value)
}
wilcox_early.l_m <- c()
for (x in 1:10){
  wilcox <- wilcox.test(early.l[[x]], early.m[[x]])
  wilcox_early.l_m <- c(wilcox_early.l_m, wilcox$p.value)
}
early.wilcox <- data.frame(early_late = wilcox_early.e_l,
                           early_met = wilcox_early.e_m,
                           late_met = wilcox_early.l_m)
row.names(early.wilcox) <- unique(early_list$gene)
# Tests for the higher-risk gene set.
wilcox_late.e_l <- c()
for (x in 1:10){
  wilcox <- wilcox.test(late.e[[x]], late.l[[x]])
  wilcox_late.e_l <- c(wilcox_late.e_l, wilcox$p.value)
}
wilcox_late.e_m <- c()
for (x in 1:10){
  wilcox <- wilcox.test(late.e[[x]], late.m[[x]])
  wilcox_late.e_m <- c(wilcox_late.e_m, wilcox$p.value)
}
wilcox_late.l_m <- c()
for (x in 1:10){
  wilcox <- wilcox.test(late.l[[x]], late.m[[x]])
  wilcox_late.l_m <- c(wilcox_late.l_m, wilcox$p.value)
}
late.wilcox <- data.frame(early_late = wilcox_late.e_l,
                          early_met = wilcox_late.e_m,
                          late_met = wilcox_late.l_m)
row.names(late.wilcox) <- unique(late_list$gene)
# Tests for the metastasis gene set.
wilcox_met.e_l <- c()
for (x in 1:10){
  wilcox <- wilcox.test(met.e[[x]], met.l[[x]])
  wilcox_met.e_l <- c(wilcox_met.e_l, wilcox$p.value)
}
wilcox_met.e_m <- c()
for (x in 1:10){
  wilcox <- wilcox.test(met.e[[x]], met.m[[x]])
  wilcox_met.e_m <- c(wilcox_met.e_m, wilcox$p.value)
}
wilcox_met.l_m <- c()
for (x in 1:10){
  wilcox <- wilcox.test(met.l[[x]], met.m[[x]])
  wilcox_met.l_m <- c(wilcox_met.l_m, wilcox$p.value)
}
met.wilcox <- data.frame(early_late = wilcox_met.e_l,
                         early_met = wilcox_met.e_m,
                         late_met = wilcox_met.l_m)
row.names(met.wilcox) <- unique(met_list$gene)
# Optional export of the p-value tables (left disabled by the author).
# write.table(early.wilcox, file = "wilcox_early.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
# write.table(late.wilcox, file = "wilcox_late.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
# write.table(met.wilcox, file = "wilcox_met.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
###########################################################################
# Export for the Bayesian approach: SI plus 95% CI bounds for every variant
# in each featured top-10 gene set (the earlier *_list frames are rebuilt
# here with the CI columns included).
# Early (lower-risk) gene set.
early_list.e <- threestage_results_early %>%
  filter(gene %in% selected_early_genes) %>%
  select(1,2,3,4,7,6,5)
early_list.l <- threestage_results_late %>%
  filter(gene %in% selected_early_genes) %>%
  select(1,2,3,4,7,6,5)
early_list.m <- threestage_results_met %>%
  filter(gene %in% selected_early_genes) %>%
  select(1,2,3,4,7,6,5)
# Unify stage-specific column names so the three frames stack cleanly.
colnames(early_list.e)[3] <- "ci_low_95"
colnames(early_list.l)[3] <- "ci_low_95"
colnames(early_list.m)[3] <- "ci_low_95"
colnames(early_list.e)[4] <- "ci_high_95"
colnames(early_list.l)[4] <- "ci_high_95"
colnames(early_list.m)[4] <- "ci_high_95"
colnames(early_list.e)[6] <- "maf_freq"
colnames(early_list.l)[6] <- "maf_freq"
colnames(early_list.m)[6] <- "maf_freq"
early_list <- rbind(early_list.e, early_list.l)
early_list <- rbind(early_list, early_list.m)
# Late (higher-risk) gene set.
late_list.e <- threestage_results_early %>%
  filter(gene %in% selected_late_genes) %>%
  select(1,2,3,4,7,6,5)
late_list.l <- threestage_results_late %>%
  filter(gene %in% selected_late_genes) %>%
  select(1,2,3,4,7,6,5)
late_list.m <- threestage_results_met %>%
  filter(gene %in% selected_late_genes) %>%
  select(1,2,3,4,7,6,5)
colnames(late_list.e)[3] <- "ci_low_95"
colnames(late_list.l)[3] <- "ci_low_95"
colnames(late_list.m)[3] <- "ci_low_95"
colnames(late_list.e)[4] <- "ci_high_95"
colnames(late_list.l)[4] <- "ci_high_95"
colnames(late_list.m)[4] <- "ci_high_95"
colnames(late_list.e)[6] <- "maf_freq"
colnames(late_list.l)[6] <- "maf_freq"
colnames(late_list.m)[6] <- "maf_freq"
late_list <- rbind(late_list.e, late_list.l)
late_list <- rbind(late_list, late_list.m)
# Metastasis gene set.
met_list.e <- threestage_results_early %>%
  filter(gene %in% selected_met_genes) %>%
  select(1,2,3,4,7,6,5)
met_list.l <- threestage_results_late %>%
  filter(gene %in% selected_met_genes) %>%
  select(1,2,3,4,7,6,5)
met_list.m <- threestage_results_met %>%
  filter(gene %in% selected_met_genes) %>%
  select(1,2,3,4,7,6,5)
colnames(met_list.e)[3] <- "ci_low_95"
colnames(met_list.l)[3] <- "ci_low_95"
colnames(met_list.m)[3] <- "ci_low_95"
colnames(met_list.e)[4] <- "ci_high_95"
colnames(met_list.l)[4] <- "ci_high_95"
colnames(met_list.m)[4] <- "ci_high_95"
colnames(met_list.e)[6] <- "maf_freq"
colnames(met_list.l)[6] <- "maf_freq"
colnames(met_list.m)[6] <- "maf_freq"
met_list <- rbind(met_list.e, met_list.l)
met_list <- rbind(met_list, met_list.m)
############################################################################
# Write the tab-separated exports to the working directory.
write.table(early_list, file = "lower-risk_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(late_list, file = "higher-risk_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(met_list, file = "metastatic_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
##############################################################################
# Side panels: the top 10 recurrent variants per stage (already sorted by
# descending SI), plotted as a single jittered column per stage. Colors
# match the stage colors used in the gene panels.
early_top10 <- threestage_results_early_recur[1:10,]
late_top10 <- threestage_results_late_recur[1:10,]
met_top10 <- threestage_results_met_recur[1:10,]
unique_jitter_early_all <- ggplot(data = early_top10, aes(x = 1, y = si, color=progression)) +
  geom_jitter(position=position_jitter(0.0, seed = 5))+
  theme(axis.ticks.x = element_blank(),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(color = "black"),
        legend.position="none")+
  xlab("Top 10 variants") + ylab("") +
  scale_color_manual(values = "#F8766D") +
  scale_x_continuous(limits = c(0.975, 1.25)) +
  # Same y range as early_jitter so the panels align when combined.
  scale_y_continuous(labels=scientific, limits = c(-0.8e4, 1e5))
unique_jitter_late_all<- ggplot(data = late_top10, aes(x = 1, y = si, color=progression)) +
  geom_jitter(position=position_jitter(0.0, seed = 5))+
  theme(axis.ticks.x = element_blank(),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(color = "black"),
        legend.position="none")+
  xlab("Top 10 variants") + ylab("") +
  scale_color_manual(values = "#00BA38") +
  scale_x_continuous(limits = c(0.975, 1.25)) +
  scale_y_continuous(labels=scientific, limits = c(-.24e4, 3e4), breaks = c(0, 1e4, 1.5e4, 2e4, 3.0e4))
unique_jitter_met_all<- ggplot(data = met_top10, aes(x = 1, y = si, color=progression)) +
  geom_jitter(position=position_jitter(0.0, seed = 5))+
  theme(axis.ticks.x = element_blank(),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(color = "black"),
        legend.position="none")+
  xlab("Top 10 variants") + ylab("") +
  scale_color_manual(values = "#619CFF") +
  scale_x_continuous(limits = c(0.975, 1.25)) +
  scale_y_continuous(labels=scientific, limits = c(-0.9e4, 1.051e5))
#########################################
# Row titles (cowplot), left-aligned over each panel row.
early_title <- ggdraw() +
  draw_label(
    "Lower-risk",
    fontface = 'bold',
    x = 0,
    hjust = 0
  ) +
  theme(
    # Add margin on the left of the drawing canvas so the title is aligned
    # with the left edge of the first plot.
    plot.margin = margin(0, 0, 0, 49)
  )
late_title <- ggdraw() +
  draw_label(
    "Higher-risk",
    fontface = 'bold',
    x = 0,
    hjust = 0
  ) +
  theme(
    # Same left margin as above for consistent alignment.
    plot.margin = margin(0, 0, 0, 49)
  )
met_title <- ggdraw() +
  draw_label(
    "Metastases",
    fontface = 'bold',
    x = 0,
    hjust = 0
  ) +
  theme(
    # Same left margin as above for consistent alignment.
    plot.margin = margin(0, 0, 0, 49)
  )
#######################################
# Combine: each row = gene panel (wide) + top-10 variant panel (narrow),
# titled, then the three rows stacked and saved.
early_combined <- plot_grid(early_jitter, unique_jitter_early_all,
                            align = "h", axis = "t", nrow = 1, ncol = 2, scale = 1, rel_widths = c(5,1),
                            labels = c("A", "B"), label_size = 10)
early_combined_title <- plot_grid(early_title, early_combined,
                                  align = "h", axis = "t", nrow = 2, ncol = 1, scale = 1, rel_heights = c(0.1,1))
late_combined <- plot_grid(late_jitter, unique_jitter_late_all,
                           align = "h", axis = "t", nrow = 1, ncol = 2, scale = 1, rel_widths = c(5,1),
                           labels = c("C", "D"), label_size = 10)
late_combined_title <- plot_grid(late_title, late_combined,
                                 align = "h", axis = "t", nrow = 2, ncol = 1, scale = 1, rel_heights = c(0.1,1))
met_combined <- plot_grid(met_jitter, unique_jitter_met_all,
                          align = "h", axis = "t", nrow = 1, ncol = 2, scale = 1, rel_widths = c(5,1),
                          labels = c("E", "F"), label_size = 10)
met_combined_title <- plot_grid(met_title, met_combined,
                                align = "h", axis = "t", nrow = 2, ncol = 1, scale = 1, rel_heights = c(0.1,1))
bar_box_combined <- plot_grid(early_combined_title, late_combined_title, met_combined_title,
                              align = "h", axis = "t", nrow = 3, ncol = 1, scale = 1)
bar_box_combined
ggsave("PRAD_figures/bar_jitter_test.png", width = 12.5, height = 12.5)
|
d77a949677635db6ea616de6c60402c136053ca0
|
08e7c9be71d6186fae233b8b2f03d4b269d5c5ea
|
/post-processing/4_BitExtractionAlgorithms/Sadzadi/Sadzadi_RSSQuantization_bak.R
|
5f609abc9bd7903a5cdf044e796cdf34ada09fb6
|
[] |
no_license
|
surendra060/KeyDerivationWirelessChannel
|
da54f39cf2537e69e60aab29320e0819d38c3d4c
|
07e48609fbeee44c787ae259cb48212bcd12a3ee
|
refs/heads/master
| 2021-08-23T08:42:04.876558
| 2017-12-04T10:32:19
| 2017-12-04T10:32:19
| 112,205,445
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,282
|
r
|
Sadzadi_RSSQuantization_bak.R
|
##### ************************* SADZADI ****************************************************************
# BitString Generation using Sadzadi et al
# Quantize two nodes' RSS traces into bit strings (Sadzadi et al. scheme)
# and log per-row agreement statistics.
#
# For each row (one measurement session of 1000 RSS samples per node), each
# sample strictly below the row median emits bit 0 and each sample strictly
# above it emits bit 1; samples equal to the median emit nothing. The two
# nodes' bit strings, per-row stats, and running mismatch averages are
# appended to the output files.
#
# Args:
#   nodeRSSA, nodeRSSB: ignored -- immediately overwritten by reading
#                       infileA/infileB below (parameters kept for the
#                       existing call signature).
#   infileA, infileB:   ';'-separated RSS files, 1000 integer samples per
#                       row (a trailing field is dropped via colClasses).
#   outfileBitStringA/B: per-row bit strings (space-separated, appended).
#   outfileDataA/B:      per-row statistics plus the bit string (appended).
#   entropyTestFile:     the shorter of the two bit strings per row, for
#                       downstream entropy testing.
# Side effects only; no return value. NOTE(review): files are appended to,
# so re-running without deleting outputs duplicates rows (the caller
# `rm`s them first).
nodeData_Sadzadi = function(nodeRSSA, infileA, outfileBitStringA,outfileDataA, nodeRSSB, infileB, outfileBitStringB,outfileDataB, entropyTestFile){
  # '&' separators are normalized to tabs before parsing; column 1001 is
  # skipped ("NULL" colClass).
  nodeRSSA = read.table(textConnection(gsub("&", "\t", readLines(infileA))), header = F, sep = ";", stringsAsFactors = T, colClasses = c(rep("integer", 1000), rep("NULL", 1)))
  nodeRSSB = read.table(textConnection(gsub("&", "\t", readLines(infileB))), header = F, sep = ";", stringsAsFactors = T, colClasses = c(rep("integer", 1000), rep("NULL", 1)))
  rowindex = 1:nrow(nodeRSSA)
  colindex = 1:ncol(nodeRSSA)
  outLier_i = 10;   # rank offset used for the outlier-trimmed range
  rowno=0;          # NOTE(review): unused
  AvgBitExtractionRate = 0;
  AvgLenMismatchRate = 0;
  AvgBitMismatchRate = 0;
  # Column headers for the per-row statistics files.
  cat("Median : Range : BitString_Len : rowLenMismatch : rowBitMismatchCount : MatchBits : Bitstring \n", file = outfileDataA, sep = " ", append = T)
  cat("Median : Range : BitString_Len : rowLenMismatch : rowBitMismatchCount : MatchBits : Bitstring\n", file = outfileDataB, sep = " ", append = T)
  for(rowcount in rowindex)
  {
    # lenA/lenB are 1-based write cursors: final bit counts are lenA-1 /
    # lenB-1. 100 marks an unused bit slot.
    lenA=1; lenB=1; bitstringA = c(rep(100,1000)) ; bitstringB = c(rep(100,1000))
    MatchBits = 0; rowBitMismatchCount = 0; rowLenMismatch = 0;
    medValueA = median(as.numeric(nodeRSSA[rowcount, 1:1000]))
    medValueB = median(as.numeric(nodeRSSB[rowcount, 1:1000]))
    rngA =c(rep(0,2));rngB =c(rep(0,2));
    RngA = range(as.numeric(nodeRSSA[rowcount, 1:1000]))
    RngB = range(as.numeric(nodeRSSB[rowcount, 1:1000]))
    # Outlier-trimmed range: 10th smallest / 10th largest sample of the row.
    rngA[1] = as.numeric(sort(nodeRSSA[rowcount, 1:1000],partial=outLier_i)[outLier_i])
    rngA[2] = as.numeric(sort(nodeRSSA[rowcount, 1:1000],partial=ncol(nodeRSSA)-outLier_i)[ncol(nodeRSSA)-outLier_i])
    rngB[1] = as.numeric(sort(nodeRSSB[rowcount, 1:1000],partial=outLier_i)[outLier_i])
    rngB[2] = as.numeric(sort(nodeRSSB[rowcount, 1:1000],partial=ncol(nodeRSSB)-outLier_i)[ncol(nodeRSSB)-outLier_i])
    # Change of LeftTh and RightTh values between Aono and Sadzadi;
    # the Aono-style asymmetric thresholds are kept for reference:
    # LeftThA = medValueA - ((medValueA - rngA[1])/2)
    # RightThA = medValueA + ((rngA[2] - medValueA)/4)
    # LeftThB = medValueB - ((medValueB - rngB[1])/2)
    # RightThB = medValueB + ((rngB[2] - medValueB)/4)
    # Sadzadi: both thresholds collapse to the median (samples equal to the
    # median produce no bit).
    LeftThA = medValueA
    RightThA = medValueA
    LeftThB = medValueB
    RightThB = medValueB
    for(colcount in colindex){
      # 1000 is a sentinel for "no bit emitted at this sample".
      flagA = 1000;flagB=1000;
      if (nodeRSSA[rowcount, colcount]<LeftThA)
      {
        bitstringA[lenA] = 0
        flagA = 0
        lenA=lenA+1
      }
      else if (nodeRSSA[rowcount, colcount]>RightThA)
      {
        bitstringA[lenA] = 1
        flagA = 1
        lenA=lenA+1
      }
      if (nodeRSSB[rowcount, colcount]<LeftThB)
      {
        bitstringB[lenB] = 0
        flagB = 0
        lenB=lenB+1
      }
      else if (nodeRSSB[rowcount, colcount]>RightThB)
      {
        bitstringB[lenB] = 1
        flagB = 1
        lenB=lenB+1
      }
      # A match requires both nodes to emit the same bit at the same sample.
      if ((flagA == flagB) && flagA !=1000 && flagB!=1000)
        MatchBits = MatchBits + 1;
    }
    rowLenMismatch = ((lenA-1) -(lenB-1))
    rowBitMismatchCount = min((lenA-1), (lenB-1)) - MatchBits;
    # Running means over rows processed so far.
    AvgBitExtractionRate = (AvgBitExtractionRate*(rowcount-1) + (min((lenA-1), (lenB-1))))/rowcount
    if (max((lenA-1), (lenB-1)) > 0){
      AvgLenMismatchRate = (AvgLenMismatchRate*(rowcount-1) + (abs(rowLenMismatch)/max((lenA-1), (lenB-1))))/rowcount
    }
    if (min((lenA-1), (lenB-1)) > 0 ){
      AvgBitMismatchRate = (AvgBitMismatchRate*(rowcount-1) + (rowBitMismatchCount/min((lenA-1), (lenB-1))))/rowcount
    }
    print(paste("RowA = ", rowcount, "rowLenMismatch = ", rowLenMismatch, " : rowBitMismatchCount = ", rowBitMismatchCount, " : LenBS_A = ", lenA-1, " : LenBS_B = ", lenB-1))
    print(paste("MedianA = ", medValueA, " : RangeA = ", rngA[1],"-", rngA[2], "LeftThA - RightThA ", LeftThA,"<->", RightThA))
    print(paste("MedianB = ", medValueB, " : RangeB = ", rngB[1],"-", rngB[2], "LeftThB - RightThB ", LeftThB,"<->", RightThB))
    # NOTE(review): 1:lenA-1 parses as (1:lenA)-1, i.e. 0:(lenA-1); the 0
    # index is silently dropped, so this selects the first lenA-1 bits.
    cat(rbind(bitstringA[1:lenA-1]), file = outfileBitStringA, sep = " ", append = T)
    cat("\n", file = outfileBitStringA, sep = " ", append = T)
    cat(rbind(bitstringB[1:lenB-1]), file = outfileBitStringB, sep = " ", append = T)
    cat("\n", file = outfileBitStringB, sep = " ", append = T)
    # The shorter bit string of the two goes into the entropy test file.
    if(lenA<=lenB) {
      cat(rbind(bitstringA[1:lenA-1]), file = entropyTestFile, sep = " ", append = T)
      cat("\n", file = entropyTestFile, sep = " ", append = T)
    }
    else {
      cat(rbind(bitstringB[1:lenB-1]), file = entropyTestFile, sep = " ", append = T)
      cat("\n", file = entropyTestFile, sep = " ", append = T)
    }
    # Per-row statistics record for node A.
    cat(medValueA, file = outfileDataA, sep = " ", append = T)
    cat(":", file = outfileDataA, sep = " ", append = T)
    cat(rngA, file = outfileDataA, sep = " ", append = T)
    cat(":", file = outfileDataA, sep = " ", append = T)
    cat(lenA-1, file = outfileDataA, sep = " ", append = T)
    cat(":", file = outfileDataA, sep = " ", append = T)
    cat(rowLenMismatch, file = outfileDataA, sep = " ", append = T)
    cat(":", file = outfileDataA, sep = " ", append = T)
    cat(rowBitMismatchCount, file = outfileDataA, sep = " ", append = T)
    cat(":", file = outfileDataA, sep = " ", append = T)
    cat(MatchBits, file = outfileDataA, sep = " ", append = T)
    cat(":", file = outfileDataA, sep = " ", append = T)
    # Per-row statistics record for node B.
    cat(medValueB, file = outfileDataB, sep = " ", append = T)
    cat(":", file = outfileDataB, sep = " ", append = T)
    cat(rngB, file = outfileDataB, sep = " ", append = T)
    cat(":", file = outfileDataB, sep = " ", append = T)
    cat(lenB-1, file = outfileDataB, sep = " ", append = T)
    cat(":", file = outfileDataB, sep = " ", append = T)
    cat(rowLenMismatch, file = outfileDataB, sep = " ", append = T)
    cat(":", file = outfileDataB, sep = " ", append = T)
    cat(rowBitMismatchCount, file = outfileDataB, sep = " ", append = T)
    cat(":", file = outfileDataB, sep = " ", append = T)
    cat(MatchBits, file = outfileDataB, sep = " ", append = T)
    cat(":", file = outfileDataB, sep = " ", append = T)
    # Finally the bit strings themselves, one line per row.
    cat(rbind(bitstringA[1:lenA-1]), file = outfileDataA, sep = " ", append = T)
    cat("\n", file = outfileDataA, sep = " ", append = T)
    cat(rbind(bitstringB[1:lenB-1]), file = outfileDataB, sep = " ", append = T)
    cat("\n", file = outfileDataB, sep = " ", append = T)
  }
  # Summary footer: averaged mismatch/extraction rates over all rows.
  print(paste("AvgLenMismatchRate = ", round(AvgLenMismatchRate,4) , " : AvgBitMismatchRate = ", round(AvgBitMismatchRate,4), "AvgBitExtractionRate", round(AvgBitExtractionRate,4)))
  cat("\n\n AvgLenMismatchRate : ", file = outfileDataA, sep = " ", append = T)
  cat(round(AvgLenMismatchRate,4), file = outfileDataA, sep = " ", append = T)
  cat("\n AvgBitMismatchRate", file = outfileDataA, sep = " ", append = T)
  cat(round(AvgBitMismatchRate,4) , file = outfileDataA, sep = " ", append = T)
  cat("\n AvgBitExtractionRate", file = outfileDataA, sep = " ", append = T)
  cat(round(AvgBitExtractionRate,4), file = outfileDataA, sep = " ", append = T)
  cat("\n\n AvgLenMismatchRate : ", file = outfileDataB, sep = " ", append = T)
  cat(round(AvgLenMismatchRate,4), file = outfileDataB, sep = " ", append = T)
  cat("\n AvgBitMismatchRate : ", file = outfileDataB, sep = " ", append = T)
  cat(round(AvgBitMismatchRate,4), file = outfileDataB, sep = " ", append = T)
  cat("\n AvgBitExtractionRate : ", file = outfileDataB, sep = " ", append = T)
  cat(round(AvgBitExtractionRate,4), file = outfileDataB, sep = " ", append = T)
}
# Clear previous outputs (the generator appends to its files), run the
# Sadzadi quantization, then report the 0/1 balance of node A's bit string.
system("rm nodeA_*", wait=FALSE)
system("rm nodeB_*", wait=FALSE)
system("rm nodeAB_*", wait=FALSE)
nodeData_Sadzadi(A, "nodeA.txt", "nodeA_BS_Sadzadi.txt", "nodeA_BS_data_Sadzadi.txt", B, "nodeB.txt", "nodeB_BS_Sadzadi.txt", "nodeB_BS_data_Sadzadi.txt","nodeAB_Sadzadi_Entropy.txt")
print(paste(" Sadzadi - TX-#0's : TX - #1's "))
# BUG FIX: system() returns the exit status unless intern = TRUE, so the
# original assignments captured 0 (success) instead of grep's counts.
# intern = TRUE returns the command's stdout as a character vector.
NumZero <- system("grep -o '0' nodeA_BS_Sadzadi.txt | wc -l", intern=TRUE)
NumOne <- system("grep -o '1' nodeA_BS_Sadzadi.txt | wc -l", intern=TRUE)
print(paste(NumZero , "<->", NumOne ))
|
55c61a6266c4afec6fe3ecb2e1ae360c19568cd7
|
fc94030dcfae022fe0ae372b250dcaf541525a7c
|
/man/cast_font_sf.Rd
|
ca3ef3249a30a433f2455c47275d07fdcc17592b
|
[
"MIT"
] |
permissive
|
njtierney/ishihara
|
fc31bf451686111079b29624a508026edd8dcfe4
|
a95654b3514c94900e3ded05a82702f32c906bdb
|
refs/heads/master
| 2022-09-07T03:10:00.368574
| 2020-05-31T05:57:02
| 2020-05-31T05:57:02
| 262,525,820
| 12
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 446
|
rd
|
cast_font_sf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cast-sf.R
\name{cast_font_sf}
\alias{cast_font_sf}
\title{Convert font into Spatial SF object}
\usage{
cast_font_sf(font_df)
}
\arguments{
\item{font_df}{font created by \code{\link[=glyph]{glyph()}}}
}
\value{
font polygon as an SF object
}
\description{
Convert font into Spatial SF object
}
\examples{
letter_s <- glyph("s")
letter_s_sf <- cast_font_sf(letter_s)
}
|
bb5c98e1cb6716bfcc1d7276a1b69a2fa21a7d68
|
b957b00776e912ef5c28f2c4e1dd88c29d9dcda8
|
/Plot_null_cycle.R
|
b5fa7f60fa1f2e1c01a6474bb56d45fd386be77e
|
[] |
no_license
|
lauren-baugh/LLM-Prot-Data
|
d4a040079de0162a254b4511f2d2c19d2db1ddaa
|
0028febe3a7fd4e8fdd341fd0af19b8748431f2b
|
refs/heads/main
| 2023-06-18T00:09:53.849500
| 2021-07-13T18:12:40
| 2021-07-13T18:12:40
| 385,692,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,098
|
r
|
Plot_null_cycle.R
|
# Run a PCA on proteomics data, then draw a volcano plot of the
# control-vs-osis differential expression results.
# Lauren Baugh 12/12/19
# uses http://www.sthda.com/english/articles/31-principal-component-methods-in-r-practical-guide/112-pca-principal-component-analysis-essentials/
# http://www.sthda.com/english/wiki/fviz-pca-quick-principal-component-analysis-data-visualization-r-software-and-data-mining
setwd("C:/Users/Lauren/Google Drive/Lauffenburger Lab/Proteomics/Combined_uterine/Mixed Model")
file="786_830_cycle_null_PCA_v2.csv"
data=read.table(file, header=TRUE, sep=',', stringsAsFactors = FALSE)
data = data[,-1]
# Transpose so samples become rows; the first transposed row holds labels
data.trans = t(data)
colnames(data.trans)=data.trans[1,]
data.r=data.trans[-1,]
data.ready=data.frame(data.matrix(data.r))
#rownames(data.ready)=c(1:nrow(data.ready))
# PCA on centered, unit-variance columns
data.pca=prcomp(data.matrix(data.ready[1:ncol(data.ready)]),center=TRUE, scale.=TRUE)
summary(data.pca)
library(factoextra)
fviz_eig(data.pca) # scree plot of variance explained per component
rn=row.names(data.r)
remove.num=gsub('[[:digit:]]+', '', rn) # strip digits so sample names collapse to group labels
colors=remove.num
# colors=c('c','o','c','c','o','c','o','o','c','c')
# colors=c('o','c','c','o','o','o','o','o','o')
fviz_pca_ind(data.pca,
             axes = c(1, 2),
             col.ind=colors,
             #col.ind = "cos2", # Color by the quality of representation
             #gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE # Avoid text overlapping
)
fviz_cos2(data.pca, choice = "var", axes = 1:2)+ coord_flip()
# Contributions of variables to PC1
fviz_contrib(data.pca, choice = "var", axes = 1, top = 10) # red line is the average contribution
# Contributions of variables to PC2
fviz_contrib(data.pca, choice = "var", axes = 2, top = 10)
# Individuals plot again, now with 95% confidence ellipses per group
fviz_pca_ind(data.pca,
             axes = c(1, 2),
             col.ind=colors,
             addEllipses=TRUE, ellipse.level=0.95,
             repel = TRUE # Avoid text overlapping
)
library("corrplot")
var=get_pca_var(data.pca)
corrplot(var$contrib, is.corr=FALSE)
# Volcano plot of likelihood-ratio-test p-values vs log2 fold change
# https://bioconductor.org/packages/release/bioc/vignettes/EnhancedVolcano/inst/doc/EnhancedVolcano.html
library(EnhancedVolcano)
setwd("C:/Users/Lauren/Dropbox (MIT)/Endometriosis_scRNAseq/Mixed_Models/Proteomics/786_830")
LRT=read.table('nc_t.value.pLRT.value.csv', header=TRUE, sep=',', stringsAsFactors = FALSE)
rownames(LRT)=LRT$X
pvals=data.frame(LRT$pvalue)
rownames(pvals)=LRT$X
data.vol=data[,c(-1,-2)]
rownames(data.vol)=data$Protein.Descriptions
# NOTE(review): the control/osis column indices below are hard-coded --
# confirm they still match the sample order of the input file.
control=data.vol[,c(1,3,5,6)]#control patients
osis=data.vol[,c(2,4,7,8)]#osis patients
c.avg=data.frame(rowMeans(control)) #average of log2 fold change
o.avg=data.frame(rowMeans(osis))#average of log2 fold change
log2fd=data.frame(c.avg-o.avg)
colnames(log2fd)='Log2FoldChange'
lrt.subset=merge(log2fd, pvals, by = 0,all.x=TRUE) # by = 0 merges on row names
EnhancedVolcano(toptable=lrt.subset,
                lab = lrt.subset$Row.names,
                x = 'Log2FoldChange',
                y = 'LRT.pvalue',
                title= "Control Vs Osis",
                pCutoff = 0.05,
                FCcutoff = 0.5,
                labSize = 4.0)
|
1b5a7a7edc77371c9f7be33b1f5925e581b87d09
|
728315d8c5d09e13c67641030b92d59c5e7e2222
|
/easy/compare_points.r
|
83243bd7c20887344bb1c8ebba2305c2503bc7f5
|
[
"MIT"
] |
permissive
|
shortthirdman/code-eval-challenges
|
88ea93c0e9385b2a0db95a05b1f3f753c900a62d
|
cf2197927830326539399fdd3e16c9b8a4468f7d
|
refs/heads/master
| 2023-03-06T19:34:44.607154
| 2023-02-26T13:30:56
| 2023-02-26T13:30:56
| 92,970,178
| 4
| 0
|
MIT
| 2023-02-26T13:30:57
| 2017-05-31T17:14:59
|
Go
|
UTF-8
|
R
| false
| false
| 394
|
r
|
compare_points.r
|
# Read two points "x1 y1 x2 y2" from each line of the input file (the last
# command-line argument) and print the compass direction from the first
# point to the second, or "here" when the points coincide.

# Classify one parsed line: a length-4 integer vector c(x1, y1, x2, y2).
# Nested ifelse() is kept so behavior (including NA propagation from
# malformed input) matches the original expression exactly.
direction_of <- function(p) {
  same_col <- p[1] == p[3]
  same_row <- p[2] == p[4]
  ifelse(same_col,
         ifelse(same_row, "here", ifelse(p[2] < p[4], "N", "S")),
         ifelse(same_row,
                ifelse(p[1] < p[3], "E", "W"),
                ifelse(p[1] < p[3],
                       ifelse(p[2] < p[4], "NE", "SE"),
                       ifelse(p[2] < p[4], "NW", "SW"))))
}

input_lines <- readLines(tail(commandArgs(), n = 1))
parsed <- lapply(strsplit(input_lines, " ", fixed = TRUE), as.integer)
cat(sapply(parsed, direction_of), sep = "\n")
|
090bc506f431ec121cd1b0ca8baee7aa6b0f0a6e
|
59f8189cebe437a3771ae473a9493385382a32cf
|
/server.R
|
b42df5c732790577a9546f46efcb27e5053b57f3
|
[] |
no_license
|
jdeneumostier/rshiny1
|
d853245db4b84fe6eb66f17dbe9541831ca4382f
|
5711dffbb6269c7c247ab2afce18338055044cbd
|
refs/heads/master
| 2021-01-08T21:23:18.453176
| 2020-02-28T16:12:13
| 2020-02-28T16:12:13
| 242,146,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,221
|
r
|
server.R
|
source("utils.R")

# Shiny server: loads a numeric column from an uploaded CSV (via the csvFile
# module in utils.R), shows descriptive statistics, checks normality with
# Kolmogorov-Smirnov and Shapiro-Wilk tests, and runs a one-sample t-test
# on demand.
server <- function(input, output, session) {

  # TRUE when BOTH normality tests fail to reject at level input$alpha
  normCheck <- reactiveVal(FALSE);

  # Run both normality tests and build the results table (side effect:
  # updates normCheck).
  normality <- reactive( {
    # Suppress the warning "ties should not be present for the
    # Kolmogorov-Smirnov test"
    values <- getData()
    kTest = suppressWarnings(ks.test(values, "pnorm", mean=mean(values), sd=sd(values)))
    sTest = shapiro.test(values)
    normCheck(kTest$p.value > input$alpha && sTest$p.value > input$alpha)
    results <- data.frame(
      "test" = c("Kolmogorov-Smirnov", "Shapiro-Wilk"),
      "statistic" = c(paste("D = ",round(kTest$statistic, 5)), paste("W = ",round(sTest$statistic, 5)) ),
      "pvalue" = c(kTest$p.value, sTest$p.value)
    )
    names(results) <- c("Test", "Statistic", "P value")
    return(results)
  })

  # Latest load error/warning message; NULL when the last read succeeded
  errorMsg <-reactiveVal(NULL);

  # Read the uploaded file through the csvFile module; on failure record
  # the message in errorMsg and return NULL so downstream outputs stay blank.
  getData <- reactive({
    tryCatch(
      {
        data <- callModule(csvFile, "myFile")
        errorMsg(NULL)
        return(data)
      },
      error = function(e) {
        errorMsg(paste("Error:", e$message))
        return(NULL)
      },
      warning = function(w) {
        errorMsg(paste("Warning:", w$message))
        return(NULL)
      }
    )
  })

  # Show the load error banner only when there is a message
  output$error <- renderUI(
    if (!is.null(errorMsg())) {
      p(errorMsg(), class="bg-danger text-danger result")
    }
  )

  # Descriptive statistics table (computeStats comes from utils.R)
  output$stats <- renderTable(
    {
      mydata = getData()
      if (!is.null(mydata)) computeStats(mydata, input$alpha);
    },
    rownames = TRUE, colnames = FALSE, spacing = c("l"), digits=2, align = "?",
    caption="<span class='tableTitle'>Descriptive statistics</span>",
    caption.placement = getOption("xtable.caption.placement", "top")
  )

  # Histogram of the data
  output$histo <- renderPlot({
    mydata = getData()
    if (!is.null(mydata)) {
      hist(mydata, col = "#75AADB", border = "white", main="Distribution", xlab="Values")
    }
  })

  # Normal QQ plot
  output$qqplot <- renderPlot({
    mydata = getData()
    if (is.null(mydata)) return(NULL)
    qqnorm(mydata, frame=FALSE)
    qqline(mydata, col="#75AADB", lwd=2)
  })

  # Normality tests table
  output$norm <- renderTable(
    {
      mydata = getData()
      if (!is.null(mydata)) {
        normality()
      }
    },
    rownames = FALSE, colnames = TRUE, digits=4,
    caption="<span class='tableTitle'>Tests for Normality</span>",
    caption.placement = getOption("xtable.caption.placement", "top")
  )

  # Verdict banner for the normality tests
  output$norm_result <- renderUI({
    mydata = getData()
    if (is.null(mydata)) return(NULL)
    if (normCheck()) {
      p("According to the tests, data is normally distributed", class="bg-success text-success result")
    } else {
      p("According to the tests, data is not normally distributed", class="bg-danger text-danger result")
    }
  })

  # T-test input form (only rendered once data is loaded)
  # NOTE(review): the visible choice labels say "less then"/"greater then";
  # should read "than" (user-facing strings left untouched here).
  output$t_test <- renderUI({
    mydata = getData()
    if (is.null(mydata)) return(NULL)
    tagList(
      tags$h4(HTML(paste("T-test for Mean (α=",input$alpha,")",sep=""))),
      numericInput("m0", "Hypothesized Mean (H0)", value=isolate(input$m0), width="50%"),
      selectInput("alternative", label="Alternative hypothesis (H1)",
                  choices=c("less then "="less", "greater then"="greater", "2-sided"="two.sided"),
                  selected=isolate(input$alternative)),
      actionButton("test_mean", label="Proceed")
    )
  })

  # Listener on the "test_mean" button: run the one-sample t-test and
  # render its result table
  observeEvent(input$test_mean, {
    mydata = getData()
    if (is.null(mydata)) return(NULL)
    result <- t.test(mydata, mu=input$m0, alternative=input$alternative, conf.level=1-input$alpha)
    result$p.value <- formatPvalue(result$p.value)
    tTest = data.frame(
      "df" = result$parameter,
      "statistic" = result$statistic,
      "pvalue" = result$p.value
    )
    names(tTest) = c("DF", "T Statistic", "P value")
    output$t_test_result <- renderTable({
      if (is.null(errorMsg())) {
        tTest
      }
      else return()
    },
    caption="<span class='tableTitle'>Test Mean</span>",
    caption.placement = getOption("xtable.caption.placement", "top")
    )
  })
}
|
bcf3776e9f1d103d1c233359c4a9e9d21011a3ac
|
d642254d4cc5b1ee5c45a2a56b4546adc06871b5
|
/lecture_note/huber.R
|
6ce79536e9fdccc3d6c7fe47edd500483c7d7f1b
|
[] |
no_license
|
Chad4545/datamining
|
ba52c1992a44e8113a1277e7e39c250133ce6d38
|
2f4858489ac8500636a62881c5b46bd33439f4dc
|
refs/heads/master
| 2020-04-28T08:39:34.505789
| 2019-05-08T10:21:37
| 2019-05-08T10:21:37
| 175,136,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 642
|
r
|
huber.R
|
# Script setup for the huberized-loss demo.
# NOTE(review): clearing the workspace and hard-coding a user-specific
# working directory make this script non-portable; prefer project-relative
# paths. Kept for compatibility with the original workflow.
rm(list = ls())
getwd()
setwd("/Users/sungjinpark/Desktop/OneDrive - konkuk.ac.kr/datamining/lecture_note")
# BUG FIX: the original wrapped these library() calls in ''' ... ''', which
# is Python syntax, not an R comment -- R parses it as stray string
# literals that are silently evaluated. Proper '#' comments are used
# instead; the packages remain optional references.
# library(MASS)      # huber(), rlm()
# library(robustreg) # robustRegH()
# library(quantreg)  # L1, rq()
# Huber loss of a simple linear model y ~ 1 + x.
#
# Args:
#   y:     numeric response vector.
#   x.vec: numeric predictor vector, same length as y.
#   b:     coefficient vector c(intercept, slope).
#   del:   Huber threshold delta (> 0).
#
# Returns the mean Huber loss over all observations:
#   rho(r) = r^2 / 2                   if |r| <= delta  (quadratic region)
#   rho(r) = delta * |r| - delta^2 / 2 otherwise        (linear region)
#
# BUG FIX: the original computed the quadratic/linear mask (`yes_or_no`)
# but never used it, returning mean(r^2 + 2*delta*|r| - delta^2)/2 for
# every residual -- which is not the Huber loss.
huber_loss <- function(y, x.vec, b, del) {
  x.mat <- cbind(1, x.vec)
  r.vec <- y - x.mat %*% b
  quadratic <- abs(r.vec) <= del
  losses <- ifelse(quadratic,
                   r.vec^2 / 2,
                   del * abs(r.vec) - del^2 / 2)
  mean(losses)
}
## Demo: evaluate the Huber loss for two candidate coefficient vectors.
## NOTE(review): no set.seed() call, so results differ on every run.
y.vec = rnorm(10) # random response vector (n = 10)
x.vec = rnorm(10) # random predictor vector
x.mat = cbind(1,x.vec) # design matrix with intercept column (unused below)
plot(x.vec,y.vec) # scatter of the simulated data
del = 1.345 # Huber threshold (the conventional default tuning constant)
b.vec.01 = c(1,1) # candidate (intercept, slope) = (1, 1)
b.vec.02 = c(1,0) # candidate (intercept, slope) = (1, 0)
huber_loss(y.vec,x.vec,b.vec.01,del)
huber_loss(y.vec,x.vec,b.vec.02,del)
|
284bb323ed9984d674aeee1aebcc3668510ba947
|
da5d74f9895e2c00947a42805f2af73d209082a1
|
/Fig2_SheepClocks.R
|
0ba72906d96ce2296c19f67f9a393c33f7bf459d
|
[
"MIT"
] |
permissive
|
xuefenfei712/sheepclock
|
b3321bdbee158f249af153d38599e75eeaf68ee2
|
b0eaec0b96afcc35f0d60982eb3d1215ea329d64
|
refs/heads/main
| 2023-06-03T08:50:48.750018
| 2021-06-24T00:49:17
| 2021-06-24T00:49:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,149
|
r
|
Fig2_SheepClocks.R
|
library(ggplot2)
library("cowplot")

# Figure 2: sheep epigenetic clock scatter plots (predicted DNAm age vs
# chronological age), assembled into one labelled grid.
setwd("/Users/victoriasugrue/Dropbox/N18.2018-9300-1SheepSkinBloodTimHoreRussellSnell/Subset_Sheep_PredictionResultsJosephZoller/")

# Panel A: sheep pan-tissue clock
sheepA <- read.csv("datPredictedAgeN18FinalJosephZoller.csv")
# Panel B: blood samples only (rows 1-168)
sheepB <- read.csv("datPredictedAgeN18FinalJosephZoller.csv")
sheepB <- sheepB[1:168,]
# Panel C: ear samples only (rows 169-432)
sheepC <- read.csv("datPredictedAgeN18FinalJosephZoller.csv")
sheepC <- sheepC[169:432,]
# Panel D: human + sheep, chronological age, both species
sheephumanD <- read.csv("datPredictedAge_HumanSheep_FinalJosephZoller.csv")
# Panel E: human + sheep, chronological age, sheep only (rows 1878-2309)
sheephumanE <- read.csv("datPredictedAge_HumanSheep_FinalJosephZoller.csv")
sheephumanE <- sheephumanE[1878:2309,]
# Panel F: human + sheep, relative age, both species
sheephumanF <- read.csv("datPredictedRelativeAge_HumanSheep_FinalJosephZoller.csv")
# Panel G: human + sheep, relative age, sheep only
sheephumanG <- read.csv("datPredictedRelativeAge_HumanSheep_FinalJosephZoller.csv")
sheephumanG <- sheephumanG[1878:2309,]

# Build one clock scatter panel: points coloured by `colour_var`, the
# dashed y = x identity line, and a single overall linear fit in black.
# Replaces seven near-identical copy-pasted ggplot blocks; column names
# are passed as strings and resolved with the .data pronoun.
clock_panel <- function(df, xvar, yvar, colour_var) {
  ggplot(df, aes(x = .data[[xvar]], y = .data[[yvar]],
                 colour = .data[[colour_var]])) +
    geom_point() +
    geom_abline(intercept = 0, slope = 1, linetype = "dashed") +
    geom_smooth(method = 'lm', se = F, size = 0.6, colour = "black",
                aes(group = 1)) +
    theme(legend.position = "none")
}

p1 <- clock_panel(sheepA, "Age", "DNAmAgeLOO", "Tissue")
p2 <- clock_panel(sheepB, "Age", "DNAmAgeLOO", "Tissue")
p3 <- clock_panel(sheepC, "Age", "DNAmAgeLOO", "Tissue")
p4 <- clock_panel(sheephumanD, "Age", "DNAmAgeLOFO10Balance", "SpeciesLatinName")
p5 <- clock_panel(sheephumanE, "Age", "DNAmAgeLOFO10Balance", "Tissue")
p6 <- clock_panel(sheephumanF, "RelAge", "DNAmRelAgeLOFO10Balance", "SpeciesLatinName")
p7 <- clock_panel(sheephumanG, "RelAge", "DNAmRelAgeLOFO10Balance", "Tissue")

# Arrange the panels; the NULL leaves an empty grid cell so F and G start
# a new row, matching the original labelling c('A'..'E', "", 'F', 'G').
prow <- plot_grid(p1, p2, p3, p4, p5, NULL, p6, p7,
                  labels = c('A', 'B', 'C', 'D', 'E', "", 'F', 'G'),
                  label_size = 15, ncol = 3)
prow
|
02e14cd03beea97cdf0e1340edd05942d9ba749d
|
c2073992efdb252ffca0bb25e1458889fcd31491
|
/man/rollup.pct.Rd
|
767377d0f7221770060f9891e03c0b55e47448aa
|
[
"MIT"
] |
permissive
|
HKCaesar/ejanalysis
|
36d834e969a040716cafd57d8ac5a78c4821a860
|
258237c0489b68a080c1c9bd6859df21511bb78c
|
refs/heads/master
| 2021-07-24T12:26:00.232791
| 2017-11-04T23:10:24
| 2017-11-04T23:10:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 766
|
rd
|
rollup.pct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rollup.pct.R
\name{rollup.pct}
\alias{rollup.pct}
\title{Calculate a/b for each Subset}
\usage{
rollup.pct(a, b, zone)
}
\arguments{
\item{a}{Required numeric vector, numerator}
\item{b}{Required numeric vector, denominator. Same length as a.}
\item{zone}{Optional, vector to group by. Same length as a and b.}
}
\value{
Returns a table with a/b calculated within each zone.
}
\description{
Uses data.table package to quickly calculate
ratio of a/b within each subset of a dataset (e.g., by zone).
This will be superseded by \code{\link{rollup}} once that is completed.
}
\examples{
pre1960=1:100; builtunits=rep(c(10, 100),50); zone=rep(c('NY','MA'),50)
rollup.pct(pre1960, builtunits, zone)
}
|
e8d5d37ee1dacce54ecd5e8023173f5ab53f3eb3
|
1aac8eda0a0453555fd141a4e280fbbb42e2a45f
|
/man/population.Rd
|
8ed2a7d0d278eedcb612e6fce87499908272713f
|
[] |
no_license
|
cran/edpclient
|
9d299cf54391dfe0694c3fbe81b30acc150d63b4
|
97f16945da7ab31aac6d776e18386917ebecc1cc
|
refs/heads/master
| 2021-01-22T10:27:12.548768
| 2018-04-25T02:11:01
| 2018-04-25T02:11:01
| 92,644,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
rd
|
population.Rd
|
\name{population}
\alias{population}
\alias{is.population}
\title{EDP population objects}
\description{Create or test for \code{population} objects.}
\usage{
population(sess, pid)
is.population(x)
}
\arguments{
\item{sess}{an \code{edp_session} from \code{edp_session(...)}}
\item{pid}{a population id, a length-one character vector starting with "p-"}
\item{x}{object to be tested}
}
\value{
\code{population} creates an EDP population object. \code{is.population}
returns a logical indicating whether the argument came from \code{population}.
}
\seealso{
\code{\link{edp_session}}, \code{\link{populations}}
}
\examples{
\dontrun{p <- population(sess, "p-abcde0123456789")}
\dontrun{is.population(p) # TRUE}
}
|
6e196ea6dcafb0a4fc146be10bc11a43a4fa3493
|
005d2558b95d14b8829eb612e2446b5e420501cb
|
/plot3.R
|
6044a59721f52387b227bc1c143e0df9ce88e658
|
[] |
no_license
|
kzellman/ExData_Plotting1
|
6b5ba6134f055217090f76d611f9c529a454022b
|
0f708d6ae0babbdcee4a59b38d44842c3e2cafbe
|
refs/heads/master
| 2021-01-17T23:13:39.272148
| 2014-08-10T19:10:46
| 2014-08-10T19:10:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 849
|
r
|
plot3.R
|
# Course project plot 3: overlay the three energy sub-metering series for
# 2007-02-01 and 2007-02-02 and save the figure as plot3.png (480x480).

# Read and subset the power data while loading, using the sqldf package,
# so only the two target days are ever held in memory.
# NOTE(review): require() returns FALSE instead of erroring when sqldf is
# missing; library() would fail loudly.
require("sqldf")
sql <- "SELECT * from file WHERE Date = '1/2/2007' OR Date = '2/2/2007'"
txt <- "household_power_consumption.txt"
power <- read.csv2.sql(txt, sql)
# Combine date and time into a single POSIX timestamp for the x-axis
power$DateTime <- paste(power$Date, power$Time)
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
power$DateTime <- strptime(power$DateTime, "%d/%m/%Y %H:%M:%S")
# Open the PNG device, draw the three series, add the legend
png(filename = "plot3.png", width = 480, height = 480)
plot(power$DateTime, power$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", main = "")
lines(power$DateTime, power$Sub_metering_2, col = "red")
lines(power$DateTime, power$Sub_metering_3, col = "blue")
legend("topright", lty = 1, legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col = c("black","red","blue"))
# Close the device so the file is written
dev.off()
|
53f81c6380fcfdcca996f4f04382ee4a5191f1da
|
a2c4863bc45b4bf9e0a0f08f8b612fd40aca6a53
|
/R/zzz.R
|
3a1c5c912de48e582da1754f2a19e24b11c54254
|
[
"MIT"
] |
permissive
|
pfistfl/mlr3learners
|
69bed845a414a0c51ffa848f8f4167f4e47270bb
|
88ee2b85a057fc6f764880631c14d7b9fac46930
|
refs/heads/master
| 2020-06-14T05:25:55.601031
| 2019-07-02T11:42:49
| 2019-07-02T11:42:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,388
|
r
|
zzz.R
|
#' @rawNamespace import(data.table, except = transpose)
#' @import paradox
#' @import mlr3misc
#' @importFrom R6 R6Class
#' @importFrom mlr3 mlr_learners LearnerClassif LearnerRegr
"_PACKAGE"
# Register this package's learners with mlr3's global learner dictionary.
register_mlr3 <- function() {
  dictionary <- utils::getFromNamespace("mlr_learners", ns = "mlr3")

  constructors <- list(
    # classification learners
    "classif.glmnet"  = LearnerClassifGlmnet,
    "classif.kknn"    = LearnerClassifKKNN,
    "classif.lda"     = LearnerClassifLDA,
    "classif.log_reg" = LearnerClassifLogReg,
    "classif.ranger"  = LearnerClassifRanger,
    "classif.svm"     = LearnerClassifSVM,
    "classif.xgboost" = LearnerClassifXgboost,
    # regression learners
    "regr.glmnet"     = LearnerRegrGlmnet,
    "regr.kknn"       = LearnerRegrKKNN,
    "regr.km"         = LearnerRegrKM,
    "regr.lm"         = LearnerRegrLM,
    "regr.ranger"     = LearnerRegrRanger,
    "regr.svm"        = LearnerRegrSVM,
    "regr.xgboost"    = LearnerRegrXgboost
  )

  # Register in the same order as the original one-call-per-line version.
  for (key in names(constructors)) {
    dictionary$add(key, constructors[[key]])
  }
}
# Package load hook: register this package's learners immediately, and also
# re-register whenever mlr3 itself is (re)loaded, so the dictionary entries
# survive an mlr3 reload.
.onLoad = function(libname, pkgname) {
  # nocov start
  register_mlr3()
  setHook(packageEvent("mlr3", "onLoad"), function(...) register_mlr3(), action = "append")
} # nocov end
# Package unload hook: remove only the hook(s) this package appended to
# mlr3's onLoad event, leaving hooks installed by other packages in place.
.onUnload = function(libpath) {
  # nocov start
  event = packageEvent("mlr3", "onLoad")
  hooks = getHook(event)
  # Each hook closure carries its defining package's name in its enclosing
  # environment; keep every hook not installed by mlr3learners.
  pkgname = vapply(hooks, function(x) environment(x)$pkgname, NA_character_)
  setHook(event, hooks[pkgname != "mlr3learners"], action = "replace")
} # nocov end
|
441abd745ae4a02be85925f9ab3b9a834c432b08
|
04c1a390bd446417d62ac8ec36d0d33b3b1d8445
|
/Modell Evaluation.R
|
0db3c1f271399beafb74fd9b1c2acb4e8b8d3fcb
|
[] |
no_license
|
dschradick/R-Toolbox
|
3a54990b7901066245160358c94b6a0f33bd80e2
|
01a207f788e191c164ac469772dfd328afcf564e
|
refs/heads/master
| 2021-10-23T19:15:37.493508
| 2018-06-08T05:38:53
| 2018-06-08T05:38:53
| 93,675,461
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,133
|
r
|
Modell Evaluation.R
|
########## MODEL EVALUATION
# Three basic approaches:
# 1. Validation set
# 2. LOOCV (leave-one-out cross-validation)
# 3. k-fold cross-validation

library(ISLR) # datasets (Auto)
library(dplyr)
library(boot) # cv.glm

## Validation-set approach
# NOTE(review): attach() is discouraged -- it puts Auto's columns on the
# search path; the mean() call below relies on the attached `mpg`.
attach(Auto)
set.seed(1)
number_of_observations = nrow(Auto)
train = sample(number_of_observations,number_of_observations/2) # draw training indices
lm.fit <- lm(mpg ~ horsepower, data = Auto, subset = train)
# Test MSE on the held-out half
mean((mpg - predict(lm.fit,Auto))[-train]^2)

## LOOCV approach
glm.fit <- glm(mpg ~ horsepower, data = Auto) # no family argument => same fit as lm
coef(glm.fit)
cv.err <- cv.glm(Auto, glm.fit) # no K argument => LOOCV
cv.err$delta # first = CV estimate, second = adjusted CV estimate (bias correction when not LOOCV)

## K-fold cross-validation approach
cv.err <- cv.glm(Auto, glm.fit, K=2) # K = 2 folds (the original comment incorrectly said 5)
cv.err$delta # first = CV estimate, second = adjusted CV estimate (bias correction when not LOOCV)

# K-fold is then fast enough to compare several polynomial degrees
set.seed(17)
cv.error.10=rep(0,10)
for (i in 1:10){
  glm.fit=glm(mpg~poly(horsepower,i),data=Auto)
  cv.error.10[i]=cv.glm(Auto,glm.fit,K=10)$delta[1]
}
cv.error.10
|
de8a31eeb3431d91596cbb05c623b4dbbb2e2067
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/VGAM/examples/vsmooth.spline.Rd.R
|
02ae553195edda69366f400c80913a2d8f4e08a6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,353
|
r
|
vsmooth.spline.Rd.R
|
# Extracted example code for VGAM::vsmooth.spline (vector cubic smoothing
# spline); auto-generated from the package's Rd file. The "## Not run"
# markers delimit chunks wrapped in \dontrun in the original help page and
# are kept verbatim.
library(VGAM)

### Name: vsmooth.spline
### Title: Vector Cubic Smoothing Spline
### Aliases: vsmooth.spline
### Keywords: regression smooth

### ** Examples

nn <- 20; x <- 2 + 5*(nn:1)/nn
x[2:4] <- x[5:7] # Allow duplication
y1 <- sin(x) + rnorm(nn, sd = 0.13)
y2 <- cos(x) + rnorm(nn, sd = 0.13)
y3 <- 1 + sin(x) + rnorm(nn, sd = 0.13) # Run this for constraints
y <- cbind(y1, y2, y3)
ww <- cbind(rep(3, nn), 4, (1:nn)/nn)
(fit <- vsmooth.spline(x, y, w = ww, df = 5))
## Not run:
##D plot(fit) # The 1st and 3rd functions do not differ by a constant
## End(Not run)

mat <- matrix(c(1,0,1, 0,1,0), 3, 2)
(fit2 <- vsmooth.spline(x, y, w = ww, df = 5, i.constr = mat,
                        x.constr = mat))
# The 1st and 3rd functions do differ by a constant:
mycols <- c("orange", "blue", "orange")
## Not run: plot(fit2, lcol = mycols, pcol = mycols, las = 1)

p <- predict(fit, x = model.matrix(fit, type = "lm"), deriv = 0)
max(abs(depvar(fit) - with(p, y))) # Should be 0; and fit@y is not good

par(mfrow = c(3, 1))
ux <- seq(1, 8, len = 100)
for (dd in 1:3) {
  pp <- predict(fit, x = ux, deriv = dd)
## Not run:
##D with(pp, matplot(x, y, type = "l", main = paste("deriv =", dd),
##D                  lwd = 2, ylab = "", cex.axis = 1.5,
##D                  cex.lab = 1.5, cex.main = 1.5))
## End(Not run)
}
|
a218a07bf05150ba7d0eb0441a327efbf72ba3b3
|
8238d31d16c0ce7737c598ca1dec688531e29098
|
/run_analysis.R
|
511dea1411980ebcb4347948a45270f618303dcc
|
[] |
no_license
|
dimuthuat/GettingandCleaning-
|
9db3ea93213f9bbe520c95b3f9bbb0cf7cafda50
|
5cd6ec0a5d114780a4671701d704126bb635bfa6
|
refs/heads/master
| 2020-07-10T10:48:47.087534
| 2019-08-25T12:40:14
| 2019-08-25T12:40:14
| 204,245,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,242
|
r
|
run_analysis.R
|
# Getting and Cleaning Data final project - JHU on Coursera
# Author: Dimuthu Attanayake
# Downloads the UCI HAR smartphone dataset and produces a tidy summary:
#   1. Merge the training and the test sets into one data set.
#   2. Extract only mean and standard deviation measurements.
#   3. Use descriptive activity names for the activities.
#   4. Label the data set with descriptive variable names.
#   5. Create a tidy data set with the average of each variable for each
#      activity and each subject, written to FinalData.txt.

# Download and unzip the raw data
library(RCurl)
filePath <- getwd()
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
              file.path(filePath, "excerData.zip"))
unzip("excerData.zip")
# BUG FIX: the original called head(excerData) here, but no object named
# excerData is ever created (only the zip file is downloaded), so the
# script stopped with an error at this point. That call, and the other
# exploratory whole-data-frame print calls, have been removed.

# Load activity labels & feature names
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("ActivityNo","ActivityName"))
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("CodeNo","featureName"))

# Load test data: subject ids, feature measurements, activity codes
subjectTest <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subjectNames")
Xtest <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$featureName)
Ytest <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "CodeNo")
Test <- cbind(subjectTest, Xtest, Ytest)

# Load train data the same way
subjectTrain <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subjectNames")
Xtrain <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$featureName)
Ytrain <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "CodeNo")
Train <- cbind(subjectTrain, Xtrain, Ytrain)

# 1. Merge the training and the test sets to create one data set.
mergeData <- rbind(Test, Train)

# 2. Extract only the measurements on the mean and standard deviation.
library(dplyr)
mean_sd <- mergeData %>% select(subjectNames, CodeNo, contains("mean"), contains("std"))

# 3. Use descriptive activity names to name the activities.
# NOTE(review): descriptiveNames is never joined back into the data, and
# step 4 renames column 2 of mergeData (the first feature column -- the
# activity code is the LAST column), so the downstream grouping column
# looks suspect; verify against the assignment rubric before relying on
# FinalData.txt.
descriptiveNames <- activityLabels[mean_sd$CodeNo, 2]

# 4. Label the data set with descriptive variable names.
names(mergeData)[2] <- "activityNames"
names(mergeData) <- gsub("BodyBody", "Body", gsub("^t", "Time", gsub("^f", "Frequency", gsub("Mag", "Magnitude", names(mergeData)))))

# 5. Average of each variable for each activity and each subject.
FinalData <- mergeData %>%
  group_by(subjectNames, activityNames) %>%
  summarise_all(mean) # funs() is deprecated; pass the function directly
write.table(FinalData, "FinalData.txt", row.name = FALSE)
str(FinalData)
|
d1b8287c139342af17130e80d6ec236bad424073
|
47d95656f7bc9789823557111875421162b99025
|
/Misk_shiny_intro/ui.R
|
15b722eeae0a7f084f9f41b57058ab31b1d7de89
|
[] |
no_license
|
Safa721/Misk_Shiny
|
bb5d88dffe09f566be8ecc5e9a1e0a8b8ad2c33d
|
cf22c6c1a5ea8332eb7a0d50d5ed115bf10e3448
|
refs/heads/main
| 2023-01-28T05:01:01.814899
| 2020-12-03T13:48:15
| 2020-12-03T13:48:15
| 318,205,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,544
|
r
|
ui.R
|
# UI definition: a navbarPage walking through shiny tutorial content
# (Markdown syntax, a widget gallery, and shiny basics). The large
# commented-out sections are earlier drafts kept by the author.
shinyUI(
  navbarPage(theme = shinytheme("yeti"),
             id = 'main',
             title = "Developing Data Products",
             # Landing banner shown on every page
             fluidRow(title = 'Start', width=12, align = "center",
                      tags$body(tags$p(style = "padding: 30px; width:822px",
                                       align = 'left',
                                       "Welcome!",
                                       tags$br(), tags$br(),
                                       'This application is a go-to source for getting you up-and-running with interactive shiny applications in R.',
                                       tags$br(), tags$br(),
                                       'Misk Academy, Data Science Immersive 2020'
                      ))
             ),
             # tabPanel("", ),
             ################################################################################ Distributions Menu
             ################################################################################
             ################################################################################
             ################################################################################
             # navbarMenu("Version Control",
             #            tabPanel("GitHub"),
             #            tabPanel("Command Line GitHub")),
             # Markdown tutorial menu
             navbarMenu("Markdown",
                        tabPanel("Basic Syntax",
                                 includeMarkdown("MarkdownBasics.md"),
                                 includeMarkdown("InteractiveDocument.md")),
                        # Gallery of standard shiny input widgets, three per row
                        tabPanel("Widgets",
                                 fluidPage(
                                   titlePanel("Widget Examples"),
                                   fluidRow(
                                     column(4,
                                            h3("Action Button"),
                                            # renderPrint("hello"),
                                            actionButton("action", label = "Execute")
                                            # verbatimTextOutput("actionbuttonOutput")
                                     ),
                                     column(4,
                                            h3("Single Checkbox"),
                                            checkboxInput("checkbox", label = "Show points", value = TRUE),
                                            verbatimTextOutput("singlecheckOutput")
                                     ),
                                     column(4,
                                            h3("Checkbox group"),
                                            checkboxGroupInput("checkGroup", label = "Select options", choices = list("Carbs" = 1, "Proteins" = 2, "Fat" = 3), selected = 1),
                                            verbatimTextOutput("checkgroupOutput")
                                     )
                                   ),
                                   fluidRow(
                                     column(4,
                                            h3("Radio buttons"),
                                            radioButtons("radio", label = "Selection options",
                                                         choices = list("Carbs" = 1, "Proteins" = 2, "Fat" = 3),
                                                         selected = 1)
                                     ),
                                     column(4,
                                            h3("Select box"),
                                            selectInput("select", label = "Select options",
                                                        choices = list("Carbs" = 1, "Proteins" = 2, "Fat" = 3),
                                                        selected = 1)
                                     ),
                                     column(4,
                                            h3("Slider"),
                                            sliderInput("slider1", label = "Pick a number", min = 0,
                                                        max = 100, value = 50)
                                     )
                                   ),
                                   fluidRow(
                                     column(4,
                                            h3("Slider range"),
                                            sliderInput("slider2", label = "Pick a number range", min = 0,
                                                        max = 100, value = c(40, 60))
                                     ),
                                     column(4,
                                            h3("Text input"),
                                            textInput("text", label = "ID value:", value = "Enter text...")
                                     ),
                                     column(4,
                                            h3("Date Input"),
                                            dateInput("date", label = "Choose date", value = "2014-01-01")
                                     )
                                   ),
                                   fluidRow(
                                     column(4,
                                            h3("Date Range"),
                                            dateRangeInput("dates", label = "Choose date range")
                                     ),
                                     column(4,
                                            h3("File input"),
                                            fileInput("file", label = "File input")
                                     ),
                                     column(4,
                                            h3("Numeric input"),
                                            numericInput("num", label = "Numeric input", value = 1)
                                     )
                                   )
                                 ),
                                 includeMarkdown("widgetsTable.md")
                        ),
                        tabPanel("Render output functions",
                                 includeMarkdown("renderTable.md"))
                        # Commented-out draft of an "Interactive documents" tab:
                        # tabPanel("Interactive documents",
                        #          column(1),
                        #          column(5,br(),br(),br(),
                        #                 withMathJax(p("This application allows users to perform either a", code("one-sample t-test",style="color:navy"),
                        #                               "or a", code("two-sample t-test",style="color:navy"),". A one-sample t-test focuses on comparing the average of a
                        #             single quantitative variable to a hypothesized value, while a two-sample t-test
                        #             focuses on comparing the difference in averages of a quantitative variable between two groups to a hypothesized value. In
                        #             both scenarios, the purpose of the hypothesis test is to determine how likely are the observed results or any more extreme results,
                        #             under the assumption that the null hypothesis is true. This is known as a", strong("p-value.")),
                        #                   p("In most data analyses, the population mean(s) along with the population standard deviation(s) are unknown.
                        #             Therefore, the", strong("t-test"), "is used instead of a z-test. The", strong("t-statistic"), "can be calculated to determine the p-value,
                        #             by comparing it to the", strong("t-distribution"), "with a", strong("specified degrees of freedom."), "In this scenario, the sample standard deviation(s) replaces the population standard deviation(s) to yield
                        #             the", strong("standard error"), "(an estimate of the true standard deviation) of the", strong("sampling distribution.")))),
                        #
                        #          column(5,br(),br(),br(),
                        #
                        #                 wellPanel(
                        #                   # includeMarkdown("InteractiveDocument.md"),
                        #                   # code("inputPanel({
                        #                   #   radioButtons('shape', 'Shape',
                        #                   #                c('Circle, no outline' = 16,
                        #                   #                 'Circle, with outline' = 19))
                        #                   # })")
                        #                   code("code regular", style="color:black"),
                        #
                        #                   code("code, stlye blue", style="color:navy"),
                        #                   p(HTML("<ul>
                        #                          <li type=square> the parameter of interest is the population mean, μ
                        #                          <li type=square>"),
                        #                     p(),
                        #                     p("t-statistic = \\(\\frac{\\bar x -\\mu_0}{s_{x}/\\sqrt{n}}\\)"),
                        #                     HTML("</ul>")),
                        #                   br(),
                        #                   code("Two-sample t-test:",style="color:navy"),
                        #                   p(HTML("<ul>
                        #                          <li type=square> the parameter of interest is the difference between
                        #                          the two population means, μ<sub>1</sub>-μ<sub>2</sub>
                        #                          <li type=square>"),
                        #                     p(),
                        #                     p("t-statistic = \\(\\frac{(\\bar x_1 - \\bar x_2) -(\\mu_1-\\mu_2)}
                        #                       {\\sqrt{\\frac{s_{1}^2}{n_1} + \\frac{s_{2}^2}{n_2}}}\\)"),
                        #                     HTML("</ul>")))),
                        #
                        #          column(1))
             ),
             # navbarMenu("New",
             #            tabPanel("new",
             #                     includeMarkdown("learnr.Rmd")),
             #            tabPanel("new mini",
             #                     includeMarkdown("learnr_mini.Rmd"))),
             # Shiny fundamentals menu (each tab renders a markdown page)
             navbarMenu("Shiny Basics",
                        tabPanel("Shiny Parts 1",
                                 includeMarkdown("TypesofShiny.md")),
                        tabPanel("Shiny Parts 2",
                                 includeMarkdown("TypesofShiny2.md")),
                        tabPanel("App File Formats",
                                 includeMarkdown("AppFileFormats.md")),
                        tabPanel("Download Handlers",
                                 includeMarkdown("TypesofShiny3.md")),
                        tabPanel("App Layout Guide",
                                 includeMarkdown("AppLayoutGuide.md"))#,
                        # tabPanel("Reactive programming"),
                        # tabPanel("Basic Commands",
                        #          actionButton("action", label = "Action"),
                        #          hr(),
                        #          fluidRow(column(2, verbatimTextOutput("value")))),
                        # tabPanel("Separate UI and server files"),
                        # tabPanel("Single app file")
             ) #,
             # Commented-out draft of a "Shiny Layouts" menu:
             # navbarMenu("Shiny Layouts",
             #            tabPanel("Sidebar layout",
             #                     sidebarLayout(position = "left",
             #                                   sidebarPanel(
             #                                     sliderInput(inputId = "bins",
             #                                                 label = "Number of bins:",
             #                                                 min = 1,
             #                                                 max = 50,
             #                                                 value = 30)
             #                                   ),
             #
             #                                   mainPanel(
             #                                     plotOutput("distPlot")
             #                                   )
             #
             #                     )
             #            ), # end sidebarLayout
             #            tabPanel("Gridlayout",
             #                     fluidPage(
             #
             #                       titlePanel("Hello Shiny!"),
             #
             #                       fluidRow(
             #
             #                         column(4,
             #                                wellPanel(
             #                                  sliderInput("obs", "Number of observations:",
             #                                              min = 1, max = 1000, value = 500)
             #                                )
             #                         ),
             #
             #                         column(8,
             #                                plotOutput("distPlot")
             #                         )
             #                       )
             #                     )), # End Grid layout
             #            tabPanel("Dashboard Layout",
             #                     dashboardPage(
             #                       dashboardHeader(title = "Basic dashboard"),
             #                       dashboardSidebar(
             #                         sidebarMenu(
             #                           menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")),
             #                           menuItem("Widgets", tabName = "widgets", icon = icon("th"))
             #                         )
             #                       ),
             #                       dashboardBody(
             #                         tabItems(
             #                           # First tab content
             #                           tabItem(tabName = "dashboard",
             #                                   fluidRow(
             #                                     column(4, plotOutput("plotHIST")),
             #                                     column(4, sliderInput("slider", "Number of observations:", 1, 100, 50))
             #                                   )
             #                           ),
             #
             #                           # Second tab content
             #                           tabItem(tabName = "widgets",
             #                                   h2("Widgets tab content")
             #                           )
             #                         )
             #                       )
             #                     )
             #            ),
             #            tabPanel("Tabsets"),
             #            tabPanel("Navlists"),
             #            tabPanel("Navbar Pages")
             # )
  ) # End navbarPage
)
|
becb0c8cd0f3d9c652f01a675eab668caa7c5295
|
da7f013d52e65313d25798d075b29733b218a64c
|
/1_Bollywood_Dataset_study.R
|
e451192dfb400af0af51c09d53c44c6f2b3a338b
|
[] |
no_license
|
KrishnaKuyate/Bollywood-Movies-Data-Analysis
|
6b1a0af39853f6fea213a66fac90ab736be79644
|
3d2239ca7b4b79ea86f4a5bc1e44b2423ccf558f
|
refs/heads/master
| 2020-04-04T16:22:55.919791
| 2018-11-22T19:30:08
| 2018-11-22T19:30:08
| 156,075,256
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,080
|
r
|
1_Bollywood_Dataset_study.R
|
#$$$$$$$$$$$$$$$$ Bollywood Movie Data Analysis - 2015 $$$$$$$$$$$$$$$#

#% Part 1: decorative header image -----------------------------------
# FIX: replaced right-assignment (`->`) with conventional `<-` throughout.
library(magick)
img1 <- image_read("Wallpaper.jpg")
img1 <- image_resize(img1, "800x800")
image_oilpaint(img1, radius = 2)

## Import packages and the data file
library(readr)
Bollywood_Movie_Data <- read_csv("Bollywood_Movie_Data.csv")
View(Bollywood_Movie_Data)

#% Part 2: understand the data structure -----------------------------
## Convert the read_csv tibble into a plain data frame
Bollywood_Movie_Data_Df <- as.data.frame(Bollywood_Movie_Data)

## Number of variables (columns) in the dataset
paste("No. of variable in 'Bollywood_Movie_Data ' dataset :",ncol(Bollywood_Movie_Data_Df))

## Number of observations (rows) in the dataset
paste("No. of Observation in 'Bollywood_Movie_Data ' dataset :",nrow(Bollywood_Movie_Data_Df))

## Number of distinct movies with data present (NA titles dropped)
Movie_List <- na.omit(Bollywood_Movie_Data_Df$Movie_Name)
paste("No. of movies data present 'Bollywood_Movie_Data ' dataset :",length(unique(Movie_List)))

## Structure and summary of the dataset
str(Bollywood_Movie_Data_Df)
summary(Bollywood_Movie_Data_Df)
|
5a8864da799056e390d9e655ffa8b8f644f252c2
|
c9ad372b911661fbc32815ae3732661d6e0b78fe
|
/analysis/Brook Trout Project/Occupancy/RMark/Brown Trout/BRT_RMark_DF_construct.R
|
63643b0c295abda45b23cb57b6603f9e2e83f154
|
[] |
no_license
|
EEOB590A-Fall-2019/BKelly_Fishes
|
79348981dc04a094915e249db30864b806b0eae9
|
1379021c4fb52358bcc07e0963f2a052febb3f75
|
refs/heads/master
| 2021-06-25T17:09:20.996152
| 2021-03-29T03:18:53
| 2021-03-29T03:18:53
| 205,415,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,531
|
r
|
BRT_RMark_DF_construct.R
|
##############################################################################################################################
# Create an RMARK ready dataframe for Brown Trout
# Combine current Environmental Covariates and Brown Trout Encounter history
# > trim covariates to those with biologically/ecologically relevant a priori hypotheses
# > run correlation tests on covariates to know which predictor variables should and should not be included wthin same model
##############################################################################################################################
library(tidyverse)
library(skimr)
library(corrplot)
#setwd("C:/Users/bbkelly/Documents/Brook Trout_Brett/BKelly_Fishes_GithubRepos")
#read in Brown Trout encounter history and environmental covariates
brt <- read_csv("Data/Thesis/Tidy/BRT_ch_data.csv", col_names = T)
covars <- read_csv("Data/Thesis/Tidy/enviro_tidy.csv", col_names = T)
spc <- read_csv("Data/Thesis/Raw/All Years/Catchment_Attributes.csv", col_names = T)
#----------------------------------------------------------------------
#join and remove identifiers and covariates unrelated to hypotheses
names(covars)
# Build a unique site key (HUC8_Site), keep the effort and habitat covariates,
# and give the three effort columns clearer names.
covars2 <- covars %>%
unite(newID, c(HUC8, Site), sep = "_", remove = T) %>%
select(newID, t1_eff, t2_eff, t3_eff, avgT, MAXT, MEANT, RNGT, avwid, avdep, mFlow,
pctrun, pctslow, pctBrBnk, HAiFLS_alt, HAiFLS_for, HAiFLS_nat) %>%
rename(effort1 = t1_eff, effort2 = t2_eff, effort3 = t3_eff)
names(spc)
# Catchment-scale attributes: area, slope, and crossings per km^2 (Cross_Cat);
# two sites are excluded by newID.
spc2 <- spc %>%
unite(newID, c(HUC8, Site), sep = "_", remove = T) %>%
select(newID, POLY_AREA, Avg_Percen, Count_2) %>%
rename(Area_km2=POLY_AREA, AvgSlope=Avg_Percen) %>%
mutate(Cross_Cat=(Count_2/Area_km2)) %>%
select(-Count_2) %>%
filter(newID != "UPI_29" & newID != "YEL_33")
# Manual ID corrections so spc2 keys join with the covariate/encounter data.
# NOTE(review): these are position-based edits -- they silently break if the
# upstream CSV's row order ever changes; confirm before re-running.
spc2[9,1] <- "UPI_57b"
spc2[12,1] <- "UPI_14b"
spc2[26,1] <- "UPI_78b"
spc2[57,1] <- "YEL_118b"
spc2[67,1] <- "UPI_32b"
spc2[104,1] <- "YEL_97b"
spc2[132,1] <- "LMAQ_28b"
spc2[129,1] <- "LMAQ_48"
envc <- full_join(covars2, spc2, by = "newID")
# Add a quadratic term for altered land use.
envc <- envc %>%
mutate(HAiFLS_al2=(HAiFLS_alt^2))
########################################################
#merge ch and covariate data
brown <- full_join(brt, envc, by = "newID")
skim(brown)
########################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#inspect correlations between covariates
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Candidate occupancy (psi) covariates: columns 7:22 of the merged frame.
psi.vars <- brown[,7:22]
#correlation test
# NOTE(review): this shadows base::c for the remainder of the script.
c <- cor(psi.vars)
head(round(c,2))
#round down
cround <- round(c,3)
#visualize these correlations
corrplot(c, type = "upper", order = "hclust", col = c("black", "white"),
bg="lightblue")
#visualize these correlations
corrplot(c, method = "number")
# mat : is a matrix of data
# ... : further arguments to pass to the native R cor.test function
# Pairwise cor.test() p-values for every column pair.
# NOTE(review): the first statement overwrites the `mat` argument with the
# global correlation matrix `c`, so the argument is effectively ignored --
# confirm whether that is intended.
cor.mtest <- function(mat, ...) {
mat <- as.matrix(c)
n <- ncol(mat)
p.mat<- matrix(NA, n, n)
diag(p.mat) <- 0
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
tmp <- cor.test(mat[, i], mat[, j], ...)
p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
}
}
colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
p.mat
}
# matrix of the p-value of the correlation
# NOTE(review): p.mat is assigned the correlation matrix itself rather than
# cor.mtest() output, so the sig.level blanking below operates on correlation
# values, not p-values -- looks like a bug; confirm.
p.mat <- c
#correlogram
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(c, method="color", col=col(200),
type="upper", order="hclust",
addCoef.col = "black", # Add coefficient of correlation
tl.col="black", tl.srt=45, #Text label color and rotation
# Combine with significance
p.mat = p.mat, sig.level = 0.01, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag=FALSE
)
pairs(psi.vars)
############################################################################
# Tidy up dataframe and add in new spatial vars @ cat scale #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Drop the temperature-range covariate (RNGT) from the working frame.
brown <- brown %>%
select(-RNGT)
newstuff <- read.csv("Data/Thesis/Tidy/Spat_Atts_Cats.csv", header=T)
skim(newstuff)
brown2 <- left_join(brown, newstuff, by="newID")
summary(brown2[,19:26])
names(brown2)
# Keep the freshly joined (.y) spatial attributes, drop the stale (.x) copies.
brown3 <- brown2 %>%
select(-Area_km2.x, -AvgSlope.x, -Cross_Cat.x, -EFac_Cat) %>%
rename(Area_km2 = Area_km2.y, AvgSlope = AvgSlope.y, Cross_Cat = Cross_Cat.y)
names(brown3)
###########################################################################
#Write CSV's
#RMARK dataframe
write.csv(brown3,"Data/Thesis/Tidy/BRT_RMark.csv", row.names = F)
|
fa5a7b14ab98f53dac357a13885feb23b4583fbc
|
4acd07427b91a4d867a72be4d97f36acbb00ca3b
|
/Scripts/Wilcox_LogReg.R
|
404a7439ea86d7d6d3b08adb55d529d455c2cc67
|
[
"MIT"
] |
permissive
|
JonathanVSV/Conserved-vs-Degraded-Forest
|
94c4d1a69129403e4217a819f7d7235ebd981141
|
549dcb0144f32bf5582f9d6abb92823d3df969b8
|
refs/heads/master
| 2022-12-28T00:25:35.103927
| 2020-09-29T03:38:51
| 2020-09-29T03:38:51
| 285,399,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,957
|
r
|
Wilcox_LogReg.R
|
# Wilcoxon tests and per-attribute logistic regressions comparing conserved
# vs. degraded forest plots.
# Packages that are needed--------------------------------
library(epade)
library(MASS) # Check out for conflict with tidyverse::select
library(tidyverse)
library(rstatix)
library(broom)
# Read and prepare data------------------------------------
df <- read.csv("plots_data.csv", stringsAsFactors = T)
## Change data form wide to long format
df_long <- df %>%
# Indicate column names to use
# NOTE(review): Mean_height is listed twice here and in pivot_longer below --
# duplicates are deduplicated by select()/pivot_longer, but confirm intent.
dplyr::select(Plot, Forest_type,Canopy_cover, BA, Mean_height, AGB, Mean_height, Density_branches, Density_trees) %>%
# Prepare data: long format. Indicate which columns are going into long format
pivot_longer(cols = c(Canopy_cover, BA, Mean_height, AGB, Mean_height, Density_branches, Density_trees),
names_to = "Attribute",
values_to = "value")
# Wilcoxon test---------------------------------------------
# One Wilcoxon rank-sum test per structural attribute, comparing forest types.
df_long %>%
# Group by Type of forest and Attribute
group_by(Attribute) %>%
# Change Forest_type to numeric variable
mutate_at(vars(Forest_type), as.numeric) %>%
# Wilcoxon test, pipe-friendly
wilcox_test(value ~ Forest_type)
# Logistic regression models---------------------------------------------
## Define nesting function
## Taken from: https://github.com/tidyverse/tidyr/issues/769#issuecomment-537624093
# Nest the columns selected by `vars_to_nest` into a list-column whose name is
# given as a string in `new_col` (uses rlang's := for the dynamic name).
func_nest <- function(df, vars_to_nest, new_col) {
nest(df, !!new_col := {{ vars_to_nest }})
}
## Logistic regression models
# Fit one binomial GLM (Forest_type ~ value) per attribute; dose.p(p = 0.5)
# gives the attribute value at which the predicted probability is 0.5.
df_long %>%
# Remove columns that are not going to be used
dplyr::select(-Plot) %>%
# Use func_nest to nest the data for each attribute
func_nest(-Attribute, "data_nest") %>%
# Get the glm fit, get the coefficients of glm (both intercept and slope) and threshold for the glm
mutate(fit = map(data_nest, ~ glm(.$Forest_type ~ .$value, family = "binomial")),
coef_info = map(fit, tidy),
threshold = map(fit, function(x) dose.p(x, p = 0.5)[[1]])) %>%
# Unnest the coefficient and threshold info
unnest(c(coef_info, threshold))
|
20c63de1ab60d5166b4251253048047f8c51cd9c
|
4ee9d7179b4af02d1b2efcb0f0f43f03cabc1164
|
/man/buildDotPlotDataFrame.Rd
|
183bcb3799a33314cfd79b5cee15f333dd8569ad
|
[] |
no_license
|
FrankD/NetHet_old
|
78795a58d8a0484f4773230d391c0b99b0a4e0a8
|
38a55860acd636410c98ef30b51756776455be08
|
refs/heads/master
| 2020-04-16T07:10:05.290938
| 2015-08-18T12:07:48
| 2015-08-18T12:07:48
| 21,828,742
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 952
|
rd
|
buildDotPlotDataFrame.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/netplot.R
\name{buildDotPlotDataFrame}
\alias{buildDotPlotDataFrame}
\title{Build up dataframe for plotting dot plot with ggplot2}
\usage{
buildDotPlotDataFrame(net.clustering, cluster.names, node.names)
}
\arguments{
\item{net.clustering}{Clustering}
\item{cluster.names}{Cluster names}
\item{node.names}{Node names}
}
\value{
A data frame for plotting the dotPlot with ggplot2 is returned.
Column P.Corr contains the partial correlations of each edge as a numeric,
column Mean contains the minimum mean expression of the two proteins
(e.g. if the edge is e(p1, p2), then the column contains min(mean(p1),
mean(p2))), column Edge contains the name of the edge as a character
string of the form "p1-p2" and column Type contains the cluster name of the
cluster that the edge belongs to as a character string.
}
\description{
Internal function
}
\keyword{internal}
|
8b674b5f660ec523179dd032816b94cacd1d3185
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/brms/examples/gp.Rd.R
|
36089a76270a42761a2dbf1a216f0bcdbde36648
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,239
|
r
|
gp.Rd.R
|
# Extracted example code for brms::gp() (Gaussian process terms).
# Every model-fitting line below sits inside the "Not run" block (the ##D
# prefix), so sourcing this file only attaches brms and executes nothing else.
library(brms)
### Name: gp
### Title: Set up Gaussian process terms in 'brms'
### Aliases: gp
### ** Examples
## Not run: 
##D # simulate data using the mgcv package
##D dat <- mgcv::gamSim(1, n = 30, scale = 2)
##D 
##D # fit a simple GP model
##D fit1 <- brm(y ~ gp(x2), dat, chains = 2)
##D summary(fit1)
##D me1 <- marginal_effects(fit1, nsamples = 200, spaghetti = TRUE)
##D plot(me1, ask = FALSE, points = TRUE)
##D 
##D # fit a more complicated GP model
##D fit2 <- brm(y ~ gp(x0) + x1 + gp(x2) + x3, dat, chains = 2)
##D summary(fit2)
##D me2 <- marginal_effects(fit2, nsamples = 200, spaghetti = TRUE)
##D plot(me2, ask = FALSE, points = TRUE)
##D 
##D # fit a multivariate GP model
##D fit3 <- brm(y ~ gp(x1, x2), dat, chains = 2)
##D summary(fit3)
##D me3 <- marginal_effects(fit3, nsamples = 200, spaghetti = TRUE)
##D plot(me3, ask = FALSE, points = TRUE)
##D 
##D # compare model fit
##D LOO(fit1, fit2, fit3)
##D 
##D # simulate data with a factor covariate
##D dat2 <- mgcv::gamSim(4, n = 90, scale = 2)
##D 
##D # fit separate gaussian processes for different levels of 'fac'
##D fit4 <- brm(y ~ gp(x2, by = fac), dat2, chains = 2)
##D summary(fit4)
##D plot(marginal_effects(fit4), points = TRUE)
## End(Not run)
|
13b49f5e58303de00cfec3abecdec7811f8431c5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sparkTable/examples/plot.Rd.R
|
6fe3ddaeb30d1a0351e72e88518188adf4a3974f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 380
|
r
|
plot.Rd.R
|
library(sparkTable)
### Name: plot-methods
### Title: Plot objects of class 'sparkline', 'sparkbar', 'sparkhist' or
###   'sparkbox'
### Aliases: plot plot-methods plot,sparkbar-method plot,sparkbox-method
###   plot,sparkline-method plot,sparkhist-method
### ** Examples
# Select the "Insgesamt" (total) rows of the bundled `pop` data set, build a
# sparkline from their third column, and draw it.
data(pop)
is_total <- pop[, 2] == "Insgesamt"
x <- pop[is_total, 3]
a <- newSparkLine(values = x, pointWidth = 8)
plot(a)
|
f148f843e776139e8ccf3c2b63b973f4d06ad999
|
346ee713e87ffdfe87c7b5b7d611d98da47b0f0b
|
/R/Funcs/genColors.r
|
75a53b8f1bd481f3d9faed649ce778ad62465650
|
[] |
no_license
|
s7minhas/tensor
|
e705a86008d728cac235afd6138981c91b6fc99f
|
61a94612ce8b7a227e9062cd9eed06d16b13f614
|
refs/heads/master
| 2021-01-17T15:23:38.496614
| 2019-08-23T04:03:37
| 2019-08-23T04:03:37
| 41,993,828
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
genColors.r
|
####
# Derive one color per country from its geographic position (longitude drives
# red, latitude drives green, their combination drives blue) and optionally
# draw a legend map.
# NOTE(review): relies on names defined by the calling script -- inPath,
# outPath, genCntryMap, and the loadPkg() helper; confirm they are in scope.
if(!'Y' %in% ls()){ load(paste0(inPath, "YXsm.rda")) }
# Country codes come from the first dimension of the Y array.
cntries=dimnames(Y)[[1]]
loadPkg('cshapes')
# World country polygons as of 2001-01-01; keep only countries present in Y.
cmap = wmap = cshp(date=as.Date('2001-1-1'))
wmap = wmap[which(as.character(wmap$ISO1AL3) %in% cntries),]
# Polygon centroids, reordered to match the cntries order.
coords=coordinates(wmap)
rownames(coords)=wmap$ISO1AL3
coords=coords[cntries,]
# Create colors
# Degrees -> radians, then rescale each coordinate to [0, 1].
rlon = pi*coords[,1]/180
rlat = pi*coords[,2]/180
slon = (rlon-min(rlon))/(max(rlon)-min(rlon))
slat = (rlat-min(rlat))/(max(rlat)-min(rlat))
ccols = rgb( slon^2,slat^2,(1-sqrt(slon*slat))^2)
# Generate legend map
if(genCntryMap){
# Countries outside the analysis set are drawn grey.
mapCol = ccols[match(cmap$ISO1AL3, cntries)]
mapCol[is.na(mapCol)] = 'grey'
fname=paste0(outPath, 'map.eps')
postscript(file=fname, width=8, height=4, horizontal=FALSE, onefile = FALSE, paper = "special")
plot(cmap, col=mapCol)
dev.off()
}
|
a59c54689f0d4c752020c8cc8d2215960deb7c18
|
fb4b378a10e6a909b64402571d5ed37ee5aec9ed
|
/source/code/STK3100 H2017 Exercise 8.14.R
|
f2c92a930c846ce6dad93c79cd9db7b842cd8cfb
|
[] |
no_license
|
JonasMoss/Exercises-in-GLM
|
c2282c0b908af036e3c554c3679636be58cf6fc2
|
23ee928522173125a878455e41e52023ddb85aba
|
refs/heads/master
| 2023-07-08T23:37:07.394922
| 2021-08-19T11:44:42
| 2021-08-19T11:44:42
| 294,111,545
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 774
|
r
|
STK3100 H2017 Exercise 8.14.R
|
# STK 3100, fall 2017
# Exercise 8.14
# Clean up the memory before we start.
rm(list=ls(all=TRUE))
# Load packages
library(MASS)
# Read data
homicide.data = read.table("http://www.stat.ufl.edu/~aa/glm/data/Homicides.dat", header = T)
homicide.data[,"race"] = as.factor(homicide.data[,"race"])
head(homicide.data)
table(homicide.data[,"count"], homicide.data[,"race"])
# Fit Poisson model
Poisson.model = glm(count ~ race, family = poisson, data = homicide.data)
summary(Poisson.model)
# Fit negative binomial model
negbin.model = glm.nb(count ~ race, data = homicide.data)
summary(negbin.model)
# Quasi likelihood approach.
QL.model = glm(count ~ race, family = quasi(link = "log", variance = "mu"), data = homicide.data)
summary(QL.model)
|
e4809a13fdb7f071497a4b15bc56ef029a0cbdcd
|
08a6e8e2b86a015fe6f847102bf244fc1ce18d6a
|
/4-Demography/FastStructure/plotFaststructure.rails.R
|
1c7c2f5f52e5b6a0b0d6b0cbbe187d15e3f50769
|
[] |
no_license
|
dechavezv/2nd.paper
|
40d5578aef8dfdaa07a5d9eb27c9f632f0750cd3
|
ffa6506ec062bc1442e3d0ee7325f60087ac53e1
|
refs/heads/master
| 2020-12-19T21:31:43.317719
| 2020-08-07T05:46:59
| 2020-08-07T05:46:59
| 235,857,029
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,116
|
r
|
plotFaststructure.rails.R
|
# Plot FastStructure ancestry proportions (meanQ) as stacked bars, one bar per
# individual, faceted by population, and save the manuscript figures.
# FIX: library() instead of require() -- require() returns FALSE instead of
# failing when a package is missing, which would let the script die later with
# a confusing error.
library(ggplot2)
library(reshape2)
library(RColorBrewer)

colorPal <- RColorBrewer::brewer.pal(n = 12, name = "Paired")
colors <- list(CA = colorPal[1], BAJ = colorPal[1], AK = colorPal[2], AL = colorPal[3], COM = colorPal[4], KUR = colorPal[5])

calldate <- "20200517"
data.dir <- "/u/home/d/dechavez/project-rwayne/rails.project/FASTSTRUCTURE/20200517_filtered/"
plot.dir <- "/u/home/d/dechavez/project-rwayne/rails.project/FASTSTRUCTURE/20200517_filtered/plots/"

# Number of ancestral clusters in this FastStructure run; the meanQ file has
# one column (V1..Vk) per cluster.
k <- 2
inputQ <- read.table(paste0(data.dir, "LS_joint_allchr_Annot_Mask_Filter_passingSNPs.vcf.faststructure_output.", k, ".meanQ"))
popAssignment <- read.table(paste0(data.dir, "LS_joint_allchr_Annot_Mask_Filter_passingSNPs.vcf.manual.popAssignment"))
colnames(popAssignment) <- c("sample", "population")
dim(popAssignment)
dim(inputQ)
# these are in same order so you can cbind them
combo <- cbind(inputQ, popAssignment)
# rename Baja to B*
combo$label <- as.character(combo$population)
#combo[combo$population=="St.Cruz",]$label <- "SC"
#combo[combo$population=="Isabela",]$label <- "IS"
#combo[combo$population=="Pinta",]$label <- "PI"
#combo[combo$population=="Santiago",]$label <- "SA"
head(combo)

# Long format: one row per (individual, cluster); `value` holds the ancestry
# proportion assigned to that cluster.
combo_melt <- melt(combo, id.vars = c("sample", "label", "population"))
# arrange individuals in pop order
combo_melt$pop_sample <- paste0(combo_melt$population, "_", combo_melt$sample)
combo_melt
#combo_melt$label <- factor(combo_melt$label,mylevels=c("SC","IS","PI","SA")) # order populations
#combo_melt$label <- factor(combo_melt$label,mylevels=c("St.Cruz","Isabela","Pinta","Santiago")) # order populations

plotForMs1 <- ggplot(combo_melt, aes(x = pop_sample, y = value, fill = variable)) +
  geom_bar(stat = "identity") +
  theme_bw() +
  # FIX: the original values vector repeated the name V3 four times
  # (colorPal[4..7]); with duplicated names only the first entry can match,
  # so the extras were dead, and V2 had no color at all. Deduplicated and V2
  # added, mirroring the mapping used for plotForMs2 below.
  # NOTE(review): at k = 2 only V1/V2 occur -- confirm the intended palette.
  scale_fill_manual(values = c(V1 = colorPal[1], V4 = colorPal[2], V5 = colorPal[3], V3 = colorPal[4], V2 = colorPal[5])) +
  facet_grid(~label, scales = "free_x", space = "free_x") + # free space keeps bar widths comparable across facets
  theme(panel.border = element_blank(),
        panel.background = element_blank(),
        panel.grid = element_blank(),
        panel.spacing.x = unit(0.1, "line"),
        axis.text.x = element_blank(),
        axis.ticks.x = element_blank(),
        strip.background = element_rect("transparent"),
        strip.text = element_text(size = 11)) +
  ylab("") +
  xlab("") +
  theme(legend.position = "none")
plotForMs1
ggsave(paste0(plot.dir, "FaststructurePlot.forManuscript.k.", k, ".goodColors.ALSplit.pdf"), plotForMs1, height = 4, width = 7, dpi = 300)
ggsave(paste0(plot.dir, "FaststructurePlot.forManuscript.k.", k, ".goodColors.ALSplit.png"), plotForMs1, height = 4, width = 7, device = "png", dpi = 300)
# note the rmSergioInds doesn't matter -- those inds are fine, it just downsampled COM to match sample size

plotForMs2 <- ggplot(combo_melt, aes(x = pop_sample, y = value, fill = variable)) +
  geom_bar(stat = "identity") +
  theme_bw() +
  scale_fill_manual(values = c(V1 = colorPal[1], V4 = colorPal[2], V5 = colorPal[3], V3 = colorPal[4], V2 = colorPal[5])) +
  # FIX: combo_melt has no column named "population2" (it is built above with
  # only sample/label/population/variable/value/pop_sample), so faceting on
  # population2 errored. Facet on the un-split population labels instead.
  facet_grid(~population, scales = "free_x", space = "free_x") +
  theme(panel.border = element_blank(),
        panel.background = element_blank(),
        panel.grid = element_blank(),
        panel.spacing.x = unit(0.1, "line"),
        axis.text.x = element_blank(),
        axis.text.y = element_text(size = 12),
        axis.ticks.x = element_blank(),
        strip.background = element_rect("transparent"),
        strip.text = element_text(size = 12)) +
  ylab("") +
  xlab("") +
  theme(legend.position = "none")
plotForMs2
ggsave(paste0(plot.dir, "FaststructurePlot.forManuscript.k.", k, ".goodColors.ALNotSplit.pdf"), plotForMs2, height = 4, width = 7, dpi = 300)
ggsave(paste0(plot.dir, "FaststructurePlot.forManuscript.k.", k, ".goodColors.ALNotSplit.png"), plotForMs2, height = 4, width = 7, device = "png", dpi = 300)
ggsave(paste0(plot.dir, "FaststructurePlot.forManuscript.k.", k, ".goodColors.ALNotSplit.ResizeForMS.pdf"), plotForMs2, height = 4, width = 9, device = "pdf", dpi = 300)
ggsave(paste0(plot.dir, "FaststructurePlot.forManuscript.k.", k, ".goodColors.ALNotSplit.ResizeForMS.png"), plotForMs2, height = 4, width = 9, device = "png", dpi = 300)
|
db6ea9302ac93d7d7ad5350796210c8ff177b52f
|
7e991fa8c111f28084beeb9ea2d3141f8d66ec39
|
/man/cropFeaturesToBBox.Rd
|
16f3a9b450743ac257c73be866ce34897752ef35
|
[
"MIT"
] |
permissive
|
wStockhausen/wtsGIS
|
0809ec66cdb9ec8bc189184bec79ee5528ec84d4
|
5d868b31bbd7639af3fb9871b355f5531f934f50
|
refs/heads/master
| 2023-09-01T05:18:40.003523
| 2023-08-05T00:10:22
| 2023-08-05T00:10:22
| 124,290,835
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 935
|
rd
|
cropFeaturesToBBox.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cropFeaturesToBBox.R
\name{cropFeaturesToBBox}
\alias{cropFeaturesToBBox}
\title{Crop features (a \code{sf::sf} or \code{sf::sfc} object) to a bounding box}
\usage{
cropFeaturesToBBox(obj, bbx)
}
\arguments{
\item{obj}{\itemize{
\item object of class "sf" or "sfc"
}}
\item{bbx}{\itemize{
\item object that can be converted to a sf:bbox (see details)
}}
}
\value{
\itemize{
\item object of same type as \code{obj}, but cropped to the bbox limits
}
}
\description{
Function to crop features (a \code{sf::sf} or \code{sf::sfc} object) to a bounding box.
}
\details{
\code{bbx} can be any object that can be converted to a \code{sf::bbox}
using \code{getBBox}.
}
\note{
The bounding box must be in the same coordinate system as \code{obj}. If the
coordinate reference system for \code{bbx} is undefined, it is assumed to be the same as
that for \code{obj}.
}
|
14b8ffc3ad777618ced9afa6620b089a84068dd3
|
89bd53b22672cbe74e727e8e45defc891af1052d
|
/EM/test_em.R
|
2404891d452949fa79db0b4de3ee94f78ddb6b75
|
[] |
no_license
|
hbhat4000/sdeinference
|
a62e8f5ddc6bbc913dbc8dc4c210ff30cf16143f
|
14db858c43a1b50001818399ef16e74ae926f51b
|
refs/heads/master
| 2020-04-04T05:30:05.922893
| 2018-07-11T22:59:38
| 2018-07-11T22:59:38
| 54,491,406
| 8
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,844
|
r
|
test_em.R
|
# Test script for DTQ (density tracking by quadrature) likelihood
# decompositions, checked against exact transition densities.
# Relies on functions sourced below: dtq_complete_front/back, dtq_firststep,
# dtq_laststep, dtq_internal, and transition_forward.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (it clobbers
# the caller's workspace) -- consider removing and restarting R instead.
rm(list = ls(all = TRUE))
library("matrixcalc")
# xtraj = matrix(nrow = 1, ncol = 2)
# load necessary functions
source('dtq_main.R')
source('Dtheta.R')
# DTQ discretization parameters.
k = 0.01             # spatial grid spacing
M = 800              # grid extent parameter
deltat = 1           # time between the two observations
numsteps = 50        # internal DTQ steps per observation interval
h = deltat/numsteps  # internal time step
theta = c(1, 0, 2)   # SDE parameter vector
init = 1             # observed initial state
final = 3            # observed final state
# NOTE(review): `grid` used below is expected to be defined by this sourced
# file -- confirm.
source('kolmogorov_compare.R')
# Complete-data likelihood by forward and backward DTQ sweeps; both should
# agree with the exact forward transition density.
if(numsteps >= 1)
{
completelik_front = dtq_complete_front(theta, h, k, M, numsteps, init, final)
completelik_back = dtq_complete_back(theta, h, k, M, numsteps, init, final)
# compare against exact solution
exactcompletelik = transition_forward(theta,x=final,y=init,t=deltat)
print(c(completelik_front$lik, completelik_back,exactcompletelik))
}
# First/last internal-step conditional densities; each should integrate to
# one over the grid (the sum(...)*k checks).
if(numsteps >= 2)
{
# first step should be equal to the last step if 2 steps
firststeplik = dtq_firststep(theta, h, k, M, numsteps, init, final)
ourfirststep = firststeplik / completelik_back
# check normalization
print(sum(ourfirststep)*k)
# Exact counterpart: product of the two bridging transition densities,
# normalized by the complete likelihood.
part1 = transition_forward(theta,x=grid,y=init,t=h)
part2 = transition_forward(theta,x=final,y=grid,t=(deltat-h))
exactfirststeplik = log(hadamard.prod(part1,part2)/exactcompletelik)
laststeplik = dtq_laststep(theta, h, k, M, numsteps, init, final)
ourlaststep = laststeplik / completelik_back
# check normalization
print(sum(ourlaststep)*k)
part1 = transition_forward(theta,x=grid,y=init,t=(deltat-h))
part2 = transition_forward(theta,x=final,y=grid,t=h)
exactlaststeplik = log(hadamard.prod(part1,part2)/exactcompletelik)
# par(mfrow=c(1,2))
# plot(ourfirststep)
# lines(exactfirststeplik,col='red')
# plot(ourlaststep)
# lines(exactlaststeplik,col='red')
}
#
# Interior-step joint conditional densities; each should integrate to one
# over the 2-D grid (hence the k^2 factor).
if(numsteps >= 3)
{
# numsteps = F+1
for (j in c(1:(numsteps-2))) {
internallik = dtq_internal(theta, h, k, M, numsteps, init, final, j)
internallik = internallik/completelik_back
print(c(j, sum(internallik)*k^2))
}
}
|
f845f74d47b33e61576b86b25c414e8cb214bd7a
|
7158cf060cccddf8c8c923f2d08e0c0c02a152bf
|
/practica01/practica02.R
|
f56ec4666a2ee86145c1eccffea166cd81a592b1
|
[] |
no_license
|
JCFlores93/math-statistics
|
661a50e2ea4366f7555f6299100b60d76848a548
|
70301283f268a1487a6688b0c8b10cc4d7b29f44
|
refs/heads/master
| 2020-05-19T14:40:49.875797
| 2019-06-09T19:50:25
| 2019-06-09T19:50:25
| 185,065,485
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,086
|
r
|
practica02.R
|
# Practice 02: classification tree for loan delinquency ("Mora").
library(Hmisc)
library(ggplot2)
library(scatterplot3d)
library(rgl)
library(aplpack)
library(corrplot)
library(PerformanceAnalytics)
library(fBasics)
library(gmodels)
library(rpart.plot)
library(partykit)
library(caret)
library(e1071)
library(mlr)
library(rpart)
library(caTools)  # FIX: sample.split() below comes from caTools; it was never loaded
library(dummies)  # dummy.data.frame()

# Read the training data (semicolon-separated).
# NOTE(review): encoding = "UTF-8" combined with fileEncoding = "cp932" looks
# contradictory -- confirm the real encoding of Morosidad.csv.
train <- read.csv("Morosidad.csv", sep = ';', encoding = "UTF-8", header = TRUE, fileEncoding = "cp932")
head(train)                # first 6 rows
names(train)               # variable names
str(train)                 # structure of the data
summary(train)             # basic summary
summarizeColumns(train)    # richer summary (mlr)
resumen <- data.frame(summarizeColumns(train))
dim(train)

# Inspect the distinct values of the candidate predictors.
unique(train$Nro_Veces_cob)
unique(train$Nro_Cuotas)
unique(train$Mes_pres)
unique(train$Llamada_fecha)
unique(train$Llamada_Resultado)
unique(train$Estatus)
unique(train$Tipo_contacto)
unique(train$Cod_cliente)

# FIX: plot the client-code histogram BEFORE dropping the column (the
# original called hist() on a column it had already removed, which errors).
hist(train$Cod_cliente, breaks = 100, main = "Cod_cliente", xlab = "Cod_cliente", col = "blue")

# Drop identifiers / variables not used for modelling.
# (The original repeated this drop block twice; once is enough.)
drop <- c("Llamada_fecha", "Hora", "Llamada_resultado", "Cod_cliente")
train <- train[, !(names(train) %in% drop)]

# One-hot encode the categorical variables.
dummy_data <- dummy.data.frame(train, sep = ".")
names(dummy_data)

# Remove one reference level per factor to avoid the dummy-variable trap.
drop <- c("Nro_Veces_cob.<=10", "Nro_Cuotas.>48", "Mes_pres.Marzo", "MES_2_Atraso", "Llamada_Resultado.TELF. NO CORRESPONDE", "Tipo_contacto.CNE")
dummy_data <- dummy_data[, !(names(dummy_data) %in% drop)]
summarizeColumns(dummy_data)
# FIX: the bare is.na(dummy_data) only printed a huge logical matrix;
# report the count of missing values instead.
sum(is.na(dummy_data))

# Train/test split: 2/3 for training.
set.seed(123)
split <- sample.split(dummy_data$Mora, SplitRatio = 2/3)
training_set <- subset(dummy_data, split == TRUE)
# FIX: the test set must be the complement of the training set; the original
# used split == TRUE for both, so it "evaluated" on the training data.
test_set <- subset(dummy_data, split == FALSE)

# Fit a classification tree for Mora.
classifier <- rpart(Mora ~ ., data = training_set)
library(tidyverse)

# Evaluate on the held-out test set.
y_pred <- predict(classifier, newdata = test_set)
table(test_set$Mora, y_pred)

# Visualise the fitted tree.
prp(classifier, extra = 1, type = 1)
plot(as.party(classifier))
plot(classifier)
names(classifier)

# FIX: do not name a variable `predict` -- it shadows the generic.
pred_vector <- predict(classifier, test_set, type = "vector")
|
ee04ee06bb75f4785026ebf63805cb8dab484531
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.developer.tools/R/codepipeline_interfaces.R
|
0f27e1d91a7ab15e777c169db812ec8a563d05c7
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 70,352
|
r
|
codepipeline_interfaces.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include codepipeline_service.R
NULL
# Each *_input/*_output helper below declares the AWS API shape for a
# CodePipeline operation and fills it from the supplied arguments via
# paws.common::populate().
# Request shape for AcknowledgeJob: jobId and nonce strings.
.codepipeline$acknowledge_job_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(jobId = structure(logical(0), tags = list(type = "string")), nonce = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for AcknowledgeJob: a single status string.
.codepipeline$acknowledge_job_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for AcknowledgeThirdPartyJob: jobId, nonce and clientToken.
.codepipeline$acknowledge_third_party_job_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(jobId = structure(logical(0), tags = list(type = "string")), nonce = structure(logical(0), tags = list(type = "string")), clientToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for AcknowledgeThirdPartyJob: a single status string.
.codepipeline$acknowledge_third_party_job_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codepipeline$create_custom_action_type_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(category = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string")), settings = structure(list(thirdPartyConfigurationUrl = structure(logical(0), tags = list(type = "string")), entityUrlTemplate = structure(logical(0), tags = list(type = "string")), executionUrlTemplate = structure(logical(0), tags = list(type = "string")), revisionUrlTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), configurationProperties = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), required = structure(logical(0), tags = list(type = "boolean")), key = structure(logical(0), tags = list(type = "boolean")), secret = structure(logical(0), tags = list(type = "boolean")), queryable = structure(logical(0), tags = list(type = "boolean")), description = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), inputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), outputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codepipeline$create_custom_action_type_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(actionType = structure(list(id = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), settings = structure(list(thirdPartyConfigurationUrl = structure(logical(0), tags = list(type = "string")), entityUrlTemplate = structure(logical(0), tags = list(type = "string")), executionUrlTemplate = structure(logical(0), tags = list(type = "string")), revisionUrlTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), actionConfigurationProperties = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), required = structure(logical(0), tags = list(type = "boolean")), key = structure(logical(0), tags = list(type = "boolean")), secret = structure(logical(0), tags = list(type = "boolean")), queryable = structure(logical(0), tags = list(type = "boolean")), description = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), inputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), outputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = 
"structure"))
return(populate(args, shape))
}
.codepipeline$create_pipeline_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(pipeline = structure(list(name = structure(logical(0), tags = list(type = "string")), roleArn = structure(logical(0), tags = list(type = "string")), artifactStore = structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), artifactStores = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "map")), stages = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), blockers = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), actions = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), runOrder = structure(logical(0), tags = list(type = "integer")), configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags 
= list(type = "structure"))), tags = list(type = "list")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), roleArn = structure(logical(0), tags = list(type = "string")), region = structure(logical(0), tags = list(type = "string")), namespace = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), version = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape template for CodePipeline's CreatePipeline operation.
# The nested `shape` skeleton mirrors the API response (field names plus
# wire-type tags); populate() -- defined elsewhere in this package -- fills
# it from the supplied arguments. NOTE(review): appears to be generated
# SDK code; change the generator rather than hand-editing this file.
.codepipeline$create_pipeline_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipeline = structure(list(name = structure(logical(0), tags = list(type = "string")), roleArn = structure(logical(0), tags = list(type = "string")), artifactStore = structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), artifactStores = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "map")), stages = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), blockers = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), actions = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), runOrder = structure(logical(0), tags = list(type = "integer")), configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags 
= list(type = "structure"))), tags = list(type = "list")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), roleArn = structure(logical(0), tags = list(type = "string")), region = structure(logical(0), tags = list(type = "string")), namespace = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), version = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$delete_custom_action_type_input <- function(...) {
  # Request shape template for DeleteCustomActionType: the custom action's
  # category, provider and version, all serialized as strings. populate()
  # merges the caller's arguments into the template.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      category = structure(logical(0), tags = list(type = "string")),
      provider = structure(logical(0), tags = list(type = "string")),
      version = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# DeleteCustomActionType returns no response data; the output shape is an
# empty list.
.codepipeline$delete_custom_action_type_output <- function(...) {
  list()
}
.codepipeline$delete_pipeline_input <- function(...) {
  # Request shape template for DeletePipeline: a single `name` string
  # identifying the pipeline to remove.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# DeletePipeline returns no response data; the output shape is an empty
# list.
.codepipeline$delete_pipeline_output <- function(...) {
  list()
}
.codepipeline$delete_webhook_input <- function(...) {
  # Request shape template for DeleteWebhook: the webhook's `name` string.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(params, template)
}
.codepipeline$delete_webhook_output <- function(...) {
  # DeleteWebhook's response carries no fields: populate an empty
  # structure from whatever arguments were supplied.
  params <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(params, template)
}
.codepipeline$deregister_webhook_with_third_party_input <- function(...) {
  # Request shape template for DeregisterWebhookWithThirdParty: the
  # `webhookName` string.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(webhookName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(params, template)
}
.codepipeline$deregister_webhook_with_third_party_output <- function(...) {
  # DeregisterWebhookWithThirdParty's response carries no fields:
  # populate an empty structure from the supplied arguments.
  params <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(params, template)
}
.codepipeline$disable_stage_transition_input <- function(...) {
  # Request shape template for DisableStageTransition: pipeline and stage
  # names, the transition type, and a human-readable reason -- all strings.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      pipelineName = structure(logical(0), tags = list(type = "string")),
      stageName = structure(logical(0), tags = list(type = "string")),
      transitionType = structure(logical(0), tags = list(type = "string")),
      reason = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# DisableStageTransition returns no response data; the output shape is an
# empty list.
.codepipeline$disable_stage_transition_output <- function(...) {
  list()
}
.codepipeline$enable_stage_transition_input <- function(...) {
  # Request shape template for EnableStageTransition: pipeline name, stage
  # name and transition type, all serialized as strings.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      pipelineName = structure(logical(0), tags = list(type = "string")),
      stageName = structure(logical(0), tags = list(type = "string")),
      transitionType = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# EnableStageTransition returns no response data; the output shape is an
# empty list.
.codepipeline$enable_stage_transition_output <- function(...) {
  list()
}
.codepipeline$get_action_type_input <- function(...) {
  # Request shape template for GetActionType: the four string components
  # of an action type id (category, owner, provider, version).
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      category = structure(logical(0), tags = list(type = "string")),
      owner = structure(logical(0), tags = list(type = "string")),
      provider = structure(logical(0), tags = list(type = "string")),
      version = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for GetActionType. The nested `shape` skeleton
# mirrors the API response (executor config, action type id, artifact
# details, permissions, properties, URLs); populate() fills it from the
# supplied arguments. NOTE(review): appears to be generated SDK code.
.codepipeline$get_action_type_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(actionType = structure(list(description = structure(logical(0), tags = list(type = "string")), executor = structure(list(configuration = structure(list(lambdaExecutorConfiguration = structure(list(lambdaFunctionArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), jobWorkerExecutorConfiguration = structure(list(pollingAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), pollingServicePrincipals = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), type = structure(logical(0), tags = list(type = "string")), policyStatementsTemplate = structure(logical(0), tags = list(type = "string")), jobTimeout = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), id = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), inputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), outputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), permissions = structure(list(allowedAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), properties = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), optional = structure(logical(0), tags = list(type = 
"boolean")), key = structure(logical(0), tags = list(type = "boolean")), noEcho = structure(logical(0), tags = list(type = "boolean")), queryable = structure(logical(0), tags = list(type = "boolean")), description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), urls = structure(list(configurationUrl = structure(logical(0), tags = list(type = "string")), entityUrlTemplate = structure(logical(0), tags = list(type = "string")), executionUrlTemplate = structure(logical(0), tags = list(type = "string")), revisionUrlTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$get_job_details_input <- function(...) {
  # Request shape template for GetJobDetails: the `jobId` string.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(jobId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for GetJobDetails. The nested `shape` skeleton
# mirrors the API response, including the sensitive artifactCredentials
# fields (tagged sensitive = TRUE); populate() fills it from the supplied
# arguments. NOTE(review): appears to be generated SDK code.
.codepipeline$get_job_details_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobDetails = structure(list(id = structure(logical(0), tags = list(type = "string")), data = structure(list(actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), actionConfiguration = structure(list(configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), pipelineContext = structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), stage = structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), action = structure(list(name = structure(logical(0), tags = list(type = "string")), actionExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), pipelineArn = structure(logical(0), tags = list(type = "string")), pipelineExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), revision = structure(logical(0), tags = list(type = "string")), location = structure(list(type = structure(logical(0), tags = list(type = "string")), s3Location = structure(list(bucketName = structure(logical(0), tags = list(type = "string")), objectKey = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), revision = structure(logical(0), tags = list(type = "string")), location = structure(list(type = structure(logical(0), 
tags = list(type = "string")), s3Location = structure(list(bucketName = structure(logical(0), tags = list(type = "string")), objectKey = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), artifactCredentials = structure(list(accessKeyId = structure(logical(0), tags = list(type = "string", sensitive = TRUE)), secretAccessKey = structure(logical(0), tags = list(type = "string", sensitive = TRUE)), sessionToken = structure(logical(0), tags = list(type = "string", sensitive = TRUE))), tags = list(type = "structure", sensitive = TRUE)), continuationToken = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), accountId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$get_pipeline_input <- function(...) {
  # Request shape template for GetPipeline: the pipeline `name` (string)
  # and an optional `version` (integer).
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      name = structure(logical(0), tags = list(type = "string")),
      version = structure(logical(0), tags = list(type = "integer"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for GetPipeline: the full pipeline declaration
# plus a metadata structure (ARN and timestamps). populate() fills the
# skeleton from the supplied arguments. NOTE(review): appears to be
# generated SDK code.
.codepipeline$get_pipeline_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipeline = structure(list(name = structure(logical(0), tags = list(type = "string")), roleArn = structure(logical(0), tags = list(type = "string")), artifactStore = structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), artifactStores = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "map")), stages = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), blockers = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), actions = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), runOrder = structure(logical(0), tags = list(type = "integer")), configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags 
= list(type = "structure"))), tags = list(type = "list")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), roleArn = structure(logical(0), tags = list(type = "string")), region = structure(logical(0), tags = list(type = "string")), namespace = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), version = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), metadata = structure(list(pipelineArn = structure(logical(0), tags = list(type = "string")), created = structure(logical(0), tags = list(type = "timestamp")), updated = structure(logical(0), tags = list(type = "timestamp")), pollingDisabledAt = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$get_pipeline_execution_input <- function(...) {
  # Request shape template for GetPipelineExecution: the pipeline name and
  # the execution id, both strings.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      pipelineName = structure(logical(0), tags = list(type = "string")),
      pipelineExecutionId = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for GetPipelineExecution: execution status,
# summary and the list of artifact revisions. populate() fills the
# skeleton from the supplied arguments. NOTE(review): appears to be
# generated SDK code.
.codepipeline$get_pipeline_execution_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineExecution = structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), pipelineVersion = structure(logical(0), tags = list(type = "integer")), pipelineExecutionId = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string")), statusSummary = structure(logical(0), tags = list(type = "string")), artifactRevisions = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), revisionId = structure(logical(0), tags = list(type = "string")), revisionChangeIdentifier = structure(logical(0), tags = list(type = "string")), revisionSummary = structure(logical(0), tags = list(type = "string")), created = structure(logical(0), tags = list(type = "timestamp")), revisionUrl = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$get_pipeline_state_input <- function(...) {
  # Request shape template for GetPipelineState: the pipeline `name`
  # string.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for GetPipelineState: per-stage state with
# inbound executions/transitions, per-action states and timestamps.
# populate() fills the skeleton from the supplied arguments.
# NOTE(review): appears to be generated SDK code.
.codepipeline$get_pipeline_state_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), pipelineVersion = structure(logical(0), tags = list(type = "integer")), stageStates = structure(list(structure(list(stageName = structure(logical(0), tags = list(type = "string")), inboundExecution = structure(list(pipelineExecutionId = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), inboundTransitionState = structure(list(enabled = structure(logical(0), tags = list(type = "boolean")), lastChangedBy = structure(logical(0), tags = list(type = "string")), lastChangedAt = structure(logical(0), tags = list(type = "timestamp")), disabledReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), actionStates = structure(list(structure(list(actionName = structure(logical(0), tags = list(type = "string")), currentRevision = structure(list(revisionId = structure(logical(0), tags = list(type = "string")), revisionChangeId = structure(logical(0), tags = list(type = "string")), created = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), latestExecution = structure(list(actionExecutionId = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string")), summary = structure(logical(0), tags = list(type = "string")), lastStatusChange = structure(logical(0), tags = list(type = "timestamp")), token = structure(logical(0), tags = list(type = "string")), lastUpdatedBy = structure(logical(0), tags = list(type = "string")), externalExecutionId = structure(logical(0), tags = list(type = "string")), externalExecutionUrl = structure(logical(0), tags = list(type = "string")), percentComplete = structure(logical(0), tags = list(type = "integer")), errorDetails = structure(list(code = structure(logical(0), tags = list(type = "string")), message = structure(logical(0), tags = 
list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), entityUrl = structure(logical(0), tags = list(type = "string")), revisionUrl = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), latestExecution = structure(list(pipelineExecutionId = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), created = structure(logical(0), tags = list(type = "timestamp")), updated = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$get_third_party_job_details_input <- function(...) {
  # Request shape template for GetThirdPartyJobDetails: the job id and the
  # partner's client token, both strings.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      jobId = structure(logical(0), tags = list(type = "string")),
      clientToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for GetThirdPartyJobDetails. Mirrors the
# GetJobDetails response plus a `nonce`; artifactCredentials fields are
# tagged sensitive = TRUE. populate() fills the skeleton from the
# supplied arguments. NOTE(review): appears to be generated SDK code.
.codepipeline$get_third_party_job_details_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobDetails = structure(list(id = structure(logical(0), tags = list(type = "string")), data = structure(list(actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), actionConfiguration = structure(list(configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), pipelineContext = structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), stage = structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), action = structure(list(name = structure(logical(0), tags = list(type = "string")), actionExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), pipelineArn = structure(logical(0), tags = list(type = "string")), pipelineExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), revision = structure(logical(0), tags = list(type = "string")), location = structure(list(type = structure(logical(0), tags = list(type = "string")), s3Location = structure(list(bucketName = structure(logical(0), tags = list(type = "string")), objectKey = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), revision = structure(logical(0), tags = list(type = "string")), location = structure(list(type = structure(logical(0), 
tags = list(type = "string")), s3Location = structure(list(bucketName = structure(logical(0), tags = list(type = "string")), objectKey = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), artifactCredentials = structure(list(accessKeyId = structure(logical(0), tags = list(type = "string", sensitive = TRUE)), secretAccessKey = structure(logical(0), tags = list(type = "string", sensitive = TRUE)), sessionToken = structure(logical(0), tags = list(type = "string", sensitive = TRUE))), tags = list(type = "structure", sensitive = TRUE)), continuationToken = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), nonce = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$list_action_executions_input <- function(...) {
  # Request shape template for ListActionExecutions: pipeline name, an
  # optional execution-id filter, and the usual pagination controls.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      pipelineName = structure(logical(0), tags = list(type = "string")),
      filter = structure(
        list(
          pipelineExecutionId = structure(logical(0), tags = list(type = "string"))
        ),
        tags = list(type = "structure")
      ),
      maxResults = structure(logical(0), tags = list(type = "integer")),
      nextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for ListActionExecutions: a list of action
# execution details (input/output artifacts, resolved configuration,
# execution result, output variables) plus a pagination token.
# populate() fills the skeleton from the supplied arguments.
# NOTE(review): appears to be generated SDK code.
.codepipeline$list_action_executions_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(actionExecutionDetails = structure(list(structure(list(pipelineExecutionId = structure(logical(0), tags = list(type = "string")), actionExecutionId = structure(logical(0), tags = list(type = "string")), pipelineVersion = structure(logical(0), tags = list(type = "integer")), stageName = structure(logical(0), tags = list(type = "string")), actionName = structure(logical(0), tags = list(type = "string")), startTime = structure(logical(0), tags = list(type = "timestamp")), lastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), status = structure(logical(0), tags = list(type = "string")), input = structure(list(actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), resolvedConfiguration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), roleArn = structure(logical(0), tags = list(type = "string")), region = structure(logical(0), tags = list(type = "string")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), s3location = structure(list(bucket = structure(logical(0), tags = list(type = "string")), key = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), namespace = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), output = structure(list(outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), s3location = structure(list(bucket = structure(logical(0), tags = list(type = 
"string")), key = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), executionResult = structure(list(externalExecutionId = structure(logical(0), tags = list(type = "string")), externalExecutionSummary = structure(logical(0), tags = list(type = "string")), externalExecutionUrl = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), outputVariables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$list_action_types_input <- function(...) {
  # Request shape template for ListActionTypes: owner filter, pagination
  # token, and region filter, all serialized as strings.
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      actionOwnerFilter = structure(logical(0), tags = list(type = "string")),
      nextToken = structure(logical(0), tags = list(type = "string")),
      regionFilter = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for ListActionTypes: a list of action types
# (id, settings, configuration properties, artifact details) plus a
# pagination token. populate() fills the skeleton from the supplied
# arguments. NOTE(review): appears to be generated SDK code.
.codepipeline$list_action_types_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(actionTypes = structure(list(structure(list(id = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), settings = structure(list(thirdPartyConfigurationUrl = structure(logical(0), tags = list(type = "string")), entityUrlTemplate = structure(logical(0), tags = list(type = "string")), executionUrlTemplate = structure(logical(0), tags = list(type = "string")), revisionUrlTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), actionConfigurationProperties = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), required = structure(logical(0), tags = list(type = "boolean")), key = structure(logical(0), tags = list(type = "boolean")), secret = structure(logical(0), tags = list(type = "boolean")), queryable = structure(logical(0), tags = list(type = "boolean")), description = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), inputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), outputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$list_pipeline_executions_input <- function(...) {
  # Request shape template for ListPipelineExecutions: pipeline name plus
  # pagination controls (maxResults integer, nextToken string).
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      pipelineName = structure(logical(0), tags = list(type = "string")),
      maxResults = structure(logical(0), tags = list(type = "integer")),
      nextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
# Response shape template for ListPipelineExecutions: execution summaries
# (status, timestamps, source revisions, trigger and stop-trigger info)
# plus a pagination token. populate() fills the skeleton from the
# supplied arguments. NOTE(review): appears to be generated SDK code.
.codepipeline$list_pipeline_executions_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineExecutionSummaries = structure(list(structure(list(pipelineExecutionId = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string")), startTime = structure(logical(0), tags = list(type = "timestamp")), lastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), sourceRevisions = structure(list(structure(list(actionName = structure(logical(0), tags = list(type = "string")), revisionId = structure(logical(0), tags = list(type = "string")), revisionSummary = structure(logical(0), tags = list(type = "string")), revisionUrl = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), trigger = structure(list(triggerType = structure(logical(0), tags = list(type = "string")), triggerDetail = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), stopTrigger = structure(list(reason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.codepipeline$list_pipelines_input <- function(...) {
  # Request shape template for ListPipelines: pagination controls only
  # (nextToken string, maxResults integer).
  params <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      nextToken = structure(logical(0), tags = list(type = "string")),
      maxResults = structure(logical(0), tags = list(type = "integer"))
    ),
    tags = list(type = "structure")
  )
  populate(params, template)
}
.codepipeline$list_pipelines_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(pipelines = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "integer")), created = structure(logical(0), tags = list(type = "timestamp")), updated = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# -- codepipeline: ListTagsForResource / ListWebhooks ------------------------
# Generated paws serialization templates; see populate() elsewhere in package.
# Request shape for ListTagsForResource.
.codepipeline$list_tags_for_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(resourceArn = structure(logical(0), tags = list(type = "string")), nextToken = structure(logical(0), tags = list(type = "string")), maxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for ListTagsForResource (list of key/value tag structures).
.codepipeline$list_tags_for_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request shape for ListWebhooks.
.codepipeline$list_webhooks_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for ListWebhooks (webhook definition, auth config, tags).
.codepipeline$list_webhooks_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(webhooks = structure(list(structure(list(definition = structure(list(name = structure(logical(0), tags = list(type = "string")), targetPipeline = structure(logical(0), tags = list(type = "string")), targetAction = structure(logical(0), tags = list(type = "string")), filters = structure(list(structure(list(jsonPath = structure(logical(0), tags = list(type = "string")), matchEquals = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), authentication = structure(logical(0), tags = list(type = "string")), authenticationConfiguration = structure(list(AllowedIPRange = structure(logical(0), tags = list(type = "string")), SecretToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), url = structure(logical(0), tags = list(type = "string")), errorMessage = structure(logical(0), tags = list(type = "string")), errorCode = structure(logical(0), tags = list(type = "string")), lastTriggered = structure(logical(0), tags = list(type = "timestamp")), arn = structure(logical(0), tags = list(type = "string")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# -- codepipeline: PollForJobs / PollForThirdPartyJobs -----------------------
# Generated paws serialization templates; see populate() elsewhere in package.
# Request shape for PollForJobs (action type id, batch size, query params).
.codepipeline$poll_for_jobs_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), maxBatchSize = structure(logical(0), tags = list(type = "integer")), queryParam = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for PollForJobs. Note the artifactCredentials fields are
# tagged sensitive = TRUE (temporary AWS credentials for artifact access).
.codepipeline$poll_for_jobs_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobs = structure(list(structure(list(id = structure(logical(0), tags = list(type = "string")), data = structure(list(actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), actionConfiguration = structure(list(configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), pipelineContext = structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), stage = structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), action = structure(list(name = structure(logical(0), tags = list(type = "string")), actionExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), pipelineArn = structure(logical(0), tags = list(type = "string")), pipelineExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), revision = structure(logical(0), tags = list(type = "string")), location = structure(list(type = structure(logical(0), tags = list(type = "string")), s3Location = structure(list(bucketName = structure(logical(0), tags = list(type = "string")), objectKey = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), revision = structure(logical(0), tags = list(type = "string")), location = structure(list(type = 
structure(logical(0), tags = list(type = "string")), s3Location = structure(list(bucketName = structure(logical(0), tags = list(type = "string")), objectKey = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), artifactCredentials = structure(list(accessKeyId = structure(logical(0), tags = list(type = "string", sensitive = TRUE)), secretAccessKey = structure(logical(0), tags = list(type = "string", sensitive = TRUE)), sessionToken = structure(logical(0), tags = list(type = "string", sensitive = TRUE))), tags = list(type = "structure", sensitive = TRUE)), continuationToken = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), nonce = structure(logical(0), tags = list(type = "string")), accountId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request shape for PollForThirdPartyJobs.
.codepipeline$poll_for_third_party_jobs_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), maxBatchSize = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for PollForThirdPartyJobs (clientId/jobId pairs only).
.codepipeline$poll_for_third_party_jobs_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobs = structure(list(structure(list(clientId = structure(logical(0), tags = list(type = "string")), jobId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# -- codepipeline: PutActionRevision / PutApprovalResult ---------------------
# Generated paws serialization templates; see populate() elsewhere in package.
# Request shape for PutActionRevision.
.codepipeline$put_action_revision_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), stageName = structure(logical(0), tags = list(type = "string")), actionName = structure(logical(0), tags = list(type = "string")), actionRevision = structure(list(revisionId = structure(logical(0), tags = list(type = "string")), revisionChangeId = structure(logical(0), tags = list(type = "string")), created = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for PutActionRevision.
.codepipeline$put_action_revision_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(newRevision = structure(logical(0), tags = list(type = "boolean")), pipelineExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request shape for PutApprovalResult.
.codepipeline$put_approval_result_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), stageName = structure(logical(0), tags = list(type = "string")), actionName = structure(logical(0), tags = list(type = "string")), result = structure(list(summary = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), token = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for PutApprovalResult (approval timestamp only).
.codepipeline$put_approval_result_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(approvedAt = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# -- codepipeline: PutJob*/PutThirdPartyJob* result operations ---------------
# Generated paws serialization templates; see populate() elsewhere in package.
# The *_output functions return an empty list() because these API calls have
# no response body.
# Request shape for PutJobFailureResult.
.codepipeline$put_job_failure_result_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobId = structure(logical(0), tags = list(type = "string")), failureDetails = structure(list(type = structure(logical(0), tags = list(type = "string")), message = structure(logical(0), tags = list(type = "string")), externalExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# PutJobFailureResult has no response payload.
.codepipeline$put_job_failure_result_output <- function(...) {
  list()
}
# Request shape for PutJobSuccessResult.
.codepipeline$put_job_success_result_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobId = structure(logical(0), tags = list(type = "string")), currentRevision = structure(list(revision = structure(logical(0), tags = list(type = "string")), changeIdentifier = structure(logical(0), tags = list(type = "string")), created = structure(logical(0), tags = list(type = "timestamp")), revisionSummary = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), continuationToken = structure(logical(0), tags = list(type = "string")), executionDetails = structure(list(summary = structure(logical(0), tags = list(type = "string")), externalExecutionId = structure(logical(0), tags = list(type = "string")), percentComplete = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), outputVariables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# PutJobSuccessResult has no response payload.
.codepipeline$put_job_success_result_output <- function(...) {
  list()
}
# Request shape for PutThirdPartyJobFailureResult.
.codepipeline$put_third_party_job_failure_result_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobId = structure(logical(0), tags = list(type = "string")), clientToken = structure(logical(0), tags = list(type = "string")), failureDetails = structure(list(type = structure(logical(0), tags = list(type = "string")), message = structure(logical(0), tags = list(type = "string")), externalExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# PutThirdPartyJobFailureResult has no response payload.
.codepipeline$put_third_party_job_failure_result_output <- function(...) {
  list()
}
# Request shape for PutThirdPartyJobSuccessResult.
.codepipeline$put_third_party_job_success_result_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(jobId = structure(logical(0), tags = list(type = "string")), clientToken = structure(logical(0), tags = list(type = "string")), currentRevision = structure(list(revision = structure(logical(0), tags = list(type = "string")), changeIdentifier = structure(logical(0), tags = list(type = "string")), created = structure(logical(0), tags = list(type = "timestamp")), revisionSummary = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), continuationToken = structure(logical(0), tags = list(type = "string")), executionDetails = structure(list(summary = structure(logical(0), tags = list(type = "string")), externalExecutionId = structure(logical(0), tags = list(type = "string")), percentComplete = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# PutThirdPartyJobSuccessResult has no response payload.
.codepipeline$put_third_party_job_success_result_output <- function(...) {
  list()
}
# -- codepipeline: PutWebhook / RegisterWebhookWithThirdParty ----------------
# Generated paws serialization templates; see populate() elsewhere in package.
# Request shape for PutWebhook (webhook definition plus resource tags).
.codepipeline$put_webhook_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(webhook = structure(list(name = structure(logical(0), tags = list(type = "string")), targetPipeline = structure(logical(0), tags = list(type = "string")), targetAction = structure(logical(0), tags = list(type = "string")), filters = structure(list(structure(list(jsonPath = structure(logical(0), tags = list(type = "string")), matchEquals = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), authentication = structure(logical(0), tags = list(type = "string")), authenticationConfiguration = structure(list(AllowedIPRange = structure(logical(0), tags = list(type = "string")), SecretToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for PutWebhook (the created webhook, its URL, and errors).
.codepipeline$put_webhook_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(webhook = structure(list(definition = structure(list(name = structure(logical(0), tags = list(type = "string")), targetPipeline = structure(logical(0), tags = list(type = "string")), targetAction = structure(logical(0), tags = list(type = "string")), filters = structure(list(structure(list(jsonPath = structure(logical(0), tags = list(type = "string")), matchEquals = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), authentication = structure(logical(0), tags = list(type = "string")), authenticationConfiguration = structure(list(AllowedIPRange = structure(logical(0), tags = list(type = "string")), SecretToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), url = structure(logical(0), tags = list(type = "string")), errorMessage = structure(logical(0), tags = list(type = "string")), errorCode = structure(logical(0), tags = list(type = "string")), lastTriggered = structure(logical(0), tags = list(type = "timestamp")), arn = structure(logical(0), tags = list(type = "string")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request shape for RegisterWebhookWithThirdParty.
.codepipeline$register_webhook_with_third_party_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(webhookName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for RegisterWebhookWithThirdParty (empty structure).
.codepipeline$register_webhook_with_third_party_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(), tags = list(type = "structure"))
  return(populate(args, shape))
}
# -- codepipeline: RetryStageExecution / Start- / StopPipelineExecution ------
# Generated paws serialization templates; see populate() elsewhere in package.
# Request shape for RetryStageExecution.
.codepipeline$retry_stage_execution_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), stageName = structure(logical(0), tags = list(type = "string")), pipelineExecutionId = structure(logical(0), tags = list(type = "string")), retryMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for RetryStageExecution (execution id only).
.codepipeline$retry_stage_execution_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request shape for StartPipelineExecution. clientRequestToken is tagged as
# an idempotency token (auto-filled by the SDK when omitted).
.codepipeline$start_pipeline_execution_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(name = structure(logical(0), tags = list(type = "string")), clientRequestToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for StartPipelineExecution (execution id only).
.codepipeline$start_pipeline_execution_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request shape for StopPipelineExecution.
.codepipeline$stop_pipeline_execution_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineName = structure(logical(0), tags = list(type = "string")), pipelineExecutionId = structure(logical(0), tags = list(type = "string")), abandon = structure(logical(0), tags = list(type = "boolean")), reason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for StopPipelineExecution (execution id only).
.codepipeline$stop_pipeline_execution_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipelineExecutionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# -- codepipeline: TagResource / UntagResource -------------------------------
# Generated paws serialization templates; see populate() elsewhere in package.
# Request shape for TagResource (ARN plus key/value tag list).
.codepipeline$tag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(resourceArn = structure(logical(0), tags = list(type = "string")), tags = structure(list(structure(list(key = structure(logical(0), tags = list(type = "string")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for TagResource (empty structure).
.codepipeline$tag_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request shape for UntagResource (ARN plus list of tag keys to remove).
.codepipeline$untag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(resourceArn = structure(logical(0), tags = list(type = "string")), tagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for UntagResource (empty structure).
.codepipeline$untag_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(), tags = list(type = "structure"))
  return(populate(args, shape))
}
# -- codepipeline: UpdateActionType ------------------------------------------
# Generated paws serialization template; see populate() elsewhere in package.
# Request shape for UpdateActionType: the full action-type definition
# (executor config, id, artifact limits, permissions, properties, URLs).
.codepipeline$update_action_type_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(actionType = structure(list(description = structure(logical(0), tags = list(type = "string")), executor = structure(list(configuration = structure(list(lambdaExecutorConfiguration = structure(list(lambdaFunctionArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), jobWorkerExecutorConfiguration = structure(list(pollingAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), pollingServicePrincipals = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), type = structure(logical(0), tags = list(type = "string")), policyStatementsTemplate = structure(logical(0), tags = list(type = "string")), jobTimeout = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), id = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), inputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), outputArtifactDetails = structure(list(minimumCount = structure(logical(0), tags = list(type = "integer")), maximumCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), permissions = structure(list(allowedAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), properties = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), optional = structure(logical(0), tags = list(type = 
"boolean")), key = structure(logical(0), tags = list(type = "boolean")), noEcho = structure(logical(0), tags = list(type = "boolean")), queryable = structure(logical(0), tags = list(type = "boolean")), description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), urls = structure(list(configurationUrl = structure(logical(0), tags = list(type = "string")), entityUrlTemplate = structure(logical(0), tags = list(type = "string")), executionUrlTemplate = structure(logical(0), tags = list(type = "string")), revisionUrlTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# UpdateActionType has no response payload.
.codepipeline$update_action_type_output <- function(...) {
  list()
}
# -- codepipeline: UpdatePipeline --------------------------------------------
# Generated paws serialization templates; see populate() elsewhere in package.
# Request shape for UpdatePipeline: the full pipeline declaration (role,
# artifact store(s), stages with their blockers and actions, version).
.codepipeline$update_pipeline_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipeline = structure(list(name = structure(logical(0), tags = list(type = "string")), roleArn = structure(logical(0), tags = list(type = "string")), artifactStore = structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), artifactStores = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "map")), stages = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), blockers = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), actions = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), runOrder = structure(logical(0), tags = list(type = "integer")), configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags 
= list(type = "structure"))), tags = list(type = "list")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), roleArn = structure(logical(0), tags = list(type = "string")), region = structure(logical(0), tags = list(type = "string")), namespace = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), version = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for UpdatePipeline: echoes the updated pipeline declaration.
.codepipeline$update_pipeline_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(pipeline = structure(list(name = structure(logical(0), tags = list(type = "string")), roleArn = structure(logical(0), tags = list(type = "string")), artifactStore = structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), artifactStores = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), location = structure(logical(0), tags = list(type = "string")), encryptionKey = structure(list(id = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "map")), stages = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), blockers = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), actions = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string")), actionTypeId = structure(list(category = structure(logical(0), tags = list(type = "string")), owner = structure(logical(0), tags = list(type = "string")), provider = structure(logical(0), tags = list(type = "string")), version = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), runOrder = structure(logical(0), tags = list(type = "integer")), configuration = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), outputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags 
= list(type = "structure"))), tags = list(type = "list")), inputArtifacts = structure(list(structure(list(name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), roleArn = structure(logical(0), tags = list(type = "string")), region = structure(logical(0), tags = list(type = "string")), namespace = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), version = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
|
3eefe07e59088de2ec391a3a32344ba945172c0c
|
4a8bddea17619eeb7ee0cee872531288365e8b5f
|
/Assignment 11.1.R
|
ebd97742ef3cba4eebf672f9292f84804044f23c
|
[] |
no_license
|
Tejassingh1010/Assignment-11.1.
|
b3a201563e704a2e9ef3957a7b14e27d684e34ae
|
63038ca4202c5667b80b63fa22cc51758b2ccc61
|
refs/heads/master
| 2020-03-30T17:18:05.634405
| 2018-10-03T17:05:36
| 2018-10-03T17:05:36
| 151,449,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
Assignment 11.1.R
|
# Exploratory analysis of the bank-marketing data set. Assumes
# `bank.additional.full` is already loaded in the workspace (e.g. imported
# via read.csv or the RStudio import dialog).
View(bank.additional.full)
dim(bank.additional.full)
# 41188 observations with 21 attributes in this data file.
str(bank.additional.full)
# Columns are a mix of types: integer, numeric, and factor.
psych::describe(bank.additional.full)
# Numeric overview (mean, sd, range, ...) of the whole data set.
# BUG FIX: the package name must be a quoted string; install.packages(VIM)
# evaluated the undefined symbol `VIM` and errored.
install.packages("VIM")
library(VIM)
# This data set encodes missing values as the string "unknown"; recode them
# as real NA before visualizing missingness.
missing <- bank.additional.full
missing[missing == "unknown"] <- NA
aggr(missing, col = c("blue", "Green"), numbers = TRUE, sortvars = TRUE,
     labels = names(missing), cex.axis = 0.5, gap = 3,
     ylab = c("missing data", "pattern"))
sapply(missing, function(x) sum(is.na(x)))  # NA count per column
t <- table(bank.additional.full$job)
# Tabular frequencies of the job categories.
t
title <- barplot(t, xlab = "job", ylab = "Numbers",
                 main = "Clients based on job", col = heat.colors(12), las = 3)
# Bar plot of clients per job, with the counts printed above each bar.
text(title, 0, t, pos = 3, srt = 90)
chisq.test(missing$job, missing$marital)
# Job and marital status are associated at the 95% confidence level
# (rows with NA are dropped by chisq.test's contingency table).
chisq.test(missing$job, missing$education)
# Association between job and education.
|
81de96690ba4e3616b826dd56953a09535326b6c
|
ffdece4a2168f3487e966f980cf486c97198fe18
|
/step-function.R
|
c1840c0f421cfa2816c716e2927d9a82e3d8886b
|
[] |
no_license
|
gpapadog/RJ-MCMC-examples
|
971d33e547b3518f3f4945b617bd66042b14802d
|
3fd95ec9d32d165973872cf2334910fa618147f0
|
refs/heads/master
| 2021-01-12T17:08:23.265664
| 2017-09-05T17:34:03
| 2017-09-05T17:34:03
| 69,982,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,303
|
r
|
step-function.R
|
CalcLogLike <- function(Y, X, cutoffs, heights, sigma) {
  # Gaussian log-likelihood of Y under a step-function mean in X.
  #
  # The mean is heights[1] for X < cutoffs[1] and heights[i + 1] for
  # X >= cutoffs[i] (cutoffs assumed sorted increasing), with constant
  # noise standard deviation `sigma`.
  #
  # Args:
  #   Y:       numeric vector of responses.
  #   X:       numeric vector of covariates, same length as Y.
  #   cutoffs: sorted numeric vector of break points; may be empty.
  #   heights: numeric vector of step levels, length(cutoffs) + 1.
  #   sigma:   noise standard deviation (scalar).
  #
  # Returns: the scalar log-likelihood sum(dnorm(Y, mean, sigma, log = TRUE)).
  if (length(heights) != length(cutoffs) + 1) {
    stop('Error: heights should be one element longer than cutoffs.')
  }
  mean_function <- rep(heights[1], length(X))
  # BUG FIX: the original `for (ii in 2:length(heights))` evaluated to
  # c(2, 1) when there were zero cutoffs (a single flat level), indexing
  # cutoffs[0]/heights[2] out of range. seq_along() handles that case.
  for (ii in seq_along(cutoffs)) {
    wh <- which(X >= cutoffs[ii])
    mean_function[wh] <- heights[ii + 1]
  }
  log_like <- dnorm(Y, mean = mean_function, sd = sigma, log = TRUE)
  return(sum(log_like))
}
# Simulate data from a 3-level step function observed with Gaussian noise.
set.seed(1234)
N <- 3000
sigma <- 1
minX <- 0
maxX <- 10
#X <- runif(N, minX, maxX)
X <- rnorm(N, mean = 5, sd = 1.5)
# Re-anchor the support to the observed range of X (X is normal, not uniform).
minX <- min(X)
maxX <- max(X)
true_means <- c(1, 2, 3)
true_cut <- c(3, 6)
# Piecewise-constant true mean: the indicator products select exactly one
# level per observation.
mX <- true_means[1] * (X >= minX & X < true_cut[1]) +
  true_means[2] * (X >= true_cut[1] & X < true_cut[2]) +
  true_means[3] * (X >= true_cut[2])
Y <- rnorm(N, mean = mX, sd = sigma)
#Y <- rnorm(N, mean = 0, sd = sigma)
# Quick visual checks of the truth and of the noisy data.
plot(X, mX)
plot(X, Y)
# Reversible-jump move probabilities, indexed by the current number of
# cutoffs s = 0..max_cutoffs: bk = birth, dk = death, etak = height change,
# pik = position change (Green-1995-style weights c * min(1, p(s+1)/p(s))).
max_cutoffs <- 10
lambda <- 5 # Poisson parameter for number of breaks.
bk <- rep(0, max_cutoffs + 1)
names(bk) <- paste0('s=', c(0:max_cutoffs))
dk <- bk
etak <- bk
pik <- bk
for (ii in 0:(max_cutoffs - 1)) { # Number of current cutoffs.
  bk[ii + 1] <- dpois(ii + 1, lambda = lambda) / dpois(ii, lambda = lambda)
  dk[ii + 2] <- dpois(ii, lambda = lambda) / dpois(ii + 1, lambda = lambda)
}
# Death probability at the cap, so the chain can always move back down.
dk[length(dk)] <- dpois(max_cutoffs - 1, lambda = lambda) /
  dpois(max_cutoffs, lambda = lambda)
# Cap the raw ratios at 1 (pmin replaces the elementwise sapply calls and
# preserves the names).
bk <- pmin(bk, 1)
dk <- pmin(dk, 1)
# Scaling constant; renamed from `c`, which shadowed base::c().
move_scale <- 0.9 / max(bk + dk)
bk <- move_scale * bk
dk <- move_scale * dk
# The remaining probability mass is split evenly between height and position
# moves; with zero cutoffs there is no position move, so etak[1] takes the
# whole remainder and pik[1] stays 0.
for (ii in 2:(max_cutoffs + 1)) {
  etak[ii] <- (1 - bk[ii] - dk[ii]) / 2
  pik[ii] <- etak[ii]
}
etak[1] <- 1 - bk[1]
Nsims <- 10000
heights <- NULL
heights[[1]] <- c(4, 2, 0)
cutoffs <- NULL
cutoffs[[1]] <- c(5, 9)
sigmas <- rep(NA, Nsims)
sigmas[1] <- 1
moves <- rep(NA, Nsims)
range_unif <- 1
# moves correspond to 1 = 'H', 2 = 'P', 3 = 'B', 4 = 'D'.
# Reversible-jump MCMC over step functions (Green 1995 change-point style).
# Moves: 1 = 'H' height update (Gibbs), 2 = 'P' move a cutoff position,
# 3 = 'B' birth of a cutoff, 4 = 'D' death of a cutoff.
# FIXES vs the original: three slips in the cutoff-position prior ratios,
# each marked FIX below and derived from the (correct) birth-move terms.
for (ii in 2:Nsims) {
  curr_cut <- length(cutoffs[[ii - 1]])
  # Draw the move type with probabilities tied to the current model size.
  moves[ii] <- sample(1:4, 1, prob = c(etak[curr_cut + 1], pik[curr_cut + 1],
                                       bk[curr_cut + 1], dk[curr_cut + 1]))
  # MOVE 1: Change in height.
  if (moves[ii] == 1) {
    # Heights are conditionally Gaussian, so we Gibbs-sample (no proposal).
    cutoffs[[ii]] <- cutoffs[[ii - 1]]
    heights[[ii]] <- heights[[ii - 1]]
    # Choose which height to change randomly.
    wh_height <- sample(1:length(heights[[ii]]), 1)
    # Which observations correspond to that height.
    if (wh_height == 1) {
      wh_obs <- which(X < ifelse(length(cutoffs[[ii]]) > 0,
                                 cutoffs[[ii]][1], maxX + 1))
    } else if (wh_height == curr_cut + 1) { # Last height.
      wh_obs <- which(X >= cutoffs[[ii]][curr_cut])
    } else {
      wh_obs <- which(X >= cutoffs[[ii]][wh_height - 1] &
                        X <= cutoffs[[ii]][wh_height])
    }
    # Conjugate normal update with prior N(0, uninf).
    # NOTE(review): sigmas is used as a variance here but as an sd in
    # CalcLogLike; harmless while sigmas == 1 — verify before ever
    # estimating sigma.
    ssq_h <- 1 / (length(wh_obs) / sigmas[ii - 1] + 1 / uninf)
    mu_h <- ssq_h * sum(Y[wh_obs] / sigmas[ii - 1])
    heights[[ii]][wh_height] <- rnorm(1, mean = mu_h, sd = sqrt(ssq_h))
  # MOVE 2: Change in position of cutoff.
  } else if (moves[ii] == 2) {
    proposed_cutoffs <- cutoffs[[ii - 1]]
    proposed_heights <- heights[[ii - 1]]
    # Which cutoff we will change; propose uniformly between its neighbours
    # (symmetric proposal, so the proposal ratio is 1).
    wh_cut <- sample(1:length(proposed_cutoffs), 1)
    cuts <- c(min(X), proposed_cutoffs, max(X))
    choose_from <- c(cuts[wh_cut], cuts[wh_cut + 2])
    proposed_cutoffs[wh_cut] <- runif(1, min = choose_from[1], max = choose_from[2])
    # Likelihood ratio.
    logAR <- CalcLogLike(Y = Y, X = X, cutoffs = proposed_cutoffs,
                         heights = proposed_heights, sigma = sigmas[ii - 1])
    logAR <- logAR - CalcLogLike(Y = Y, X = X, cutoffs = cutoffs[[ii - 1]],
                                 heights = heights[[ii - 1]], sigma = sigmas[ii - 1])
    # Prior ratio under the even-order-statistics prior on positions:
    # (s* - s_{j-1})(s_{j+1} - s*) / ((s_j - s_{j-1})(s_{j+1} - s_j)).
    logAR <- logAR + log(choose_from[2] - proposed_cutoffs[wh_cut])
    logAR <- logAR + log(proposed_cutoffs[wh_cut] - choose_from[1])
    logAR <- logAR - log(choose_from[2] - cutoffs[[ii - 1]][wh_cut])
    # FIX: this term belongs in the denominator; the original added it.
    logAR <- logAR - log(cutoffs[[ii - 1]][wh_cut] - choose_from[1])
    cutoffs[[ii]] <- cutoffs[[ii - 1]]
    heights[[ii]] <- heights[[ii - 1]]
    if (log(runif(1)) < logAR) {
      cutoffs[[ii]] <- proposed_cutoffs
    }
  # MOVE 3: Birth of a new cutoff.
  } else if (moves[ii] == 3) {
    # Choose the new cutoff uniformly on the data range.
    sstar <- runif(1, minX, maxX)
    proposed_cutoffs <- sort(c(cutoffs[[ii - 1]], sstar))
    wh_cut <- which(proposed_cutoffs == sstar)
    sj <- ifelse(wh_cut == 1, minX, proposed_cutoffs[wh_cut - 1])
    sj1 <- ifelse(wh_cut == curr_cut + 1, maxX, proposed_cutoffs[wh_cut + 1])
    # Split the old height into two using u ~ U(0, range_unif) so that the
    # length-weighted average of the new heights equals the old height.
    proposed_heights <- rep(NA, length(proposed_cutoffs) + 1)
    proposed_heights[- c(wh_cut, wh_cut + 1)] <- heights[[ii - 1]][- wh_cut]
    hj_prev <- heights[[ii - 1]][wh_cut]
    u <- runif(1, 0, range_unif)
    hj_new <- hj_prev - u * (sj1 - sstar) / (sj1 - sj)
    hj1_new <- hj_new + u * (sstar - sj) / (sj1 - sj)
    proposed_heights[c(wh_cut, wh_cut + 1)] <- c(hj_new, hj1_new)
    # NOTE: What if the change in heights is NOT uniform?
    ## Likelihood ratio:
    logAR <- CalcLogLike(Y = Y, X = X, cutoffs = proposed_cutoffs,
                         heights = proposed_heights, sigma = sigmas[ii - 1])
    logAR <- logAR - CalcLogLike(Y = Y, X = X, cutoffs = cutoffs[[ii - 1]],
                                 heights = heights[[ii - 1]], sigma = sigmas[ii - 1])
    ## Prior ratio:
    # For the number of cutoffs:
    k <- length(cutoffs[[ii - 1]])
    logAR <- logAR + dpois(k + 1, lambda, log = TRUE)
    logAR <- logAR - dpois(k, lambda, log = TRUE)
    # For the cutoffs (2(k+1)(2k+3)/L^2 times the new/old gap ratio):
    logAR <- logAR + log(2 * (k + 1) * (2 * k + 3)) - log((maxX - minX) ^ 2)
    logAR <- logAR + log((sstar - sj) * (sj1 - sstar) / (sj1 - sj))
    # For the heights:
    logAR <- logAR + sum(dnorm(proposed_heights[c(wh_cut, wh_cut + 1)],
                               mean = 0, sd = sqrt(uninf), log = TRUE))
    logAR <- logAR - dnorm(heights[[ii - 1]][wh_cut], mean = 0, sd = sqrt(uninf),
                           log = TRUE)
    ## Proposal ratio:
    logAR <- logAR + log(dk[k + 2] * (maxX - minX) / (bk[k + 1] * (k + 1)))
    ## Jacobian is equal to 1.
    cutoffs[[ii]] <- cutoffs[[ii - 1]]
    heights[[ii]] <- heights[[ii - 1]]
    if (log(runif(1)) < logAR) {
      cutoffs[[ii]] <- proposed_cutoffs
      heights[[ii]] <- proposed_heights
    }
  # MOVE 4: Death of a cutoff.
  } else {
    # Choose the cutoff to drop; sj / sj1 / sj2 are its left neighbour, the
    # dropped cutoff itself, and its right neighbour.
    wh_drop <- sample(1:curr_cut, 1)
    drop_cut <- cutoffs[[ii - 1]][wh_drop]
    proposed_cutoffs <- setdiff(cutoffs[[ii - 1]], drop_cut)
    proposed_heights <- heights[[ii - 1]][- wh_drop]
    sj <- ifelse(wh_drop == 1, minX, proposed_cutoffs[wh_drop - 1])
    sj1 <- drop_cut
    sj2 <- ifelse(wh_drop == curr_cut, maxX, proposed_cutoffs[wh_drop])
    # Merge the two heights into their length-weighted average (the exact
    # reverse of the birth move's split).
    hj_prev <- heights[[ii - 1]][wh_drop]
    hj1_prev <- heights[[ii - 1]][wh_drop + 1]
    h_new <- ((sj1 - sj) * hj_prev + (sj2 - sj1) * hj1_prev) / (sj2 - sj)
    proposed_heights[wh_drop] <- h_new
    # Likelihood ratio.
    logAR <- CalcLogLike(Y, X, cutoffs = proposed_cutoffs, heights = proposed_heights,
                         sigma = sigmas[ii - 1])
    logAR <- logAR - CalcLogLike(Y, X, cutoffs = cutoffs[[ii - 1]],
                                 heights = heights[[ii - 1]], sigma = sigmas[ii - 1])
    # Prior ratio for the number of cutoffs:
    k <- length(cutoffs[[ii - 1]])
    logAR <- logAR + dpois(k - 1, lambda, log = TRUE) - dpois(k, lambda, log = TRUE)
    # Prior ratio for the cutoffs (reciprocal of the birth-move terms).
    # FIX: reciprocal of the birth constant 2k(2k+1); the original used
    # 2k(2k-1).
    logAR <- logAR + log((maxX - minX) ^ 2 / (2 * k * (2 * k + 1)))
    # FIX: the numerator is the merged interval length (sj2 - sj); the
    # original used (sj2 - sj1), which cancelled the wrong factor.
    logAR <- logAR + log((sj2 - sj) / ((sj1 - sj) * (sj2 - sj1)))
    # Prior ratio for the heights:
    logAR <- logAR +
      dnorm(h_new, mean = 0, sd = sqrt(uninf), log = TRUE) -
      sum(dnorm(c(hj_prev, hj1_prev), mean = 0, sd = sqrt(uninf), log = TRUE))
    # Proposal ratio.
    logAR <- logAR + log(bk[k] * k / (dk[k + 1] * (maxX - minX)))
    # Jacobian is 1.
    if (log(runif(1)) < logAR) {
      cutoffs[[ii]] <- proposed_cutoffs
      heights[[ii]] <- proposed_heights
    } else {
      cutoffs[[ii]] <- cutoffs[[ii - 1]]
      heights[[ii]] <- heights[[ii - 1]]
    }
  }
  # Noise level is kept fixed in this implementation.
  sigmas[ii] <- sigmas[ii - 1]
}
# Posterior summaries on a prediction grid: evaluate each stored step
# function on pred_x, drop burn-in, and plot the posterior mean with a 95%
# pointwise band.
pred_x <- seq(minX, maxX, length.out = 100)
pred_x <- pred_x[- c(1, length(pred_x))]
pred_y <- matrix(NA, nrow = length(pred_x), ncol = length(heights))
for (ii in 1:length(heights)) {
  # Duplicate each cutoff to form consecutive [left, right] interval
  # endpoints, then fill each interval with its height.
  cuts <- rep(cutoffs[[ii]], each = 2)
  cuts <- c(minX, cuts, maxX)
  for (cc in 1:(length(cuts) / 2)) {
    wh_obs <- which(pred_x > cuts[2 * cc - 1] & pred_x <= cuts[2 * cc])
    pred_y[wh_obs, ii] <- heights[[ii]][cc]
  }
}
# Discard the first 2000 draws as burn-in. Note that `cutoffs` is
# overwritten in place here.
pred_y <- pred_y[, - c(1:2000)]
cutoffs <- cutoffs[-c(1:2000)]
par(mfrow = c(1, 2), oma = rep(1, 4))
plot(1, xlim = c(0, 10), type = 'n', ylim = range(pred_y), main = '')
points(X, Y, pch = 16, cex = 0.3, col = 'red')
lines(pred_x, apply(pred_y, 1, mean))
lines(pred_x, apply(pred_y, 1, function(x) quantile(x, probs = 0.025)), col = 'green')
lines(pred_x, apply(pred_y, 1, function(x) quantile(x, probs = 0.975)), col = 'green')
# Posterior of the cutoff locations and of the number of cutoffs.
hist(unlist(cutoffs), breaks = 100)
table(sapply(cutoffs, length)) / sum(table(sapply(cutoffs, length)))
# Plotting the results separately for the number of cutoffs.
# FIX: `number_cuts` was referenced below but never defined anywhere in the
# original script; it is the number of cutoffs in each retained draw.
number_cuts <- sapply(cutoffs, length)
par(mfrow = c(1, 2), mar = rep(2, 4))
# Condition on draws that used exactly `only_cuts` cutoffs.
only_cuts <- 2
wh <- which(number_cuts == only_cuts)
pred_y_wh <- pred_y[, wh]
plot(1, xlim = c(0, 10), type = 'n', ylim = range(Y), main = only_cuts)
points(X, Y, pch = 16, cex = 0.3, col = 'red')
lines(pred_x, apply(pred_y_wh, 1, mean))
lines(pred_x, apply(pred_y_wh, 1, function(x) quantile(x, probs = 0.025)), col = 'green')
lines(pred_x, apply(pred_y_wh, 1, function(x) quantile(x, probs = 0.975)), col = 'green')
hist(unlist(cutoffs[wh]), breaks = 200, xlim = range(X), main = only_cuts)
|
b0538f216637b8d0e3f94da240649e766cece310
|
e45aca549a2df39c9158b73b78015db58f7832ec
|
/sandbox/sentiment_graphs.R
|
12eff6ee87f3d28ffdc81e53528c1480a4869b33
|
[] |
no_license
|
andrewheiss/Media-and-NGOs
|
5e502f4dd7209c3247df05e2f95011e18a92968f
|
f4ae14869f1d9183d47123fd3b1779a12ae3cbdf
|
refs/heads/master
| 2020-06-03T19:05:48.132168
| 2014-03-26T16:04:20
| 2014-03-26T16:04:20
| 10,343,784
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 459
|
r
|
sentiment_graphs.R
|
library(ggplot2)
library(reshape2)
# Tag each publication's sentiment table before stacking them.
ahram_sentiment$publication <- "ahram"
dne_sentiment$publication <- "dne"
egind_sentiment$publication <- "egind"
# Stack the three tables, make the tag a factor, and melt to long format so
# each sentiment variable becomes a facet.
plot.data <- data.frame(do.call(rbind, list(ahram_sentiment, dne_sentiment, egind_sentiment)))
plot.data$publication <- factor(plot.data$publication)
plot.data <- melt(plot.data, id=c("id_article", "publication"))
# Overlaid density of each sentiment score, coloured by publication.
p <- ggplot(plot.data, aes(x=value, fill=publication))
p + geom_density(alpha=.7) + facet_grid(. ~ variable)
|
55b27ca430ae4afe9cafeea5995a32148c3d329a
|
ded254a14f62f6705715c719f126ff6b557a661e
|
/R/sequence_tools.R
|
35ebb2e13cf01f23a8b6a916f5d0873bca794be7
|
[] |
no_license
|
pedroivo000/lambdaPrimeR
|
9a2269fc967242ed79b3a032e0c2cb1b33db1898
|
0a40261fabea18075fc953a2c62e26620c57b65a
|
refs/heads/master
| 2021-06-04T07:57:45.801757
| 2020-01-20T05:34:36
| 2020-01-20T05:34:36
| 119,300,044
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 742
|
r
|
sequence_tools.R
|
################
#Sequence tools
################
complement <- function(sequence) {
  # DNA complement of `sequence` (character vector; vectorized).
  # The input is upper-cased first, so the result is always upper case.
  # FIX: the original stored the result in a local variable (shadowing the
  # function name) and so returned it invisibly; return it directly.
  chartr('ATCG', 'TAGC', toupper(sequence))
}
reverse_complement <- function(sequence) {
  # Reverse complement of `sequence`, vectorized over its elements.
  # FIX: the original returned its last assignment invisibly; return the
  # value directly. The stringi dependency is replaced by base-R reversal
  # (split / rev / paste), which is adequate for plain-ASCII DNA strings.
  complemented <- complement(sequence)
  vapply(strsplit(complemented, '', fixed = TRUE),
         function(chars) paste(rev(chars), collapse = ''),
         character(1))
}
simulate_pcr <- function(sequence_object, primers) {
  # Build the simulated PCR product for a template sequence.
  #
  # Args:
  #   sequence_object: character scalar, the template sequence.
  #   primers: data frame with columns `id` ('forward' / 'reverse') and
  #            `template_annealing_seq`.
  #
  # Returns: the template flanked on the left by the forward primer's
  # annealing sequence and on the right by the lower-cased reverse
  # complement of the reverse primer's annealing sequence.
  #
  # FIX: base subsetting replaces the original unqualified filter() call,
  # which silently resolved to stats::filter() (and failed) whenever dplyr
  # was not attached; also return the result visibly instead of via a
  # trailing assignment.
  original_seq <- sequence_object
  forward_primer <- primers[primers$id == 'forward', , drop = FALSE]
  reverse_primer <- primers[primers$id == 'reverse', , drop = FALSE]
  left_flank <- forward_primer$template_annealing_seq
  right_flank <- reverse_complement(reverse_primer$template_annealing_seq)
  paste(left_flank, original_seq, tolower(right_flank), sep = '')
}
|
35715ba7038d8ea2b9b99756382ce2944f4a35f1
|
27a4b400cacbe8bf9958bae8b6b126640bc9e6d7
|
/R/search.R
|
04db4314b9cdf8831f47f104a5390032030bd151
|
[] |
no_license
|
dhersz/replr
|
e3ea52aac32dbec5725fb9b5fb6ce8c2920594ba
|
95e4915cf9a3f455a47b4f336cbf6ffb4b2dd200
|
refs/heads/master
| 2023-05-09T23:58:20.252248
| 2021-06-09T22:23:24
| 2021-06-09T22:23:24
| 374,838,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 308
|
r
|
search.R
|
custom_search <- function(){
  # Walk the environment chain from the caller's frame up to the empty
  # environment, collecting each environment's name. The base environment
  # reports "base", which is normalised to the search()-style "package:base".
  env <- parent.frame()
  packages <- character()
  repeat {
    if (identical(env, emptyenv())) {
      break
    }
    packages <- c(packages, environmentName(env))
    env <- parent.env(env)
  }
  sub("^base$", "package:base", packages)
}
|
9d0d6b96a464193c44ac30596568e8353db228c1
|
2bf9a12e1d07eb63d89a74df4659f9bb3cab5be6
|
/JSCORES.R
|
c58a5e1e773f695408ad58eff990163cb789d854
|
[] |
no_license
|
rdolia/All-my-codes
|
bac39b6f46ab5a4407cc3b9fc40bbc28485de0c8
|
30f5ded6f6d6a981798faf226879a94efaad578b
|
refs/heads/master
| 2020-04-09T18:01:17.598378
| 2019-03-12T09:50:07
| 2019-03-12T09:50:07
| 160,499,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
JSCORES.R
|
# DATA JSCORE FILE
# Reads an employee skill matrix from an Excel workbook and runs SQL-style
# lookups over it with sqldf.
library(xlsx)
# NOTE(review): hard-coded absolute Windows path — this only runs on the
# original author's machine; parameterise before reuse.
DATAJSCORES <- read.xlsx("D:/Rachit/7. Workforce Transformation/DATA Jscores.xlsx",1)
View(DATAJSCORES)
library(sqldf)
# Modify the queries below to do other searches.
# Employees whose Skill is 'Foundation Complete'.
sqldf("SELECT `EMP.Name`
FROM DATAJSCORES
WHERE `Skill` = 'Foundation Complete'")
# Employees with skill level 'I'.
sqldf("SELECT `EMP.Name`
FROM DATAJSCORES
WHERE `Skill` = 'I'")
# Employees with skill level 'T'.
sqldf("SELECT `EMP.Name`
FROM DATAJSCORES
WHERE `Skill` = 'T'")
# Employees with Skill recorded as the literal string 'FALSE'.
# NOTE(review): the original comment said "FOR NA", but this matches the
# text 'FALSE', not SQL NULL / R NA — confirm which was intended.
sqldf("SELECT `EMP.Name`
FROM DATAJSCORES
WHERE `Skill` = 'FALSE'")
|
d4c209a709fc2a317ec0e79997be9f3769f33eae
|
8091ad65b820e9d33a32f368d3316be84e8f686e
|
/app_allinone.R
|
89efc01461d59862b60cc2eacd9a5970460a084b
|
[] |
no_license
|
rachelss/ShinyTreeCompare
|
336dae2b7c9eb96b24ec7eeb3a13f00117076529
|
ade531389676174c02dd61ac4054b2708d9f1926
|
refs/heads/master
| 2020-04-25T05:36:09.632865
| 2019-03-21T12:54:50
| 2019-03-21T12:54:50
| 172,548,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,819
|
r
|
app_allinone.R
|
library(shiny)
library(magrittr)
library(ape)
library(ggtree)
library(gridExtra)
library(cowplot)
library(shinyBS)
library(shinyLP)
library(shinythemes)
#mammals <- read.tree("mammal_raxml_bmc_paper_data/alltrees.tre")
#get species list
#species <- sort(as.character(mammals[[1]]["tip.label"][[1]]))
#set root for each tree
#for (i in 1:length(mammals)){
# mammals[i]<-root(mammals[i], outgroup = "Opossum", resolve.root = TRUE)
#}
# Define UI for application.
# FIX: assign the UI to `ui` — the original called shinyUI() without
# capturing the result, so the shinyApp(ui = ui, server = server) call at
# the bottom of this file failed with "object 'ui' not found".
ui <- shinyUI(
  # Include a fluidPage above the navbar to incorporate an icon in the header
  # Source: http://stackoverflow.com/a/24764483
  fluidPage(
    div(style="padding: 1px 0px; width: '100%'",
        titlePanel(
          title="", windowTitle="LandscapR: Comparing phylogenies from different datasets"
        )
    ),
    navbarPage(title = "LandscapR: Comparing phylogenies from different datasets",
               inverse = FALSE, # for diff color view
               theme = shinytheme("united"),
               # Tab 1: landing page built with shinyLP helpers.
               tabPanel("Landing Page", icon = icon("home"),
                        jumbotron(div(img(src="LandscapR.png"))),
                        fluidRow(
                          column(6, panel_div(class_type = "primary", panel_title = "Directions",
                                              content = "How to use the app")),
                          column(6, panel_div("success", "Application Maintainers",
                                              HTML("Email Me: <a href='mailto:jasmine.dumas@gmail.com?Subject=Shiny%20Help' target='_top'>Jasmine Dumas</a>")))
                        ), # end of fluidRow
                        fluidRow(
                          column(6, panel_div("info", "App Status", "Include text with status, version and updates")),
                          column(6, panel_div("danger", "Security and License", "Copyright 2016")),
                          #### FAVICON TAGS SECTION ####
                          tags$head(tags$link(rel="shortcut icon", href="favicon.ico"))
                        ) # end of fluidRow
               ), #end tabpanel 1
               # Tab 2: the tree-comparison app itself.
               tabPanel("App", icon = icon("cog"),
                        fluidRow(
                          column(4,
                                 # FIX: the intro text was passed as `content = '...'`,
                                 # which column()/div() render as an invisible HTML
                                 # attribute; wrap it in p() so users actually see it.
                                 p('This app allows you to compare phylogenies generated from different datasets.
Use the default datasets of mammals (from Schwartz et al. 2015) or upload your own.
Select an outgroup to root the tree.
Given a selection of two species, the app will highlight the smallest clade containing
those two species.'),
                                 fileInput("file", h3("Upload your own trees")),
                                 uiOutput("outgroup"), # from objects created in server
                                 uiOutput("select"),   # add select-input boxes
                                 # NOTE(review): input$hilite is never read by the
                                 # server function — confirm whether this control
                                 # should drive the highlighting mode.
                                 selectInput("hilite", h3("Select which to highlight"),
                                             choices = c("Clade","Species"), selected = "Clade"
                                 ),
                                 radioButtons("brlens", h3("Show Branch Lengths"),
                                              choices = list("Brlens" = 1, "Cladogram" = 2),
                                              selected = 1
                                 )
                          ), #end sidebar column
                          column(8,
                                 plotOutput("phyloPlot", height="auto")
                          ) #end center column
                        ) #end single fluidrow
               ) #end tabpanel 2
    ) #end navbarpage
  ) #end fluidpage
) #end shinyui
# FIX: assign the server function to `server` — the original called
# shinyServer() without capturing the result, so the shinyApp() call below
# failed with "object 'server' not found".
server <- shinyServer(function(input, output, session) {
  # Trees: either the bundled default set or a user upload.
  mammals <- reactive({
    if (is.null(input$file)){
      # Schwartz et al BMC Bioinf 2015 results
      return(read.tree("mammal_raxml_bmc_paper_data/alltrees.tre"))
    }
    else{
      return(read.tree(input$file$datapath))
    }
  })
  # Species list taken from the tip labels of the first tree.
  species <- reactive({
    sort(as.character(mammals()[[1]]["tip.label"][[1]]))
  })
  # Select box for the two species whose MRCA clade is highlighted.
  output$select <- renderUI({
    spp <- species()
    selectInput(inputId = "select",
                label = "Select two species:",
                choices = spp, multiple = TRUE,
                selected = c(spp[1], spp[2]) # default choice (not required)
    )
  })
  # Select box for the outgroup used to root every tree.
  output$outgroup <- renderUI({
    selectInput(inputId = "outgroup",
                label = "Select outgroup:",
                choices = species()
    )
  })
  # Root each tree at the selected outgroup.
  mammals2 <- reactive({
    trees <- mammals()
    numtrees <- length(trees)
    m <- vector("list", numtrees) # rooted trees, preallocated
    for (i in seq_len(numtrees)){
      m[i] <- root(trees[i], outgroup = input$outgroup, resolve.root = TRUE)
    }
    m
  })
  # Require exactly two selected species before plotting.
  families <- reactive({
    validate(
      need(length(input$select) == 2,
           "Please select two families to highlight the clade containing their MRCA")
    )
    input$select
  })
  # One ggtree plot per tree, with the MRCA clade of the selection grouped
  # (and therefore coloured) distinctly.
  p <- reactive({
    trees <- mammals2()
    myplots <- vector("list", length(trees))
    for (i in seq_along(trees)){
      mrca <- getMRCA(trees[[i]], tip=families())
      cladetree <- groupClade(trees[[i]], .node=mrca)
      if(input$brlens == 1){
        myplots[[i]] <- ggtree(cladetree, aes(color=group, linetype=group)) +
          geom_tiplab()
      }
      else{
        # Cladogram view: suppress branch lengths.
        myplots[[i]] <- ggtree(cladetree, aes(color=group, linetype=group), branch.length="none") +
          geom_tiplab()
      }
    }
    myplots
  })
  # Two-column grid of all tree plots; height scales with rendered width.
  output$phyloPlot <- renderPlot({
    plot_grid(plotlist = p(), ncol=2)
  },
  height = function() {
    2*session$clientData$output_phyloPlot_width
  }
  )
})
# Run the application
shinyApp(ui = ui, server = server)
|
2660dc5ca45ea4f204bb11f016962d6d935b0295
|
d1a62fa02626e48a354d409662270fd42f2d38cc
|
/quizzes.r
|
ea5b0e73e93d89cfd8f2cbea85496de191f903ea
|
[] |
no_license
|
ACHANGWA/PracticalMachineLearningAssignment
|
64ac766628be1eb5fae6a7abf44be0abb9e1ebb6
|
36ac9c711645facd046a5fea2c84e961f0c7a91a
|
refs/heads/master
| 2021-01-16T18:38:42.336480
| 2017-08-18T04:36:50
| 2017-08-18T04:36:50
| 100,105,508
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,724
|
r
|
quizzes.r
|
# Coursera "Practical Machine Learning" quiz worksheets (quizzes 2-4).
# Each section loads the quiz data set, fits the requested model with caret,
# and prints the quantity the quiz question asks for. Sections are meant to
# be run top to bottom; later sections reuse packages attached earlier.
# Quiz 2
# Question 1
library(AppliedPredictiveModeling)
library(caret)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
trainIndex = createDataPartition(diagnosis, p = 0.50,list=FALSE)
training = adData[trainIndex,]
testing = adData[-trainIndex,]
# Question 2
library(AppliedPredictiveModeling)
data(concrete)
library(caret)
set.seed(975)
inTrain = createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training = mixtures[ inTrain,]
testing = mixtures[-inTrain,]
# Make a plot of the outcome (CompressiveStrength) versus the index of the samples.
# Color by each of the variables in the data set (you may find the cut2() function in the Hmisc package useful for turning continuous covariates into factors).
# What do you notice in these plots?
training$index <- seq(1, nrow(training))
require(reshape2)
# Long format: one (index, variable, value) row per measurement, so each
# variable gets its own facet below.
D <- melt(training, id.var=c("index"))
ggplot(D, aes(x=index, y=value, color=variable)) +
  geom_point(alpha=1/2) +
  geom_smooth(alpha=1/2) +
  facet_wrap(~ variable, nrow=3, scales="free_y") +
  theme(legend.position="none")
ggplot(training, aes(x=Cement, y=CompressiveStrength)) +
  geom_point(alpha=1/2) +
  geom_smooth(alpha=1/2) +
  geom_rug(alpha=1/4)
# Question 3
library(AppliedPredictiveModeling)
data(concrete)
library(caret)
set.seed(975)
inTrain = createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training = mixtures[ inTrain,]
testing = mixtures[-inTrain,]
qplot(Superplasticizer, data=training, geom="histogram")
table(training$Superplasticizer)
# Question 4
library(caret)
library(AppliedPredictiveModeling)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
# Columns whose names start with "IL" (interleukin measurements).
predVar <- grep("^IL", names(training))
str(training[predVar])
# How many principal components capture 80% of the variance?
preProcess(training[predVar], method="pca", thresh=0.8)
# Question 5
library(caret)
library(AppliedPredictiveModeling)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
predVar <- grep("^IL", names(training))
# GLM on the raw IL predictors...
M0 <- train(training$diagnosis ~ ., data=training[predVar], method="glm")
hat0 <- predict(M0, testing)
confusionMatrix(testing$diagnosis, hat0)
# ...versus a GLM on principal components capturing 80% of the variance.
preProc <- preProcess(training[predVar], method="pca", thresh=0.8)
trainPC <- predict(preProc, training[predVar])
M1 <- train(training$diagnosis ~ ., data=trainPC, method="glm")
testPC <- predict(preProc, testing[predVar])
hat1 <- predict(M1, testPC)
confusionMatrix(testing$diagnosis, hat1)
# Quiz 3
# Question 1
library(AppliedPredictiveModeling)
data(segmentationOriginal)
library(caret)
training = segmentationOriginal[segmentationOriginal$Case == "Train",]
testing = segmentationOriginal[segmentationOriginal$Case == "Test",]
set.seed(125)
# Classification tree; inspect the fitted splits.
M <- train(Class ~ ., data=training, method="rpart")
M
M$finalModel
plot(M$finalModel)
text(M$finalModel)
# Question 3
library(pgmm)
data(olive)
olive = olive[,-1]
M <- train(Area ~ ., data=olive, method="rpart")
# Predict at the column means of the data.
newdata = as.data.frame(t(colMeans(olive)))
newdata
predict(M, newdata)
M$finalModel
# Question 4
# NOTE(review): ElemStatLearn has been archived from CRAN; install from the
# CRAN archive if these library()/data() calls fail.
library(ElemStatLearn)
data(SAheart)
set.seed(8484)
train = sample(1:dim(SAheart)[1],size=dim(SAheart)[1]/2,replace=F)
trainSA = SAheart[train,]
testSA = SAheart[-train,]
set.seed(13234)
M <- train(chd ~ age + alcohol + obesity + tobacco + typea + ldl, data=trainSA, method="glm", family="binomial")
M
M$finalModel
# Misclassification rate at a 0.5 probability threshold.
missClass = function(values,prediction){sum(((prediction > 0.5)*1) != values)/length(values)}
missClass(testSA$chd, predict(M, testSA))
missClass(trainSA$chd, predict(M, trainSA))
# Question 5
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
vowel.train$y <- factor(vowel.train$y)
vowel.test$y <- factor(vowel.test$y)
set.seed(33833)
# Random forest; which predictors matter most?
M <- train(y ~ ., data=vowel.train, method="rf")
varImp(M)
# Quiz 4
# Question 1
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
vowel.train$y <- factor(vowel.train$y)
vowel.test$y <- factor(vowel.test$y)
table(vowel.train$y)
set.seed(33833)
require(caret)
# Random forest vs boosting on the vowel data.
M1 <- train(y ~ ., data=vowel.train, method="rf")
M2 <- train(y ~ ., data=vowel.train, method="gbm")
hat1 <- predict(M1, vowel.test)
hat2 <- predict(M2, vowel.test)
confusionMatrix(hat1, vowel.test$y)$overall
confusionMatrix(hat2, vowel.test$y)$overall
# Agreement accuracy: accuracy restricted to cases where both models agree.
hat <- data.frame(hat1,
                  hat2,
                  y = vowel.test$y,
                  agree = hat1 == hat2)
accuracy <- sum(hat1[hat$agree] == hat$y[hat$agree]) / sum(hat$agree)
accuracy
# Question 2
library(caret)
library(gbm)
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
set.seed(62433)
# Stack rf, gbm and lda with a random-forest meta-learner trained on the
# base models' test-set predictions.
M1 <- train(diagnosis ~ ., data=training, method="rf")
M2 <- train(diagnosis ~ ., data=training, method="gbm")
M3 <- train(diagnosis ~ ., data=training, method="lda")
hat1 <- predict(M1, testing)
hat2 <- predict(M2, testing)
hat3 <- predict(M3, testing)
hat <- data.frame(hat1, hat2, hat3, diagnosis=testing$diagnosis)
M4 <- train(diagnosis ~ ., data=hat, method="rf")
M4
hat4 <- predict(M4, testing)
confusionMatrix(hat1, testing$diagnosis)$overall
confusionMatrix(hat2, testing$diagnosis)$overall
confusionMatrix(hat3, testing$diagnosis)$overall
confusionMatrix(hat4, testing$diagnosis)$overall
# Question 3
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
set.seed(233)
# Lasso path: which coefficient is penalised to zero last?
M1 <- train(CompressiveStrength ~ ., data=training, method="lasso")
M1
plot(M1$finalModel, xvar="penalty")
# Question 4
library(lubridate) # For year() function below
url <- "https://d396qusza40orc.cloudfront.net/predmachlearn/gaData.csv"
dat = read.csv(url)
training = dat[year(dat$date) < 2012,]
testing = dat[(year(dat$date)) > 2011,]
tstrain = ts(training$visitsTumblr)
require(forecast)
# Exponential-smoothing state-space model (BATS) forecast.
M <- bats(tstrain)
M
hat <- forecast(M, length(testing$visitsTumblr))
hat <- cbind(testing, data.frame(hat))
# Fraction of test points falling inside the 95% prediction interval.
hat$isIn95 <- hat$Lo.95 < hat$visitsTumblr & hat$visitsTumblr < hat$Hi.95
prop.table(table(hat$isIn95))
# Question 5
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
set.seed(325)
require(e1071)
# Support vector regression; report test-set RMSE.
M <- svm(CompressiveStrength ~ ., data=training)
testing$hat <- predict(M, testing)
testing$error <- testing$CompressiveStrength - testing$hat
rmse <- sqrt(mean(testing$error ^ 2))
|
a7160e10a8f0cb246bcef53f3d531ffdc060ac64
|
d1852a229192aa890f4e7b242c9d06b7b69bfefb
|
/Assignment_5_TimeSeries_Predictions/timeseries_predictions.r
|
27a97591af9dc69a2e526e093dcf831b3e520e60
|
[] |
no_license
|
nandakrishna75/Data-Analytics-Assignments
|
a52e746020e124422496cd02dd31b28e68e8f329
|
65f017f17918c2c3d076206efc2bc7dbee301b33
|
refs/heads/master
| 2020-07-16T11:10:44.129970
| 2019-12-01T06:57:02
| 2019-12-01T06:57:02
| 205,778,502
| 1
| 3
| null | 2019-11-19T12:47:19
| 2019-09-02T04:46:02
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,394
|
r
|
timeseries_predictions.r
|
library("fpp2")
# MOVING AVERAGES
# using the electicity sales of south australia as the dataset
autoplot(elecsales) + xlab("Year") + ylab("GWh") + ggtitle("Annual electricity sales: South Australia")
# plotting and using 2 Moving Average as the prediction model
ma(elecsales, 2)
autoplot(elecsales, series="Data") + autolayer(ma(elecsales,2), series="2-MA") + xlab("Year") + ylab("GWh") + ggtitle("Annual electricity sales: South Australia") +
scale_colour_manual(values=c("Data"="grey50","2-MA"="red"), breaks=c("Data","2-MA"))
# plotting and using 3 Moving Average as the prediction model
ma(elecsales, 3)
autoplot(elecsales, series="Data") + autolayer(ma(elecsales,3), series="3-MA") + xlab("Year") + ylab("GWh") + ggtitle("Annual electricity sales: South Australia") +
scale_colour_manual(values=c("Data"="grey50","3-MA"="red"), breaks=c("Data","3-MA"))
# plotting and using 5 Moving Average as the prediction model
ma(elecsales, 5)
autoplot(elecsales, series="Data") + autolayer(ma(elecsales,5), series="5-MA") + xlab("Year") + ylab("GWh") + ggtitle("Annual electricity sales: South Australia") +
scale_colour_manual(values=c("Data"="grey50","5-MA"="red"), breaks=c("Data","5-MA"))
# plotting and using 10 Moving Average as the prediction model
ma(elecsales, 10)
autoplot(elecsales, series="Data") + autolayer(ma(elecsales,10), series="10-MA") + xlab("Year") + ylab("GWh") + ggtitle("Annual electricity sales: South Australia") +
scale_colour_manual(values=c("Data"="grey50","10-MA"="red"), breaks=c("Data","10-MA"))
#using the 12 moving average prediction model on electric equipment orders dataset
autoplot(elecequip, series="Data") + autolayer(ma(elecequip, 12), series="12-MA") + xlab("Year") + ylab("New orders index") +
ggtitle("Electrical equipment manufacturing (Euro area)") + scale_colour_manual(values=c("Data"="grey","12-MA"="red"), breaks=c("Data","12-MA"))
# SIMPLE EXPONENTIAL SMOOTHING
# using oil spill in saudi arabia from year 1996 as dataset
oildata <- window(oil, start=1996)
autoplot(oildata) + ylab("Oil (millions of tonnes)") + xlab("Year")
# using simple exponential smoothing model (ses)
fc <- ses(oildata, h=5)
round(accuracy(fc),2) #obtaining all accuracy and metric scores rounded to 2 decimal places
# plotting SES model
autoplot(fc) + autolayer(fitted(fc), series="Fitted") + ylab("Oil (millions of tonnes)") + xlab("Year")
|
374eb2c1164b30b8aa7ebe8f8099ea15524edb50
|
9d27bab3a73f7238b864f483f5f409bc156b270d
|
/Unsupervised_Learning_Project.R
|
6296c561c3e85a87515df9b2e5c1ae6c028bd175
|
[] |
no_license
|
aModernExplorer/College_Projects
|
ff8af95341785f87116bcdde0f68b31b15826874
|
e1b8f2c83bdadc747eff0034122589389d217574
|
refs/heads/master
| 2022-12-22T23:29:46.538392
| 2020-09-26T03:17:11
| 2020-09-26T03:17:11
| 295,476,623
| 0
| 0
| null | 2020-09-26T03:17:12
| 2020-09-14T16:35:33
|
R
|
UTF-8
|
R
| false
| false
| 11,091
|
r
|
Unsupervised_Learning_Project.R
|
############################################################################################
# Imports
############################################################################################
# BUG FIX: "Imports" was a bare symbol (missing the leading "#"), which makes
# the script stop with: object 'Imports' not found.
library(plyr)
library(psych)
library(fpc)
library(ggplot2)
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering visualization
library(dendextend) # for comparing two dendrograms
library(purrr)
library(gridExtra)
############################################################################################
# Cleaning
############################################################################################
getwd()
# Read the raw admissions data (path is relative to the working directory).
data <- read.csv("./Unsupervised_Learning_Data", header = TRUE, sep = ",")
summary(data) # Look for anything unusual; an extra column was added that needs deleting
View(data)
data <- data[,-c(1,2)] # Drop the added column and the ID column
View(data) # confirm the columns were removed
# Recode categorical columns to numeric/ordinal codes with plyr::revalue.
# NOTE(review): revalue() maps factor levels to the *character* codes
# "0"/"1"/...; the columns are converted to numeric later via
# as.numeric(as.character(.)) — confirm before treating them as numeric here.
data$Inquired <- revalue(data$Inquired, c(Yes = 1, No = 0)) # revalued Inquired to No = 0, Yes = 1
class(data$Inquired)
mode(data$Inquired)
data$PermntCountry <- revalue(data$PermntCountry, c("United States"=1, "Out"=0)) # 1 = inside the U.S., 0 = outside
class(data$PermntCountry)
mode(data$PermntCountry)
summary(data$DecisionPlan)
# Ordinal coding: Early Decision = 2, Early Action = 1, Regular Decision = 0.
data$DecisionPlan <- revalue(data$DecisionPlan, c("Early Decision I"=2, "Early Decision II"=2, "Early Action I"=1, "Early Action II"=1, "Regular Decision"=0))
View(data)
# Ordinal coding of athletic recruitment rating, 3 (highest) down to 0 (none).
data$SportRating <- revalue(data$SportRating, c("Blue Chip"= 3, "Franchise"=2, "Varsity"=1, "None"=0))
data <- data[, -c(2, 4, 5, 11, 18, 20)] # Drop the non-numeric columns that could
# not be made numeric or ordinal without obstructing the analysis
View(data)
############################################################################################
# Hierarchical Clustering
############################################################################################
hcdat <- data # make a separate dataset for hierarchical clustering
View(hcdat)
sapply(hcdat, class) # check to see that all variables are numeric (which they are not)
hcdat <- sapply(hcdat, function(x) as.numeric(as.character(x))) # convert all columns to numeric while retaining their values
sapply(hcdat, class) # check to see all variables are numeric
View(hcdat)
class(hcdat) # hcdat has become a matrix now, which is fine for the time being
set.seed(345) # setting the seed for reproducible work
# Compare the four classic linkage methods on Euclidean distances.
hc.comp=hclust(dist(hcdat), method = "complete", members = NULL) # hc by complete linkage
hc.average=hclust(dist(hcdat), method = "average", members = NULL) # hc by average linkage
hc.single=hclust(dist(hcdat), method = "single", members = NULL) # hc by single linkage
hc.centr=hclust(dist(hcdat), method = "centroid", members = NULL) # hc by centroid
par(mfrow = c(2, 2))
plot(hc.comp, main="Complete Linkage", cex = .6, hang = -1)
plot(hc.average, main = "Average Linkage", cex = .9, hang = -1)
plot(hc.single, main = "Single Linkage", cex = .9, hang = -1)
plot(hc.centr, main = "Centroid", cex=.9, hang = -1)
# Clustering based off of correlation distance (1 - correlation between observations)
dd=as.dist(1-cor(t(hcdat))) # matrix with the "correlation distance" between observations
plot(hclust(dd, method="complete"), main="Complete Linkage with Correlation-Based Distance", xlab="", sub="") # plot
# Make a scaled (standardised) matrix of hcdat so no variable dominates the distances
scaledhcdat <- scale(hcdat)
View(scaledhcdat)
# Dissimilarity matrix on the scaled data
hcdismat <- dist(scaledhcdat, method = "euclidean")
# Hierarchical clustering using Complete Linkage
hc1 <- hclust(hcdismat, method = "complete" )
plot(hc1, cex=.6, hang = -1)
# Assessing hierarchical models based on different methods for AGNES (agglomerative clustering);
# $ac is the agglomerative coefficient — closer to 1 means stronger structure.
method <- c("average", "single", "complete", "ward") # create a variable that stores method names (code courtesy of UC R Github)
names(method) <- c("average", "single", "complete", "ward") # setting the names of variable method (code courtesy of UC R Github)
assessfunc <- function(x) {
  agnes(scaledhcdat, method = x)$ac
} # agglomerative coefficient of an AGNES fit with method x (code courtesy of UC R Github)
map_dbl(method, assessfunc) # average    single  complete      ward
# 0.9066583 0.9039645 0.9359926 0.9868039
# ward method turns out to be the best
assessfunc1 <- function(x) {
  agnes(hcdismat, method = x)$ac
} # same check, but starting from the precomputed dissimilarity matrix (code courtesy of UC R Github)
map_dbl(method, assessfunc1) # average    single  complete      ward
# 0.9066583 0.9039645 0.9359926 0.9868039
# see results are the same, ward is still the best method
hcw <- agnes(hcdismat, method = "ward") # Make an AGNES cluster based on the ward method
hcwtree <- pltree(hcw, cex = .6, hang = -1, main = "AGNES Ward Dendrogram") # making dendrogram of AGNES algorithm measured by Ward method
hcwtree # viewing the tree created *********************** Not able to read it *********************
# Assessing a hierarchical model based off of the DIANA algorithm (divisive hierarchical clustering)
hcd <- diana(scaledhcdat) # making a DIANA model (code courtesy of UC R Github)
hcd$dc # divisive coefficient of the model = 0.9270291 (not bad but not the best model) (code courtesy of UC R Github)
pltree(hcd, cex = .6, hang = -1, main = "Dendrogram of DIANA") # plotting a tree based off of DIANA algorithm (code courtesy of UC R Github)
# Calculating the optimal number of clusters to apply to the dendrograms
fviz_nbclust(scaledhcdat, FUN = hcut, method = "wss") # no clear elbow => no obvious optimal k (code courtesy of UC R Github)
fviz_nbclust(scaledhcdat, FUN = hcut, method = "silhouette") # 4 clusters looks optimal for the average silhouette method (code courtesy of UC R Github)
# Use the best clustering model (AGNES with ward linkage) and cut it at k=4 and k=7
hcagnesward <- hclust(hcdismat, method = "ward.D2") # hierarchical cluster using the ward method (code help courtesy of UC R Github)
plot(hcagnesward, cex = .6) # (code help courtesy of UC R Github)
rect.hclust(hcagnesward, k = 4, border = 2:5) # draw cluster boxes on the dendrogram (code help courtesy of UC R Github)
plot(hcagnesward, cex = .6)
rect.hclust(hcagnesward, k = 7, border = 2:5)
# Create cluster plots of hcagnesward
plotcut4 <- cutree(hcagnesward, k = 4) # cut tree by 4 clusters
fviz_cluster(list(data = hcdat, cluster = plotcut4)) # view cluster plot (note the points out on the top right of the plot)
describeBy(data, plotcut4)
plotcut7 <- cutree(hcagnesward, k = 7) # cut tree by 7 clusters
fviz_cluster(list(data = hcdat, cluster = plotcut7)) # see that clustering by 7 shows very little dissimilarity
describeBy(data, plotcut7)
############################################################################################
# K-Means Clustering
############################################################################################
set.seed(345)
# Fit k-means with k = 4 at several nstart values and compare the assignments.
# NOTE(review): each model's cluster vector is cbind-ed onto `data`, so later
# fits see the earlier assignment columns as features — likely unintended, but
# preserved here; confirm before relying on mdl2-mdl4.
mdl1 <- kmeans(data, 4, nstart = 45)
data <- cbind(data, mdl1$cluster)
describeBy(data, mdl1$cluster) # Looking at descriptives
mdl2 <- kmeans(data, 4, nstart = 60) # Testing how changing nstart might affect the model
data <- cbind(data, mdl2$cluster)
describeBy(data, mdl2$cluster) # There is a difference between mdl1 and mdl2 clustering
mdl3 <- kmeans(data, 4, nstart = 100) # Testing how changing nstart might affect the model
data <- cbind(data, mdl3$cluster)
describeBy(data, mdl3$cluster) # There is a difference between mdl1, mdl2, and mdl3 clustering
mdl4 <- kmeans(data, 4, nstart = 100, iter.max = 100) # Testing how adding iter.max might affect the model
data <- cbind(data, mdl4$cluster)
describeBy(data, mdl4$cluster)
# Elbow method: total within-cluster sum of squares for k = 1..10.
# vapply (rather than sapply) guarantees a plain numeric vector is returned.
sum_sq <- vapply(1:10,
                 function(k){
                   kmeans(data, k, nstart=100, iter.max = 100)$tot.withinss
                 }, numeric(1)) # store within-group sums of squares for various k
sum_sq # view stored square distances
plot(1:10, sum_sq, type = "b", pch = 1, xlab = "K", ylab ="Within clusters sum of squares")
# Looks like optimal k is 2
# Find optimal k through other methods
View(data)
kmnsdata <- data[, -c(18:21)] # drop the appended cluster-assignment columns
kmnsdata <- sapply(kmnsdata, function(x) as.numeric(as.character(x))) # make all columns numeric
kmnsdata <- scale(kmnsdata) # scale the data for use
mxdat <- as.matrix(kmnsdata) # matrix copy of the data, used by kmeansruns below
# BUG FIX: "crtierion" was misspelled, so kmeansruns silently fell back to its
# default criterion instead of the Calinski-Harabasz index.
calinhara.clustering <- kmeansruns(kmnsdata, krange = 1:10, criterion = "ch", scaledata = TRUE)
calinhara.clustering$bestk # best k according to the Calinski-Harabasz Index
# BUG FIX: "mxdata" was a typo for the matrix created above (mxdat); the
# original line errored with "object 'mxdata' not found".
asw.clustering <- kmeansruns(mxdat, krange = 1:10, criterion = "asw")
asw.clustering$bestk # best k according to average silhouette width
fviz_nbclust(kmnsdata, kmeans, method = "wss") # another way to do the elbow method
fviz_nbclust(kmnsdata, kmeans, method = "silhouette") # optimal k via average silhouette
# Plot the two optimal-k criteria together to visually compare them.
critframe <- data.frame(k=1:10, ch=scale(calinhara.clustering$crit),
                        asw=scale(asw.clustering$crit))
# NOTE(review): melt() comes from reshape2, which is never loaded in this
# script — add library(reshape2) (or use tidyr::pivot_longer) before running.
critframe <- melt(critframe, id.vars=c("k"), variable.name="measure", value.name="score")
ggplot(critframe, aes(x=k, y=score, color=measure)) +
  geom_point(aes(shape=measure)) + geom_line(aes(linetype=measure)) +
  scale_x_continuous(breaks = 1:10, labels = 1:10)
View(kmnsdata)
# Plotting k-means cluster plots for k = 2..5 side by side
KM2 <- kmeans(kmnsdata, centers = 2, nstart = 25) # creating k-means with 2 clusters
KM3 <- kmeans(kmnsdata, centers = 3, nstart = 25) # creating k-means with 3 clusters
KM4 <- kmeans(kmnsdata, centers = 4, nstart = 25) # creating k-means with 4 clusters
KM5 <- kmeans(kmnsdata, centers = 5, nstart = 25) # creating k-means with 5 clusters
kmp2 <- fviz_cluster(KM2, geom = "point", data = kmnsdata) + ggtitle("k = 2") # preparing plot to compare to other k-means
kmp3 <- fviz_cluster(KM3, geom = "point", data = kmnsdata) + ggtitle("k = 3") # preparing plot to compare to other k-means
kmp4 <- fviz_cluster(KM4, geom = "point", data = kmnsdata) + ggtitle("k = 4") # preparing plot to compare to other k-means
kmp5 <- fviz_cluster(KM5, geom = "point", data = kmnsdata) + ggtitle("k = 5") # preparing plot to compare to other k-means
grid.arrange(kmp2, kmp3, kmp4, kmp5, nrow = 2) # plot k-means cluster plots side by side to compare
kmmdl2 <- kmeans(kmnsdata, 2, nstart = 100, iter.max = 100) # k = 2, as suggested by the elbow method
data <- cbind(data, kmmdl2$cluster)
describeBy(data, kmmdl2$cluster)
kmmdl5 <- kmeans(kmnsdata, 5, nstart = 100, iter.max = 100) # k = 5 for comparison
data <- cbind(data, kmmdl5$cluster)
describeBy(data, kmmdl5$cluster)
View(data)
|
da8bf5cb9145ac692d1f8fc2b752446c0296f0b2
|
2b59fe5ace08b332c7d8d486772401f7f1ac16e0
|
/R/design.R
|
7e6a44d36e60ef32d740dd4c3bcb32210d66ad40
|
[] |
no_license
|
neraunzaran/flipMaxDiff
|
5dc04d5408582bf0f712d00cd9e38a51f2861ae4
|
0573ebbb5e0fdaa53933974a2462914d787bf448
|
refs/heads/master
| 2022-12-09T15:45:09.495248
| 2020-09-10T19:33:13
| 2020-09-10T19:33:13
| 288,797,569
| 0
| 0
| null | 2020-08-19T17:41:46
| 2020-08-19T17:41:46
| null |
UTF-8
|
R
| false
| false
| 8,732
|
r
|
design.R
|
#' \code{MaxDiffDesign}
#' @description Creates an experimental design for a MaxDiff experiment.
#' @param number.alternatives The number of alternatives in the experiment. For example, if you are doing a study investigating preferences for 10 brands, then 10 is the number of alternatives.
#' @param number.questions The number of MaxDiff questions to show to respondents. Sawtooth Software suggests that a rough guideline is: \code{Number of questions >= 3 * Number of alternatives / Alternatives per question}.
#' @param alternatives.per.question For example, if you have a study of 10 brands, and in each question you show five brands, asking the respondent to choose the one of the five that they like the most and the one that they like the least, then \code{Alternatives per question = 5}. That is, the number of options shown in each question.
#' @param n.repeats The number of times that the algorithm seeks to find a solution. The higher the number, the greater the chance that the best possible solution is found. For most problems, this makes little difference (i.e., a marginally sub-optimal experimental design will tend not to have any meaningful consequence on the conclusions drawn from the analyses).
#' @param n.versions The number of versions of the experimental design (defaults to 1). Subsequent versions are obtained by permuting the columns of the binary design.
#' @param seed Random number seed for generation of the experimental design.
#' @return The diagnostics list from \code{CheckMaxDiffDesign} for the best
#'   design found; when \code{n.versions > 1}, the per-version designs and
#'   their summary statistics are appended.
#' @import AlgDesign
#' @export
MaxDiffDesign <- function(number.alternatives, number.questions, alternatives.per.question, n.versions = 1, n.repeats = 1000, seed = 1223){
    # Each question must show a strict subset of the alternatives.
    if (alternatives.per.question >= number.alternatives)
        stop("The number of alternatives per question must be less than the number of alternatives.")
    set.seed(seed)
    # Repeatedly run the blocking algorithm and keep the design with the
    # highest D criterion found across n.repeats attempts.
    best.result <- NULL
    best.D <- -Inf
    for (i in seq_len(n.repeats)){
        alg.results <- try(optBlock(~.,withinData=factor(1:number.alternatives),
                                    blocksizes=rep(alternatives.per.question,number.questions),
                                    nRepeats=5000), silent = TRUE)
        if (inherits(alg.results, "try-error"))
            stop("Unable to compute experimental design. It is likely that the inputs are not sensible.")
        if (alg.results$D > best.D)
        {
            best.result <- alg.results
            best.D <- alg.results$D
        }
    }
    # Reshape the flat row allocation into a questions x options matrix.
    design <- matrix(best.result$rows, nrow = number.questions, byrow = TRUE, dimnames = list(Questions = paste("Question", 1:number.questions), Alternatives = paste("Option", 1:alternatives.per.question)))
    result <- CheckMaxDiffDesign(design)
    if (n.versions > 1)
        result <- c(result, multipleVersionDesign(result, n.versions))
    result
}
#' \code{multipleVersionDesign}
#'
#' Creates \code{n.versions} versions of a MaxDiff design; version 1 is the
#' original and each later version permutes the columns of the binary design.
#' @param original The experiment design that is randomized to individual-level versions.
#'   Must be the detailed-output list from \code{CheckMaxDiffDesign}, not a bare matrix.
#' @param n.versions The number of versions of the experimental design (defaults to 1). Subsequent versions are obtained by permuting the columns of the binary design.
#' @return A list with the binary correlations and pairwise frequencies across
#'   all versions, and the stacked designs (columns: Version, Question, options).
multipleVersionDesign <- function(original, n.versions)
{
binary.design <- original$binary.design
number.alternatives <- ncol(binary.design)
# Guard: a bare matrix means the caller passed the design itself rather than
# the detailed-output list.
if (is.matrix(original))
stop("Select 'Detailed outputs' on the experimental design.")
alternatives.per.question <- ncol(original$design)
number.questions <- nrow(original$design)
nrows <- number.questions * n.versions
cnames <- colnames(original$design)
# One row per (version, question); first two columns identify them.
randomized.designs <- matrix(NA,
nrow = nrows, ncol = 2 + alternatives.per.question,
dimnames = list(1:nrows, c("Version", "Question", cnames)))
randomized.designs[, 1] <- rep(1:n.versions, each = number.questions)
randomized.designs[, 2] <- rep(1:number.questions)
randomized.designs[1:number.questions, -1:-2] <- original$design
big.binary.design <- matrix(NA, nrows, number.alternatives, dimnames = list(1:nrows, colnames(binary.design)))
big.binary.design[1:number.questions, ] <- binary.design
# Randomly rearranging the columns for versions 2..n.versions.
# NOTE(review): the seed is hard-coded, so every call yields the same
# permutations and resets the caller's random-number state as a side effect.
set.seed(1223)
for (i in 2:n.versions)
{
rows <- (i - 1) * number.questions + 1:number.questions
d <- binary.design[, sample(1:number.alternatives, number.alternatives, replace = FALSE)]
big.binary.design[rows, ] <- d
d <- t(d)
# Recover the option-per-question layout from the permuted binary design:
# row(d)[d == 1] lists, question by question, the alternatives shown.
design <- matrix(row(d)[d == 1], byrow = TRUE, ncol = alternatives.per.question)
randomized.designs[rows, -1:-2] <- design
}
# Summary statistics across all versions combined.
correlations <- round(cor(big.binary.design), 2)
pairwise.frequencies <- crossprod(big.binary.design)
dimnames(pairwise.frequencies) <- dimnames(correlations) <- list(Alternative = 1:number.alternatives, Alternative = 1:number.alternatives)
list(versions.binary.correlations = correlations,
versions.pairwise.frequencies = pairwise.frequencies,
versions.design = randomized.designs)
}
#' \code{CheckMaxDiffDesign}
#' @description Produces summary statistics for a MaxDiff design.
#' @param design A \code{\link{matrix}}, where each row represents a question or task, and each column
#' shows the alternatives to be shown.
#' @return A list containing \code{binary.design} (question-by-alternative
#'   indicator matrix), \code{design} (the input as a matrix),
#'   \code{frequencies} (appearances per alternative),
#'   \code{pairwise.frequencies} (co-appearance counts), and
#'   \code{binary.correlations} (rounded pairwise correlations).
#' @export
CheckMaxDiffDesign <- function(design)
{
    design <- as.matrix(design)
    number.questions <- nrow(design)
    number.alternatives <- max(design)
    alternatives.per.question <- ncol(design)
    # Indicator matrix: entry (q, a) is 1 when alternative a is shown in question q.
    binary.design <- matrix(0,number.questions,number.alternatives, dimnames = list(Question = paste("Question", 1:number.questions), Alternative = 1:number.alternatives))
    colnames(binary.design) <- c("Alternative 1", 2:number.alternatives)
    if (number.questions < 3 * number.alternatives / alternatives.per.question)
        warning(paste0("You have specified ", number.questions, " questions. It is sometimes recommended that number.questions >= 3 * number.alternatives / alternatives.per.question (i.e., that you should have at least ", ceiling(3 * number.alternatives / alternatives.per.question), " questions)."))
    for (q in 1:number.questions)
        binary.design[q, design[q, ]] <- 1
    n.appearances.per.alternative <- table(as.numeric(design))
    if ((min.a <- min(n.appearances.per.alternative)) < 3)
        warning(paste0("One or more of the alternatives appears only ", min.a, " time(s). A common recommendation is that each alternative should appear 3 times. You can review the frequencies by viewing the detailed outputs."))
    if (min.a != max(n.appearances.per.alternative))
        warning(paste0("The design is not balanced. That is, some alternatives appear more frequently than others. You can review the frequencies by viewing the detailed outputs."))
    correlations <- round(cor(binary.design), 2)
    cors <- abs(correlations[lower.tri(correlations)])
    cor.max <- max(cors, na.rm = TRUE)
    cor.min <- min(cors, na.rm = TRUE)
    # NA correlations arise when an alternative's indicator column is constant
    # (it appears in every question, or never).
    # BUG FIX: the message previously claimed the correlations "are zero"
    # although the condition tests for NA, and contained a typo
    # ("of the cause" instead of "if the cause").
    if (any(is.na(cors)))
        warning("Some of the binary correlations could not be computed. This is only a problem if the cause is not that an alternative always appears in the design.")
    if (cor.max > 0.5)
        warning(paste0("The largest binary absolute correlation is ", cor.max, ". You should consider having more questions. You can review the binary correlations by viewing the detailed outputs."))
    if (cor.min != cor.max)
        warning(paste0("The absolute value of the correlations varies from ", cor.min, " to ", cor.max, ". This may not be a problem, but ideally the absolute value of the correlations should be constant (this is not always possible). Consider increasing the number of questions."))
    pairwise.frequencies <- crossprod(binary.design)
    min.pairwise <- min(pairwise.frequencies)
    if (min.pairwise == 0)
        warning(paste0("Some alternatives never appear together. You can review the pairwise frequencies by viewing the detailed outputs."))
    # Ratio of co-appearances to total appearances; a lower-triangle ratio of 1
    # means two alternatives only ever appear together.
    appearance.ratios <- sweep(pairwise.frequencies, 1, n.appearances.per.alternative, "/")
    if (any(appearance.ratios[lower.tri(appearance.ratios)] == 1))
        warning(paste0("Some alternatives only ever appear together. You can review the pairwise frequencies by viewing the detailed outputs."))
    dimnames(pairwise.frequencies) <- dimnames(correlations) <- list(Alternative = 1:number.alternatives, Alternative = 1:number.alternatives)
    list(binary.design = binary.design,
         design = design,
         frequencies = n.appearances.per.alternative,
         pairwise.frequencies = pairwise.frequencies,
         binary.correlations = correlations)
}
|
21f2cc220ce3c30b75859cde36a6271beaffa424
|
15b4217a06d73b3dc946df4e26704bf1b597cd91
|
/scripts/foredrag-tromso-feb-2015.R
|
852cf20c5c4179308e47a37a4725073aec664f09
|
[] |
no_license
|
hegemb/first-test
|
8c1c14cd12db39e60d57cbe2c59d931fb1ffda0c
|
395bfcccc4ac43f8fd0695efd95324a94e93e423
|
refs/heads/master
| 2021-01-10T11:38:55.274259
| 2016-02-24T14:20:56
| 2016-02-24T14:20:56
| 52,445,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,363
|
r
|
foredrag-tromso-feb-2015.R
|
## -------- FUNCTION ------------
## Make plots, function:
## Pick out one random gene to investigate; this global `gene` is used as the
## gene argument for the geneWiseMDSplots() calls below.
set.seed(1)
gene <- featureNames(eset.noNorm)[sample(1:nrow(eset.noNorm),1)] ## Randomly chosen feature.
## With seed 1 this was: "cl36CSOeh6ule_H8uE"
# Draw gene-wise box plots (split by plate, chip and run) plus MDS plots for
# one gene into a PDF under resultPath (global). `aeset`: expression set
# covering runs 1-3; `name`: tag used in the file name and plot titles;
# `gene`: feature to plot.
#
# BUG FIX: the default was `gene = gene`, a recursive default-argument
# reference that errors ("promise already under evaluation") whenever the
# argument is omitted — as in the two-argument calls in this script. The
# default now explicitly falls back to the global `gene` chosen above.
geneWiseMDSplots <- function(aeset, name, gene = get("gene", envir = globalenv())){
  pdf(file.path(resultPath,paste0("geneWisePlot_MDS_all_3_runs_",name,".pdf")),width=15)
  par(mfrow=c(2,2));
  plotGeneWiseBoxPlot(aeset, colLabel="Case_ctrl", batchLabel="New_plate", gene=gene, legend=TRUE, main=paste(name,"Run 1-3, \n Plate (=Day), gene",gene));
  abline(h=0)
  plotGeneWiseBoxPlot(aeset, colLabel="Case_ctrl", batchLabel="Chip", gene=gene, legend=TRUE, main=paste(name,"Run 1-3, \n Chip type, gene,",gene));
  abline(h=0)
  plotGeneWiseBoxPlot(aeset, colLabel="Case_ctrl", batchLabel="Run", gene=gene, legend=TRUE, main=paste(name,"Run 1-3, \n Run, gene",gene));
  abline(h=0)
  ## MDS plots coloured by each batch variable in turn.
  ## NOTE(review): the titles "Chip type" and "Run" look swapped relative to
  ## the colLabel arguments below — confirm before trusting the labels.
  par(mfrow=c(2,2))
  plotMDS(aeset, colLabel="New_plate", symLabel="Case_ctrl", main=paste(name,"Run 1-3, \n Plate (=Day)"))
  plotMDS(aeset, colLabel="Run", symLabel="Case_ctrl", main=paste(name,"Run 1-3, \n Chip type"));
  plotMDS(aeset, colLabel="Chip", symLabel="Case_ctrl", main=paste(name,"Run 1-3, \n Run"));
  dev.off()
}
## -------- END FUNCTION ------------
## Make plots (these two-argument calls rely on the default `gene`):
geneWiseMDSplots(eset.noNorm, "noNorm")
geneWiseMDSplots(d.eset.noNorm, "CCdiff_noNorm")
############################
### NORMALIZE DATA #########
############################
## Perform quantile normalization on each run separately and merge,
## or merge first and then normalize?
## Remember that the data eset.noNorm are already log2-transformed.
normData <- lumiN(eset.noNorm,method="quantile")
## Take the difference between case and control (matched by lab number):
d.normData<- normData[,case_labnr]
exprs(d.normData) <- exprs(normData)[,case_labnr] - exprs(normData)[,ctrl_labnr]
##############################
## Make new plots with
## normalized data
##############################
geneWiseMDSplots(normData, "qNorm")
geneWiseMDSplots(d.normData, "CC-diff_qNorm") ## Case-control DIFFERENCE
## Density plots for all runs:
## plot density for all individuals in one plot, colour by run:
## Have to first plot run 1, then run 3, then run 2 to see them all:
## Plot, for every sample, the density of its (case - control) log2
## expression values in a single overlaid figure, coloured by run; the PDF is
## written under resultPath (global). `aeset`: expression set; `name`: tag
## used in the output file name and plot title.
densityAllRuns <- function(aeset,name){
  d.uu <- split(sampleNames(aeset),pData(aeset)$Run) # sample names grouped by run
  order.run <- c(3,1,2) # draw run 3 first, then 1, then 2, so all stay visible
  col.run <- c("blue","green","red") # one colour per run, indexed by run number
  pdf(file.path(resultPath,paste0("density_all_3_runs_CCdiff_",name,".pdf")),width=9)
 # Earlier variant (one panel per run) kept for reference:
 # par(mfrow=c(2,2))
 # i <- 1 ## plot first density outside loop for run1, then add the remainder densities as "lines" (see code below).
 # #plot(density(exprs(aeset)[,d.uu[[i]][1]]),xlab="",main = paste("log2(case) - log2(ctrl),",name),lwd=.5,xlim=c(-3,3),ylim=c(0,4))
 # for (i in order.run){
 #   plot(density(exprs(aeset)[,d.uu[[i]][1]]),xlab="",main = paste("Run",i,"\n log2(case) - log2(ctrl),",name),lwd=.5,xlim=c(-3,3),ylim=c(0,4))
 #   for (j in d.uu[[i]]){
 #     lines(density(exprs(aeset)[,j]),lwd=.5,col=col.run[i])
 #   }
 # }
 #
  ## All plots on top of each other:
  i <- 1 ## plot the first density outside the loop, then add the remainder as "lines" (see code below).
  plot(density(exprs(aeset)[,d.uu[[i]][1]]),xlab="",main = paste("log2(case) - log2(ctrl),",name),lwd=.5,xlim=c(-3,3),ylim=c(0,4))
  for (i in order.run){
    #plot(density(exprs(aeset)[,d.uu[[i]][1]]),xlab="",main = paste("log2(case) - log2(ctrl),",name),lwd=.5,ylim=c(0,1.3))#,xlim=c(-3,3),ylim=c(0,4))
    for (j in d.uu[[i]]){
      lines(density(exprs(aeset)[,j]),lwd=.5,col=col.run[i])
    }
  }
  dev.off()
}
# Density overlays for each normalisation variant.
densityAllRuns(d.normData,"quantile_normalized2")
densityAllRuns(d.eset.noNorm, "no_normalization")
# NOTE(review): d.combat.normData is only created further down in this script
# (ComBat section), so this call fails unless the file was already run once.
densityAllRuns(d.combat.normData, "combat_adjusted_quantile_normalized")
densityAllRuns(eset.noNorm, "Test_ind_no_normalization")
densityAllRuns(normData, "Test_ind_quantile_normalization")
###############################
## Normalize before we merge:
###############################
# Quantile-normalise each run separately and then merge, for comparison with
# the merge-then-normalise approach above.
normData1 <- lumiN(eset1,method="quantile")
normData2 <- lumiN(eset2,method="quantile")
normData3 <- lumiN(eset3,method="quantile")
esets.preNorm <- list(normData1, normData2, normData3)
## Merge:
preNormData <- merge(esets.preNorm)
# Re-level the plate factor so plates sort numerically 1..18.
pData(preNormData)$New_plate <- factor(pData(preNormData)$New_plate,as.character(c(1:9,10:18)))
levels(pData(preNormData)$New_plate)
## Take the difference between case and control:
d.preNormData<- preNormData[,case_labnr]
exprs(d.preNormData) <- exprs(preNormData)[,case_labnr] - exprs(preNormData)[,ctrl_labnr]
## Make plots:
# BUG FIX: the arguments were passed as (aeset, gene, name), but the signature
# of geneWiseMDSplots is (aeset, name, gene) — the gene ID ended up in the PDF
# file name and plotting failed on gene = "pre_qNorm".
geneWiseMDSplots(preNormData, "pre_qNorm", gene)
geneWiseMDSplots(d.preNormData, "CC-diff_pre_qNorm", gene)
#############################
## Try Combat
############################
## Merge the three prenormalized runs and apply ComBat batch adjustment
## (plate as batch, intercept-only model).
batch <- pData(normData)$New_plate
modcombat <- model.matrix(~1, data=pData(normData))
combat_edata <- ComBat(dat=exprs(normData), batch=batch, mod=modcombat)
combat.normData <- normData
exprs(combat.normData) <- combat_edata ## Add combat adjusted data.
# BUG FIX: arguments re-ordered to match geneWiseMDSplots(aeset, name, gene);
# the original passed the gene ID as the name and the name as the gene.
geneWiseMDSplots(combat.normData, "combat_qNorm", gene)
## CC-Difference:
aeset <- combat.normData
tmp <- aeset[,case_labnr]
exprs(tmp) <- exprs(aeset)[,case_labnr] - exprs(aeset)[,ctrl_labnr]
d.combat.normData <- tmp
geneWiseMDSplots(d.combat.normData, "CCdiff_combat_qNorm", gene)
## ComBat applied directly to the case-control difference:
d.batch <- pData(d.normData)$New_plate
d.modcombat <- model.matrix(~1, data=pData(d.normData))
d.combat_edata <- ComBat(dat=exprs(d.normData), batch=d.batch, mod=d.modcombat)
combat.D.normData <- d.normData
exprs(combat.D.normData) <- d.combat_edata ## Add combat adjusted data.
############################
## Mean centering
###########################
# Batch-mean-centre the quantile-normalised data by plate.
# NOTE(review): bmcFunc is not defined in this file — presumably sourced from
# elsewhere in the project; confirm before running.
bmc.normData <- bmcFunc(normData,"New_plate")
## Find the case-control difference:
d.bmc.normData<- bmc.normData[,case_labnr]
exprs(d.bmc.normData) <- exprs(bmc.normData)[,case_labnr] - exprs(bmc.normData)[,ctrl_labnr]
######### PLOT
## Plot gene-wise box plots for one gene under the four different
## normalization/batch adjustment methods on one page.
## Patch copies of plotGeneWiseBoxPlot so each panel uses fixed y-axis limits.
## NOTE(review): this replaces statements 14 and 15 of the function body by
## position (assumed to be the min_y/max_y assignments) — fragile across
## package versions; the as.list(body(...)) calls below are for inspecting
## the body interactively to verify the positions.
plotGeneWiseBoxPlot2 <- plotGeneWiseBoxPlot
as.list (body(plotGeneWiseBoxPlot))
body(plotGeneWiseBoxPlot2)[[14]] <- substitute(min_y <- 4)
body(plotGeneWiseBoxPlot2)[[15]] <- substitute(max_y <- 9)
plotGeneWiseBoxPlot3 <- plotGeneWiseBoxPlot
as.list (body(plotGeneWiseBoxPlot))
body(plotGeneWiseBoxPlot3)[[14]] <- substitute(min_y <- -3)
body(plotGeneWiseBoxPlot3)[[15]] <- substitute(max_y <- 2)
#listEsets <- list(eset.noNorm, normData, bmc.normData, combat.normData)
## The earlier random gene plus three more random features:
set.seed(16)
genes <- c(gene, featureNames(eset.noNorm)[sample(1:nrow(eset.noNorm),3)])
name <- "All-adjust-methods-Plate-adjusted"
pdf(file.path(resultPath,paste0("geneWisePlot_PCA_all_3_runs_",name,".pdf")),width=15)
par(mfrow=c(2,2));
for (gg in genes){
plotGeneWiseBoxPlot2(eset.noNorm, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("No normalization \n gene",gg));
abline(h=0)
plotGeneWiseBoxPlot2(normData, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("Quantile normalized"));
abline(h=0)
plotGeneWiseBoxPlot3(bmc.normData, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("Quantile normalized \n Mean-centering plate batch adjusted"));
abline(h=0)
plotGeneWiseBoxPlot2(combat.normData, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("Quantile normalized \n Combat plate batch adjusted"));
abline(h=0)
}
dev.off()
## Case-control difference (shared y-limits of -3..3):
plotGeneWiseBoxPlot4 <- plotGeneWiseBoxPlot
as.list (body(plotGeneWiseBoxPlot))
body(plotGeneWiseBoxPlot4)[[14]] <- substitute(min_y <- -3)
body(plotGeneWiseBoxPlot4)[[15]] <- substitute(max_y <- 3)
name <- "All-adjust-methods-Plate-adjusted-CCdiff"
pdf(file.path(resultPath,paste0("geneWisePlot_PCA_all_3_runs_",name,".pdf")),width=15)
par(mfrow=c(2,2));
for (gg in genes){
plotGeneWiseBoxPlot4(d.eset.noNorm, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("CC-difference, No normalization \n gene",gg));
abline(h=0)
plotGeneWiseBoxPlot4(d.normData, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("CC-difference, Quantile normalized"));
abline(h=0)
plotGeneWiseBoxPlot4(d.bmc.normData, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("CC-difference, Quantile normalized \n Mean-centering plate batch adjusted"));
abline(h=0)
plotGeneWiseBoxPlot4(d.combat.normData, colLabel="Case_ctrl", batchLabel="New_plate", gene=gg, legend=TRUE, main=paste("CC-difference, Quantile normalized \n Combat plate batch adjusted"));
abline(h=0)
}
dev.off()
########## PCA Plots:
## Run PCA on the samples and return a list of four PC1-vs-PC2 ggplot scatter
## plots, coloured by plate, run, chip and lab personnel respectively.
## `aeset`: expression set; `name`: text appended to each plot title.
pcaPlots <- function(aeset, name){
#require(ggplot)
ex <- t(exprs(aeset)) # transpose so rows index samples (assumes exprs() is feature x sample — the standard Biobase layout)
n <- nrow(ex) # number of samples (used in titles)
pcaResult <- prcomp(ex)
pcData <- data.frame(pcaResult$x)
## look at how much the first few pc-components explain of the variation in the data:
pcVar <- (pcaResult$sdev)^2 / sum(pcaResult$sdev^2) ## proportion of variance per component.
## symmetric, shared axis limits covering the range of PC1 and PC2:
y.max<- x.max <- max(abs(c(max(pcData[1:2]), min(pcData[1:2]))))
y.min <- x.min <- (- y.max)
#pdf(file.path(resultPath,paste("pca-plot-n=",n,"all-3-runs-breastcancer",name,".pdf",sep="")))
pl1 <- ggplot(pcData,aes(x=PC1,y=PC2)) + geom_point(aes(color=as.factor(pData(aeset)$New_plate)))+ ggtitle(paste("PCA plot of",n,"individuals. Colored by Plate. \n",name)) + xlim(x.min,x.max) + ylim(y.min,y.max) + xlab(paste("PC1 (",round(pcVar[1]*100)," %)")) + ylab(paste("PC2 (",round(pcVar[2]*100)," %)"))
pl2 <- ggplot(pcData,aes(x=PC1,y=PC2)) + geom_point(aes(color=as.factor(pData(aeset)$Run)))+ ggtitle(paste("PCA plot of",n,"individuals.Colored by Run. \n",name)) + xlim(x.min,x.max) + ylim(y.min,y.max) + xlab(paste("PC1 (",round(pcVar[1]*100)," %)")) + ylab(paste("PC2 (",round(pcVar[2]*100)," %)"))
pl3 <- ggplot(pcData,aes(x=PC1,y=PC2)) + geom_point(aes(color=as.factor(pData(aeset)$Chip)))+ ggtitle(paste("PCA plot of",n,"individuals. Colored by Chip. \n",name)) + xlim(x.min,x.max) + ylim(y.min,y.max) + xlab(paste("PC1 (",round(pcVar[1]*100)," %)")) + ylab(paste("PC2 (",round(pcVar[2]*100)," %)"))
pl4 <- ggplot(pcData,aes(x=PC1,y=PC2)) + geom_point(aes(color=as.factor(pData(aeset)$User_ID)))+ ggtitle(paste("PCA plot of",n,"individuals. Colored by lab personnel. \n",name)) + xlim(x.min,x.max) + ylim(y.min,y.max) + xlab(paste("PC1 (",round(pcVar[1]*100)," %)")) + ylab(paste("PC2 (",round(pcVar[2]*100)," %)"))
ppp <- list(pl1, pl2, pl3, pl4)
return(ppp)
#dev.off()
}
## PCA panels (plate / run / chip / personnel) for each normalization
## stage of the full expression data.
pl1 <- pcaPlots(eset.noNorm,"no normalization")
pl2 <- pcaPlots(normData,"quantile normalized")
pl3 <- pcaPlots(bmc.normData,"quantile normalized and batch adjusted with mean centering")
pl4 <- pcaPlots(combat.normData,"quantile normalized and batch adjusted with ComBat")
# NOTE(review): `name` is not defined at this scope in the visible code
# (it is only a pcaPlots() argument); the pdf filename below may rely on
# a leftover global from an earlier session -- verify.
pdf(file.path(resultPath,paste("pca-plot-all-3-runs-breastcancer",name,".pdf",sep="")),width=15)
## One page per normalization stage, 2x2 grid of the four colorings.
grid.arrange(pl1[[1]], pl1[[2]], pl1[[3]],pl1[[4]], ncol=2)
grid.arrange(pl2[[1]], pl2[[2]], pl2[[3]],pl2[[4]], ncol=2)
grid.arrange(pl3[[1]], pl3[[2]], pl3[[3]],pl3[[4]], ncol=2)
grid.arrange(pl4[[1]], pl4[[2]], pl4[[3]],pl4[[4]], ncol=2)
dev.off()
## Case control difference:
## Same four stages, this time on the case-control difference data sets.
pl1 <- pcaPlots(d.eset.noNorm,"no normalization")
pl2 <- pcaPlots(d.normData,"quantile normalized")
pl3 <- pcaPlots(d.bmc.normData,"quantile normalized and batch adjusted with mean centering")
pl4 <- pcaPlots(d.combat.normData,"quantile normalized and batch adjusted with ComBat")
pdf(file.path(resultPath,paste("pca-plot-all-3-runs-breastcancer-CCdiff",name,".pdf",sep="")),width=15)
grid.arrange(pl1[[1]], pl1[[2]], pl1[[3]],pl1[[4]], ncol=2)
grid.arrange(pl2[[1]], pl2[[2]], pl2[[3]],pl2[[4]], ncol=2)
grid.arrange(pl3[[1]], pl3[[2]], pl3[[3]],pl3[[4]], ncol=2)
grid.arrange(pl4[[1]], pl4[[2]], pl4[[3]],pl4[[4]], ncol=2)
dev.off()
# NOTE(review): pcData, x.min/x.max, y.min/y.max, pcVar and aeset are
# locals of pcaPlots() and are not defined at top level in the visible
# code -- this final plot likely only worked in an interactive session
# with those objects left in the workspace; verify before rerunning.
pdf(file.path(resultPath,"pca-cc-diff-all-3-runs.pdf"),width=12)
ggplot(pcData,aes(x=PC1,y=PC2)) + geom_point(aes(color=as.factor(pData(aeset)$New_plate)))+ ggtitle(paste("PCA plot of",n,"individuals.\n Colored by Plate.")) + xlim(x.min,x.max) + ylim(y.min,y.max) + xlab(paste("PC1 (",round(pcVar[1]*100)," %)")) + ylab(paste("PC2 (",round(pcVar[2]*100)," %)"))
dev.off()
|
cc4308aea629ee2f518f8dd5f6180e92660493dd
|
91b8791e9338a8e561a1f49e369c5188b8b45c30
|
/Documents/CleanData/run_analysis.R
|
1412cc4cbdc919f90950c3051767f2ce7eb45be3
|
[] |
no_license
|
hiladg/ExData_Plotting1
|
be9546a569df32a6a7e093a4d5cba23b74fddc1e
|
35d94b7bca5ffb8676a6e428d5c71c1a5c52d3c4
|
refs/heads/master
| 2020-12-31T03:02:48.443658
| 2015-04-12T16:47:54
| 2015-04-12T16:47:54
| 33,824,337
| 0
| 0
| null | 2015-04-12T16:29:14
| 2015-04-12T16:29:14
| null |
UTF-8
|
R
| false
| false
| 3,888
|
r
|
run_analysis.R
|
## run_analysis.R
## Builds a tidy data set from the UCI HAR (Samsung accelerometer) data:
##   1. reads the train/test measurement, activity and subject files,
##   2. keeps only the mean/std features (meanFreq included, clause 2),
##   3. merges train and test, labels activities and columns,
##   4. writes per-subject / per-activity averages to project3.txt.
## Please note that the operating system is Windows! (clause 1)
##
## Fixes vs. the original script: statements reordered so every object
## exists before use (features were grep'd before being read; the
## feature subset was taken from AllTrain.df/AllTest.df before they were
## created -- a circular reference), the activity factor is applied to
## column 2 (column 1 is the subject, column 2 the activity code; the
## original indexed column 4), the `r3$Ac tivity` / `r4$Ac tivity`
## syntax errors are corrected, plyr is loaded explicitly for ddply(),
## and write.table's argument is spelled out as row.names.
library(plyr)

## Base directory of the unpacked archive.
base.dir <- "./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset"

## For both X and Y files: Y holds the activity code, X the measurements.
y_train <- read.table(file.path(base.dir, "train/Y_train.txt"), header = FALSE)
x_train <- read.table(file.path(base.dir, "train/X_train.txt"), header = FALSE)
y_test  <- read.table(file.path(base.dir, "test/Y_test.txt"), header = FALSE)
x_test  <- read.table(file.path(base.dir, "test/X_test.txt"), header = FALSE)

## Subject identifiers, one row per measurement row.
subject_data <- read.table(file.path(base.dir, "train/subject_train.txt"))
test_data    <- read.table(file.path(base.dir, "test/subject_test.txt"))

## Feature (column) names for the X files; convert factor columns to
## character so the names can be used directly as column labels.
features <- read.table(file.path(base.dir, "features.txt"))
i <- sapply(features, is.factor)
features[i] <- lapply(features[i], as.character)

## We want only the features whose name contains "mean" or "std".
## meanFreq is deliberately included (clause 2), as it is a mean.
c1 <- grep("mean", features[, 2])
c2 <- c(c1, grep("std", features[, 2]))

## Subset the measurement tables before binding anything together.
x_train.sub <- x_train[c2]
x_test.sub  <- x_test[c2]

## Merge: subject + activity + selected measurements; train, then test.
AllTrain.df <- cbind(subject_data, y_train, x_train.sub)
AllTest.df  <- cbind(test_data, y_test, x_test.sub)
TrainAndTest.df <- rbind(AllTrain.df, AllTest.df)

## Use descriptive activity names; the activity code is column 2.
## Since it is turned into a factor, the labels replace the codes 1..6.
TrainAndTest.df[, 2] <- factor(TrainAndTest.df[, 2],
                               labels = c("WALKING", "WALKING_UPSTAIRS",
                                          "WALKING_DOWNSTAIRS", "SITTING",
                                          "STANDING", "LAYING"))

## Label every column: subject, activity, then the selected feature names.
MyColNames <- c("subject", "activity", features[c2, 2])
colnames(TrainAndTest.df) <- MyColNames

## Now we create the new & tidy data set that includes the average of
## each variable for each activity and each subject: split by activity,
## then average every measurement column by subject.
splitData <- split(TrainAndTest.df, TrainAndTest.df[, 2])
avg.by.subject <- function(x) colMeans(subset(x, select = -activity))
r1 <- ddply(splitData$WALKING,            "subject", avg.by.subject)
r2 <- ddply(splitData$WALKING_UPSTAIRS,   "subject", avg.by.subject)
r3 <- ddply(splitData$WALKING_DOWNSTAIRS, "subject", avg.by.subject)
r4 <- ddply(splitData$SITTING,            "subject", avg.by.subject)
r5 <- ddply(splitData$STANDING,           "subject", avg.by.subject)
r6 <- ddply(splitData$LAYING,             "subject", avg.by.subject)

## Restore the activity column that colMeans dropped.
r1$Activity <- "WALKING"
r2$Activity <- "WALKING_UPSTAIRS"
r3$Activity <- "WALKING_DOWNSTAIRS"
r4$Activity <- "SITTING"
r5$Activity <- "STANDING"
r6$Activity <- "LAYING"

## And now for submission.
write.table(rbind(r1, r2, r3, r4, r5, r6), row.names = FALSE,
            file = "project3.txt")
|
838bd8465286fa9354f7e51b6111eb59b4998cbb
|
1eeaa7b3dc0411d3788c3cdf8da57b97cb93d5ab
|
/single-extract.R
|
d1d4f3021263cea0b194514a4c4fb421414a5f9e
|
[] |
no_license
|
CBUFLM/topographic-distance
|
d174c508318df84c2f743302736b277373a5817c
|
74b84286e7147f592b2d361a2235c4ac254e28b3
|
refs/heads/master
| 2020-03-28T04:26:35.045116
| 2011-03-08T19:18:56
| 2011-03-08T19:18:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,089
|
r
|
single-extract.R
|
#!/usr/bin/env Rscript
## single-extract.R -- sample DEM elevations along lake-to-lake lines to
## estimate topographic distances.  The timing notes preserved in the
## comments record why a full pairwise distance matrix is impractical
## with raster::extract() and suggest GRASS as an alternative.
require('raster')
require('rgdal')
# the original data they sent on is all in NAD27, may want to check if they're
# measuring things in a different datum for consistency
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# all paths below are relative to this directory.
setwd("~/d/education/frog-pathogen")
## Digital elevation model (meters), ESRI grid format.
r <- raster("data/seki/dem/elev_met/hdr.adf")
# Read in the polygon shapefile, reshape the centroid coordinates into coordinate pairs, to
# be fed into a SpatialLines object -- this can be saved as a shapefile with sp, as an intermediate product.
lines <- readOGR(dsn="data/generated/single.shp", layer="single")
# compute the full line matrix
#for (i in 1:nrow(lines)) {
# NOTE(review): the body of this loop is entirely commented out, so the
# loop is a no-op -- kept only for the exploratory notes it contains.
for (i in 1:2) {
# get two consecutive points for calculating the line. To do the distance matrix, you'd need
# to compute the full m*n == (m*m-1)/2 set of lines.
#lines[[i]] <- Lines(Line(rbind(c(lakes$X_CENTER[i], lakes$Y_CENTER[i]), c(lakes$X_CENTER[i+1], lakes$Y_CENTER[i+1]))), ID = as.character(i))
#attr <- rbind(attr, c(ID=as.character(i), from_id=lakes$LAKEID[i], to_id=lakes$LAKEID[i+1]))
#tmp <- SpatialLines(lines[[i]]) #SpatialLines(list(), i)))
# running this for 18 distances takes ~5minutes! That's ~17s per distance,
# with a full matrix we'd have 125000 calculations, or 590hr to run the whole set, which is 24 days...
# actually, its worse than this: we'd have 7.1M calculations across the full set of lakes, though perhaps filteirng would reduce this to a more reasonable quantity...
#system.time(elevations <- extract(r, tmp))
}
## Extract the DEM values under every line feature (the slow step);
## system.time() prints how long it took.
system.time(elevations <- extract(r, lines))
# 392.63 0.45 396.41
# my guess is this is probably an order of magnitude faster in GRASS...
# our 'z' values
#elevations <- extract(r, lines)
# x and y can be pulled from the sp object containing the shapefile itself
# TODO actually, this only pulls the coordinates for the _points_ need the coordinates from the extract locations...
# actually smackually, we don't even need this -- the sampling grid is COMPLETELY REGULAR so the x and y components are always the same... only Z is varying. So we just need to do the maths for the paths, and we're g2g.
|
1ab8b749fdcbe9b4f4245ebaddfeb198f8aa1fa4
|
d92c025b8495184365b4555c270abfda69473c2b
|
/man/mat2coords.Rd
|
59e75ac1a3cfe52cbcaa18da913ba58ef3e3dae5
|
[] |
no_license
|
npetraco/feature2
|
9877ff6117edd173ea432c17ef273f3c6d84437f
|
09c113fdb31b9a3ba3fd3570bb3013b7a191c8d6
|
refs/heads/master
| 2021-03-30T15:47:02.853302
| 2017-06-26T02:57:29
| 2017-06-26T02:57:29
| 40,421,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 639
|
rd
|
mat2coords.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/mat2coords.R
\name{mat2coords}
\alias{mat2coords}
\title{mat2coords}
\usage{
mat2coords(dmat, x.phys.max = NULL, y.phys.max = NULL)
}
\arguments{
\item{dmat}{A matrix of points.}
\item{x.phys.max}{XXXX}
\item{y.phys.max}{XXXX}
\item{num.x.pts}{Number of points desired in the x-direction.}
\item{num.slices}{Number of points desired in the y-direction.}
}
\value{
XXXX
}
\description{
Convert a surface or image matrix to xyz coordinates
}
\details{
Output x y coordinates can be either indices or physical.
}
\examples{
XXXX
}
\references{
XXXX
}
|
2e4ff85cd24e8877d9389d85e2a5beef36020f38
|
3f90e417415a4b2808bbe4f4cff7601c2272a94e
|
/expanding(1).R
|
f162cd00fca8645b4422878fcf072b0379fc54da
|
[] |
no_license
|
nuke705/MiscR
|
f977dcca8ce89e8e73c3b9bef8b5c0c8dd4f6a31
|
8c6005ed053925de0434c084ab5456ea26b6a22f
|
refs/heads/master
| 2020-04-03T07:57:22.358605
| 2018-10-28T21:49:52
| 2018-10-28T21:49:52
| 155,118,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,932
|
r
|
expanding(1).R
|
#expanding
## expanding(1).R -- expanding-window backtest of a regression-tree
## trading strategy on weekly index returns.
##
## Outline:
##   1. Fit an rpart regression tree on an expanding window (48 refits,
##      4 new weeks added per refit) to predict `Next 1 W Index Return`.
##   2. Record out-of-sample predictions plus in-sample prediction
##      quantiles, used later as position-sizing cutoffs.
##   3. Translate predictions into leveraged long/short allocations,
##      compound weekly returns, report annualized return, volatility
##      and hit ratio.
library(rpart)
library(readxl)
library(plyr)
# NOTE(review): hard-coded setwd() makes the script machine-specific.
setwd("/Users/admin/Desktop/msfe/principal practicum/week11/hw11")
ndata<-read_excel("final_data.xlsx")
## pretotal accumulates out-of-sample predictions; seeded with NA that
## is stripped after the loop.
pretotal=NA
# NOTE(review): `return` used as a plain variable name -- legal but
# easy to misread; it is overwritten again below before the compounding.
return=rep(0,48)
## cutoff[i, ] holds the 80/60/40/20% quantiles of window i's in-sample
## tree predictions.
cutoff=matrix(data=0,nrow=48,ncol=4)
## 48 expanding-window refits: training set grows by 4 rows per step,
## test set is the next 4 rows (weeks 385..576 overall).
for (i in 1:48) {
ndata_train = ndata[1:(384 + 4*(i-1)),]
ndata_test = ndata[(385 + 4*(i-1)):(384 + 4*i),]
# minbucket scales with the training-window size (~1/13 of it).
tc <- rpart.control(minsplit=50,minbucket=(384+4*(i-1))/13,maxdepth=10,xval=5,cp=0.005)
tree=rpart(`Next 1 W Index Return`~`Factor 1` + buf+HiborD+ESI,data=ndata_train,control=tc)
#summary(tree)
#plot(tree, uniform=TRUE, main="Regression Tree for Mileage ")
#text(tree, use.n=TRUE, all=TRUE, cex=.8)
pre=predict(tree,ndata_test)
pretotal=c(pretotal,pre)
pre_insample=predict(tree,ndata_train)
## The tree predicts a small set of distinct leaf values; take the
## quantiles of those values as this window's allocation cutoffs.
v= table(pre_insample)
pre_outcome=as.numeric(names(v))
cutoff[i,1]=quantile(pre_outcome,0.8)
cutoff[i,2]=quantile(pre_outcome,0.6)
cutoff[i,3]=quantile(pre_outcome,0.4)
cutoff[i,4]=quantile(pre_outcome,0.2)
}
#mse<-c()
#mae<-c()
#mmeu<-c()
#mmeo<-c()
## Drop the NA seed so pretotal holds exactly the 192 OOS predictions.
pretotal=pretotal[2:length(pretotal)]
#for (i in 1:length(pretotal)){
# mse[i]=(pretotal[i]-ndata$`Next 1 W Index Return`[423+i])^2
# mae[i]=abs(pretotal[i]-ndata$`Next 1 W Index Return`[423+i])
# if (pretotal[i]>ndata$`Next 1 W Index Return`[423+i]){
# mmeu[i]=abs(pretotal[i]-ndata_test$`Next 1 W Index Return`[i])
# mmeo[i]=sqrt(abs(pretotal[i]-ndata_test$`Next 1 W Index Return`[i]))
# }
# else{
# mmeu[i]=sqrt(abs(pretotal[i]-ndata_test$`Next 1 W Index Return`[i]))
# mmeo[i]=abs(pretotal[i]-ndata_test$`Next 1 W Index Return`[i])
# }
#}
#mean(mse)
#mean(mae)
#mean(mmeu)
#mean(mmeo)
##
## Map each weekly prediction to a portfolio value (in % of capital,
## starting from 100) using the cutoffs of the window that produced it:
## higher predicted return -> larger weight on a 3x-leveraged long leg,
## lower -> larger weight on a 3x-leveraged short leg; 50% always tracks
## the index.  row_index picks the refit window for week i (4 weeks per
## window).
r<-c()
r[1]=0;
for (i in 1:192){
row_index=ceiling(i/4)
if(pretotal[i]>=cutoff[row_index,1]){
r[i]=0.17*100+0.33*100*(1+3*ndata$`Next 1 W Index Return`[(385 + (i-1))])+0.5*100*(1+ndata$`Next 1 W Index Return`[(385 + (i-1))])
}
else if((pretotal[i]<cutoff[row_index,1])&(pretotal[i]>=cutoff[row_index,2])){
r[i]=0.33*100+0.17*100*(1+3*ndata$`Next 1 W Index Return`[(385 + (i-1))])+0.5*100*(1+ndata$`Next 1 W Index Return`[(385 + (i-1))])
}
else if((pretotal[i]<cutoff[row_index,2])&(pretotal[i]>=cutoff[row_index,3])){
r[i]=0.5*100+0.5*100*(1+ndata$`Next 1 W Index Return`[(385 + (i-1))])
}
else if((pretotal[i]<cutoff[row_index,3])&(pretotal[i]>=cutoff[row_index,4])){
r[i]=0.33*100+0.17*100*(1-3*ndata$`Next 1 W Index Return`[(385 + (i-1))])+0.5*100*(1+ndata$`Next 1 W Index Return`[(385 + (i-1))])
}
else if((pretotal[i]<cutoff[row_index,4])){
r[i]=0.17*100+0.33*100*(1-3*ndata$`Next 1 W Index Return`[(385 + (i-1))])+0.5*100*(1+ndata$`Next 1 W Index Return`[(385 + (i-1))])
}
}
## Convert from percent back to gross weekly returns and compound.
r=r/100
return=1
for (i in 1:192){
return=return*r[i]
}
#(return-1)/192*52
## Annualized (geometric) return over the 192-week backtest.
return^(52/192)-1
## Annualized volatility of the weekly returns.
sd(r)*sqrt(52)
length(pretotal)
## Hit ratio: share of weeks where predicted and realized returns have
## the same sign (their product is positive).
real=ndata$`Next 1 W Index Return`[385:576]
hit=real*pretotal
length(hit)
hit_ratio=length(which(hit>0))/length(hit)
hit_ratio
|
d9c5de9170a4eacc2180a2ea867eec16bfeb9b41
|
99037a80c2ec155aba772b778ac3b7c814d97d22
|
/ui.R
|
343dff9d6964705afe025debcaf4a6e247e69b8b
|
[] |
no_license
|
miharanGitHub/DataProducts
|
a6be8847057bedb39cbe26bd3ededd440e5e6dce
|
0912ac995178438e62a2aacaeb15e5ddec45dfee
|
refs/heads/master
| 2021-01-23T19:46:24.687383
| 2015-04-18T14:45:16
| 2015-04-18T14:45:16
| 34,169,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
ui.R
|
## ui.R -- Shiny UI for a small demo app: simulate random pairs with a
## user-chosen correlation, show the realized correlation (output$nText)
## and, for large enough samples, a scatter plot (output$scatterPlot).
## Server-side counterparts live in server.R (not shown here).
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Randoms with defined correlation"),
sidebarPanel(
## "n": number of simulated points.
sliderInput("n", "Number of simulations:",
min=50, max=10000, value=500),
p("Define the number of simulations"),
br(),
br(),
## "r": target correlation of the simulated pairs.
sliderInput("r", "Correlation:",
min=0, max=1, value=0.5),
p("Define the correlation"),
br(),
br(),
## Simulation only (re)runs when this button is pressed.
actionButton("goButton", "Generate!"),
p("Click the button to update simulation.")
),
mainPanel(
p("Simulated correlation"),
verbatimTextOutput("nText"),
# The scatter plot is only shown when the number of simulations is at
# least 100 (the condition below; an earlier comment said 50).
conditionalPanel("input.n >= 100",
plotOutput("scatterPlot", height = 600, width=600)
)
)
))
|
7be00e95d50a310401e6b6b76ad2ea28626e4071
|
179e29245509e1b61709eba0502eeac6de10ea60
|
/man/dist.Rd
|
e80471daf90431126760a25102c810405579a987
|
[
"MIT"
] |
permissive
|
jacquesattack/kmeanspp
|
8081999e03e745967a3da983f5673edda2483242
|
4a218ea3c3e4d680f765de04cb818da1c82158cc
|
refs/heads/master
| 2020-12-29T07:18:42.638123
| 2020-02-08T17:40:09
| 2020-02-08T17:40:09
| 238,508,118
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 351
|
rd
|
dist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kpp.R
\name{dist}
\alias{dist}
\title{Calculate Euclidean distance between two vectors}
\usage{
dist(v1, v2)
}
\arguments{
\item{v1}{First vector}
\item{v2}{Second vector}
}
\value{
The Euclidean distance
}
\description{
Calculate Euclidean distance between two vectors
}
|
e27d6d2d857dcebe5f915a7030dc4eb201ba6bcf
|
cc7124ca13f2daee8120960334452e655b22d301
|
/plot1.R
|
12e9d763880b9032f1313f51cc74d8772f6b22e5
|
[] |
no_license
|
liyunchao/ExData_Plotting1
|
9f4aba27fe3dc933dcf1d368fd48d33565c7d651
|
131c2d4a13f32da0223ac1f336c97ad57a8fc963
|
refs/heads/master
| 2021-01-18T00:44:35.123150
| 2016-09-18T16:10:18
| 2016-09-18T16:10:18
| 68,445,586
| 0
| 0
| null | 2016-09-17T10:22:21
| 2016-09-17T10:22:21
| null |
UTF-8
|
R
| false
| false
| 161
|
r
|
plot1.R
|
## plot1.R -- draw a red histogram of household global active power and
## save it to plot1.png.
source("data.R")   # expected to define `target.house` -- not shown here; verify
png("plot1.png")   # open the PNG graphics device
hist(target.house$Global_active_power,col="red",main="Global Active Power",xlab = "Global Active Power (kilowatts)")
dev.off()          # close the device so the file is written to disk
|
9a049931f6241c2e5697facfed054d466f4fe37f
|
050854230a7cead95b117237c43e1c8ff1bddcaa
|
/demo/pub_code/R.Figures/grl.figure1.1.R
|
969fbc18719e38137d7586b8bcdb3781c0487f10
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
USGS-R/mda.lakes
|
7b829d347e711416cbadbf50f8ac52c20546e7bc
|
eba6ddfba4d52c74e7b09fb1222772630dfa7f30
|
refs/heads/main
| 2023-04-15T18:10:46.043228
| 2020-11-13T18:43:09
| 2020-11-13T18:43:09
| 7,429,212
| 1
| 11
| null | 2023-04-07T22:44:55
| 2013-01-03T19:50:59
|
R
|
UTF-8
|
R
| false
| false
| 2,448
|
r
|
grl.figure1.1.R
|
#all.slopes = fread('all.slopes.csv')
## grl.figure1.1.R -- map of per-lake temperature trends (GRL figure 1).
## Reads per-lake Sen slopes, computes the median slope and a 95% CI per
## WBIC (lake ID), looks up lake coordinates, and plots them over the
## Wisconsin state boundary (plotting continues below luke.colors()).
library(data.table)
library(plyr)
library(fields)
library(mda.lakes)
#source('../R/Libraries/GLM.functions.R')
source('luke.legend.R')
source('sens.confint.mod.R')
all.slopes = fread('all.slopes.csv')
wi = fread('../../../inst/supporting_files/WI_boundary_lat_lon.tsv')
## Median trend and number of slope estimates per lake.
wbic.slopes = ddply(all.slopes, 'wbic', function(df){
return(data.frame(slope=median(df$slopes), n=nrow(df)))
})
## 95% confidence interval of the slope per lake
## (sens.confint.mod.hirsch is defined in sens.confint.mod.R, sourced above).
confint = ddply(all.slopes, 'wbic',
function(df){sens.confint.mod.hirsch(df$slopes, data.table(df),pval=0.95)})
#if lower bound is > 0, then significant at 95%
## Significant when the CI endpoints (V1, V2) are on the same side of zero.
confint$issig = (confint$V1 > 0 & confint$V2 > 0) | (confint$V1 < 0 & confint$V2 < 0)
## Look up lat/lon for each lake
## (getLatLon presumably comes from mda.lakes -- verify).
wbic.slopes$lat = NA
wbic.slopes$lon = NA
for(i in 1:nrow(wbic.slopes)){
lat.lon = getLatLon(as.character(wbic.slopes$wbic[i]))
wbic.slopes$lat[i] = lat.lon[1]
wbic.slopes$lon[i] = lat.lon[2]
}
## Point size encodes sample size in two tiers, split at n = 3000.
wbic.slopes$cex = NA
wbic.slopes$cex[wbic.slopes$n < 3e3] = 0.5
wbic.slopes$cex[wbic.slopes$n >= 3e3] = 1.5
## Plot high-n lakes first so the smaller points land on top.
wbic.slopes = wbic.slopes[order(wbic.slopes$n, decreasing=TRUE),]
wbic.slopes = merge(wbic.slopes, confint) #merge is.sig into it
tiff('grl.figure.1.2.tiff', width=2400, height=3150, res=450, compression='lzw')
## Wisconsin outline as the base layer.
plot(wi$Lon, wi$Lat, type='l', lwd=2, bty='n', ylab='Lat', xlab='Lon', col='grey')
## Map trend values (degrees C / yr) onto a blue-grey-red diverging
## palette.  Values beyond +/-0.1 are clamped so they saturate at the
## end colors rather than falling outside the scale.
luke.colors <- function(values) {
  values[values < -0.1] <- -0.1
  values[values > 0.1] <- 0.1
  ## 256-step blue -> grey -> red ramp from the fields package.
  ramp <- two.colors(n = 256, start = "blue", end = "red",
                     middle = "grey", alpha = 1.0)
  ## zlim is slightly wider than the clamp range so clamped extremes
  ## still fall inside the scale; out-of-range values render as grey.
  color.scale(values, zlim = c(-0.11, 0.11), col = ramp,
              transparent.color = "grey")
}
## Non-significant lakes: open circles (transparent fill) in trend color.
to.plot = wbic.slopes[!wbic.slopes$issig,]
points(to.plot$lon, to.plot$lat,
col=luke.colors(to.plot$slope), bg=rgb(1,1,1,0),
pch=21, cex=to.plot$cex)#cex=log(wbic.slopes$n+10))
## Significant lakes: filled circles in the same trend color.
to.plot = wbic.slopes[wbic.slopes$issig,]
points(to.plot$lon, to.plot$lat,
col=luke.colors(to.plot$slope), bg=luke.colors(to.plot$slope),
pch=21, cex=to.plot$cex)#cex=log(wbic.slopes$n+10))
## Legend swatches at five reference trend levels; luke.legend is the
## custom legend helper sourced at the top of the script.
l.levels = c(-0.1, -0.05, 0, 0.05, 0.1)
my.col = luke.colors(l.levels)
luke.legend("topright", fill = my.col, title=expression(Trend~degree*C~yr^-1),
legend = c("<= -0.1", "-0.05", "0", "0.05", ">= 0.1"),
horiz=FALSE,x.intersp=0.3, y.intersp=1, adj=c(-0.1,0.5),
title.adj=c(0,0.0), bty='n', fill.x.cex=1.2, fill.y.cex=1.3)
dev.off()
|
5e37b7ddcf6a016b14dcdafc4c769ea5e18ee829
|
42a3f11d0de26fa754b4388f31570bc0a6dd5179
|
/shiny/shiny_wqdi/server.R
|
80262f3193c110feab5d6e6b216d584a7e1d307a
|
[] |
no_license
|
InterstateCommissionPotomacRiverBasin/wq_data_inventory
|
e0b7aa4a01a14e6b85a951f9d634d09c34d0c120
|
674824a955628d0bc859b7f427b9656246f53625
|
refs/heads/master
| 2021-07-25T09:10:43.851314
| 2018-08-09T19:22:09
| 2018-08-09T19:22:09
| 133,057,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,087
|
r
|
server.R
|
## Shiny server for the water-quality data inventory app.  Most tab
## logic lives in separate files under server/ and ui/ that are
## source()d with local = TRUE so they run inside this function's
## environment and can see input/output/session.
server <- function(input, output, session) {
# Loading Page (Server) ---------------------------------------------------
# Load app data, then swap the loading splash for the real content.
# hide()/show() are presumably shinyjs helpers -- verify the UI loads
# shinyjs.
load_data()
hide(id = "loading_content",
anim = TRUE,
animType = "fade")
show("app_content")
# Information Tab (Server) ------------------------------------------------
# The instructions panel is built from a standalone UI fragment file.
output$instructions <- renderUI({
source("ui/ui_instructions.R", local = TRUE)$value
})
# Tabular Tab (Server) ----------------------------------------------------
# server_filter.R / server_program_rec.R / server_site_rec.R define the
# reactive data (program.rec, site.rec) consumed by the table modules.
source("server/server_filter.R", local = TRUE)
source("server/server_program_rec.R", local = TRUE)
# dt_table is a Shiny module (defined elsewhere) rendering a data table.
callModule(dt_table, "program_dt", program.rec)
source("server/server_site_rec.R", local = TRUE)
callModule(dt_table, "site_dt", site.rec)
source("server/server_downloads.R", local = TRUE)
# source("server/server_output_options.R", local = TRUE)
# Map Tab (Server) --------------------------------------------------------
# The map tab is currently disabled.
# source("server/server_leaflet_filter.R", local = TRUE)
# source("server/server_map_rec.R", local = TRUE)
# source("server/server_leaflet.R", local = TRUE)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.