blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e8a992d41ce6d1003117ebb0efa289b8c61dc32c
|
03b58eced475bed141a0e81b2b687fd72d42769b
|
/R/get_p_LSHTM.R
|
4359a09eb6daffbb8dd0c87feae4868d96b662c6
|
[] |
no_license
|
epicentre-msf/covidestim
|
a86a14697bac09e27cee2d802dc12489da5fbe91
|
93785a265a495d9388add08913aba3ab14d96c59
|
refs/heads/master
| 2023-01-04T21:46:17.651857
| 2020-11-02T16:21:49
| 2020-11-02T16:21:49
| 263,291,260
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,846
|
r
|
get_p_LSHTM.R
|
#' Estimate COVID-19 outcome probabilities for a population given its age
#' distribution, and age-severity estimates used by LSHTM
#'
#' @description
#' Estimate COVID-19 outcome probabilities including hospitalization|infection,
#' ICU|hospitalization, death|hospitalization, and death|infection, using
#' age-specific outcomes estimates of Pr(Clinical|Infection) from Davies et al.
#' (2020) (with confidence intervals) and point estimates of
#' Pr(hospitalization|clinical), Pr(ICU|hospitalization), and
#' Pr(dead|hospitalization) from Van Zandvoort et al. (2020).
#'
#' Population age distributions can either be taken from the UN World
#' Population Prospects 2019 (WPP2019), or directly supplied by the user.
#'
#' @param x Either an ISO3 country code used to extract age-specific population
#'   estimates from the UN World Population Prospects 2019 dataset, \emph{or}, a
#'   data.frame containing age categories in the first column and population
#'   counts (or proportions) in the second column
#' @param p_type Outcome to estimate (either "p_hosp_inf", "p_icu_hosp",
#'   "p_dead_hosp", or "p_dead_inf")
#' @param p_stat Statistic of the severity estimates to use (either "mean",
#'   "median", "low_95", "up_95", "low_50", or "up_50")
#'
#' @return
#' Estimated outcome probability (scalar)
#'
#' @author Anton Camacho
#' @author Patrick Barks <patrick.barks@@epicentre.msf.org>
#'
#' @source
#' van Zandvoort, K., Jarvis, C.I., Pearson, C., Davies, N.G., CMMID COVID-19
#' Working Group, Russell, T.W., Kucharski, A.J., Jit, M.J., Flasche, S., Eggo,
#' R.M., and Checchi, F. (2020) Response strategies for COVID-19 epidemics in
#' African settings: a mathematical modelling study. medRxiv preprint.
#' \url{https://doi.org/10.1101/2020.04.27.20081711}
#'
#' Davies, N.G., Klepac, P., Liu, Y., Prem, K., Jit, M., CMMID COVID-19 Working
#' Group, and Eggo, R.M. (2020) Age-dependent effects in the transmission and
#' control of COVID-19 epidemics. medRxiv preprint.
#' \url{https://doi.org/10.1101/2020.03.24.20043018}
#'
#' @examples
#' # mean Pr(hospitalization|infection) for Canada (ISO3 code "CAN"), taking age
#' # distribution from WPP2019
#' get_p_LSHTM(x = "CAN", p_type = "p_hosp_inf", p_stat = "mean")
#'
#' # use custom age-distribution
#' age_df <- data.frame(
#'   age = c("0-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "70-79", "80+"),
#'   pop = c(1023, 1720, 2422, 3456, 3866, 4104, 4003, 3576, 1210),
#'   stringsAsFactors = FALSE
#' )
#'
#' get_p_LSHTM(x = age_df, p_type = "p_hosp_inf", p_stat = "mean")
#'
#' @export get_p_LSHTM
get_p_LSHTM <- function(x,
                        p_type = c("p_hosp_inf", "p_icu_hosp", "p_dead_hosp", "p_dead_inf"),
                        p_stat = c("mean", "median", "low_95", "up_95", "low_50", "up_50")) {

  p_type <- match.arg(p_type)
  p_stat <- match.arg(p_stat)

  # use P(Clinical|Infection) from Davies 2020 (with confidence intervals) and
  # P(Hosp|Clinical) from VanZandvoort 2020 to compute P(Hosp|Infection)
  est_davies <- get_est_davies(stat = p_stat)
  est_vanzan <- get_est_vanzandvoort()

  # Join the two estimate sets on the shared age grouping.
  # FIX: the original passed by.x = "age_group" without a matching by.y,
  # which leaves by.y at its default (the full intersection of column names)
  # and errors if the tables ever share more than one column. Specify `by`.
  est_merge <- merge(est_vanzan, est_davies, by = "age_group")
  est_merge$p_hosp_inf <- est_merge$p_hosp_clin * est_merge$p_clin_inf
  est_merge$p_dead_inf <- est_merge$p_dead_hosp * est_merge$p_hosp_inf

  # prepare age distribution (from WPP2019 if x is an ISO3 code, otherwise
  # from the user-supplied two-column data.frame)
  age_distr <- prep_age_distib(x)

  # aggregate population age-classes to match estimate age-classes
  age_distr_agg <- aggregate_ages(age_distr, target = est_merge$age_group)

  # bind estimates to population data by age class
  est_full <- merge(est_merge, age_distr_agg, all.x = TRUE)

  # population-weighted average of the age-specific outcome probabilities
  sum(est_full[["pop"]] * est_full[[p_type]]) / sum(est_full[["pop"]])
}
|
7f25737a0795e56c8340cab41f639438de762115
|
011c52e9bae1857c0bd3d44f6f27adada699ba0a
|
/man/getTileIdFromFilename.Rd
|
0a2163dfecbdc8a3ff4e81e9f9c61352dd51599c
|
[
"MIT"
] |
permissive
|
albhasan/blissR
|
ffebfe0fcd72e2e0226b140a1ddfe7cdcad579d2
|
2ce56272596c709df4bcecedf4ee9720bb62ef8a
|
refs/heads/master
| 2020-12-24T13:36:46.158293
| 2014-08-19T12:54:25
| 2014-08-19T12:54:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
rd
|
getTileIdFromFilename.Rd
|
\name{getTileIdFromFilename}
\alias{getTileIdFromFilename}
\title{Get the MODIS tile id from the MODIS filename}
\usage{
getTileIdFromFilename(object, fileName)
}
\arguments{
\item{object}{An instance of the class Util}
\item{fileName}{Name of the file}
}
\value{
The MODIS tile id extracted from the file name
}
\description{
Get the MODIS tile id from the MODIS filename
}
|
59ef695abc651bdc3c150b4ad0c5bd5191745234
|
68d1186a1f8bf36627f45d926e3a0971b07cee7d
|
/R/RcppExports.R
|
149c8ce6b264d2579249e9e660903ae4dd74894c
|
[] |
no_license
|
tz-lom/bcidat
|
c0bf6f6de42d163ba01f719fe345bc16e4ef779b
|
12bc6f9aea5dd1b2f8e4d933e70669d54e710585
|
refs/heads/master
| 2021-06-06T18:53:37.679957
| 2019-04-10T19:25:30
| 2019-04-10T19:25:30
| 23,744,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrapper around the package's compiled C++ entry point
# `_bcidat_load_bcidat`, which reads the file at `file`.
# `raw` (default FALSE) is forwarded unchanged to the C++ routine; its exact
# semantics are not visible here -- NOTE(review): confirm against the C++
# source this was generated from.
# NOTE(review): this file is generated by Rcpp::compileAttributes(); make any
# real edits in the C++ sources, not here.
load_bcidat <- function(file, raw = FALSE) {
    .Call('_bcidat_load_bcidat', PACKAGE = 'bcidat', file, raw)
}
|
939ed1ff93ef0162e6f6c2dc6bad543a2dc5aa1f
|
58551e85e82d8e4df11dd7922b619f7896a0f566
|
/R/wrappers.R
|
4e9f5c0f9b8d8fb298a8ddd66c344d7b4007666d
|
[] |
no_license
|
jkaupp/infer
|
e77abe19803061995f1665950750c8dbd4eef053
|
c863cd91fc142ca49da458f641736488d3a68dcf
|
refs/heads/master
| 2020-03-19T09:19:16.639094
| 2018-05-14T23:13:22
| 2018-05-14T23:13:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,975
|
r
|
wrappers.R
|
# Wrapper functions
# Different shortcuts to doing traditional hypothesis tests & confidence intervals
# in R as well as calculating test statistics,
# following a pipe-able framework
#'
#' A tidier version of t.test for two sample tests
#'
#' @param data a data frame that can be coerced into a \code{\link[tibble]{tibble}}
#' @param formula a formula with the response variable on the left and the explanatory on the right
#' @param alternative character string specifying the direction of the alternative hypothesis. Options are
#' "\code{two_sided}" (default), "\code{greater}", or "\code{less}".
#' @param ... currently ignored
#' @export
#' @examples
#' # t test for comparing mpg against automatic/manual
#' mtcars %>%
#' dplyr::mutate(am = factor(am)) %>%
#' t_test(mpg ~ am, alternative = "less")
t_test <- function(data, formula, alternative = "two_sided", ...) {
  # Translate the infer-style direction name ("two_sided") to the spelling
  # stats::t.test() expects ("two.sided"); other values pass through as-is.
  direction <- if (alternative == "two_sided") "two.sided" else alternative

  # Fit the classical two-sample t test via the formula interface only
  # (a response/explanatory interface was sketched but never enabled).
  fit <- stats::t.test(formula = formula, data = data,
                       alternative = direction)

  # Tidy the htest object and keep the columns of interest, renaming to
  # snake_case along the way.
  tidy_fit <- broom::glance(fit)
  dplyr::select(tidy_fit, statistic, t_df = parameter, p_value = p.value,
                alternative)
}
#' A shortcut wrapper function to get the observed test statistic for a t test
#'
#' @param data a data frame that can be coerced into a \code{\link[tibble]{tibble}}
#' @param formula a formula with the response variable on the left and the explanatory on the right
#' @param ... currently ignored
#' @export
t_stat <- function(data, formula, ...) {
  # Run the full tidy t test, then strip the result down to the bare
  # numeric test statistic.
  results <- t_test(data, formula = formula, ...)
  dplyr::pull(dplyr::select(results, statistic))
}
#'
#' A tidier version of chisq.test for goodness of fit tests and tests of independence.
#'
#' @param data a data frame that can be coerced into a \code{\link[tibble]{tibble}}
#' @param formula a formula with the response variable on the left and the explanatory on the right
#' @param ... additional arguments for \code{chisq.test}
#' @importFrom rlang f_lhs f_rhs
#' @export
#' @examples
#' # chisq test for comparing number of cylinders against automatic/manual
#' mtcars %>%
#' dplyr::mutate(cyl = factor(cyl), am = factor(am)) %>%
#' chisq_test(cyl ~ am)
chisq_test <- function(data, formula, ...) {
  # Pull the variable names out of the formula (response ~ explanatory);
  # the response column must come first so the contingency table matches
  # the formula's orientation.
  selected <- as.character(c(f_lhs(formula), f_rhs(formula)))

  # Cross-tabulate the two columns and run the chi-squared test,
  # forwarding any extra arguments (e.g. correct =) to stats::chisq.test().
  counts <- table(data[ , selected])
  result <- broom::glance(stats::chisq.test(counts, ...))

  # Keep only the statistic, degrees of freedom, and p value, snake_cased.
  dplyr::select(result, statistic, chisq_df = parameter, p_value = p.value)
}
#' A shortcut wrapper function to get the observed test statistic for a chisq test. Uses \code{stats::chisq.test}, which applies a continuity correction.
#'
#' @param data a data frame that can be coerced into a \code{\link[tibble]{tibble}}
#' @param formula a formula with the response variable on the left and the explanatory on the right
#' @param ... additional arguments for \code{chisq.test}
#' @export
chisq_stat <- function(data, formula, ...) {
  # Delegate to chisq_test() and reduce the result to the bare statistic.
  results <- chisq_test(data, formula = formula, ...)
  dplyr::pull(dplyr::select(results, statistic))
}
|
9ae632ffd8d49f1c4c1ea0c526dee6410f01790b
|
6eb82256addc1b8da73b394219965b844112c8d3
|
/catSurv/man/estimateSE.Rd
|
6194dcf9f033362fe0186bf57a303240dd7a0b55
|
[] |
no_license
|
drmiller1220/CATSurv
|
b441b9d34a9b23b0460bfa11ec144661063b4c01
|
9cfff1e28c4f09f75b587101a6c16e00aa8e7981
|
refs/heads/master
| 2021-01-23T00:48:45.078983
| 2016-03-31T22:11:05
| 2016-03-31T22:11:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
rd
|
estimateSE.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/estimateSE.R
\name{estimateSE}
\alias{estimateSE}
\title{Computerized Adaptive Testing Survey Posterior Standard Error of Estimated Latent Trait Position Estimator}
\usage{
estimateSE(cat, theta.hat, ...)
}
\arguments{
\item{cat}{An object of class \code{CATsurv}}
\item{theta.hat}{A scalar value containing an estimate of a respondent's position on the latent trait. Generally, this is the output of the \code{\link{estimateTheta}} function.}
\item{...}{argument passed to other functions.}
}
\value{
The estimate of the standard error of the user supplied \code{theta.hat}
}
\description{
This function estimates the standard error of an estimate of a respondent's expected \emph{a posteriori} (EAP) position on the latent scale.
}
\details{
The standard error of the expected \emph{a posteriori} (EAP) estimate of respondent \eqn{j}'s position on the latent scale is calculated as the square root of \deqn{E((\theta_j-\hat{\theta_j}^{(\text{EAP})})^2|\mathbf{y}_{k-1,j})=\frac{\int(\theta_j-\hat{\theta_j}^{(\text{EAP})})^2\pi(\theta_j)L(\theta_j|\mathbf{y}_{k-1,j})d\theta_j}{\int\pi(\theta_j)L(\theta_j|\mathbf{y}_{k-1,j})d\theta_j}}.
}
\author{
Josh W. Cutler: \email{josh@zistle.com} and Jacob M. Montgomery: \email{jacob.montgomery@wustl.edu}
}
\seealso{
\code{\link{three.pl}},\code{\link{likelihood}}, \code{\link{prior.value}}, \code{\link{estimateTheta}}, \code{\link{expectedPV}}, \code{\link{nextItem}}, \code{\link{storeAnswer}}, \code{\link{debugNextItem}}
}
|
6c4e6ba5ee1c6d1fd7a68b480dc3a630c3761be2
|
a0cfa7a7e59b8aaa13e413af70eab364bfa31948
|
/plot4.R
|
2ae46a994d914bd47850676fea19493324dd6f8e
|
[] |
no_license
|
Manu929/ExData_Plotting1
|
ec75b673cda3cb3fc33e3e12cdcc19cd69506bec
|
6d33a30611dfb974bec9f5dbf1524685749d2157
|
refs/heads/master
| 2022-04-24T22:04:56.362956
| 2020-04-26T16:29:15
| 2020-04-26T16:29:15
| 258,986,605
| 0
| 0
| null | 2020-04-26T09:09:47
| 2020-04-26T09:09:47
| null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
plot4.R
|
# plot4.R -- draw a 2x2 panel of household power measurements for
# 2007-02-01 and 2007-02-02 and write it to plot4.png.

# Open the PNG device up front (instead of plotting then dev.copy),
# so everything below renders straight into the file.
png(filename = "plot4.png", height = 480, width = 480)

# Read the full dataset; "?" marks missing values in the raw file, and all
# columns after the two date/time strings are numeric.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?",
                   colClasses = c("character", "character", "numeric", "numeric", "numeric",
                                  "numeric", "numeric", "numeric", "numeric"),
                   stringsAsFactors = FALSE)

# Keep only the two days of interest, then build a POSIXct timestamp from
# the date and time columns.
dat <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
dat$Date <- as.Date(dat$Date, format = "%d/%m/%Y")
# FIX: dat$Date is already Date class here; the original wrapped it in a
# second, redundant as.Date() before pasting.
dat$Datetime <- as.POSIXct(paste(dat$Date, dat$Time))

# 2x2 layout filled column-wise: panels land top-left, bottom-left,
# top-right, bottom-right in plotting order.
par(mfcol = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(dat, {
  # Panel 1 (top-left): global active power over time
  plot(Global_active_power ~ Datetime, type = "l", xlab = "",
       ylab = "Global Active Power (kilowatts)")
  # Panel 2 (bottom-left): the three sub-metering series plus legend
  plot(Sub_metering_1 ~ Datetime, type = "l", col = "black",
       xlab = "", ylab = "Energy sub metering")
  lines(Sub_metering_2 ~ Datetime, type = "l", col = "red")
  lines(Sub_metering_3 ~ Datetime, type = "l", col = "blue")
  legend("topright", lty = 1, lwd = 2, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         y.intersp = 0.5, x.intersp = 0.5, cex = 0.5, bty = "n")
  # Panel 3 (top-right): voltage
  plot(Voltage ~ Datetime, type = "l", xlab = "datetime")
  # Panel 4 (bottom-right): global reactive power
  plot(Global_reactive_power ~ Datetime, type = "l", xlab = "datetime")
})
dev.off()
|
15620315356ad3f0313facf4dc81064c59e279b7
|
f0937348e9cca17d24830c34798e167e85eb0140
|
/man/rowMeans.Rd
|
59f0f12be2a060934c0a87b0495f96baf357e495
|
[] |
no_license
|
mohakuma/AdapteR
|
8cc349621d8deb64d80dbab5bf68829fce3bc8b3
|
3f3c31301f0aa4a31a34fbc00b2facac61189954
|
refs/heads/master
| 2021-01-21T09:59:53.611783
| 2017-05-11T12:05:44
| 2017-05-11T12:05:44
| 91,676,188
| 0
| 0
| null | 2017-05-18T09:41:01
| 2017-05-18T09:41:01
| null |
UTF-8
|
R
| false
| true
| 578
|
rd
|
rowMeans.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rowcolOps.R
\name{rowMeans}
\alias{rowMeans}
\title{row means of a FLMatrix.}
\usage{
rowMeans(object, ...)
}
\arguments{
\item{object}{is of class FLMatrix.}
\item{...}{any additional arguments}
}
\value{
\code{rowMeans} returns a FLVector object representing the row-wise means.
}
\description{
\code{rowMeans} computes the row-wise average of FLMatrix objects.
}
\examples{
flmatrix <- FLMatrix("tblMatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL")
resultFLVector <- rowMeans(flmatrix)
}
|
6f4ebd59a7add9ec4243ae4aecb126201b9c954e
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/MarkEdmondson1234/autoGoogleAPI/playmoviespartner_functions.R
|
18893991e87cb8f8ce5c2d6494a55cf4ed45e3e9
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,232
|
r
|
playmoviespartner_functions.R
|
#' Google Play Movies Partner API
#' Gets the delivery status of titles for Google Play Movies Partners.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2017-03-05 19:57:46
#' filename: /Users/mark/dev/R/autoGoogleAPI/googleplaymoviespartnerv1.auto/R/playmoviespartner_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item https://www.googleapis.com/auth/playmovies_partner.readonly
#' }
#'
#' @docType package
#' @name playmoviespartner_googleAuthR
#'
NULL
## NULL
#' A helper function that tests whether an object is either NULL _or_
#' a list of NULLs
#'
#' @param x Object to test.
#' @return `TRUE` if `x` is `NULL`, or if every element of `x` is `NULL`
#'   (an empty list also yields `TRUE`, since `all()` of nothing is `TRUE`).
#' @keywords internal
# FIX: use short-circuiting `||` for this scalar condition (the original `|`
# needlessly evaluated both sides), and type-stable vapply() over sapply().
is.NullOb <- function(x) is.null(x) || all(vapply(x, is.null, logical(1)))
#' Recursively remove NULL entries (and all-NULL sublists) from a list
#'
#' Drops every element for which \code{is.NullOb()} is TRUE, then descends
#' into any remaining sublists and repeats.
#'
#' @keywords internal
rmNullObs <- function(x) {
  kept <- Filter(Negate(is.NullOb), x)
  lapply(kept, function(element) {
    if (is.list(element)) rmNullObs(element) else element
  })
}
#' Get an Avail given its avail group id and avail id.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/playmoviespartner/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/playmovies_partner.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/playmovies_partner.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param accountId REQUIRED
#' @param availId REQUIRED
#' @importFrom googleAuthR gar_api_generator
#' @export
accounts.avails.get <- function(accountId, availId) {
  # API method: playmoviespartner.accounts.avails.get
  endpoint <- sprintf(
    "https://playmoviespartner.googleapis.com/v1/accounts/%s/avails/%s",
    accountId, availId
  )
  # gar_api_generator() returns a callable that performs the authenticated
  # GET; the identity data_parse_function hands back the parsed response.
  fetch <- googleAuthR::gar_api_generator(endpoint, "GET",
                                          data_parse_function = function(x) x)
  fetch()
}
#' List Avails owned or managed by the partner. See _Authentication and Authorization rules_ and _List methods rules_ for more information about this method.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/playmoviespartner/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/playmovies_partner.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/playmovies_partner.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param accountId REQUIRED
#' @param pageSize See _List methods rules_ for info about this field
#' @param pageToken See _List methods rules_ for info about this field
#' @param pphNames See _List methods rules_ for info about this field
#' @param studioNames See _List methods rules_ for info about this field
#' @param title Filter that matches Avails with a `title_internal_alias`, `series_title_internal_alias`, `season_title_internal_alias`, or `episode_title_internal_alias` that contains the given case-insensitive title
#' @param territories Filter Avails that match (case-insensitive) any of the given country codes, using the 'ISO 3166-1 alpha-2' format (examples: 'US', 'us', 'Us')
#' @param altId Filter Avails that match a case-insensitive, partner-specific custom id
#' @param videoIds Filter Avails that match any of the given `video_id`s
#' @param altIds Filter Avails that match (case-insensitive) any of the given partner-specific custom ids
#' @importFrom googleAuthR gar_api_generator
#' @export
accounts.avails.list <- function(accountId, pageSize = NULL, pageToken = NULL, pphNames = NULL,
                                 studioNames = NULL, title = NULL, territories = NULL, altId = NULL, videoIds = NULL,
                                 altIds = NULL) {
  # Per-account Avails collection URL.
  url <- sprintf("https://playmoviespartner.googleapis.com/v1/accounts/%s/avails",
                 accountId)
  # playmoviespartner.accounts.avails.list
  # Gather every optional filter; rmNullObs() below strips the unset (NULL)
  # entries so they are not sent as query parameters.
  pars = list(pageSize = pageSize, pageToken = pageToken, pphNames = pphNames,
              studioNames = studioNames, title = title, territories = territories, altId = altId,
              videoIds = videoIds, altIds = altIds)
  # gar_api_generator() builds an authenticated GET; the identity
  # data_parse_function returns the parsed API response unchanged.
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
                                      data_parse_function = function(x) x)
  f()
}
#' Get an Order given its id. See _Authentication and Authorization rules_ and _Get methods rules_ for more information about this method.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/playmoviespartner/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/playmovies_partner.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/playmovies_partner.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param accountId REQUIRED
#' @param orderId REQUIRED
#' @importFrom googleAuthR gar_api_generator
#' @export
accounts.orders.get <- function(accountId, orderId) {
  # API method: playmoviespartner.accounts.orders.get
  endpoint <- sprintf(
    "https://playmoviespartner.googleapis.com/v1/accounts/%s/orders/%s",
    accountId, orderId
  )
  # Authenticated GET returning the parsed response unchanged.
  fetch <- googleAuthR::gar_api_generator(endpoint, "GET",
                                          data_parse_function = function(x) x)
  fetch()
}
#' List Orders owned or managed by the partner. See _Authentication and Authorization rules_ and _List methods rules_ for more information about this method.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/playmoviespartner/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/playmovies_partner.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/playmovies_partner.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param accountId REQUIRED
#' @param pageSize See _List methods rules_ for info about this field
#' @param pageToken See _List methods rules_ for info about this field
#' @param pphNames See _List methods rules_ for info about this field
#' @param studioNames See _List methods rules_ for info about this field
#' @param name Filter that matches Orders with a `name`, `show`, `season` or `episode` that contains the given case-insensitive name
#' @param status Filter Orders that match one of the given status
#' @param customId Filter Orders that match a case-insensitive, partner-specific custom id
#' @param videoIds Filter Orders that match any of the given `video_id`s
#' @importFrom googleAuthR gar_api_generator
#' @export
accounts.orders.list <- function(accountId, pageSize = NULL, pageToken = NULL, pphNames = NULL,
                                 studioNames = NULL, name = NULL, status = NULL, customId = NULL, videoIds = NULL) {
  # Per-account Orders collection URL.
  url <- sprintf("https://playmoviespartner.googleapis.com/v1/accounts/%s/orders",
                 accountId)
  # playmoviespartner.accounts.orders.list
  # Gather optional filters; rmNullObs() strips the unset (NULL) entries so
  # only supplied parameters reach the query string.
  pars = list(pageSize = pageSize, pageToken = pageToken, pphNames = pphNames,
              studioNames = studioNames, name = name, status = status, customId = customId,
              videoIds = videoIds)
  # Authenticated GET; identity data_parse_function returns the parsed
  # response as-is.
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
                                      data_parse_function = function(x) x)
  f()
}
#' List StoreInfos owned or managed by the partner. See _Authentication and Authorization rules_ and _List methods rules_ for more information about this method.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/playmoviespartner/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/playmovies_partner.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/playmovies_partner.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param accountId REQUIRED
#' @param pageSize See _List methods rules_ for info about this field
#' @param pageToken See _List methods rules_ for info about this field
#' @param pphNames See _List methods rules_ for info about this field
#' @param studioNames See _List methods rules_ for info about this field
#' @param videoId Filter StoreInfos that match a given `video_id`
#' @param countries Filter StoreInfos that match (case-insensitive) any of the given country codes, using the 'ISO 3166-1 alpha-2' format (examples: 'US', 'us', 'Us')
#' @param name Filter that matches StoreInfos with a `name` or `show_name` that contains the given case-insensitive name
#' @param videoIds Filter StoreInfos that match any of the given `video_id`s
#' @param mids Filter StoreInfos that match any of the given `mid`s
#' @param seasonIds Filter StoreInfos that match any of the given `season_id`s
#' @importFrom googleAuthR gar_api_generator
#' @export
accounts.storeInfos.list <- function(accountId, pageSize = NULL, pageToken = NULL,
                                     pphNames = NULL, studioNames = NULL, videoId = NULL, countries = NULL, name = NULL,
                                     videoIds = NULL, mids = NULL, seasonIds = NULL) {
  # Per-account StoreInfos collection URL.
  url <- sprintf("https://playmoviespartner.googleapis.com/v1/accounts/%s/storeInfos",
                 accountId)
  # playmoviespartner.accounts.storeInfos.list
  # Gather optional filters; rmNullObs() strips the unset (NULL) entries so
  # only supplied parameters are sent as query parameters.
  pars = list(pageSize = pageSize, pageToken = pageToken, pphNames = pphNames,
              studioNames = studioNames, videoId = videoId, countries = countries, name = name,
              videoIds = videoIds, mids = mids, seasonIds = seasonIds)
  # Authenticated GET; identity data_parse_function returns the parsed
  # response as-is.
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
                                      data_parse_function = function(x) x)
  f()
}
|
a6bf98f28a8903baee696894f1371de04b4ec727
|
fc7a4e9a00eae8f4970f5c1e72e63e4f0ad6ac60
|
/man/full.rundown.Rd
|
2a7a15e48037cb9d3a9a63cfeb14812d389ebaf8
|
[] |
no_license
|
andrewejaffe/clusterRundown
|
9be6e85d8e82d2cb3343d9579d33f0a498b1f420
|
69d62ca4ef91c3453982c28126220df44c9ef8e4
|
refs/heads/master
| 2021-01-18T16:22:20.475901
| 2017-03-30T19:42:41
| 2017-03-30T19:42:41
| 86,740,040
| 0
| 0
| null | 2017-03-30T19:21:37
| 2017-03-30T19:21:37
| null |
UTF-8
|
R
| false
| true
| 281
|
rd
|
full.rundown.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nodes.R
\name{full.rundown}
\alias{full.rundown}
\title{Get Full Cluster Rundown}
\usage{
full.rundown()
}
\value{
List of stuff
}
\description{
Get output of resources and slots begin used by cluster
}
|
9742e87ba4e9de7a5fd7b5ce900050f5ab618777
|
257f253f9fd8fd6a385abaaaa9d9509409137631
|
/Numerical method/test - wL.r
|
89f1941360a45e65f672eab4c9c23591ba7f482d
|
[] |
no_license
|
YaojieLu/Optimal-model-with-cost
|
ee16d16e159687e0b7d06e132101422cd7f3a788
|
0a66573bc221034f02c0554c3ff63e371d025a85
|
refs/heads/master
| 2020-05-22T01:17:15.500934
| 2017-01-24T22:45:10
| 2017-01-24T22:45:10
| 50,156,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 594
|
r
|
test - wL.r
|
# Curve fit - gs(w) as a double power function
options(digits=20)  # print with (near) full double precision
library(deSolve)
library(optimx)
# presumably defines CFf(), called inside wLf() below -- confirm
source("Functions 1.r")
# Model constants; units/meaning not documented here -- TODO confirm
# against the model description (ca looks like atmospheric CO2, MAP like
# mean annual precipitation, but that is inference from the names).
ca <- 400
k <- 0.05
MAP <- 1000
h3 <- 10
# NOTE(review): `c` and `d` shadow base::c/base::d-style names; calls like
# c(0.07, 4, ...) still resolve to the base function, but renaming would
# be safer.
c <- 2.64
d <- 3.54
# For a candidate threshold wL, search gswL in (0.001, 0.02) for the value
# minimizing gswLf(), and return the attained objective. The outer
# optimize() call below then searches wL itself over (0.14, 1).
wLf <- function(wL){
  # Inner objective: for fixed (wL, gswL), fit the 4-parameter curve by
  # BFGS and return the optimized value.
  gswLf <- function(gswL){
    int <- c(0.07, 4, 0.08, 0.5)  # starting values for the 4 parameters
    # negate CFf so that optimx (a minimizer) effectively maximizes CFf
    f1 <- function(pars)-CFf(pars, wL=wL, gswL=gswL)
    res <- optimx(int, f1, itnmax=5000, method="BFGS")
    return(res$value)
  }
  res <- optimize(gswLf, c(0.001, 0.02), tol=.Machine$double.eps)
  # progress trace: current wL, minimizing gswL, attained objective
  message(wL, " ", res[1], " ",res[2])
  return(res$objective)
}
# Outer 1-D search over the threshold wL
wL <- optimize(wLf, c(0.14, 1), tol=.Machine$double.eps)
|
5f1408b4bba49d37dd738a7d198909878cf1d7a5
|
af287883420604671bc5fcd223f53dce92b85438
|
/man/plaint.Rd
|
5be926b9d5723e09ae92cc5b97e48006ff32099e
|
[
"MIT"
] |
permissive
|
ratmaster/plaint
|
dfc346d2ba2293009b3c80b2a07bc2e0db4af6eb
|
22a3c2fd5f0202769a854d573cdef83523a9584f
|
refs/heads/master
| 2021-06-03T00:27:03.930304
| 2016-10-22T10:32:58
| 2016-10-22T10:32:58
| 53,420,263
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 957
|
rd
|
plaint.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plaint.R
\docType{package}
\name{plaint}
\alias{plaint}
\alias{plaint-package}
\title{plaint: Plain Table Markup Language}
\description{
\code{plaint} allows you to ease your life with forming data arrays, such
as data frames, matrices or tables, and their export to user-designed
LaTeX tables. It comes with full S3 method support for further R objects.
Get back to the important parts of your work.
}
\details{
This package provides two S3-type functions: \code{\link{form}} formats,
accentuates and filters data arrays and \code{\link{latex}} exports 'formed'
data frames to LaTeX code resulting in state of the art tables. Both
functions allow the user to easily transform arbitrary dataset based on a
user-specified set of rules and table markup. Read this documentation, the
vignettes and study the examples in order to see how you can employ these
functions with joy.
}
|
a5acf4072cfb75db90bbf1412600d142bc401551
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pdSpecEst/examples/InvWavTransf2D.Rd.R
|
6cc2e348c7e972f8b30997f5df0ad7f707c02a98
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
InvWavTransf2D.Rd.R
|
library(pdSpecEst)
### Name: InvWavTransf2D
### Title: Inverse AI wavelet transform for surface of HPD matrices
### Aliases: InvWavTransf2D
### ** Examples
P <- rExamples2D(c(2^4, 2^4), 2, example = "tvar")
P.wt <- WavTransf2D(P$f) ## forward transform
P.f <- InvWavTransf2D(P.wt$D, P.wt$M0) ## backward transform
all.equal(P.f, P$f)
|
c683419fc2fb37212c3e06975888bb0221327663
|
e20a3587463ac8491b376d6f16cb607426fb119b
|
/R/load_data.R
|
2207cd5d52e08b7931b1709440d49b7234f7f07e
|
[
"MIT"
] |
permissive
|
phillipwongg/311-analysis
|
bedaf9b318e34c12309f69191e84b6a3f37697bf
|
fae5af0e022bf3ee06f8218203173b224be2ba60
|
refs/heads/master
| 2020-03-25T12:49:27.032641
| 2018-08-09T18:35:23
| 2018-08-09T18:35:23
| 143,795,607
| 0
| 0
|
MIT
| 2018-08-09T18:35:24
| 2018-08-06T23:53:52
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,834
|
r
|
load_data.R
|
#' This script loads the csv files and ensures the correct data types are used
#' and that the column names align before binding the rows
# column names with proper spacing
# Canonical snake_case column names applied to both yearly CSV extracts
# (via read_csv(col_names = ...) with skip = 1) so the two data frames can
# be row-bound. Order must match the raw files' column order.
col_names_311 <- c(
  "srn_number", "created_date", "updated_date", "action_taken",
  "owner", "request_type", "status", "request_source",
  "mobile_os", "anonymous", "assign_to", "service_date",
  "closed_date", "address_verified", "approximate_address",
  "address", "house_number", "direction", "street_name",
  "suffix", "zipcode", "latitude", "longitude", "location",
  "thompson_brothers_map_page", "thompson_brothers_map_column",
  "thompson_brothers_map_row", "area_planning_commissions",
  "council_district", "city_council_member",
  "neighborhood_council_code", "neighborhood_council_name",
  "police_precinct"
)
# Compare the column names of two data frames position-by-position.
# Returns 0 when every pair matches, 1 otherwise. (tibble() will error if
# the two frames have different column counts, which is itself a signal
# the inputs do not align.)
validate_columns <- function(df1, df2) {
  get_colnames <- tibble(
    df1 = colnames(df1),
    df2 = colnames(df2)
  )
  check_match <- mutate(
    get_colnames,
    check_match = if_else(df1 == df2, "Match", NA_character_)) %>%
    summarise(errors = sum(is.na(check_match)))
  # BUG FIX: the original ended with if_else(cond, return(0), return(1)).
  # dplyr::if_else() evaluates both branch arguments eagerly, so return(0)
  # fired unconditionally and the function always returned 0 regardless of
  # mismatches. A plain scalar if/else honors the check.
  if (check_match$errors == 0) 0 else 1
}
# Load the 2016 and 2017 MyLA311 CSV extracts with identical column names
# and types, validate that their columns align, and return them row-bound.
# Raises an error (via stop_for_problems or the alignment check) otherwise.
load_data <- function() {
  data_2016 <- read_csv(
    "data/MyLA311_Service_Request_Data_2016.csv",
    skip = 1,
    col_names = col_names_311,
    col_types = read_rds("data/my311_spec.rds"),
    progress = FALSE
  )
  stop_for_problems(data_2016)
  data_2017 <- read_csv(
    "data/MyLA311_Service_Request_Data_2017.csv",
    skip = 1,
    col_names = col_names_311,
    col_types = read_rds("data/my311_spec.rds"),
    progress = FALSE
  )
  stop_for_problems(data_2017)
  # BUG FIX: the original used if_else(cond, return(...), stop(...)).
  # dplyr::if_else() evaluates both branch arguments eagerly, so return()
  # fired before the condition was ever consulted and the validation result
  # was silently ignored. Use a plain if/else so a mismatch actually stops.
  if (validate_columns(data_2016, data_2017) == 0) {
    bind_rows(data_2016, data_2017)
  } else {
    stop("Rows may not align: There was a non 0 exit from the `validate_columns` function")
  }
}
|
c8ef7ad681b96438bb07aed9cf08c2151e60f2bc
|
e532ccecac7a53c2c211ace6864858f88c13e364
|
/R/LoadDemandVectors.R
|
9d3cf4fac1218b92fb288f8f1e2d916d7975474c
|
[
"CC0-1.0"
] |
permissive
|
MoLi7/useeior
|
a8038a30bfabee55af1abfc59947f59254e505fe
|
b5e8b3bdd544c0ffb1f27e98aba3afcd3b7ab965
|
refs/heads/master
| 2023-04-04T23:30:07.588455
| 2021-04-13T17:18:05
| 2021-04-13T17:18:05
| 313,776,518
| 0
| 0
|
CC0-1.0
| 2021-04-13T17:18:06
| 2020-11-18T00:15:02
| null |
UTF-8
|
R
| false
| false
| 1,299
|
r
|
LoadDemandVectors.R
|
#' Adds demand vectors and metadata based on model specs to model object
#'
#' For every entry in \code{model$specs$DemandVectors}, records a metadata
#' row and, when a generator function is registered for the entry's
#' Type/System pair in \code{dem_vec_fxn_registry}, calls it to build the
#' demand vector. Errors if any entry has no registered generator.
#'
#' @param model A model list object with the specs object listed
#' @return model with a list of demand vectors and a meta file
#' @export
loadDemandVectors <- function(model) {
  logging::loginfo("Loading demand vectors from model spec...")
  model$DemandVectors <- list()
  meta <- data.frame()
  model$DemandVectors$vectors <- list()
  specs <- model$specs$DemandVectors
  for (v in names(specs)) {
    # Populate metadata for this demand vector
    i <- specs[[v]]
    i["Name"] <- v
    i["ID"] <- tolower(paste(i$Year, i$Location, i$Type, i$System, sep = "_"))
    meta <- rbind(meta, data.frame(i, stringsAsFactors = FALSE))
    # Look up the generator registered for this Type/System pair
    func_to_eval <- dem_vec_fxn_registry[[i$Type]][[i$System]]
    if (!is.null(func_to_eval)) {
      logging::loginfo(paste("Loading", v, "demand vector..."))
      # do.call() accepts a function name as a character string, so the
      # original eval(as.name(...)) indirection was unnecessary.
      dv <- do.call(func_to_eval, list(model))
      model$DemandVectors$vectors[[i$ID]] <- dv
    } else {
      msg <- paste(v, "not found in registered demand vector functions. This vector must be registered or removed from the model spec.")
      logging::logerror(msg)
      # FIX: the original called stop() with no message, so callers saw an
      # empty error; surface the same reason that was logged.
      stop(msg, call. = FALSE)
    }
  }
  model$DemandVectors$meta <- meta
  return(model)
}
|
7ef9f6db21e203c19515b892aeac8e6e9bea3224
|
4ee1f08370c39ca5ad702225359a1978d6f1a57e
|
/figure scripts/miso9_immune_figure1.R
|
1d30532a3d6ce444d9acdb777adfdf34572bdd7e
|
[] |
no_license
|
jgrembi/wash-immune
|
358696974d7b2d3a8ecd528eb803e88f60c8decd
|
44d924c9ce32dcf7ac034d5220b62a1ec1f8677d
|
refs/heads/master
| 2023-04-04T07:50:59.986253
| 2021-04-02T05:29:23
| 2021-04-02T05:29:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,527
|
r
|
miso9_immune_figure1.R
|
source(here::here("0-config.R"))
library(tibble)
data <- tibble(x = 1:100, y= 1:100)
head(data)
library(dplyr)
data %>%
ggplot(aes(x, y)) +
scale_x_continuous(minor_breaks = seq(10, 100, 10)) +
scale_y_continuous(minor_breaks = seq(10, 100, 10)) +
theme_void() ->
p
p +
geom_rect(xmin = 25, xmax=75, ymin=96, ymax=100, color='black',
fill='white', size=0.25) +
annotate('text', x= 50, y=98,label= '13,279 compounds assessed for eligibility', size=2.5) ->#+
#annotate('text', x= 50, y=102,label= 'Figure S1: CONSORT Diagram for the WASH Benefits immune status and growth factor study population', size=3) ->
p
p +
geom_rect(xmin = 58, xmax=104, ymin=87, ymax=95, color='black',
fill='white', size=0.25) +
annotate('text', x= 81, y=90,label= 'Excluded: 7,728 compounds \n 7,429 compounds excluded to create bu???er zones\n 219 compounds did not meet enrollment criteria\n 80 compounds declined to participate
', size=2.5) +
annotate('text', x= 3, y=90,label= 'Enrollment', size=4) +
geom_rect(xmin = 30, xmax=70, ymin=82, ymax=86, color='black',
fill='white', size=0.25) +
annotate('text', x= 50, y=84.6,label= '
720 clusters created and randomly allocated across 7 arms \n 5,551 compounds randomly allocated across 7 arms \n 2 out of 7 arms selected into substudy', size=2.5) +
annotate('text', x= 2.5, y=84,label= 'Allocation', size=4) +
geom_rect(xmin = 9, xmax=25, ymin=76, ymax=82, color='black',
fill='white', size=0.25) +
annotate('text', x= 17, y=80,label= '
Control \n 180 clusters \n 1,382 households', size=2.5) +
geom_rect(xmin = 71, xmax=104, ymin=76, ymax=82, color='black',
fill='white', size=0.25) +
annotate('text', x= 88, y=80,label= '
Water+Sanitation+Handwashing+Nutrition \n 90 clusters \n 686 households ', size=2.5) +
geom_rect(xmin = 71, xmax=104, ymin=63, ymax=75, color='black',
fill='white', size=0.25) +
annotate('text', x= 88, y=70,label= '
Year 1 \n 63 clusters \n 480 children \n Year 2 \n 67 clusters \n 505 children ', size=2.5)+
geom_rect(xmin = 71, xmax=104, ymin=32, ymax=62, color='black',
fill='white', size=0.25) +
annotate('text', x= 88, y=48,label= '
Year 1 \n 100 children lost to follow-up \n 9 moved \n 29 absent \n 14 withdrew \n 37 no live birth \n 11 child death \n Year 2 \n 25 new children measured \n 104 children lost to follow-up \n 28 moved \n 2 absent \n 18 withdrew \n 38 no live birth \n 18 child death ', size=2.5) +
geom_rect(xmin = 71, xmax=104, ymin=19, ymax=31, color='black',
fill='white', size=0.25) +
annotate('text', x= 88, y=26,label= '
Year 1 \n 63 clusters \n 380 children \n Year 2 \n 67 clusters \n 401 children ', size=2.5) +
geom_rect(xmin = 71, xmax=104, ymin=10, ymax=18, color='black',
fill='white', size=0.25) +
annotate('text', x= 88, y=15,label= '
Year 1 \n 69 missing outcome \n Year 2 \n 22 missing outcome', size=2.5) +
geom_rect(xmin = 71, xmax=104, ymin=-3, ymax=9, color='black',
fill='white', size=0.25) +
annotate('text', x= 88, y=4,label= '
Year 1 \n 62 clusters \n 311 children \n Year 2 \n 67 clusters \n 379 children', size=2.5) +
geom_rect(xmin = 9, xmax=25, ymin=63, ymax=75, color='black',
fill='white', size=0.25) +
annotate('text', x= 17, y=70,label= '
Year 1 \n 68 clusters \n 516 children \n Year 2 \n 68 clusters \n 516 children ', size=2.5) +
annotate('text', x= 3, y=70,label= 'Subsample \n Target', size=3.5) +
geom_rect(xmin = 6, xmax=28, ymin=32, ymax=62, color='black',
fill='white', size=0.25) +
annotate('text', x= 17, y=48,label= '
Year 1 \n 140 children lost to follow-up \n 14 moved \n 16 absent \n 62 withdrew \n 29 no live birth \n 19 child death \n Year 2 \n 0 new children measured \n 158 children lost to follow-up \n 35 moved \n 3 absent \n 72 withdrew \n 29 no live birth \n 19 child death ', size=2.5) +
annotate('text', x= 1, y=48,label= 'Follow-up', size=3.3) +
geom_rect(xmin = 9, xmax=25, ymin=19, ymax=31, color='black',
fill='white', size=0.25) +
annotate('text', x= 17, y=26,label= '
Year 1 \n 68 clusters \n 376 children \n Year 2 \n 68 clusters \n 358 children ', size=2.5) +
annotate('text', x= 2.5, y=26,label= 'Subsample \n Enrollment', size=3.5) +
geom_rect(xmin = 9, xmax=25, ymin=10, ymax=18, color='black',
fill='white', size=0.25) +
annotate('text', x= 17, y=15,label= '
Year 1 \n 91 missing outcome \n Year 2 \n 33 missing outcome', size=2.5) +
annotate('text', x= 2.5, y=15,label= 'Specimen \n Collection', size=3.5) +
annotate('text', x= 2.5, y=4,label= 'Analysis', size=3.5) +
geom_rect(xmin = 9, xmax=25, ymin=-3, ymax=9, color='black',
fill='white', size=0.25) +
annotate('text', x= 17, y=4,label= '
Year 1 \n 68 clusters \n 285 children \n Year 2 \n 68 clusters \n 325 children', size=2.5) ->
p
p
p +
geom_segment(
x=50, xend=50, y=96, yend=86,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=50, xend=58, y=91, yend=91,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=17, xend=17, y=76, yend=75,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=17, xend=17, y=63, yend=62,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=17, xend=17, y=32, yend=31,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=17, xend=17, y=19, yend=18,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=17, xend=17, y=10, yend=9,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=88, xend=88, y=76, yend=75,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=88, xend=88, y=63, yend=62,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=88, xend=88, y=32, yend=31,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=88, xend=88, y=19, yend=18,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=88, xend=88, y=10, yend=9,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=30, xend=17, y=85, yend=85,
size=0.15, linejoin = "mitre", lineend = "butt") +
geom_segment(
x=17, xend=17, y=85, yend=82,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) +
geom_segment(
x=70, xend=88, y=85, yend=85,
size=0.15, linejoin = "mitre", lineend = "butt") +
geom_segment(
x=88, xend=88, y=85, yend=82,
size=0.15, linejoin = "mitre", lineend = "butt",
arrow = arrow(length = unit(1, "mm"), type= "closed")) ->
p
p
ggsave(p, file = here("figures/immune_figure1.tiff"), height=14, width=9)
|
08a7be16190915cdfe218f2d8449b007faf9296f
|
065370e27d5d0dd082273f5a707f7c153628828b
|
/man/AssignedProxy-class.Rd
|
8d8c997010bd5a8d942f488a5de974e8bcbc17ca
|
[] |
no_license
|
takewiki/XR
|
9622a1b019c28fadca5c254b4d61edd405756ef0
|
296bbcbc4c2d7d1f7761715923c52f17d047c612
|
refs/heads/master
| 2020-03-25T06:56:55.203485
| 2018-07-26T23:03:33
| 2018-07-26T23:03:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,554
|
rd
|
AssignedProxy-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Interface.R
\docType{class}
\name{AssignedProxy-class}
\alias{AssignedProxy-class}
\alias{AssignedProxy}
\title{Class for Assigned Proxy Objects and Related Mechanisms}
\description{
The `AssignedProxy` class is used by interface packages to return a reference
to an assigned server object. The \R user can then supply this object anywhere
in later interface computations, just as one would use the name of an \R object
in a function call or other expression.
}
\details{
The virtual class `ProxyObject` is a superclass, designed to allow
other mechanisms for proxy objects (none exists at this time).
}
\section{Slots}{
\describe{
\item{\code{.Data}}{The `AssignedProxy` class is a subclass of `character`; the actual character string
will be generated by the interface and is unique over the session, so long as the `XR`
package stays loaded.}
\item{\code{serverClass,module}}{The server language class and module for the corresponding object.}
\item{\code{size}}{The size (usually, length) of the server object, if that makes sense.
Can be used to make decisions about handling large objects.}
\item{\code{evaluator}}{The evaluator object that returned the proxy. Having this as a slot allows
interface computations to operate directly on the proxy, without a user-supplied evaluator.}
}}
\references{
Chambers, John M. (2016)
\emph{Extending R},
Chapman & Hall/CRC.
( Chapter 13, discussing this package, is included in the package: \url{../doc/Chapter_XR.pdf}.)
}
|
d42b70ef711ca98c149abbbae3a4160fff70ae11
|
4f288ad36167e1d97c56aac00ae66a663bf0925d
|
/puckalytics_dashboard_scraper.r
|
b0420667ea36903848960cb9dffd2ae27efb353a
|
[] |
no_license
|
charliechaf/nhl
|
a629940b5460169908fb6f3174679ddcb175de27
|
9e045dfe35047e15c613565b22eb6473f553eb0a
|
refs/heads/master
| 2021-01-20T12:52:15.556740
| 2017-06-05T16:07:51
| 2017-06-05T16:07:51
| 90,422,211
| 0
| 1
| null | 2017-06-05T16:07:52
| 2017-05-05T22:44:09
|
R
|
UTF-8
|
R
| false
| false
| 1,222
|
r
|
puckalytics_dashboard_scraper.r
|
library("rvest")
library(data.table)
library(stringr)
library(dplyr)
library(dtplyr)
library(beepr)
z=1
situations = c('5v5','5v5home','5v5road','5v5close_home', '5v5close_road', '5v5close','5v5tied','5v5tied_home','5v5tied_road',
'5v5leading','5v5leading_home','5v5leading_road','5v5trailing','5v5trailing_home','5v5trailing_road',
'5v5up1','5v5up2','5v5down1','5v5down2','4v4','threeonthree','5v5firstperiod','5v5secondperiod','5v5thirdperiod',
'all','5v4','4v5','PP','SH')
count=0
for (i in situations){
x = 2013
y=14
for(i in 1:4){
url <- sprintf("http://stats.hockeyanalysis.com/teamstats.php?db=%s%s&sit=%s&disp=1&sortdir=DESC&sort=GFPCT",x,y, situations[z])
population <- url %>%
read_html() %>%
html_nodes(xpath='/html/body/table') %>%
html_table(fill=TRUE)
population[[1]]$year= x+1
population[[1]]$situation= situations[z]
population[[1]][1] = NULL
if(count==0){
db = population[[1]] }
else {
db= rbind(db,population[[1]]) }
print(count)
print(x)
print(situations[z])
x=x+1
y=y+1
count=count+1
}
z=z+1
}
beep(sound=4)
setwd("G:/Hoffmann/R")
write.csv(db, file= "nhl.csv")
|
f4126ad418d6fbae623c13006756349728e8c174
|
497881180743eb709fe9956a42a0aedb60f64878
|
/R/update.R
|
bab6ac8158ab37ff23f6298cb4ba5ab0327eb1d8
|
[] |
no_license
|
tr8dr/dygraphs
|
77c9e3354e25f404bbfaf666c8cc280fdaab635c
|
42ca0e341df49777212912d1b8b7ce74ee8b114f
|
refs/heads/master
| 2020-04-10T19:11:53.284248
| 2018-12-12T13:59:19
| 2018-12-12T13:59:19
| 161,226,587
| 0
| 0
| null | 2018-12-10T19:33:39
| 2018-12-10T19:33:38
| null |
UTF-8
|
R
| false
| false
| 2,218
|
r
|
update.R
|
#' dyUpdate allows in-situ replacement of the rendered timeseries in dygraph within shiny, rather than
#' rerendering the widget on each data change. This can be used for "real-time" update changes in
#' sampling, etc.
#'
#' @param session shiny session
#' @param id graph identifier (refers to the dygraph elementId parameter)
#' @param data the latest data set to be rendered (must have the same number of columns as the original data set)
#'
#' @note
#' See the \href{https://rstudio.github.io/dygraphs/}{online documentation} for
#' additional details and examples.
#'
#' @examples
#' require(shiny)
#' require(dygraphs)
#'
#' app = function () {
#' newdata <- function(n = 1000) {
#' vclose <- cumsum(rnorm(n,sd=0.25))
#' vlow <- vclose - abs(rnorm(n,sd=0.25))
#' vhigh <- vclose + abs(rnorm(n,sd=0.25))
#' vopen <- c(vlow[1], vclose[1:(NROW(vclose)-1)])
#' times <- as.POSIXct((1:n)*5, origin='2018-1-1 00:00:00', tz='UTC')
#' data.frame(open=vopen, high=vhigh, low=vlow, close=vclose, row.names = times)
#' }
#' graph = function() {
#' bars <- newdata()
#' v1 <- dygraph(bars, height=650, width='100%', elementId='graph1') %>%
#' dyCandlestick() %>%
#' dyOptions(labelsUTC = TRUE)
#' htmltools::browsable(v1)
#' }
#'
#' ui <- fluidPage(
#' sidebarLayout(sidebarPanel(actionButton("button", "regenerate")),
#' mainPanel(graph())))
#'
#' events <- function (input, output, session) {
#' observeEvent(input$button, {
#' bars <- newdata()
#' dyUpdate (session, 'graph1', bars)
#' })
#' }
#'
#' shinyApp(ui = ui, server = events, options=list(port=5432, host="127.0.0.1"))
#' }
#'
#' app()
#'
#' @export
dyUpdate <- function(session, id, data) {
if (!xts::is.xts(data))
data <- xts::as.xts(data)
times <- as.POSIXct(time(data), origin='2010-1-1 00:00:00', tz='UTC')
data <- zoo::coredata(data)
# create matrix of time and data columns (time in epoch time seconds)
mat <- cbind(as.numeric(times), data)
# send data and target graph ID to browser
session$sendCustomMessage(type='dygraph:newdata', list(id=id, data=mat))
}
|
a118b30885d5f91c754fa06a7f9514264905a3a1
|
66cfbd0954f5067b418e1d5712b029626aaa9923
|
/man/plotGens.Rd
|
b769237b00c068401f0e7f97e78933c1cf9e0cec
|
[
"MIT"
] |
permissive
|
ozt-ca/TokyoCovidMonitor
|
7a390708f8c0696855d5f3670fc40dba0a5e09b3
|
b81b1fe0c195d03323cfbd9c7599c18e2ef7e69a
|
refs/heads/main
| 2023-07-09T13:06:37.961797
| 2021-08-13T09:50:57
| 2021-08-13T09:50:57
| 352,635,952
| 13
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 328
|
rd
|
plotGens.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis.R
\name{plotGens}
\alias{plotGens}
\title{Plot of cases by generations}
\usage{
plotGens(out = NULL, saveFile = F)
}
\arguments{
\item{saveFile}{}
}
\value{
}
\description{
Plot of cases by generations
}
\examples{
plotGens(saveFile = T)
}
|
b6a9fd62a092d0000eb7656fc7c1c493f929d6bb
|
af09e109dbf1ca9cdbcf3aa8d1a53d8b75dbea44
|
/TestScript.R
|
c71773fcf69d5b24aefc49776aea88092aac7b34
|
[] |
no_license
|
shaunharrigan/Test
|
738adc30dc2cde44ab5cbd94be51814ee3487e07
|
14bc5b4d92f2381c7415bb936d6e22a99e8e8caa
|
refs/heads/master
| 2020-02-26T13:10:10.349605
| 2016-07-14T14:22:17
| 2016-07-14T14:22:17
| 63,319,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
TestScript.R
|
# Test script
x <- rnorm(100)
plot(x)
print("Test GitHub V2")
|
fae41722c5aee9ec0cd53c2180bf6aba0b0bfbc4
|
cfd4ee0d5f3d6c54ba41df91483e19716f313481
|
/2groups_sex/server.R
|
a5cb007af12110763a875d132cde68fd72c1d90b
|
[] |
no_license
|
anni3ly/behavior_webapp
|
92e758f249e9c663d7a59a40740a11e166bcd402
|
f86f3c8f84597977d03f277d477bb3f68f3cde28
|
refs/heads/master
| 2020-09-06T04:40:44.727520
| 2020-07-09T14:10:28
| 2020-07-09T14:10:28
| 220,324,440
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,823
|
r
|
server.R
|
server = function(input, output)({
group_1 = c("thc")
group_2 = c("veh")
#----------------------------------------
# Reactive data sets ----
reactive.data1 <- reactive({
data_summary(FR_data, varname=input$y, groupnames=c("session", "group", "fr", "sex"))
#data_summary(FR_data, varname=input$y, groupnames=c("session", "group", "fr"))
})
reactive.data2<- reactive({
subset(reactive.data1(),
session >= input$session[1] & session <=input$session[2])
})
reactive.data3<- reactive({
subset(FR_data, group==group_1)
})
reactive.data4<- reactive({
subset(FR_data, group==group_2)
})
# reactive.data5<- reactive({
# data_summary(reactive.data3, varname=input$y, groupnames="subject")
# })
#
# reactive.data15<- reactive({
# data_summary(reactive.data4, varname=input$y, groupnames="subject")
# })
reactive.data6<- reactive({
select(FR_data, subject, session, group, boxnum, fr, active, trial.active, timeout.active, inactive, reward, sex) %>% subset(group==group_1 & sex=="f")
})
reactive.data7<- reactive({
select(FR_data, subject, session, group, boxnum, fr, active, trial.active, timeout.active, inactive, reward, sex) %>% subset(group==group_2 & sex=="f")
})
reactive.data8<- reactive({
select(FR_data, subject, session, group, boxnum, fr, active, trial.active, timeout.active, inactive, reward, sex) %>% subset(group==group_1 & sex=="m")
})
reactive.data11<- reactive({
select(FR_data, subject, session, group, boxnum, fr, active, trial.active, timeout.active, inactive, reward, sex) %>% subset(group==group_2 & sex=="m")
})
reactive.data9<- reactive({
keep4 <- subset(FR_data, group==group_1 & sex=="f")[ vals4$keeprows4, , drop = FALSE]
keep5 <- subset(FR_data, group==group_2 & sex=="f")[ vals5$keeprows5, , drop = FALSE]
keep6 <- subset(FR_data, group==group_1 & sex=="m")[ vals6$keeprows6, , drop = FALSE]
keep7 <- subset(FR_data, group==group_2 & sex=="m")[ vals7$keeprows7, , drop = FALSE]
exclude_final<-rbind(keep4,keep5, keep6, keep7)
level1_9<-tidyr::gather(exclude_final, lever, presses, c(active, inactive))
level1_9<-with(level1_9, level1_9[order(session, subject),])
})
reactive.data10<- reactive({
lmer(presses ~ session*lever*group*sex + (1|subject), data=reactive.data9())
})
reactive.data13<- reactive({
keep4 <- subset(FR_data, group==group_1 & sex=="f")[ vals4$keeprows4, , drop = FALSE]
keep5 <- subset(FR_data, group==group_2 & sex=="f")[ vals5$keeprows5, , drop = FALSE]
keep6 <- subset(FR_data, group==group_1 & sex=="m")[ vals6$keeprows6, , drop = FALSE]
keep7 <- subset(FR_data, group==group_2 & sex=="m")[ vals7$keeprows7, , drop = FALSE]
exclude_final<-rbind(keep4,keep5, keep6, keep7)
level1_9<-tidyr::gather(exclude_final, lever, presses, c(active, inactive))
level1_9<-with(level1_9, level1_9[order(session, subject),])
level1_9$session<- factor(level1_9$session)
return(level1_9)
})
reactive.data14<- reactive({
lmer(presses ~ session*lever*group*sex + (1|subject), data=reactive.data13())
})
reactive.data16<- reactive({
keep4 <- subset(FR_data, group==group_1 & sex=="f")[ vals4$keeprows4, , drop = FALSE]
keep5 <- subset(FR_data, group==group_2 & sex=="f")[ vals5$keeprows5, , drop = FALSE]
keep6 <- subset(FR_data, group==group_1 & sex=="m")[ vals6$keeprows6, , drop = FALSE]
keep7 <- subset(FR_data, group==group_2 & sex=="m")[ vals7$keeprows7, , drop = FALSE]
exclude_final<-rbind(keep4,keep5, keep6, keep7)
level1_9<-tidyr::gather(exclude_final, lever, presses, c(active, inactive))
level1_9<-with(level1_9, level1_9[order(session, subject),])
data15_1<-unite(level1_9, session, lever, col="session_lever", sep="_")
data15_1<-dplyr::select(data15_1, subject, group, sex, session_lever, presses)
data15_1$session_lever<-data15_1$session_lever %>% factor() %>% fct_inorder()
data16_1<-spread(data15_1, session_lever, presses)
group<- data16_1$group
subject<- data16_1$subject
sex<- data16_1$sex
data16_1$group<-NULL
data16_1$subject<-NULL
data16_1$sex<-NULL
data16_1<-t(apply(data16_1, 1, function(x) c(x[is.na(x)], x[!is.na(x)])))
data16_1<-data16_1[,apply(data16_1, 2, function(x) !any(is.na(x)))]
data16_1<-data16_1[,(ncol(data16_1)-5):ncol(data16_1)]
colnames(data16_1)<-c("x_active", "x_inactive", "y_active", "y_inactive", "z_active", "z_inactive")
data16_1<-as.data.frame(data16_1)
data16_1<- cbind(subject, group, sex, data16_1)
data16_1<- gather(data16_1, "session_lever", "presses", x_active:z_inactive)
data16_1<- separate(data16_1, session_lever, c("session", "lever"), sep="_")
data16_1$presses<- as.numeric(data16_1$presses)
return(data16_1)
})
reactive.data12<- reactive({
lmer(presses ~ session*lever*group*sex + (session|subject), data=reactive.data16())
})
vals4 <- reactiveValues(
keeprows4 = rep(TRUE, nrow(subset(FR_data, group==group_1 & sex=="f")))
)
vals5 <- reactiveValues(
keeprows5 = rep(TRUE, nrow(subset(FR_data, group==group_2 & sex=="f")))
)
vals6 <- reactiveValues(
keeprows6 = rep(TRUE, nrow(subset(FR_data, group==group_1 & sex=="m")))
)
vals7 <- reactiveValues(
keeprows7 = rep(TRUE, nrow(subset(FR_data, group==group_2 & sex=="m")))
)
download.plot<- reactive({
ggplot(reactive.data2(), aes(x=session, y= mean, group = interaction(group,sex), color=interaction(group,sex))) +
geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.1, position=position_dodge(0.05)) +
geom_point(aes(shape=fr), size = 5) +
geom_line(aes(linetype=interaction(group,sex)), size=1) +
ggtitle(input$title) +
ylab(input$y) +
theme_classic()
})
#----------------------------------------
# Plots
# Main plots ----
output$plot<- renderPlot({
p <- ggplot(reactive.data2(), aes(x=session, y= mean, color=sex)) + #group = interaction(group,sex),color=interaction(group,sex)
geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.05, size=1) +
geom_point(aes(shape=fr), size = 5) +
geom_line(aes(linetype=group), size=1) +
#ggtitle(input$title) +
ylab(input$y) +
scale_shape_discrete(name="FR Ratio Schedule") +
theme_classic()
p
})
output$plot2<- renderPlot({
q<- ggplot(reactive.data3(), aes_string(y=input$y, x="session", group = "sex", color="sex")) +
geom_line() +
geom_point(aes(shape=fr), size=3) +
facet_wrap( ~ subject) +
scale_shape_discrete(name="FR Ratio Schedule") +
theme_bw()
q
}, height=1000)
output$plot3<- renderPlot({
q<- ggplot(reactive.data4(), aes_string(y=input$y, x="session", group = "sex", color="sex")) +
geom_line() +
geom_point(aes(shape=fr), size=3) +
facet_wrap(~ subject) +
scale_shape_discrete(name="FR Ratio Schedule") +
theme_bw()
q
}, height=1000)
output$plot4<- renderPlot({
keep4 <- subset(FR_data, group==group_1 & sex=="f")[ vals4$keeprows4, , drop = FALSE]
ggplot(keep4, aes_string(y=input$y, x="session")) +
geom_boxplot(aes(fill=factor(session))) +
geom_point() +
labs(y=input$y, x="session") +
theme_bw()
})
output$plot5<- renderPlot({
keep5 <- subset(FR_data, group==group_2 & sex=="f")[ vals5$keeprows5, , drop = FALSE]
ggplot(keep5, aes_string(y=input$y, x="session")) +
geom_boxplot(aes(fill=factor(session))) +
geom_point() +
labs(y=input$y, x="session") +
theme_bw()
})
output$plot6<- renderPlot({
keep6 <- subset(FR_data, group==group_1 & sex=="m")[ vals6$keeprows6, , drop = FALSE]
ggplot(keep6, aes_string(y=input$y, x="session")) +
geom_boxplot(aes(fill=factor(session))) +
geom_point() +
labs(y=input$y, x="session") +
theme_bw()
})
output$plot7<- renderPlot({
keep7 <- subset(FR_data, group==group_2 & sex=="m")[ vals7$keeprows7, , drop = FALSE]
ggplot(keep7, aes_string(y=input$y, x="session")) +
geom_boxplot(aes(fill=factor(session))) +
geom_point() +
labs(y=input$y, x="session") +
theme_bw()
})
output$int_plot<- renderPlot({
emmip(reactive.data14(), group + sex ~ session | lever)
})
output$int_plot2<- renderPlot({
emmip(reactive.data12(), group + sex ~ session | lever)
})
output$residualplot3<- renderPlot({
qqnorm(residuals(reactive.data12()))
qqline(residuals(reactive.data12()))
})
output$residualplot4<- renderPlot({
plot(reactive.data12(), resid(., scaled=TRUE) ~ presses | sex*group)
})
output$residualplot1<- renderPlot({
qqnorm(residuals(reactive.data10()))
qqline(residuals(reactive.data10()))
})
output$residualplot2<- renderPlot({
plot(reactive.data10(), resid(., scaled=TRUE) ~ presses | sex*group)
})
#----------------------------------------
# Render Prints ----
output$hover_info1<- renderPrint({
nearPoints(reactive.data6(), input$plot_hover1, xvar = "session", yvar = input$y)
})
output$hover_info2<- renderPrint({
nearPoints(reactive.data7(), input$plot_hover2, xvar = "session", yvar = input$y)
})
output$hover_info3<- renderPrint({
nearPoints(reactive.data8(), input$plot_hover3, xvar = "session", yvar = input$y)
})
output$hover_info4<- renderPrint({
nearPoints(reactive.data11(), input$plot_hover4, xvar = "session", yvar = input$y)
})
output$stats_wald2<-renderPrint({
Anova(lmer(presses ~ session*lever*group*sex + (1|subject), data=reactive.data9()), type=2)
})
output$stats_wald3<-renderPrint({
Anova(lmer(presses ~ session*lever*group*sex + (1|subject), data=reactive.data9()), type=3)
})
# output$stats_s2<-renderPrint({
# anova(lmer(presses ~ session*lever*group*sex + (group|subject), data=reactive.data9()), type=2)
# })
#
# output$stats_s3<-renderPrint({
# anova(lmer(presses ~ session*lever*group*sex + (group|subject), data=reactive.data9()), type=3)
# })
output$stats_tukey<-renderPrint({
TukeyHSD(x=aov(presses ~ group + lever + sex, data=reactive.data9()), conf.level=0.95)
})
output$last3sessions_anova3<-renderPrint({
Anova(lmer(presses ~ session*lever*group*sex + (session|subject), data=reactive.data16()), type=3)
#aov(presses~group*lever*session + Error(subject/session), data=reactive.data16())
})
output$last3sessions_anova2<-renderPrint({
Anova(lmer(presses ~ session*lever*group*sex + (session|subject), data=reactive.data16()), type=2)
#aov(presses~group*lever*session + Error(subject/session), data=reactive.data16())
})
output$stats_tukey2<-renderPrint({
TukeyHSD(x=aov(presses ~ group + lever + sex, data=reactive.data16()), conf.level=0.95)
})
output$contrasts1<- renderPrint({
emmeans(reactive.data10(), pairwise ~ lever + group + sex)
})
output$contrasts2<- renderPrint({
emmeans(reactive.data12(), pairwise ~ lever + group + sex)
})
#----------------------------------------
# Render Tables ----
output$master.data<- DT::renderDataTable({
DT::datatable(FR_data, filter='top', options=list(pagelength=25), class = 'cell-border stripe')
})
output$table<- DT::renderDataTable({
DT::datatable(reactive.data2(), filter='top', options=list(pagelength=25), class = 'cell-border stripe')
})
#----------------------------------------
# Download Handlers ----
# output$downloadplot<- downloadHandler(
# filename = function() { paste(input$y, '.png', sep = '')},
# content = function(file) {
# ggsave(file, plot=download.plot(), device="png")
# })
output$downloaddata<- downloadHandler(
filename = function(){ paste0("data.", input$filetype, sep = '')},
content = function(file){
if(input$filetype == "csv"){
write_csv(FR_data, file)
}
if(input$filetype == "tsv"){
write_tsv(FR_data, file)
}
if(input$filetype =="xlsx"){
#write_excel_csv(FR_data, file)
write.xlsx(FR_data, file)
}
})
#----------------------------------------
# Toggle points that are clicked ----
observeEvent(input$plot_click1, {
res4 <- nearPoints(subset(FR_data, group==group_1 & sex=="f"), input$plot_click1, allRows = TRUE)
vals4$keeprows4 <- xor(vals4$keeprows4, res4$selected_)
res5 <- nearPoints(subset(FR_data, group==group_2 & sex=="f"), input$plot_click2, allRows = TRUE)
vals5$keeprows5 <- xor(vals5$keeprows5, res5$selected_)
res6 <- nearPoints(subset(FR_data, group==group_1 & sex=="m"), input$plot_click3, allRows = TRUE)
vals6$keeprows6 <- xor(vals6$keeprows6, res6$selected_)
res7 <- nearPoints(subset(FR_data, group==group_2 & sex=="m"), input$plot_click4, allRows = TRUE)
vals7$keeprows7 <- xor(vals7$keeprows7, res7$selected_)
})
#----------------------------------------
# Toggle points that are brushed, when button is clicked ----
observeEvent(input$exclude_toggle, {
res4 <- brushedPoints(subset(FR_data, group==group_1 & sex=="f"), input$plot_brush1, allRows = TRUE)
vals4$keeprows4 <- xor(vals4$keeprows4, res4$selected_)
res5 <- brushedPoints(subset(FR_data, group==group_2 & sex=="f"), input$plot_brush2, allRows = TRUE)
vals5$keeprows5 <- xor(vals5$keeprows5, res5$selected_)
res6 <- brushedPoints(subset(FR_data, group==group_1 & sex=="m"), input$plot_brush3, allRows = TRUE)
vals6$keeprows6 <- xor(vals6$keeprows6, res6$selected_)
res7 <- brushedPoints(subset(FR_data, group==group_2 & sex=="m"), input$plot_brush4, allRows = TRUE)
vals7$keeprows7 <- xor(vals7$keeprows7, res7$selected_)
})
#----------------------------------------
# Reset all toggle points ----
observeEvent(input$exclude_reset, {
vals4$keeprows4 <- rep(TRUE, nrow(subset(FR_data, group==group_1 & sex=="f")))
vals5$keeprows5 <- rep(TRUE, nrow(subset(FR_data, group==group_2 & sex=="f")))
vals6$keeprows6 <- rep(TRUE, nrow(subset(FR_data, group==group_1 & sex=="m")))
vals7$keeprows7 <- rep(TRUE, nrow(subset(FR_data, group==group_2 & sex=="m")))
})
})
shinyApp(ui = ui,server = server)
|
fd09f812f919c73bd36d72371424bc41aa5e4e96
|
f23c0c55de531dd18c3f07f194a5ebce2a7c3f13
|
/rankhospital.R
|
bcf217bb66ce1a5e0089408938f9770e4fddf597
|
[] |
no_license
|
YevgenyY/RProg-Assignment3
|
c899d54e98d260c51e573bba878c59d483ecbe14
|
0e206eb68e54856cee8eae3cd2a96164388df3a4
|
refs/heads/master
| 2021-01-16T18:03:13.376632
| 2014-09-27T21:45:40
| 2014-09-27T21:45:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, num = "best") {
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
# prepare data, format as numeric
tmp <- data_num<-sapply(data[, c(11, 17, 23)], as.numeric)
data_num<-cbind(data[,c(2,7)], tmp)
# order alphabetically by Hospital name
data_ord <- data_num[order(data_num[,1]),]
## Check that state and outcome are valid
# Check outcome string
if (outcome == "heart attack")
colindex <- 3
else if (outcome == "heart failure")
colindex <- 4
else if (outcome == "pneumonia")
colindex <- 5
else
stop("invalid outcome\n")
# Check state
if (! state %in% data_num[,2])
stop("invalid state")
## Return hospital name in that state with the given rank
## 30-day death rate
data_state <- data_ord[data_ord$State==state,]
if (num == "best")
result <- data_state[ which.min(data_state[,colindex]), "Hospital.Name"]
else if (num == "worst")
result <- data_state[ which.max(data_state[,colindex]), "Hospital.Name"]
else
{
tmp <- data_state[order(data_state[, colindex]), "Hospital.Name"]
result <- tmp[num]
}
result
}
|
edeb697be151d037b5bc84ede5013ca0b521159a
|
328a32b2399be3815a5a9d1e09926fa37eb64fb8
|
/tests/temp/rubens/ldp/utils/ldp.R
|
83fe53367a8cf9e13de7aabd1d01ef52ab1eccdf
|
[] |
no_license
|
phramos07/taskminer
|
e5d9b198c61b9c7ebb63e45b30f2df29431faf17
|
1f9515acbb3acc9a8fa5ed8f054cfaec4b1209cf
|
refs/heads/master
| 2023-08-19T07:04:31.731247
| 2019-03-22T13:38:47
| 2019-03-22T13:38:47
| 416,901,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 130
|
r
|
ldp.R
|
#! /usr/bin/Rscript
# Arguments
args = commandArgs();
inldp <- args[6];
inpar <- args[7];
inseq <- args[8];
output <- args[9];
|
4f83e136e59e736d62909a571d37554a2c648eb8
|
299f423a60e2bd8430cb53b07e750013a12a19a4
|
/IQBoss/R/package_functions.R
|
ad8ed069d9bb09071d45c49814035905220cccf2
|
[
"MIT"
] |
permissive
|
xcit-lab/iqboss-audit
|
d74fd72638cf458e85f7281dae8a650e44e34e12
|
58be89f1298f7e2a0545e9a5ec136abb9608d17f
|
refs/heads/master
| 2020-05-09T09:54:08.828158
| 2019-12-12T16:19:29
| 2019-12-12T16:19:29
| 181,021,392
| 1
| 6
|
MIT
| 2019-12-12T16:19:30
| 2019-04-12T14:12:49
|
R
|
UTF-8
|
R
| false
| false
| 654
|
r
|
package_functions.R
|
library(tidyverse)
report_summary <- function() {
exp1 <- IQBoss::data %>%
mutate(IQ_change = IQ_post - IQ_pre)
exp2 <- IQBoss::data2 %>%
mutate(IQ_change = IQ_post - IQ_pre) %>%
mutate(dose_ml = dose_l / 1000)
lm_model1 <- lm(formula = IQ_change ~ dose_ml, data=exp1)
print(summary(lm_model1))
lm_model2 <- lm(formula = IQ_change ~ dose_ml, data = exp2)
print(summary(lm_model2))
}
open_report <- function() {
path <- system.file("final_report_FINAL3.pdf", package="IQBoss")
if (Sys.info()['sysname'] == "Windows") {
system(paste0('open "', path, '"'))
} else {
system(paste0('start "', path, '"'))
}
}
|
7663cf6574d9d5b6177695904608f6d69adaf6cd
|
d052e3d0af115e428ca3d8df31272b5238ea2107
|
/functions/cptPlot.R
|
036566e6a6f0b8695bddbb991acc2c2f7a545a48
|
[] |
no_license
|
BhaktiDwivedi/shinySISPA
|
f8e139d33bfbdc2d5bc824fbffb99d8420471623
|
9844a00963032dc2b1a013183227616f17cb1fc3
|
refs/heads/master
| 2021-01-17T17:51:18.365536
| 2020-06-17T20:46:39
| 2020-06-17T20:46:39
| 70,636,046
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 801
|
r
|
cptPlot.R
|
cptsPlot <- function(x,y,LABEL){
if(missing(x)){
stop("data is missing!")
}
if(missing(y)){
stop("changepoint cutoffs are missing!")
}
x <- sort(x,FALSE)
y <- sort(y,TRUE)
par(bg = "white")
cptplot <- plot(x, type='p', lty=3, lwd=1, main="", xlab="Samples", ylab=LABEL, cex=2.0, pch=1, xlim=c(0,length(x)))
for(i in 1:length(y)) {
if(i==1) {
abline(h=y[i],lty=2,col='red')
text(y[i],y[i],y[i], cex=1.0, pos=4, col="orange")
}else{
## display in terms of sample group profiles
if(y[i]<0){
abline(h=y[i],lty=2,col='grey')
text(y[i],y[i],y[i], cex=1.0, pos=4, col="grey")
} else{
abline(h=y[i],lty=2,col='grey')
text(y[i],y[i],y[i], cex=1.0, pos=4, col="grey")
}
}
}
return(cptplot)
}
|
2355c9b12821f851d517681702a2ee80485a4f12
|
e9cd9c80512f984512ac51f8256cd4c7bd0b61fd
|
/R/07c_V_HistoricalComparison.R
|
f0a9c8aa5b9f47b0b8434a91f84da7c03c3f234a
|
[] |
no_license
|
arnanaraza/streamflow
|
c49e502c082d3c12265bb9be602d25f49deb34c0
|
610cd81e003e18e7019041607ee55031865e8d63
|
refs/heads/master
| 2020-04-19T22:09:29.966358
| 2019-01-31T04:21:42
| 2019-01-31T04:21:42
| 168,461,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,108
|
r
|
07c_V_HistoricalComparison.R
|
### FUNCTION TO COMPUTE ANOMALIES BASED ON HISTORICAL MEAN STREAMFLOW VALUES PER SEASON ###
# Preliminearies
pacman::p_load(dplyr, lubridate, data.table, ggplot2)
mydir <- setwd('D:/THESIS_PP')
# Function proper
# Compare predicted seasonal streamflow for one subwatershed against its
# historical seasonal means, and plot the yearly seasonal means alongside
# forest loss.
#
# Args:
#   pred.table: data frame with columns `date`, `predicted` (streamflow,
#               l/sec) and `forest` (forest loss) -- assumed daily records
#               covering all 12 calendar months (positional indexing below
#               depends on this; TODO confirm).
#   SW:         character; subwatershed name, matched via grep() against the
#               column headings of the historical pivot table hist3.csv.
#   floss:      label used only to tag the output PNG file names.
#
# Side effects: writes one dry-season and one wet-season PNG to
#   D:/THESIS_PP/finalresults/ and reads D:/THESIS_PP/mid-results/hist3.csv.
# Returns: data frame of dry-season yearly mean flow with forest loss
#   (per the trailing comment, swap the return value to `wettest.fl` for
#   the wet-season table).
getHist <- function (pred.table, SW, floss) {
  # get wet and dry season months: mean predicted flow per calendar month,
  # pooled across all years
  by.month <- aggregate(pred.table$predicted ~month(date),
                        data=pred.table,FUN=mean)
  colnames(by.month) <- c('mo', 'pred')
  # keying the data.table on `pred` sorts ascending: driest months come
  # first, wettest months last
  by.month.d <- data.table(by.month, key='pred')
  print (by.month.d) #checker if sorted
  # NOTE(review): the result of this call is discarded -- it has no effect
  # and appears to be leftover from interactive exploration
  by.month.d[, tail(.SD, 3), by=by.month.d$pred]
  # pivot per year: mean predicted flow for each year x month combination,
  # reshaped so rows are months and columns are years
  by.year <- aggregate(pred.table$predicted ~ year(date) + month(date),
                       data=pred.table,FUN=mean)
  colnames(by.year) <- c('year','mo', 'pred')
  by.year.pivot <- dcast(by.year, mo ~ year, value.var="pred", fun.aggregate=mean)
  # get driest and wettest per year: rows 1:2 of the ascending-sorted month
  # table are the 2 driest months, rows 11:12 the 2 wettest (assumes exactly
  # 12 months are present -- TODO confirm)
  mo <- by.month.d[,1]
  dry.mo <- as.vector(t(mo[1:2]))
  wet.mo <- as.vector(t(mo[11:12]))
  driest <- filter(by.year.pivot, mo %in% dry.mo)
  wettest <- filter(by.year.pivot, mo %in% wet.mo)
  # column-wise mean over the two season months -> one seasonal mean per year
  dry.list <- t(as.data.frame(lapply(driest[1:2,2:length(driest)], mean)))
  wet.list <- t(as.data.frame(lapply(wettest[1:2,2:length(wettest)], mean)))
  # attach pcp and fl: join per-year mean forest loss to the seasonal means
  by.year.fl <- aggregate(pred.table$forest ~year(date),data=pred.table,FUN=mean)
  driest.fl <- cbind(dry.list, by.year.fl)
  # NOTE(review): the cbind above yields 3 columns, so dropping column 4 is
  # a no-op (negative out-of-range indices are silently ignored)
  driest.fl <- driest.fl[,-4]
  colnames (driest.fl) <- c('dry.flow', 'year', 'forest.loss')
  wettest.fl <- cbind(wet.list, by.year.fl)
  wettest.fl <- wettest.fl[,-4]
  colnames (wettest.fl) <- c('wet.flow', 'year', 'forest.loss')
  # open historical pivot table and get SW: hist3.csv is assumed to have a
  # `mo` month column plus one column per subwatershed
  h.table <- read.csv('D:/THESIS_PP/mid-results/hist3.csv')
  # drop rows 13:15 -- presumably trailing non-month rows; TODO confirm
  h.table <- h.table[-c(13:15),]
  subs <- subset(h.table, select = grep(SW, names(h.table))) #given that h.table has SW as headings
  #colnames(subs) <- c('mo', SW)
  # historical means over the same dry/wet months identified above
  driest.h <- filter(subs, h.table$mo %in% dry.mo)
  wettest.h <- filter(subs, h.table$mo %in% wet.mo)
  driest.mean <- mean(driest.h[[1]])
  wettest.mean <- mean(wettest.h[[1]])
  # plot the dry-season graph: predicted flow and forest loss per year, with
  # the historical dry-season mean as a dotted reference line
  plot <- ggplot(driest.fl, aes( year ) ) +
    geom_line(aes(y=dry.flow, colour ='dry.flow'))+
    geom_line(aes(y=forest.loss, colour="forest.loss")) +
    geom_hline(yintercept = as.numeric(driest.mean), linetype="dotted", size=2) +
    labs(title=paste0('predicted dry season daily mean streamflow,', ' ', SW)) +
    scale_colour_manual('',breaks = c('dry.flow', 'forest.loss'),
                        values = c('dry.flow'="blue", 'forest.loss'="red")) +
    xlab("year") +
    scale_y_continuous("average streamflow (l/sec)") +
    annotate("text", min(driest.fl$year), as.numeric(driest.mean), hjust = 0, vjust = -1, label = "historical dry season daily mean streamflow") +
    theme(text = element_text(size = 16))
  #plot(plot)
  ggsave(plot=plot, filename=paste0('D:/THESIS_PP/finalresults/', Sys.Date(),'_', SW, '_', floss,'_','_dry.png'), device='png', dpi=100, width = 11, height = 8, units='in')
  # same graph for the wet season, against the historical wet-season mean
  plot1 <- ggplot(wettest.fl, aes( year ) ) +
    geom_line(aes(y=wet.flow, colour ='wet.flow'))+
    geom_line(aes(y=forest.loss, colour="forest.loss")) +
    geom_hline(yintercept = as.numeric(wettest.mean), linetype="dotted", size=2) +
    labs(title=paste0('predicted wet season daily mean streamflow', ' ', SW)) +
    scale_colour_manual('',breaks = c('wet.flow', 'forest.loss'),
                        values = c('wet.flow'="blue", 'forest.loss'="red")) +
    xlab("year") +
    scale_y_continuous("average streamflow (l/sec)") +
    annotate("text", min(wettest.fl$year), as.numeric(wettest.mean), hjust = 0, vjust = -1, label = "historical wet season daily mean streamflow") +
    theme(text = element_text(size = 16))
  # plot(plot)
  ggsave(plot=plot1, filename=paste0('D:/THESIS_PP/finalresults/', Sys.Date(),'_', SW, '_',floss, '_','_wet.png'), device='png', dpi=100, width = 11, height = 8, units='in')
  return (driest.fl) #inter-change to driest and wettest
}
|
9f8ca4336bb714aa708856e7b5afd7b34d4cb1dd
|
c47d4292a1e9500dc151a4b561d97a9b762fc88b
|
/R/weighting.R
|
24f4880063d8030636237b5c974c008cfd46b3c5
|
[
"MIT"
] |
permissive
|
Landscape-Data-Commons/aim.analysis
|
656f9066e00a40a870dfe12d5778be908b19b76b
|
88ec6f227f68874d3bc316c1af56f06392863002
|
refs/heads/master
| 2023-05-08T18:13:45.030573
| 2021-06-01T20:36:18
| 2021-06-01T20:36:18
| 89,946,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,453
|
r
|
weighting.R
|
#' Calculate sample weights for points using design polygons
#' @description Calculate the weight for points in a sample design based on the sample frame or strata used to draw them and the point fate. The outputs are a data frame of the points and their weights, a data frame summary of the fates of the points (optionally by year), and a data frame summary of the strata (optionally by year) if strata were used. No spatial checks are done, so make sure that \code{pts} and \code{frame.spdf} are from the same design, limited to the extent of the original sample frame, and have complete overlap.
#' @param pts Data frame or spatial points data frame. This should be the points information. At minimum, it must contain a field matching the string \code{pts.fatefield} containing string values matching those in \code{target.values}, \code{unknown.values}, etc. If providing \code{pts.groupfield} it must also have a field matching that containing values matching values found in \code{frame.spdf$frame.groupfield}.
#' @param pts.fatefield Character string. This must exactly match the name of the field in \code{pts} that contains string values matching those in \code{target.values}, \code{unknown.values}, etc.
#' @param pts.groupfield Optional character string. This must exactly match the name of the field in \code{pts} that contains values matching those found in \code{frame.spdf$frame.groupfield}. This will most often be the field containing strata identities.
#' @param frame.spdf Spatial polygons data frame. This must be the design's sample frame or strata restricted to the sample frame extent. If providing \code{frame.groupfield} it must also have a field matching that containing values matching values found in \code{pts$pts.groupfield}.
#' @param frame.groupfield Optional character string. This must exactly match the name of the field in \code{frame.spdf} that contains values matching those found in \code{pts$pts.groupfield}. This will most often be the field containing strata identities.
#' @param target.values Character string or character vector. This defines what values in the point fate field count as target points. When using AIM design databases, this should be at minimum \code{c("Target Sampled", "TS")}. This is case insensitive.
#' @param unknown.values Character string or character vector. This defines what values in the point fate field count as unknown points. When using AIM design databases, this should be at minimum \code{c("Unknown", "Unk")}. This is case insensitive.
#' @param nontarget.values Character string or character vector. This defines what values in the point fate field count as non-target points. When using AIM design databases, this should be at minimum \code{c("Non-Target", "NT", NA)}. This is case insensitive.
#' @param inaccessible.values Character string or character vector. This defines what values in the point fate field count as non-target points. When using AIM design databases, this should be at minimum \code{c("Inaccessible")}. This is case insensitive.
#' @param unneeded.values Character string or character vector. This defines what values in the point fate field count as not needed or unneeded points. When using AIM design databases, this should be at minimum \code{c("Not needed")}. This is case insensitive.
#' @return A list containing the named data frames \code{frame.stats}, \code{frame.summary} (if groupfield strings were provided), and \code{point.weights}.
#' @export
weight.gen <- function(pts,
                       pts.fatefield = NULL, #pts.fatefield
                       pts.groupfield = NULL, #"WEIGHT.ID"
                       frame.spdf,
                       frame.groupfield = NULL, #designstratumfield
                       target.values = NULL,
                       unknown.values = NULL,
                       nontarget.values = NULL,
                       inaccessible.values = NULL,
                       unneeded.values = NULL){
  ## Sanitize
  if (inherits(pts, "SpatialPointsDataFrame")) {
    working.pts <- pts@data
  } else {
    working.pts <- pts
  }
  if (!inherits(working.pts, "data.frame")) {
    stop("pts must be either a data frame or a spatial points data frame.")
  }
  ## FIX: the documentation promises case-insensitive matching, but only the
  ## point fates were uppercased -- the fate value vectors were not, so e.g.
  ## target.values = "Target Sampled" could never match. Uppercase both sides.
  ## (toupper(NA) stays NA, so NA entries in nontarget.values still work.)
  working.pts[[pts.fatefield]] <- toupper(working.pts[[pts.fatefield]])
  target.values <- toupper(target.values)
  unknown.values <- toupper(unknown.values)
  nontarget.values <- toupper(nontarget.values)
  inaccessible.values <- toupper(inaccessible.values)
  unneeded.values <- toupper(unneeded.values)
  if (!all(working.pts[[pts.fatefield]] %in% c(target.values, unknown.values, nontarget.values, inaccessible.values, unneeded.values))) {
    message("The following fate[s] need to be added to the appropriate fate argument[s] in your function call:")
    ## Take the vector of all the unique values in the fate field that aren't found in the fate vectors and collapse it into a single string, separated by ", "
    stop(paste(unique(working.pts[[pts.fatefield]][!(working.pts[[pts.fatefield]] %in% c(target.values, unknown.values, nontarget.values, inaccessible.values, unneeded.values))]), collapse = ", "))
  }
  ## Add areas in hectares to the frame if they're not there already
  if (!("AREA.HA" %in% names(frame.spdf@data))) {
    frame.spdf <- add.area(frame.spdf)
  }
  ## Creating a table of the point counts by point type
  ## Start with adding the point types
  working.pts$key[working.pts[[pts.fatefield]] %in% target.values] <- "Observed.pts"
  working.pts$key[working.pts[[pts.fatefield]] %in% nontarget.values] <- "Unsampled.pts.nontarget"
  working.pts$key[working.pts[[pts.fatefield]] %in% inaccessible.values] <- "Unsampled.pts.inaccessible"
  working.pts$key[working.pts[[pts.fatefield]] %in% unneeded.values] <- "Unsampled.pts.unneeded"
  working.pts$key[working.pts[[pts.fatefield]] %in% unknown.values] <- "Unsampled.pts.unknown"
  ## Here's the summary by key and pts.groupfield as appropriate
  pts.summary.fields <- c(pts.groupfield[!is.null(pts.groupfield)])
  ## NOTE: eval(parse()) is fragile -- retained as-is because the grouping
  ## fields arrive as character strings; a tidy-eval rewrite would be safer.
  pts.summary <- eval(parse(text = paste0("dplyr::summarize(.data = group_by(.data = ungroup(working.pts), key, ", paste(pts.summary.fields, collapse = ", "),"), count = n())")))
  ## Spreading that into one column per point type
  pts.summary.wide <- tidyr::spread(data = pts.summary,
                                    key = key,
                                    value = count,
                                    fill = 0)
  ## What kinds of points might be tackled
  point.types <- c("Observed.pts", "Unsampled.pts.nontarget", "Unsampled.pts.inaccessible", "Unsampled.pts.unneeded", "Unsampled.pts.unknown")
  ## We need to know which of the types of points (target, non-target, etc.) are actually represented
  extant.counts <- point.types[point.types %in% names(pts.summary.wide)]
  ## Only asking for summarize() to operate on those columns that exist because if, for example, there's no Unsampled.pts.unneeded column and we call it here, the function will crash and burn
  frame.stats <- eval(parse(text = paste0("pts.summary.wide %>% group_by(", paste(pts.summary.fields, collapse = ","),") %>%",
                                          "dplyr::summarize(sum(", paste0(extant.counts, collapse = "), sum("), "))")))
  ## Fix the naming because it's easier to do it after the fact than write paste() so that it builds names in the line above
  names(frame.stats) <- stringr::str_replace_all(string = names(frame.stats), pattern = "^sum\\(", replacement = "")
  names(frame.stats) <- stringr::str_replace_all(string = names(frame.stats), pattern = "\\)$", replacement = "")
  ## Add in the missing columns (as zero counts) if some point categories weren't represented
  for (name in point.types[!(point.types %in% names(frame.stats))]) {
    frame.stats[, name] <- 0
  }
  frame.stats <- frame.stats[, names(frame.stats) != "NA"]
  ## TODO: Needs to handle a polygon OR a raster for the frame
  ## HERE WE FORK FOR IF THERE ARE STRATA OR NOT
  if (!is.null(frame.groupfield)) {
    ## Because we have strata, use the design stratum attribute
    ## Create a data frame to store the area values in hectares for strata. The as.data.frame() is because it was a tibble for some reason
    area.df <- group_by_(frame.spdf@data, frame.groupfield) %>% dplyr::summarize(AREA.HA.SUM = sum(AREA.HA)) %>% as.data.frame()
    ## Get the sums of point types per stratum
    frame.summary <- frame.stats %>% group_by_(pts.groupfield) %>%
      dplyr::summarize(Observed.pts = sum(Observed.pts),
                       Unsampled.pts.nontarget = sum(Unsampled.pts.nontarget),
                       Unsampled.pts.inaccessible = sum(Unsampled.pts.inaccessible),
                       Unsampled.pts.unneeded = sum(Unsampled.pts.unneeded),
                       Unsampled.pts.unknown = sum(Unsampled.pts.unknown))
    ## Add in the areas of the strata
    frame.summary <- merge(x = frame.summary,
                           y = area.df,
                           by.x = pts.groupfield,
                           by.y = frame.groupfield)
    ## Renaming is causing dplyr and tidyr to freak out, so we'll just copy the values into the fieldnames we want
    frame.summary$Stratum <- frame.summary[[pts.groupfield]]
    frame.summary$Area.HA <- frame.summary$AREA.HA.SUM
    ## Calculate the rest of the values
    frame.summary <- frame.summary %>% group_by(Stratum) %>%
      ## The total points, minus the unneeded so we don't penalize projects for them!
      mutate(Total.pts = sum(Observed.pts, Unsampled.pts.nontarget, Unsampled.pts.inaccessible, Unsampled.pts.unknown)) %>%
      ## The proportion of the total points in the stratum that were "target"
      mutate(Prop.dsgn.pts.obsrvd = Observed.pts/Total.pts) %>%
      ## The effective "sampled area" based on the proportion of points that were surveyed
      mutate(Sampled.area.HA = unlist(Area.HA * Prop.dsgn.pts.obsrvd)) %>%
      ## The weight for each point in the stratum is the effective sampled area divided by the number of points surveyed, unknown, and inaccessible in the stratum
      mutate(Weight = Sampled.area.HA/sum(Observed.pts, Unsampled.pts.inaccessible, Unsampled.pts.unknown)) %>% as.data.frame()
    ## When there are NaNs in the calculated fields, replace them with 0
    frame.summary <- tidyr::replace_na(frame.summary, replace = list(Prop.dsgn.pts.obsrvd = 0, Sampled.area.HA = 0, Weight = 0))
    ## Add the weights to the points, but only the observed ones
    for (stratum in frame.summary$Stratum) {
      working.pts$WGT[(working.pts[[pts.fatefield]] %in% target.values) & working.pts[[pts.groupfield]] == stratum] <- frame.summary$Weight[frame.summary$Stratum == stratum]
    }
    ## All the unassigned weights get converted to 0
    working.pts <- tidyr::replace_na(working.pts, replace = list(WGT = 0))
    point.weights <- working.pts
  } else if (is.null(frame.groupfield)) {
    ## Treat it as a single unit for lack of stratification
    area <- sum(frame.spdf@data$AREA.HA)
    ## derive weights
    proportion.observed <- 1 ## initialize - proportion of 1.0 means there were no nonresponses
    wgt <- 0 ## initialize wgt
    sample.area <- 0 ## initialize actual sampled area
    if (sum(frame.stats[, point.types]) > 0) {
      proportion.observed <- sum(frame.stats$Observed.pts)/sum(frame.stats[, c("Observed.pts", "Unsampled.pts.nontarget", "Unsampled.pts.inaccessible", "Unsampled.pts.unknown")]) ## realized proportion of the stratum that was sampled (observed/total no. of points that weren't "unneeded")
    }
    if (sum(frame.stats$Observed.pts) > 0) {
      ## Record the actual area(ha) sampled - (proportional reduction * stratum area)
      sample.area <- proportion.observed*area
      ## (The proportion of the total area that was sampled * total area [ha]) divided by the number of observed, inaccessible, and unknown points
      ## FIX: previously this summed frame.stats$Inaccessible.pts and
      ## frame.stats$Unknown.pts, columns which do not exist (the real names
      ## are Unsampled.pts.inaccessible / Unsampled.pts.unknown). `$` on a
      ## missing column returns NULL, which sum() silently drops, so the
      ## denominator only counted observed points and weights were inflated
      ## whenever inaccessible/unknown points existed. Now consistent with
      ## the stratified branch above.
      wgt <- (sample.area)/sum(frame.stats$Observed.pts, frame.stats$Unsampled.pts.inaccessible, frame.stats$Unsampled.pts.unknown)
    }
    ## Tabulate key information for this design
    frame.summary <- data.frame(Stratum = "Frame",
                                Total.pts = sum(frame.stats[, point.types]),
                                Observed.pts = sum(frame.stats$Observed.pts),
                                Unsampled.pts.nontarget = sum(frame.stats$Unsampled.pts.nontarget),
                                Unsampled.pts.inaccessible = sum(frame.stats$Unsampled.pts.inaccessible),
                                Unsampled.pts.unneeded = sum(frame.stats$Unsampled.pts.unneeded),
                                Unsampled.pts.unknown = sum(frame.stats$Unsampled.pts.unknown),
                                Area.HA = area,
                                Prop.dsgn.pts.obsrvd = proportion.observed,
                                Sampled.area.HA = sample.area,
                                Weight = wgt,
                                stringsAsFactors = FALSE)
    point.weights <- working.pts
    ## If there are points to work with, do this
    if (nrow(point.weights) > 0) {
      ## If a point had a target fate, assign the calculated weight
      point.weights$WGT[point.weights[[pts.fatefield]] %in% target.values] <- wgt
      ## If a point had a non-target or unknown designation, assign 0 as the weight
      point.weights$WGT[point.weights[[pts.fatefield]] %in% c(nontarget.values, unknown.values, inaccessible.values, unneeded.values)] <- 0
    }
  }
  ## Make sure that this is in the order we want
  frame.summary <- frame.summary[, c("Stratum",
                                     "Total.pts",
                                     "Observed.pts",
                                     "Unsampled.pts.nontarget",
                                     "Unsampled.pts.inaccessible",
                                     "Unsampled.pts.unneeded",
                                     "Unsampled.pts.unknown",
                                     "Area.HA",
                                     "Prop.dsgn.pts.obsrvd",
                                     "Sampled.area.HA",
                                     "Weight")]
  names(point.weights)[names(point.weights) == "key"] <- "POINT.FATE"
  output <- list("frame.stats" = frame.stats, "frame.summary" = frame.summary, "point.weights" = point.weights)
  ## NOTE: the original `output[!is.null(output)]` was a no-op (is.null() on
  ## a list is a scalar FALSE, so `output[TRUE]` returned the whole list);
  ## all three elements are always defined, so return the list directly.
  return(output)
}
#' Adjusting Weights Calculated from AIM Sample Designs
#'
#' This function takes the point weights data frame output from the function \code{weight.gen()} and a SpatialPolygonsDataFrame defining the weight categories. Returns the data frame supplied as points with the new column \code{ADJWGT} containing the adjusted weights, or \code{NULL} if the points and weight category polygons do not overlap.
#' @param points Data frame output from \code{weight.gen()}, equivalent to \code{weight.gen()[["point.weights"]]}. Must contain the fields \code{LONGITUDE}, \code{LATITUDE}, and \code{WGT}.
#' @param wgtcat.spdf SpatialPolygonsDataFrame describing the weight categories for adjusting the weights. Use the output from \code{intersect()}.
#' @param spdf.area.field Character string defining the field name in \code{wgtcat.spdf@data} that contains the areas for the weight categories. Defaults to \code{"AREA.HA.UNIT.SUM"}.
#' @param spdf.wgtcat.field Character string defining the field name in \code{wgtcat.spdf@data} that contains the unique identification for the weight categories. Defaults to \code{"UNIQUE.IDENTIFIER"}.
#' @param projection \code{sp::CRS()} argument. Defaults to NAD83 with \code{sp::CRS("+proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs")}. Is used to reproject all SPDFs in order to perform spatial manipulations.
#' @keywords weights
#' @examples
#' # FIX: the example previously referenced a nonexistent weight.adjuster()
#' \dontrun{
#' weight.adjust(points = weights[["point.weights"]], wgtcat.spdf = wgtcats)
#' }
#' @export
weight.adjust <- function(points,
                          wgtcat.spdf,
                          spdf.area.field = "AREA.HA.UNIT.SUM",
                          spdf.wgtcat.field = "UNIQUE.IDENTIFIER",
                          projection = sp::CRS("+proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs")
){
  ## Sanitization: all field-name matching below is done in uppercase
  names(points) <- toupper(names(points))
  names(wgtcat.spdf@data) <- toupper(names(wgtcat.spdf@data))
  spdf.area.field <- toupper(spdf.area.field)
  spdf.wgtcat.field <- toupper(spdf.wgtcat.field)
  ## Convert points to an SPDF.
  ## FIX: namespaced sp:: and dplyr:: calls (the function already uses
  ## sp::over()/sp::CRS(), and bare calls fail if the packages aren't attached)
  points.spdf <- sp::SpatialPointsDataFrame(coords = points[, c("LONGITUDE", "LATITUDE")],
                                            data = points,
                                            proj4string = projection)
  ## Attribute the points.spdf with the wgtcat identities from wgtcat.spdf
  points.spdf <- attribute.shapefile(spdf1 = points.spdf,
                                     spdf2 = wgtcat.spdf,
                                     attributefield = spdf.wgtcat.field,
                                     newfield = spdf.wgtcat.field)
  if (is.null(points.spdf)) {
    message("There was no overlap between the points and the wgtcat polygons. Returning NULL.")
    return(NULL)
  } else {
    ## Add the areas in using the unique identifier (merge on the shared
    ## wgtcat field; distinct() avoids row duplication from repeated polygons)
    data.current <- merge(x = points.spdf@data,
                          y = dplyr::distinct(wgtcat.spdf@data[, c(spdf.wgtcat.field, spdf.area.field)]))
    ## We want to include all the points, so make a logical vector of TRUE
    ## with a length equal to the number of plots
    sites.current <- (rep(TRUE, nrow(data.current)))
    ## Grab the current weights from those points as its own vector
    wgt.current <- data.current$WGT
    ## NB: The identity inherited from the shapefile needs to match the field used for name in framesize
    wtcat.current <- data.current[, spdf.wgtcat.field]
    ## The framesize information about each of the unique wgtcat identities:
    ## a named vector of wgtcat areas, names = wgtcat identities
    framesize.current <- wgtcat.spdf@data[, spdf.area.field]
    names(framesize.current) <- wgtcat.spdf@data[, spdf.wgtcat.field]
    ## Run the weight adjustment
    data.current$ADJWGT <- spsurvey::adjwgt(sites.current, wgt.current, wtcat.current, framesize.current)
    return(data.current)
  }
}
#' Calculate weights for analyses combining AIM and LMF data
#' @description When combining AIM and LMF designs for a single weighted analysis, the process of calculating weights is complicated by the two-stage nature of the LMF design. This calculates a relative weight for each point first based on whether or not it falls inside an LMF segment that was selected for sampling and how many points total fall in any given LMF segment. Those relative weights are then used to calculate combined design weights.
#' @param aim_points Data frame or spatial points data frame. The AIM point information, including unique identities in the variable \code{aim_idvar}. If this is just a data frame OR if \code{wgtcats} is not a spatial polygons data frame then it must already have the weight category and LMF segment memberships assigned in the variables \code{wgtcat_var} and \code{segment_var} (there should be an \code{NA} for any observation that does not fall in an LMF segment). If this is spatial and so are either \code{wgtcats} or \code{segments}, then the points will be attributed with the memberships useing \code{sp::over()}. This must also include \code{aim_fate_var} if any of the points were not observed/sampled, e.g. rejected, inaccessible, or had an unknown fate, otherwise they will all be assumed to've been observed/sampled.
#' @param lmf_points Data frame or spatial points data frame. The LMF point information, including unique identities in the variable \code{lmf_idvar}. If this is just a data frame OR if \code{wgtcats} is not a spatial polygons data frame then it must already have the weight category and LMF segment memberships assigned in the variables \code{wgtcat_var} and \code{segment_var} (there should be an \code{NA} for any observation that does not fall in an LMF segment). If this is spatial and so are either \code{wgtcats} or \code{segments}, then the points will be attributed with the memberships useing \code{sp::over()}. All LMF points are assumed to be observed/sampled.
#' @param aim_idvar Character string. The name of the variable in \code{aim_points} that contains the unique identities for the points.
#' @param lmf_idvar Character string. The name of the variable in \code{lmf_points} that contains the unique identities for the points.
#' @param aim_fatevar Optional character string. The name of the variable in \code{aim_points} that contains the fates of the points. If \code{NULL} then the fate of all points in \code{aim_points} will be assumed to be sampled/observed. Defaults to \code{NULL}
#' @param observed_fates Optional vector of character strings. The strings present in \code{aim_points$aim_fatevar} that correspond to a sampled/observed fate. Defaults to \code{NULL}
#' @param invalid_fates Optional vector of character strings. The strings present in \code{aim_points$aim_fatevar} that correspond to fates that SHOULD NOT BE INCLUDED IN WEIGHTING, e.g. unneeded or not yet evaluated as in future points or unused oversample. Defaults to \code{NULL}
#' @param wgtcats Data frame or spatial polygons data frame. The information about the weight categories. This must contain the weight category identities in a variable named \code{wgtcat_var}. If this is a data frame, it must also contain the AREAS IN HECTARES in a variable named \code{wgtcat_area_var}. Defaults to \code{NULL}
#' @param wgtcat_var Character string. The name of the variable in \code{wgtcats} (and, if \code{wgtcats} is not spatial, \code{aim_points} and \code{lmf_points}) that contains the weight category identities/memberships.
#' @param wgtcat_area_var Optional character string. The name of the variable in \code{wgtcats} that contains the AREAS IN HECTARES. Only optional if \code{wgtcats} is spatial. Defaults to \code{NULL}
#' @param segments Optional spatial polygons data frame. The information about the LMF segments, this is only optional if \code{segment_var} is not already assigned to \code{aim_points} and \code{lmf_points}. This must contain the weight category identities in a variable named \code{segment_var}. Defaults to \code{NULL}
#' @param segment_var Character string. The name of the variable in \code{segments} (or, if \code{segments} is not provided, \code{aim_points} and \code{lmf_points}) that contains the LMF segment identities.
#' @param projection Optional CRS object. Used to reproject all spatial data. If \code{NULL} then the projection is taken from \code{wgtcats} unless it's not spatial in which case it's taken from \code{segments} unless it's not provided in which case no reprojection occurs. Defaults to \code{NULL}
#' @param verbose Logical. If \code{TRUE} then the function will produce informative messages as it executes its steps. Useful for debugging. Defaults to \code{FALSE}.
#' @return A list of two data frames: point_weights which contains information for each point that did not have a fate in \code{invalid_fates} and wgtcat_summary which contains information about each weight category.
#' @export
weight_aimlmf <- function(aim_points,
lmf_points,
aim_idvar,
lmf_idvar,
aim_fatevar = NULL,
observed_fates = c("TS"),
invalid_fates = NULL,
wgtcats = NULL,
wgtcat_var,
wgtcat_area_var = NULL,
segments = NULL,
segment_var,
projection = NULL,
verbose = FALSE){
if (length(wgtcat_var) != 1 | class(wgtcat_var) != "character") {
stop("wgtcat_var must be a single character string")
}
if (!is.null(wgtcat_area_var)) {
if (length(wgtcat_area_var) != 1 | class(wgtcat_area_var) != "character") {
stop("wgtcat_area_var must be a single character string")
}
}
if (length(segment_var) != 1 | class(segment_var) != "character") {
stop("segment_var must be a single character string")
}
if (!is.null(wgtcats)) {
if (!(class(wgtcats) %in% c("SpatialPolygonsDataFrame", "data.frame"))) {
stop("wgtcats must be a spatial polygons data frame or data frame")
}
if (nrow(wgtcats) < 1) {
stop("wgtcats contains no observations/data")
}
if (!(wgtcat_var %in% names(wgtcats))) {
stop(paste("The variable", wgtcat_var, "does not appear in wgtcats@data"))
}
}
if (!is.null(segments)) {
if (!(class(segments) %in% "SpatialPolygonsDataFrame")) {
stop("segments must be a spatial polygons data frame")
}
if (nrow(segments) < 1) {
stop("segments contains no observations/data")
}
if (!(segment_var %in% names(segments))) {
stop(paste("The variable", segment_var, "does not appear in segments@data"))
}
}
if (is.null(aim_fatevar)) {
warning("No fate variable specified for AIM points. Assuming all were observed/sampled.")
aim_fatevar <- "fate"
aim_points[["fate"]] <- "observed"
lmf_points[["fate"]] <- "observed"
observed_fates <- "observed"
} else {
if (length(aim_fatevar) > 1 | class(aim_fatevar) != "character") {
stop("The aim fate variable must be a single character string")
}
if (!aim_fatevar %in% names(aim_points)) {
stop(paste("The variable", aim_fatevar, "does not appear in aim_points@data"))
} else {
aim_points[["fate"]] <- aim_points[[aim_fatevar]]
}
if (is.null(observed_fates)) {
warning("No observed fates provided. Assuming all AIM points were observed/sampled unless specified otherwise with invalid_fates")
observed_fates <- unique(aim_points[["fate"]])
observed_fates <- observed_fates[!(observed_fates %in% invalid_fates)]
}
}
lmf_points[["fate"]] <- observed_fates[1]
if (!is.null(observed_fates)) {
if (!any(aim_points[["fate"]] %in% observed_fates)) {
warning("No AIM points have a fate specified as observed.")
}
}
# Harmonize projections
if (is.null(projection)) {
if (!is.null(wgtcats)) {
projection <- wgtcats@proj4string
} else if (!is.null(segments)) {
projection <- segments@proj4string
}
}
if (!is.null(projection)) {
if (class(aim_points) %in% c("SpatialPointsDataFrame")) {
if (!identical(projection, aim_points@proj4string)) {
aim_points <- sp::spTransform(aim_points,
CRSobj = projection)
}
}
if (class(lmf_points) %in% c("SpatialPointsDataFrame")) {
if (!identical(projection, lmf_points@proj4string)) {
lmf_points <- sp::spTransform(lmf_points,
CRSobj = projection)
}
}
if (!is.null(wgtcats)) {
if (class(lmf_points) %in% c("SpatialPolygonsDataFrame")) {
if (!identical(projection, wgtcats)) {
wgtcats <- sp::spTransform(wgtcats,
CRSobj = projection)
}
}
}
if (!is.null(segments)) {
if (!identical(projection, segments@proj4string)) {
segments <- sp::spTransform(segments,
CRSobj = projection)
}
}
}
# Assign the weight categories
if (class(wgtcats) %in% "SpatialPolygonsDataFrame") {
if (!(wgtcat_var %in% names(wgtcats))) {
stop("The variable ", wgtcat_var, " does not appear in wgtcats")
}
aim_points[["wgtcat"]] <- sp::over(aim_points, wgtcats)[[wgtcat_var]]
lmf_points[["wgtcat"]] <- sp::over(lmf_points, wgtcats)[[wgtcat_var]]
} else {
if (!(wgtcat_var %in% names(aim_points))) {
stop("The variable ", wgtcat_var, " does not appear in aim_points")
}
if (!(wgtcat_var %in% names(lmf_points))) {
stop("The variable ", wgtcat_var, " does not appear in lmf_points")
}
aim_points[["wgtcat"]] <- aim_points[[wgtcat_var]]
lmf_points[["wgtcat"]] <- lmf_points[[wgtcat_var]]
}
# Assign the LMF segment codes
if (is.null(segments)) {
if (!(segment_var %in% names(aim_points))) {
stop("The variable ", segment_var, " does not appear in aim_points")
}
if (!(segment_var %in% names(lmf_points))) {
stop("The variable ", segment_var, " does not appear in lmf_points")
}
aim_points[["segment"]] <- aim_points[[segment_var]]
lmf_points[["segment"]] <- lmf_points[[segment_var]]
} else {
if (!(segment_var %in% names(segments))) {
stop("The variable ", segment_var, " does not appear in segments")
}
aim_points[["segment"]] <- sp::over(aim_points, segments)[[segment_var]]
lmf_points[["segment"]] <- sp::over(lmf_points, segments)[[segment_var]]
}
# Just harmonize the idvar names for now
aim_points[["unique_id"]] <- aim_points[[aim_idvar]]
lmf_points[["unique_id"]] <- lmf_points[[lmf_idvar]]
# If somehow LMF points aren't in a segment, that's a major problem
if (any(is.na(lmf_points[["segment"]]))) {
stop(paste("The following LMF points did not spatially intersect any segment polygons:",
paste(lmf_points[is.na(lmf_points[["segment"]]), "unique_id"], collapse = ", ")))
}
# TODO: Stick a check in here that the segment ID from the polygons
# matches the one derived from the LMF plot ID
# Probably just warn if not?
# Get data frames
if (class(aim_points) %in% "SpatialPointsDataFrame") {
aim_df <- aim_points@data
} else {
aim_df <- aim_points
}
if (class(lmf_points) %in% "SpatialPointsDataFrame") {
lmf_df <- lmf_points@data
} else {
lmf_df <- lmf_points
}
# NOTE THAT THIS FILTERS OUT ANYTHING FLAGGED AS NOT NEEDED IN THE FATE
# So that'd be unused oversamples or points from the FUTURE that no one would've sampled anyway
aim_df <- aim_df[!(aim_df[["fate"]] %in% invalid_fates), c("unique_id", "fate", "wgtcat", "segment")]
aim_df[["aim"]] <- TRUE
aim_df[["lmf"]] <- FALSE
# We only have target sampled LMF points available to us, so we don't need to filter them
lmf_df <- lmf_df[, c("unique_id", "fate", "wgtcat", "segment")]
lmf_df[["aim"]] <- FALSE
lmf_df[["lmf"]] <- TRUE
# Combine them
combined_df <- unique(rbind(aim_df, lmf_df))
# There shouldn't be any that don't belong to a wgtcat anymore
combined_df <- combined_df[!is.na(combined_df[["wgtcat"]]), ]
# Add an observed variable for easy reference later
combined_df[["observed"]] <- combined_df[["fate"]] %in% observed_fates
# To make the lookup table, drop any points that fell outside LMF segments
combined_segmentsonly_df <- combined_df[!is.na(combined_df[["segment"]]), ]
# Create a segment relative weight lookup table
segment_relwgt_lut <- do.call(rbind,
lapply(X = split(combined_segmentsonly_df, combined_segmentsonly_df[["segment"]]),
FUN = function(X){
# These are the count of AIM points with any valid fate
aim_count <- sum(X[["aim"]])
# We also need the count of LMF points with any valid fate, but that's complicated
# We only have the sampled LMF points so we can count those
lmf_sampled_count <- sum(X[["lmf"]])
# To get the number of evaluated but not sampled points:
# The LMF plot keys end in a digit that represents the intended sampling order within a segment
# 1 and 2 are considered base points and were intended to be sampled
# If a sampled LMF plot's plot key ends in 3, that means that one or both of the base points
# were evaluated and rejected rather than sampled, which brings the evaluated LMF plot count
# to three for the segment.
# This just asks if the third point was used
lmf_oversample_used <- any(grepl(X[["unique_id"]][X[["lmf"]]],
pattern = "\\D3$"))
# Likewise, if only one LMF plot was sampled in a segment, that means the other two were
# evalurated and rejected rather than sampled, also bringing the total to three.
# So if there was only one sampled or if the oversample was used, there were three evaluated
if (sum(X[["lmf"]]) == 1 | lmf_oversample_used) {
lmf_count <- 3
} else {
# This will fire only if there sampled count was 2, but better to be safe here
lmf_count <- lmf_sampled_count
}
# The relative weight for points falling within a segment is calculated as
# 1 / (number of points)
relative_weight <- 1 / sum(aim_count, lmf_count)
output <- data.frame("segment" = X[["segment"]][1],
"relwgt" = relative_weight,
stringsAsFactors = FALSE)
return(output)
}))
# Add the relative weights to the combined AIM and LMF points
combined_df <- merge(x = combined_df,
y = segment_relwgt_lut,
by = "segment",
all.x = TRUE)
# Anywhere there's an NA associated with an AIM point, that's just one that fell outside the segments
combined_df[is.na(combined_df[["relwgt"]]) & combined_df[["aim"]], "relwgt"] <- 1
####### NOTE!!!!!!!!! ##############################################
# This is hacked up using wgtcat_df from above because the wgtcats geometry is screwed up
# wgtcats <- aim.analysis::add.area(wgtcats)
if (is.null(wgtcat_area_var)) {
if (class(wgtcats) %in% "SpatialPolygonsDataFrame") {
wgtcat_df <- aim.analysis::add.area(wgtcats)@data
} else {
stop("No name for a variable in wgtcats containing the area in hectares was provided and wgtcats is not a spatial polygons data frame so area can't be calculated")
}
} else {
if (!(wgtcat_area_var %in% names(wgtcats))) {
if (class(wgtcats) %in% "SpatialPolygonsDataFrame") {
wgtcat_df <- aim.analysis::add.area(wgtcats)@data
} else {
stop("The variable ", wgtcat_area_var, " does not appear in wgtcats")
}
} else {
warning("Trusting that the variable ", wgtcat_area_var, " in wgtcats contains the areas in hectares")
if (class(wgtcats) %in% "SpatialPolygonsDataFrame") {
wgtcat_df <- wgtcats@data
wgtcat_df[["AREA.HA"]] <- wgtcat_df[[wgtcat_area_var]]
} else {
wgtcat_df <- wgtcats
wgtcat_df[["AREA.HA"]] <- wgtcat_df[[wgtcat_area_var]]
}
}
}
wgtcat_df[["wgtcat"]] <- wgtcat_df[[wgtcat_var]]
# Making sure that these aren't spread out over multiple observations
if (verbose) {
message("Summarizing wgtcat_df by wgtcat to calculate areas in case the wgtcats are split into multiple observations")
}
wgtcat_df <- dplyr::summarize(dplyr::group_by(wgtcat_df,
wgtcat),
"hectares" = sum(AREA.HA))
wgtcat_areas <- setNames(wgtcat_df[["hectares"]], wgtcat_df[["wgtcat"]])
weight_info <- lapply(X = wgtcat_df[["wgtcat"]],
wgtcat_areas = wgtcat_areas,
points = combined_df,
wgtcat_var = wgtcat_var,
FUN = function(X, wgtcat_areas, points, wgtcat_var){
# Area of this polygon
area <- wgtcat_areas[X]
# All of the points (AIM and LMF) falling in this polygon
points <- points[points[["wgtcat"]] == X, ]
# If there are in fact points, do some weight calculations!
if (nrow(points) > 0 & any(points[["observed"]])) {
# The number of observed AIM points with a relative weight of 1
# So, not sharing an LMF segment with any LMF points
aim_standalone <- sum(combined_df[["aim"]] & combined_df[["relwgt"]] == 1)
# The number of unique segments selected for LMF points
lmf_segment_count <- length(unique(points[["segment"]]))
# The approximate number of 160 acre segments in this polygon
# Obviously, not all of the segments were selected in the first stage
# of the LMF design, but this is how many were available in this polygon
approximate_segment_count <- area / (160 / 2.47)
# This is the sum of the relative weights of the OBSERVED points
# This does not include the inaccessible, rejected, or unknown points
# It does include both AIM and LMF, however
sum_observed_relwgts <- sum(points[points[["observed"]], "relwgt"])
# This is the sum of the relative weights of all the AIM points, regardless of fate
sum_relwgts <- sum(points[["relwgt"]])
# The units are segments per point
# The segments are 120 acre quads (quarter sections?) that the first stage of LMF picked from
# Steve Garman called this "ConWgt" which I've expanded to conditional_weight
# but that's just a guess at what "con" was short for
conditional_weight <- approximate_segment_count / (aim_standalone + lmf_segment_count)
conditional_area <- conditional_weight * sum_observed_relwgts
# What's the observed proportion of the area?
observed_proportion <- sum_observed_relwgts / sum_relwgts
# And how many acres is that then?
# We can derive the "unknown" or "unsampled" area as the difference
# between the polygon area and the observed area
observed_area <- area * observed_proportion
# Then this adjustment value is calculated
weight_adjustment <- observed_area / conditional_area
# Put everything about the wgtcat in general in one output data frame
output_wgtcat <- data.frame(wgtcat = X,
area = area,
area_units = "hectares",
approximate_segment_count = approximate_segment_count,
sum_observed_relwgts = sum_observed_relwgts,
sum_relwgts = sum_relwgts,
observed_proportion = observed_proportion,
observed_area = observed_area,
unobserved_area = area - observed_area,
conditional_weight = conditional_weight,
conditional_area = conditional_area,
weight_adjustment = weight_adjustment,
point_count = sum(points[["observed"]]),
observed_point_count = nrow(points),
stringsAsFactors = FALSE)
# But much more importantly add the calculated weights to the points
output_points <- points
# This handles situations where there were points, but none of them were observed
# In that case, weight_adjustment will be 0 / 0 = NaN
# That's fine because we can identify that this polygon is still in the inference area
# but that its entire area is "unknown" because no data were in it
if (is.nan(weight_adjustment)) {
output_points[["wgt"]] <- NA
} else {
# The point weight is calculated here.
# This is Garman's formula and I don't have the documentation justifying it on hand
output_points[["wgt"]] <- weight_adjustment * conditional_weight * points[["relwgt"]]
message("Checking weight sum for ", X)
# I'm rounding here because at unrealistically high levels of precision it gets weird and can give false positives
if (round(sum(output_points[["wgt"]]), digits = 3) != round(area, digits = 3)) {
warning("The sum of the point weights (", sum(output_points[["wgt"]]), ") does not equal the polygon area (", area, ") for ", X)
}
}
} else {
# Basically just empty data frames
output_wgtcat <- data.frame(wgtcat = X,
area = area,
area_units = "hectares",
approximate_segment_count = area / (160 / 2.47),
sum_observed_relwgts = NA,
sum_relwgts = NA,
observed_proportion = 0,
observed_area = 0,
unobserved_area = area,
conditional_weight = NA,
conditional_area = NA,
weight_adjustment = NA,
point_count = 0,
observed_point_count = 0,
stringsAsFactors = FALSE)
output_points <- NULL
}
return(list(points = output_points,
wgtcat = output_wgtcat))
})
point_weights <- do.call(rbind,
lapply(X = weight_info,
FUN = function(X){
X[["points"]]
}))
wgtcat_summary <- do.call(rbind,
lapply(X = weight_info,
FUN = function(X){
X[["wgtcat"]]
}))
return(list(point_weights = point_weights,
wgtcat_summary = wgtcat_summary))
}
|
a63320aaac2273a16238c901b5144d54d72606ad
|
1064b565d86e7b9fede7a1bd11b397428a4f97f7
|
/install_packages/rinstall_cran_R-3.2.0.R
|
515edcd2634042de0fccdd1dbfb89c9ace29bdc2
|
[] |
no_license
|
bguillod/R_generic_funcs
|
5c73d625939748c4738d9df7f409546156f718a8
|
df03f5e26b9071b0e4526e92aa992cfb3b90f7e3
|
refs/heads/master
| 2021-01-17T10:08:30.871309
| 2016-05-06T15:18:26
| 2016-05-06T15:18:26
| 41,424,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,510
|
r
|
rinstall_cran_R-3.2.0.R
|
# Install CRAN and locally-built packages for R 3.2.0.
# Expects `r.scripts.path` to be defined before sourcing; package sources are
# cached under <r.scripts.path>/install_packages.
options(repos = "http://stat.ethz.ch/CRAN/")
## Candidate library directories for the machines this script runs on:
## libdir.ouce <- file.path(Sys.getenv("HOME"), "R/x86_64-redhat-linux-gnu-library/3.2/")
## libdir.jasmin <- file.path(Sys.getenv("HOME"), "R/x86_64-redhat-linux-gnu-library/3.2/")
# BUG FIX: the original did `libdir <- libdir.jasmin` (errors: the object is
# commented out above) and then unconditionally `libdir <- stop(...)`, so the
# script could never run. Keep the explicit requirement but as a guard instead.
# NOTE(review): `libdir` is currently not passed to any install.packages() call
# below (the lib= arguments are commented out), so it only documents intent.
if (!exists("libdir")) {
  stop("SPECIFY THE LIBDIR PLEASE")
}
install.path <- file.path(r.scripts.path, "install_packages")
owndir <- file.path(install.path, "own_pkgs")
destdir <- file.path(install.path, "cran_pkgs")
# Packages already installed (kept for reference) and the new ones to add.
already.pkgs <- c("abind", "akima", "fields", "fitdistrplus", "gdata", "lattice", "lmomco", "manipulate", "mapdata", "mapproj", "maps", "ncdf4", "ncdf4.helpers", "PCICt", "SCI", "spam", "xts", "zoo", "maptools", "sp", "rgdal", "rgeos", "optparse", "spacetime", "XML", "Hmisc")
new.pkgs <- c("clue")
## Extra configure arguments, e.g. for packages linking against netCDF:
## config.args <- c(ncdf4 = "--with-netcdf-include=/usr/local/include
## --with-netcdf-lib=/usr/local/lib",
## RNetCDF = "--with-netcdf-include=/usr/local/include
## --with-netcdf-lib=/usr/local/lib")
config.args <- NULL
dependencies.install <- c("Depends", "Imports", "LinkingTo")
## INSTALL CRAN PACKAGES
# Warnings are promoted to errors so a failed install aborts immediately.
for (pkg in new.pkgs) {
  if (is.null(config.args)) {
    tryCatch({install.packages(pkg,
                               destdir = destdir,
                               dependencies = dependencies.install)},
             warning = function(w) stop(paste("WARNING: ", w)),
             error = function(e) stop(paste("ERROR:", e)))
  } else {
    tryCatch({install.packages(pkg,
                               destdir = destdir, dependencies = dependencies.install,
                               configure.args = config.args)},
             warning = function(w) stop(paste("WARNING: ", w)),
             error = function(e) stop(paste("ERROR:", e)))
  }
  print(paste("**** package", pkg, "installed ****"))
}
## INSTALL OWN PACKAGES (CHRIGEL)
new.own.pkgs <- c("trend_1.5.1", "gevXgpd_1.4.2", "geocors_1.2.8", "plotmap_2.3.7", "pcaXcca_1.4.1", "ACWD_2.0.0", "sm_2.2-5.4", "vioplot_0.2")
for (pkg in new.own.pkgs) {
  # BUG FIX: the original passed `depencencies = F` (misspelled), which
  # install.packages() silently swallowed via `...`; spell it correctly.
  tryCatch({install.packages(file.path(owndir, paste0(pkg, ".tar.gz")),
                             # lib = libdir,
                             repos = NULL, dependencies = FALSE)},
           warning = function(w) stop(paste("WARNING: ", w)),
           error = function(e) stop(paste("ERROR:", e)))
  print(paste("**** package", pkg, "installed ****"))
}
|
b5cc7c3b1693fa53db9a50d64544995b440b01dd
|
da25c9a12cd3b720a26ab1fb5db09d40a5932abc
|
/man/MODIS-deprecated.Rd
|
64de6783825168898d737816321db5a7acb714f8
|
[
"MIT"
] |
permissive
|
itati01/MODIS
|
4e71492825f338c6b392f60335acc1aa06b09b09
|
bc478e6fd9262ed7321fca77ac6d713c6e94420d
|
refs/heads/master
| 2023-01-05T15:35:50.412115
| 2022-10-08T08:21:42
| 2022-10-08T08:21:42
| 290,968,032
| 0
| 0
|
NOASSERTION
| 2020-08-28T06:24:02
| 2020-08-28T06:24:01
| null |
UTF-8
|
R
| false
| true
| 828
|
rd
|
MODIS-deprecated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MODIS-deprecated.R, R/lpdaacLogin.R
\name{MODIS-deprecated}
\alias{MODIS-deprecated}
\alias{lpdaacLogin}
\title{Deprecated Functions in \pkg{MODIS}}
\usage{
lpdaacLogin(server = "LPDAAC")
}
\description{
The functions listed below are deprecated and will be defunct in the near
future. If applicable, alternative functions with similar or even identical
functionality are mentioned. Help pages for deprecated functions are
available at \code{help("MODIS-deprecated")}.
\itemize{
\item \code{\link[=lpdaacLogin-deprecated]{lpdaacLogin-deprecated()}}: use \link{EarthdataLogin} instead.
}
}
\section{\code{lpdaacLogin}}{
For \code{\link[=lpdaacLogin]{lpdaacLogin()}}, use \code{\link[=EarthdataLogin]{EarthdataLogin()}} instead.
}
\keyword{internal}
|
27836c035fce6a5f85360133c571edfa4677b926
|
36128a1548e889c7dfa8d4ab1456e426ee302dba
|
/SubscriptionSimulation.R
|
69bdb1786abfc59a01d406a7939d3fd97bb55117
|
[] |
no_license
|
Tybirk/MDP_Project
|
9a2935b6c983a16a55dbdddaa3bd2656bf62442c
|
599de945c6478129c7d0d9e5817f5e4d32f24992
|
refs/heads/master
| 2020-03-15T04:21:50.067679
| 2018-05-03T13:10:58
| 2018-05-03T13:10:58
| 131,963,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,868
|
r
|
SubscriptionSimulation.R
|
#### Project: Subscription Intelligence
### Simulation of Markov chains

# Model definition: two actions, three customer states.
Actions <- c("Nothing", "Incentivize")
States <- c("Satisfied Customer", "Dissatisfied Customer", "Former Customer")

# Transition matrix under "Nothing": rows = current state, cols = next state.
NothingTransitionMatrix <- matrix(
  c(0.7, 0.2, 0.1,   # Satisfied customer
    0.1, 0.2, 0.7,   # Dissatisfied customer
    0.1, 0.0, 0.9),  # Former customer
  nrow = length(States), byrow = TRUE
)
NothingReward <- c(100, 100, 0)

# Transition matrix under "Incentivize": every state is pulled back towards
# the satisfied/dissatisfied mix, at a reward cost.
IncentivizeTransitionMatrix <- matrix(
  c(0.7, 0.2, 0.1,   # Satisfied customer
    0.7, 0.2, 0.1,   # Dissatisfied customer
    0.7, 0.2, 0.1),  # Former customer
  nrow = length(States), byrow = TRUE
)
IncentivizeReward <- c(50, 50, -50)

# Bundle model components for lookup by action.
TransitionMatrix <- list(Nothing = NothingTransitionMatrix,
                         Incentivize = IncentivizeTransitionMatrix)
Reward <- list(Nothing = NothingReward, Incentivize = IncentivizeReward)
RewardMat <- cbind(NothingReward, IncentivizeReward)
rownames(RewardMat) <- States
## VALUE ITERATION
# Solve the infinite-horizon discounted MDP by repeatedly applying the Bellman
# optimality operator until the relative span of successive value functions is
# below 1e-4. Uses States, Reward and TransitionMatrix defined above.
# Step 0: start from the zero value function.
V0 <- rep(0, length(States))
V_Old <- V0
V_New <- rep(0, length(States))
discountrate <- 0.1
discountfactor <- exp(-discountrate)  # continuous-time style discount factor
# Step 1: Bellman updates.
i <- 1
threshold <- 1
while (threshold > 0.0001) {
  print(threshold)
  # One-step look-ahead value of each action in every state.
  action0vec <- Reward[[1]] + discountfactor * (TransitionMatrix[[1]] %*% V_Old)
  action1vec <- Reward[[2]] + discountfactor * (TransitionMatrix[[2]] %*% V_Old)
  V_New <- pmax(action0vec, action1vec)
  # Greedy policy: TRUE where "Incentivize" attains the maximum.
  policy <- V_New == action1vec
  m <- min(V_New - V_Old)
  M <- max(V_New - V_Old)
  if (M == 0 && m == 0) {
    # BUG FIX: exact convergence. The original reset threshold to 1 whenever
    # m == 0, so an exactly-converged iteration would loop forever.
    threshold <- 0
  } else if (m == 0) {
    # Relative span (M - m)/m is undefined when the minimum improvement is
    # zero but the maximum is not; keep iterating.
    threshold <- 1
  } else {
    # Relative span stopping criterion.
    threshold <- (M - m) / m
  }
  V_Old <- V_New
  i <- i + 1
}
print(i)
## SARSA
# Tabular SARSA on the subscription MDP defined above (uses the global States,
# Actions, TransitionMatrix and Reward objects). The behaviour policy is
# epsilon-greedy with respect to the current Q table.
# NOTE(review): no set.seed() call, so results differ between runs.
# Step 0: hyperparameters, Q table and initial state-action pair.
epsilon = 0.05 # exploration probability
learningRate = 0.3
discountFactor = 0.9
# Q table: one row per action, one column per state.
Q = matrix(0,ncol = length(States),nrow = length(Actions))
colnames(Q) = States
rownames(Q) = Actions
# Random initial state (1..3) and action (1..2).
CurrentState = sample(3,1)
CurrentAction = sample(2,1)
iterations = 10000
plotfrequency = 100 # snapshot the greedy policy every `plotfrequency` steps
BestAction = matrix(0,ncol = length(States),nrow = iterations/plotfrequency)
colnames(BestAction) = States
for(i in 1:iterations){
  # Sample the next state from the transition model of the current action.
  NextState = sample(3,1,prob = TransitionMatrix[[CurrentAction]][CurrentState,])
  # Epsilon-greedy choice of the next action (greedy w.r.t. Q, then randomized).
  NextAction = which.max(Q[,NextState])
  if(runif(1,0,1)<epsilon){
    NextAction = sample(2,1)
  }
  # SARSA update: move Q(a, s) towards r + gamma * Q(a', s').
  Q[CurrentAction,CurrentState] = Q[CurrentAction,CurrentState] +
    learningRate*(Reward[[CurrentAction]][CurrentState] +
                    discountFactor*Q[NextAction,NextState] -
                    Q[CurrentAction,CurrentState])
  CurrentState = NextState
  CurrentAction = NextAction
  # Periodically record the greedy action per state for plotting.
  if(as.integer(i/plotfrequency)==i/plotfrequency){
    BestAction[i/plotfrequency,] = apply(Q,2,which.max)
  }
}
## Plots
library(highcharter)
#plot(1:(iterations/plotfrequency),BestAction[,1])
hchart(BestAction, "scatter")
## Concurrent SARSA
# Same MDP, but `agents` independent agents act in parallel; each agent writes
# its SARSA update into its own slice of tempQ, and the shared Q table becomes
# the element-wise mean over agents after every iteration.
# NOTE(review): tempQ persists across iterations, so (action, state) entries an
# agent did not visit this iteration keep stale values from earlier iterations
# before being averaged -- confirm this is the intended semantics.
# NOTE(review): no set.seed() call, so results differ between runs.
epsilon = 0.4
learningRate = 0.7
discountFactor = 0.8
iterations = 500
agents = 100
# Shared Q table: one row per action, one column per state.
Q = matrix(0,ncol = length(States),nrow = length(Actions))
colnames(Q) = States
rownames(Q) = Actions
# Random initial state/action for every agent.
CurrentStates = sample(3,agents,replace=TRUE)
CurrentActions = sample(2,agents,replace=TRUE)
plotfrequency = 10 # snapshot the greedy policy every `plotfrequency` steps
BestAction = matrix(0,ncol = length(States),nrow = iterations/plotfrequency)
colnames(BestAction) = States
# Per-agent scratch copy of the Q table (action x state x agent).
tempQ = array(0,dim = c(length(Actions),length(States),agents))
for(i in 1:iterations){
  NextActions = rep(0,agents)
  NextStates = rep(0,agents)
  # Every agent samples its next state and picks an epsilon-greedy action;
  # the first iteration always explores (Q is still all zero).
  for(a in 1:agents){
    NextStates[a]= sample(3,1,prob = TransitionMatrix[[CurrentActions[a]]][CurrentStates[a],])
    NextActions[a] = which.max(Q[,NextStates[a]])
    if(runif(1,0,1)<epsilon || i == 1){
      NextActions[a] = sample(2,1)
    }
  }
  # Earlier variant that updated the shared Q directly (kept for reference):
  #for(a in 1:length(CurrentActions)){
  # Q[CurrentActions[a],CurrentStates[a]] = Q[CurrentActions[a],CurrentStates[a]] +
  # learningRate*(RewardMat[NextStates[a]] +
  # discountFactor*Q[NextActions[a],NextStates[a]] -
  # Q[CurrentActions[a],CurrentStates[a]])
  #
  #}
  # Each agent writes its own SARSA update into its tempQ slice.
  for(a in 1:agents){
    tempQ[CurrentActions[a],CurrentStates[a],a] = Q[CurrentActions[a],CurrentStates[a]] +
      learningRate*(RewardMat[CurrentStates[a],CurrentActions[a]] +
                      discountFactor*Q[NextActions[a],NextStates[a]] -
                      Q[CurrentActions[a],CurrentStates[a]])
  }
  # Shared Q = element-wise mean of the per-agent tables.
  avgtempQ = apply(tempQ,c(1,2),mean)
  Q = avgtempQ
  CurrentStates = NextStates
  CurrentActions = NextActions
  # Optional learning-rate decay (disabled):
  #learningRate = learningRate*0.9999
  if(as.integer(i/plotfrequency)==i/plotfrequency){
    BestAction[i/plotfrequency,] = apply(Q,2,which.max)
  }
}
## Plots
library(highcharter)
#plot(1:(iterations/plotfrequency),BestAction[,1])
hchart(BestAction, "scatter")
|
349dec03529e370a9969ed2f08d53121d13ac59e
|
f270c3e243539f0b790db8ba8118bd22500109fc
|
/orderlist/server.R
|
20f0fbc3b9619990c5eb69387223fe1ea8d4dda4
|
[] |
no_license
|
takewiki/lcsd
|
2bced1651e8e32531b0dc7fac90bf44d6df13c0a
|
5b2141c79b43d89db8ada54b04203a330e21482b
|
refs/heads/master
| 2020-12-07T16:19:48.517388
| 2020-10-01T15:55:58
| 2020-10-01T15:55:58
| 232,750,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,005
|
r
|
server.R
|
library(shiny)
# Shiny server for the order-list editor.
# getFiles(), dteditCn() and the books.*.callback functions are project helpers
# defined elsewhere in this app -- NOTE(review): their contracts are assumed
# from usage here; confirm against their definitions.
shinyServer(function(input, output) {
  # Data set to edit; presumably one row per file/order record with the
  # FBarcode/FChartNumber/FBillNo columns used below -- TODO confirm.
  files <- getFiles()
  # Render an editable data table (dtedit-style CRUD UI with Chinese labels).
  dteditCn(input, output,
           name = 'files',
           thedata = files,
           # Columns shown in the edit dialog and their display labels.
           edit.cols = c('FBarcode','FChartNumber','FBillNo'),
           edit.label.cols = c('二维码','图号','生产任务单号'),
           #input.types = c(Fstatus='numericInput'),
           #input.choices = list(Authors = unique(unlist(books$Authors))),
           # Columns shown in the table view.
           view.cols = c('FBarcode','FChartNumber','FBillNo'),
           # Button labels and dialog titles.
           label.add = '新增',
           label.copy = '复制',
           label.edit = '修改',
           label.delete = '删除',
           title.add = '新增界面',
           title.edit = '修改界面',
           title.delete = '删除界面',
           #show.copy = FALSE,
           #show.insert = FALSE,
           defaultPageLength = 10,
           # CRUD callbacks that persist changes -- defined elsewhere.
           callback.update = books.update.callback,
           callback.insert = books.insert.callback,
           callback.delete = books.delete.callback)
})
|
db26dd63c6444e7a7e0bac5eaabc2e0718a33fbb
|
c4fca74f6561d480f844ce9b808a61ca2b5f44e4
|
/R/SINTER_functions.R
|
7f418733fdda341d1ed4857cb322676da1c4d885
|
[] |
no_license
|
WeiqiangZhou/SINTER
|
c6f362f9c7c01f61221ff6c4e31fff9c8c04311b
|
f11405f964a4ed3ab5a0b30ed9cc032dcf8dd7a9
|
refs/heads/master
| 2020-04-03T22:45:14.589375
| 2019-09-09T19:34:44
| 2019-09-09T19:34:44
| 155,608,705
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,186
|
r
|
SINTER_functions.R
|
#' @import gam
#' @import matrixStats
#' @import preprocessCore
#' @import scran
#' @import ggplot2
#' @import parallel
#' @import GenomicAlignments
#' @import GenomicRanges
#' @import irlba
#' @title Summarize single-cell ATAC-seq data according to ENCODE cluster features
#' @description Read every BAM file in a directory (one file per single cell)
#' and count, for each cell, the reads overlapping each ENCODE cluster region.
#' Counts are normalized by the library size relative to the median cell and
#' log2-transformed.
#' @param path Directory containing the scATAC-seq BAM files.
#' @param gr_ENCL GRanges object with the ENCODE cluster regions.
#' @param type Sequencing read type, either "paired" or "single" end.
#' @return
#' \item{data}{Log2-transformed summarized data matrix (clusters x cells).}
#' \item{lib}{Total read count (i.e., library size) for each single cell.}
#' @keywords summarize scATAC-seq
#' @examples
#' \dontrun{
#' scATAC_data <- ENCLfunc("/bam_file_path",gr_ENCL,type="paired")
#' }
#' @export
ENCLfunc <- function(path,gr_ENCL,type="paired") {
  bam_files <- list.files(path, pattern = "\\.bam$")
  # One GRanges per BAM file, named by file name.
  read_ranges <- sapply(bam_files, function(bam) {
    bam_path <- paste0(path, "/", bam)
    if (type == "paired") {
      GRanges(readGAlignmentPairs(bam_path))
    } else if (type == "single") {
      GRanges(readGAlignments(bam_path))
    }
  })
  # Raw library sizes and their value relative to the median cell.
  libsize_raw <- sapply(read_ranges, length)
  libsize_rel <- libsize_raw / median(libsize_raw)
  # clusters x cells matrix of read counts overlapping each ENCODE cluster.
  overlap_counts <- sapply(read_ranges, function(gr) {
    countOverlaps(gr_ENCL, gr)
  })
  # Normalize each cell's column by its relative library size, then log2(x + 1).
  normalized <- log2(sweep(overlap_counts, 2, libsize_rel, "/") + 1)
  return(list(data = normalized, lib = libsize_raw))
}
#' @title Select variable gene from single-cell RNA-seq data
#' @description Select highly variable genes from scRNA-seq data. Genes are
#' first filtered by detection rate, a smooth mean-variance trend is fitted on
#' the log2 scale, and the genes with the largest variance above the trend are
#' kept. Only genes that also match (by gene ID with any version suffix
#' stripped) the bulk ENCODE training data are returned.
#' @param expr Gene expression matrix of scRNA-seq (normalized counts, genes x cells).
#' @param RNA_train_all Gene expression matrix of bulk RNA-seq from ENCODE.
#' @param filter_th Keep genes expressed (count > 0) in more than this fraction of cells.
#' @param var_th Keep the top fraction of genes ranked by excess variance above the trend.
#' @param dataplot If TRUE, plot the mean-variance trend with the selected genes highlighted.
#' @return
#' \item{expr_select}{Log2-transformed gene expression matrix of scRNA-seq for the selected genes.}
#' \item{RNA_train_select}{Gene expression matrix of bulk RNA-seq for the selected genes.}
#' @keywords variable gene scRNA-seq
#' @examples
#' \dontrun{
#' expr_data_select <- select_gene(expr_in,RNA_train_all,filter_th=0.1,var_th=0.2,dataplot=TRUE)
#' }
#' @export
select_gene <- function(expr,RNA_train_all,filter_th=0.1,var_th=0.2,dataplot=FALSE){
  # Keep genes detected in more than `filter_th` of cells.
  retain_idx <- which(rowMeans(expr > 0) > filter_th)
  expr_filter <- expr[retain_idx,]
  # Fit a smooth mean-variance trend (log2 scale) and rank genes by the
  # residual ("hyper") variance above the trend.
  row_mean <- rowMeans(expr_filter)
  row_var <- rowVars(as.matrix(expr_filter))
  data_fit <- data.frame(X=log2(row_mean+1),Y=log2(row_var+1))
  fit_model <- gam(Y~s(X),data=data_fit)
  hyper_var <- log2(row_var+1) - fit_model$fitted.values
  hyper_var_sort <- sort(hyper_var,decreasing=TRUE)
  # Threshold at the `var_th` quantile of excess variance (strictly above).
  hyper_var_th <- hyper_var_sort[round(length(hyper_var_sort)*var_th)]
  var_idx <- which(hyper_var > hyper_var_th)
  if(dataplot){
    plot(log2(row_mean+1),log2(row_var+1),pch=19)
    points(log2(row_mean+1),fit_model$fitted.values,pch=19,col="blue")
    points(log2(row_mean+1)[var_idx],log2(row_var+1)[var_idx],pch=19,col="red")
  }
  expr_select <- expr_filter[var_idx,]
  # Strip version suffixes (e.g. ENSG00000123.4 -> ENSG00000123) before
  # matching. sub() is vectorized, so the original per-element sapply() loops
  # are unnecessary; the dropped names were never used downstream.
  gene_names <- sub("\\..*","",row.names(expr_select))
  train_gene <- sub("\\..*","",row.names(RNA_train_all))
  match_idx <- match(gene_names,train_gene)
  # Hoist the repeated which(!is.na(match_idx)) computation.
  matched <- which(!is.na(match_idx))
  print(paste0(length(matched)," genes are selected"))
  return(list(expr_select=log2(expr_select[matched,]+1),
              RNA_train_select=RNA_train_all[match_idx[matched],]))
}
#' @title Select variable ENCODE cluster features from scATAC-seq data
#' @description Select variable ENCODE cluster features from scATAC-seq data.
#' Cells with too few reads are dropped, features are row-centered, and the
#' features with the largest variance above a fitted mean-variance trend are
#' kept.
#' @param DHS_data Summarized ENCODE cluster features from scATAC-seq data
#' (list with elements \code{data} and \code{lib}).
#' @param lib_th Drop cells whose total read count is below this threshold.
#' @param var_th Keep the top fraction of features ranked by excess variance.
#' @param dataplot If TRUE, plot the mean-variance trend with the selected
#' features highlighted.
#' @return
#' \item{data_select}{Selected (row-centered) ENCODE cluster features.}
#' \item{select_idx}{Index for the selected ENCODE cluster features.}
#' @keywords variable feature scATAC-seq
#' @examples
#' \dontrun{
#' scATAC_data_select <- select_DHS_cluster(DHS_data,lib_th=1e3,var_th=0.2,dataplot=FALSE)
#' }
#' @export
select_DHS_cluster <- function(DHS_data,lib_th=1e3,var_th=0.2,dataplot=FALSE){
  # Drop low-coverage cells, then center each feature.
  keep_cells <- !(DHS_data$lib < lib_th)
  filtered <- DHS_data$data[, keep_cells]
  centered <- filtered - rowMeans(filtered)
  colnames(centered) <- colnames(filtered)
  # Mean-variance trend across features.
  cluster_mean <- rowMeans(filtered)
  cluster_var <- rowVars(as.matrix(filtered))
  trend_fit <- gam(Y ~ s(X), data = data.frame(X = cluster_mean, Y = cluster_var))
  # Excess variance above the trend; keep the top `var_th` fraction.
  excess_var <- cluster_var - trend_fit$fitted.values
  cutoff <- sort(excess_var, decreasing = TRUE)[round(length(excess_var) * var_th)]
  var_idx <- which(excess_var > cutoff)
  selected <- centered[var_idx, ]
  if (dataplot) {
    plot(cluster_mean, cluster_var, pch = 19)
    points(cluster_mean, trend_fit$fitted.values, pch = 19, col = "blue")
    points(cluster_mean[var_idx], cluster_var[var_idx], pch = 19, col = "red")
  }
  print(paste0(ncol(selected), " cells are retained and ", length(var_idx), " DHS clusters are selected"))
  return(list(data_select = selected, select_idx = var_idx))
}
#' @title Linear regression function based on sure independence screening
#' @description Build a linear regression model on the top N predictors most
#' correlated with the response, then predict for the test samples.
#' @param DNase_train ENCODE cluster feature values (one feature across the
#' training samples) used as the regression response.
#' @param RNA_train_mean Gene cluster means from ENCODE RNA-seq data
#' (clusters x training samples) used as candidate predictors.
#' @param RNA_test_mean Gene cluster means from scRNA-seq data
#' (clusters x test samples) used for prediction.
#' @param top_n Number of predictors used in the regression model.
#' @return
#' \item{y_pre}{A vector of predicted ENCODE cluster features.}
#' @keywords prediction
#' @export
cluster_regression_topn <- function(DNase_train,RNA_train_mean,RNA_test_mean,top_n){
  # Correlation of the response with every candidate predictor; treat NA
  # (e.g. constant predictors) as zero correlation.
  pred_cor <- cor(DNase_train, t(RNA_train_mean))
  pred_cor[is.na(pred_cor)] <- 0
  if (top_n > 1) {
    # Sure independence screening: keep the top_n most correlated predictors.
    chosen <- head(order(pred_cor, decreasing = TRUE), top_n)
    train_df <- data.frame(y = DNase_train, t(RNA_train_mean[chosen, ]))
    test_df <- data.frame(t(RNA_test_mean[chosen, ]))
    model <- lm(y ~ ., train_df)
  } else {
    # Single most correlated predictor.
    chosen <- which.max(pred_cor)
    train_df <- data.frame(y = DNase_train, x = RNA_train_mean[chosen, ])
    test_df <- data.frame(x = RNA_test_mean[chosen, ])
    model <- lm(y ~ x, train_df)
  }
  predict(model, test_df)
}
#' @title Prediction of ENCODE cluster features based on scRNA-seq data
#' @description Predict ENCODE cluster features from scRNA-seq data. Genes are
#' first grouped into clusters (k-means on the standardized bulk training
#' expression) and the per-cluster mean profiles are used as predictors for
#' \code{cluster_regression_topn}.
#' @param expr_select Input gene expression data from scRNA-seq (genes x cells).
#' @param DNase_train ENCODE cluster features from DNase-seq data for building
#' the regression model (features x training samples).
#' @param RNA_train Gene expression from ENCODE RNA-seq data for building the
#' regression model (genes x training samples).
#' @param num_predictor Number of predictors used in the prediction model;
#' capped at (number of gene clusters - 1).
#' @param cluster_scale The scale to determine the number of gene clusters. The
#' number of gene clusters is obtained by [the number of genes]/[cluster_scale].
#' @param seed Set the seed in kmeans clustering for reproducible results.
#' @return
#' \item{Y_pre}{A matrix of predicted ENCODE cluster features (features x cells).}
#' @keywords prediction
#' @examples
#' \dontrun{
#' Y_pre <- pre_model(expr_select,DNase_train,RNA_train,num_predictor=10,cluster_scale=10,seed=12345)
#' }
#' @export
pre_model <- function(expr_select,DNase_train,RNA_train,num_predictor=10,cluster_scale=10,seed=12345){
  ## Standardize each gene across all columns (scRNA-seq cells first, then bulk
  ## training samples) so single-cell and bulk profiles are comparable.
  RNA_all_sd <- t(apply(cbind(expr_select,RNA_train),1,scale))
  RNA_train_sd <- RNA_all_sd[,-c(1:ncol(expr_select))]
  ## Cluster genes based on the bulk training data only.
  cluster_num <- round(nrow(RNA_train_sd)/cluster_scale)
  # Cannot use at least as many predictors as there are clusters.
  if(num_predictor >= cluster_num){
    num_predictor <- cluster_num - 1
  }
  print(paste0("Using ",cluster_num," clusters and ",num_predictor," predictors for prediction"))
  set.seed(seed)
  gene_cluster <- kmeans(RNA_train_sd, centers=cluster_num, nstart=10, iter.max=50)$cluster
  # Per-cluster mean profile; a singleton cluster contributes its single row.
  RNA_all_mean <- sapply(c(min(gene_cluster):max(gene_cluster)),function(x){
    if(length(which(gene_cluster == x)) == 1){
      RNA_all_sd[which(gene_cluster == x),]
    }
    else{
      colMeans(RNA_all_sd[which(gene_cluster == x),])
    }
  })
  RNA_all_mean <- t(RNA_all_mean)
  # Quantile-normalize the cluster means across all cells/samples
  # (normalize.quantiles comes from the preprocessCore package).
  RNA_all_mean_norm <- normalize.quantiles(RNA_all_mean)
  colnames(RNA_all_mean_norm) <- c(colnames(expr_select),colnames(RNA_train))
  expr_select_norm <- RNA_all_mean_norm[,c(1:ncol(expr_select))]
  RNA_train_norm <- RNA_all_mean_norm[,-c(1:ncol(expr_select))]
  # One regression per ENCODE cluster feature; columns = single cells.
  Y_pre <- matrix(data=NA,nrow=nrow(DNase_train),ncol=ncol(expr_select_norm))
  colnames(Y_pre) <- colnames(expr_select_norm)
  pb = txtProgressBar(min = 0, max = nrow(DNase_train), initial = 0, style = 3)
  for(i in 1:nrow(DNase_train)){
    setTxtProgressBar(pb,i)
    Y_pre[i,] <- cluster_regression_topn(DNase_train[i,],RNA_train_norm,expr_select_norm,num_predictor)
  }
  close(pb)
  return(Y_pre)
}
#' @title Evaluate the spatial distribution of the mixed single cells from different data types
#' @description Test whether the single cells from different data types are
#' mixed well. For each cell in the input data, a Fisher's exact test checks
#' whether the ratio of input cells to reference cells within a fixed radius
#' matches the ratio of the total number of input cells to the total number of
#' reference cells.
#' @param input_data Low dimensional representation of single cells from one
#' data type, used as the input for matching (e.g., PCs from scRNA-seq data).
#' @param ref_data Low dimensional representation of single cells from another
#' data type, used as the reference (e.g., PCs from scATAC-seq data).
#' @param dist_scale Scale used to define the radius of the region for testing
#' (radius = larger within-set diameter / dist_scale).
#' @param print_message Flag to print the radius used for the testing.
#' @return
#' \item{input_count}{The number of input cells in each neighborhood (self included).}
#' \item{ref_count}{The number of reference cells in each neighborhood.}
#' \item{pval}{P-values from Fisher's exact tests.}
#' @keywords spacial test
#' @examples
#' \dontrun{
#' neighbor_test_p <- neighbor_test(input_data,ref_data,dist_scale=10)$pval
#' }
#' @export
neighbor_test <- function(input_data,ref_data,dist_scale=10,print_message=TRUE){
  # Neighborhood radius: the larger of the two within-set diameters, scaled down.
  dist_search <- max(max(dist(input_data)),max(dist(ref_data)))/dist_scale
  if(print_message){
    print(paste0("Using radius ",dist_search))
  }
  # Expected (global) counts for the 2x2 test table.
  input_expect <- nrow(input_data)
  ref_expect <- nrow(ref_data)
  # Pairwise distances between all cells, split into input-vs-input and
  # input-vs-ref sub-matrices. drop = FALSE keeps the matrix shape even when
  # there is a single input cell.
  dist_all <- as.matrix(dist(rbind(input_data,ref_data)))
  input_dist_all <- dist_all[c(1:nrow(input_data)),c(1:nrow(input_data)), drop = FALSE]
  ref_dist_all <- dist_all[c(1:nrow(input_data)),-c(1:nrow(input_data)), drop = FALSE]
  # Neighborhood counts for every input cell (self included, distance 0).
  # BUG FIX: the original assigned input_count[i]/ref_count[i] inside the
  # sapply() closure, which only modifies local copies, so the returned counts
  # were all NA. Compute them up front instead.
  input_count <- unname(rowSums(input_dist_all < dist_search))
  ref_count <- unname(rowSums(ref_dist_all < dist_search))
  fisher_pval <- sapply(1:nrow(input_data),function(i){
    test_table <- matrix(c(input_count[i],ref_count[i],input_expect,ref_expect),
                         nrow = 2,dimnames = list(c("input", "ref"),c("observe", "expect")))
    fisher.test(test_table)$p.value
  })
  return(list(input_count=input_count,ref_count=ref_count,pval=fisher_pval))
}
#' @title Evaluate the spatial distribution of the mixed single cells from different data types while given the cell type labels
#' @description Evaluate whether single cells from the same cell type are mixed
#' well across data types. For each input cell, a Fisher's exact test compares
#' the ratio of same-type input cells to same-type reference cells inside a
#' fixed radius against the ratio of the total number of input cells to the
#' total number of reference cells.
#' @param input_data Low dimensional representation of single cells from one
#' data type, used as the input for matching (e.g., PCs from scRNA-seq data).
#' @param ref_data Low dimensional representation of single cells from another
#' data type, used as the reference (e.g., PCs from scATAC-seq data).
#' @param input_mem Cell type label for the input_data.
#' @param ref_mem Cell type label for the ref_data.
#' @param dist_scale Scale used to define the radius of the region for testing.
#' @param print_message Flag to print the radius used for the testing.
#' @return
#' \item{test_stat}{P-values from Fisher's exact tests.}
#' @keywords spacial test evaluation
#' @examples
#' \dontrun{
#' test_stat <- eval_neighbor_test(input_data,ref_data,input_mem,ref_mem,dist_scale=10,print_message=TRUE)
#' }
#' @export
eval_neighbor_test <- function(input_data,ref_data,input_mem,ref_mem,dist_scale=10,print_message=TRUE){
  # Neighborhood radius: larger within-set diameter scaled down by dist_scale.
  search_radius <- max(max(dist(input_data)), max(dist(ref_data))) / dist_scale
  if (print_message) {
    print(paste0("Using radius ", search_radius))
  }
  n_input <- nrow(input_data)
  n_ref <- nrow(ref_data)
  # Euclidean distance between two rows (same arithmetic as the original).
  row_dist <- function(a, b) sqrt(sum((a - b)^2))
  sapply(seq_len(n_input), function(i) {
    # Distances from input cell i to every reference / input cell.
    to_ref <- sapply(seq_len(n_ref), function(j) row_dist(input_data[i, ], ref_data[j, ]))
    to_input <- sapply(seq_len(n_input), function(j) row_dist(input_data[i, ], input_data[j, ]))
    # Neighbors restricted to cells sharing the cell type of cell i
    # (self included on the input side, distance 0).
    same_type_ref <- sum(to_ref < search_radius & ref_mem == input_mem[i])
    same_type_input <- sum(to_input < search_radius & input_mem == input_mem[i])
    contingency <- matrix(c(same_type_input, same_type_ref, n_input, n_ref),
                          nrow = 2,
                          dimnames = list(c("input", "ref"), c("observe", "expect")))
    fisher.test(contingency)$p.value
  })
}
#' @title Run MNN to correct for platform effects
#' @description Runs mutual-nearest-neighbour (MNN) correction between the
#' experimental scATAC-seq data and the scATAC-seq data predicted from
#' scRNA-seq, then returns the two corrected matrices bound together.
#' @param atac_data scATAC-seq data for matching.
#' @param pre_result Predicted scATAC-seq data based on scRNA-seq.
#' @param k The number of mutual nearest neighbors in MNN.
#' @param sigma The bandwidth of the Gaussian smoothing kernel used to compute the correction vector.
#' @param MNN_ref A flag to determine which data type is used as reference in MNN. Select from "scATAC" and "scRNA".
#' @param ... Additional arguments forwarded to \code{mnnCorrect}.
#' @return
#' \item{data_combine}{The combined data matrix from all single cells, with
#' the experimental cells' columns first.}
#' @keywords MNN
#' @examples
#' \dontrun{
#' data_combine <- run_MNN(atac_data,pre_result,k=param_opt$k_opt,sigma=param_opt$sigma_opt,MNN_ref="scATAC")
#' }
#' @export
run_MNN <- function(atac_data, pre_result, k, sigma, MNN_ref = "scATAC", ...) {
  # Row-center the predicted matrix and align its dimension names with the
  # experimental data before correction.
  centered_pred <- pre_result - rowMeans(pre_result)
  colnames(centered_pred) <- colnames(pre_result)
  row.names(centered_pred) <- row.names(atac_data)

  # mnnCorrect treats its first argument as the reference batch, so the
  # argument order (and the order we unpack the corrected batches) depends
  # on MNN_ref.  Either way the experimental cells end up first.
  if (MNN_ref == "scATAC") {
    mnn_out <- mnnCorrect(atac_data, centered_pred, k = k, sigma = sigma, ...)
    data_combine <- cbind(mnn_out$corrected[[1]], mnn_out$corrected[[2]])
  } else {
    mnn_out <- mnnCorrect(centered_pred, atac_data, k = k, sigma = sigma, ...)
    data_combine <- cbind(mnn_out$corrected[[2]], mnn_out$corrected[[1]])
  }

  colnames(data_combine) <- c(colnames(atac_data), colnames(centered_pred))
  data_combine
}
|
6c9276e8e06a0f6ec88dae29a3a2cc1a51c13d19
|
91e29ff9983166531f07f8412d61280cdf57e1d4
|
/mergeFARSdata.R
|
f64749a567abff8ed4e976ec49a21d1388803f43
|
[] |
no_license
|
alexvbr22082018/sparklyr_wrecks
|
811d430ea6a0835ab9f01b765ca842b940dd5c19
|
68ce2068fc6cdcda9d700e68d047bf46ba63a604
|
refs/heads/master
| 2021-05-20T13:29:45.664607
| 2017-02-24T14:33:16
| 2017-02-24T14:33:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,876
|
r
|
mergeFARSdata.R
|
###############################################
## Prepare historical FARS data for Analysis ##
###############################################

## Install any missing dependencies once, instead of unconditionally
## reinstalling on every run.
for (pkg in c("data.table", "RPostgreSQL", "dplyr")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}

## Load libraries
library(data.table)
library(RPostgreSQL)
library(dplyr)

## Connect to PostgreSQL database on Compose for 2014 FARS data
## Note: I downloaded the 2014 FARS data, created tables in PostgreSQL, and am
## accessing them here. You can also simply download the vehicle and accident
## files for 2014 in their native format and work from there.
## Replace the placeholders below with your own credentials (ideally read
## from environment variables rather than hard-coded).
drv <- dbDriver("PostgreSQL")
conn <- dbConnect(drv,
                  dbname = "yourdb",
                  host = "yourhost",
                  port = "portnumber",
                  user = "user",
                  password = "yourpassword") # was `password`, an undefined object

## View tables in PostgreSQL database
dbListTables(conn)

## Load PostgreSQL data into R DataFrame
accident.df <- dbGetQuery(conn, "SELECT * FROM accident")
vehicle.df <- dbGetQuery(conn, "SELECT * FROM vehicle")

###############################################################################
## After downloading all years of accident and vehicle data from FARS,       ##
## I am going to merge them into one table. The trouble here is that column  ##
## names do not always match, so we'll have to work around that by matching  ##
## column names to those found in the 2014 dataset.                          ##
###############################################################################

## Helper: read every CSV in the current working directory (dropping the
## first column of row names), bind the rows, and lower-case column names.
## Run each section below with the corresponding yearly CSVs in the
## working directory, as in the original workflow.
read_and_bind <- function() {
  # "\\.csv$" is a proper regex; the original "*.csv" only worked by accident.
  files <- list.files(pattern = "\\.csv$")
  frames <- lapply(files, function(f) {
    read.csv(f, stringsAsFactors = FALSE, header = TRUE)[, -1]
  })
  combined <- rbindlist(frames, fill = TRUE, use.names = TRUE)
  setnames(combined, tolower(names(combined)))
  combined
}

## Accidents
accident.all <- read_and_bind()
accident.all <- select(accident.all, -c(50, 51)) ## Remove duplicate columns
## Subset to columns found in the 2014 FARS data (intersect preserves the
## column order of accident.all, matching the original %in%/which logic).
accident.all <- select(accident.all, all_of(intersect(names(accident.all), names(accident.df))))
## Write to disk
write.csv(accident.all, file = 'accident_all.csv', row.names = FALSE)

## Vehicles
vehicle.all <- read_and_bind()
## Subset to columns found in the 2014 FARS data
vehicle.all <- select(vehicle.all, all_of(intersect(names(vehicle.all), names(vehicle.df))))
## Write to disk
write.csv(vehicle.all, file = 'vehicle_all.csv', row.names = FALSE)

## Persons
person.all <- read_and_bind()
write.csv(person.all, file = 'person_all.csv', row.names = FALSE)

## Load each yearly file (accident_75.csv, vehicle_75.csv, ...) as its own
## data frame, lower-casing the column names as we go.  (The original loop
## tried to lower-case the names of a character element — a no-op — and then
## fixed it with a second pass over the environment; this does it directly.)
dframes <- list.files(pattern = "\\.csv$")
for (f in dframes) {
  d <- read.csv(f, stringsAsFactors = FALSE, header = TRUE)[, -1]
  names(d) <- tolower(names(d))
  assign(f, d)
}

## Join each year's accident and vehicle tables (1975-2013).  The objects are
## named after their files, e.g. `accident_75.csv`, so build the names from
## two-digit year suffixes instead of spelling out 39 near-identical joins.
year_suffixes <- sprintf("%02d", c(75:99, 0:13))
moddatlist <- lapply(year_suffixes, function(yy) {
  left_join(get(paste0("accident_", yy, ".csv")),
            get(paste0("vehicle_", yy, ".csv")),
            by = "st_case")
})

## Merge dataframes in list
mod_dat_all <- rbindlist(moddatlist, fill = TRUE)

## Create key for aggregations
mod_dat_all$key <- paste0(mod_dat_all$st_case, mod_dat_all$year)

## Filter columns
mod_dat_all <- select(mod_dat_all, state.x, st_case, dr_drink, make, deaths, speedrel,
                      trav_sp, body_typ, mod_year, prev_acc, prev_sus, prev_dwi,
                      prev_spd, dr_hgt, dr_wgt, permvit, county, city, day_week,
                      day.x, month.x, hour.x, year, road_fnc, latitude, longitud, reljct1,
                      typ_int, lgt_cond, weather, fatals, drunk_dr, key)

## mod_dat_all should be the file that is worked with in the Jupyter Notebook
|
2f4c6f619b16aee55b81e73b22451cee29a23203
|
993c745624125a614a059ba35401e143c934653a
|
/BPD_API_Example.R
|
468a824732e32674416e77aa753d451f884a960d
|
[
"MIT"
] |
permissive
|
mammmals/BioPlexDisplayExamples
|
b6570ff5653314713d6a23278e50d47fb026c0ff
|
3a6a19d209bf7c135a8b0f743bb5a98fe9c08d9e
|
refs/heads/master
| 2020-03-25T12:36:14.500437
| 2018-08-06T21:21:25
| 2018-08-06T21:21:25
| 143,784,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 678
|
r
|
BPD_API_Example.R
|
# Query the BioPlex display API with RCurl + jsonlite.
library(RCurl)
library(jsonlite)

base.url = "http://bioplex.hms.harvard.edu/bioplexDisplay/api/api.php?geneQuery="

# Return a single table:
one.table = fromJSON(getURL(paste0(base.url, 55909)))

# Loop over multiple gene IDs.  The last ID is intentionally invalid to
# exercise the "not found" branch.
gene.IDs = c(55909, 1956, 5796, 10000000000)
# Preallocate the outputs instead of growing them inside the loop.
presence = character(length(gene.IDs))
multiple.tables = vector("list", length(gene.IDs))
for (i in seq_along(gene.IDs)) {
  # Fetch each URL once and reuse the response; the original called getURL()
  # twice per gene (once for validate(), once for fromJSON()), doubling the
  # network traffic and risking inconsistent responses.
  response = getURL(paste0(base.url, gene.IDs[i]))
  if (validate(response)) {
    multiple.tables[[i]] = fromJSON(response)
    presence[i] = "yes"
  } else {
    multiple.tables[[i]] = 0
    presence[i] = "no"
  }
}
output.table = as.data.frame(cbind(gene.IDs, presence))
|
cdbf9ab33beddbd7098bc58e0fb25e12691d087e
|
e813c4bf652f5ea919c7fff8ee2d84cc36e56f2b
|
/Quality Control Charts/u-chart.R
|
29c8eb4e1fbfa5b68576c0c4fae11ce8ba6beffb
|
[] |
no_license
|
AnakinNN/R-for-Industrial-Engineering
|
c77c909f28448f8135decc7a57366ef3dbc5956a
|
777423c6e8eb470f7fc15aa0a52968b527beaa47
|
refs/heads/master
| 2023-03-30T09:55:33.206536
| 2021-04-05T15:10:11
| 2021-04-05T15:10:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
u-chart.R
|
### U-CHART ###

# The qcc package provides the control-chart machinery.
library(qcc)

# Read a csv file (chosen interactively) into a data frame.
defect_df <- read.csv(file.choose(), header = FALSE)

# Assign the column names.
colnames(defect_df) <- c("Defects", "Sample_size")

# Create the U-chart (defects per unit, variable sample size).
u_chart <- qcc(defect_df$Defects, defect_df$Sample_size,
               type = "u", data.name = "defects")

# Get the summary for the chart.
summary(u_chart)

# EXAMPLE with simulated counts
library(qcc)
defects <- as.integer(rnorm(50, 3, 1))
sample_size <- as.integer(rnorm(50, 20, 3))
sim_df <- data.frame(defects, sample_size)
u_chart <- qcc(sim_df$defects, sim_df$sample_size, type = "u", data.name = "defects")
summary(u_chart)
|
d0f2b5636f3690110e6c0de0c13d60c601f047e0
|
022e5a9f38fd71cbc182215c07d83de7b82113a8
|
/08-figures.R
|
b3997a144efd5ff11c1e04258abb80a807031eb6
|
[
"MIT"
] |
permissive
|
yangxhcaf/ESDL
|
f5ecdc60b599150d96fadd3eea4d5bcfb936a9a6
|
bf6c4a4c3798c94ca9aa0d300ef26e589a971a6c
|
refs/heads/master
| 2022-12-05T07:51:06.921545
| 2020-08-29T09:15:19
| 2020-08-29T09:15:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,858
|
r
|
08-figures.R
|
library(tidyverse)
library(tictoc)

# load results (defines `df` and, presumably, `df_biomes` — TODO confirm
# the RData contents):
load('~/Documents/Projects/ESDL_earlyadopter/ESDL/200805_results_ews_halfwindow_gpp.RData')

## join the ecosystem information:
df <- left_join(df, df_biomes)

## Quantiles of the fractal-dimension "diff" values.  Computed up front
## because several plots below annotate them — in the original script
## `quants` was referenced before it was defined.
quants <- df %>%
  filter(stat == "fd") %>%
  filter(feature == "diff") %>%
  pull(value) %>%
  quantile(prob = c(0.5, 0.75, 0.9, 0.95))

# map the biomes
df %>%
  ggplot(aes(lon, lat)) +
  geom_tile(aes(fill = biome)) +
  theme_void()

## Comparison of maps
df %>% filter(stat == "std") %>%
  ggplot(aes(lon, lat)) +
  geom_tile(aes(fill = value)) +
  scale_fill_viridis_c() +
  facet_grid(stat ~ feature)

## Comparison of distributions by biome, annotated with the quantiles:
df %>% filter(stat == "fd") %>%
  filter(feature == "diff") %>%
  ggplot(aes(value, group = biome)) +
  geom_density(aes(fill = biome), alpha = 0.5) +
  geom_vline(xintercept = quants, color = "grey60", size = 0.5) +
  annotate(
    "text", x = quants, y = 6,
    label = c("50%", "75%", "90%", "95%"),
    hjust = 0) +
  geom_rug(aes(color = value)) +
  scale_color_viridis_c(option = "C") +
  theme_light()

## Per-biome boxplots of the "diff" values:
df %>% filter(stat == "fd") %>%
  filter(feature == "diff") %>%
  ggplot(aes(value, biome)) +
  geom_boxplot(aes(fill = biome), alpha = 0.5, show.legend = FALSE) +
  facet_wrap(~stat, scales = "free_x") +
  theme_light()

## 90th percentile of "diff" per biome and statistic (the original named
## this column q75 even though it computes the 0.9 quantile):
quants_all <- df %>%
  filter(feature == "diff") %>%
  group_by(biome, stat) %>%
  summarize(q90 = quantile(value, prob = 0.9)) %>%
  ungroup()

## large differences in std are >0.3 : the tail of the distribution
df %>% filter(stat == "std") %>%
  filter(feature == "diff") %>%
  ggplot(aes(lon, lat)) +
  geom_tile(aes(fill = value)) +
  scale_fill_viridis_c(option = "C") +
  theme_void()

## Pixels above the 90% quantile.  (The original referenced an undefined
## object `quant90`; use the precomputed quantile instead.)
df %>% filter(stat == "std") %>%
  filter(feature == "diff") %>%
  filter(value > quants[["90%"]])
|
04838eebc881208cf2257f969042b147724a266e
|
3b9c67c1a4dcbeb269255ddfbdcfcacb10cc7554
|
/man/infit.conf.Rd
|
09890651c5c9d406cfb55404b32388413d8c3098
|
[
"MIT"
] |
permissive
|
hyunsooseol/BootFit
|
23e5048ed95d0b6dde3fdb5abc52954538a46344
|
9f1327d99f4e925cbca1969742ca2692d35373a8
|
refs/heads/master
| 2021-04-04T10:18:17.041184
| 2020-03-28T08:39:02
| 2020-03-28T08:39:02
| 248,448,402
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 436
|
rd
|
infit.conf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/infitConfi.R
\name{infit.conf}
\alias{infit.conf}
\title{Bootstrap boot infit.mnsq CI Function}
\usage{
infit.conf(object)
}
\arguments{
\item{object}{A data frame of item response data.}
}
\value{
Bootstrap 95\% confidence intervals for the item infit mean-square statistics.
}
\description{
This function calculates bootstrap item infit 95% Confidence Interval
using 'boot' package
}
\examples{
#Not run
#infit.conf(data)
}
|
6d6d340636279691f221af1f742342d30684a6a2
|
34ddd88340d93fc8a674411dfc02340609f3495f
|
/plot2.R
|
33bafda360bbacaf2b89adee7fdc876031c8a489
|
[] |
no_license
|
khemkaiitr/ExData_Plotting1
|
22e75f2290364d56a1523955e04c7faa93b3f172
|
cddd532ba715b941e7b356d53346ed92c9d0e7a1
|
refs/heads/master
| 2021-01-18T19:51:09.268375
| 2014-09-07T14:25:45
| 2014-09-07T14:25:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
r
|
plot2.R
|
source('getData.R') # This line gets the data in working directory (defines `data`)

# Parse the date column once, then keep only the two target days.
obs_date <- as.Date(data$Date, format = '%d/%m/%Y')
ndata <- data[obs_date %in% as.Date(c('2007-02-01', '2007-02-02')), ]

# Create the plot.  plot() returns NULL invisibly, so the original
# `myplot <-` assignment was dropped.
png(file = "plot2.png", bg = "white", width = 480, height = 480)
plot(strptime(paste(ndata$Date, ndata$Time), format = '%d/%m/%Y %H:%M:%S'),
     as.numeric(as.character(ndata$Global_active_power)),
     xlab = '', ylab = 'Global Active Power (kilowatts)',
     type = 'l')
dev.off()
|
cc4e41402a41dc7bbbd8b37cb66bd37c4aea7900
|
2580e61158bf6db87581f1885b1e6629730ddc8b
|
/R/helpers-general.R
|
9a02875c66074b7479add928b6694a96c69d30b4
|
[] |
no_license
|
jimmyday12/fitzRoy
|
22d134e027c817c739098902a49eee97d2191e72
|
386f9c9f12d787d1f0fe429ff669ec3853b6f8f8
|
refs/heads/main
| 2023-08-17T23:25:35.570311
| 2023-08-07T04:17:14
| 2023-08-07T04:17:14
| 116,004,026
| 117
| 46
| null | 2023-08-07T04:17:15
| 2018-01-02T10:33:15
|
R
|
UTF-8
|
R
| false
| false
| 3,605
|
r
|
helpers-general.R
|
#' Check Season
#'
#' Validates a season value, substituting the current year when `x` is NULL
#' and aborting when any element is shorter than four characters.
#'
#' @param x Season in Year format
#'
#' @keywords internal
#' @noRd
check_season <- function(x) {
  # Default to the current calendar year when nothing was supplied.
  if (is.null(x)) {
    x <- as.numeric(format(Sys.Date(), "%Y"))
  }
  if (min(nchar(x)) < 4) {
    rlang::abort(glue::glue("Season should be in YYYY format"))
  }
  return(x)
}
#' Check comp
#'
#' Validates a competition name against the supported set, aborting with an
#' informative message otherwise.
#'
#' @param x Comp name
#'
#' @keywords internal
#' @noRd
check_comp <- function(x) {
  # Supported competitions.
  valid <- c("AFLM", "AFLW", "VFL", "VFLW", "WAFL", "U18B", "U18G")
  if (!x %in% valid) {
    rlang::abort(glue::glue(
      "`Comp` must be one of {glue::glue_collapse(valid, sep = \", \", last = \" or \")}
      You provided the following: {x}"
    ))
  }
  return(x)
}
#' Check Source
#'
#' Validates a data-source name against the supported set, aborting with an
#' informative message otherwise.
#'
#' @param x Source name
#'
#' @keywords internal
#' @noRd
check_source <- function(x) {
  # Supported data sources.
  valid <- c("AFL", "footywire", "afltables", "squiggle", "fryzigg")
  if (!x %in% valid) {
    rlang::abort(glue::glue(
      "`Source` must be one of {glue::glue_collapse(valid, sep = \", \", last = \" or \")}
      You provided the following: {x}"
    ))
  }
  return(x)
}
#' Check Comp Source
#'
#' Validates a comp/source pair: each must be individually valid, and AFLW
#' data is only available from a restricted set of sources.
#'
#' @param comp Comp name
#' @param source Source name
#'
#' @keywords internal
#' @noRd
check_comp_source <- function(comp, source) {
  check_comp(comp)
  check_source(source)

  # Only these sources provide AFLW data.
  valid <- c("AFL", "fryzigg")
  if (comp == "AFLW" && !source %in% valid) {
    rlang::abort(glue::glue(
      "For AFLW, source must be one of {glue::glue_collapse(valid, sep = \", \", last = \" or \")}
    You provided the following: {source}"
    ))
  }
}
#' Verify Year
#'
#' Coerces the input to numeric and checks it is a valid year between 1897
#' and the current calendar year (inclusive).
#'
#' @param year Year in numeric format YYYY
#' @return The year as a numeric scalar.
#'
#' @keywords internal
#' @noRd
verify_year <- function(year) {
  year <- suppressWarnings(as.numeric(year))
  if (is.na(year)) {
    # as.numeric() failed: the input was not a number at all.
    stop("Not a year.")
  } else if (year >= 1897 && year <= as.numeric(format(Sys.Date(), "%Y"))) {
    # `&&` (scalar, short-circuiting) instead of the vectorized `&`.
    return(year)
  } else {
    stop("Not a valid year within available range.")
  }
}
#' Returns start and end dates given a season range
#'
#' When `season` is NULL the range defaults to the start of the current year
#' through today; otherwise it spans Jan 1 of the earliest season to Dec 31
#' of the latest.  The end date is always clamped to today.
#'
#' @param season Season in numeric format YYYY
#'
#' @keywords internal
#' @noRd
return_start_end_dates <- function(season) {
  # Validate every supplied season (no-op iteration when season is NULL).
  season_checked <- purrr::map_dbl(season, check_season)

  if (is.null(season)) {
    start_date <- lubridate::ymd(
      paste0(format(Sys.Date(), "%Y"), "/01/01"),
      quiet = TRUE
    )
    end_date <- lubridate::parse_date_time(Sys.Date(), c("dmy", "ymd"), quiet = TRUE)
  } else {
    start_date <- lubridate::parse_date_time(
      paste0(min(season_checked), "-01-01"), c("ymd"),
      quiet = TRUE
    )
    end_date <- lubridate::parse_date_time(
      paste0(max(season_checked), "-12-31"), c("ymd"),
      quiet = TRUE
    )
  }

  # Never return a range that extends into the future.
  if (end_date > Sys.Date()) {
    end_date <- lubridate::parse_date_time(Sys.Date(), c("dmy", "ymd"), quiet = TRUE)
  }

  # Quiet parsing returns NA on failure, so surface that explicitly.
  if (is.na(start_date)) {
    stop(paste(
      "Date format not recognised",
      "Check that start_date is in dmy or ymd format"
    ))
  }
  if (is.na(end_date)) {
    stop(paste(
      "Date format not recognised",
      "Check that end_date is in dmy or ymd format"
    ))
  }

  list(
    start_date = start_date,
    end_date = end_date
  )
}
|
d89de65da1935f92d4a17341ea523ee28f1c49cc
|
74a9517302f057830a377ede2be56693c0696956
|
/server.r
|
2a74894095c9959b700d8f76a11d1db6a646d48f
|
[] |
no_license
|
alanmcknight/SalesAnalysis
|
be7a7711b019c79584ecd91fe3dd816b6465a543
|
68e3d6fa4a196978160e7bee48d01b53b1d5b6b7
|
refs/heads/master
| 2021-01-16T21:43:15.068826
| 2017-11-30T14:20:29
| 2017-11-30T14:20:29
| 65,289,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,985
|
r
|
server.r
|
library(shiny)
library(shinydashboard)
library(plotly)
library(leaflet)
library(lubridate)
library(stringr)
source("user.R")
source("admin.R")
# Hard-coded demo login credentials, matched by position (user i pairs with
# password i).
# NOTE(review): storing plaintext credentials in source is insecure — move
# these to environment variables or a secrets store before production use.
my_username <- c("stephen.mccann", "helen.campbell", "alan","mark.walker", "greg.wilson", "admin")
my_password <- c("Stephen","Helen", "Alan", "Mark", "Greg", "123")
# Map a username to its application role: "admin" gets ADMIN,
# everyone else gets TEST.
get_role <- function(user) {
  if (user == "admin") {
    return("ADMIN")
  }
  "TEST"
}
# Select the UI field list for a role: TEST users get the restricted list,
# any other role gets the admin list.  list_field_user / list_field_admin
# come from the sourced user.R / admin.R files.
get_ui <- function(role) {
  if (role == "TEST") {
    return(list_field_user)
  }
  list_field_admin
}
shinyServer(function(input, output,session) {
  # Load the master sales extract and the supplementary report from the app
  # directory (both latin1-encoded exports).
  my_data <- read.csv("ApricotSalesMaster2.csv", header = TRUE, stringsAsFactors =FALSE, fileEncoding="latin1")
  AdData <- read.csv("AdditionalReport.csv", header = TRUE, stringsAsFactors =FALSE, fileEncoding="latin1")
  # Parse the creation date once so the reactive date-range filters can
  # compare it as a Date rather than as text.
  my_data$BTXDatecreated <- as.Date( as.character(my_data$BTXDatecreated), "%d/%m/%Y")
  # Choices for the UI filter controls: all column names, and "All" plus the
  # distinct product names.
  filterList1 <- colnames(my_data)
  filterList2 <- c("All", sort(unique(my_data$Product)))
percent <- function(x, digits = 2, format = "f") {
paste0(formatC(100 * x, format = format, digits = digits), "%")
}
currency <- function(x) {
paste("£",format(x, big.mark=","),sep="")
}
specify_decimal <- function(x, k) format(round(x, k), nsmall=k)
  # Reactive: the master sales data restricted to the selected creation-date
  # range and, unless "All" is chosen, to a single product.
  data <- reactive({
    my_data1 <- subset(my_data, my_data$BTXDatecreated >= input$dateRange[1] & my_data$BTXDatecreated <= input$dateRange[2])
    if(input$filterName != "All"){
      my_data1 <- subset(my_data1, my_data1$Product == input$filterName)
    }
    my_data1
  })
  # Reactive: same filtering as data(), but column-trimmed to everything up
  # to "UK.Residency.Years" plus the last three (derived) columns — the
  # subset shown in the detail table.
  data1 <- reactive({
    my_data1 <- subset(my_data[, c(1:(which(colnames(my_data)=="UK.Residency.Years")), (ncol(my_data)-2):ncol(my_data))], my_data$BTXDatecreated >= input$dateRange[1] & my_data$BTXDatecreated <= input$dateRange[2])
    if(input$filterName != "All"){
      my_data1 <- subset(my_data1, my_data1$Product == input$filterName)
    }
    my_data1
  })
  ## Reporting ##
  # Reactive: builds the data frame for the report selected in
  # input$reportSelect.  Branches:
  #   "Sales Report"            - date-filtered slice of the master data.
  #   "USwitch Report"          - sales + cancellations for policies whose
  #                               web reference contains "APRUS", reshaped to
  #                               the aggregator's required column layout.
  #   "Call Connections Report" - same pattern for "APRCC" policies.
  #   "ALPS LE Report", "MIS Report", "XS Cover Report"
  #                             - unsettled policies for the named insurer,
  #                               reshaped to that insurer's bordereaux format.
  #   "Quotezone Report"        - placeholder (returns 2), not implemented.
  data2 <- reactive({
    if(input$reportSelect[1] == "Sales Report"){
      my_data1 <- subset(my_data[, c(1:(which(colnames(my_data)=="UK.Residency.Years")), (ncol(my_data)-2):ncol(my_data))], my_data$BTXDatecreated >= input$dateRange1[1] & my_data$BTXDatecreated <= input$dateRange1[2])
      my_data1}else if(input$reportSelect[1] == "USwitch Report"){
      #my_data$BTXDatecreated <- as.Date( as.character(my_data$BTXDatecreated), "%d/%m/%Y")
      Data1 <- AdData
      # Policies sourced via USwitch are identified by "APRUS" in the web ref.
      USwitchData <- my_data[grep("APRUS", my_data$ECWebref),]
      # Sales in the range, plus cancellations whose effective date falls in
      # the range (Cancellation == "N" means "not cancelled").
      SalesData<- subset(USwitchData, USwitchData$BTXDatecreated >= input$dateRange1[1] & USwitchData$BTXDatecreated <= input$dateRange1[2])
      CancellationData <- subset(USwitchData, USwitchData$Cancellation != "N")
      CancellationData <- subset(CancellationData, as.Date(CancellationData$Cancellation, "%d/%m/%Y") >= input$dateRange1[1] & as.Date(CancellationData$Cancellation, "%d/%m/%Y") <= input$dateRange1[2])
      USwitchData <- rbind(SalesData, CancellationData)
      USwitchData <- USwitchData [!duplicated(USwitchData ), ]
      USwitchData$recordtype <- "Sale"
      USwitchData$salesmonth <- cut(as.Date(USwitchData$BTXDtraised), "month")
      USwitchData$brand <- "Apricot"
      USwitchData$surname <- "NA"
      # Enrich with the additional report, then cut down / rename to the
      # partner's column layout.
      USwitchData1 <- merge(USwitchData, Data1, by = "BTXPolref", all.x=TRUE)
      USwitchData1 <- USwitchData1[c("recordtype", "salesmonth", "brand", "BCMEmail.x", "BCMPcode.x", "BCMName", "surname", "BCMDob.x", "CFReg", "BTXDtraised.x", "ECWebref.x", "BTXPolref", "BTXPaymethod.x", "BTXOrigdebt.x", "BTXDatecreated.x", "Cancellation", "Cancellation", "FinanceValue", "BTXInsurer.x")]
      # Split the single name field: last word = surname, second-to-last =
      # first name.
      USwitchData1$surname <- word(USwitchData1$BCMName, -1)
      USwitchData1$BCMName <- word(USwitchData1$BCMName, -2)
      colnames(USwitchData1) <- c("recordtype", "salesmonth", "brand", "emailaddress", "postcode", "firstname", "surname", "dob", "carregistrationnumber", "policystartdate", "policyquotereference", "providerquotereference", "purchasechannel", "premium", "policypurchasedate", "cancellationreason", "cancellationeffectivedate", "purchasetype", "insurerunderwritingpolicy")
      USwitchData1 <- USwitchData1[!duplicated(USwitchData1), ]
      # Recode the internal flags to the partner's vocabulary.
      USwitchData1$cancellationreason[USwitchData1$cancellationreason == "N"] <- ""
      USwitchData1$cancellationeffectivedate[USwitchData1$cancellationeffectivedate == "N"] <- ""
      USwitchData1$purchasechannel[USwitchData1$purchasechannel == "O"] <- "Online"
      USwitchData1$purchasechannel[USwitchData1$purchasechannel != "Online"] <- "Telephone"
      USwitchData1$cancellationreason[USwitchData1$cancellationreason != ""] <- "NTU"
      # Non-zero finance value implies a monthly payment plan.
      USwitchData1$purchasetype[USwitchData1$purchasetype != "0"] <- "Monthly"
      USwitchData1$purchasetype[USwitchData1$purchasetype == "0"] <- "Annual"
      USwitchData1
    }else if(input$reportSelect[1] == "Call Connections Report"){
      #my_data$BTXDatecreated <- as.Date( as.character(my_data$BTXDatecreated), "%d/%m/%Y")
      Data1 <- AdData
      # Policies sourced via Call Connections carry "APRCC" in the web ref.
      CCData <- my_data[grep("APRCC", my_data$ECWebref),]
      SalesData<- subset(CCData, CCData$BTXDatecreated >= input$dateRange1[1] & CCData$BTXDatecreated <= input$dateRange1[2])
      CancellationData <- subset(CCData, CCData$Cancellation != "N")
      CancellationData <- subset(CancellationData, as.Date(CancellationData$Cancellation, "%d/%m/%Y") >= input$dateRange1[1] & as.Date(CancellationData$Cancellation, "%d/%m/%Y") <= input$dateRange1[2])
      CCData <- rbind(SalesData, CancellationData)
      CCData <- CCData [!duplicated(CCData ), ]
      CCData$brand <- "Apricot"
      CCData$surname <- "NA"
      # CCData$title <- "NA"
      CCData1 <- merge(CCData, Data1, by = "BTXPolref", all.x=TRUE)
      CCData1 <- CCData1[c( "BCMTitle", "BCMName", "surname", "BCMDob.x", "BCMPcode.x", "BCMEmail.x", "CFReg", "BTXPolref", "ECWebref.x", "BTXDatecreated.x", "BTXDtraised.x", "BTXOrigdebt.x", "brand", "Cancellation")]
      CCData1$BCMTitle <- word(CCData1$BCMTitle, 1)
      CCData1$surname <- word(CCData1$BCMName, -1)
      CCData1$BCMName <- word(CCData1$BCMName, -2)
      colnames(CCData1) <- c("Title", "FirstName", "Surname", "DateOfBirth", "PostCode", "Email", "CarReg", "PartnerCustomerReference", "PartnerQuoteReference", "QuoteDate", "PolicyInceptionDate", "Premium", "Brand", "Cancellation")
      CCData1 <- CCData1[!duplicated(CCData1), ]
      CCData1$Cancellation[CCData1$Cancellation == "N"] <- ""
      # CCData1$cancellationeffectivedate[CCData1$cancellationeffectivedate == "N"] <- ""
      #
      # CCData1$purchasechannel[CCData1$purchasechannel == "O"] <- "Online"
      # CCData1$purchasechannel[CCData1$purchasechannel != "Online"] <- "Telephone"
      # CCData1$cancellationreason[CCData1$cancellationreason != ""] <- "NTU"
      #
      # CCData1$purchasetype[CCData1$purchasetype != "0"] <- "Monthly"
      # CCData1$purchasetype[CCData1$purchasetype == "0"] <- "Annual"
      CCData1
    }else if(input$reportSelect[1] == "ALPS LE Report"){
      # Unsettled ALPS legal-expenses ("LE") policies, in ALPS's bordereaux
      # layout.  IPT 0.91 appears to be a fixed per-unit tax amount — TODO
      # confirm with the insurer spec.
      ALPSReport <- AdData[AdData$BTXDtsettled == "" & AdData$BTXInsurer == "Auto Legal Protection Services" & AdData$BTXPoltype == "LE",]
      ALPSReport$BCMTitle <- word(ALPSReport$BCMTitle, 1)
      ALPSReport$surname <- word(ALPSReport$BCMName, -1)
      ALPSReport$BCMName <- word(ALPSReport$BCMName, -2)
      ALPSReport$No..of.Units <- 0
      ALPSReport$PriceSoldFor <- 0
      ALPSReport$IPT <- 0.91
      ALPSReport <- ALPSReport[,c("BTXPolref", "BCMTitle", "BCMName", "surname", "BTXDtraised", "BCMAddr1", "BCMAddr2", "BCMAddr3", "BCMAddr4", "BCMPcode", "No..of.Units", "BTXOrigdebt", "IPT")]
      colnames(ALPSReport) <- c("Broker Policy Number", "Title", "FirstName", "Surname / Company Name", "Startdate", "Address 1", "Address 2", "Address 3", "Address 4", "Post Code", "No. of Units", "PriceSoldFor", "IPT")
      ALPSReport
    }else if(input$reportSelect[1] == "MIS Report"){
      # Unsettled MIS Claims policies (excluding "HQ" policy types).
      MISReport <- AdData[AdData$BTXDtsettled == "" & AdData$BTXInsurer == "MIS Claims" & AdData$BTXPoltype != "HQ",]
      MISReport <- MISReport[,c("BTXPolref", "BCMName", "BCMAddr1", "BCMAddr2", "BCMAddr3", "BCMAddr4", "BCMPcode", "BCMTel", "BTXDtraised")]
      # Renewal date = policy raised date plus one year.
      MISReport$BTXDtraised <- as.Date(MISReport$BTXDtraised, "%d/%m/%Y")
      year(MISReport$BTXDtraised) <- year(MISReport$BTXDtraised)+1
      # First six characters of the policy ref identify the client; used to
      # attach a vehicle registration from any of the client's policies.
      MISReport$UserID <- substr(MISReport[,1], 1, 6)
      AdData$CFReg <- ifelse(AdData$CFReg == "", AdData$TW1Regmark, AdData$CFReg)
      VehicleReg <- AdData[AdData$CFReg != "",c("BTXPolref", "CFReg")]
      VehicleReg <- VehicleReg[!duplicated(VehicleReg), ]
      VehicleReg$UserID <- substr(VehicleReg$BTXPolref, 1, 6)
      MISReport <- merge(MISReport, VehicleReg, by = "UserID", all.x=TRUE)
      MISReport$UserID <- NULL
      MISReport$BTXPolref <- NULL
      MISReport <- MISReport[,c("BTXPolref.x", "BCMName", "BCMAddr1", "BCMAddr2", "BCMAddr3", "BCMAddr4", "BCMPcode", "BCMTel", "CFReg", "BTXDtraised")]
      colnames(MISReport) <- c("Broker Ref", "Name", "Address 1", "Address 2", "Address 3", "Address 4", "Postcode", "Phone Number", "Vehicle Registration", "Policy Renewal Date")
      MISReport <- MISReport[!duplicated(MISReport), ]
      MISReport
    }else if(input$reportSelect[1] == "XS Cover Report"){
      # Unsettled XS Cover excess-protection ("XS") policies.
      XSReport <- AdData[AdData$BTXDtsettled == "" & AdData$BTXInsurer == "XS Cover" & AdData$BTXPoltype == "XS",]
      # Termination date = inception plus one year.
      XSReport$BTXDtraised1 <- as.Date(XSReport$BTXDtraised, "%d/%m/%Y")
      year(XSReport$BTXDtraised1) <- year(XSReport$BTXDtraised1)+1
      XSReport$Cover <- 0
      XSReport <- XSReport[,c("BTXPolref", "BTXTrantype", "BTXDtraised", "BTXDtraised1", "BCMName", "BCMAddr1", "BCMAddr2", "BCMAddr3", "BCMAddr4", "BCMPcode", "BCMTel", "CFReg", "Cover", "BTXOrigdebt")]
      XSReport$BTXTrantype[XSReport$BTXTrantype == "Renewal"] <- "REN"
      XSReport$BTXTrantype[XSReport$BTXTrantype == "New Business"] <- "NB"
      # Same client-id/vehicle-registration enrichment as the MIS branch.
      XSReport$UserID <- substr(XSReport[,1], 1, 6)
      VehicleReg <- AdData[AdData$CFReg != "",c("BTXPolref", "CFReg")]
      VehicleReg <- VehicleReg[!duplicated(VehicleReg), ]
      VehicleReg$UserID <- substr(VehicleReg$BTXPolref, 1, 6)
      XSReport <- merge(XSReport, VehicleReg, by = "UserID", all.x=TRUE)
      XSReport$CFReg.x <- XSReport$CFReg.y
      XSReport$CFReg.y <- NULL
      XSReport$BTXPolref.y <- NULL
      XSReport$UserID <- NULL
      colnames(XSReport) <- c("Reference", "Reason for Issue", "Inception Date", "Termination Date", "Assured", "Address1", "Address2", "Address3", "Address4", "Postcode", "Telno", "Vehicle Reg", "Cover", "Premium inc IPT")
      XSReport
    }else if(input$reportSelect[1] == "Quotezone Report"){
      # Placeholder: Quotezone report not implemented yet.
      my_data2 <- 2
      my_data2
    }
  })
  #Summary Table Sales Tab
  # Reactive: one-row summary for the Sales tab — counts of new business,
  # paid renewals, pending renewals, and new-business cancellations, plus a
  # cancellation rate and total profit (formatted via percent()/currency()).
  data6 <- reactive({
    my_data1 <- data()
    # NOTE(review): this first my_data2 assignment is immediately overwritten
    # by the next line and has no effect — looks like leftover code; confirm
    # before removing.
    my_data2 <- subset(my_data1, BTXPaydt != "")
    # Paid transactions plus all new business (renewals only count once paid).
    my_data2 <- my_data1[ which(my_data1$BTXPaydt != "" | my_data1$BTXTrantype == "New Business"),]
    Totals <- data.frame(matrix(NA, nrow = 1, ncol = 6))
    colnames(Totals) <- c("New Business", "Renewals", "Pending Renewals", "New Business Cancellations", "New Business Cancellation Percentage", "Profit")
    # Cancellation == "N" means the policy was not cancelled.
    Totals[1,1] <- nrow(subset(my_data1, Cancellation == "N" & BTXTrantype == "New Business"))
    Totals[1,2] <- nrow(subset(my_data2, Cancellation == "N" & BTXTrantype == "Renewal"))
    Totals[1,3] <- nrow(subset(my_data1, BTXTrantype == "Pending Renewal"))
    Totals[1,4] <- nrow(subset(my_data1, Cancellation != "N" & BTXTrantype == "New Business"))
    # Cancellation rate = cancellations / (sales + cancellations).
    Totals[1,5] <- percent(as.numeric(Totals[1,4])/(as.numeric(Totals[1,1]) + as.numeric(Totals[1,4])))
    Totals[1,6] <- currency(sum(as.numeric(my_data2$TotalValue)))
    Totals
  })
#Summary Table Sales Tab
# data7 <- reactive({
# Profit <- data8()
# Profit[,6] <- currency(Profit[,2])
# Profit[,2] <- Profit[,6]
# Profit[,6] <- NULL
# Profit
# })
## Main Graph and Table##
# Master summary for the Sales tab: profit, sale/cancellation counts and
# add-on/finance/discount metrics grouped by 1-3 user-selected columns
# (input$dataset3) and summarised as Sum / Mean / Count / "% Uptake"
# (input$dataset4).  The three branches are the same pipeline specialised
# for one, two or three grouping columns; columns are addressed by
# position, so statement order is significant throughout.
data8 <- reactive({
my_data1 <- data()
my_data2 <- my_data1
#my_data1 <- subset(my_data1, BTXPaydt != "")
# ---- one grouping column ----
if(length(input$dataset3) == 1){
# Total profit per group, then count of non-cancelled sales per group.
Profit <- aggregate(my_data2$TotalValue~my_data2[[input$dataset3[1]]], my_data2, FUN = sum)
Profit[,2] <- round(Profit[,2], 2)
Sales <- my_data2[ which(my_data2$Cancellation=='N'),]
Summary <- aggregate(cbind(Sales$TotalValue, Sales$TrafficCost)~Sales[[input$dataset3[1]]], Sales, FUN = length)
# For Count / % Uptake the per-metric figures count rows with a non-zero
# value; otherwise (Sum / Mean) the raw values are summed.
if(input$dataset4 == "Count" | input$dataset4 == "% Uptake"){
TrafficCostCount <- subset(Sales, Sales$TrafficCost != 0)
TrafficCostCount <- aggregate(TrafficCostCount$TrafficCost~TrafficCostCount[[input$dataset3[1]]], TrafficCostCount, FUN = length)
AddOnCount1 <- subset(Sales, Sales$AddOnCount != 0)
AddOnCount1 <- aggregate(AddOnCount1$AddOnCount~AddOnCount1[[input$dataset3[1]]], AddOnCount1, FUN = length)
FinanceValueCount <- subset(Sales, Sales$FinanceValue != 0)
FinanceValueCount <- aggregate(FinanceValueCount$FinanceValue~FinanceValueCount[[input$dataset3[1]]], FinanceValueCount, FUN = length)
DiscountCount <- subset(Sales, Sales$Discount < 0)
DiscountCount <- aggregate(DiscountCount$Discount~DiscountCount[[input$dataset3[1]]], DiscountCount, FUN = length)
# Rename group keys to a common name so the four metric tables merge.
names(TrafficCostCount)[1] <- "Subset1"
names(AddOnCount1)[1] <- "Subset1"
names(FinanceValueCount)[1] <- "Subset1"
names(DiscountCount)[1] <- "Subset1"
Summary2 <- merge(TrafficCostCount, AddOnCount1, by="Subset1", all=T)
Summary2 <- merge(Summary2, FinanceValueCount, by="Subset1", all=T)
Summary2 <- merge(Summary2, DiscountCount, by="Subset1", all=T)
}else{
Summary2 <- aggregate(cbind(Sales$TrafficCost, Sales$AddOnValue, Sales$FinanceValue, Sales$Discount)~Sales[[input$dataset3[1]]], Sales, FUN = sum)
}
names(Summary2)[1]<-input$dataset3[1]
names(Profit)[1]<-input$dataset3[1]
Profit$Sales <- Summary[,2][match(Profit[,1], Summary[,1])]
# Attach cancellation counts (0 when there are none).
Cancellations <- my_data2[ which(my_data2$Cancellation!="N"),]
if(nrow(Cancellations) >0){
CountCancellations <- aggregate(as.numeric(Cancellations$TotalValue), by=list(Category=Cancellations[[input$dataset3[1]]]), FUN=length)
names(CountCancellations)[2]<-"Count"
Profit$Cancellations <- CountCancellations$Count[match(Profit[,1], CountCancellations$Category)]
} else{Profit$Cancellations <- 0}
# Reorder to: group, Sales, Cancellations, Profit -- positional maths below
# relies on this layout.
Profit <- Profit[,c(1,3,4,2)]
Profit <- merge(Profit,Summary2, by=input$dataset3[1], all.x=T)
Profit[is.na(Profit)] <- 0
if(input$dataset4 == "Mean"){
Profit[,5:8] <- round(Profit[,5:8]/Profit[,2], 2)
# NOTE(review): denominator adds column 2 to itself (2 * sales); the other
# branches divide by sales + cancellations -- confirm which is intended.
Profit[,4] <- round(Profit[,4]/(as.numeric(Profit[,2])+as.numeric(Profit[,2])), 2)
}
if(input$dataset4 == "% Uptake"){
Profit[,5:8] <- round(Profit[,5:8]/Profit[,2]*100, 2)
}
# Column 9: sales cancellation percentage; Y1/Y2 profit projections.
Profit[,9] <- round((as.numeric(Profit[,3])/(as.numeric(Profit[,2])+as.numeric(Profit[,3])))*100, 2)
Profit$Y1Profit <- round((Profit[,4]+Profit[,5])*0.75, 2)
Profit$Y2Profit <- round((Profit[,4]+Profit[,5])*0.56, 2)
names(Profit)[4:9]<-c(paste(input$dataset4, "of Profit", sep = " ") , paste(input$dataset4, "of Traffic Cost", sep = " "), paste(input$dataset4, " of Add-Ons", sep = " "), paste(input$dataset4, "of Finance", sep = " "), paste(input$dataset4, "of Discount", sep = " "), "Sales Cancellation Percentage")
# Drop the columns that make no sense for the chosen summary mode.
if(input$dataset4 == "Count" | input$dataset4 == "% Uptake" ){
#Profit[,c("Y1Profit", "Y2Profit")] <- NULL
Profit <- Profit[ -c(4, 10:11) ]
}
if(input$dataset4 == "% Uptake"){
names(Profit)[4]<-"% Paid Traffic"
}
if(input$dataset4 == "Mean"){
Profit <- Profit[ -c(9:11) ]
}
# Append overall total and average profit (the plot code drops these two).
Profit2 <- aggregate(my_data2$TotalValue~my_data2[[input$dataset3[1]]], my_data2, FUN = sum)
names(Profit2)[2]<-"Total Profit"
Profit <- merge(Profit, Profit2, by=1, all.x=T)
Profit[,ncol(Profit)+1] <- round(Profit[,ncol(Profit)]/(Profit[,2]+Profit[,3]), 2)
names(Profit)[ncol(Profit)]<-"Average Profit"
Profit
# ---- two grouping columns: same pipeline, keys shifted right by one ----
}else if(length(input$dataset3) == 2){
Profit <- aggregate(as.numeric(my_data2$TotalValue) ~ my_data2[[input$dataset3[1]]] + my_data2[[input$dataset3[2]]], my_data2, FUN=sum)
Profit[,3] <- round(Profit[,3], 2)
Sales <- my_data2[ which(my_data2$Cancellation=='N'),]
Summary <- aggregate(Sales$TotalValue~Sales[[input$dataset3[1]]]+ Sales[[input$dataset3[2]]], Sales, FUN = length)
if(input$dataset4 == "Count" | input$dataset4 == "% Uptake"){
TrafficCostCount <- subset(Sales, Sales$TrafficCost != 0)
TrafficCostCount <- aggregate(TrafficCostCount$TrafficCost~TrafficCostCount[[input$dataset3[1]]]+TrafficCostCount[[input$dataset3[2]]], TrafficCostCount, FUN = length)
AddOnCount1 <- subset(Sales, Sales$AddOnCount != 0)
AddOnCount1 <- aggregate(AddOnCount1$AddOnCount~AddOnCount1[[input$dataset3[1]]]+AddOnCount1[[input$dataset3[2]]], AddOnCount1, FUN = length)
FinanceValueCount <- subset(Sales, Sales$FinanceValue != 0)
FinanceValueCount <- aggregate(FinanceValueCount$FinanceValue~FinanceValueCount[[input$dataset3[1]]]+FinanceValueCount[[input$dataset3[2]]], FinanceValueCount, FUN = length)
DiscountCount <- subset(Sales, Sales$Discount < 0)
DiscountCount <- aggregate(DiscountCount$Discount~DiscountCount[[input$dataset3[1]]]+DiscountCount[[input$dataset3[2]]], DiscountCount, FUN = length)
names(TrafficCostCount)[1] <- "Subset1"
names(TrafficCostCount)[2] <- "Subset2"
names(AddOnCount1)[1] <- "Subset1"
names(AddOnCount1)[2] <- "Subset2"
names(FinanceValueCount)[1] <- "Subset1"
names(FinanceValueCount)[2] <- "Subset2"
names(DiscountCount)[1] <- "Subset1"
names(DiscountCount)[2] <- "Subset2"
Summary2 <- merge(TrafficCostCount, AddOnCount1, by=c("Subset1", "Subset2"), all=T)
Summary2 <- merge(Summary2, FinanceValueCount, by=c("Subset1", "Subset2"), all=T)
Summary2 <- merge(Summary2, DiscountCount, by=c("Subset1", "Subset2"), all=T)
}else{
Summary2 <- aggregate(cbind(Sales$TrafficCost, Sales$AddOnValue, Sales$FinanceValue, Sales$Discount)~Sales[[input$dataset3[1]]]+ Sales[[input$dataset3[2]]], Sales, FUN = sum)
}
names(Summary)[1]<-input$dataset3[1]
names(Summary)[2]<-input$dataset3[2]
names(Summary2)[1]<-input$dataset3[1]
names(Summary2)[2]<-input$dataset3[2]
names(Profit)[1]<-input$dataset3[1]
names(Profit)[2]<-input$dataset3[2]
Profit <- merge(Profit, Summary, by=c(input$dataset3[1],input$dataset3[2]), all.x=T)
names(Profit)[4] <- "Sales"
Cancellations <- my_data2[which(my_data2$Cancellation!="N"),]
if(nrow(Cancellations) >0){
CountCancellations <- aggregate(as.numeric(Cancellations$TotalValue) ~ Cancellations[[input$dataset3[1]]] + Cancellations[[input$dataset3[2]]], Cancellations, FUN=length)
names(CountCancellations)[1]<-input$dataset3[1]
names(CountCancellations)[2]<-input$dataset3[2]
Profit <- merge(Profit, CountCancellations, by=c(input$dataset3[1],input$dataset3[2]), all=T)
names(Profit)[5] <- "Cancellations"
}else{Profit$Cancellations <- 0}
Profit <- Profit[,c(1,2,4,5,3)]
Profit <- merge(Profit,Summary2, by=c(input$dataset3[1],input$dataset3[2]), all.x=T)
Profit[is.na(Profit)] <- 0
if(input$dataset4 == "Mean"){
Profit[,6:9] <- round(Profit[,6:9]/Profit[,3], 2)
Profit[,5] <- round(Profit[,5]/(as.numeric(Profit[,3])+as.numeric(Profit[,4])), 2)
}
if(input$dataset4 == "% Uptake"){
Profit[,6:9] <- round(Profit[,6:9]/Profit[,3]*100, 2)
}
Profit[,10] <- round((as.numeric(Profit[,4])/(as.numeric(Profit[,3])+as.numeric(Profit[,4]))*100), 2)
# NOTE(review): Y1/Y2 weights here (0.5/0.25 with a discount term) differ
# from the one-column branch (0.75/0.56) -- confirm this is intentional.
Profit$Y1Profit <- round((Profit[,5]+Profit[,6])*0.5+abs(Profit[,9]*0.5*-0.5), 2)
Profit$Y2Profit <- round((Profit[,5]+Profit[,6])*0.25+abs(Profit[,9]*0.25*-0.5), 2)
names(Profit)[5:10]<-c(paste(input$dataset4, "of Profit", sep = " ") , paste(input$dataset4, "of Traffic Cost", sep = " "), paste(input$dataset4, " of Add-Ons", sep = " "), paste(input$dataset4, "of Finance", sep = " "), paste(input$dataset4, "of Discount", sep = " "), "Sales Cancellation Percentage")
if(input$dataset4 == "Count" | input$dataset4 == "% Uptake" ){
#Profit[,c("Y1Profit", "Y2Profit")] <- NULL
Profit <- Profit[ -c(5, 11:12) ]
}
if(input$dataset4 == "% Uptake"){
names(Profit)[5]<-"% Paid Traffic"
}
if(input$dataset4 == "Mean"){
Profit <- Profit[ -c(10:12) ]
}
Profit2 <- aggregate(my_data2$TotalValue~my_data2[[input$dataset3[1]]]+my_data2[[input$dataset3[2]]], my_data2, FUN = sum)
names(Profit2)[3]<-"Total Profit"
Profit <- merge(Profit, Profit2, by=c(1, 2), all.x=T)
Profit[,ncol(Profit)+1] <- round(Profit[,ncol(Profit)]/(Profit[,3]+Profit[,4]), 2)
names(Profit)[ncol(Profit)]<-"Average Profit"
Profit
# ---- three grouping columns: keys shifted right by two ----
} else if(length(input$dataset3) == 3){
Profit <- aggregate(as.numeric(my_data2$TotalValue) ~ my_data2[[input$dataset3[1]]] + my_data2[[input$dataset3[2]]]+ my_data2[[input$dataset3[3]]], my_data2, FUN=sum)
Profit[,4] <- round(Profit[,4], 2)
Sales <- my_data2[ which(my_data2$Cancellation=='N'),]
Summary <- aggregate(Sales$TotalValue~Sales[[input$dataset3[1]]]+ Sales[[input$dataset3[2]]]+ Sales[[input$dataset3[3]]], Sales, FUN = length)
if(input$dataset4 == "Count" | input$dataset4 == "% Uptake"){
TrafficCostCount <- subset(Sales, Sales$TrafficCost != 0)
TrafficCostCount <- aggregate(TrafficCostCount$TrafficCost~TrafficCostCount[[input$dataset3[1]]]+TrafficCostCount[[input$dataset3[2]]]+TrafficCostCount[[input$dataset3[3]]], TrafficCostCount, FUN = length)
AddOnCount1 <- subset(Sales, Sales$AddOnCount != 0)
AddOnCount1 <- aggregate(AddOnCount1$AddOnCount~AddOnCount1[[input$dataset3[1]]]+AddOnCount1[[input$dataset3[2]]]+AddOnCount1[[input$dataset3[3]]], AddOnCount1, FUN = length)
FinanceValueCount <- subset(Sales, Sales$FinanceValue != 0)
FinanceValueCount <- aggregate(FinanceValueCount$FinanceValue~FinanceValueCount[[input$dataset3[1]]]+FinanceValueCount[[input$dataset3[2]]]+FinanceValueCount[[input$dataset3[3]]], FinanceValueCount, FUN = length)
DiscountCount <- subset(Sales, Sales$Discount < 0)
DiscountCount <- aggregate(DiscountCount$Discount~DiscountCount[[input$dataset3[1]]]+DiscountCount[[input$dataset3[2]]]+DiscountCount[[input$dataset3[3]]], DiscountCount, FUN = length)
names(TrafficCostCount)[1] <- "Subset1"
names(TrafficCostCount)[2] <- "Subset2"
names(TrafficCostCount)[3] <- "Subset3"
names(AddOnCount1)[1] <- "Subset1"
names(AddOnCount1)[2] <- "Subset2"
names(AddOnCount1)[3] <- "Subset3"
names(FinanceValueCount)[1] <- "Subset1"
names(FinanceValueCount)[2] <- "Subset2"
names(FinanceValueCount)[3] <- "Subset3"
names(DiscountCount)[1] <- "Subset1"
names(DiscountCount)[2] <- "Subset2"
names(DiscountCount)[3] <- "Subset3"
Summary2 <- merge(TrafficCostCount, AddOnCount1, by=c("Subset1", "Subset2", "Subset3"), all=T)
Summary2 <- merge(Summary2, FinanceValueCount, by=c("Subset1", "Subset2", "Subset3"), all=T)
Summary2 <- merge(Summary2, DiscountCount, by=c("Subset1", "Subset2", "Subset3"), all=T)
}else{
Summary2 <- aggregate(cbind(Sales$TrafficCost, Sales$AddOnValue, Sales$FinanceValue, Sales$Discount)~Sales[[input$dataset3[1]]]+ Sales[[input$dataset3[2]]]+ Sales[[input$dataset3[3]]], Sales, FUN = sum)
}
names(Summary)[1]<-input$dataset3[1]
names(Summary)[2]<-input$dataset3[2]
names(Summary)[3]<-input$dataset3[3]
names(Summary2)[1]<-input$dataset3[1]
names(Summary2)[2]<-input$dataset3[2]
names(Summary2)[3]<-input$dataset3[3]
names(Profit)[1]<-input$dataset3[1]
names(Profit)[2]<-input$dataset3[2]
names(Profit)[3]<-input$dataset3[3]
Profit <- merge(Profit, Summary, by=c(input$dataset3[1],input$dataset3[2], input$dataset3[3]), all.x=T)
names(Profit)[5] <- "Sales"
Cancellations <- my_data2[ which(my_data2$Cancellation!="N"),]
if(nrow(Cancellations) >0){
CountCancellations <- aggregate(as.numeric(Cancellations$TotalValue) ~ Cancellations[[input$dataset3[1]]] + Cancellations[[input$dataset3[2]]] + Cancellations[[input$dataset3[3]]], Cancellations, FUN=length)
names(CountCancellations)[1]<-input$dataset3[1]
names(CountCancellations)[2]<-input$dataset3[2]
names(CountCancellations)[3]<-input$dataset3[3]
Profit <- merge(Profit, CountCancellations, by=c(input$dataset3[1],input$dataset3[2], input$dataset3[3]), all=T)
names(Profit)[6] <- "Cancellations"
} else{Profit$Cancellations <- 0}
Profit <- Profit[,c(1,2,3,5,6,4)]
Profit <- merge(Profit,Summary2, by=c(input$dataset3[1],input$dataset3[2],input$dataset3[3]), all.x=T)
Profit[is.na(Profit)] <- 0
if(input$dataset4 == "Mean"){
Profit[,7:10] <- round(Profit[,7:10]/Profit[,4], 2)
Profit[,6] <- round(Profit[,6]/(as.numeric(Profit[,4])+as.numeric(Profit[,5])), 2)
}
if(input$dataset4 == "% Uptake"){
Profit[,7:10] <- round(Profit[,7:10]/Profit[,4]*100, 2)
}
Profit[,11] <- round(as.numeric((Profit[,5])/(as.numeric(Profit[,4])+as.numeric(Profit[,5])))*100, 2)
Profit$Y1Profit <- round((Profit[,6]+Profit[,7])*0.5+abs(Profit[,10]*0.5*-0.5), 2)
Profit$Y2Profit <- round((Profit[,6]+Profit[,7])*0.25+abs(Profit[,10]*0.25*-0.5), 2)
names(Profit)[6:11]<-c(paste(input$dataset4, "of Profit", sep = " ") , paste(input$dataset4, "of Traffic Cost", sep = " "), paste(input$dataset4, " of Add-Ons", sep = " "), paste(input$dataset4, "of Finance", sep = " "), paste(input$dataset4, "of Discount", sep = " "), "Sales Cancellation Percentage")
if(input$dataset4 == "Count" | input$dataset4 == "% Uptake" ){
#Profit[,c("Y1Profit", "Y2Profit")] <- NULL
Profit <- Profit[ -c(6, 11:13) ]
}
if(input$dataset4 == "% Uptake"){
names(Profit)[6]<-"% Paid Traffic"
}
if(input$dataset4 == "Mean"){
Profit <- Profit[ -c(11:13) ]
}
Profit2 <- aggregate(my_data2$TotalValue~my_data2[[input$dataset3[1]]]+my_data2[[input$dataset3[2]]]+my_data2[[input$dataset3[3]]], my_data2, FUN = sum)
names(Profit2)[4]<-"Total Profit"
Profit <- merge(Profit, Profit2, by=c(1, 2, 3), all.x=T)
Profit[,ncol(Profit)+1] <- round(Profit[,ncol(Profit)]/(Profit[,4]+Profit[,5]), 2)
names(Profit)[ncol(Profit)]<-"Average Profit"
Profit
}
})
# Daily performance series: per creation-date totals of profit, sale and
# cancellation counts, cancellation percentage and average profit, sorted
# by date.  Feeds the zero-grouping branch of the dashboard plots.
data9 <- reactive({
  my_data9 <- data()
  # Only paid transactions contribute to the profit and sales figures.
  # NOTE(review): removed an unused copy (my_data2) and two statements that
  # were duplicated verbatim inside the cancellation branch below.
  my_data9 <- subset(my_data9, BTXPaydt != "")
  Profit <- aggregate(as.numeric(my_data9$TotalValue), by = list(Category = my_data9$BTXDatecreated), FUN = sum)
  Sales <- my_data9[which(my_data9$Cancellation == 'N'), ]
  CountSales <- aggregate(as.numeric(Sales$TotalValue), by = list(Category = Sales$BTXDatecreated), FUN = length)
  names(CountSales)[2] <- "Count"
  Profit$Sales <- CountSales$Count[match(Profit$Category, CountSales$Category)]
  # NOTE(review): cancellations are drawn from the unfiltered data() (no
  # BTXPaydt filter), unlike the sales above -- confirm this asymmetry is
  # intentional before changing it.
  Cancellations <- data()[which(data()$Cancellation != 'N'), ]
  if (nrow(Cancellations) > 0) {
    CountCancellations <- aggregate(as.numeric(Cancellations$TotalValue), by = list(Category = Cancellations$BTXDatecreated), FUN = length)
    names(CountCancellations)[2] <- "Count"
    Profit$Cancellations <- CountCancellations$Count[match(Profit$Category, CountCancellations$Category)]
  } else {
    Profit$Cancellations <- 0
  }
  Profit[is.na(Profit)] <- 0
  # Column 5: cancellation percentage; column 6: average profit per sale.
  Profit[, 5] <- as.numeric(Profit[, 4]) / (as.numeric(Profit[, 3]) + as.numeric(Profit[, 4])) * 100
  Profit[, 6] <- Profit[, 2] / (as.numeric(Profit[, 3]) + as.numeric(Profit[, 4]))
  names(Profit)[1] <- "BTXDatecreated"
  names(Profit)[2] <- "Total Profit"
  names(Profit)[5] <- "Sales Cancellation Percentage"
  names(Profit)[6] <- "Average Profit"
  Profit <- Profit[order(Profit[1]), ]
  Profit[is.na(Profit)] <- 0
  Profit
})
# Product filter dropdown; its choices come from the file-level filterList2.
output$selectUI2 <- renderUI({
selectInput("filterName", "Select Product", filterList2)
})
# Bar chart of the metric chosen in input$plotFilter.  With no grouping
# columns it plots the daily series (data9); with one it plots the grouped
# summary (data8); with two or more it stacks bars by the second column.
output$dailyPlot2 <- renderPlotly({
  n_groups <- length(input$dataset3)
  if (n_groups == 0) {
    daily <- data9()
    plot_ly(
      x = daily[, 1],
      y = daily[, input$plotFilter],
      name = "Performance",
      type = "bar"
    )
  } else if (n_groups == 1) {
    grouped <- data8()
    plot_ly(
      x = grouped[, 1],
      y = grouped[, input$plotFilter],
      name = "Performance",
      type = "bar"
    )
  } else {
    grouped <- data8()
    plot_ly(grouped) %>%
      add_trace(data = grouped, type = "bar", x = grouped[, 1],
                y = grouped[, input$plotFilter], color = grouped[, 2]) %>%
      layout(barmode = "stack")
  }
})
# Profit projection chart: bar chart of the raw metric when no grouping is
# selected, otherwise line charts of gross profit against the Y1/Y2
# projection columns computed in data8().  Columns are addressed by
# position, so this depends on data8()'s exact layout per branch.
output$dailyPlot3 <- renderPlotly({
if(length(input$dataset3) == 0){
plot_ly(
x = data9()[,1],
y = data9()[,input$plotFilter],
name = "Performance",
type = "bar"
)}
else if(length(input$dataset3) == 1){
# Earlier grouped-bar version kept for reference:
# plot_ly(data8(), x = ~data8()[,1], y = ~data8()[,4], type = 'bar', name = 'Gross Profit') %>%
# add_trace(y = ~data8()[,10], name = 'Y1Profit') %>%
# add_trace(y = ~data8()[,11], name = 'Y2Profit') %>%
# layout(yaxis = list(title = 'Sum'), xaxis = list(title = ""), barmode = 'group')
# One grouping column: cols 4/10/11 are profit / Y1Profit / Y2Profit.
p <- plot_ly(data8(), x = ~data8()[,1], y = ~data8()[,4], type = 'scatter', mode = 'lines', name = 'Gross Profit') %>%
add_trace(y = ~data8()[,10], name = 'Y1Profit', mode = 'lines+markers') %>%
add_trace(y = ~data8()[,11], name = 'Y2Profit', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Sum £'), xaxis = list(title = ""))
p
# x = data8()[,1],
# y = data8()[,Sales],
# name = "Performance",
# type = "bar"
} else if(length(input$dataset3) > 1){
# Two+ grouping columns: restrict to New Business rows (assumes the second
# grouping column is the transaction type) and shift to cols 5/11/12.
plot <- data8()
plot <- subset(plot, plot[,2] == "New Business")
p <- plot_ly(plot, x = ~plot[,1], y = ~plot[,5], type = 'scatter', mode = 'lines', name = 'Gross Profit') %>%
add_trace(y = ~plot[,11], name = 'Y1Profit', mode = 'lines+markers') %>%
add_trace(y = ~plot[,12], name = 'Y2Profit', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Sum £'), xaxis = list(title = ""))
p
}
})
# CSV download of the raw sales extract produced by data1().
output$downloadData <- downloadHandler(
  filename = function() {
    "SaleData.csv"
  },
  content = function(file) {
    write.csv(data1(), file, row.names = FALSE)
  }
)
# CSV download of the currently selected report (data2()), named after the
# report chosen in input$reportSelect.
output$reportDownload <- downloadHandler(
  filename = function() {
    paste0(input$reportSelect[1], ".csv")
  },
  content = function(file) {
    write.csv(data2(), file, row.names = FALSE)
  }
)
# Static KPI summary table (no paging/search controls).
output$my_output_data6 <- renderDataTable({data6()}, options =list(paging = FALSE, searching = FALSE, info = FALSE))
# Grouped summary table; the last two columns (Total/Average Profit) are
# used by the plots and hidden here.
output$my_output_data8 <- renderDataTable({
if(length(input$dataset3) > 0){data8()[,1:(ncol(data8())-2)]}
})
# Login state shared by the observers below: Logged flag plus the role
# resolved via get_role() once authentication succeeds.
USER <- reactiveValues(Logged = FALSE,role=NULL)
# Login form shown until the user is authenticated.
ui1 <- function(){
tagList(
div(id = "login",
wellPanel(textInput("userName", "Username"),
passwordInput("passwd", "Password"),
br(),actionButton("Login", "Log in")))
,tags$style(type="text/css", "#login {font-size:10px; text-align: left;position:absolute;top: 40%;left: 50%;margin-top: -10px;margin-left: -150px;}")
)}
# Post-login navbar contents assembled from the role-specific pieces
# returned by the file-level get_ui().
ui2 <- function(){list(tabPanel("Sales",get_ui(USER$role)[2:3]),get_ui(USER$role)[[1]])}
# Authentication observer: on each "Log in" click, look up the entered
# username and password in the parallel vectors my_username / my_password
# (defined at file level) and log the user in when both resolve to the
# same index.
observe({
if (USER$Logged == FALSE) {
if (!is.null(input$Login)) {
if (input$Login > 0) {
Username <- isolate(input$userName)
Password <- isolate(input$passwd)
Id.username <- which(my_username == Username)
Id.password <- which(my_password == Password)
if (length(Id.username) > 0 & length(Id.password) > 0) {
# NOTE(review): this index-equality check assumes usernames and
# passwords are unique; duplicates would make which() return
# multiple indices and break the == comparison -- confirm upstream.
if (Id.username == Id.password) {
USER$Logged <- TRUE
USER$role=get_role(Username)
}
}
}
}
}
})
# Render either the login form (ui1) or the role-specific dashboard (ui2)
# into output$page depending on the login state.
observe({
if (USER$Logged == FALSE) {
output$page <- renderUI({
box(
div(class="outer",do.call(bootstrapPage,c("",ui1()))))
})
}
if (USER$Logged == TRUE) {
output$page <- renderUI({
box(width = 12,
div(class="outer",do.call(navbarPage,c(inverse=TRUE,title = "Apricot Dashboard",ui2())))
)})
#print(ui)
}
})
# number <- "6"
# observe({
# session$sendCustomMessage(type='myCallbackHandler', number)
# })
})
|
b991149eb39d08a73e2af803ffdf45722f3ea2d2
|
284f05c38d756c4077dde234b91197313eddc1f5
|
/code.R
|
b335c347eed6ea1e30d59ea630aa1ef13891167c
|
[] |
no_license
|
apth3hack3r/Fake-Insta-Profile-Detector
|
30210e30d24d16f85df31757ba54e596f1d3b644
|
40656071f099c22b67a7e8d23e11bbeff0599bf5
|
refs/heads/main
| 2023-04-11T00:31:27.576653
| 2021-04-26T06:04:20
| 2021-04-26T06:04:20
| 340,871,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,533
|
r
|
code.R
|
## Fake-Instagram-profile classification: fit and compare three models
## (logistic regression, classification tree, ridge regression) on a
## train/test split read from CSV files in the working directory.

train <- read.csv("train.csv") # loading training data
attach(train) # NOTE(review): attach() is discouraged; kept for compatibility with interactive use
names(train)
dim(train)
test <- read.csv("test.csv") # loading test data

# Exploratory plots of class balance against binary predictors (see ppt).
library(ggplot2)
ggplot(data = train, aes(x = fake, fill = as.factor(private))) + geom_bar(position = "fill")
ggplot(data = train, aes(x = fake, fill = as.factor(external.URL))) + geom_bar(position = "fill")
ggplot(data = train, aes(x = fake, fill = as.factor(profile.pic))) + geom_bar(position = "fill")

# Now we start building different models
#### Logistic Regression ####
# Full model with all predictors, kept for reference:
# model <- glm(fake ~ profile.pic + nums.length.fullname + nums.length.username + fullname.words + name..username + description.length + X.followers + X.follows + X.posts + external.URL + private, data = train, family = binomial)
# Model restricted to the statistically significant predictors.
model_lr <- glm(fake ~ profile.pic + nums.length.username + X.followers + X.follows + X.posts,
                data = train, family = binomial)
summary(model_lr)
probs <- predict(model_lr, test, type = "response")
# BUG FIX: the prediction vector length was hard-coded to 120; use
# nrow(test) so the script works for any test-set size.
lr_pred <- rep("0", nrow(test))
lr_pred[probs > 0.5] <- "1"
table(lr_pred, test$fake)   # confusion matrix
mean(lr_pred == test$fake)  # accuracy (true positives + true negatives)
mean(lr_pred != test$fake)  # error rate

#### Classification Tree ####
library(tree) # library required to call tree function
tree_mod <- tree(as.factor(fake) ~ ., train) # building tree model
summary(tree_mod)
plot(tree_mod)
text(tree_mod, pretty = 0) # label splits with the predictor names
tree_pred <- predict(tree_mod, test, type = "class") # predictions on the test set
table(tree_pred, test$fake)   # confusion matrix
mean(tree_pred == test$fake)  # accuracy
mean(tree_pred != test$fake)  # error rate

#### Ridge Regression ####
library(glmnet)
set.seed(123) # reproducible cross-validation folds
x <- model.matrix(train$fake ~ ., train)[, -1]
y <- train$fake
# Choose lambda by cross-validation, then refit at the best value.
cv.ridge <- cv.glmnet(x, y, alpha = 0, family = "binomial")
l1.model <- glmnet(x, y, alpha = 0, family = "binomial", lambda = cv.ridge$lambda.min)
x.test <- model.matrix(test$fake ~ ., data = test)[, -1]
probabilities <- predict(l1.model, newx = x.test)
predicted.classes <- ifelse(probabilities > 0.5, "1", "0")
observed.classes <- test$fake
table(predicted.classes, observed.classes)
mean(predicted.classes == observed.classes) # accuracy
|
958a5fa97f91fc4e874fdcd77f80bd95d2109b28
|
bc3b002789321e6375a4f5e35100061ece9586fd
|
/test-examples.R
|
5b85a2397ac1f670c47c6c119af5d151a4f1b3c9
|
[] |
no_license
|
nachocab/clickme_ractives
|
1583f129e41c8c111a61f061f7316a2cbb914813
|
c94b4ea11d75e08a13f9051c96b3ed1c3b8375c0
|
refs/heads/master
| 2020-04-26T15:52:40.775060
| 2013-06-03T13:26:44
| 2013-06-03T13:26:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,925
|
r
|
test-examples.R
|
test_that("the HTML example file for the vega ractive is generated", {
  ractive <- "vega"
  # The area / bar / scatter specs share the same input file and call
  # pattern, so exercise them in a single loop.  Deleting the HTML file
  # first ensures the existence check below proves fresh generation.
  for (spec in c("area", "bar", "scatter")) {
    opts <- get_opts(ractive, params = list(spec = spec))
    unlink(opts$path$html_file)
    data <- read.csv(file.path(opts$path$data, "area_bar_scatter.csv"))
    opts <- clickme_vega(data, spec, browse = FALSE)
    expect_true(file.exists(opts$path$html_file))
  }

  # The stocks spec uses its own data file.
  spec <- "stocks"
  opts <- get_opts(ractive, params = list(spec = spec))
  unlink(opts$path$html_file)
  stocks <- read.csv(file.path(opts$path$data, "stocks.csv"))
  opts <- clickme_vega(stocks, spec, browse = FALSE)
  expect_true(file.exists(opts$path$html_file))

  # The lifelines spec additionally takes an events table and a height.
  spec <- "lifelines"
  opts <- get_opts(ractive, params = list(spec = spec))
  unlink(opts$path$html_file)
  people <- read.csv(file.path(opts$path$data, "lifelines_people.csv"))
  events <- read.csv(file.path(opts$path$data, "lifelines_events.csv"))
  opts <- clickme_vega(people, spec, params = list(event_data = events, height = 200), browse = FALSE)
  expect_true(file.exists(opts$path$html_file))
})
# Checks clickme_vega's option handling: default output naming, merging of
# extra params, and the data_prefix override.
test_that("clickme_vega", {
ractive <- "vega"
# we do this to ensure that the HTML file doesn't exist before we create it
spec <- "area"
opts <- get_opts(ractive, params = list(spec = spec))
unlink(opts$path$html_file)
data <- read.csv(file.path(opts$path$data, "area_bar_scatter.csv"))
# Default naming: <data-var>_<spec>-vega.html is created on disk.
opts <- clickme_vega(data, "area", browse = FALSE)
expect_equal(opts$name$html_file, "data_area-vega.html")
expect_true(file.exists(opts$path$html_file))
# Extra params are merged into opts$params alongside the spec.
opts <- clickme_vega(data, "area", data_prefix = "my_data", params = list(width = 401), browse = FALSE)
expect_equal(opts$params$spec, "area")
expect_equal(opts$params$width, 401)
unlink(opts$path$html_file)
# data_prefix overrides the default file-name stem.
opts <- clickme_vega(data, "area", data_prefix = "my_data", browse = FALSE)
expect_equal(opts$data_prefix, "my_data")
expect_equal(opts$name$html_file, "my_data-vega.html")
unlink(opts$path$html_file)
})
test_that("the HTML example file for the one_zoom ractive is generated", {
  ractive <- "one_zoom"
  opts <- get_opts(ractive)
  # Remove any stale output so the assertion proves fresh generation.
  unlink(opts$path$html_file)
  # one_zoom takes a path to a tree file rather than a data frame.
  data <- file.path(opts$path$data, "mammals.tree")
  clickme(data, ractive, browse = FALSE)
  expect_true(file.exists(opts$path$html_file))
})
test_that("the HTML example file for the line_with_focus ractive is generated", {
  ractive <- "line_with_focus"
  opts <- get_opts(ractive)
  # Remove any stale output so the assertion proves fresh generation.
  unlink(opts$path$html_file)
  input_data <- read.csv(file.path(opts$path$data, "original_data.csv"))
  clickme(input_data, ractive, browse = FALSE)
  expect_true(file.exists(opts$path$html_file))
})
test_that("the HTML example file for the longitudinal_heatmap ractive is generated", {
  ractive <- "longitudinal_heatmap"
  opts <- get_opts(ractive)
  # Remove any stale output so the assertion proves fresh generation.
  unlink(opts$path$html_file)
  input_data <- read.csv(file.path(opts$path$data, "original_data.csv"))
  clickme(input_data, ractive, browse = FALSE)
  expect_true(file.exists(opts$path$html_file))
})
|
42dcd966319d0689abeae0ff7e3caf1ff86f733e
|
eacca63e768d93fa74dde6f79fb3e4e1cd144dfd
|
/HW2RCommands.R
|
e4a79d6c92710820b26807db29fccd381341a468
|
[] |
no_license
|
BU-IE-582/fall19-bahadirpamuk
|
1834bc00283e018a287f63d9f7993e3d02057f3e
|
d795a8e463f484c3146f990d8a7fad22c019f38a
|
refs/heads/master
| 2020-08-05T06:40:25.648633
| 2020-01-03T17:03:39
| 2020-01-03T17:03:39
| 212,434,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,762
|
r
|
HW2RCommands.R
|
library(data.table)
library(dplyr)
library(tidyr)
library("plot3D")
library(corrplot)
## Musk1 homework: PCA and multidimensional scaling (MDS) of the
## instance-level and molecule-averaged (bag-level) feature matrices,
## coloured by class label.  The ten near-identical distance/cmdscale/plot
## sequences of the original are factored into plot_mds() below; the call
## order and arguments are unchanged, so the plots produced are identical.

# Compute an MDS embedding for one distance metric and plot it.
# `p` is only consulted by dist() for the "minkowski" method.
plot_mds <- function(features, labels, method, title, p = 2) {
  distance <- dist(features, method = method, diag = TRUE, upper = TRUE, p = p)
  mds <- cmdscale(distance)
  plot(mds[, 1], mds[, 2], main = title, col = labels + 1, pch = ".", cex = 7)
}

MuskData <- fread(file="c:/Users/BAHADIR/Desktop/IE 582/HW 2/RCode/Musk1.csv", header=FALSE, sep="," , stringsAsFactors=TRUE)
MuskDataReduced <- MuskData[, 3:168] # drop the label (V1) and molecule id (V2) columns

# PCA on the correlation matrix; pairwise score plots coloured by class.
pca <- princomp(MuskDataReduced, cor = TRUE)
par(mfrow = c(1, 1))
plot(pca$scores[, 1], pca$scores[, 2], col = (MuskData[, V1] + 1), pch = ".", cex = 7)
plot(pca$scores[, 2], pca$scores[, 3], col = (MuskData[, V1] + 1), pch = ".", cex = 7)
plot(pca$scores[, 1], pca$scores[, 3], col = (MuskData[, V1] + 1), pch = ".", cex = 7)
scatter3D(pca$scores[, 1], pca$scores[, 2], pca$scores[, 3], col = (MuskData[, V1] + 1), pch = ".", cex = 5, theta = 40, phi = -5)

# Instance-level MDS under several Minkowski-family metrics.
inst_labels <- MuskData[, V1]
plot_mds(MuskDataReduced, inst_labels, "manhattan", "Manhattan")
plot_mds(MuskDataReduced, inst_labels, "minkowski", "Minkowski p=1.5", p = 1.5)
plot_mds(MuskDataReduced, inst_labels, "euclidean", "Euclidean")
plot_mds(MuskDataReduced, inst_labels, "minkowski", "Minkowski p=3", p = 3)
plot_mds(MuskDataReduced, inst_labels, "maximum", "Maximum")

# Bag level: average every feature within a molecule (grouping key V2).
MuskDataCombined <- as.data.frame(MuskData[, lapply(.SD, mean), by = V2])
MuskDataCombinedReduced <- MuskDataCombined[, 3:168]
corrplot(cor(MuskDataCombinedReduced), type = "upper", order = "hclust", tl.col = "black", tl.srt = 45)

# Drop one of each pair of features with |correlation| > 0.95 before PCA.
corrMatrix <- cor(MuskDataCombinedReduced)
corrMatrix[upper.tri(corrMatrix)] <- 0
diag(corrMatrix) <- 0
data.new <- MuskDataCombinedReduced[, !apply(corrMatrix, 2, function(x) any(abs(x) > 0.95))]

pca <- princomp(data.new, cor = TRUE)
plot(pca$scores[, 1], pca$scores[, 2], col = (MuskDataCombined[, 'V1'] + 1), pch = ".", cex = 7)
plot(pca$scores[, 2], pca$scores[, 3], col = (MuskDataCombined[, 'V1'] + 1), pch = ".", cex = 7)
plot(pca$scores[, 1], pca$scores[, 3], col = (MuskDataCombined[, 'V1'] + 1), pch = ".", cex = 7)
scatter3D(pca$scores[, 1], pca$scores[, 2], pca$scores[, 3], col = (MuskDataCombined[, 'V1'] + 1), pch = ".", cex = 5, theta = 40, phi = -5)

# Bag-level MDS under the same metrics.
bag_labels <- MuskDataCombined[, 'V1']
plot_mds(MuskDataCombinedReduced, bag_labels, "manhattan", "Manhattan")
plot_mds(MuskDataCombinedReduced, bag_labels, "minkowski", "Minkowski p=1.5", p = 1.5)
plot_mds(MuskDataCombinedReduced, bag_labels, "euclidean", "Euclidean")
plot_mds(MuskDataCombinedReduced, bag_labels, "minkowski", "Minkowski p=3", p = 3)
plot_mds(MuskDataCombinedReduced, bag_labels, "maximum", "Maximum")
# Image part of the homework: add uniform noise to an RGB image, view the
# channels, grayscale it, extract overlapping 25x25 patches and run PCA on
# the patch matrix, then render score/loading vectors back as images.
library(imager)
resim <- load.image("c:/Users/BAHADIR/Desktop/IE 582/HW 2/Picture.jpg")
str(resim)
par(mfrow=c(1,1))
plot(resim)
# Per-channel uniform noise in [channel min, 10% of channel max];
# assumes a 256x256 image -- the 256s below are hard-coded to that size.
RNoise <- replicate(256,runif(256,min(resim[,,1]),0.1*max(resim[,,1])))
GNoise <- replicate(256,runif(256,min(resim[,,2]),0.1*max(resim[,,2])))
BNoise <- replicate(256,runif(256,min(resim[,,3]),0.1*max(resim[,,3])))
noisyImage <- resim
noisyImage[,,1] <- (noisyImage[,,1] + RNoise)
noisyImage[,,2] <- (noisyImage[,,2] + GNoise)
noisyImage[,,3] <- (noisyImage[,,3] + BNoise)
# Clamp intensities back into [0, 1] after adding noise.
noisyImage[,,1] <- ifelse(noisyImage[,,1] >1 , 1, noisyImage[,,1])
noisyImage[,,2] <- ifelse(noisyImage[,,2] >1 , 1, noisyImage[,,2])
noisyImage[,,3] <- ifelse(noisyImage[,,3] >1 , 1, noisyImage[,,3])
plot(noisyImage)
# Show the three channels side by side.
par(mfrow=c(1,3))
cscale <- function(r,g,b) rgb(r,0,0)
plot(noisyImage,colourscale=cscale,rescale=FALSE)
cscale <- function(r,g,b) rgb(0,g,0)
plot(noisyImage,colourscale=cscale,rescale=FALSE)
cscale <- function(r,g,b) rgb(0,0,b)
plot(noisyImage,colourscale=cscale,rescale=FALSE)
par(mfrow=c(1,1))
grayNoisyImage <- grayscale(noisyImage)
plot(grayNoisyImage)
# One 25x25 patch centred on every pixel in [13, 244] x [13, 244]
# (232 x 232 = 53824 patches), flattened to rows of a data frame.
patchesCoordX <- rep(seq(13,244,1),232)
patchesCoordY <- rep(seq(13,244,1),each = 232)
patches <- extract_patches(grayNoisyImage, patchesCoordX, patchesCoordY, 25, 25, boundary_conditions = 0L)
dataFrames <- as.data.frame(matrix(unlist(patches), nrow=length(patches), byrow=T))
dim(dataFrames)
# PCA over patches: each row is a patch, each column one of the 625 pixels.
pca<-princomp(dataFrames,cor=T)
plot(pca$scores[,1], pca$scores[,2], pch=".",cex=1)
plot(pca$scores[,2], pca$scores[,3], pch=".",cex=1)
plot(pca$scores[,1], pca$scores[,3], pch=".",cex=1)
scatter3D(pca$scores[,1], pca$scores[,2], pca$scores[,3],pch=".",cex=1, theta = 55, phi = -5)
# Render each score component (one value per patch) as a 232x232 image.
pca1Pic <- matrix(pca$scores[,1],nrow=232,ncol =232)
dim(pca1Pic)
plot(as.cimg(pca1Pic))
pca2Pic <- matrix(pca$scores[,2],nrow=232,ncol =232)
plot(as.cimg(pca2Pic))
pca3Pic <- matrix(pca$scores[,3],nrow=232,ncol =232)
plot(as.cimg(pca3Pic))
# NOTE(review): a loading vector has one entry per pixel of a patch (625),
# not per patch (53824); matrix(..., 232, 232) therefore recycles the 625
# values with a warning.  A 25x25 matrix would show the eigen-patch --
# confirm the intended behaviour before changing it.
pca1Pic <- matrix(pca$loadings[,1],nrow=232,ncol =232)
dim(pca1Pic)
plot(as.cimg(pca1Pic))
pca2Pic <- matrix(pca$loadings[,2],nrow=232,ncol =232)
plot(as.cimg(pca2Pic))
pca3Pic <- matrix(pca$loadings[,3],nrow=232,ncol =232)
plot(as.cimg(pca3Pic))
|
0b4c0588e708563230548c9cfee4999d5a509ca4
|
7f0f99122d26a2ec67f8ebfde3600d1444535b06
|
/CODE/27_sleep_classification_alogrithm.R
|
2534d397470e7e53478f09ee52cb4fe7ed57d4eb
|
[] |
no_license
|
CarterLoftus/social_sleep
|
7e52fc6c2cdb6c23ee721fbf2152878977e113f2
|
799a5db3320f8a9d7904211e42a549f02d1eb317
|
refs/heads/main
| 2023-04-18T22:04:47.409617
| 2023-03-15T11:55:33
| 2023-03-15T11:55:33
| 518,434,403
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,890
|
r
|
27_sleep_classification_alogrithm.R
|
library(sjPlot)
library(insight)
library(httr)
library(brms)
library( zoo )
library( hms )
library( data.table )
library( stringr )
library( lubridate )
library( lmerTest )
library( plotrix )
library( suncalc )
library( LaplacesDemon )
library( dplyr )
library( purrr )
library( HDInterval )
library(multcomp)
library( nlme )
library(tidyr)
library(lmerTest)
library( sp )
library( rgdal )
library( stats )
library(rgeos)
library( entropy )
library( reshape2 )
library( plyr )
library(rstan)
library( brms )
library(fitdistrplus)
library( gamm4 )
library(glmmTMB)
library( mgcv )
library( rstudioapi )
################# Functions #########################
## Z-score a numeric vector: subtract the mean and divide by the standard
## deviation, ignoring NAs when computing both statistics. NAs in the input
## remain NA in the output; the result has the same length as the input.
normalize_func <- function( x ) {
  ( x - mean( x, na.rm = TRUE ) ) / sd( x, na.rm = TRUE )
}
## Map baboon tag IDs to plotting colours. Each colour corresponds to a fixed
## set of tag IDs; any ID not in the table (including NA) maps to 'black'.
## Accepts factors or character/numeric vectors and returns an unnamed
## character vector of colour names, one per input element.
babcolor <- function(IDvec){
  ids <- as.character( IDvec )
  ## colour groups, keyed by colour name
  groups <- list(
    skyblue = c( '6917', '6933', '6915', '6934', '6921', '6911' ),
    blue    = c( '6927', '6932', '6910', '2454', '2451', '2428' ),
    grey    = c( '6891', '6894', '2455' ),
    yellow  = c( '6898', '2448', '2436' ),
    pink    = c( '6900', '6892', '6890', '6903', '2447', '6914', '6924', '6908', '2441', '2450' ),
    red     = c( '6897', '2434', '2433' )
  )
  ## invert to a tag -> colour lookup vector
  lookup <- setNames( rep( names( groups ), lengths( groups ) ), unlist( groups ) )
  out <- unname( lookup[ ids ] )
  ## unknown or NA tags default to black
  out[ is.na( out ) ] <- 'black'
  out
}
## Add an alpha (transparency) component to one or more colours.
## 'col' is any colour specification accepted by col2rgb(); 'alpha' is the
## opacity in [0, 1] (0.5 by default). Returns hex colour strings of the
## form "#RRGGBBAA", one per input colour.
transp <- function(col, alpha=.5){
  channels <- col2rgb(col)
  rgb(channels[1, ], channels[2, ], channels[3, ],
      alpha = alpha * 255, maxColorValue = 255)
}
## Shift clock times by 12 hours so that a noon-to-noon night plots on a
## continuous axis: 12:00-24:00 maps to 0-12 h (in seconds) and 00:00-12:00
## maps to 12-24 h. 'time_vec' is anything hms::as_hms() accepts; returns
## the shifted time of day in seconds since "noon midnight".
ts_func <- function( time_vec ){
  secs <- as.numeric( as_hms( time_vec ) )
  half_day <- 12 * 60 * 60
  ifelse( secs < half_day, secs + half_day, secs - half_day )
}
## Sleep/wake classification settings.
# Classification method. I will generalize to allowing other classification methods. For now, it only works with percentile_thresh
class_meth <- 'percentile_thresh'
# this determines whether the log VeDBA threshold that makes the determination between inactive and active should be recalculated as the declared percentile of the raw log VeDBA, and not percentile of the rolling median of the log VeDBA, when determining whether each epoch is sleep or wake (the percentile of the rolling median of the log VeDBA is used in the determination of the sleep period). This parameter is only relevant if class_meth == 'percentile_thresh'
sep_thresh <- TRUE  # was `T`; TRUE is safer because T is an ordinary variable and can be reassigned
################# Determining sleep periods with modification of Van Hees et al. 2018 method ###################
################## Read in the d1 (accelerometer burst) data ###################
## d1 is a dataframe with a row for each minute for each baboon. Each row contains the raw (or interpolated) GPS acc burst, and several different measures calculated from bursts (like VeDBA)
d1 <- fread("DATA/sleep_analysis_data/processed_ACC_emom.csv")
## turn the data table into a dataframe
d1 <- as.data.frame( d1 )
## turn timestamp into POSIX element and time into character
d1$timestamp <- as.POSIXct( d1$timestamp, tz = 'UTC' )
## some of the timestamps are not exactly on the minute because the burst occurred late. Round the timestamps to the nearest minute
d1$timestamp <- round_date( d1$timestamp, unit = 'min' )
## make a column for the time of the burst
d1$time <- str_split_fixed(d1$timestamp, " ", 2)[,2]
## change times to local time here (fixed UTC+3 offset; presumably East Africa Time -- confirm for the study site)
d1$local_timestamp <- d1$timestamp + 3*60*60
## make a column for local time
d1$local_time <- str_split_fixed(d1$local_timestamp, " ", 2)[,2]
## view the dataframe and its summary
head(d1)
summary(d1)
## inspect rows with duplicated tag/timestamp combinations (fromLast = T makes both members of each duplicate pair show up) ...
dup_inds <- sort( c( which( duplicated( d1[ , c( 'tag', 'timestamp' ) ] ) ) , which( duplicated( d1[ , c( 'tag', 'timestamp' ) ], fromLast = T ) ) ) )
d1[ dup_inds, ]
## ... then keep only the first burst for each tag/local_timestamp pair
d1 <- d1[ !duplicated( d1[ , c( 'tag', 'local_timestamp' ) ] ), ]
## assign each minute of data to a given night. A night lasts from noon to noon. First, apply a time shift so that each night is a unit, and not each day
time_shift <- d1$local_timestamp - 12*60*60
## save the date of the first night of the study (the date of the night is always the date of the evening at the beginning of that night; so the first night of the study is 2012-07-31, although the data starts on 2012-08-01, because the data on that first morning is still technically part of the data for the previous night, as a night is noon to noon)
start_date <- as.Date(min(d1$local_timestamp)- 12*60*60)
## assign night as number of nights from the start of the study, with all data before the first noon representing night 1
d1$night <- as.numeric( as.Date(time_shift) - start_date + 1 )
d1$night_date <- as.Date( d1$local_timestamp - 12*60*60 )
## show how many baboon-nights there are
nrow( unique( d1[ , c( 'tag', 'night' ) ] ) )
## check where the night changes from one night to the next to see if it is at noon
## NOTE(review): diff() returns n-1 values, so this logical index is one short of nrow(d1) and gets recycled by `[` -- the printed rows are only approximate; confirm intent
d1[(diff(c( d1$night_date )) == 1),]
## save a variable denoting the total number of minutes in the day
mins_in_day <- 60*12.5 # there are 12.5 hours between 18:00:00 and 06:30:00
missing_mins <- 45 ## this is the maximum total number of minutes of data that can be missing from a day and still have that day included in the analysis (for sleep period time and sleep based analyses; i.e. not ave_vedba)
time_gap <- 20*60 ## this is the maximum allowable time gap between two accelerometer bursts (in seconds) that can exist in a noon-to-noon period without removing this noon-to-noon period from the data
mov_window <- 9 ## this is the size of the moving window (in minutes) used in calculating the rolling median of the average VeDBA
block_size <- 30 ## duration in minutes of the blocks of continuous inactivity that will be considered sleep
gap_size <- 45 ## maximum duration between sleep blocks that will be merged
percentile_for_no_mult <- 0.90 # this is the percentile threshold of the log VeDBA within the 18:00 to 06:30 period used to classify activity vs. inactivity (without multiplying by a multiplier)
waso_block <- 3 ## this is the number of consecutive minutes of inactivity needed to classify a period as sleep. A waso_block of 1 means that anytime the value is below the threshold, the baboon in considered sleeping and anytime the value is above the threshold the baboon is considered awake
frag_block <- 2 ## this is the number of minutes of waking that need to be consecutive to be considered a wake bout during the night (other epochs of wake that do not meet this criterion will still be considered wake for WASO and wake_bouts, but not frag_wake_bouts)
dark_start <- '19:55:00' # the time at which evening astronomical twilight ends (or whatever time you want to use to consider the start of 'night')
dark_end <- '05:23:00' # the time at which morning astronomical twilight starts (or whatever time you want to use to consider the end of 'night')
## shows the time (as well as one previous time and one later time) where a minute is skipped. This shows that throughout the data, a burst at every minute is represented
sort( unique( d1$time ) ) [ which( diff( as_hms( sort( unique( d1$time ) ) ) ) != as_hms( '00:01:00' ) ) + -1:1 ]
## again confirms that every minute is represented in the data except for one (can tell this by comparing this number to the minutes_in_day variable above)
length( unique(d1$time) )
## create a vector containing the names of each baboon
tag_names <- unique( d1$tag )
## make a copy of d1, restricted to the 18:00-06:30 window (string comparison on 'HH:MM:SS' works because the format is fixed-width). We will fill in this new dataframe with information about if the baboon was asleep in each epoch
full_dat <- d1[ d1$local_time > "18:00:00" | d1$local_time < "06:30:00", ]
full_dat$sleep_per <- NA ## binary indicating whether a row belongs to the sleep period window
full_dat$pot_sleep <- NA ## binary indicating whether the row is below the VeDBA threshold, making it a potential sleep bout. Three or more of these in a row get labeled as sleep bouts
full_dat$sleep_bouts <- NA ## binary indicating whether the row is considered sleep, based on the waso or nap requirements
full_dat$n_bursts <- NA ## the number of bursts collected in a given noon-to-noon period (to be compared to the total number of minutes in the day). This column will indicate whether the data for a given night is insufficient to calculate the sleep period (and thus: onset, waking, SPT, sleep_eff, TST, sleep_bouts -- because this depends on a percentile of bursts' log vedba, WASO, wake_bouts, summed_VeDBA, night_VeDBA_corr, dark_TST, prev_naps, prev_day_sleep)
full_dat$max_time_diff <- NA ## the maximum difference between consecutive fixes in a given noon-to-noon period. With the previous column, this column will indicate whether the data is insufficient to calculate the sleep period (and thus: onset, waking, SPT, sleep_eff, TST, WASO, wake_bouts, summed_VeDBA, night_VeDBA_corr, prev_naps )
## re-create tag_names from full_dat (redundant with the assignment above unless some tags have no data in the 18:00-06:30 window)
tag_names <- ( unique( full_dat$tag ) )
# par( mfrow = c( 4, 4 ) )
## Main classification loop. For each individual: (1) compute a per-individual
## activity threshold as the 90th percentile of the rolling-median log VeDBA
## across all nights; (2) per night, find runs of smoothed inactivity below
## that threshold, merge runs separated by < gap_size minutes, and call the
## longest merged run the sleep period (sleep_per); (3) score each epoch as
## sleep/wake (sleep_bouts) from runs of raw log VeDBA below the (optionally
## recomputed) threshold. Results are written back into full_dat in place.
## for each individual...
for( tag in tag_names ){
## subset the data to just this individual's data
id_dat <- full_dat[ full_dat$tag == tag, ]
## create a vector the nights for which this individual has data
nights <- unique( id_dat$night_date )
#if( length( nights ) > 15 ){ # unhash this if you only want to run sleep classification for individuals that have many nights of data, and thus reliable thresholds (as their thresholds are based on a percentile of their entire 18:00 - 06:30 log VeDBA data, if class_meth is set to 'percentile_thresh' )
# we are going to classify sleep based on a percentile of the individual's rolling log VeDBA for the study period, let's first find the individual's rolling log VeDBA for the full study period, and save the relevant percentile as the threshold
# create an empty vector to fill with the rolling log VeDBAs from each night
full_roll <- c()
# for each night on which this individual has data
for( night in nights ){
# subset the individual's data to this night
night_dat <- id_dat[ id_dat$night_date == night, ]
# take the rolling median of the log VeDBA
roll_log_vedba <- rollmedian( night_dat$log_vedba, mov_window, fill = NA, align = 'center' )
# add the rolling medians to the vector of the individuals rolling medians for the whole study period
full_roll <- c( full_roll, roll_log_vedba )
}
## determine the threshold activity vs. inactivity threshold based on the percentile, multiplier, and the rolling median just produced
thresh <- quantile( full_roll, percentile_for_no_mult, na.rm = T )
if( sep_thresh ){ # if we should recalculate the log VeDBA threshold from the unsmoothed log VeDBA data before using it for the epoch by epoch sleep classification...
# recalculate the threshold
unsmooth_thresh <- quantile( id_dat$log_vedba, percentile_for_no_mult, na.rm = T )
}
for( night in nights ){
## subset this individual's data to just that night
night_dat <- id_dat[ id_dat$night_date == night, ]
## create empty columns for the sleep period, potential sleep bouts, and sleep bout binary variables
night_dat$sleep_per <- NA
night_dat$pot_sleep <- NA
night_dat$sleep_bouts <- NA
## save a column of the total number of bursts for that day. This will also make it easier to remove these days from the dataframe later
night_dat$n_bursts <- nrow( night_dat )
## sort the timestamps, and book end them with the beginning and end of the night
## (note: `night` is a Date coerced to numeric by the for loop, hence the as.Date(..., origin) round trip)
sorted_times <- c( as.POSIXct( paste( as.Date( night, origin = "1970-01-01", tz = 'UTC' ), '18:00:00' ), tz = 'UTC' ), sort( night_dat$local_timestamp ), as.POSIXct( paste( as.Date( ( night + 1 ), origin = "1970-01-01", tz = 'UTC' ), '06:30:00' ), tz = 'UTC' ) )
## find the time difference in seconds between each burst
time_diffs <- as.numeric( diff( sorted_times, units = 'secs' ) )
if( length( time_diffs ) > 0 ){ ### There is one night for one baboon with only one single burst, which is why this if statement is needed
## save a column of the maximum time difference between burst for that day (this will make it easier to pull out days with insufficient data later)
night_dat$max_time_diff <- max( time_diffs )
}else{
night_dat$max_time_diff <- NA
}
### find blocks of continuous inactivity
## take the rolling median of the log VeDBA and save it as a column
roll_log_vedba <- rollmedian( night_dat$log_vedba, mov_window, fill = NA, align = 'center' )
## find the run length encoding of periods above and below the threshold
temp <- rle( as.numeric( roll_log_vedba < thresh ) )
## mark the rows that are part of runs (i.e. part of chunks that are greater than the block_size of either continuous activity or continuous inactivity )
sleep_per_runs <- as.numeric( rep( temp$lengths > block_size, times = temp$lengths ) )
## mark the rows corresponding to sleep bouts. These sleep bouts are runs of inactivity
sleep_per_sleep_bouts <- as.numeric( roll_log_vedba < thresh & sleep_per_runs == 1 )
## find when sleep bouts start and end
diffs <- diff( c(0, sleep_per_sleep_bouts ) )
## drop the first start: gaps are measured between the end of one bout and the start of the NEXT one
starts <- which( diffs == 1 ) [ -1 ]
ends <- which( diffs == -1 )
## if there are any sleep bouts...
if( length( which( diffs == 1 ) ) != 0 ){
## find the duration of the gaps between each sleep bout (the end of one sleep bout and the start of the next)
gaps <- as.numeric( night_dat$local_timestamp [ starts ] - night_dat$local_timestamp [ ends[ 1: length( starts ) ] ], units = 'mins' )
## sleep bouts separated by gaps that are shorter than that specified by gap_size will be merged. Note which of these gaps are shorter than the gap_size
inds_to_remove <- which( gaps < gap_size )
## if there are NO gaps between sleep bouts that are to be removed...
if( length( inds_to_remove ) == 0 ){
## set sleep onset index to be the start of sleep bouts
onset <- which( diffs == 1 )
## set waking index to be the end of sleep bouts
wake <- ends
}else{ ## if there ARE gaps between sleep bouts that are to be removed...
## set sleep onset index to be the start of sleep bouts that do not correspond to the gaps to be removed (because these will be within sleep periods, not a start of a new bout)
onset <- which( diffs == 1 ) [ - (inds_to_remove + 1) ]
## set waking index to be the end of sleep bouts that do not correspond to the gaps to be removed
wake <- ends [ - inds_to_remove ]
}
## determine which sleep period is the longest
per_ind <- which.max( as.numeric( night_dat$local_timestamp[ wake ] - night_dat$local_timestamp[ onset ], units = 'secs' ) )
## fill in the sleep period data frame with the sleep onset and waking time associated with the longest sleep period in the day (noon to noon)
night_dat$sleep_per <- as.numeric( night_dat$local_timestamp >= night_dat$local_timestamp[ onset[ per_ind ] ] & night_dat$local_timestamp <= night_dat$local_timestamp[ wake[ per_ind ] ] )
}else{ ## if there aren't any sleep bouts, record all rows as a 0 in the sleep_per column
night_dat$sleep_per <- 0
}
# I was including the possibility of taking another rolling median here of the log VeDBA to determine the epoch-by-epoch sleep-wake classification. But instead I am going to do this with the raw log VeDBA (I was doing that before anyway, as waso_window as set to 1, so I wasn't actually taking a rolling median)
# ## take the rolling median of the log VeDBA
# night_dat$roll_log_vedba <- rollmedian( night_dat$log_vedba, waso_window, fill = NA, align = 'center' )
#
## epoch-by-epoch sleep/wake scoring from the RAW log VeDBA (note the run-length
## comparison uses >= waso_block here, vs. the strict > block_size above)
if( sep_thresh ){
night_dat$pot_sleep <- as.numeric( night_dat$log_vedba < unsmooth_thresh )
## find the run length encoding of periods above and below the threshold
temp <- rle( as.numeric( night_dat$log_vedba < unsmooth_thresh ) )
## mark the rows that are part of runs (i.e. part of chunks that are greater than the block_size of either continuous activity or continuous inactivity )
runs <- as.numeric( rep( temp$lengths >= waso_block, times = temp$lengths ) )
## mark the rows corresponding to sleep bouts. These sleep bouts are runs of inactivity
sleep_bouts <- as.numeric( night_dat$log_vedba < unsmooth_thresh & runs == 1 )
}else{
night_dat$pot_sleep <- as.numeric( night_dat$log_vedba < thresh )
## find the run length encoding of periods above and below the threshold
temp <- rle( as.numeric( night_dat$log_vedba < thresh ) )
## mark the rows that are part of runs (i.e. part of chunks that are greater than the block_size of either continuous activity or continuous inactivity )
runs <- as.numeric( rep( temp$lengths >= waso_block, times = temp$lengths ) )
## mark the rows corresponding to sleep bouts. These sleep bouts are runs of inactivity
sleep_bouts <- as.numeric( night_dat$log_vedba < thresh & runs == 1 )
}
## make which rows are part of runs of inactivity. These are the periods of sleep within and outside of the sleep period
night_dat$sleep_bouts <- sleep_bouts
### put the night data back into full_dat
full_dat[ full_dat$tag == tag & full_dat$night_date == night, ] <- night_dat
}
}
## Keep a copy of the per-minute data before the cleaning steps below
pre_clean_full <- full_dat
## all study nights, as integer night index and as dates
study_nights <- min( d1$night ):max( d1$night )
study_night_dates <- as.Date( min( d1$night_date ):max( d1$night_date ), origin = '1970-01-01' )
## Number of tracked individuals. The previous code used length( tag ), which was
## the single tag name left over from the preceding for loop (length 1); it only
## produced the right night column through data.frame()'s silent recycling, and
## would error if the loop had never run.
n_tags <- length( unique( d1$tag ) )
## one row per individual per study night, to be filled with nightly sleep metrics
sleep_per <- data.frame( tag = rep( unique( d1$tag ), each = length( study_nights ) ), night = rep( study_nights, times = n_tags ), night_date = rep( study_night_dates, times = n_tags ), total_pot_sleep = NA, total_sleep_bouts = NA, onset = NA, waking = NA, SPT = NA, WASO = NA, TST = NA, sleep_eff = NA, wake_bouts = NA, frag_wake_bouts = NA, summed_VeDBA = NA, night_VeDBA_corr = NA, ave_vedba = NA, dark_pot_sleep = NA, dark_ave_vedba = NA, max_time_diff = NA, n_bursts = NA )
## create empty vectors for the durations of sleep and wake bouts. We will fill these in to see if the distributions of the durations of these bouts later
sleep_durs <- c()
wake_durs <- c()
## Per-night summary metrics. For each individual-night this fills one row of
## sleep_per: totals of inactive/sleep epochs, onset/waking of the sleep
## period, SPT, WASO, TST, sleep efficiency, wake-bout counts, and VeDBA
## summaries; it also accumulates sleep/wake bout durations across all
## individual-nights into sleep_durs / wake_durs.
## for each individual...
for( tag in tag_names ){
## subset the data to just this individual's data
id_dat <- full_dat[ full_dat$tag == tag, ]
## create a vector the nights for which this individual has data
nights <- unique( id_dat$night_date )
## for each night on which this individual has data
for( night in nights ){
## subset this individual's data to just that night
night_dat <- id_dat[ id_dat$night_date == night, ]
## should already be in order, but just in case
night_dat <- night_dat[ order( night_dat$local_timestamp ), ]
## nightly data-quality columns and whole-night summaries (computed whether or not a sleep period was found)
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$n_bursts <- unique( night_dat$n_bursts )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$max_time_diff <- unique( night_dat$max_time_diff )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$total_pot_sleep <- sum( night_dat$pot_sleep )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$total_sleep_bouts <- sum( night_dat$sleep_bouts )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$ave_vedba <- mean( night_dat$log_vedba )
## same summaries restricted to the astronomical-darkness window (dark_start to dark_end)
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$dark_pot_sleep <- sum( night_dat$pot_sleep[ night_dat$local_time > dark_start | night_dat$local_time < dark_end ] )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$dark_ave_vedba <- mean( night_dat$log_vedba[ night_dat$local_time > dark_start | night_dat$local_time < dark_end ] )
## epochs inside the sleep period identified by the classification loop
SPT_dat <- night_dat[ night_dat$sleep_per == 1, ]
if( nrow( SPT_dat ) > 0 ){
## onset = first epoch of the sleep period; waking = last epoch
onset <- min( SPT_dat$local_timestamp )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$onset <- onset
waking <- max( SPT_dat$local_timestamp )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$waking <- waking
## sleep period time in minutes (inclusive of both endpoint epochs, hence + 1)
SPT <- as.numeric( waking - onset, units = 'mins' ) + 1
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$SPT <- SPT
## wake after sleep onset: wake epochs inside the sleep period
WASO <- sum( SPT_dat$sleep_bouts == 0 )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$WASO <- WASO
## total sleep time: sleep epochs inside the sleep period
TST <- sum( SPT_dat$sleep_bouts == 1 )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$TST <- TST
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$sleep_eff <- TST/ nrow( SPT_dat )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$summed_VeDBA <- sum( SPT_dat$log_vedba )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$night_VeDBA_corr <- sum( SPT_dat$log_vedba ) / SPT
## fragmentation wake bouts: wake runs of at least frag_block consecutive minutes
temp <- rle( SPT_dat$sleep_bouts )
runs <- as.numeric( rep( temp$lengths >= frag_block, times = temp$lengths ) )
frag_wake_bouts <- as.numeric( SPT_dat$sleep_bouts == 0 & runs == 1 )
diffs <- diff( c( 1, frag_wake_bouts ) )
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$frag_wake_bouts <- sum( diffs == 1 )
## find the distinct sleep bouts (i.e. epochs of sleep separated by waking)
diffs <- diff( c( 0, SPT_dat$sleep_bouts ) )
## save the number of distinct wake bouts
sleep_per[ sleep_per$tag == tag & sleep_per$night_date == night, ]$wake_bouts <- sum( diffs == -1 )
## find durations of sleep and wake bouts
temp <- rle( SPT_dat$sleep_bouts )
## add the duration of sleep bouts to the sleep bout duration vector
sleep_durs <- c( sleep_durs, temp$lengths[ temp$values == 1 ] )
## add the duration of wake bouts to the wake bout duration vector
wake_durs <- c( wake_durs, temp$lengths[ temp$values == 0 ] )
}
}
}
## number of baboon-nights with a computed sleep period
sum( !is.na( sleep_per$SPT ) )
### check number of nights for which sleep period was calculated and inspect those for which no sleep period was calculated ###
sleep_per_nona <- sleep_per[ !is.na( sleep_per$SPT ), ]
nrow( sleep_per_nona )
## baboon-nights present in d1 but missing from the sleep-period results
left_out <- unique( d1[ , c( 'tag', 'night' ) ] )[ !paste( unique( d1[ , c( 'tag', 'night' ) ] )$tag, unique( d1[ , c( 'tag', 'night' ) ] )$night ) %in% paste( sleep_per_nona$tag, sleep_per_nona$night ), ]
## visually inspect each left-out baboon-night
for( i in 1:nrow( left_out ) ){
tag_night_dat <- d1[ d1$tag == left_out$tag[ i ] & d1$night == left_out$night[ i ], ]
plot( tag_night_dat$local_timestamp, tag_night_dat$log_vedba )
}
nrow( left_out )
## spot check of one particular baboon-night
tag_night_dat <- d1[ d1$tag == 2428 & d1$night == 5, ]
plot( tag_night_dat$local_timestamp, tag_night_dat$log_vedba )
############# Cleaning the dataframes of data on nights with insufficient data ################
# how many baboon-nights did we have ACC data for
nrow( unique( d1[ , c( 'tag','night' ) ] ) ) # 649
nrow( unique( full_dat[ , c( 'tag','night' ) ] ) ) # 646
sum( !is.na( sleep_per$dark_ave_vedba ) ) # 646
## remove all these variable from the night, and from the days on the early side of the noon-to-noon period if the noon-to-noon period is missing a lot of data (because then we might not be able to reliably calculate the sleep VeDBA threshold, and a lot of epochs might be missing, which would skew TST and such)
nrow( unique( full_dat[ full_dat$n_bursts < ( mins_in_day - missing_mins ), c( 'tag', 'night') ] ) ) # 74 baboon-nights removed from this cleaning step
sum( sleep_per$n_bursts < ( mins_in_day - missing_mins ) & !is.na( sleep_per$n_bursts ) ) # confirmed 74 removed from this cleaning step
sleep_per[ sleep_per$n_bursts < ( mins_in_day - missing_mins ) & !is.na( sleep_per$n_bursts ), c( 'onset', 'waking', 'SPT', 'sleep_eff', 'TST', 'WASO', 'wake_bouts', 'summed_VeDBA', 'night_VeDBA_corr', 'ave_vedba', 'total_pot_sleep', 'total_sleep_bouts', 'dark_ave_vedba', 'dark_pot_sleep' ) ] <- NA
sleep_per_nona <- sleep_per[ !is.na( sleep_per$SPT ), ]
nrow( sleep_per_nona )
#### this next cleaning step below can be deleted because it doesn't actually remove anything
## remove all these variable from the night, and from the days on the early side of the noon-to-noon period (only for those depending on SPT) if the noon-to-noon period has large gaps of missing data (because then we can't reliably calculate the SPT)
nrow( unique( full_dat[ full_dat$max_time_diff > time_gap, c( 'tag', 'night') ] ) ) # 0 baboon-nights removed from this cleaning step
sum( sleep_per$max_time_diff > time_gap & !is.na( sleep_per$max_time_diff ) ) # 0 baboon-nights removed from this cleaning step
sleep_per[ sleep_per$max_time_diff > time_gap & !is.na( sleep_per$max_time_diff ), c( 'onset', 'waking', 'SPT', 'sleep_eff', 'TST', 'WASO', 'wake_bouts', 'summed_VeDBA', 'ave_vedba', 'total_pot_sleep', 'total_sleep_bouts', 'dark_ave_vedba', 'dark_pot_sleep' ) ] <- NA
sleep_per_nona <- sleep_per[ !is.na( sleep_per$SPT ), ]
nrow( sleep_per_nona )
## remove data for sleep period and sleep bouts on days when there is a lot of missing data, because we cannot reliably calculate the sleep VeDBA threshold and there may be a lot of missing epochs
full_dat[ full_dat$n_bursts < ( mins_in_day - missing_mins ), c( 'sleep_per' ) ] <- NA
## remove data for sleep period on days when there are large gaps of missing data, because we can't reliably calculate the SPT with gaps in the data
full_dat[ full_dat$max_time_diff > time_gap, 'sleep_per' ] <- NA
## how many baboon-nights do we have ACC data for after cleaning
sum( !is.na( sleep_per$total_sleep_bouts ) ) # 572 baboon-nights
## reformat sleep timestamp
sleep_per$onset <- as.POSIXct( sleep_per$onset, origin = "1970-01-01 00:00:00", tz = "UTC" )
## reformat waking timestamp
sleep_per$waking <- as.POSIXct( sleep_per$waking, origin = "1970-01-01 00:00:00", tz = "UTC" )
## make columns for just the time part of the sleep onset and waking timestamps
sleep_per$onset_time <- as_hms( sleep_per$onset )
sleep_per$waking_time <- as_hms( sleep_per$waking )
## write the results to disk, then immediately read them back in (the CSV
## round trip normalizes column classes, e.g. timestamps become character)
write.csv( full_dat, paste0( 'DATA/sleep_analysis_data/full_dat_', class_meth, '_sep_thresh_', sep_thresh, '.csv' ), row.names = F )
write.csv( sleep_per, paste0( 'DATA/sleep_analysis_data/sleep_per_', class_meth, '_sep_thresh_', sep_thresh, '.csv' ), row.names = F )
full_dat <- read.csv( paste0( 'DATA/sleep_analysis_data/full_dat_', class_meth, '_sep_thresh_', sep_thresh, '.csv' ) )
sleep_per <- read.csv( paste0( 'DATA/sleep_analysis_data/sleep_per_', class_meth, '_sep_thresh_', sep_thresh, '.csv' ) )
full_dat$local_timestamp <- as.POSIXct( full_dat$local_timestamp, tz = 'UTC' )
###### accuracy of sleep classification #######
## ground-truth behavioral labels from per-second thermal focal follows
sec_focal_dat <- as.data.frame( fread( 'DATA/thermal_focal_follows/sec_focal_dat.csv' ) )
## shift by 16 s before matching to the focal data (presumably a fixed clock offset between devices -- confirm)
full_dat$corr_local_timestamp <- full_dat$local_timestamp - 16
labeled_full_dat <- merge( x = full_dat, y = sec_focal_dat, by.x = c( 'tag', 'corr_local_timestamp' ), by.y = c( 'tag', 'local_timestamp' ), all.x = T, all.y = F, sort = F )
labeled_full_dat$classified_sleep <- ifelse( labeled_full_dat$sleep_behavior == 'Unalert', 1, 0 )
## keep only epochs that have both an accelerometer classification and a behavioral label
valid_dat <- labeled_full_dat[ !is.na( labeled_full_dat$sleep_bouts ) & !is.na( labeled_full_dat$sleep_behavior ), ]
unique( valid_dat$tag )
length( unique( valid_dat$tag ) )
nrow( valid_dat )
## confusion matrix: rows = classifier (0 = wake, 1 = sleep), columns = observed behavior
confusion_matrix <- table( valid_dat$sleep_bouts, valid_dat$sleep_behavior )
## accuracy = (wake classified while Active or Alert + sleep classified while Unalert) / all labeled epochs
accur <- ( confusion_matrix[ '0', 'Active' ] + confusion_matrix[ '0', 'Alert' ] + confusion_matrix[ '1', 'Unalert' ] ) / sum( confusion_matrix )
print( accur )
print( confusion_matrix )
################### Visualizing the sleep period and sleep/wake classification ########################## I think this has to be run after the stuff above
## sleep_per_func: visualize one individual's night of accelerometry and
## extract its main sleep period.
##
## For a given tag (individual) and night, plots the minute-level log VeDBA
## trace from noon to noon, overlays a rolling-median smoother and an
## activity/inactivity threshold, marks the onset and waking times of the
## longest detected sleep period, and (optionally) shades wake epochs that
## occur inside that sleep period.
##
## Arguments:
##   tag, night - used to subset the global data frame `full_dat`
##   m_m, t_g, m_w, p_f_n_m, b_s, g_s, w_b - abbreviated tuning parameters
##       defaulting to the globals missing_mins, time_gap, mov_window,
##       percentile_for_no_mult, block_size, gap_size, waso_block.
##       NOTE(review): the body reads the GLOBAL names directly, so any
##       values passed through these abbreviated arguments are silently
##       ignored -- confirm whether the body should use m_m, t_g, etc.
##   title     - if TRUE, title the plot with "<tag> <night>"
##   x_axis    - if TRUE, draw an hourly time axis labelled noon-to-noon
##   plot_waso - if TRUE, shade wake epochs within the sleep period
##   las, ...  - forwarded to plot()
##
## Depends on objects/helpers defined elsewhere in this file:
## full_dat, mins_in_day, sleep_per, ts_func(), as_hms(), rollmedian() (zoo),
## transp().
##
## Returns c(onset, wake) local timestamps of the longest sleep period, or
## NULL (implicitly, by falling off the end) when the night has too few
## bursts, too large a data gap, or no sleep bouts.
sleep_per_func <- function( tag, night, m_m = missing_mins, t_g = time_gap, m_w = mov_window, p_f_n_m = percentile_for_no_mult, b_s = block_size, g_s = gap_size, title = T, x_axis = T, plot_waso = T, w_b = waso_block, las = 1, ... ){
## save a variable denoting the total number of minutes in the day
## (copies the global into the local scope; effectively a no-op)
mins_in_day <- mins_in_day
## subset the data to the given tag on the given night
night_dat <- full_dat[ full_dat$tag == tag & full_dat$night == night, ]
## sort the timestamps (they are probably already sorted);
## 18:00 and 06:30 sentinels are appended so edge gaps are measured too
night_times <- as.numeric( as_hms( c( night_dat$local_time, '18:00:00', '06:30:00' ) ) )
## shift pre-noon times by 24 h so the noon-to-noon night is monotone
night_times[ night_times < 12*60*60 ] <- night_times[ night_times < 12*60*60 ] + 24*60*60
sorted_times <- sort( night_times )
## find the time difference in seconds between each burst
time_diffs <- as.numeric( diff( sorted_times ) )
## if there is more than a single burst...
if(length(time_diffs) != 0){
## if the number of bursts exceed the minimum required number of bursts in a night (determined by missing mins) and if the gaps in the data are within the allowable gap size (determined by time_gap)...
if( nrow( night_dat ) > ( mins_in_day - missing_mins ) & max( time_diffs ) < time_gap ){
## take the rolling median of the log VeDBA and save it as a column
night_dat$roll_log_vedba <- rollmedian( night_dat$log_vedba, mov_window, fill = NA, align = 'center' )
## determine the threshold activity vs. inactivity threshold based on the percentile and the rolling median just produced
thresh <- quantile( night_dat$roll_log_vedba, percentile_for_no_mult, na.rm = T )
## put the rows of the dataframe in order from noon to noon (they should already be in this order, so this should be redundant)
night_dat <- night_dat[ order( ts_func( night_dat$local_time ) ), ]
## turn the times into numerical elements for plotting
ts_time <- ts_func( night_dat$local_time )
if( title == F ){
## plot the log VeDBA
#plot( ts_time, night_dat$log_vedba, type = 'l', xlab = 'Time', ylab = '', xaxt = 'n', las = las )
plot( ts_time, night_dat$log_vedba, type = 'l', xlab = 'Time', ylab = '', xaxt = 'n', las = las, ylim = c( -2, 10 ), ... )
}else{
## plot the log VeDBA with a "<tag> <night>" title
plot( ts_time, night_dat$log_vedba, type = 'l', xlab = 'Time', ylab = '', main = paste( tag, night ), xaxt = 'n', las = las, ylim = c( -2, 10 ), ... )
}
if( x_axis == T ){
## hourly tick marks, labelled from 12:00 through midnight to 12:00
axis( 1, at = seq( 0, 60*24*60, 60*60), labels = c( as_hms( seq( 12*60*60, 60*23*60, 60*60) ), as_hms( seq( 0, 60*12*60, 60*60) ) ) )
}
title( ylab = 'log VeDBA', line = 3.9 )
## plot the rolling median of the log VeDBA
lines( ts_time, night_dat$roll_log_vedba, col = 'red')
## plot the threshold of the log VeDBA
abline( h = thresh, col = 'blue' )
### find blocks of continuous inactivity
## find the run length encoding of periods above and below the threshold
temp <- rle(as.numeric( night_dat$roll_log_vedba < thresh ) )
## mark the rows that are part of runs (i.e. part of chunks that are greater than the block_size of either continuous activity or continuous inactivity )
night_dat$runs <- as.numeric( rep( temp$lengths > block_size, times = temp$lengths ) )
## mark the rows corresponding to sleep bouts. These sleep bouts are runs of inactivity
night_dat$sleep_bouts <- as.numeric( night_dat$roll_log_vedba < thresh & night_dat$runs == 1 )
## find when sleep bouts start and end (+1 = bout start, -1 = bout end)
diffs <- diff( c(0, night_dat$sleep_bouts ) )
## NOTE(review): the [-1] drops the first detected bout start -- confirm
## this is the intended behavior (e.g. to ignore a bout already in
## progress at noon)
starts <- which( diffs == 1 ) [ -1 ]
ends <- which( diffs == -1 )
## if there are any sleep bouts...
if( length( which( diffs == 1 ) ) != 0){
## find the duration of the gaps between each sleep bout (the end of one sleep bout and the start of the next)
gaps <- as.numeric( night_dat$local_timestamp [ starts ] - night_dat$local_timestamp [ ends[ 1: length( starts ) ] ], units = 'mins' )
## sleep bouts separated by gaps that are shorter than that specified by gap_size will be merged. Note which of these gaps are shorter than the gap_size
inds_to_remove <- which( gaps < gap_size )
## if there are NO gaps between sleep bouts that are to be removed...
if( length( inds_to_remove ) == 0 ){
## set sleep onset index to be the start of sleep bouts
onset <- which( diffs == 1 )
## set waking index to be the end of sleep bouts
wake <- ends
}else{ ## if there ARE gaps between sleep bouts that are to be removed...
## set sleep onset index to be the start of sleep bouts that do not correspond to the gaps to be removed (because these will be within sleep periods, not a start of a new bout)
onset <- which( diffs == 1 ) [ - (inds_to_remove + 1) ]
## set waking index to be the end of sleep bouts that do not correspond to the gaps to be removed
wake <- ends [ - inds_to_remove ]
}
## determine which sleep period is the longest
per_ind <- which.max( as.numeric( night_dat$local_timestamp[ wake ] - night_dat$local_timestamp[ onset ], units = 'secs' ) )
## plot the sleep onset time and waking time on the log VeDBA plot
abline( v = c( ts_time[ onset[ per_ind ] ], ts_time[ wake[ per_ind ] ] ), col = 'orange', lty = 3, lwd = 4 )
## if you also want to plot WASO (this says WASO but it actually should plot all wake epochs)
if( plot_waso == T ){
## calculate the threshold for sleeping and waking within the sleep period
## (based on the raw log VeDBA rather than the rolling median)
SPT_thresh <- quantile( night_dat$log_vedba, percentile_for_no_mult, na.rm = T)
## plot the threshold
abline( h = SPT_thresh, col = 'blue', lty = 2, lwd = 2 )
## find blocks of continuous inactivity
## calcuate the run length encoding
temp <- rle(as.numeric( night_dat$log_vedba < SPT_thresh ) )
## mark the runs of activity or inactivity
night_dat$night_runs <- as.numeric( rep( temp$lengths >= waso_block, times = temp$lengths ) )
## mark the runs of inactivity as sleep bouts
night_dat$night_sleep_bouts <- as.numeric( night_dat$log_vedba < SPT_thresh & night_dat$night_runs == 1 )
## find the starts and ends of waking bouts (-1 = wake start, +1 = wake end)
diffs <- diff( c(1, night_dat$night_sleep_bouts ) )
starts <- which( diffs == -1 )
## add back in a "- 1" at the end of this line if you wish for the start and end times to be accurate. Now I just want to make it so the polygons show up even without a border
ends <- which( diffs == 1 )
## if there are waking bouts
if( length( which( diffs == -1 ) ) != 0){
## if the last waking bout never ends...
if( length( starts ) != length( ends ) ){
## make it end at the end of the sleep period
ends <- c(ends, length( diffs) )
}
## save the start times and end times of waking bouts
starts <- ts_func( night_dat$local_time[ starts ] )
ends <- ts_func( night_dat$local_time[ ends ] )
## plot a translucent polygon for each distinct waking bout
for( n in 1:length( starts ) ){
polygon( x = c(starts[ n ], ends[ n ], ends[ n ], starts[ n ], starts[ n ] ), y = c( 0, 0, 10, 10, 0), col = transp('blue', .25), border = NA )
}
}
}
## redraw the raw trace on top of the shaded polygons
lines( ts_time, night_dat$log_vedba, col = 'black')
## fill in the sleep period data frame with the sleep onset and waking time associated with the longest sleep period in the day (noon to noon)
return( c( night_dat$local_timestamp[ onset[ per_ind ] ], night_dat$local_timestamp[ wake[ per_ind ] ] ) )
}
}
}
}
## Generate the diagnostic plot for every tag/night combination in sleep_per.
## seq_len() is used instead of 1:nrow() so the loop is a no-op (rather than
## an error) when sleep_per has zero rows.
## NOTE(review): the onset/wake timestamps returned by sleep_per_func() are
## discarded here -- confirm whether they should be written back into
## sleep_per as suggested by the function's final comment.
for ( i in seq_len( nrow( sleep_per ) ) ) {
  sleep_per_func( tag = sleep_per$tag[ i ], night = sleep_per$night[ i ] )
}
|
6902f8c9a412543ea338ee96a2992ca4ad4f18c8
|
0d6a33bf526efa8fe4a5747f5e96f0b2a42465c2
|
/data/Monserud1999/dataManipulate.R
|
7da2d3d8f9d1c9cd8ce04d08c3bf3cf67a728ca0
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
jscamac/baad
|
1d57f9ba09553fac43662411e1512c0b23e2f9a6
|
55b352123ab0bc8b55f3203d5e44bf16c23979a9
|
refs/heads/master
| 2021-01-12T22:34:51.709989
| 2016-04-19T02:57:11
| 2016-04-19T02:57:11
| 27,855,898
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
dataManipulate.R
|
## Clean up a raw Monserud (1999) record set.
##
## @param raw A data.frame with (at least) the columns `species`, `h_t`
##   (total height) and `h_c` (recorded as crown length).
## @return The same data.frame with the mis-labelled `species` column renamed
##   to `wrong.sp` and `h_c` converted to height-to-crown-base (h_t - h_c).
manipulate <- function(raw) {
  # the species column is known to be mislabelled in this source
  colnames(raw)[colnames(raw) == "species"] <- "wrong.sp"
  # h_c was crown length; convert to height to crown base
  raw$h_c <- raw$h_t - raw$h_c
  raw
}
|
58621c62c1e9ac8694067c09896366fbb206ad7b
|
143a357cb6142ae963437d902db30a6e1ec005d4
|
/testing.R
|
ddebaa8dbbea0c42a23ee9a2bee5fb5b12f57029
|
[
"MIT"
] |
permissive
|
wStockhausen/tmbTCSAM02
|
424c81e1a140e99894d3b9483f92e8564ac9d79d
|
258a3e28f4a8e26acde34e6bf4c68ccae71a541d
|
refs/heads/master
| 2021-07-30T03:45:46.800217
| 2021-07-17T00:24:59
| 2021-07-17T00:24:59
| 210,910,086
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,842
|
r
|
testing.R
|
#----------testing-----------
# Ad-hoc smoke-test script for the tmbTCSAM02 package: sources the package's
# R files directly (instead of loading the installed package) and exercises
# the readers for a model configuration and all of its referenced input files.
#require(tmbTCSAM02);
# source every file under ./R so the readTCSAM_* functions are in scope
fns = list.files(path="./R",pattern="*.R",full.names=TRUE);
for (fn in fns) source(fn);
setGlobals();
ls(all.names=TRUE);
# model-configuration input file to test against (older path kept for reference)
#fn_mc = "./inst/exampleOld/M21.13.MCI.inp";
fn_mc = "./inst/example21_13/M21.13.MCI.inp";
mc = readTCSAM_ModelConfiguration(fn_mc);
#--read fn_datasets
# dataset paths inside the configuration are relative to the config file's folder
topDir = dirname(fn_mc);
cat(paste0("--topDir = '",topDir,"'\n"));
ds = readTCSAM_DatasetNames(file.path(topDir,mc$fn_datasets));
#--read datasets
#----read biological data file
bd = readTCSAM_BioData(file.path(topDir,ds[["fn_BioInfo"]]));
#----read fishery datasets (one file per fishery fleet)
fds = list();
if (ds$nFshs>0) for (i in 1:ds$nFshs) {
fnp = ds[["fn_Fshs"]][i];
fds[[fnp]] = readTCSAM_FleetData(file.path(topDir,fnp),verbose=TRUE);
}
#----read survey datasets (one file per survey fleet)
sds = list();
if (ds$nSrvs>0) for (i in 1:ds$nSrvs) {
fnp = ds[["fn_Srvs"]][i];
sds[[fnp]] = readTCSAM_FleetData(file.path(topDir,fnp));
}
#----read molt increment data files
mids = list();
if (ds$nMIDs>0) for (i in 1:ds$nMIDs) {
fnp = ds[["fn_MIDs"]][i];
mids[[fnp]] = readTCSAM_GrowthData(file.path(topDir,fnp));
}
#----read chela height data files (disabled; loop below still iterates the
#----molt-increment file list -- would need its own fn_CHDs entries)
# mids = list();
# if (ds$nMIDs>0) for (i in 1:ds$nMIDs) {
# fnp = ds[["fn_MIDs"]][i];
# mids[[fnp]] = readTCSAM_ChelaHeightData(file.path(topDir,fnp));
# }
#--read maturity ogive data files
mods = list();
if (ds$nMODs>0) for (i in 1:ds$nMODs) {
fnp = ds[["fn_MODs"]][i];
mods[[fnp]] = readTCSAM_MaturityOgiveData(file.path(topDir,fnp));
}
#--read fn_paramsInfo
# fall back to "./" when fn (left over from the sourcing loop above) is unset
topDir = "./";
if (exists("fn")) topDir = dirname(fn);
cat(paste0("--topDir = '",topDir,"'\n"));
paramsInfo = readTCSAM_ModelParametersInfo(file.path(topDir,mc$fn_paramsInfo));
# NOTE(review): 'mc$mc$fn_paramsInfo' below looks like a typo for
# 'mc$fn_paramsInfo' (cf. the previous line) -- confirm against the structure
# returned by readTCSAM_ModelConfiguration().
tst = readTCSAM_ModelParametersInfo(file.path(dirname(fn_mc),mc$mc$fn_paramsInfo));
|
cbd6c74d883f23067f1f69d7d2b9e0a4bd0fe68d
|
b19c1384616de8b9791a64006e4018ed7693daa6
|
/RFrontEndSolution/Tests Repo/Rscripts All (163 files)/descriptives.R
|
207ba091a0ed5b1815d2295f23f01857216a0e93
|
[
"MIT"
] |
permissive
|
AlexandrosPlessias/CompilerFrontEndForRLanguage
|
1e04c031f83cac87f1eadc5d2c373f0b641bfb9e
|
71e3e60476f6f83b05cc97c625265edbde086341
|
refs/heads/master
| 2022-12-21T12:23:14.412959
| 2020-09-25T10:27:16
| 2020-09-25T10:27:16
| 276,627,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 704
|
r
|
descriptives.R
|
# Calculate descriptive statistics on data
# install.packages("psych") # Remove "#" for first run
library(psych) # Package of functions applicable to Psychology
# Read the example reaction-time data set (tab 2-1) from the textbook website.
datapsych <- read.table("http://www.uvm.edu/~dhowell/methods8/DataFiles/Tab2-1.dat", header = TRUE)
### Alternative ways to read data
#datapsych <- read.table("C:/Users/Dave/Documents/Webs/methods8/DataFiles/Tab2-1.dat")
# or, combine the next two lines
#setwd("C:/Users/Dave/Documents/webs/Methods8/DataFiles/")
#datapsych <- read.table("Tab2-1.dat")
head(datapsych)
# Reference the column directly with `$` rather than attach()-ing the data
# frame: attach() masks names on the search path and was never detach()-ed.
# Type ?describe to see the options for the describe command
output <- describe(datapsych$RxTime)
print(output)
hist(datapsych$RxTime)
|
c4498b1689081e5e2864ebfaef12af367fc97e1a
|
6be70ffdb95ed626d05b5ef598b842c5864bac4d
|
/old/house_party_calls_replication_hybrid.R
|
11ebf31d67be8a46ea7bf370a2712e67e947c086
|
[] |
no_license
|
Hershberger/partycalls
|
c4f7a539cacd3120bf6b0bfade327f269898105a
|
8d9dc31dd3136eae384a8503ba71832c78139870
|
refs/heads/master
| 2021-09-22T17:54:29.106667
| 2018-09-12T21:16:56
| 2018-09-12T21:16:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
house_party_calls_replication_hybrid.R
|
library(partycalls)

# Reproducibly re-code party calls for the 93rd through 109th Congresses
# using the hybrid coding rule, then save the result for later comparison.
set.seed(1209003941)
house_party_calls <- lapply(
  93:109,
  function(congress) code_party_calls_by_congress_number(congress, hybrid = TRUE)
)
save(house_party_calls,
     file = "test_data/house_party_calls_replication_hybrid.RData")
|
e96a80891f6c1d1759c0c694a2b96644860df0d0
|
8b53088df90d1d522476c8aa68d9a13e5d834501
|
/codes/script.R
|
2effce651bf6831ed3cd183b1f5f773519c687a5
|
[] |
no_license
|
ahcm-linux/P2-R_COVID-19_2021
|
208d025e38b290fc0dbfc68fcea0c33f5fbe1887
|
c977128482f7ace8e9a7ddd294100d2bda71d0ea
|
refs/heads/main
| 2023-05-27T00:25:53.894900
| 2021-06-09T01:53:33
| 2021-06-09T01:53:33
| 374,804,061
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 29,275
|
r
|
script.R
|
###############################################################################################################
# Data analysis of COVID-19 published at: (article submitted waiting for publication)
# date of creation: 06/07/2021 (date in US format)
# R version: 4.0.3
# script name: script.R
# aim: data analysis
# input: files from the folder 'data'
# output: files saved in the subdirectories of folder 'outputs'
# external sources: files 'functions.R' and 'packages.R' from folder 'codes'
###############################################################################################################
# PARAMETERS --------------------------------------------------------------------------------------------------
# choose colors (change these parameters to get figures with the desired colors)
color_male = "deepskyblue2" # color for representing Gender = Male
color_female = "chocolate2" # color for representing Gender = Female
color_alive = "chartreuse2" # color for representing Outcome = Alive
color_dead = "red2" # color for representing Outcome = Dead
# EXTERNAL SOURCES --------------------------------------------------------------------------------------------
# install and load packages
base::source("codes/packages.R")
# load subroutines (e.g. g_legend() and generic.group.boot.REB.0.2() used below)
base::source("codes/functions.R")
# DATA --------------------------------------------------------------------------------------------------------
# load the tab-separated long- and wide-format datasets
data_long <- utils::read.csv('data/data_long.csv', header = TRUE, sep = '\t')
data_wide <- utils::read.csv('data/data_wide.csv', header = TRUE, sep = '\t')
# analyte names as recorded in the data (Portuguese); this order drives the
# factor/facet ordering used in the plots below
analytes_levels <- base::c("Eritrocitos", "Plaquetas", "Monocitos", "Neutrofilos", "Leucocitos")
# full analyte column names as they appear in the long-format data
Analytes <- base::paste0("Hemograma_X_", analytes_levels)
# Portuguese-to-English display labels, keyed by long-format column name and
# by wide-format (bare) analyte name respectively
analytes_labels_long <- base::c("Hemograma_X_Plaquetas" = "Platelets", "Hemograma_X_Monocitos" = "Monocytes", "Hemograma_X_Eritrocitos" = "Erythrocytes", "Hemograma_X_Neutrofilos" = "Neutrophils", "Hemograma_X_Leucocitos" = "Leukocytes")
analytes_labels_wide <- base::c("Plaquetas" = "Platelets", "Monocitos" = "Monocytes", "Eritrocitos" = "Erythrocytes", "Neutrofilos" = "Neutrophils", "Leucocitos" = "Leukocytes")
# DESCRIPTIVE ANALYSIS ----------------------------------------------------------------------------------------
## Data -------------------------------------------------------------------------------------------------------
# hsl data, long format: keep HSL hospital records with a known outcome,
# restricted to the five blood-count analytes defined above
sub_hsl_long <- base::subset(data_long, hospital == 'HSL' & !base::is.na(de_desfecho) & exame_analito %in% Analytes)
# recode the outcome text: rows matching 'óbito' (death) become 'Dead',
# every other outcome becomes 'Alive'
obito <- base::grep('óbito', sub_hsl_long$de_desfecho)
sub_hsl_long$de_desfecho[obito] <- 'Dead'
sub_hsl_long$de_desfecho[base::setdiff(1:base::nrow(sub_hsl_long), obito)] <- 'Alive'
# order the analyte factor so facets appear in a fixed order
sub_hsl_long$exame_analito <- base::factor(sub_hsl_long$exame_analito, levels = base::paste0("Hemograma_X_", analytes_levels), ordered = TRUE)
# per patient, flag the last observed interval as the time of death when the
# patient's outcome is 'Dead' (0 otherwise)
sub_hsl_long <- plyr::ddply(sub_hsl_long, ~ id_paciente, dplyr::mutate, time_of_death = base::ifelse(midpoint_interval == base::max(midpoint_interval) & de_desfecho == "Dead", 1, 0))
## Plots ------------------------------------------------------------------------------------------------------
# shared ggplot theme for all descriptive figures (light theme, legend on top,
# no minor grid lines, transparent facet strips)
custom_theme <- ggplot2::theme_light(base_size = 12) +
ggplot2::theme(panel.grid.minor.x = ggplot2::element_blank(),
panel.grid.minor.y = ggplot2::element_blank(),
legend.position = "top",
legend.box.background = ggplot2::element_rect(colour = "black", fill = NA),
legend.key = ggplot2::element_rect(colour = NA, fill = NA),
strip.text = ggplot2::element_text(colour = "black", hjust = 0, size = 14),
strip.background = ggplot2::element_rect(fill = NA, colour = NA))
# (Alive) Male and Female average trajectories: individual patient traces in
# grey, per-gender median trajectory as a thick line, interquartile range as
# a shaded ribbon; one facet per analyte
plot_gender_trajectories <- base::subset(sub_hsl_long, de_desfecho == "Alive") %>%
dplyr::group_by(gender, midpoint_interval, exame_analito) %>%
dplyr::mutate(mean_traj = stats::median(median_obs)) %>%
dplyr::mutate(lwr = stats::quantile(median_obs, probs = 0.25)) %>%
dplyr::mutate(upr = stats::quantile(median_obs, probs = 0.75)) %>%
ggplot2::ggplot(ggplot2::aes(x = midpoint_interval, y = median_obs, group = id_paciente)) +
ggplot2::geom_ribbon(ggplot2::aes(ymin = lwr, ymax = upr, fill = gender), alpha = 0.05, show.legend = FALSE) +
ggplot2::geom_point(alpha = 0.5, colour = grDevices::grey(0.2)) +
ggplot2::geom_line(alpha = 0.3, colour = grDevices::grey(0.2)) +
ggplot2::geom_line(ggplot2::aes(y = mean_traj, group = gender, colour = gender), size = 2, alpha = 0.8) +
ggplot2::facet_wrap(~ exame_analito, scales = "free", nrow = 2, labeller = ggplot2::as_labeller(analytes_labels_long)) +
ggplot2::scale_colour_manual("Gender: ", values = base::c("M" = color_male, "F" = color_female), labels = base::c("M" = "Male", "F" = "Female")) +
ggplot2::scale_fill_manual(values = base::c("M" = color_male, "F" = color_female)) +
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
ggplot2::labs(x = "Hospitalization time", y = "Count/ul") +
custom_theme
# Dead and Alive average trajectories: same layout as above but grouped by
# outcome instead of gender, including all patients
plot_outcome_trajectories <- sub_hsl_long %>%
dplyr::group_by(de_desfecho, midpoint_interval, exame_analito) %>%
dplyr::mutate(mean_traj = stats::median(median_obs)) %>%
dplyr::mutate(lwr = stats::quantile(median_obs, probs = 0.25)) %>%
dplyr::mutate(upr = stats::quantile(median_obs, probs = 0.75)) %>%
ggplot2::ggplot(ggplot2::aes(x = midpoint_interval, y = median_obs, group = id_paciente)) +
ggplot2::geom_ribbon(ggplot2::aes(ymin = lwr, ymax = upr, fill = de_desfecho), alpha = 0.05, show.legend = FALSE) +
ggplot2::geom_point(alpha = 0.5, colour = grDevices::grey(0.2)) +
ggplot2::geom_line(alpha = 0.3, colour = grDevices::grey(0.2)) +
ggplot2::geom_line(ggplot2::aes(y = mean_traj, group = de_desfecho, colour = de_desfecho), size = 2, alpha = 0.8) +
ggplot2::facet_wrap(~ exame_analito, scales = "free_y", nrow = 2, labeller = ggplot2::as_labeller(analytes_labels_long)) +
ggplot2::scale_colour_manual("Outcome: ", values = base::c("Alive" = color_alive, "Dead" = color_dead)) +
ggplot2::scale_fill_manual(values = base::c("Alive" = color_alive, "Dead" = color_dead)) +
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
ggplot2::labs(x = "Hospitalization time", y = "Count/ul") +
custom_theme
# Percentages and Sum trajectories: per hospitalization time, the proportion
# and count of patients by gender and by outcome (deaths via time_of_death).
# The Platelets subset is used only to get one row per patient per time.
df_perc_gender <- sub_hsl_long %>% base::subset(exame_analito == "Hemograma_X_Plaquetas") %>% dplyr::select(dplyr::starts_with(base::c("gender", "midpoint_interval", "time_of_death")))
df_perc_outcome <- sub_hsl_long %>% base::subset(exame_analito == "Hemograma_X_Plaquetas") %>% dplyr::select(dplyr::starts_with(base::c("de_desfecho", "midpoint_interval", "time_of_death")))
base::colnames(df_perc_gender) <- base::colnames(df_perc_outcome) <- base::c("variable", "time", "death")
df_perc_male <- plyr::ddply(df_perc_gender, ~ time, plyr::summarise, perc_variable = base::sum(variable == "M") / base::length(time), sum_variable = base::sum(variable == "M"))
df_perc_female <- plyr::ddply(df_perc_gender, ~ time, plyr::summarise, perc_variable = base::sum(variable == "F") / base::length(time), sum_variable = base::sum(variable == "F"))
df_perc_gender <- base::rbind(base::data.frame(df_perc_male, variable = "M"), base::data.frame(df_perc_female, variable = "F"))
df_perc_alive <- plyr::ddply(df_perc_outcome, ~ time, plyr::summarise, perc_variable = base::sum(death == 0) / base::length(time), sum_variable = base::sum(death == 0))
df_perc_dead <- plyr::ddply(df_perc_outcome, ~ time, plyr::summarise, perc_variable = base::sum(death == 1) / base::length(time), sum_variable = base::sum(death == 1))
df_perc_outcome <- base::rbind(base::data.frame(df_perc_alive, variable = "A"), base::data.frame(df_perc_dead, variable = "D"))
# stack the two summaries; group "1" = gender panel, group "2" = outcome panel
df_perc <- base::rbind(base::data.frame(df_perc_gender, group = "1"), base::data.frame(df_perc_outcome, group = "2"))
# percentage panel shows only Male and Dead (Female/Alive are the complements)
plot_perc_trajectories <- ggplot2::ggplot(base::subset(df_perc, variable %in% base::c("M", "D")), ggplot2::aes(x = time, y = perc_variable)) +
ggplot2::geom_line(ggplot2::aes(group = variable, colour = variable), alpha = 0.8, show.legend = FALSE) +
ggplot2::geom_point(ggplot2::aes(group = variable, colour = variable), size = 2, alpha = 1, show.legend = FALSE) +
ggplot2::facet_wrap(~ group, scales = "free_y", ncol = 1, labeller = ggplot2::as_labeller(base::c("1" = "Gender (percentage)", "2" = "Outcome (percentage)"))) +
ggplot2::scale_colour_manual("Legend: ", values = base::c("M" = color_male, "F" = color_female, "A" = color_alive, "D" = color_dead), labels = base::c("M" = "Male", "F" = "Female", "A" = "Alive", "D" = "Dead")) +
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks(), labels = scales::percent_format(accuracy = 1)) +
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
ggplot2::labs(x = "", y = "") +
custom_theme + ggplot2::theme(strip.text = ggplot2::element_text(size = 12))
plot_sum_trajectories <- ggplot2::ggplot(base::subset(df_perc, variable %in% base::c("M", "F", "D")), ggplot2::aes(x = time, y = sum_variable)) +
ggplot2::geom_line(ggplot2::aes(group = variable, colour = variable), alpha = 0.8) +
ggplot2::geom_point(ggplot2::aes(group = variable, colour = variable), size = 2, alpha = 1) +
ggplot2::facet_wrap(~ group, scales = "free_y", ncol = 1, labeller = ggplot2::as_labeller(base::c("1" = "Gender (count)", "2" = "Outcome (count)"))) +
ggplot2::scale_colour_manual("Legend: ", values = base::c("M" = color_male, "F" = color_female, "A" = color_alive, "D" = color_dead), labels = base::c("M" = "Male", "F" = "Female", "A" = "Alive", "D" = "Dead")) +
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
ggplot2::labs(x = "", y = "") +
custom_theme + ggplot2::theme(strip.text = ggplot2::element_text(size = 12))
# combine the two panels side by side with a single shared legend
# (g_legend() is defined in codes/functions.R)
shared_legend <- g_legend(plot_sum_trajectories)
shared_y_title <- grid::textGrob("Hospitalization time", gp = grid::gpar(fontsize = 12), vjust = -1.5)
plot_perc_sum_trajectories <- gridExtra::arrangeGrob(shared_legend,
gridExtra::arrangeGrob(plot_perc_trajectories + ggplot2::theme(legend.position = "none"),
plot_sum_trajectories + ggplot2::theme(legend.position = "none"),
nrow = 1, bottom = shared_y_title),
nrow = 2, heights = base::c(2, 10))
## Tables -----------------------------------------------------------------------------------------------------
# count distinct patients (Dead and Alive) by gender
base::suppressMessages(table_desc_data <- sub_hsl_long %>% dplyr::group_by(gender, de_desfecho) %>% dplyr::select(dplyr::starts_with("id_paciente")) %>% base::unique() %>% dplyr::count())
base::colnames(table_desc_data) <- base::c("Gender", "Outcome", "Count")
# expand the single-letter gender codes for presentation
table_desc_data$Gender <- base::gsub("F", "Female", table_desc_data$Gender)
table_desc_data$Gender <- base::gsub("M", "Male", table_desc_data$Gender)
# INFERENTIAL ANALYSIS ----------------------------------------------------------------------------------------
## Data -------------------------------------------------------------------------------------------------------
# hsl data, wide format: same filtering/recoding as the descriptive subset
sub_hsl <- base::subset(data_wide, hospital == 'HSL' & !base::is.na(de_desfecho))
obito <- base::grep('óbito', sub_hsl$de_desfecho)
sub_hsl$de_desfecho[obito] <- 'Dead'
sub_hsl$de_desfecho[base::setdiff(1:base::nrow(sub_hsl), obito)] <- 'Alive'
# split patients by hospitalization length (> 20 days = "A"), reference "B"
sub_hsl$break_point <- stats::relevel(base::factor(base::ifelse(sub_hsl$n_days > 20, "A", "B")), ref = "B")
# combined gender/outcome label (e.g. "M_Dead")
sub_hsl$GenderOutcome <- base::paste(sub_hsl$gender, sub_hsl$de_desfecho, sep = "_")
## Models -----------------------------------------------------------------------------------------------------
# For each analyte, fit a linear mixed model (random intercept per patient;
# cubic B-spline in time, interacted with gender and outcome), then run the
# REB grouped bootstrap (generic.group.boot.REB.0.2 from codes/functions.R)
# with 1000 replicates for the confidence intervals.
set.seed(2021)
analytes_models <- base::lapply(Analytes, function(.x) {
base::cat("wait while R is fitting a model for", analytes_labels_long[.x], "...\n")
y <- sub_hsl[, .x]
dataset <- base::data.frame(sub_hsl, y = y)
# model
fit <- nlme::lme(y ~ gender + de_desfecho + splines::bs(midpoint_interval) + gender : splines::bs(midpoint_interval) + de_desfecho : splines::bs(midpoint_interval),
random =~ 1|id_paciente, data = dataset)
# bootstrap
# NOTE(review): base::as.double(nlme::VarCorr(fit)) flattens a
# (Variance, StdDev) matrix; elements 3-4 used here are on one scale while
# models_coef below takes elements 1-2 under the same sigma.u/sigma.e
# labels -- confirm which scale each consumer expects.
var_comp <- base::as.double(nlme::VarCorr(fit))
fit_boot <- generic.group.boot.REB.0.2(y, X = stats::model.matrix(fit, data = dataset)[, -1], alpha = fit$coef$fixed[1],
beta = fit$coef$fixed[-1], sigma.u = var_comp[3], sigma.e = var_comp[4],
group = dataset$id_paciente, k = 1000, verbose = FALSE, stop = FALSE)
base::list(fit = fit, boot = fit_boot)
})
base::names(analytes_models) <- Analytes
# split the fitted models and their bootstrap replicates into parallel lists
analytes_fit <- base::lapply(analytes_models, "[[", 1)
analytes_boot <- base::lapply(analytes_models, "[[", 2)
# summary
models_summary <- base::lapply(analytes_fit, base::summary)
# regression fixed effects plus variance components and their ratio (lambda)
models_coef <- base::lapply(analytes_fit, function(.x) {
fixed_eff <- .x$coef$fixed
sigma <- base::as.double(nlme::VarCorr(.x))
base::c(fixed_eff, sigma.u = sigma[1], sigma.e = sigma[2], lambda = sigma[1] / sigma[2])
})
# var and sd of random effects and residuals
models_sd <- base::lapply(analytes_fit, function(.x) {
var_sd <- nlme::VarCorr(.x)
matrix(base::as.double(var_sd), ncol = 2, dimnames = base::list(base::rownames(var_sd), base::colnames(var_sd)))
})
# AIC
models_aic <- base::lapply(analytes_fit, stats::AIC)
# percentile (2.5%, 97.5%) bootstrap CIs alongside the point estimates
models_boot_ci <- base::lapply(1:base::length(analytes_boot), function(.x) {
boot_ci <- base::lapply(analytes_boot[[.x]], function(.y) {
ci <- base::t(base::apply(.y, 2, stats::quantile, probs = base::c(0.025, 0.975)))
base::data.frame(est = models_coef[[.x]], ci, check.names = FALSE)
})
})
base::names(models_boot_ci) <- base::names(analytes_boot)
# keep only the second (adjusted) set of bootstrap intervals per analyte
models_boot_ci_adj <- base::lapply(models_boot_ci, "[[", 2)
## Plots ------------------------------------------------------------------------------------------------------
# shared theme for the model-effect plots (kept separate from custom_theme so
# it can be layered onto sjPlot output)
standard_theme <- ggplot2::theme(
panel.grid.minor.x = ggplot2::element_blank(),
panel.grid.minor.y = ggplot2::element_blank(),
legend.position = "top",
legend.box.background = ggplot2::element_rect(colour = "black", fill = NA),
legend.key = ggplot2::element_rect(colour = "transparent", fill = "transparent"),
strip.text = ggplot2::element_text(face = "bold", colour = "black"),
strip.background = ggplot2::element_rect(fill = NA, colour = "black")
)
# marginal-effect plots of time x gender, one per analyte, with HC3 robust
# standard errors; only the first panel keeps a legend (shared below)
p_time_gender <- base::list()
for (.x in 1:base::length(analytes_fit)) {
analyte <- base::names(analytes_fit)[[.x]]
y <- sub_hsl[, analyte]
dataset <- base::data.frame(sub_hsl, y = y)
base::suppressMessages(
p <- sjPlot::plot_model(analytes_fit[[.x]], type = "eff", terms = base::c("midpoint_interval", "gender"),
robust = TRUE, vcov.fun = "vcovHC", vcov.type = "HC3", show.data = TRUE,
dot.alpha = 0.3, dot.size = 1, line.alpha = 0.8, line.size = 1,
title = analytes_labels_long[analyte]) +
ggplot2::scale_colour_manual("Gender: ", values = base::c("M" = color_male, "F" = color_female), labels = base::c("M" = "Male", "F" = "Female")) +
ggplot2::scale_fill_manual("Gender: ", values = base::c("M" = color_male, "F" = color_female), labels = base::c("M" = "Male", "F" = "Female")) +
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
ggplot2::labs(x = "Hospitalization time", y = "Count/ul") +
ggplot2::theme_light(base_size = 12) + standard_theme
)
if (.x != 1) p <- p + ggplot2::theme(legend.position = "none")
p_time_gender[[.x]] <- p
}
# extract the legend from panel 1 and place it above the 2-row panel grid
shared_legend <- g_legend(p_time_gender[[1]])
p_time_gender[[1]] <- p_time_gender[[1]] + ggplot2::theme(legend.position = "none")
plot_time_gender <- gridExtra::arrangeGrob(shared_legend, gridExtra::arrangeGrob(grobs = p_time_gender, nrow = 2), nrow = 2, heights = base::c(5, 40))
# marginal-effect plots of time x outcome, same layout as time x gender
p_time_outcome <- base::list()
for (.x in 1:base::length(analytes_fit)) {
analyte <- base::names(analytes_fit)[[.x]]
y <- sub_hsl[, analyte]
dataset <- base::data.frame(sub_hsl, y = y)
base::suppressMessages(
p <- sjPlot::plot_model(analytes_fit[[.x]], type = "eff", terms = base::c("midpoint_interval", "de_desfecho"),
robust = TRUE, vcov.fun = "vcovHC", vcov.type = "HC3", show.data = TRUE,
dot.alpha = 0.3, dot.size = 1, line.alpha = 0.8, line.size = 1,
title = analytes_labels_long[analyte]) +
ggplot2::scale_colour_manual("Outcome: ", values = base::c("Alive" = color_alive, "Dead" = color_dead)) +
ggplot2::scale_fill_manual("Outcome: ", values = base::c("Alive" = color_alive, "Dead" = color_dead)) +
ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
ggplot2::labs(x = "Hospitalization time", y = "Count/ul") +
ggplot2::theme_light(base_size = 12) + standard_theme
)
if (.x != 1) p <- p + ggplot2::theme(legend.position = "none")
p_time_outcome[[.x]] <- p
}
shared_legend <- g_legend(p_time_outcome[[1]])
p_time_outcome[[1]] <- p_time_outcome[[1]] + ggplot2::theme(legend.position = "none")
plot_time_outcome <- gridExtra::arrangeGrob(shared_legend, gridExtra::arrangeGrob(grobs = p_time_outcome, nrow = 2), nrow = 2, heights = base::c(5, 40))
# uncomment the code below to generate residual diagnostic plots per analyte
# (residual vs fitted, residual vs index, density, and QQ panels)
#plot_residuals <- base::lapply(analytes_fit, function(.x) {
# df_res <- base::data.frame(res = stats::residuals(.x), fit = stats::fitted(.x), index = 1:base::nrow(sub_hsl))
# p_res_fit <- ggplot2::ggplot(df_res, ggplot2::aes(x = fit, y = res)) +
# ggplot2::geom_hline(yintercept = base::c(-2, 2), linetype = 'dashed', size = 0.5) +
# ggplot2::geom_point(shape = 20, size = 2, alpha = 0.5) +
# ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::theme_light(base_size = 12) +
# ggplot2::labs(y = "Quantile residual", x = "Fitted value")
# p_res_index <- ggplot2::ggplot(df_res, ggplot2::aes(x = index, y = res)) +
# ggplot2::geom_hline(yintercept = base::c(-2, 2), linetype = 'dashed', size = 0.5) +
# ggplot2::geom_point(shape = 20, size = 2, alpha = 0.5) +
# ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::theme_light(base_size = 12) +
# ggplot2::labs(y = "Quantile residual", x = "Index of observation")
# p_res_density <- ggplot2::ggplot(df_res, ggplot2::aes(x = res, fill = "1")) +
# ggplot2::geom_density(position = "identity", alpha = 0.1, size = 0.5, show.legend = FALSE) +
# ggplot2::scale_fill_manual(values = grDevices::grey(0.4)) +
# ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::theme_light(base_size = 12) +
# ggplot2::labs(y = "Density", x = "Quantile residual")
# p_res_qq <- ggplot2::ggplot(df_res, ggplot2::aes(sample = res)) +
# ggplot2::stat_qq(size = 1, alpha = 0.5) + ggplot2::stat_qq_line() +
# ggplot2::scale_fill_manual(values = grDevices::grey(0.4)) +
# ggplot2::scale_y_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::scale_x_continuous(breaks = scales::pretty_breaks()) +
# ggplot2::theme_light(base_size = 12) +
# ggplot2::labs(y = "Sample quantile", x = "Theoretical quantile")
# p_diagnostics <- gridExtra::arrangeGrob(p_res_fit, p_res_index, p_res_density, p_res_qq, nrow = 2)
#})
## Tables -----------------------------------------------------------------------------------------------------
# Build one HTML table of fixed-effect estimates per analyte, replacing the
# default (model-based) confidence intervals produced by sjPlot::tab_model()
# with the REB bootstrap intervals computed above (models_boot_ci_adj).
# The HTML page is edited in place by splitting it on "</td>" cell delimiters:
# cell .y in "col1" holds a covariate name, and its CI cell sits two cells on.
models_tables <- base::lapply(base::seq_along(analytes_fit), function(.x) {
  html_table <- sjPlot::tab_model(analytes_fit[[.x]], dv.labels = analytes_labels_long[base::names(analytes_fit)[.x]],
                                  show.re.var = TRUE, show.p = FALSE, show.se = FALSE, show.stat = FALSE,
                                  show.aic = FALSE, show.r2 = FALSE, show.reflvl = TRUE, title = "Linear Mixed Model")
  # split the raw HTML into cells; the first "col1" hit is the header row
  td <- base::unlist(strsplit(html_table$page.complete, split = "</td>"))
  col1 <- base::grep("col1", td)[-1]
  boot_ci <- base::sapply(col1, function(.y) {
    results <- models_boot_ci_adj[[.x]]
    # strip HTML tags to recover the covariate name, then look up its row
    # in the bootstrap-CI table (columns 2-3 = lower/upper bound)
    iv_name <- base::gsub("(.+>)", "", td[.y])
    iv_boot <- base::which(base::rownames(results) == iv_name)
    lwr <- base::round(results[iv_boot, 2], 2)
    upr <- base::round(results[iv_boot, 3], 2)
    # Negative bounds are rendered as "45;<abs value>".
    # NOTE(review): "45;" looks like a mangled HTML entity "&#45;" (minus
    # sign) -- confirm against the original encoding before changing it.
    # An interval whose bounds share a sign excludes zero, so flag it "*".
    # Bug fix: the second assignment previously tested/assigned `lwr`
    # again instead of `upr`, so negative upper bounds were never formatted
    # and the lower bound could be overwritten with the upper one.
    if (base::sign(lwr) == base::sign(upr)) {
      if (lwr < 0) lwr <- base::paste0("45;", base::format(base::abs(lwr), nsmall = 2))
      if (upr < 0) upr <- base::paste0("45;", base::format(base::abs(upr), nsmall = 2))
      ci <- base::paste0(lwr, " – ", upr, " *")
    } else {
      if (lwr < 0) lwr <- base::paste0("45;", base::format(base::abs(lwr), nsmall = 2))
      if (upr < 0) upr <- base::paste0("45;", base::format(base::abs(upr), nsmall = 2))
      ci <- base::paste0(lwr, " – ", upr)
    }
    # overwrite the numeric CI text inside the (unclosed-tag) CI cell
    html_ci <- base::gsub("(?!.*\">)(\\d.+\\d)", ci, td[.y + 2], perl = TRUE)
  })
  td[col1[base::seq_along(boot_ci)] + 2] <- boot_ci
  html_table$page.complete <- base::paste(td, collapse = "</td>")
  # relabel technical model terms with reader-friendly names
  html_table$page.complete <- base::gsub("CI", "Bootstrap CI", html_table$page.complete)
  html_table$page.complete <- base::gsub("splines::bs\\(midpoint_interval\\)1", "Time [1st term]", html_table$page.complete)
  html_table$page.complete <- base::gsub("splines::bs\\(midpoint_interval\\)2", "Time [2nd term]", html_table$page.complete)
  html_table$page.complete <- base::gsub("splines::bs\\(midpoint_interval\\)3", "Time [3rd term]", html_table$page.complete)
  html_table$page.complete <- base::gsub("genderM", "Gender [Male]", html_table$page.complete)
  html_table$page.complete <- base::gsub("de_desfechoDead", "Outcome [Dead]", html_table$page.complete)
  html_table$page.complete <- base::gsub("paciente", "patient", html_table$page.complete)
  base::return(html_table)
})
base::names(models_tables) <- base::names(analytes_fit)
# OUTPUTS -----------------------------------------------------------------------------------------------------
## Plots ------------------------------------------------------------------------------------------------------
# Each figure is written three times: vector PDF, full-resolution PNG, and a
# low-dpi JPG; sizes are multiples of a 105 x 74.25 mm base panel.
# gender trajectories
ggplot2::ggsave(filename = base::paste0("./outputs/figures/pdf/", "FigA1", ".pdf"), plot = plot_gender_trajectories, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/png/", "FigA1", ".png"), plot = plot_gender_trajectories, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/jpg_low-quality/", "FigA1", ".jpg"), plot = plot_gender_trajectories, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm", dpi = 100)
# outcome trajectories
ggplot2::ggsave(filename = base::paste0("./outputs/figures/pdf/", "FigA2", ".pdf"), plot = plot_outcome_trajectories, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/png/", "FigA2", ".png"), plot = plot_outcome_trajectories, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/jpg_low-quality/", "FigA2", ".jpg"), plot = plot_outcome_trajectories, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm", dpi = 100)
# percentages and sum trajectories
ggplot2::ggsave(filename = base::paste0("./outputs/figures/pdf/", "FigS1", ".pdf"), plot = plot_perc_sum_trajectories, width = 2 * 105, height = 1.7 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/png/", "FigS1", ".png"), plot = plot_perc_sum_trajectories, width = 2 * 105, height = 1.7 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/jpg_low-quality/", "FigS1", ".jpg"), plot = plot_perc_sum_trajectories, width = 2 * 105, height = 1.7 * 74.25, units = "mm", dpi = 100)
# time x gender
ggplot2::ggsave(filename = base::paste0("./outputs/figures/pdf/", "FigA3", ".pdf"), plot = plot_time_gender, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/png/", "FigA3", ".png"), plot = plot_time_gender, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/jpg_low-quality/", "FigA3", ".jpg"), plot = plot_time_gender, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm", dpi = 100)
# time x outcome
ggplot2::ggsave(filename = base::paste0("./outputs/figures/pdf/", "FigA4", ".pdf"), plot = plot_time_outcome, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/png/", "FigA4", ".png"), plot = plot_time_outcome, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm")
ggplot2::ggsave(filename = base::paste0("./outputs/figures/jpg_low-quality/", "FigA4", ".jpg"), plot = plot_time_outcome, width = 2.5 * 105, height = 2.5 * 74.25, units = "mm", dpi = 100)
# uncomment the code below to save the residual diagnostic plots (requires
# the plot_residuals list from the commented-out block above)
#save_plot_residuals <- base::lapply(1:base::length(plot_residuals), function(.x) {
# p <- plot_residuals[[.x]]
# title <- analytes_labels_long[base::names(plot_residuals)[.x]]
# ggplot2::ggsave(filename = base::paste0("./outputs/figures/pdf/", "FigS3_Model_Residuals_", title, ".pdf"), plot = p, width = 2 * 105, height = 1.5 * 74.25, units = "mm")
# ggplot2::ggsave(filename = base::paste0("./outputs/figures/png/", "FigS3_Model_Residuals_", title, ".png"), plot = p, width = 2 * 105, height = 1.5 * 74.25, units = "mm")
# ggplot2::ggsave(filename = base::paste0("./outputs/figures/jpg_low-quality/", "FigS3_Model_Residuals_", title, ".jpg"), plot = p, width = 2 * 105, height = 1.5 * 74.25, units = "mm", dpi = 100)
#})
## Tables -----------------------------------------------------------------------------------------------------
# data description (csv): patient counts by gender and outcome
utils::write.table(table_desc_data, file = "./outputs/tables/csv/TabS1.csv", sep = ";", quote = FALSE, row.names = FALSE)
# LMM estimates (csv)
# Write one csv per analyte with the bootstrap-adjusted LMM results: fixed
# effects with bootstrap CI limits, variance components (point estimates
# only), the intraclass correlation, and the sample sizes.
save_tables_csv <- base::lapply(base::seq_along(models_boot_ci_adj), function(.x) {
  title <- analytes_labels_long[base::names(models_boot_ci_adj)[.x]]
  results <- models_boot_ci_adj[[.x]]
  # no bootstrap CIs are reported for the variance components / ICC
  results[base::c("sigma.u", "sigma.e", "lambda"), 2:3] <- NA
  # ICC = between-patient variance / total variance
  results["lambda", 1] <- results["sigma.u", 1] / (results["sigma.u", 1] + results["sigma.e", 1])
  # relabel model terms with human-readable names
  base::row.names(results) <- base::gsub("splines::bs\\(midpoint_interval\\)1", "Time [1st term]", base::row.names(results))
  base::row.names(results) <- base::gsub("splines::bs\\(midpoint_interval\\)2", "Time [2nd term]", base::row.names(results))
  base::row.names(results) <- base::gsub("splines::bs\\(midpoint_interval\\)3", "Time [3rd term]", base::row.names(results))
  base::row.names(results) <- base::gsub("genderM", "Gender [Male]", base::row.names(results))
  base::row.names(results) <- base::gsub("de_desfechoDead", "Outcome [Dead]", base::row.names(results))
  base::row.names(results) <- base::gsub("sigma.e", "sigma2", base::row.names(results))
  base::row.names(results) <- base::gsub("sigma.u", "tau00id_patient", base::row.names(results))
  base::row.names(results) <- base::gsub("lambda", "ICC", base::row.names(results))
  # append the number of patients and the number of observations
  N <- base::matrix(base::c(base::length(base::unique(sub_hsl$id_paciente)), base::nrow(sub_hsl), base::rep(NA, 4)), ncol = 3)
  base::rownames(N) <- base::c("Nid_patient", "Observations")
  base::colnames(N) <- base::colnames(results)
  results <- base::rbind(results, N)
  base::colnames(results) <- base::c("Estimates", "Lower Limit Bootstrap CI", "Upper Limit Bootstrap CI")
  utils::write.table(base::round(results, 2), file = base::paste0("./outputs/tables/csv/", "TabS2_", title, ".csv"), sep = ";", quote = FALSE, row.names = TRUE, col.names = TRUE)
})
# data description (html)
utils::write.table(htmlTable::htmlTable(table_desc_data, rnames = FALSE), file = "./outputs/tables/html/TabS1.html", sep = ";", quote = FALSE, row.names = FALSE, col.names = FALSE)
# LMM estimates (html): write the pre-built html page for each fitted model.
# seq_along() is safe when the list is empty (1:length would iterate c(1, 0)).
save_tables_html <- base::lapply(base::seq_along(analytes_fit), function(.x) {
  title <- analytes_labels_long[base::names(analytes_fit)[.x]]
  utils::write.table(models_tables[[.x]]$page.complete, file = base::paste0("./outputs/tables/html/", "TabS2_", title, ".html"), quote = FALSE, row.names = FALSE, col.names = FALSE)
})
## RData ------------------------------------------------------------------------------------------------------
# uncomment the code below to save the image of all R objects created through the script
#base::save.image('script_objects.RData')
|
63a528c732a3710529a1c6800be3dfcea6316ffb
|
7d7f692dce8c762149d05abcc74b79bab0aa9066
|
/code/pcode_check.R
|
a0729307646b2e8a8119c309facaa45e1ac1a6bc
|
[] |
no_license
|
luiscape/hdxviz-col-humanitarian-trends
|
74b0142e1a70a99bf03f321d0425288e55776fe1
|
a666333b943fa3044327a381c5d89e58827ca6f0
|
refs/heads/master
| 2020-06-04T22:32:28.778701
| 2014-12-05T03:37:42
| 2014-12-05T03:37:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 554
|
r
|
pcode_check.R
|
## Check admin3 p-codes: extract the p-code embedded in each origin
## municipality name and compare it against the reference admin3 list.

# Original source data (semicolon separated).
data <- read.table('data/source/140812_UARIV_RNI_CONSULTA_EDADES.txt',
                   header = TRUE,
                   sep = ';')
# Reference table with the updated p-codes.
pcodes <- read.csv('data/col_admin3_dane.csv')

# One row per distinct origin municipality; the p-code is whatever follows
# the last hyphen in the municipality label.
muniques <- data.frame(original = unique(data$MPIO_ORIGEN))
muniques$pcode <- gsub(".*-", "", muniques$original)

# How many of the extracted p-codes appear in the reference list?
is_known <- muniques$pcode %in% pcodes$admin3
summary(is_known) # 4 FALSE
# Municipalities whose p-code is missing from the reference list.
other <- muniques[!is_known, ]
|
4b0beeffa7f5d6c20933b0e28bd13f8d901ab8a2
|
2e6c45d0a0e20e57d60a0703240f043f7d410b1d
|
/load_the_dataset.R
|
693e32bcea0d4d2bf5eb21657c66952c80ad7fd8
|
[] |
no_license
|
rmm241/Iris-Dataset-Visualization
|
2051bb4b366ca6f7d4782611e36cee0f650357d0
|
101336bac33d101fcb9e8eeed47b2a816d2b3865
|
refs/heads/master
| 2020-07-03T10:13:18.324011
| 2019-08-12T07:09:01
| 2019-08-12T07:09:01
| 201,875,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 123
|
r
|
load_the_dataset.R
|
# Load the plotting and data-reading packages used by the visualization code.
library(ggplot2)
library(readr)
library(gridExtra)
library(grid)
library(plyr)
# Read the Iris dataset from the working directory into a data frame.
iris_ds <- read.csv('./Iris.csv')
|
697a51d9adff28082c946c1177057f1c7a67a1bf
|
56944b3b2887eb8d448bc1d9276026afb86b4340
|
/r/piratePlotsWrangler.r
|
61a1764f34aa48d02e9d1c7121f9df1f48e32cfa
|
[] |
no_license
|
Jeremieauger/codingTheMicrobiome
|
424fe2dac5b20bebdfecc8c576eac7ee72cee7d6
|
5bb3a705a0c4d4c46fd9eeb878629fb75c8f9810
|
refs/heads/master
| 2021-07-19T11:22:06.252761
| 2021-02-18T20:59:41
| 2021-02-18T20:59:41
| 76,469,396
| 0
| 0
| null | 2016-12-14T17:25:45
| 2016-12-14T14:59:25
|
Shell
|
UTF-8
|
R
| false
| false
| 3,718
|
r
|
piratePlotsWrangler.r
|
Call:
lm(formula = expected_count ~ day, data = foxCo2DF)
Residuals:
Min 1Q Median 3Q Max
-72856 -37810 -24860 -4182 788841
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 72866 25986 2.804 0.00737 **
day -5098 5250 -0.971 0.33656
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 127300 on 46 degrees of freedom
Multiple R-squared: 0.02009, Adjusted R-squared: -0.001212
F-statistic: 0.9431 on 1 and 46 DF, p-value: 0.3366
par(mfrow=c(1,4), cex.main = 2)
pirateplot(formula = log(expected_count) ~ day, avg.line.fun = median,
hdi.iter = 0,
data = mebCo2DF,
main = "Aérobique - Sans Atb")
pirateplot(formula = log(expected_count) ~ day, avg.line.fun = median,
hdi.iter = 0,
data = foxCo2DF,
main = "Aérobique - Céfoxitine")
pirateplot(formula = log(expected_count) ~ day, avg.line.fun = median,
hdi.iter = 0,
data = mebAnaDF,
main = "Anaérobique - Sans Atb")
pirateplot(formula = log(expected_count) ~ day, avg.line.fun = median,
hdi.iter = 0,
data = foxAnaDF,
main = "Anaérobique - Céfoxitine")
pal=piratepal("southpark")
pirateplot(formula = log(expected_count) ~ day, avg.line.fun = median,
data = foxAnaDF,
pal = pal
main = "Anaérobique - Céfoxitine")
> wilcox.test(subset(foxCo2DF, day == "0")$expected_count,
+ subset(foxCo2DF, day == "7")$expected_count,
+ paired = TRUE)
Wilcoxon signed rank test
V = 194, p-value = 0.2182
alternative hypothesis: true location
shift is not equal to 0
> wilcox.test(subset(rsemExpect, day == 0 & treatment == 1)$expected_count,
+ subset(rsemExpect, day == 7 & treatment == 1)$expected_count,
+ paired = TRUE)
Wilcoxon signed rank test with continuity correction
V = 99, p-value = 0.2977
alternative hypothesis: true location shift is not equal to 0
### Making the comparative plots in pirate plots
par(mfrow=c(1,1))
### Ray
elenQte <- read.csv("~/GitHub/codingTheMicrobiome/r/elenQte_FromRay.csv")
library(reshape)
stackedDF <- melt(elenQte, id=c("ID", "Treatment"))
colnames(stackedDF) <- c('Patient0', 'Treatment', 'Time', 'Ratio')
pirateplot(formula = log(Ratio) ~ Time + Treatment, avg.line.fun = median,
hdi.iter = 0,
data = stackedDF,
main = "Ray-Meta")
### BWA
DFLM <- read.csv("~/GitHub/codingTheMicrobiome/r/bwaForLm.csv")
pirateplot(formula = log(Ratio) ~ Time + Treatment, avg.line.fun = median,
hdi.iter = 0,
data = DFLM,
main = "Burrows-Wheeler Aligner")
### RSEM - TPM + expect
rsemDF <- read.csv("~/GitHub/codingTheMicrobiome/data/rsemDF.csv")
# sample patient day treatment age sex expected_count TPM FPKM
par(mfrow=c(1,1))
pirateplot(formula = log(TPM) ~ day + treatment, avg.line.fun = median,
hdi.iter = 0,
data = rsemDF,
main = "Transcripts Per Million (TPM)")
pirateplot(formula = log(expected_count) ~ day + treatment, avg.line.fun = median,
hdi.iter = 0,
data = rsemDF,
main = "Expected Count")
pirateplot(formula = log(expected_count) ~ day, avg.line.fun = median,
hdi.iter = 0,
data = subset(rsemDF, treatment == 1),
main = "Expected Count")
> pirateplot(formula = log(expected_count) ~ day, avg.line.fun = median,
+ data = subset(rsemDF, treatment == 1),
+ main = "RSEM\nExpected Count", xlab = "Temps (jours)")
|
939731cd3c62a5ad37fe2fc7118efc1b75b13628
|
cdff5f098ca959c5af7cbab56f161dfc83ccf31f
|
/Project Data Wrangling Exercise 2 Dealing with missing values.R
|
170b56c791bca1159e63057d28e8cfdd417e7a2d
|
[] |
no_license
|
LHData/Data-Wrangling-Ex-2-Dealing-with-missing-values
|
6078e5d9aff13ea1de1a0b6234f02813d94c9f4e
|
d9acc7557ea357dac11d3e07df6d17dc3211fa58
|
refs/heads/master
| 2020-04-24T16:48:03.164804
| 2019-02-22T19:22:53
| 2019-02-22T19:22:53
| 172,122,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,148
|
r
|
Project Data Wrangling Exercise 2 Dealing with missing values.R
|
library(dplyr)
library(readr)
# 0: Load the data (read_csv returns a tibble)
titanic <- read_csv("titanic_original.csv")
# exploratory helpers, possibly useful for an RMarkdown write-up
#colnames(titanic)
#distinct(titanic, embarked)
# 1: Port of embarkation. Fill missing values of the embarked column with "S"
titanic <- mutate(titanic, embarked = ifelse(is.na(embarked), "S", embarked))
# 2: Age. Replace missing values for age with the mean age
titanic <- mutate(titanic, age = ifelse(is.na(age), mean(titanic$age, na.rm = TRUE), age))
# Author's reviewer notes, kept as-is:
# I'm not a fan of filling in the age with the mean.
# I think those observations should just be left out of any calculations.
# If we had to fill in a value, I would prefer the median because it looks like the age data is skewed.
# 3: Lifeboat. Fill empty values in the boat column with "none"
titanic <- mutate(titanic, boat = ifelse(is.na(boat), "none", boat))
# 4: Cabin. Add a 0/1 indicator for whether the passenger has a cabin number;
# a missing cabin plausibly means no assigned cabin.
titanic <- mutate(titanic, has_cabin_number = ifelse(is.na(titanic$cabin), 0, 1))
# Save the cleaned data frame to csv
write_csv(titanic, "titanic_clean.csv")
|
5d7af1f9b63074d34976f31b5d22eb62f1c1ddde
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.kinesisanalyticsv2/man/delete_application_input_processing_configuration.Rd
|
af431726b558763ff4b93d7103217835893df27c
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,235
|
rd
|
delete_application_input_processing_configuration.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.kinesisanalyticsv2_operations.R
\name{delete_application_input_processing_configuration}
\alias{delete_application_input_processing_configuration}
\title{Deletes an InputProcessingConfiguration from an input}
\usage{
delete_application_input_processing_configuration(ApplicationName,
CurrentApplicationVersionId, InputId)
}
\arguments{
\item{ApplicationName}{[required] The name of the application.}
\item{CurrentApplicationVersionId}{[required] The application version. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the \code{ConcurrentModificationException} is returned.}
\item{InputId}{[required] The ID of the input configuration from which to delete the input processing configuration. You can get a list of the input IDs for an application by using the DescribeApplication operation.}
}
\description{
Deletes an InputProcessingConfiguration from an input.
}
\section{Accepted Parameters}{
\preformatted{delete_application_input_processing_configuration(
ApplicationName = "string",
CurrentApplicationVersionId = 123,
InputId = "string"
)
}
}
|
835770559a0c83b3413e95b0e48533bae908c66a
|
991f3309becccaffc7b16fab9cc0f264862029de
|
/wk4/hw4p2.R
|
76f0121cf6b5c669fb3aaaa1ab8b25a04208d777
|
[] |
no_license
|
franciszxlin/MSCFNumericalMethods
|
b1ffcce395a0704204b416544476bbf3800dc037
|
ee6571c798c3919a3329a7e249a559dce42b29e8
|
refs/heads/master
| 2021-08-22T12:21:01.671966
| 2017-11-30T06:08:20
| 2017-11-30T06:08:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 937
|
r
|
hw4p2.R
|
# homework 4 problem 2
# Crank-Nicolson finite-difference scheme.
#
# Advances the interior values of `sol` through every time step.
#   sol : (time steps + 1) x (space nodes + 1) matrix; row 1 holds the
#         initial condition, and the first/last columns hold the boundary
#         values (left untouched, i.e. zero here).
#   dim : number of interior spatial nodes, equal to ncol(sol) - 2.
#   lam : dt / dx^2.
# Returns `sol` with all later rows filled in.
cn <- function(sol, dim, lam)
{
  # Tridiagonal Crank-Nicolson matrices: C u^{n+1} = D u^n, with
  # (1 +/- lam) on the diagonal and -/+ lam/2 on the off-diagonals.
  C <- diag(1 + lam, dim)
  D <- diag(1 - lam, dim)
  off <- seq_len(dim - 1) # empty when dim == 1, so that case is now safe
  C[cbind(off, off + 1)] <- -lam / 2
  C[cbind(off + 1, off)] <- -lam / 2
  D[cbind(off, off + 1)] <- lam / 2
  D[cbind(off + 1, off)] <- lam / 2
  # Hoist the linear solve out of the time loop: the step matrix is constant,
  # and solve(C, D) is cheaper and more stable than solve(C) %*% D.
  step <- solve(C, D)
  interior <- 2:(ncol(sol) - 1) # interior spatial columns (was 2:n2 via globals)
  for (i in seq_len(nrow(sol) - 1))
  {
    sol[i + 1, interior] <- as.numeric(step %*% sol[i, interior])
  }
  return(sol)
}
# Parameters: discretize time [0, T] into n1 steps and space [0, X] into n2 steps
n1=500
n2=700
T=1
X=1
dt=T/n1
dx=X/n2
# lam = dt/dx^2 is large here (~980); Crank-Nicolson is used precisely
# because it remains stable for large lam
lam=dt/dx^2
# solution grid: rows = time levels (n1+1), columns = space nodes (n2+1)
sol=matrix(0, nrow=n1+1, ncol=n2+1)
# Populate the first row with the initial condition exp(i*dx) on the
# interior columns; boundary columns 1 and n2+1 stay 0
for (i in 2:n2)
{
sol[1, i]=exp(i*dx)
}
# Solve the PDE; dim is the number of interior spatial nodes (n2 - 1)
dim=n2-2+1
sol=cn(sol, dim, lam)
head(sol[ ,1:5])
tail(sol[ ,1:5])
# Part ii: approximate the spatial integral at each time level
# (row sum times dx, a Riemann sum)
rowsum<-rowSums(sol)*dx
plot(0:500, rowsum)
|
cc830e0535d5ab585f5105b9b0a8847bf450e913
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/COMPoissonReg/R/sdev.R
|
4d9b38e3ca15b8c864243235ab8819e9f6b4d6a8
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49
|
r
|
sdev.R
|
# S3 generic: sdev(x) dispatches to a class-specific method (e.g. sdev.myclass).
sdev <- function (object, ...) UseMethod("sdev")
|
ae9c705891636be0ca86d673f34a19dca3d7b8a1
|
552ef1b37b1689c0347071a4ac10f542cb47543f
|
/man/link_zip.Rd
|
3d18137ced0bcf5fd0da27e4a9e83cf252125c78
|
[] |
no_license
|
lhenneman/hyspdisp
|
d49fb29a3944ca0c50398c70ff21459fee247358
|
1763245269211f48da803d282720e6d818a2e619
|
refs/heads/master
| 2021-05-05T06:37:39.251922
| 2019-10-16T19:41:36
| 2019-10-16T19:41:36
| 118,811,581
| 5
| 3
| null | 2019-06-05T13:45:14
| 2018-01-24T19:27:40
|
R
|
UTF-8
|
R
| false
| false
| 1,988
|
rd
|
link_zip.Rd
|
\name{link_zip}
\alias{link_zip}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Link particles to ZIP codes and take concentration
}
\description{
Takes as input particle locations, zip code spatial object, and a ZIP-ZCTA crosswalk file, and outputs a data table linking particles with zip codes. Rarely called on its own, many of the inputs default to values called by \code{hyspdisp_fac_model}.
}
\usage{
link_zip(d, zc = zcta2, cw = crosswalk, gridfirst = F, hpbl_file = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{d}{
Data table of particle positions. Expected variables are:
\enumerate{
\item lon (particle longitude)
\item lat (particle latitude)
}
}
\item{zc}{
ZIP code \code{SpatialPolygonsDataFrame} object.
Expected variables are:
\enumerate{
\item ZCTA5CE10
}
}
\item{cw}{
ZIP - ZCTA crosswalk file. Must include columns named \code{ZIP} and \code{ZCTA}.
}
\item{gridfirst}{
Logical. If TRUE, count particles in a fine grid before allocating to ZIP codes. This is preferred, as allocating particles to ZIP codes using only their locations inflates values in larger ZIP codes.
}
\item{hpbl_file}{
monthly mean boundary layer heights from NOAA's Earth System Research Library: \url{https://www.esrl.noaa.gov/psd/data/gridded/data.20thC_ReanV2.monolevel.mm.html}
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
This function returns a data table of zip codes that contain particles.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
95f15fab6d6852b8fc4ffcb19ec1485bf2fcad86
|
6a69c035a69b54bcc3a118c2b393f3de25f5f330
|
/copied solution_wiComment.R
|
d4df455fa164566f3e962548eaeda7bab7a8d6e5
|
[] |
no_license
|
Wjack07/-Kaggle-Expedia
|
c67bfbaf8bcbf3a734ff1c490f803506ed36e6d3
|
e70b053e16ea7ce29f7cf7aefde7f4b2d89d44dc
|
refs/heads/master
| 2020-12-24T19:37:04.489104
| 2016-04-22T15:38:15
| 2016-04-22T15:38:15
| 56,866,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,254
|
r
|
copied solution_wiComment.R
|
library(data.table)
# NOTE(review): setwd() with a placeholder path is fragile; prefer running the
# script from the project root with relative paths.
setwd(".../[Case 9] Expedia")
filename1 = "train_small.csv"
filename2 = "test_small.csv"
myTrain <- read.csv(filename1)
myTest <- read.csv(filename2)
myTrain_datatable <- data.table(myTrain)
# Count, for each (destination, hotel_cluster) pair, how many rows it has.
# data.table's DT[i, j, by] syntax evaluates j = length(is_booking) once per
# group defined by `by`, so the result column V1 is the per-group row count.
dest_id_hotel_cluster <-myTrain_datatable[,length(is_booking),by=list(srch_destination_id, hotel_cluster)]
# Next: for each destination, find its most popular hotel clusters.
# Return the (up to) five hotel clusters with the highest counts as a single
# space-separated string, ready for the submission file.
#   hc : vector of hotel cluster ids
#   v1 : matching vector of counts (one per cluster)
top_five <- function(hc, v1) {
  # Sort clusters by descending count. head() safely takes at most five and
  # returns an empty vector (hence "") for empty input; the previous
  # hc_sorted[1:min(5, length)] indexing produced the string "NA" when the
  # input was empty, because 1:0 is c(1, 0).
  ranked <- hc[order(v1, decreasing = TRUE)]
  paste(head(ranked, 5), collapse = " ")
}
# For each destination, concatenate its top (up to) five clusters; with `by`
# set, top_five receives the hotel_cluster and V1 vectors of one destination
# at a time.
dest_top_five <- dest_id_hotel_cluster[,top_five(hotel_cluster,V1),by=srch_destination_id]
# Left join: keep every test row; destinations never seen in training get NA.
result_1 <- merge(myTest,dest_top_five, by="srch_destination_id",all.x=TRUE)
result_2 <- result_1[,c('id','V1')] # keep only the id and the prediction string
result_3 <- result_2[order(result_2$id),] # re-order rows by id
setnames(result_3,c("id","hotel_cluster")) # set the submission header names
write.csv(result_3, file='submission.csv', row.names=FALSE)
|
fe10c04cf5a9cd9f2464825f8909d21a1401ade1
|
b481d707880afa5e87ba635d1a4bb162f2441573
|
/DataPrep_MissingValues.R
|
eefa961cbf0febe32edf951461c6b7a274cc2f76
|
[] |
no_license
|
mendytarun/R-DataPrep
|
4b0252f9a0c6b9ddf5c3cacd2648e5751a44c2b4
|
43eaee03ca36f7e4629f38e82a2aac6de1615f72
|
refs/heads/master
| 2021-01-22T19:36:26.492181
| 2017-03-30T07:59:29
| 2017-03-30T07:59:29
| 85,219,108
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,981
|
r
|
DataPrep_MissingValues.R
|
# Read the data, replacing blank cells with NA at import time
fin.df <- read.csv("Future-500.csv",na.strings = "")
fin.df
head(fin.df)
str(fin.df)
summary(fin.df)
tail(fin.df,3)
# Factors recap: some columns were imported as factors although they
# should not be, and vice versa
levels(fin.df$Industry)
# changing from non-factor to factor
fin.df$ID <- factor(fin.df$ID)
fin.df$Inception <- factor(fin.df$Inception)
str(fin.df)
# Factor Variable Trap:
# to convert a factor to numeric, first convert it to character, otherwise
# we get the internal factor codes instead of the real values
# (here b would be 1 2 1 instead of 11 12 11)
a <- factor(c("11","12","11")) # factor
b <- as.numeric(a)
b
# right way
c <- as.numeric(as.character(a))
c
head(fin.df)
str(fin.df)
# remove special characters from Revenue, Expenses and Growth
fin.df$Growth <- gsub("%","",fin.df$Growth) # gsub finds and replaces; applied to a factor it also coerces to character
fin.df$Revenue <- gsub("\\$","",fin.df$Revenue)
fin.df$Revenue <- gsub(",","",fin.df$Revenue)
fin.df$Expenses <- gsub(",","",fin.df$Expenses)
fin.df$Expenses <- gsub(" Dollars","",fin.df$Expenses)
str(fin.df)
# converting to numeric
fin.df$Growth <- as.numeric(fin.df$Growth)
fin.df$Revenue <- as.numeric(fin.df$Revenue)
fin.df$Expenses <- as.numeric(fin.df$Expenses)
fin.df$Profit <- as.numeric(fin.df$Profit)
str(fin.df)
# how to locate missing data
head(fin.df,25)
# complete.cases() flags the rows with no NA in any column;
# factor NAs display as <NA> to distinguish them from ordinary values
missing.fin.df <- fin.df[!complete.cases(fin.df),]
missing.fin.df
nrow(missing.fin.df)
# filtering non-missing values - effect of NA:
fin.df[fin.df$Revenue == 9746272,]
# the line above also returns all-NA rows, because comparing NA with
# anything yields NA (neither TRUE nor FALSE)
# filtering with which() avoids that: which() keeps only the TRUE positions
# and ignores NA
fin.df[which(fin.df$Revenue==9746272),]
# filtering with is.na() selects the rows where a column IS missing
fin.df[is.na(fin.df$Expenses),]
fin.df[is.na(fin.df$State),]
# removing records with missing data
nrow(fin.df)
fin.df_backup <- fin.df
nrow(fin.df[!complete.cases(fin.df),]) # rows that have at least one NA column
fin.df[!is.na(fin.df$Industry),] # df without the Industry-NA rows
fin.df <- fin.df[!is.na(fin.df$Industry),]
nrow(fin.df) # 2 rows removed (the ones with Industry NA)
# resetting the row index: when rows are deleted, the row names do NOT reset
# automatically (unlike Excel)
rownames(fin.df) <- 1:nrow(fin.df)
tail(fin.df)
rownames(fin.df) <- NULL # faster way of resetting the row names
# replacing missing values - Factual Analysis Method
# (fill in State from the known City)
fin.df[!complete.cases(fin.df),]
fin.df[is.na(fin.df$State) & fin.df$City == "New York","State"] <- "NY"
fin.df[!complete.cases(fin.df),]
fin.df[is.na(fin.df$State) & fin.df$City == "San Francisco","State"] <- "CA"
fin.df[c(11,377),]
# replacing missing values with the median (median imputation)
fin.df[!complete.cases(fin.df),]
nrow(fin.df[!complete.cases(fin.df),])
v.industry <- (fin.df[is.na(fin.df$Employees),])$Industry # industries that have Employees NA
v.industry
mean(fin.df[,"Employees"],na.rm = T) # mean ignoring Industry
mean(fin.df[fin.df$Industry==v.industry[1],"Employees"],na.rm=T) # mean within the Industry
median(fin.df[,"Employees"],na.rm = T) # median ignoring Industry
med.emp.retail <- median(fin.df[fin.df$Industry==v.industry[1],"Employees"],na.rm=T) # median within the Industry
fin.df[is.na(fin.df$Employees) & fin.df$Industry == v.industry[1],"Employees"] <- med.emp.retail
fin.df[3,]
mean(fin.df[fin.df$Industry==v.industry[2],"Employees"],na.rm=T) # mean within the Industry
med.emp.service <- median(fin.df[fin.df$Industry==v.industry[2],"Employees"],na.rm=T) # median within the Industry
med.emp.service
fin.df[is.na(fin.df$Employees) & fin.df$Industry == v.industry[2],"Employees"] <- med.emp.service
fin.df[330,]
# median imputation in Growth
fin.df[!complete.cases(fin.df),]
growth.industry <- (fin.df[is.na(fin.df$Growth),"Industry"])
growth.industry
med.growth.service <- median(fin.df[fin.df$Industry==growth.industry[1],"Growth"],na.rm=T)
fin.df[is.na(fin.df$Growth) & fin.df$Industry == growth.industry[1],"Growth"] <- med.growth.service
fin.df[!complete.cases(fin.df),]
# median imputation in Revenue and Expenses
fin.df[is.na(fin.df$Revenue),]
# Median of `median_column` restricted to the rows of `df` whose Industry
# equals `na_column`, ignoring NAs. `df` defaults to the global fin.df, so
# the existing two-argument calls keep working, but the data frame can now
# be passed explicitly instead of being a hidden global dependency.
func.median.set <- function(na_column, median_column, df = fin.df) {
  median(df[df$Industry == na_column, median_column], na.rm = TRUE)
}
# Impute Construction's missing Revenue and Expenses with the industry medians
median.revenue <- func.median.set( "Construction","Revenue")
median.expenses <- func.median.set( "Construction","Expenses")
fin.df[is.na(fin.df$Revenue) & fin.df$Industry == "Construction","Revenue"] <- median.revenue
fin.df[is.na(fin.df$Expenses) & fin.df$Industry == "Construction","Expenses"] <- median.expenses
fin.df[!complete.cases(fin.df),]
# derive the remaining missing values: Profit = Revenue - Expenses
fin.df[is.na(fin.df$Profit),"Profit"] <- fin.df[is.na(fin.df$Profit),"Revenue"] - fin.df[is.na(fin.df$Profit),"Expenses"]
fin.df[!complete.cases(fin.df),]
str(fin.df)
# scatter plot of Expenses vs Revenue, colored by Industry, sized by Profit
library(ggplot2)
p <- ggplot(data=fin.df)
p + geom_point(aes(x=Revenue,y=Expenses, color = Industry,size=Profit))
# scatter plot with per-industry trend lines for the Expenses ~ Revenue
# relationship
d <- ggplot(data=fin.df,aes(x=Revenue,y=Expenses,color=Industry))
d + geom_point() + geom_smooth(fill=NA,size=1.3)
# box plots showing Growth by Industry
e <- ggplot(data=fin.df,aes(x=Industry,y=Growth,color=Industry))
e + geom_boxplot()
e + geom_jitter()+geom_boxplot(size=1, alpha=0.5,outlier.color = NA)
|
a833669aea98001f870d6ffd2de9a60125aa0571
|
e639b8dc495cee285869e27c3714affee2d9d876
|
/man/samSourceType.Rd
|
a98676a3b15fdbd72f6c732df8ff0f2a4818359e
|
[] |
no_license
|
jefferys/SamSeq
|
606d200bf2cc6c58cc2b931d8d2bcacf2ac8e57f
|
62523c0b3e4023d4b031f4e1973e682c467ba57d
|
refs/heads/master
| 2021-01-12T17:48:50.140263
| 2019-10-24T01:18:16
| 2019-10-24T01:18:16
| 69,393,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 561
|
rd
|
samSourceType.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SamSource.R
\name{samSourceType}
\alias{samSourceType}
\title{SamSource data type accessor}
\usage{
samSourceType(x)
}
\arguments{
\item{x}{Any object for which \code{SamSource(x)} is defined.}
}
\value{
The type of access required for the sam file the data was taken from,
as a character vector. Can only be "file" currently. "url" is a likely
future extension.
}
\description{
Accessor to extract the type of access required for the sam file the data was
taken from resides.
}
|
abe9203ffb62214dfe1b59b865b51693ec61a3c3
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/man/torch_stft.Rd
|
6669db36f264154aaaeccb474ff9555e40a82b99
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| true
| 4,336
|
rd
|
torch_stft.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/wrapers.R
\name{torch_stft}
\alias{torch_stft}
\title{Stft}
\usage{
torch_stft(
input,
n_fft,
hop_length = NULL,
win_length = NULL,
window = NULL,
center = TRUE,
pad_mode = "reflect",
normalized = FALSE,
onesided = NULL,
return_complex = NULL
)
}
\arguments{
\item{input}{(Tensor) the input tensor}
\item{n_fft}{(int) size of Fourier transform}
\item{hop_length}{(int, optional) the distance between neighboring sliding window
frames. Default: \code{NULL} (treated as equal to \code{floor(n_fft / 4)})}
\item{win_length}{(int, optional) the size of window frame and STFT filter.
Default: \code{NULL} (treated as equal to \code{n_fft})}
\item{window}{(Tensor, optional) the optional window function.
Default: \code{NULL} (treated as window of all \eqn{1} s)}
\item{center}{(bool, optional) whether to pad \code{input} on both sides so
that the \eqn{t}-th frame is centered at time \eqn{t \times \mbox{hop\_length}}.
Default: \code{TRUE}}
\item{pad_mode}{(string, optional) controls the padding method used when
\code{center} is \code{TRUE}. Default: \code{"reflect"}}
\item{normalized}{(bool, optional) controls whether to return the normalized
STFT results Default: \code{FALSE}}
\item{onesided}{(bool, optional) controls whether to return half of results to
avoid redundancy Default: \code{TRUE}}
\item{return_complex}{(bool, optional) controls whether to return complex tensors
or not.}
}
\description{
Stft
}
\section{Short-time Fourier transform (STFT). }{
Short-time Fourier transform (STFT).
\if{html}{\out{<div class="sourceCode">}}\preformatted{Ignoring the optional batch dimension, this method computes the following
expression:
}\if{html}{\out{</div>}}
\deqn{
X[m, \omega] = \sum_{k = 0}^{\mbox{win\_length-1}}%
\mbox{window}[k]\ \mbox{input}[m \times \mbox{hop\_length} + k]\ %
\exp\left(- j \frac{2 \pi \cdot \omega k}{\mbox{win\_length}}\right),
}
where \eqn{m} is the index of the sliding window, and \eqn{\omega} is
the frequency that \eqn{0 \leq \omega < \mbox{n\_fft}}. When
\code{onesided} is the default value \code{TRUE},
\if{html}{\out{<div class="sourceCode">}}\preformatted{* `input` must be either a 1-D time sequence or a 2-D batch of time
sequences.
* If `hop_length` is `NULL` (default), it is treated as equal to
`floor(n_fft / 4)`.
* If `win_length` is `NULL` (default), it is treated as equal to
`n_fft`.
* `window` can be a 1-D tensor of size `win_length`, e.g., from
`torch_hann_window`. If `window` is `NULL` (default), it is
treated as if having \eqn{1} everywhere in the window. If
\eqn{\mbox{win\_length} < \mbox{n\_fft}}, `window` will be padded on
both sides to length `n_fft` before being applied.
* If `center` is `TRUE` (default), `input` will be padded on
both sides so that the \eqn{t}-th frame is centered at time
\eqn{t \times \mbox{hop\_length}}. Otherwise, the \eqn{t}-th frame
begins at time \eqn{t \times \mbox{hop\_length}}.
* `pad_mode` determines the padding method used on `input` when
`center` is `TRUE`. See `torch_nn.functional.pad` for
all available options. Default is `"reflect"`.
* If `onesided` is `TRUE` (default), only values for \eqn{\omega}
in \eqn{\left[0, 1, 2, \dots, \left\lfloor \frac{\mbox{n\_fft}}{2} \right\rfloor + 1\right]}
are returned because the real-to-complex Fourier transform satisfies the
conjugate symmetry, i.e., \eqn{X[m, \omega] = X[m, \mbox{n\_fft} - \omega]^*}.
* If `normalized` is `TRUE` (default is `FALSE`), the function
returns the normalized STFT results, i.e., multiplied by \eqn{(\mbox{frame\_length})^{-0.5}}.
Returns the real and the imaginary parts together as one tensor of size
\eqn{(* \times N \times T \times 2)}, where \eqn{*} is the optional
batch size of `input`, \eqn{N} is the number of frequencies where
STFT is applied, \eqn{T} is the total number of frames used, and each pair
in the last dimension represents a complex number as the real part and the
imaginary part.
}\if{html}{\out{</div>}}
}
\section{Warning}{
This function changed signature at version 0.4.1. Calling with the
previous signature may cause error or return incorrect result.
}
|
a111e0b99b7ca409f163148fd7060cafc4cae0ca
|
17cdd6bd71003a5087473808e3ffa85849f86e0c
|
/Time Series Analysis and Forecasting.R
|
41e2406797dd7f2013d715ede1200a3bf356ca77
|
[] |
no_license
|
JackTomada95/Time-Series-Analysis-with-R
|
f0d531820b01b821bce678ad639e7641b7b35e8a
|
a61f549bf125a640722f33fb97a8bcab5c613257
|
refs/heads/master
| 2022-11-16T02:02:46.795444
| 2020-07-19T14:12:38
| 2020-07-19T14:12:38
| 279,861,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,872
|
r
|
Time Series Analysis and Forecasting.R
|
# Time-series modelling walkthrough: overview of model families, seasonal
# decomposition, simple moving averages and exponential smoothing (ETS),
# demonstrated on the built-in nottem, AirPassengers and lynx datasets.
library(seasonal)
# There is a variety of quantitative models to use for time series; some of
# them are over 100 years old. There are both linear and non-linear models;
# linear models are the most widely used.
# LINEAR MODELS
########### 1. simple linear models
# naive, mean and drift method...
# ... if there is a pattern (seasonality, trends) they are not good,
# but they do a good job modelling random data (so they can be used as
# benchmarks for more complex models).
############ 2. exponential smoothing
# puts more weight on recent observations and works well on seasonal data.
############ 3. arima
# explains patterns in the data based on autoregression; can be used for
# seasonal data as well.
############ 4. seasonal decomposition
# requires the dataset to be of a seasonal nature, with a minimum number of
# seasonal cycles. Not as popular as arima and exponential smoothing.
# NON-LINEAR MODELS:
############ 5. Neural Networks (very important)
############ 6. SVM (limited)
############ 7. Clustering (kml package)
# Exponential smoothing and ARIMA are very flexible and work well with
# univariate time series.
###############################################################################
# Seasonal decomposition intro:
# if there is a seasonal component, there are many models to choose from, but
# seasonal decomposition only makes sense when a seasonal component exists.
# It decomposes a seasonal time series into its components: trend,
# seasonality and remainder, using either the additive method (components are
# summed; constant seasonal component) or the multiplicative method
# (components are multiplied).
# Drawbacks of seasonal decomposition:
# 1. NA values at the series edges,
# 2. slow to catch sudden changes (fast rises in the data),
# 3. assumes constant seasonality (a strong assumption).
# Alternative methods exist for all of these.
### Example: decomposing a time series.
plot(nottem)
# nottem has stable seasonality and no trend, so an additive model describes
# it well. Whenever decompose() is used, the series must carry a frequency.
frequency(nottem) # 12 observations per year (monthly data)
length(nottem) / 12 # about 20 years of data
decompose(nottem, type = "additive")
# x is the original series; trend, seasonal and random (noise) components
# follow, with NAs at the beginning and end of the trend.
plot(decompose(nottem, type = "additive"))
# Interpretation:
# no trend (the mean is constant) and clear, constant seasonality.
library(ggplot2)
library(forecast)
autoplot(decompose(nottem, type = "additive"))
plot(stl(nottem, s.window = "periodic"))
dec <- stl(nottem, s.window = "periodic") # same decomposition, stored column-wise
# Seasonal adjustment:
# extract the seasonally adjusted series by subtracting the seasonal part.
mynottem <- decompose(nottem, "additive")
class(mynottem)
nottemadjusted <- nottem - mynottem$seasonal
plot(nottemadjusted) # no more seasonality; looks like a random series, as intended
# and there is no trend either
plot(mynottem$seasonal)
# stlf() forecasts a decomposed time series in one step.
plot(stlf(nottem, method = "arima"))
# Decomposition exercise
myts <- AirPassengers
autoplot(myts)
frequency(myts)
# AirPassengers has both a trend AND seasonality.
mymodel1 <- decompose(myts, type = "additive")
mymodel2 <- decompose(myts, type = "multiplicative")
plot(mymodel1) # pattern left in the remainder (not a good sign)
plot(mymodel2) # remainder looks more random, though some pattern survives; more information extracted
adjusted.ap <- myts - mymodel1$seasonal
autoplot(adjusted.ap) # NOTE(review): original claimed plot(mymodel1$trend - mymodel1$random) is the same -- verify
# A pattern remains; exponential smoothing could work better.
autoplot(myts)
################## simple moving average
# Smoothing a time series pulls the data toward the average and reduces the
# highs and lows, i.e. it decreases the impact of extreme values.
# The classic smoother is the simple moving average (used in science,
# trading, etc.): an SMA of 3 averages three successive time periods
# (periods = successive values in a time series).
library(TTR)
x <- c(1,2,3,4,5,6,7)
SMA(x, n = 3)
lynx.smooth <- SMA(lynx, n = 9)
plot(lynx)
plot(lynx.smooth)
# This method is only useful on non-seasonal data; it is very handy for
# extracting the general trend and removing white noise.
################## exponential smoothing with ETS
# A very popular framework that models a time series with three components:
# Error - additive, multiplicative (multiplicative only if all x > 0)
# Trend - none, additive, multiplicative
# Seasonality - none, additive, multiplicative
# Components can be mixed (e.g. additive trend with multiplicative
# seasonality). Exponential smoothing puts more weight on recent
# observations, which makes sense for most scenarios.
# ses() simple exponential smoothing (no trend, no seasonality)
# holt() for datasets with a trend but without seasonality;
#   argument "damped" flattens the trend over time
# hw() Holt-Winters smoothing for trend + seasonal data, plus a damping parameter
# AUTOMATED MODEL SELECTION: ets() from the forecast library selects a model
# by information criterion; models can also be customized.
# Each component (error, trend, seasonality) has a smoothing coefficient that
# tells whether the model relies mostly on recent data (coefficient ~ 1) or
# weighs all observations similarly (coefficient ~ 0):
# 1. alpha Initial level
# 2. beta Trend
# 3. gamma Seasonality
# 4. phi Damping parameter
library(forecast)
# nottem is seasonal with no trend
plot(nottem)
etsmodel <- ets(nottem)
etsmodel
# note: ETS(A,N,A), exactly as expected (no trend); a smooth model
plot(nottem, lwd = 2)
lines(etsmodel$fitted, col = "red")
# fitted values track the original closely
autoplot(forecast(etsmodel, h = 12))
autoplot(forecast(etsmodel, h = 12, level = 95))
# Manually setting the ets model (multiplicative, automatic, multiplicative):
etsmodel.man <- ets(nottem, model = "MZM")
etsmodel.man
# the AIC is higher, hence this model is worse than the automatic one
plot(nottem, lwd = 2)
lines(etsmodel$fitted, col = "red")
lines(etsmodel.man$fitted, col = "green")
|
16d87c1915f3bf70e24bb7597fb9b23b2d1e14cc
|
0f380dcb3509961dbbcf59f8b2dfb1d70f92e993
|
/R/localRegression.R
|
6f30c461a749547b5c1e1fc4d4777f545df1f62e
|
[] |
no_license
|
ttriche/regulatoR
|
dced0aa8c0f60b191c38d106b333f3dda84317fa
|
d7e6b00ef1514423fdf8ca32a73eebc715642161
|
refs/heads/master
| 2016-09-10T00:04:25.637227
| 2013-02-26T23:14:05
| 2013-02-26T23:14:05
| 4,615,162
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,235
|
r
|
localRegression.R
|
## sparse correlation weighted regression for regulation
## this is kind of the heart of the entire package
##
## setup: y ~ (3 * mean(c(x2, x3, x4, x5, x6))) - 2*x15 + 5*x20
## X is a 100xN matrix with only the above predictors, the rest noise
## x2, x3, x4, x5, and x6 are highly multicollinear and within 100bp
## x15 is an enhancer locus not correlated w/others and far away
## x20 is the exonic mean copy number, relative to normal.
##
## method: bin using FeatureBlocks, add 'TypeI' and 'TypeII' interactions, CNV?
## select predictors using model <- FWDselect::qselection(crit='R2')
## fit a robust lm on model$selection[ which.max(model$R2) ]
## fit a robust lm on the intercept (y ~ 1)
## obtain a p-value for the improvement from including coefficients
## this p-value is then adjusted genomewide to rank 'silencing' events
##
## example: fit = localRegression(LAML, LAML.gene, 'ECDH', 'bin', 100)
##
## NOTE(review): this function is an unimplemented stub -- it attaches its
## dependencies and then falls through (the empty `if` branch yields NULL),
## so none of the pipeline described in the header above is executed yet.
localRegression <- function(mSE, eSE, gene, how='bin', binSize=100, inBins=F) {
  # NOTE(review): require() returns FALSE instead of erroring when a package
  # is missing; library() would fail fast -- confirm which is intended.
  require(MASS)
  require(robust)
  require(FWDselect)
  # inBins == FALSE means the methylation data has not been binned yet.
  if(!inBins) {
    ## create featureBlocks for the provided gene from the provided 5mC SE
    ## (not yet implemented)
  }
}
|
820740c8b11b7b8365c6c73b511a1c7ed6941ed3
|
964d62f637341338694889099fcb8cffd5b1ce35
|
/workout1/code/make-teams-table.R
|
80641b34884b4cb805a5d4868e8085f0fa389daf
|
[] |
no_license
|
baraniscen/hw-stat133
|
00b4ded1dc3327a5905b7d7c8c7fbfaca34b3bdf
|
33d81696e6543056e301d24751075e76220d8faa
|
refs/heads/master
| 2020-03-30T13:37:20.778039
| 2018-10-02T16:44:12
| 2018-10-02T16:44:12
| 151,280,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,138
|
r
|
make-teams-table.R
|
#---
# title: "Data preparation"
#
# Reads the raw NBA 2018 player data, cleans it, derives per-player
# statistics (including efficiency), and exports per-team summaries to
# efficiency-summary.txt, teams-summary.txt and nba2018-teams.csv.
#---

# dplyr provides mutate(), group_by() and summarise() used below.
library(dplyr)

# NOTE(review): the original script called setwd() with no arguments, which
# is a runtime error. Rely on a path relative to the project root instead of
# changing the working directory.
dataset_workout <- read.csv("desktop/hw-stat133/workout1/data/nba2018.csv",
                            header = TRUE)
str(dataset_workout)

# Years of experience as integers.
dataset_workout$experience <- as.integer(dataset_workout$experience)

# Salaries expressed in millions of dollars.
dataset_workout$salary <- dataset_workout$salary * 1e-6

# Expand the position codes to descriptive labels (only the five known codes
# are renamed; any other level is left untouched).
position_labels <- c(
  C  = "center",
  PF = "power_fd",
  PG = "point_guard",
  SF = "small_fwd",
  SG = "shoot_guard"
)
for (code in names(position_labels)) {
  levels(dataset_workout$position)[levels(dataset_workout$position) == code] <-
    position_labels[[code]]
}

# Derived per-player statistics.
# NOTE(review): `efficiency` uses a `total_rebounds` column that must already
# exist in the raw data -- the freshly computed `rebounds` column is NOT used
# in the formula. Confirm against the data dictionary that this is intended.
dataset_workout <- mutate(
  dataset_workout,
  missed_fg  = field_goals_atts - field_goals,
  missed_ft  = points1_atts - points1,
  rebounds   = off_rebounds + def_rebounds,
  efficiency = (points + total_rebounds + assists + steals + blocks -
                  missed_fg - missed_ft - turnovers) / games
)

# Per-player efficiency summary, captured in a text file.
sink("efficiency-summary.txt")
print(summary(dataset_workout$efficiency))
sink()

# Team-level totals. Computed ONCE and reused for both outputs (the original
# script recomputed the identical summarise() call three times).
summary_teams <- summarise(
  group_by(dataset_workout, team),
  experience_total   = sum(experience),
  total_salary       = sum(salary),
  points_3           = sum(points3),
  points_2           = sum(points2),
  points_1           = sum(points1),
  total_points       = sum(points1) + sum(points2) + sum(points3),
  total_off_rebounds = sum(off_rebounds),
  total_def_rebounds = sum(def_rebounds),
  total_assists      = sum(assists),
  total_steals       = sum(steals),
  total_blocks       = sum(blocks),
  total_turnovers    = sum(turnovers),
  total_fouls        = sum(fouls),
  total_efficiency   = sum(efficiency)
)

sink("teams-summary.txt")
print(summary_teams)
sink()

write.csv(summary_teams, "nba2018-teams.csv")
|
2a7e3819e8e6daab143e50338d88852ea21cda0a
|
3503adc96d77bafbe1f1b4ed7c03891eaf0cd93f
|
/roster.R
|
9bd427ce4683692d71c60eddb4b77cbcbc7fd1db
|
[] |
no_license
|
vibhuk10/NFLplayerstats
|
2b81fc0ca0d7bba59eb9842cb9b4321845b76424
|
21aff5181bad5f11dc1fc47382936844a9e9a9fa
|
refs/heads/master
| 2023-02-26T18:47:32.688284
| 2021-02-09T06:19:26
| 2021-02-09T06:19:26
| 284,153,127
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,849
|
r
|
roster.R
|
# Build the 2019 NFL roster (player id -> player name) from the play-by-play
# data `pbp_19` and write it to data-raw/NFL_full_roster_2019.csv.
#
# Every player role in pbp_19 exposes a pair of columns named
# <role>_player_id / <role>_player_name. The original script repeated the
# same select/na.omit/distinct/rename pipeline 23 times and, from the 4th
# copy onwards, swapped the rename() targets (e.g.
# `rename(name = lateral_receiver_player_id, id = lateral_receiver_player_name)`),
# so most partial rosters carried ids in the `name` column and vice versa.
# The helper below fixes that bug and removes the duplication.

# Unique (id, name) pairs for one player-role column pair.
#
# df:   play-by-play data frame containing <role>_player_id/_player_name.
# role: column-name prefix, e.g. "passer".
#
# Returns a data frame with columns `id` and `name`, one row per player id.
role_roster <- function(df, role) {
  id_col <- paste0(role, "_player_id")
  name_col <- paste0(role, "_player_name")
  df %>%
    select(all_of(c(id_col, name_col))) %>%
    na.omit() %>%
    distinct(.data[[id_col]], .keep_all = TRUE) %>%
    rename(id = all_of(id_col), name = all_of(name_col))
}

# All player-role prefixes present in the play-by-play data (same 23 roles
# as the original rosters 01-23, in the same order).
roles <- c(
  "passer", "rusher", "receiver", "lateral_receiver", "interception",
  "punt_returner", "kickoff_returner", "punter", "kicker", "blocked",
  "tackle_for_loss_1", "qb_hit_1", "qb_hit_2", "forced_fumble_player_1",
  "solo_tackle_1", "solo_tackle_2", "assist_tackle_1", "assist_tackle_2",
  "pass_defense_1", "pass_defense_2", "fumbled_1", "fumble_recovery_1",
  "penalty"
)

# Stack the per-role rosters; duplicates across roles are resolved below.
roster <- bind_rows(lapply(roles, function(role) role_roster(pbp_19, role)))

# Keep the first occurrence of each player id.
full_roster_19 <-
  roster %>%
  distinct(id, .keep_all = TRUE)

full_roster_19 %>% write_csv("data-raw/NFL_full_roster_2019.csv")
|
7b4ed486141cd905d1aef88c2332b004814a45f3
|
b725ae79645bb08446f3c7eb4c95da47b2627ad3
|
/R/utilities.R
|
bd9d44ee4800b9ce1b974c84d44a6a9a4fc1e43d
|
[] |
no_license
|
cbig/zonator
|
cf1692e9d210d96317164c94dfc902464b3e361c
|
bfa5a27689d853ef824634a5a6be52f9b3f54c24
|
refs/heads/master
| 2021-01-18T22:08:44.457702
| 2020-05-18T18:29:08
| 2020-05-18T18:29:08
| 8,728,996
| 12
| 6
| null | 2018-04-11T06:51:40
| 2013-03-12T13:57:04
|
R
|
UTF-8
|
R
| false
| false
| 19,272
|
r
|
utilities.R
|
# This file is a part of zonator package
# Copyright (C) 2012-2014 Joona Lehtomaki <joona.lehtomaki@gmai.com>. All rights
# reserved.
# This program is open source software; you can redistribute it and/or modify
# it under the terms of the FreeBSD License (keep this notice):
# http://en.wikipedia.org/wiki/BSD_licenses
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#' Check and sanitize feature/group names.
#'
#' Verifies that a vector of names contains only unique, non-empty items
#' suitable for use as column names. Duplicated items are made unique and
#' internal whitespace is replaced with "." (both with a warning); input of
#' the wrong type or with empty items raises an error.
#'
#' @param x Character or numeric vector of candidate names.
#'
#' @return Character vector of valid names, same length as \code{x}.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
check_names <- function(x) {
  # Only character or numeric input is supported.
  if (!(is.character(x) || is.numeric(x))) {
    stop("Names vector must be either character of numeric")
  }
  # Duplicated items cannot serve as column names; make them unique.
  if (anyDuplicated(x) != 0) {
    warning("All feature/group names are not unique, creating unique names")
    x <- make.names(x, unique=TRUE)
  }
  # Empty strings are never acceptable as names.
  if (any(x == "" | nchar(x) == 0)) {
    stop("No item in names vector can be empty")
  }
  # Whitespace inside a name would break column-name usage; swap for dots.
  if (any(grepl("\\s", x))) {
    warning("Name items contain whitespaces, replacing with '.'")
    x <- gsub("\\s", ".", x)
  }
  as.character(x)
}
#' A function to deal with potentially relative paths.
#'
#' Checks if a path can be resolved (i.e. whether it exists). An additional
#' parameter \code{parent.path} can be provided, in which case \code{x} is
#' appended to it and the concatenated path is checked for existence. Any
#' "../" tokens in \code{x} are consumed by stripping the corresponding
#' number of trailing components off \code{parent.path}. If the path cannot
#' be resolved, raise an error.
#'
#' @param x Character string path (absolute or relative; "~" and "\\" are
#'   normalized first).
#' @param parent.path Character string root path against which a relative
#'   \code{x} is resolved.
#' @param require.file Logical indicating if a file is required for return or
#'   if an existing parent folder is enough
#'
#' @return A cleaned character string path that exists on disk; an error is
#'   raised otherwise (the function never returns NULL despite the original
#'   header's wording).
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
check_path <- function(x, parent.path=NULL, require.file=FALSE) {
  # Expand "~" and environment-style prefixes.
  x <- path.expand(x)
  # Normalize Windows separators: replace "\\" with "/".
  x <- gsub("\\\\", "/", x)
  # Deal with potentially relative paths in x. Without a parent.path the
  # "../" tokens make no difference, so this branch is skipped.
  if (grepl("\\.{2}/", x) && !is.null(parent.path)) {
    match <- gregexpr("\\.{2}/", x)[[1]]
    # How many '../' tokens are there in x?
    dot.tokens <- length(attr(match, "match.length"))
    # NOTE(review): this strips "../" occurrences ANYWHERE in x, not just
    # leading ones -- confirm callers never pass paths with interior "..".
    x <- gsub("\\.{2}/", "", x)
    # Climb parent.path upward by the number of "../" tokens found.
    dir.elements <- unlist(strsplit(parent.path, .Platform$file.sep))
    parent.path <- paste(dir.elements[1:(length(dir.elements) - dot.tokens)],
                         collapse = .Platform$file.sep)
  }
  # Is x a valid file path on its own?
  if (file.exists(x)) {
    return(x)
  } else if (!is.null(parent.path)) {
    path <- file.path(parent.path, x)
    # Is x a valid file path when combined with the parent.path?
    if (file.exists(path)) {
      return(path)
    } else {
      # Fall back to the parent path alone, unless a file is required.
      if (file.exists(parent.path) && !require.file) {
        return(parent.path)
      } else {
        stop("Path ", file.path(parent.path, x), " cannot be resolved.")
      }
    }
  } else {
    # parent.path is NULL here, so only x appears in the message.
    stop("Path ", x, parent.path, " cannot be resolved.")
  }
}
#' Normalize whitespace in a string.
#'
#' Collapses every run of whitespace to a single space and strips leading
#' and trailing whitespace.
#'
#' @param x Character string.
#'
#' @return Character string with normalized whitespace.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
clean_str <- function(x) {
  # First squeeze interior runs of whitespace to a single space, ...
  collapsed <- gsub("\\s+", " ", x)
  # ... then drop whatever whitespace remains at either end.
  gsub("^\\s+|\\s+$", "", collapsed)
}
#' Find out the number of decimal places in a number.
#'
#' Original implementation from https://stackoverflow.com/questions/5173692/how-to-return-number-of-decimal-places-in-r
#'
#' @note R usually restricts the number of decimals to 9 in printing etc.
#' Unless \code{true_number = TRUE}, the count is capped at 9 and a warning
#' is given.
#'
#' @param x Float or double numeric number (vectorized over \code{x}).
#' @param true_number Logical setting whether the true number (see notes) of
#'   decimal places is returned.
#'
#' @return Integer number of decimal places per element of \code{x}.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
decimalplaces <-
  Vectorize(function(x, true_number = FALSE) {
    # Whole numbers have no decimal places at all.
    if ((x %% 1) == 0) {
      return(0)
    }
    # Drop trailing zeros from the printed form, then count the characters
    # after the decimal point.
    trimmed <- sub('0+$', '', as.character(x))
    n_dec <- nchar(strsplit(trimmed, ".", fixed = TRUE)[[1]][[2]])
    # The printed representation caps out at 9 decimals unless the caller
    # explicitly asks for the true count.
    if (!true_number && n_dec > 9) {
      n_dec <- 9
      warning("Number of decimal places more than 9 but true_number set to FALSE")
    }
    n_dec
  },
  c("x"), USE.NAMES = TRUE)
#' Transform an absolute path to relative path in relation to given location
#'
#' @note \code{path} may also be relative (starting with ".."), in which
#' case it is first made absolute against \code{org_relative_to}.
#'
#' @param path Character string path.
#' @param org_relative_to Character string path to which \code{path} is originally
#'   relative to.
#' @param new_relative_to Character string path to which \code{path} is supposed to
#'   be relative to.
#'
#' @return Character string relative file path.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
file_path_relative_to <- function(path, org_relative_to, new_relative_to) {
  # A relative input ("../...") is first anchored to org_relative_to's
  # directory and normalized (mustWork = FALSE: the file need not exist).
  if (startsWith(path, "..")) {
    path <- normalizePath(file.path(dirname(org_relative_to), path), mustWork = FALSE)
  }
  # Split both directory parts into their path components.
  path_comps <- unlist(strsplit(dirname(path), split = .Platform$file.sep))
  relative_to_comps <- unlist(strsplit(dirname(new_relative_to), split = .Platform$file.sep))
  # Element-wise comparison of the component vectors marks the shared prefix.
  # NOTE(review): when the two paths have different depths this comparison
  # recycles the shorter vector (the warning is deliberately suppressed) --
  # confirm that differing depths are handled as intended by callers.
  suppressWarnings(diff <- path_comps == relative_to_comps)
  # The file name itself is carried over unchanged.
  file_path_base <- basename(path)
  # One ".." per component of new_relative_to's directory that is NOT shared
  # with path's directory.
  rel_path <- paste0(rep("..", sum(!diff[1:length(relative_to_comps)])),
                     collapse = .Platform$file.sep)
  # The non-shared tail of path's directory becomes the forward part.
  dir_path <- paste0(path_comps[!diff[1:length(path_comps)]],
                     collapse = .Platform$file.sep)
  # Assemble "../.."-style prefix + directory tail + file name.
  rel_file_path <- file.path(rel_path, dir_path, file_path_base)
  return(rel_file_path)
}
#' Re-implementation of \code{\link{file_path_sans_ext}} in \code{tools}.
#' Unlike the original implementation, this version copes with "." just
#' before the file extension.
#'
#' @param x Character vector giving file paths.
#' @param compression Logical: should compression extension '.gz', '.bz2' or
#'   '.xz' be removed first?
#'
#' @return File path without the file extension.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
file_path_sans_ext <- function(x, compression = FALSE) {
  paths <- x
  # Optionally peel off a compression suffix before the real extension.
  if (compression) {
    paths <- sub("[.](gz|bz2|xz)$", "", paths)
  }
  # Strip the final ".<alnum>" extension; paths without one pass through.
  sub("([^.]+.+)\\.[[:alnum:]]+$", "\\1", paths)
}
#' Get all Zonation run configuration parameters.
#'
#' This set of parameters is everything accepted by Zonation, as defined by
#' the template dat-file shipped with the zonator package.
#'
#' @note Parameters are hard-coded into this package and know nothing of
#' potential future developments with Zonation.
#'
#' @param just_names Logical indicating if only the parameter names should be
#'   returned.
#'
#' @return Character vector of parameter names, or a list in
#'   (parameter = section) form.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
zparameters <- function(just_names = FALSE) {
  # Only canonical parameters, as listed in the bundled template, are accepted.
  template_file <- system.file("extdata/template.dat", package = "zonator")
  if (just_names) {
    # Just the bare parameter names, sections dropped.
    return(names(leaf_tags(read_dat(template_file), omit_sections = TRUE)))
  }
  tags <- leaf_tags(read_dat(template_file), omit_sections = FALSE)
  # Names come back as "section.param_name"; rebuild them as a
  # (param_name = section) list.
  name_pairs <- strsplit(names(tags), "\\.")
  params <- list()
  for (pair in name_pairs) {
    params[[pair[2]]] <- pair[1]
  }
  params
}
#' Find all the leaf tags in a potentially nested list.
#'
#' The generic form of a list is tag = value; this collects all leaf tags.
#' Nested tags are joined with "." (e.g. "section.param") unless
#' \code{omit_sections} is set, in which case only the last name component
#' is kept.
#'
#' @param x List to be searched.
#' @param omit_sections Logical indicating if sections should be omitted from
#'   vector names.
#'
#' @return Named vector of leaf values (tags in the names).
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#' @examples
#'  l <- list("a" = 1, "b" = list("c" = 3, "d" = 4), "e" = 5)
#'  leaf_tags(l)
#'
leaf_tags <- function(x, omit_sections = FALSE) {
  if (!is.list(x)) {
    stop("Function only accepts lists")
  }
  # rapply flattens the list; nested names come out dot-separated.
  tags <- rapply(x, function(item) item[1])
  if (omit_sections) {
    # Keep only the final component of each dot-separated name.
    names(tags) <- vapply(
      names(tags),
      function(nm) {
        parts <- strsplit(nm, "\\.")[[1]]
        parts[length(parts)]
      },
      character(1),
      USE.NAMES = FALSE
    )
  }
  tags
}
# Split a whitespace-delimited line into its string tokens.
line_as_string <- function(x) {
  unlist(strsplit(x, "\\s+"))
}

# Split a whitespace-delimited line and coerce the tokens to numeric.
line_as_numeric <- function(x) {
  as.numeric(line_as_string(x))
}
#' Map vector to actual column indexes.
#'
#' Compare a vector of column names or indexes against another vector which is
#' known to be true. Names/indexes not found in \code{y} are dropped with a
#' warning.
#'
#' @param x Character or numeric vector of possible matches.
#' @param y Character or numeric vector of true values.
#'
#' @return A numeric vector of matched column indexes, or \code{NULL} if no
#'   valid character matches remain.
#'
#' @keywords zonation results
#' @author Joona Lehtomaki <joona.lehtomaki@@gmail.com>
#'
#' @export
#'
map_indexes <- function(x, y) {
  if (is.character(x)) {
    if (!all(x %in% y)){
      warning(paste("Column names", paste(x[!x %in% y], collapse=", "),
                    "not found in curves header"))
      x <- x[x %in% y]
      if (length(x) == 0) {
        return(NULL)
      }
    }
    # Translate each remaining name into its position in y.
    inds <- sapply(x, function(xx) {which(xx == y)})
  } else if (is.numeric(x)) {
    inds <- x
    if (any(inds < 1)) {
      warning(paste("Column indexes", paste(inds[which(inds < 1)], collapse=", "),
                    "smaller than 1"))
      # BUGFIX: filter the running result (inds), not the original x.
      # Previously the upper-bound filter below re-used x and silently
      # reintroduced the values < 1 that were removed here.
      inds <- inds[which(inds >= 1)]
    }
    ncols <- length(y)
    if (any(inds > ncols)) {
      warning(paste("Column indexes", paste(inds[which(inds > ncols)], collapse=", "),
                    "greater than ncol"))
      inds <- inds[which(inds <= ncols)]
    }
  }
  # as.numeric also strips the names sapply attaches in the character branch.
  return(as.numeric(inds))
}
#' Re-calculate group curves data.
#'
#' When results grouping is changed group-specific curves data has to be
#' re-calculated. Normally group curves file is produced by Zonation based on
#' the groupings provided by the user. Same information can almost completely
#' (except for ext-values) be calculated afterwards from the feature-specific
#' curves files.
#'
#' This function calculates the following stats for \code{\link{Zvariant}}
#' object based on a vector of new group IDs:
#'
#' \describe{
#'   \item{\code{min}:}{Minimum value of representation on each iteration among
#'   features within a group.}
#'   \item{\code{mean}:}{Mean value of representation on each iteration among
#'   features within a group.}
#'   \item{\code{max}:}{Maximum value of representation on each iteration among
#'   features within a group.}
#'   \item{\code{w.mean}:}{Weighted (based on feature weight) mean value of
#'   representation on each iteration among features within a group.}
#' }
#'
#' @note Current implementation does not calculate values for \code{ext2}
#' (extinction risk). Column \code{ext2} is retained in the returned data
#' frame for compatibility, but column will be populated with NAs.
#'
#' @param x Data frame of feature specific representation levels.
#' @param weights numeric vector for feature specific weights
#' @param group.ids numeric vector of new group codes. Number of groups must
#'   match with columns in \code{x}.
#'
#' @return \code{ZGroupCurvesDataFrame} with new group statistics.
#'
#' @keywords zonation results
#' @author Joona Lehtomaki <joona.lehtomaki@@gmail.com>
#'
#' @export
#'
regroup_curves <- function(x, weights, group.ids) {
  # Assume standard Zonation feature-specific curves file structure, which
  # means that feature data starts from column 8.
  # NOTE(review): columns 1-7 are presumably pr_lost, cost and the five
  # overall summary stats of a curves file — confirm against read_curves().
  features <- x[, 8:ncol(x)]
  # There should be as many weights as is the length of the provided group.ids
  if (length(weights) != length(group.ids)) {
    stop(paste0("Number of weights (", length(weights), ") and group ids (",
                length(group.ids), ") differs"))
  }
  # There should be as many features as is the length of the provided group.ids
  if (ncol(features) != length(group.ids)) {
    stop(paste0("Number of features (", ncol(features), ") and group ids (",
                length(group.ids), ") differs"))
  }
  # Get the unique group ids and loop over all ids. Naming convention for the
  # group stats is:
  # "min.g1"    "mean.g1"   "max.g1"    "w.mean.g1" "ext2.g1"
  ids <- unique(group.ids)
  groups.list <- list()
  for (id in ids) {
    group.names <- paste0(c("min.group", "mean.group", "max.group",
                            "w.mean.group", "ext2.group"), id)
    group.data <- features[, which(group.ids == id)]
    group.weights <- weights[which(group.ids == id)]
    # Calculate row-wise stats, but only if there are more than 1 feature in
    # the group
    if (is.data.frame(group.data)) {
      # Multi-feature group: aggregate across the group's feature columns,
      # one value per iteration (row).
      group.df <- data.frame("min"=apply(group.data, 1, min),
                             "mean"=apply(group.data, 1, mean),
                             "max"=apply(group.data, 1, max),
                             "w.mean"=apply(group.data, 1,
                                            weighted.mean, w=group.weights))
    } else if (is.vector(group.data)) {
      # Single-feature group: [ dropped to a vector, so every stat equals
      # the feature itself.
      group.df <- data.frame("min"=group.data,
                             "mean"=group.data,
                             "max"=group.data,
                             "w.mean"=group.data)
    }
    # ext2 (extinction risk) cannot be recalculated post hoc; keep the
    # column for schema compatibility, filled with NA (see @note above).
    group.df$ext2 <- NA
    names(group.df) <- group.names
    groups.list[[as.character(id)]] <- group.df
  }
  # pr_lost and cost should be identical for curves and group curves
  regrouped.x <- cbind(x[, c(1, 2)], do.call("cbind", groups.list))
  # Previous will prefix column names with "id." prefix, get rid of it
  colnames(regrouped.x) <- gsub("^[0-9]+\\.", "", colnames(regrouped.x))
  # Wrap in the package's S4 class; is.group flags every column except
  # the first two (pr_lost, cost) as group-statistic columns.
  regrouped.x <- new("ZGroupCurvesDataFrame", regrouped.x,
                     is.group = c(rep(FALSE, 2), rep(TRUE, ncol(regrouped.x) - 2)))
  return(regrouped.x)
}
#' Requires a given package and if not present installs and loads it.
#'
#' @param package Character name of a package.
#' @param ... Additional arguments passed on to \code{\link{install.packages}}.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#'
#' @importFrom utils install.packages
#'
#' @export
#'
require_package <- function(package, ...) {
  # require() returns FALSE (with a warning) when the package is missing;
  # silence that warning since absence is an expected, handled case here.
  is_loaded <- suppressWarnings(require(package, character.only = TRUE,
                                        quietly = TRUE))
  if (!is_loaded) {
    # Name of the outermost calling function, used only for the message.
    parent.function <- sys.calls()[[1]][1]
    message(paste("Function ", parent.function, " requires package: ", package,
                  ". Package not found, installing...", sep=""))
    install.packages(package, ...) # Install the packages
    require(package, character.only=TRUE) # Remember to load the library after installation
  }
}
#' Get the directory of Zonation tutorial.
#'
#' @return Character path to the Zonation tutorial directory.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#'
get_tutorialdir <- function() {
  # The tutorial location lives in the package-level options.
  opts <- get_options()
  opts$tutorial.dir
}
#' Get subset of curves/group curves columns.
#'
#' Function gets a single column (feature/group/stat) from curves or group
#' curves. Useful for constructing melt-type data frames for plotting.
#'
#' @param x ZCurvesDataFrame or ZGroupCurvesDataFrame object
#' @param stat character string of the statistic used ['min', 'mean', 'max',
#'   'w.mean', 'ext2'].
#' @param name character name of a group/feature.
#' @param size numeric defining line width.
#' @param lty integer defining line type.
#'
#' @return data frame for a single column in curves / group curves data.
#'
#' @keywords internal
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#'
sub_curves <- function(x, stat, name, size=0.6, lty=1) {
  # Column naming convention in (group) curves data is "<stat>.<name>".
  column <- paste0(stat, '.', name)
  selected <- curves(x, cols = column, groups = TRUE)
  names(selected) <- c('pr_lost', 'value')
  selected$name <- name
  selected$stat <- stat
  # Coerce Zcurves to a plain data frame so rows can be rbound later.
  data.frame(selected)
}
#' Get unique group names.
#'
#' Method extracts group names directly from group curves data frame header
#' based on a hard-coded set of statistic prefixes.
#'
#' @param x data frame groups data.
#'
#' @return character vector of unique group names
#'
#' @keywords internal
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#'
unique_grp_names <- function(x) {
  # The first two columns (pr.lost and cost) are not group columns.
  candidate_names <- names(x)[-c(1, 2)]
  # Group names are arbitrary, so strip the known stat prefixes instead of
  # trying to parse the name itself.
  prefixes <- '(min\\.|mean\\.|max\\.|w\\.mean\\.|ext2\\.)'
  unique(gsub(prefixes, "", candidate_names))
}
#' Get various Zonation legends
#'
#' Zonation result rank rasters can be displayed in various color schemes.
#'
#' Each color scheme is a list with following items:
#'
#' \describe{
#'   \item{\code{values}:}{Value breaks in the rank priority map}
#'   \item{\code{labels}:}{Labels to be used in the map legend}
#'   \item{\code{colors}:}{Colors used for the value classes}
#' }
#'
#' Following color schemes are available:
#'
#' \enumerate{
#'   \item{"spectral"}
#'   \item{"BrBG"}
#' }
#'
#' @param x String character name for the color scheme.
#'
#' @return A list color scheme.
#'
#' @note Color schemes are stored in env \code{.options}.
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#' @export
#' @examples
#'  zlegend("spectral")
#'
zlegend <- function(x) {
  # Dispatch on the scheme name; unmatched names fall through to stop().
  switch(x,
         "spectral" = .options$z_colors_spectral,
         "BrBG" = .options$z_colors_BrBG,
         stop("No legend scheme ", x, " defined"))
}
|
e29ff17680f51c9b6fe53b2417d93994f0d67d27
|
1318b29d7b0f212ebe1a87145a13ee563ea094d8
|
/R/Nonpara.Two.Sample.R
|
645fb137194c2fb25f68c1678922e773b947e6c8
|
[] |
no_license
|
cran/TrialSize
|
73c3ff9086760334fa83d4608c111bb0ea32765b
|
314e951e9d33786b6a2883f7cd483984cb611243
|
refs/heads/master
| 2021-06-04T14:24:45.564491
| 2020-07-06T20:40:03
| 2020-07-06T20:40:03
| 17,693,970
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 166
|
r
|
Nonpara.Two.Sample.R
|
#' Sample size for a two-sample nonparametric (Wilcoxon rank-sum) comparison.
#'
#' Computes the per-group sample size required to detect a location shift with
#' the given type I/II error rates (normal-approximation formula).
#'
#' @param alpha Two-sided significance level.
#' @param beta  Type II error rate (power = 1 - beta).
#' @param k     Allocation ratio between the two groups.
#' @param p1    P(Y > X) under the alternative.
#' @param p2,p3 Pairwise tie/concordance probabilities used by the variance
#'   approximation.
#'
#' @return Numeric sample size (unrounded), returned visibly. The original
#'   assigned the result to a local variable as the last expression, which
#'   made the return value invisible for no benefit.
Nonpara.Two.Sample <-
function(alpha, beta, k, p1, p2, p3) {
  z_alpha <- qnorm(1 - alpha / 2)
  z_beta <- qnorm(1 - beta)
  # Numerator: squared sum of the null-variance and alternative-variance terms.
  numerator <- (z_alpha * sqrt(k * (k + 1) / 12) +
                  z_beta * sqrt(k^2 * (p2 - p1^2) + k * (p3 - p1^2)))^2
  numerator / (k^2 * (1 / 2 - p1)^2)
}
|
89ce4c6d275337d4537eccac4bbf417e4f6aa642
|
0c780d32356a4b3a2fefa9392ac545bd078de909
|
/R/get-perform.R
|
81972666ed9adbcc55d58699cfa453591e711579
|
[
"MIT"
] |
permissive
|
heavywatal/futurervpa
|
80fdeb533017eba10478d760fd9e15c1cd5b34d5
|
8fc6cfa6e831c2b6e8245b4eb83d2a769cc508e9
|
refs/heads/master
| 2020-04-02T03:56:01.315555
| 2018-10-21T09:20:12
| 2018-10-21T09:20:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,749
|
r
|
get-perform.R
|
#' Get performance statistics from a future-projection result.
#'
#' Summarises a future-projection object (\code{fout0}) into a one-row data
#' frame of performance measures: catch/biomass/SSB summaries, weighted mean
#' ages, proportions of large fish, "safety" frequencies for low catch and
#' low SSB, short-term cumulative catches and the year equilibrium is reached.
#'
#' @param fout0 Projection result object. NOTE(review): assumed to contain at
#'   least \code{waa} (3-d array), \code{vwcaa}, \code{vssb}, \code{multi} and
#'   \code{input$ABC.year} — confirm against the projection function.
#' @param Blimit SSB limit used when computing \code{lower.HSpoint}.
#' @param longyear Cap applied to the catch/SSB safety statistics.
#' @param smallcatch Fraction of mean catch defining a "small catch" year.
#' @param N Unused in this function body.
#' @param shortyear Horizons (years after ABC.year) for cumulative catch.
#' @param tmp.year Years used for the summary stats; defaults to all rows of
#'   \code{fout0$vwcaa}.
#'
#' @rdname get-perform
#' @export
get.perform <- function(fout0,Blimit=0,longyear=50,smallcatch=0.5,N=NULL,
                        shortyear=c(3,5,10),tmp.year=NULL){
  stat1 <- get.stat(fout0,eyear=0,hsp=Blimit,tmp.year=tmp.year)[c("catch.mean","catch.CV","biom.mean","biom.CV","ssb.mean","lower.HSpoint")]
  stat2 <- get.stat2(fout0,eyear=0,tmp.year=tmp.year)
  # Column names of stat2 encode "<quantity>-<stat>"; split them into columns.
  stat2 <- data.frame(t(as.data.frame(strsplit(colnames(stat2),"-"))),value=as.numeric(stat2))
  rownames(stat2) <- NULL
  # Weighted mean age & composition based on weight-at-age (waa)
  xx <- subset(stat2,X1=="TB" & X2=="MA")
  nage <- sum(!is.na(xx$value))
  # Split ages into three roughly equal size classes (1 = small, 3 = large).
  tmp <- c(rep(2,ceiling(nage/3)),rep(3,ceiling(nage/3)))
  tmp <- c(rep(1,nage-length(tmp)),tmp)
  if(sum(tmp==1)==0 & sum(tmp==2)>1) tmp[1] <- 1
  xx$bvalue <- xx$value * fout0$waa[,1,1]
  xx$waa <- fout0$waa[,1,1]
  large.portion1 <- tapply(xx$bvalue[!is.na(xx$bvalue)],tmp,sum,na.rm=T)
  stat1$largefish.nature <- large.portion1[names(large.portion1)==3]/sum(large.portion1)
  aage.biom <- sum(xx$bvalue * 0:(length(xx$bvalue)-1))/sum(xx$bvalue)
  xx <- subset(stat2,X1=="TC" & X2=="MA")
  xx$bvalue <- xx$value * fout0$waa[,1,1]
  aage.catch <- sum(xx$bvalue * 0:(length(xx$bvalue)-1))/sum(xx$bvalue)
  large.portion2 <- tapply(xx$bvalue[!is.na(xx$bvalue)],tmp,sum,na.rm=T)
  stat1$largefish.catch <- large.portion2[names(large.portion2)==3]/sum(large.portion2)
  # Frequency of years with catch < smallcatch * mean catch (as inverse prob.)
  if(is.null(tmp.year)) tmp.year <- nrow(fout0$vwcaa)
  stat1$catch.safe <- 1/mean(fout0$vwcaa[tmp.year,]<smallcatch*mean(fout0$vwcaa[tmp.year,]))
  stat1$catch.safe <- ifelse(stat1$catch.safe>longyear,longyear,stat1$catch.safe)
  # Frequency of SSB < Blimit -> inverse of the probability, capped at longyear
  stat1$ssb.safe <- 1/stat1$"lower.HSpoint"
  stat1$ssb.safe <- ifelse(stat1$ssb.safe>longyear,longyear,stat1$ssb.safe)
  # Mean cumulative catch from ABC.year over each short-term horizon
  short.catch <- numeric()
  for(i in 1:length(shortyear)){
    years <- fout0$input$ABC.year:(fout0$input$ABC.year+shortyear[i])
    short.catch[i] <- mean(apply(fout0$vwcaa[rownames(fout0$vwcaa)%in%years,-1],2,sum))
  }
  names(short.catch) <- paste("short.catch",shortyear,sep="")
  short.catch <- as.data.frame(t(short.catch))
  # Year in which the projection reaches equilibrium (relative SSB change < 1%)
  years <- names(fout0$vssb[,1])[-1]
  heikou.diff <- which(diff(fout0$vssb[,1])/fout0$vssb[-1,1]<0.01)
  if(length(heikou.diff)>0) stat1$eq.year <- years[min(heikou.diff)] else stat1$eq.year <- Inf
  dat <- data.frame(stat1,short.catch,aage.biom=aage.biom,aage.catch=aage.catch,effort=fout0$multi,
                    waa=as.data.frame(t(fout0$waa[,1,1])),meigara=as.data.frame(t(tmp)))
  return(dat)
}
|
af34a4719e79e2602fde5116a11fec3271504f7e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fma/examples/elco.Rd.R
|
585f688c3449056909fb5bc8013517c1af527336
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 144
|
r
|
elco.Rd.R
|
# Auto-extracted example runner for the 'elco' dataset (fma package).
library(fma)
### Name: elco
### Title: Sales of Elco's laser printers
### Aliases: elco
### Keywords: datasets
### ** Examples
# Plot the 'elco' time series shipped with the fma package.
plot(elco)
|
3a60222b5a5e9a99e8ff0d608f5b5501ee1ad0aa
|
63f8e36bf88e7f7636ecdfac6b2625012246de19
|
/RScripts/Summary_statistics_for_text.R
|
115c61a09ebd8c648d147b72c2a5c7f7e87daaef
|
[] |
no_license
|
jasmineplows/longitudinal_changes_HMOs_over_24_months
|
1aa3c8972465b2dae3a9c860b7a8b53c860c2bd1
|
c7c335e452f9c99b322566799ed2b88fd7d9cb85
|
refs/heads/main
| 2023-03-15T13:57:42.768773
| 2021-03-18T00:37:18
| 2021-03-18T00:37:18
| 348,784,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
Summary_statistics_for_text.R
|
# Fit a mixed model for one HMO column (backtick-quoted so names like "2FL"
# work) with a Timepoint x Secretor interaction and a random intercept per
# subject, and return the fixed-effect coefficient table from summary().
coefficients_function <- function(dS, hmo) {
  model_formula <- paste0("`", hmo, "`", " ~ Timepoint*Secretor + (1|study_id)")
  model_fit <- lmer(model_formula, data = dS)
  summary(model_fit)$coefficients
}
# Tidy-eval summary helper: reports the median, 25th/75th percentiles and row
# count of column `x` in data frame `dS`. The column is captured as a symbol
# with ensym() and unquoted into summarise() with `!!` (rlang tidy evaluation),
# so callers pass a bare column name, e.g. summary_function(df, `2FL`).
summary_function <- function(dS, x) {
  x <- ensym(x)
  summarise(dS,
            median = median(!! x, na.rm=T), lower = quantile(!!x, 0.25, na.rm=T), upper = quantile(!!x, 0.75, na.rm=T), n = n())
}
###Summary data for abundance
# NOTE(review): data_for_abundance_table must exist in the workspace before
# this point — presumably created earlier in the analysis; verify upstream.
######Fully grouped dataset (by secretor status and timepoint)
grouped_dataSet_abundance <- data_for_abundance_table %>%
  group_by(Secretor, Timepoint)
#######Grouped by timepoint only
grouped_time_only_dataSet_abundance <- data_for_abundance_table %>%
  group_by(Timepoint)
#######Grouped by secretor only
grouped_secretor_only_dataSet_abundance <- data_for_abundance_table %>%
  group_by(Secretor)
#####Below here to create tables of summary statistics, not required for inline statistics
# Summarise one numeric vector as median, IQR bounds and non-NA count.
# Intended for use inside dplyr::across(), which calls it once per column;
# the result is wrapped in a one-element list so across() can store the
# one-row tibble in a list-column (unnested later with unnest()).
# NOTE: this intentionally shadows the tidy-eval summary_function defined
# earlier in this script.
summary_function <- function(x) {
  # CLEANUP: the original wrote quantile(!!x, ...). Inside tibble()'s
  # quasiquoting context `!!x` merely injects the value of x, so it was
  # behaviorally equivalent to plain x — but misleading (a leftover from the
  # ensym-based variant above). Use x consistently.
  summary <- list(tibble(median = median(x, na.rm=T),
                         lower = quantile(x, 0.25, na.rm=T),
                         upper = quantile(x, 0.75, na.rm=T),
                         n = length(x[!is.na(x)])))
  return(summary)
}
# Build a wide table of per-group summary stats: one list-column per HMO
# column ("2FL" through "SUM"), then unnest so each stat becomes its own
# column named "<HMO>_<stat>".
summary_table <- data_for_median_plots %>%
  group_by(Secretor, Timepoint) %>%
  dplyr::summarise(across("2FL":"SUM", summary_function)) %>%
  unnest("2FL":"SUM", names_sep = "_")
# Export as a tab-separated text file for the manuscript.
write.table(summary_table, file = "summary_stats.txt", sep = "\t")
|
b76ee009de21f5811c2b1bffc5c76ac9791bbe1a
|
fe62480523fc5d0cf8ea255da0ab5d0251079f88
|
/lab_sessions/2020_4_15/demonstrations_after_lunch.R
|
a366ac0ea02feb158ac5def43818fd7ac0272478
|
[
"CC0-1.0"
] |
permissive
|
griff-rees/network-analysis-course
|
f8344d75541fab920cc5def9421ba82fcb9457ac
|
ae7d968dccae2273b5d928dbb900ddfc96f1f85f
|
refs/heads/master
| 2023-05-29T23:22:09.181993
| 2021-01-18T17:07:43
| 2021-01-18T17:07:43
| 254,641,551
| 1
| 3
|
CC0-1.0
| 2021-06-11T14:02:43
| 2020-04-10T13:29:26
|
HTML
|
UTF-8
|
R
| false
| false
| 1,468
|
r
|
demonstrations_after_lunch.R
|
# Teaching script: Star Wars character network with igraph.
# Only install igraph if not currently installed, and load it either way
# (require() is used deliberately here as an availability check).
if (!require(igraph)) {
  install.packages("igraph", repos="http://cran.uk.r-project.org")
}
# NOTE(review): setwd() ties this script to one machine's layout; fine for a
# lab session, avoid in reusable code.
setwd("~/Downloads/network-analysis-course/")
nodes <- read.csv("data/star-wars-network-nodes.csv")
edges <- read.csv("data/star-wars-network-edges.csv")
# Undirected graph from an edge list plus a vertex table.
g <- graph_from_data_frame(d=edges, vertices=nodes, directed=FALSE)
plot(g)
# Hand-curated faction membership for coloring the vertices.
dark_side <- c("DARTH VADER", "MOTTI", "TARKIN")
light_side <- c("R2-D2", "CHEWBACCA", "C-3PO", "LUKE", "CAMIE", "BIGGS",
                "LEIA", "BERU", "OWEN", "OBI-WAN", "HAN", "DODONNA",
                "GOLD LEADER", "WEDGE", "RED LEADER", "RED TEN", "GOLD FIVE")
neutral <- c("GREEDO", "JABBA")
# Color vertices by faction via the V()$attr accessor.
V(g)$color <- NA
vertex_attr(g)
V(g)$color[V(g)$name %in% dark_side] <- "red"
V(g)$color
V(g)$color[V(g)$name %in% light_side] <- "gold"
V(g)$color[V(g)$name %in% neutral] <- "green"
# Induced subgraphs keep only the named vertices and edges among them.
dark_side_graph <- induced_subgraph(g, dark_side)
V(dark_side_graph)
plot(dark_side_graph)
light_side_graph <- induced_subgraph(g, light_side)
plot(light_side_graph)
head(edges)
# Same data as a directed graph, for comparison.
d <- graph_from_data_frame(d=edges, vertices=nodes, directed=TRUE)
plot(d)
g
E(g)
d
E(d)
head(edges$weight)
head(E(d))
head(E(d)$weight)
edge_attr(g)
# Color edges by weight: heavy edges (>= 5 scenes together) in red.
E(g)$color <- "blue"
E(g)$weight
E(g)$color[E(g)$weight >= 5] <- "red"
head(E(g)$color)
edge_attr(g)
plot(g)
# Adjacency-matrix views of the two graphs.
g[]
d[]
summary(g)
# A few standard centrality measures for discussion.
betweenness(g)
page_rank(g)
centr_betw(g)
max(E(g)$weight)
E(g)$weight
edge_attr(g)
E(g)
head(edges)
tail(edges,)
|
168b673c88b3fe43cabe2157c1c833cf687e81c3
|
a06fa2937438142f8a8b39199f331984e6eca572
|
/ui.R
|
0e1a2cd3a61802ad57209f040a6e877bc6d7ef99
|
[] |
no_license
|
mskarthik/DevelopingDataProducts
|
1ef895019c373e5f1d3286622860b68eee8e9e18
|
bb8d4e55b8cf2b11e314d7f21c7574d2b37f1d36
|
refs/heads/master
| 2016-09-09T22:27:52.254508
| 2015-01-18T08:23:13
| 2015-01-18T08:23:13
| 29,419,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
ui.R
|
library(shiny)
# Define UI for the mortgage calculator application: three numeric inputs
# (loan amount, annual interest rate, term in years) in the sidebar; the
# server renders the monthly installment ("A") and an amortization table.
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("Mortgage Calculator"),
  sidebarPanel(
    numericInput("LoanAmount", label = h3("Loan Amount"),value = 250000),
    numericInput("Rate", label = h3("Interest Rate"), value = 4),
    numericInput("LoanTerm", label = h3("Loan Term"), value = 30)
  ),
  mainPanel(
    h3('Monthly Installment'),
    verbatimTextOutput("A"),
    h4('Amortization Schedule'),
    tableOutput("AmortTable")
  )
))
|
5591b673d07838bbb11c3fba51e1d777f9af1fe0
|
57eb853c3a157d6144b3198bb3786fd02496ad6d
|
/plot2.R
|
42b47a15cb7bb901ddc2be26c254e3cb0c8872da
|
[] |
no_license
|
leighmt/ExData_Plotting1
|
bf4db3e02d8bf86824798cda28d49f877381ec64
|
b08d585915b61e2bf5d5490265b250ccb4d21ddb
|
refs/heads/master
| 2021-01-16T19:10:04.803579
| 2014-06-05T13:40:01
| 2014-06-05T13:40:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
plot2.R
|
## Generate a line chart of Global Active Power for the time period
## 2007-02-01 and 2007-02-02.
# Set location of archive
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
archive <- "data/exdata-data-household_power_consumption.zip"
# Check if data exists
if(!(file.exists("data"))) {
    dir.create("data")
}
# Download and unzip data if it doesn't already exist.
# NOTE(review): method = "curl" requires curl on the system; platform-specific.
if(!(file.exists("data/household_power_consumption.txt"))) {
    download.file(url = url, destfile = archive, method = "curl", quiet = TRUE)
    unzip(archive, exdir="data/")
}
## Read household power consumption data
# All columns are read as character (including the measurements).
data <- read.csv("data/household_power_consumption.txt",
                 sep = ";",
                 colClasses = "character")
# Get subset of data required for assignment
graphdata <- subset(data, as.Date(Date, format = "%d/%m/%Y") >= '2007-02-01' &
                        as.Date(Date, format = "%d/%m/%Y") <= '2007-02-02')
# Generate PNG
# NOTE(review): Global_active_power is still character here (colClasses above);
# plot() likely needs as.numeric(Global_active_power) — verify this runs.
png(filename = "plot2.png", width = 480, height = 480)
with(graphdata,
     plot(as.POSIXct(paste(as.Date(Date, format = "%d/%m/%Y"), Time)),
          Global_active_power,
          type = "l",
          ylab = "Global Active Power (kilowatts)",
          xlab = "")
)
dev.off()
|
b55062cd840d00a703e089734429cea5c376d9cb
|
3b114e03d661141ec13388c7925583079bb814f7
|
/unbalance_data.R
|
15ac0a0b3f7b7ff631a7bece294c82a0542750cb
|
[] |
no_license
|
Cyc-Evelyn/R_for-machine-learning
|
b7cc7606602dfd5ffade1e5e2b97a1d6655292e5
|
d55ebc94583659e384e50bc3704f336a3080062a
|
refs/heads/main
| 2023-04-20T16:34:18.326935
| 2021-05-09T07:12:38
| 2021-05-09T07:12:38
| 364,108,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,304
|
r
|
unbalance_data.R
|
# Balance a 4-class dataset ("Package" in {A,B,C,L}, dominated by class A)
# using SMOTE oversampling, one minority class at a time against class A.
# NOTE(review): setwd() and install.packages() inside a script are
# machine-specific side effects; acceptable for a one-off analysis only.
setwd("C:/Users/USER/Desktop/how to rate model")
data <- read.csv("Mass Data 03.csv", header=T, sep=",")
names(data)
#only select important features
data <- data[c(2,3,4,5)]
data_original <- data
#encode Package as numeric codes (A=1, B=2, C=3, L=4) so SMOTE can be applied
data$Package <- ifelse(data$Package == 'A', 1, data$Package)
data$Package <- ifelse(data$Package == 'B', 2, data$Package)
data$Package <- ifelse(data$Package == 'C', 3, data$Package)
data$Package <- ifelse(data$Package == 'L', 4, data$Package)
data$Package <- as.numeric(data$Package)
table(data$Package) #most of the data are class '1'
# One subset per class.
dataA <- data[data$Package == 1,]
dataB <- data[data$Package == 2,]
dataC <- data[data$Package == 3,]
dataL <- data[data$Package == 4,]
#use smote to balance data
install.packages('smotefamily')
library(smotefamily)
#first balance classes A and B ('1' & '2')
dataAB <- rbind(dataA, dataB)
balanced <- SMOTE(dataAB, dataAB$Package, dup_size = 10)
names(balanced)
balanced <- balanced$data
# SMOTE adds a 'class' column duplicating Package, so drop column 5
names(balanced)
balanced <- balanced[,-5]
# Compare class counts before and after balancing
table(dataAB$Package)
table(balanced$Package)
#replace class 2 with the oversampled (balanced) rows
dataB <- balanced[balanced$Package == 2,]
# Repeat the same procedure for the other minority classes (C, L)
dataAC <- rbind(dataA, dataC)
table(dataAC$Package)
balanced <- SMOTE(dataAC, dataAC$Package, dup_size = 10)
balanced <- balanced$data
names(balanced)
balanced <- balanced[,-5]
table(balanced$Package)
dataC <- balanced[balanced$Package == 3,]
dataAL <- rbind(dataA, dataL)
table(dataAL$Package)
balanced <- SMOTE(dataAL, dataAL$Package, dup_size = 10)
balanced <- balanced$data
names(balanced)
balanced <- balanced[,-5]
table(balanced$Package)
dataL <- balanced[balanced$Package == 4,]
#combine balanced data
balanced <- rbind(dataA, dataB, dataC, dataL)
#map numeric codes back to the original class labels
balanced$Package <- ifelse(balanced$Package == 1, 'A', balanced$Package)
balanced$Package <- ifelse(balanced$Package == 2, 'B', balanced$Package)
balanced$Package <- ifelse(balanced$Package == 3, 'C', balanced$Package)
balanced$Package <- ifelse(balanced$Package == 4, 'L', balanced$Package)
balanced$Package <- as.factor(balanced$Package)
data <- data_original
data$Package <- as.factor(data$Package)
# Final before/after class distributions
table(data$Package)
table(balanced$Package)
write.csv(balanced, file="balanced.csv")
|
b6538d0e984327161fe7d8a77b0f51e115d67ced
|
52a3279763bf2c2b008e7ef23ae26d791aff68cb
|
/man/mstmap.data.frame.Rd
|
1ce0551e5638958f185bbab3e44863790af72e34
|
[] |
no_license
|
cran/ASMap
|
3289e69aaffb89142673f4a4a5d9542b7a5a105b
|
22d9cc46ee3b69531f0f6171db35693829db477a
|
refs/heads/master
| 2023-07-21T17:33:43.863813
| 2023-07-16T18:00:09
| 2023-07-16T19:30:29
| 24,411,213
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,920
|
rd
|
mstmap.data.frame.Rd
|
\name{mstmap.data.frame}
\alias{mstmap.data.frame}
\title{
Extremely fast linkage map construction for data frame objects using MSTmap.
}
\description{
Extremely fast linkage map construction for data frame objects utilizing the
source code for MSTmap (see Wu et al., 2008). The construction includes
linkage group clustering, marker ordering and genetic distance calculations.
}
\usage{
\method{mstmap}{data.frame}(object, pop.type = "DH", dist.fun = "kosambi",
objective.fun = "COUNT", p.value = 1e-06, noMap.dist = 15,
noMap.size = 0, miss.thresh = 1, mvest.bc = FALSE,
detectBadData = FALSE, as.cross = TRUE, return.imputed = FALSE,
trace = FALSE, \ldots)
}
\arguments{
\item{object}{
A \code{"data.frame"} object containing marker information. The
data.frame must explicitly be arranged with markers in rows and
genotypes in columns. Marker names are obtained from the \code{rownames} of the
\code{object} and genotype names are obtained from the \code{names}
component of the \code{object} (see Details).
}
\item{pop.type}{
Character string specifying the population type of the data frame
\code{object}. Accepted values are \code{"DH"} (doubled haploid),
\code{"BC"} (backcross), \code{"RILn"} (non-advanced RIL population with
n generations of selfing) and \code{"ARIL"} (advanced RIL) (see
Details). Default is \code{"DH"}.
}
\item{dist.fun}{
Character string defining the distance function used for calculation of
genetic distances. Options are \code{"kosambi"} and \code{"haldane"}.
Default is \code{"kosambi"}.
}
\item{objective.fun}{
Character string defining the objective function to be used when
constructing the map. Options are \code{"COUNT"} for minimising the sum of
recombination events between markers and \code{"ML"} for maximising the
likelihood objective function. Default is \code{"COUNT"}.
}
\item{p.value}{
Numerical value to specify the threshold to use when clustering
markers. Defaults to \code{1e-06}. If a value greater than one
is given this feature is turned off and the inputted marker data are assumed to
belong to the same linkage group (see Details).
}
\item{noMap.dist}{
Numerical value to specify the smallest genetic distance a set of
isolated markers can appear distinct from other linked markers. Isolated
markers will appear in their own linkage groups ad will be of size
specified by \code{noMap.size}.
}
\item{noMap.size}{
Numerical value to specify the maximum size of isolated marker linkage
groups that have been identified using \code{noMap.dist}. This feature
can be turned off by setting it to 0. Default is 0.
}
\item{miss.thresh}{
Numerical value to specify the threshold proportion of missing marker
scores allowable in each of the markers. Markers above this threshold
will not be included in the linkage map. Default is 1.
}
\item{mvest.bc}{
Logical value. If \code{TRUE} missing markers will be imputed
before clustering the markers into linkage groups. This is restricted
to \code{"BC","DH","ARIL"} populations only (see Details).
}
\item{detectBadData}{
Logical value. If \code{TRUE} possible genotyping errors are detected,
set to missing and then imputed as part of the
marker ordering algorithm. Genotyping errors will also be printed in the
file specified by \code{trace}. This is restricted
to \code{"BC","DH","ARIL"} populations only. (see Details). Default is \code{FALSE}.
}
\item{as.cross}{
Logical value. If \code{TRUE} the constructed linkage map is returned as
a \pkg{qtl} cross object (see Details). If \code{FALSE} then the constructed
linkage map is returned as a \code{data.frame} with extra columns
indicating the linkage group, marker name/position and genetic distance.
Default is \code{TRUE}.
}
\item{return.imputed}{
Logical value. If \code{TRUE} then the imputed marker probability matrix is
returned for the linkage groups that are constructed (see
Details). Default is \code{FALSE}.
}
\item{trace}{
An automatic tracing facility. If \code{trace = FALSE} then
minimal \code{MSTmap} output is piped to the screen during the algorithm.
If \code{trace = TRUE}, then detailed output from MSTmap is
piped to "\code{MSToutput.txt}". This file is equivalent to the output that
would be obtained from running the MSTmap executable from the command line.
}
\item{\ldots}{
Currently ignored.
}
}
\details{
The data frame \code{object} must have an explicit format with markers
in rows and genotypes in columns. The marker names are required to be in
the \code{rownames} component and the genotype names are
required to be in the \code{names} component of the \code{object}. In
each set of names there must be no spaces. If spaces are detected they
are exchanged for a "-". Each of the columns of the data frame must be of class
\code{"character"} (not factors). If converting from a matrix, this can
easily be achieved by using the \code{stringAsFactors = FALSE} argument
for any \code{data.frame} method.
It is important to know what population type the data frame
\code{object} is and to correctly input this into \code{pop.type}. If
\code{pop.type = "ARIL"} then it is assumed that the minimal number of heterozygotes have been
set to missing before proceeding. The advanced RIL population is then
treated like a backcross population for the purpose of linkage map
construction. Genetic distances are adjusted post construction.
For non-advanced RIL populations \code{pop.type =
"RILn"}, the number of generations of selfing is limited to 20 to
ensure sensible input.
The content of the markers in \code{object} can either be all numeric
(see below) or all character. If markers are of type character then
the following allelic content must be explicitly adhered to. For \code{pop.type} \code{"BC"},
\code{"DH"} or \code{"ARIL"} the two allele types should
be represented as (\code{"A"} or \code{"a"}) and (\code{"B"} or
\code{"b"}). For non-advanced RIL populations (\code{pop.type = "RILn"})
phase unknown heterozygotes should be represented as
\code{"X"}. For all populations, missing marker scores should be represented
as (\code{"U"} or \code{"-"}).
This function also extends the functionality of the MSTmap
algorithm by allowing users to input a complete numeric data frame of
marker probabilities for \code{pop.type} \code{"BC"}, \code{"DH"} or
\code{"ARIL"}. The values must be inclusively between 1 (A) and 0 (B) and be
representative of the probability that the A allele is present. No
missing values are allowed.
The algorithm allows an adjustment of the \code{p.value} threshold for
clustering of markers to distinct linkage groups (see Wu et al.,
2008) and is highly dependent on the number of individuals in
the population. As the number of individuals increases the
\code{p.value} threshold should be decreased accordingly. This may
require some trial and error to achieve desired results.
If \code{mvest.bc = TRUE} and the population type is \code{"BC","DH","ARIL"}
then missing values are imputed before markers are clustered into
linkage groups. This is only a simple imputation that places a 0.5
probability of the missing observation being one allele or the other and
is used to assist the clustering algorithm when there is known to be high numbers of
missing observations between pairs of markers.
It should be highlighted that for population types
\code{"BC","DH","ARIL"}, imputation of missing values occurs
regardless of the value of \code{mvest.bc}. This is achieved using an EM algorithm that is
tightly coupled with marker ordering (see Wu et al., 2008). Initially
a marker order is obtained omitting missing marker scores and then
imputation is performed based on the underlying recombinant probabilities
of the flanking markers with the markers containing the missing
value. The recombinant probabilities are then recomputed and an update of
the pairwise distances are calculated. The ordering algorithm is then
run again and the complete process is repeated until
convergence. Note, the imputed probability matrix for the linkage map
being constructed is returned if \code{return.imputed = TRUE}.
For populations \code{"BC","DH","ARIL"}, if \code{detectBadData = TRUE},
the marker ordering algorithm also
includes the detection of genotyping errors. For any individual
genotype, the detection method is based on a weighted Euclidean metric
(see Wu et al., 2008) that is a function of the
recombination probabilities of all the markers with the marker containing
the suspicious observation. Any genotyping errors detected are set to
  missing and the missing values are then imputed if \code{mvest.bc =
  TRUE}. Note, the detection of these errors and their
amendment is returned in the imputed probability matrix if
\code{return.imputed = TRUE}
If \code{as.cross = TRUE} then the constructed object is returned as a
\pkg{qtl} cross object with the appropriate class structure. For \code{"RILn"}
populations the constructed object is given the class \code{"bcsft"} by
using the \pkg{qtl} package conversion function \code{convert2bcsft}
with arguments \code{F.gen = n} and \code{BC.gen =
0}. For \code{"ARIL"} populations the constructed object is given the
class \code{"riself"}.
If \code{return.imputed = TRUE} and \code{pop.type} is one of
\code{"BC","DH","ARIL"}, then the marker probability matrix is
returned for the linkage groups that have been constructed using the
algorithm. Each linkage group is named identically to the linkage groups
of the map and, if \code{as.cross = TRUE}, contains an ordered
\code{"map"} element and a \code{"data"}
element consisting of marker probabilities of the A allele being present
(i.e. P(A) = 1, P(B) = 0). Both elements contain a
possibly reduced version of the marker set that includes all
non-colocating markers as well as the first marker of any set of
co-locating markers. If \code{as.cross = FALSE} then an ordered data frame of matrix
probabilities is returned.
}
\value{
If \code{as.cross = TRUE} the function returns an R/qtl cross object with the appropriate
class structure. The object is a list with usual components
\code{"pheno"} and \code{"geno"}. If \code{as.cross = FALSE} the
function returns an ordered data frame object
with additional columns that indicate the linkage group, the position
and marker names and genetic distance of the markers within in each
linkage group. If markers were omitted for any reason during the
construction, the object will have an \code{"omit"} component with
all omitted markers in a collated matrix. If \code{return.imputed =
TRUE} then the object will also contain an \code{"imputed.geno"} element.
}
\references{
Wu, Y., Bhat, P., Close, T.J, Lonardi, S. (2008) Efficient and Accurate
Construction of Genetic Linkage Maps from Minimum Spanning Tree of a
Graph. Plos Genetics, \bold{4}, Issue 10.
Taylor, J., Butler, D. (2017) R Package ASMap: Efficient Genetic
Linkage Map Construction and Diagnosis. Journal of Statistical Software,
\bold{79}(6), 1--29.
}
\author{
Julian Taylor, Dave Butler, Timothy Close, Yonghui Wu, Stefano Lonardi
}
\seealso{
\code{\link{mstmap.cross}}
}
\examples{
data(mapDH, package = "ASMap")
## forming data frame object from R/qtl object
dfg <- t(do.call("cbind", lapply(mapDH$geno, function(el) el$data)))
dimnames(dfg)[[2]] <- as.character(mapDH$pheno[["Genotype"]])
dfg <- dfg[sample(1:nrow(dfg), nrow(dfg), replace = FALSE),]
dfg[dfg == 1] <- "A"
dfg[dfg == 2] <- "B"
dfg[is.na(dfg)] <- "U"
dfg <- cbind.data.frame(dfg, stringsAsFactors = FALSE)
## construct map
testd <- mstmap(dfg, dist.fun = "kosambi", trace = FALSE)
pull.map(testd)
## let's get a timing on that ...
system.time(testd <- mstmap(dfg, dist.fun = "kosambi", trace = FALSE))
}
\keyword{misc}
|
c03fc542365e2cef6a6264a0ba5c17a8b6559960
|
9d82f6cf8d99913a3d889c0e1033c26dfca06749
|
/man/B3.Rd
|
2f196663f13332e83049525fcdc3b227f7ee4a40
|
[] |
no_license
|
cran/ptest
|
9897e6234a036fee94747ac13fe312b9d476836d
|
7e9599c577f9cd8486c1a8d5369f28d6d1905c96
|
refs/heads/master
| 2021-01-21T22:10:48.716762
| 2016-11-12T21:41:37
| 2016-11-12T21:41:37
| 73,577,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,300
|
rd
|
B3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{B3}
\alias{B3}
\title{Benchmark set B3}
\format{A vector containing 518 genes' names.}
\source{
The raw data can be downloaded from
\url{http://www.cbs.dtu.dk/cellcycle/yeast_benchmark/benchmark.php}.
}
\usage{
data(B3)
}
\description{
List for yeast genes which are \bold{less} likely to be periodic
(the benchmark set 3 in de Lichtenberg et al. (2005)).
}
\details{
Genes annotated in MIPS (Mewes et al., 2002) as 'cell cycle
and DNA processing'. From these, we removed genes annotated
specifically as 'meiosis' and genes included in B1 (67), leaving
518 genes. As a large number of genes involved in the cell cycle
are not subject to transcriptional regulation (not periodic), and
because B1 was explicitly removed, a relatively small fraction
of these genes should be expected to be periodically expressed.
}
\examples{
data(alpha)
data(B3)
alphaB3 <- alpha[rownames(alpha) \\\%in\\\% B3,]
}
\references{
De Lichtenberg, U., Jensen, L. J., Fausboll, A.,
Jensen, T. S., Bork, P.,& Brunak, S. (2005).
Comparison of computational methods for the identification
of cell cycle-regulated genes. Bioinformatics, 21(7), 1164-1171.
}
\keyword{datasets}
|
41bbe759d7e48a28bd15cf163009347df1dc2fec
|
49686eff0afdc426d0e603abba419e9e928065f5
|
/plot4.R
|
8f161df578b5d2c8b6befb1022426b79ae1c5a12
|
[] |
no_license
|
punadsmile/ExData_Plotting1
|
9ae2e80d9e09e1edaa345948128482fff121613f
|
5a78ebd215030593c899f317bebf60607bb90130
|
refs/heads/master
| 2020-04-05T23:10:14.096386
| 2014-11-09T10:45:29
| 2014-11-09T10:45:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,228
|
r
|
plot4.R
|
# Exploratory Data Analysis: plot 4 -- 2x2 grid of household power panels.
#
# Reads the pre-downloaded data file "p1.txt" (the course's
# household_power_consumption.txt, renamed because of a network problem),
# keeps observations for 1-2 Feb 2007, and saves four time-series plots
# to plot4.png (480x480). The original note warned that axis labels may
# render in the system locale's language; the content matches the
# reference plots.
library("dplyr")

# Read the semicolon-separated raw file. Keep strings as characters so the
# Date/Time columns can be parsed directly (avoids factor round-trips).
# Renamed from `file`/`filter` in the original, which shadowed base::file
# and dplyr::filter.
power <- read.csv("p1.txt", sep = ";", stringsAsFactors = FALSE)

# Keep only the two dates of interest.
two_days <- filter(power, Date == "1/2/2007" | Date == "2/2/2007")

# Combine Date and Time into a single timestamp for the x axes.
two_days$DateTime <- strptime(paste(two_days$Date, two_days$Time),
                              format = "%d/%m/%Y %H:%M:%S")

# Numeric versions of each measured series ("?" missing markers become NA).
gap  <- as.numeric(two_days$Global_active_power)
vol  <- as.numeric(two_days$Voltage)
sub1 <- as.numeric(two_days$Sub_metering_1)
sub2 <- as.numeric(two_days$Sub_metering_2)
sub3 <- as.numeric(two_days$Sub_metering_3)
grap <- as.numeric(two_days$Global_reactive_power)

# Open the PNG device up front: drawing directly into it guarantees the
# requested 480x480 geometry, unlike the dev.copy() screen-grab approach.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))  # four panels, filled row by row

# Panel 1: global active power.
plot(two_days$DateTime, gap, type = "l",
     xlab = " ", ylab = "Global Active Power (kilowatts)")

# Panel 2: voltage.
plot(two_days$DateTime, vol, type = "l",
     xlab = "datetime", ylab = "Voltage")

# Panel 3: the three sub-metering series overlaid, with a legend.
plot(two_days$DateTime, sub1, type = "l",
     xlab = " ", ylab = "Energy sub metering")
lines(two_days$DateTime, sub2, type = "l", col = "red")
lines(two_days$DateTime, sub3, type = "l", col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), cex = 0.5, lty = "solid")

# Panel 4: global reactive power.
plot(two_days$DateTime, grap, type = "l",
     xlab = "datetime", ylab = "Global Reactive Power (kilowatts)")

dev.off()
|
8d58573a9a7c0a2f0aaf5a87eb61dcd41cd06803
|
f41146d721917805eecaacf1079d635a79f24a6a
|
/man/rboolm.Rd
|
883f995141c170574085e6236d0686f6c1b1cf9b
|
[
"MIT"
] |
permissive
|
krzjoa/matricks
|
13fc2e6547534f7578202da8332fb86ef1a5f650
|
fd9987b6a4ee41be2520f0abfa935144fef06477
|
refs/heads/master
| 2020-09-14T05:39:12.488603
| 2020-03-03T22:15:06
| 2020-03-03T22:15:06
| 223,035,650
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 498
|
rd
|
rboolm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random-matrix.R
\name{rboolm}
\alias{rboolm}
\title{Create matrix of random choosen boolean values}
\usage{
rboolm(nrow, ncol, true.proba = 0.5)
}
\arguments{
\item{nrow}{number of rows}
\item{ncol}{number of columns}
\item{true.proba}{probability of true values; default: 0.5}
}
\value{
a matrix
}
\description{
Create matrix of random choosen boolean values
}
\examples{
rboolm(3, 3)
rboolm(4, 5, true.proba = 0.3)
}
|
b49983c680639a869f2701c368b4851ab659663f
|
914765027979b092d632fdbf4e9fbcd417f8223e
|
/Scripts/04.3.Catch.GAMM.R
|
fc32f69f14828c7f3a17b0c4fd64254dca56242e
|
[] |
no_license
|
UWA-Marine-Ecology-Group-students/Analysis_Miller_lobster
|
6cc2dc785e7afc8a70f3ae26bd0993f49af60b10
|
1bb1d1e24c294fb74cf3479c5b968c4f0ae173e4
|
refs/heads/master
| 2023-07-27T05:24:48.634970
| 2021-08-31T03:46:18
| 2021-08-31T03:46:18
| 395,499,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,017
|
r
|
04.3.Catch.GAMM.R
|
# A simple function for full subsets multiple regression in ecology with R
#
# R. Fisher
# S.K. Wilson
# S.M. Sin
# A.C. Lee
# Dr Tim J. Langlois

# Explore catch data ----
# Fits full-subsets GAMMs (FSSgam) to lobster catch counts for each size
# class, with swell height/period and SST as continuous predictors,
# Location as a factor, and Site/Date as random effects.

rm(list = ls())  # Clears memory (kept from the original workflow script)

# Libraries ----
# plyr masks key dplyr verbs, so detach it -- but only when it is actually
# attached (the original unconditional detach() errored otherwise).
if ("package:plyr" %in% search()) detach("package:plyr", unload = TRUE)
library(tidyr)
library(dplyr)
library(magrittr)  # provides the %<>% assignment pipe used below
library(stringr)   # provides str_replace_all() used below
library(lubridate)
library(readr)
options(dplyr.width = Inf) # enables head() to display all columns
library(mgcv)
library(MuMIn)
library(car)
library(doBy)
library(ggplot2)
library(RColorBrewer)
library(doSNOW)
library(gamm4)
library(RCurl) # needed to download data from GitHub

# Install package ----
# Only install FSSgam from GitHub when it is missing, instead of
# re-installing on every run.
if (!requireNamespace("FSSgam", quietly = TRUE)) {
  # library(devtools) # run once
  devtools::install_github("beckyfisher/FSSgam_package")
}
library(FSSgam)

# Set work directory ----
work.dir <- ("Z://Analysis_Miller_lobster") # for laptop

## Sub directories ----
data.dir <- paste(work.dir, "Data", sep = "/")
map.dir <- paste(work.dir, "Map Layers", sep = "/")
plots.dir <- paste(work.dir, "Plots", sep = "/")
model.dir <- paste(work.dir, "Model_out_catch", sep = "/")

# Bring in and format the data ----
name <- "catch"
setwd(data.dir)

dat <- read_csv("catch.sw.sst.csv") %>%
  # Rename to the generic response/Taxa columns used by the modelling loop.
  dplyr::rename(response = Count,
                Taxa = sizeclass) %>%
  drop_na(Taxa) %>%
  drop_na(response) %>%
  ## Transform variables
  mutate(Date = as.factor(yday(Date))) %>% # as julian day
  mutate(Site = as.factor(Site)) %>%
  mutate(Location = as.factor(Location)) %>%
  glimpse()

# Pool sampled locations into broader zones (Boundary / Mid), leaving
# Cliff Head as its own level.
dat %<>%
  mutate(Location = str_replace_all(.$Location, c("Little Horseshoe" = "Boundary", "Golden Ridge" = "Boundary", "Irwin Reef" = "Mid", "White Point" = "Mid"))) %>%
  mutate(Location = as.factor(Location)) %>%
  glimpse()

unique(dat$Location)

# Quick check: zero catches across all size classes at Cliff Head.
zeros <- dat %>%
  filter(Location == "Cliff Head") %>%
  filter(Taxa == "All") %>%
  filter(response == "0") %>%
  glimpse()
length(zeros$Sample)

dat <- as.data.frame(dat)
glimpse(dat)
unique(dat$Location)
unique(dat$response)

# dat %<>%
#   mutate(Location = str_replace_all(.$Location, c("Little Horseshoe" = "Cliff Head", "White Point" = "Irwin Reef"))) %>%
#   glimpse()

# Exploratory notched boxplots of the response by Location, per size class.
ggplot(data = dat, aes(y = response, x = Location)) +
  # geom_smooth(method="gam")+
  geom_boxplot(notch = TRUE) +
  geom_point(alpha = 0.25) +
  facet_grid(. ~ Taxa)

# Set predictor variables ----
pred.vars.fact <- c("Location")
pred.vars.cont <- c("Hs.m.sw",
                    "T1.s.sw",
                    # "Hs.m.sea",
                    "sst")
# Removed correlated
# "Date","T1.s.sw",
# "T1.s.sea",
# "Hs(m).tot","Tp(s).tot","T1(s).tot","Tp(s).sea","Tp(s).sw","Dir(deg).sw","Dir(deg).sea",

# Check for correlation of predictor variables - remove anything highly
# correlated (> 0.95) ----
round(cor(dat[, pred.vars.cont]), 2)

# Plot of likely transformations - thanks to Anna Cresswell for this loop!
par(mfrow = c(3, 2))
for (i in pred.vars.cont) {
  x <- dat[, i]
  x <- as.numeric(unlist(x))
  hist((x)) # Looks best
  plot((x), main = paste(i))
  hist(sqrt(x))
  plot(sqrt(x))
  hist(log(x + 1))
  plot(log(x + 1))
}
# Review of individual predictors - we have to make sure they have an even
# distribution. If the data are skewed to low numbers try sqrt > log; if
# skewed to high numbers try ^2 or ^3.

# Check to make sure response vector has not more than 80% zeros ----
unique.vars.use <- unique(as.character(dat$Taxa))

# Run the full subset model selection ----
setwd(model.dir) # Set wd for example outputs - will differ on your computer

# Presets
glimpse(dat)
names(dat)
resp.vars <- unique.vars.use
use.dat <- dat
out.all <- list()
var.imp <- list()

# Sanity-check fit of the SST + random-effects model on the full data set.
Model1 <- gam(response ~ s(sst, k = 3, bs = 'cr') + s(Site, bs = 're') + s(Date, bs = 're'), family = tw(), data = use.dat)
summary(Model1)

# Loop through the FSS function for each Taxa ----
for (i in 1:length(resp.vars)) {
  use.dat <- dat[which(dat$Taxa == resp.vars[i]), ]
  Model1 <- gam(response ~ s(sst, k = 3, bs = 'cr') + s(Site, bs = 're') + s(Date, bs = 're'), family = tw(), data = use.dat)
  # gam.check(Model1)
  # plot.gam(Model1)
  # summary(Model1)
  model.set <- generate.model.set(use.dat = use.dat,
                                  test.fit = Model1,
                                  pred.vars.cont = pred.vars.cont,
                                  pred.vars.fact = pred.vars.fact,
                                  factor.factor.interactions = FALSE,
                                  smooth.smooth.interactions = FALSE,
                                  factor.smooth.interactions = FALSE,
                                  k = 3,
                                  cov.cutoff = 0.7,
                                  null.terms = "s(Site,bs='re')+s(Date,bs='re')")
  # r2.type="r2",
  out.list <- fit.model.set(model.set,
                            max.models = 600,
                            parallel = TRUE)
  names(out.list)
  out.list$failed.models # examine the list of failed models
  mod.table <- out.list$mod.data.out # look at the model selection table
  mod.table <- mod.table[order(mod.table$AICc), ]
  mod.table$cumsum.wi <- cumsum(mod.table$wi.AICc)
  out.i <- mod.table[which(mod.table$delta.AICc <= 10), ]
  out.all <- c(out.all, list(out.i))
  var.imp <- c(var.imp, list(out.list$variable.importance$aic$variable.weights.raw)) # Either raw importance score
  # var.imp=c(var.imp,list(out.list$variable.importance$aic$variable.weights.raw)) #Or importance score weighted by r2

  # Plot the best models
  for (m in 1:nrow(out.i)) {
    best.model.name <- as.character(out.i$modname[m])
    png(file = paste(name, m, resp.vars[i], "mod_fits.png", sep = "_"))
    if (best.model.name != "null") {
      par(mfrow = c(3, 1), mar = c(9, 4, 3, 1))
      best.model <- out.list$success.models[[best.model.name]]
      plot(best.model, all.terms = TRUE, pages = 1, residuals = TRUE, pch = 16)
      mtext(side = 2, text = resp.vars[i], outer = FALSE)
    }
    dev.off()
  }
}

# Model fits and importance ----
names(out.all) <- resp.vars
names(var.imp) <- resp.vars
all.mod.fits <- do.call("rbind", out.all)
all.var.imp <- do.call("rbind", var.imp)
write.csv(all.mod.fits[, -2], file = paste(name, "all.mod.fits.180619.csv", sep = "_"))
write.csv(all.var.imp, file = paste(name, "all.var.imp.180619.csv", sep = "_"))
|
3f9f3c3a39e7dca6d84542f88e623d6e27fbc9d1
|
55eb928b52688c5158f412451a3572209d89f5bc
|
/exploration.R
|
1c3c767d16e8a5068d2b6a50130174c16b40ca5f
|
[] |
no_license
|
indyfree/us-healthcare-politics
|
86d068f03167ce843e8c1c90c59b1009d9d9c9ac
|
2820b40b17cd4793b26e16bd32ec075a5699ce34
|
refs/heads/master
| 2021-03-19T06:02:54.344912
| 2019-01-31T15:47:42
| 2019-01-31T15:47:42
| 116,642,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,508
|
r
|
exploration.R
|
# Data exploration scripts and visualisations for US healthcare plan data.
# Builds per-state feature tables for 2015/2016 and compares the share of
# abortion-covering plans against 2012 political leaning ("Color").

library('ggplot2') # visualization
library('ggthemes') # visualization
# Data exploration Scripts and Visualisations
# Project helpers: get_state_features() and the plans/rates/benefits/pol2012
# objects are defined in these sourced files (not visible here).
source ('./dataLoader.R')
source('./preprocesing.R')

# 2016: per-state features joined with 2012 political colour.
states2016 <- get_state_features(plans2016, rates2016, benefits2016)
states2016 <- merge(states2016, pol2012, by = 'State')
states2016$Color <- as.factor(states2016$Color)
# Drop states with no abortion-covering plans at all.
states2016 <- subset(states2016, states2016$NumAbortionPlans > 0)
#ggplot(data=states2016, aes(x=State, y=NumAbortionPlans)) + geom_point(aes(colour=Color))

# 2015: same pipeline; note the zero-plan filter is deliberately commented
# out here, so 2015 keeps all states (asymmetric with 2016 -- presumably
# intentional for the bar-chart comparison; TODO confirm).
states2015 <- get_state_features(plans2015, rates2015, benefits2015)
states2015 <- merge(states2015, pol2012, by = 'State')
states2015$Color <- as.factor(states2015$Color)
#states2015 <- subset(states2015, states2015$NumAbortionPlans > 0)
#summary(states2015)

# Scatter of abortion-plan share vs extra payment, coloured by party.
ggplot(data=states2016, aes(x=PercentageAbortionPlans, y=ExtraPay)) + geom_point(aes(colour=Color))

# Side-by-side bar charts of abortion-plan share per state, 2015 vs 2016.
p1 <- ggplot(data=states2015, aes(x=State, y=PercentageAbortionPlans, fill=Color)) + geom_bar(stat='identity') + ggtitle("Percentage of Abortion Plans per State 2015")
p1 <- p1 + theme(
axis.title.x = element_blank(),
axis.title.y = element_blank())
p2 <- ggplot(data=states2016, aes(x=State, y=PercentageAbortionPlans, fill=Color)) + geom_bar(stat='identity') + ggtitle("Percentage Abortion Plans per State 2016")
p2 <- p2 + theme(
axis.title.x = element_blank(),
axis.title.y = element_blank())
#p1
#multiplot(p1, p2, cols=2)
# Arrange several ggplot objects on a single page.
#
# Plots are passed individually via ... and/or as a list via 'plotlist'.
# When no 'layout' matrix is supplied, one is built from 'cols' (number of
# columns), filling row-wise. In an explicit layout matrix, cell value i
# places plot i; repeating a value spans that plot across cells.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)

  # Gather every plot into one list.
  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)

  # Build a default layout matrix from 'cols' if none was given.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq(1, cols * n_rows), ncol = cols, nrow = n_rows)
  }

  if (n_plots == 1) {
    # Single plot: no grid machinery needed.
    print(all_plots[[1]])
  } else {
    # Fresh page with a viewport grid matching the layout matrix.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    # Render each plot into the cell(s) holding its index.
    for (i in 1:n_plots) {
      cell <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(all_plots[[i]],
            vp = viewport(layout.pos.row = cell$row,
                          layout.pos.col = cell$col))
    }
  }
}
|
2f233c066c130c06cc9ee5ba88be489f0cd3f242
|
dfae36f8d11b92f6fbab3e720ad8dfedc0ec9aee
|
/man/zeropad.Rd
|
aa752f53484292121e373cd818e04f3dd2a0e5a7
|
[] |
no_license
|
stevetnz/stevesRfunctions
|
871abc73c05f03e28879632209c7819f96cae805
|
afb0b49bc951f2eb5fc7918aa71fa6626b4bc58c
|
refs/heads/master
| 2018-09-09T11:31:07.851685
| 2018-06-05T04:25:45
| 2018-06-05T04:25:45
| 115,288,767
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 611
|
rd
|
zeropad.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zeropad.R
\name{zeropad}
\alias{zeropad}
\title{Format numbers in constant width with leading zeros}
\usage{
zeropad(x, digits = 0, width = NULL)
}
\arguments{
\item{x}{Vector of numbers.}
\item{digits}{Number of decimal places (defaults to 0).}
\item{width}{Width of character values returned.}
}
\description{
Formats numbers for alphabetic sorting by padding with zeros
to constant string width.
If width is not supplied, a default value is calculated
based on the values in \code{x}.
}
\examples{
paste0('ID', zeropad(1:20))
}
|
9eeb0c47a8faf2ee72992ba0f9286bcfd29b2d52
|
bff6a33ede3dfe6558804694727ea15f77fd13eb
|
/main.R
|
7bb724eac721618428f43a4c2c8b93f67aad9db8
|
[] |
no_license
|
amineds/data-analysis-with-Rstudio-in-French
|
88048b21b184cf891d77ae93444845062bf98bca
|
cbb961f1ac2c2f56840aee88bbcb9a2da693d266
|
refs/heads/master
| 2020-12-24T14:36:37.669364
| 2015-07-23T21:11:11
| 2015-07-23T21:11:11
| 39,345,380
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,436
|
r
|
main.R
|
# GDP data analysis with R (comments translated from French).
library(ggplot2)
library(dplyr)

### Reading files, the special case of the `.csv` format
## First thing to do: set the working directory
## using the `setwd` function
#source : "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
# Skip the 5 header lines and read exactly 190 country rows.
gdp_data <- read.csv("gdp.csv",
                     stringsAsFactors=FALSE,
                     header = FALSE,
                     skip=5,
                     nrows=190,
                     strip.white = TRUE,
                     skipNul = TRUE
)
head(gdp_data,n=5) # first 5 rows
dim(gdp_data)
names(gdp_data)
# Profile each column
str(gdp_data)

### Data cleaning
# Keep only the columns of interest
gdp_data <- gdp_data[,c(1,2,4,5)]
# Convert variable V5 (gross domestic product, GDP): strip thousands
# separators, then coerce to numeric.
gdp_data$V5 <- as.numeric(gsub(",",'',gdp_data$V5))

#source : "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
eds_data <- read.csv("edstats_country.csv",
                     stringsAsFactors=TRUE,
                     header = TRUE,
                     skip=0,
                     skipNul = TRUE,
                     strip.white = TRUE
)

### Data formatting
# Start by combining the two data frames with merge (inner join on
# country code).
merged_data = merge(gdp_data,
                    eds_data,
                    by.x="V1",
                    by.y="CountryCode",
                    all=FALSE
)
# Rename some columns and order the data frame by `Rank`.
# NOTE(review): uses reshape::rename via namespace access -- the reshape
# package must be installed even though it is never library()-loaded.
merged_data <- reshape::rename(merged_data,c(V1="CountryCode",V2="Rank",V4="CountryName",V5="GDP"))
merged_data <- arrange(merged_data,Rank)

### Statistical summary of the data
## Apply some statistical functions to the data
# Mean rank for OECD member countries
mean(merged_data$Rank[merged_data$Income.Group=="High income: OECD"])
# Mean rank for non-OECD high-income countries
mean(merged_data$Rank[merged_data$Income.Group=="High income: nonOECD"])
range(merged_data$GDP)

### Graphical representation of the data
p <- ggplot(merged_data, aes(x=factor(Income.Group),y=GDP,label=merged_data$CountryCode))
p <- p + geom_boxplot() + coord_flip()
#p <- p + geom_text()
p

### Data aggregation
# Total GDP by region, largest first.
merged_data %>%
  group_by(Region) %>%
  summarise(sum_gdp = sum(GDP)) %>%
  arrange(desc(sum_gdp))
|
f2764f11caef561e59f9f739c27ebdc8724455e2
|
217e28f77f8386ca2f57507cc1fa301897ee3193
|
/server.R
|
0dd7825a8db5010047d263df9afbba9697152d06
|
[] |
no_license
|
jorgehpo/ConcentricRadviz
|
fbb0bfce0e15d351ff43d8dc3771929f6fb050f7
|
48b5f222c3124fa9408c92418504d186351d5c39
|
refs/heads/master
| 2021-05-06T04:21:37.792887
| 2017-12-20T21:25:47
| 2017-12-20T21:25:47
| 114,928,256
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,731
|
r
|
server.R
|
# server.R -- Shiny server for the Concentric RadViz visualisation.
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast -- confirm intent before changing.
require(shiny)
require(TSP)  # travelling-salesman solver used to order dimension anchors
# Project helpers defined in these files (not visible here):
# sortAllDGs() and computeOffsetDG().
source("SortAllDGs.R")
source("OffsetDG.R")
# Logistic squashing function: maps x smoothly into (0, 1).
# 's' controls the steepness of the curve; 't' shifts the midpoint
# (the output equals 0.5 at x = -t). Vectorised over x.
sigmoid <- function(x, s = 10, t = -1) {
  z <- s * (x + t)
  1 / (1 + exp(-z))
}
# Allow file uploads of up to 100 MB.
options(shiny.maxRequestSize=100*1024^2)
#concorde_path("/home/jorgehpo/Desktop/concorde/TSP")
shinyServer(function(input, output,session) {
  # Session-level cache of the uploaded data set; written with <<- from
  # inside the reactive below so the observers can read it.
  dataRadviz = NULL

  # Load the uploaded CSV and expose it to the client canvas.
  output$myCanvas <- reactive({
    if (is.null(input$file1))
      return(NULL)
    dataRadviz <<- read.csv(input$file1$datapath)
    dataRadviz
  })

  # Sort all dimension groups (DGs) when the client sends a SortAllDGs
  # message. Samples up to 500 rows, normalises each row, squashes it with
  # the sigmoid, then delegates to the project helper sortAllDGs().
  observe({
    if ((!is.null(input$SortAllDGs)) && (!is.null(dataRadviz))){
      cat("===============================================\n")
      cat("Comecou a ordenacao...",date(),"\n")
      cat("===============================================\n")
      retSort = list()
      retSort$DGs = input$SortAllDGs$dgs
      # NOTE(review): anchorAngles is assigned from $dgs, same as DGs --
      # looks like it may have been meant to come from another field;
      # confirm against the client-side code.
      retSort$anchorAngles = input$SortAllDGs$dgs
      retSort$anchorIndex = input$SortAllDGs$idsDAs
      # Work on a random sample of at most 500 rows for speed.
      nSamp = min(500, nrow(dataRadviz))
      samp = sample(1:nrow(dataRadviz), nSamp)
      classes = matrix(0, nrow = nSamp, ncol = 0)
      dataset = matrix(0, ncol = 0, nrow = nSamp)
      for (i in 1:length(input$SortAllDGs$dgs)){
        # Client sends 0-based column ids; +1 converts to R's 1-based index.
        myD = dataRadviz[samp, as.numeric(input$SortAllDGs$idsDAs[[i]]) + 1]
        # Per-row dominant dimension within this group.
        classes = cbind(classes, apply(myD, 1, which.max))
        myD = sweep(myD, MARGIN = 1, apply(myD,MARGIN = 1, max), FUN = "/") # normalise each row by its max
        dataset = cbind(dataset, myD)
      }
      # Sharpen values with the client-configured sigmoid parameters.
      dataset = sigmoid(dataset, s= input$SortAllDGs$sigmoidScale, t= input$SortAllDGs$sigmoidTranslate)
      # Collapse per-group dominant dimensions into one class label per row.
      classes = apply(classes, 1, paste, collapse = "")
      dataset = sweep(dataset, MARGIN = 1, apply(dataset,MARGIN = 1, sum), FUN = "/") # make every row sum to 1
      dataset = as.matrix(dataset)
      # Project helper (sourced from SortAllDGs.R).
      retSort$offsets = sortAllDGs(input$SortAllDGs$dgs, dataset = dataset, classes = classes)
      session$sendCustomMessage(type='MessageDGsSolved',retSort)
    }
  })

  # Order the anchors of a single dimension group by solving a TSP over
  # 1 - correlation distances, then send the new order back to the client.
  observe(
    {
      input$cityObj
      if (!is.null(dataRadviz)){
        cities = unlist(input$cityObj$cities)
        groupId = unlist(input$cityObj$groupId)
        anglesUsed = unlist(input$cityObj$anglesUsed)
        if (length(cities) > 0){
          # 0-based client ids -> 1-based column selection.
          dataCols = as.matrix(dataRadviz[,cities+1])
          if(length(cities)<=2){
            # One or two anchors: every ordering is equivalent.
            order = 1:length(cities)
          }else{
            # Dissimilarity = 1 - correlation; TSP finds a tour that keeps
            # correlated dimensions adjacent.
            mat = 1-cor(dataCols)
            suppressWarnings({order = solve_TSP(TSP(mat))})
          }
          returnObj = list()
          returnObj$cities = input$cityObj$cities[order];
          returnObj$groupId = input$cityObj$groupId;
          # Project helper (sourced from OffsetDG.R).
          returnObj$offset = computeOffsetDG(anglesUsed, length(cities))
          session$sendCustomMessage(type='MessageTSPSolved',returnObj)
        }
      }
    }
  )
})
|
00ae008ff216d07cf3cf4862538135cb654ada2d
|
9b659628745c424b0abe2ecb1b038e328296679e
|
/boxy-wCfe_tissues/boxy-tissues-anova.R
|
abb42042c30169330141622185d4ac3a9a49871d
|
[] |
no_license
|
driscollmml/sandbox
|
5d1f1cfa2e9f7e6cfdc4708f0513d06d9354e1de
|
790cd253bb616f13dce1b1fe079ef9a3bd3c8587
|
refs/heads/master
| 2020-04-10T23:31:50.576887
| 2019-09-05T15:46:07
| 2019-09-05T15:46:07
| 161,356,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 314
|
r
|
boxy-tissues-anova.R
|
#! /usr/local/bin/R
# Two-way ANOVA of wCfe copy number across tissues and qPCR targets,
# followed by Tukey HSD pairwise comparisons between tissues.
# NOTE(review): the input path below is machine-specific; adjust before reuse.

# library
library(ggplot2)  # loaded here but not used below -- presumably for interactive plotting
library(readxl)

# Read the tidy data; the model below expects columns copies, tissue, target.
data_for_R <- read_excel("/Users/tpd0001/github/sandbox/boxy-wCfe_tissues/wCfe-tissues-ANOVA.xlsx")

# balanced design two-way ANOVA
res.aov2 <- aov(copies ~ tissue * target, data = data_for_R)
summary(res.aov2)

# Pairwise tissue comparisons with Tukey's honest significant difference.
TukeyHSD(res.aov2, which = "tissue")
|
aec220649ed4c4664db98a2cdd5132690a1a79fe
|
0cd1ee53f120be720bf68b4288885256a69e2db5
|
/man/PhysActBedRest-package.Rd
|
e8d040e3ae1791aedaf965964557e085e769f270
|
[] |
no_license
|
cran/PhyActBedRest
|
18bb44330f565f793bac278b38fb8076210aff00
|
93f440bc6bdcc845c35de0d595ba709ee3b166db
|
refs/heads/master
| 2020-03-29T13:29:50.204813
| 2014-02-11T00:00:00
| 2014-02-11T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,465
|
rd
|
PhysActBedRest-package.Rd
|
\name{PhysActBedRest-package}
\alias{PhysActBedRest-package}
\alias{PhysActBedRest}
\docType{package}
\title{Mark Bedrest}
\description{
This package contains functions that apply an automated algorithm to mark time intervals in Actigraph accelerometer data as either bedrest or active. The functions are intended as an alternative to those identifying "sleep". Tracy et al. (2014) show that the functions are more accurate than the Sadeh algorithm at identifying these behaviors. The package contains separate functions for data obtained from different locations (e.g. waist or wrist worn). The package is designed to be used after the "wear marking" function in the PhysicalActivity package. The "wear marking" function can be used to eliminate nonwear time intervals that the BedRest function would otherwise classify as bedrest.
}
\details{
\tabular{ll}{
Package: \tab PhysActBedRest \cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-02-11\cr
License: \tab GPL(>=3)\cr
}
}
\author{J. Dustin Tracy, Zhiyi Xu, Leena Choi, Sari Acra, Kong Y. Chen, Maciej S. Buchowski
J. Dustin Tracy <jtracy2@student.gsu.edu>}
\references{Tracy JD, Xu Z, Choi L, Acra S, Chen KY and Buchowski MS(2014) Separating bedtime rest from activity using waist or wrist-worn accelerometers in youth. \emph{PLoS one} DOI: 10.1371/journal.pone.0092512}
\keyword{chron, accelerometer, sleep, bedrest
}
\seealso{
\pkg{PhysicalActivity}.
}
|
c5e529788f9d4f5f72b096a652e3f6151a037319
|
d4bfe22bab5c1f026d38a02c2b94ca09fba452ce
|
/figure/plot2.R
|
e1b6c88c1c508b62e440f364d0c53e700363ed7e
|
[] |
no_license
|
anemos23/ExData_Plotting1
|
a3d141e5ff6000e59f19d856e6e55bafc446c63b
|
6eb03ac4bd87d63827628b241671d1c40e1607c7
|
refs/heads/master
| 2021-01-22T01:43:21.162718
| 2015-01-11T16:54:28
| 2015-01-11T16:54:28
| 27,638,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
r
|
plot2.R
|
## Exploratory Data Analysis: plot 2 -- Global Active Power over two days.

## Load the full data set into a data frame.
power <- read.csv("~/R/household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE)

## Restrict to the two target dates (1-2 Feb 2007).
power_feb <- subset(power, power$Date == "1/2/2007" | power$Date == "2/2/2007")

## Attach a simple minute counter (2 days x 1440 minutes = 2880) to use as
## the plotting x axis.
timeline <- 1:2880
power_feb <- cbind(timeline, power_feb)

## Draw the line plot straight into a 480x480 PNG, suppressing the default
## x axis and adding day-name tick labels instead.
png("plot2.png", 480, 480)
plot(power_feb$timeline, power_feb$Global_active_power, type = "l", ylab = "Global active power (in kilowatts)", xlab = "", xaxt = "n")
axis(1, at = c(1, 1440, 2880), labels = c("Thu", "Fri", "Sat"), tick = TRUE)
dev.off()
|
9d29721b06cdf50d51d9b89dfe227e588ec4ef59
|
c5448803c6435f5ced328f1d3bc30fc56a4e70a3
|
/Documents/Big Data Analytics/DA and E/jupyter/fma/man/telephone.Rd
|
7516649d8c6542df9b8ed811b8c72826d8e47043
|
[] |
no_license
|
JohannesKokozela/Research_Project
|
0028dc0531e68d80b0ad2bee7b2e0f3a7f7596ba
|
a1b462d3f77ae6437d3648ab3f45fd76ce72ebbe
|
refs/heads/master
| 2021-07-13T01:25:59.689912
| 2017-10-14T13:41:07
| 2017-10-14T13:41:07
| 106,183,621
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
rd
|
telephone.Rd
|
\name{telephone}
\alias{telephone}
\title{Telephone cost}
\description{Telephone cost in San Francisco, New York: 1915--1996.}
\usage{telephone}
\format{Time series data}
\source{Makridakis, Wheelwright and Hyndman (1998) \emph{Forecasting: methods and applications}, John Wiley & Sons: New York.
Chapter 9.}
\keyword{datasets}
\examples{plot(telephone)
}
|
65a3ce1a8e5dc4029122b0bea8414c577b03a78e
|
1dec09df1d39cfb292ddf1301cc285c4727f5271
|
/R/LWnomo1.R
|
8e958b3bbe7a8bc6ed1b15a435237010fba1086f
|
[] |
no_license
|
cran/LW1949
|
15d3316301a81f24cb30087f10f78fa97ca2908d
|
78efc68e5ed7929946f2beb6a7de83074e70fcc1
|
refs/heads/master
| 2021-01-09T21:48:52.060550
| 2017-03-20T16:34:16
| 2017-03-20T16:34:16
| 48,083,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,178
|
r
|
LWnomo1.R
|
#' Recreate Litchfield and Wilcoxon's Nomograph No. 1
#'
#' Recreate Litchfield and Wilcoxon's (1949) nomograph to estimate the
#' contribution to the chi-squared from the expected percent effect and
#' the observed minus the expected percent effect.
#' @export
#' @import
#' plotrix
#' @details
#' Use the nomograph by laying a straight edge from the expected percent effect
#' in the first scale to the observed (corrected, if necessary) minus the
#' expected percent effect in the second scale and reading the point where the
#' straight edge crosses the third scale as the contribution.
#'
#' The formula behind the nomograph is
#' (observed - expected)^2 / (100 * expected)
#' @param values
#' A logical scalar indicating whether values should be output.
#' @param ...
#' Additional parameters to \code{\link{par}}.
#' @return
#' If \code{values} is TRUE, a list of length four, with the x and y
#' coordinates and the corresponding values (all displayed in the log10
#' scale) of the end points of the three scales. Information is provided
#' twice for the first scale, once for the left tick marks and once for the
#' right tick marks.
#' @import
#' graphics
#' @references
#' Litchfield, JT Jr. and F Wilcoxon. 1949.
#' A simplified method of evaluating dose-effect experiments.
#' Journal of Pharmacology and Experimental Therapeutics 96(2):99-113.
#' \href{http://jpet.aspetjournals.org/content/96/2/99.abstract}{[link]}.
#' @examples
#' LWnomo1()
LWnomo1 <- function(values=FALSE, ...) {
  # TRUE for values that round cleanly to one significant digit whose
  # leading digit is in `roundingto` (1, 2, 5) -- these get labelled ticks.
  bigtix <- function(x, fudge=10, roundingto=c(1, 2, 5)) {
    onedigit <- signif(x, 1) - round(x, fudge) == 0
    # Spell out format()'s `scientific` argument; the original `sci=` relied
    # on partial argument matching, which warns/errors under
    # options(warnPartialMatchArgs = TRUE).
    gooddigit <- substring(format(signif(x, 1), scientific=TRUE), 1, 1) %in%
      roundingto
    onedigit & gooddigit
  }

  # 1st scale, ep,
  # expected % on log scale, log10(ep).
  # ep2l holds the right-hand (large-percent) labels; ep1l = 100 - ep2l
  # gives the mirrored left-hand labels.
  ep2l <- c(
    seq(50, 80, 5), seq(82, 90, 2), seq(91, 95, 1),
    seq(95.5, 98, 0.5), seq(98.2, 99, 0.2), seq(99.1, 99.5, 0.1),
    seq(99.55, 99.8, 0.05), seq(99.82, 99.9, 0.02), seq(99.91, 99.95, 0.01),
    seq(99.955, 99.98, 0.005))
  ep1l <- rev(100-ep2l)
  # Labelled ("big") ticks: range endpoints plus the clean 1/2/5 values.
  ep1l. <- sort(unique(c(range(ep1l), ep1l[bigtix(ep1l)])))
  ep2l. <- rev(100 - ep1l.)

  # 3rd scale, chicont,
  # 100 times the contrib. to the chi-squared divided by n
  # on the log scale, log10(100*contrib/n), where n is the total number.
  chicontl <- 100*
    c(seq(0.001, 0.002, 0.0002), seq(0.0025, 0.005, 0.0005), seq(0.006, 0.01, 0.001),
      seq(0.012, 0.02, 0.002), seq(0.025, 0.05, 0.005), seq(0.06, 0.1, 0.01),
      seq(0.12, 0.2, 0.02), seq(0.25, 0.5, 0.05), seq(0.6, 1, 0.1),
      seq(1.2, 2, 0.2))
  chicontladj <- chicontl/100
  chicontl. <- sort(unique(c(range(chicontl), chicontl[bigtix(chicontl)])))
  chicontladj. <- chicontl./100

  # 2nd scale, opmep,
  # observed minus expected % on log scale times 2, 2*log10|op - ep|.
  # Its range is the sum of the ranges of the 1st and 3rd scales, because
  # on the log nomograph ep + chicont = opmep.
  opmeprange <- 10^((log10(c(0.02, 50)) + log10(c(0.1, 200)))/2)
  opmepladj <- sort(unique(c(opmeprange,
    seq(0.05, 0.1, 0.01), seq( 0.12, 0.2, 0.02), seq( 0.25, 0.5, 0.05),
    seq(0.6, 1, 0.1), seq( 1.2, 2, 0.2), seq( 2.5, 5, 0.5),
    seq(6, 10, 1), seq(12, 20, 2), seq(25, 50, 5),
    seq(60, 100, 10))))
  opmepl <- 2*log10(opmepladj)
  opmepladj. <- sort(unique(c(range(opmepladj), opmepladj[bigtix(opmepladj)])))
  opmepl. <- 2*log10(opmepladj.)

  # Set up the plotting canvas. par() settings deliberately persist after
  # the call so users can annotate the nomograph afterwards.
  par(xaxs="i", yaxs="i", mar=c(1, 1.5, 4.5, 0.5), las=1, ...)
  plot(0:1, 0:1, type="n", axes=FALSE, xlab="", ylab="")

  # http://stackoverflow.com/a/29893376/2140956
  # fix the number of lines for right labels on first axis
  nlines <- 1.5
  # convert 1 from lines to inches
  inches <- nlines * par("cin")[2] * par("cex") * par("lheight")
  # convert from inches to user coords
  mycoord <- diff(grconvertX(c(0, inches), from="inches", to="user"))

  # First scale at x = 0.1: small ticks, big labelled ticks on the left,
  # mirrored (100 - value) labels offset to the right.
  axis(2, pos=0.1, at=rescale(log10(ep1l), 0:1), labels=FALSE, tck=-0.01)
  axis(2, pos=0.1, at=rescale(log10(ep1l.), 0:1), labels=round(rev(ep2l.), 2))
  axis(2, pos=0.1+mycoord, at=rescale(log10(ep1l.), 0:1),
    labels=round(ep1l., 2), tick=FALSE, hadj=0)

  # Second scale at x = 0.5 (first element dropped: it duplicates the range
  # endpoint).
  axis(2, pos=0.5, at=rescale(opmepl, 0:1)[-1], labels=FALSE, tck=-0.01)
  axis(2, pos=0.5, at=rescale(opmepl., 0:1)[-1],
    labels=round(opmepladj., 3)[-1])

  # Third scale at x = 0.9.
  axis(2, pos=0.9, at=rescale(log10(chicontl), 0:1), labels=FALSE, tck=-0.01)
  axis(2, pos=0.9, at=rescale(log10(chicontl.), 0:1),
    labels=round(chicontladj., 4))

  # Scale titles ("\U00B2" is a superscript 2 for "chi-squared").
  mtext(c("Expected\n% effect", "Observed minus\nexpected % effect",
    "(Chi)\U00B2\nfor samples\nof one"), side=3, at=c(0.1, 0.5, 0.9), line=1)

  if(values) {
    # End points of each scale, in plot coordinates plus nomograph values.
    scale1l <- data.frame(x= c(0.1, 0.1), y=0:1, values=c(99.98, 50))
    scale1r <- data.frame(x= c(0.1, 0.1), y=0:1, values=c(0.02, 50))
    scale2 <- data.frame(x= c(0.5, 0.5), y=0:1, values=c(0.045, 100))
    scale3 <- data.frame(x= c(0.9, 0.9), y=0:1, values=c(0.001, 2))
    out <- list(scale1l=scale1l, scale1r=scale1r, scale2=scale2, scale3=scale3)
    return(out)
  }
}
|
89e03668b7e24b5f1146e8d9608d6628a07a0d51
|
7ba42ea09417547219343e5532a1f7954bdf10b2
|
/R/xlnet-embeddings.R
|
53149eeb86c020857439ecd62f2a60ac71e3cf46
|
[
"Apache-2.0"
] |
permissive
|
r-spark/sparknlp
|
622822b53e2b5eb43508852e39a911a43efa443f
|
4c2ad871cc7fec46f8574f9361c78b4bed39c924
|
refs/heads/master
| 2023-03-16T05:35:41.244593
| 2022-10-06T13:42:00
| 2022-10-06T13:42:00
| 212,847,046
| 32
| 7
|
NOASSERTION
| 2023-03-13T19:33:03
| 2019-10-04T15:27:28
|
R
|
UTF-8
|
R
| false
| false
| 2,835
|
r
|
xlnet-embeddings.R
|
#' Load a pretrained Spark NLP XlnetEmbeddings model
#'
#' Downloads and wraps a pretrained Spark NLP \code{XlnetEmbeddings} model
#' as a sparklyr ML transformer.
#'
#' @template roxlate-pretrained-params
#' @template roxlate-inputs-output-params
#' @param case_sensitive whether to treat the tokens as case insensitive when looking up their embedding
#' @param batch_size batch size
#' @param dimension the embedding dimension
#' @param lazy_annotator use as a lazy annotator or not
#' @param max_sentence_length set the maximum sentence length
#' @param storage_ref storage reference name
#' @export
nlp_xlnet_embeddings_pretrained <- function(sc, input_cols, output_col, case_sensitive = NULL,
                                            batch_size = NULL, dimension = NULL,
                                            lazy_annotator = NULL, max_sentence_length = NULL, storage_ref = NULL,
                                            name = NULL, lang = NULL, remote_loc = NULL) {
  # Validate and coerce the annotator arguments before touching the JVM.
  args <- validator_nlp_xlnet_embeddings(list(
    input_cols = input_cols,
    output_col = output_col,
    case_sensitive = case_sensitive,
    batch_size = batch_size,
    dimension = dimension,
    lazy_annotator = lazy_annotator,
    max_sentence_length = max_sentence_length,
    storage_ref = storage_ref
  ))

  model <- pretrained_model(sc,
                            "com.johnsnowlabs.nlp.embeddings.XlnetEmbeddings",
                            name, lang, remote_loc)

  # Map each validated argument onto its Java-side setter.
  # sparklyr::jobj_set_param() skips NULL values, so optional arguments
  # are only applied when the caller supplied them.
  setters <- c(
    input_cols          = "setInputCols",
    output_col          = "setOutputCol",
    case_sensitive      = "setCaseSensitive",
    batch_size          = "setBatchSize",
    dimension           = "setDimension",
    lazy_annotator      = "setLazyAnnotator",
    max_sentence_length = "setMaxSentenceLength",
    storage_ref         = "setStorageRef"
  )
  jobj <- spark_jobj(model)
  for (arg in names(setters)) {
    jobj <- sparklyr::jobj_set_param(jobj, setters[[arg]], args[[arg]])
  }

  new_ml_transformer(model)
}
#' @import forge
validator_nlp_xlnet_embeddings <- function(args) {
  # Coerce each argument to its expected type using forge casts.
  # The cast_nullable_* functions pass NULL through unchanged, so optional
  # arguments remain NULL when not supplied.
  casts <- list(
    input_cols          = cast_string_list,
    output_col          = cast_string,
    case_sensitive      = cast_nullable_logical,
    batch_size          = cast_nullable_integer,
    dimension           = cast_nullable_integer,
    lazy_annotator      = cast_nullable_logical,
    max_sentence_length = cast_nullable_integer,
    storage_ref         = cast_nullable_string
  )
  for (field in names(casts)) {
    args[[field]] <- casts[[field]](args[[field]])
  }
  args
}
# Constructor helper: tag a Spark NLP XlnetEmbeddings jobj with the
# "nlp_xlnet_embeddings" S3 class on top of sparklyr's ML transformer class.
new_nlp_xlnet_embeddings <- function(jobj) {
  sparklyr::new_ml_transformer(jobj, class = "nlp_xlnet_embeddings")
}
|
b20bba45e35d65bf87e851b48d39547493f012da
|
1befa50ec497f45d2c5a5973e3526fa708cf71aa
|
/resamplePRISM.R
|
1079f406890d6ea121e0c159e692293bf92eee84
|
[] |
no_license
|
mcrimmins/DroughtIndices
|
0248841f70e84c68fd6d084b28844635a2f5d700
|
5508184711a03b456cd90aaf4f144dcb4745ab7f
|
refs/heads/master
| 2020-03-25T05:29:09.683410
| 2019-07-03T02:01:57
| 2019-07-03T02:01:57
| 143,449,343
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,047
|
r
|
resamplePRISM.R
|
# Resample PRISM monthly minimum-temperature grids onto the Livneh grid
# resolution, writing the result to disk.
# Original author: MAC, 2018-08-09.
library(raster)
library(zoo)
library(rasterVis)

# Show a text progress bar during long raster operations.
rasterOptions(progress = "text")

# Monthly date sequence covering the 1895-2017 PRISM record.
dates <- seq(as.Date("1895-01-01"), as.Date("2017-12-31"), by = "month")

# PRISM monthly tmin, 1895-2017 (western US subset).
tmin <- stack("/scratch/crimmins/PRISM/monthly/processed/west/WESTmonthlyPRISM_tmin_1895_2017.grd")

# Use a single Livneh layer as the resampling target grid, then release the
# full stack to free memory.
livneh <- stack("/scratch/crimmins/livneh/processed/WESTmonthlyLivneh_prec_1915_2015.grd")
livnehGrid <- livneh[[1212]]
rm(livneh)
gc()

# Bilinear resampling onto the Livneh grid; `filename` streams the result
# straight to disk rather than holding it in memory.
gridResample <- resample(
  tmin, livnehGrid,
  method = "bilinear",
  filename = "/scratch/crimmins/PRISM/monthly/processed/west/resampled/resampledWESTmonthlyPRISM_tmin_1895_2017.grd"
)
|
cf7f345f04ce8879c063baa8cfbc0e6fe3eb5df4
|
ed1f5670a858affeb3377307119ea38e9901470c
|
/setup/pop1plus.R
|
d92f3c9fd61f03dd021baffd17c6877dd7049737
|
[] |
no_license
|
bthe/NA-fin-ist
|
e116d84a46154a44a54a05a5dcb5838be0bdf642
|
575e00d3aeac49f320a655b932acfc3942c7626c
|
refs/heads/master
| 2021-01-21T04:43:16.330777
| 2020-11-06T13:04:08
| 2020-11-06T13:04:08
| 48,041,698
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,920
|
r
|
pop1plus.R
|
# Baseline trials: for each MSYR level (1% and 4%), plot modelled 1+ population
# trajectories per area, faceted by area, colored by hypothesis, with the
# abundance data (points) and their lower/upper bounds (error bars) overlaid.
# NOTE(review): theme(panel.margin = ...) is deprecated in newer ggplot2
# (replaced by panel.spacing) -- confirm the installed ggplot2 version.
for(ms in c(1,4)){
  print(pop %>%
          filter(grepl(sprintf('NF-B.-%s',ms),ref)) %>%
          mutate(hypo = paste('Hypothesis',hypo)) %>%
          ggplot(aes(year,number)) +
          geom_line(aes(col=hypo,lty=hypo)) +
          facet_wrap(~area,scale='free_y',ncol=2) +
          geom_point(aes(year,obs),col='black') +
          geom_errorbar(aes(year,ymax=upper,ymin=lower),col='black') +
          theme_bw() + ylab('1+ population') + xlab('Year') +
          geom_text(aes(label=area),x=1960,y=Inf, vjust = 2,hjust = 1,col='black') +
          theme(axis.text.y=element_text(angle = 90,hjust = 0.5,size=5),
                axis.text.x=element_text(size = 7),
                legend.position = c(0.7,0.1),
                panel.margin = unit(0.3,'cm'),
                plot.margin = unit(c(0.2,0.2,0.2,0.2),'cm'),
                strip.background = element_blank(),
                strip.text.x = element_blank())+
          scale_x_continuous(breaks = seq(1860,2015,by=20),
                             minor_breaks = seq(1860,2015,by=5))+
          expand_limits(y = 0)+
          guides(color = guide_legend(""), lty = guide_legend(""))+
          ggtitle(sprintf('Baseline %s%% (%s)',ms,ifelse(ms==1,'1+','Mature'))))
}
# "Other hypothesis" trials: same layout as the baseline plots, but iterating
# over hypotheses 1-3 and excluding the named special trial types
# (B, Y, Q, J, A, E); lines are colored by trial type instead of hypothesis.
for(ms in c(1,4)){
  for(hyp in 1:3){
    print(pop %>%
            filter(!(trialtype %in% c('B','Y','Q','J','A','E')), msyr == ms/100,hypo == hyp) %>%
            ggplot(aes(year,number,col=type)) +
            geom_line(aes(lty=type)) +
            facet_wrap(~area,scale='free_y',ncol=2) +
            geom_point(aes(year,obs),col='black') +
            geom_errorbar(aes(year,ymax=upper,ymin=lower),col='black') +
            theme_bw() + ylab('1+ population') + xlab('Year') +
            geom_text(aes(label=area),x=1960,y=Inf, vjust = 2,hjust = 1,col='black') +
            theme(axis.text.y=element_text(angle = 90,hjust = 0.5,size=5),
                  axis.text.x=element_text(size = 7),
                  legend.position = c(0.75,0.1),
                  panel.margin = unit(0.3,'cm'),
                  plot.margin = unit(c(0.2,0.2,0.2,0.2),'cm'),
                  strip.background = element_blank(),
                  strip.text.x = element_blank())+
            scale_x_continuous(breaks = seq(1860,2015,by=20),
                               minor_breaks = seq(1860,2015,by=5))+
            expand_limits(y = 0)+
            guides(col=guide_legend(title=NULL,ncol=2),lty=guide_legend(title=NULL))+
            ggtitle(sprintf('Other hypothesis %s trials %s%% (%s)',
                            hyp,ms,ifelse(ms==1,'1+','Mature'))))
  }
}
# plot E trials: the pre-1990 abundance observations in the EI/F, EG and WI
# areas are blanked out (set to NA) in both the points and the error bars,
# matching the trial definition ("Exclude 1987/9 abundance").
for(ms in c(1,4)){
  print(pop %>%
          filter(trialtype =='E', msyr == ms/100) %>%
          mutate(hypo = paste('Hypothesis',hypo)) %>%
          ggplot(aes(year,number,col=hypo)) +
          geom_line(aes(lty=hypo)) +
          facet_wrap(~area,scale='free_y',ncol=2) +
          geom_point(aes(year,ifelse(year<1990 & area %in% c('EI/F','EG','WI'),NA,obs)),col='black') +
          geom_errorbar(aes(year,ymax=ifelse(year<1990 & area %in% c('EI/F','EG','WI'),NA,upper),
                            ymin=ifelse(year<1990 & area %in% c('EI/F','EG','WI'),NA,lower)),
                        col='black') +
          theme_bw() + ylab('1+ population') + xlab('Year') +
          geom_text(aes(label=area),x=1960,y=Inf, vjust = 2,hjust = 1,col='black') +
          theme(axis.text.y=element_text(angle = 90,hjust = 0.5,size=5),
                axis.text.x=element_text(size = 7),
                legend.position = c(0.75,0.1),
                panel.margin = unit(0.3,'cm'),
                plot.margin = unit(c(0.2,0.2,0.2,0.2),'cm'),
                strip.background = element_blank(),
                strip.text.x = element_blank())+
          scale_x_continuous(breaks = seq(1860,2015,by=20),
                             minor_breaks = seq(1860,2015,by=5))+
          expand_limits(y = 0)+
          guides(col=guide_legend(title=NULL,ncol=2),lty=guide_legend(title=NULL))+
          ggtitle(sprintf('Exclude 1987/9 abundance in EG, WI and EI/F %s%% (%s)',
                          ms,ifelse(ms==1,'1+','Mature'))))
}
# plot J trials: g(0) = 0.8 sensitivity, so the observations and their bounds
# are inflated by dividing by 0.8 before plotting.
for(ms in c(1,4)){
  print(pop %>%
          filter(trialtype =='J', msyr == ms/100) %>%
          mutate(hypo = paste('Hypothesis',hypo)) %>%
          ggplot(aes(year,number,col=hypo)) +
          geom_line(aes(lty=hypo)) +
          facet_wrap(~area,scale='free_y',ncol=2) +
          geom_point(aes(year,obs/0.8),col='black') +
          geom_errorbar(aes(year,ymax=upper/0.8,
                            ymin=lower/0.8),
                        col='black') +
          theme_bw() + ylab('1+ population') + xlab('Year') +
          geom_text(aes(label=area),x=1960,y=Inf, vjust = 2,hjust = 1,col='black') +
          theme(axis.text.y=element_text(angle = 90,hjust = 0.5,size=5),
                axis.text.x=element_text(size = 7),
                legend.position = c(0.75,0.1),
                panel.margin = unit(0.3,'cm'),
                plot.margin = unit(c(0.2,0.2,0.2,0.2),'cm'),
                strip.background = element_blank(),
                strip.text.x = element_blank())+
          scale_x_continuous(breaks = seq(1860,2015,by=20),
                             minor_breaks = seq(1860,2015,by=5))+
          expand_limits(y = 0)+
          guides(col=guide_legend(title=NULL,ncol=2),lty=guide_legend(title=NULL))+
          ggtitle(sprintf('g(0) = 0.8 %s%% (%s)',
                          ms,ifelse(ms==1,'1+','Mature'))))
}
# plot A trials: pro-rated abundance, using the pro.obs / pro.lower / pro.upper
# columns instead of the raw observation columns.
for(ms in c(1,4)){
  print(pop %>%
          filter(trialtype =='A', msyr == ms/100) %>%
          mutate(hypo = paste('Hypothesis',hypo)) %>%
          ggplot(aes(year,number,col=hypo)) +
          geom_line(aes(lty=hypo)) +
          facet_wrap(~area,scale='free_y',ncol=2) +
          geom_point(aes(year,pro.obs),col='black') +
          geom_errorbar(aes(year,ymax=pro.upper,
                            ymin=pro.lower),
                        col='black') +
          theme_bw() + ylab('1+ population') + xlab('Year') +
          geom_text(aes(label=area),x=1960,y=Inf, vjust = 2,hjust = 1,col='black') +
          theme(axis.text.y=element_text(angle = 90,hjust = 0.5,size=5),
                axis.text.x=element_text(size = 7),
                legend.position = c(0.75,0.1),
                panel.margin = unit(0.3,'cm'),
                plot.margin = unit(c(0.2,0.2,0.2,0.2),'cm'),
                strip.background = element_blank(),
                strip.text.x = element_blank())+
          scale_x_continuous(breaks = seq(1860,2015,by=20),
                             minor_breaks = seq(1860,2015,by=5))+
          expand_limits(y = 0)+
          guides(col=guide_legend(title=NULL,ncol=2),lty=guide_legend(title=NULL))+
          ggtitle(sprintf('Pro-rated abundance %s%% (%s)',
                          ms,ifelse(ms==1,'1+','Mature'))))
}
|
f7167df2c9d6a844a8f450fbf8a2cf06eb51d703
|
73fc537bb4ca79f15edebcbfef0c90878666380c
|
/man/plot.ensembleBMA.Rd
|
181f03c01f740393b4a5a4e33a6a2ff2baea2648
|
[] |
no_license
|
cran/ensembleBMA
|
b3012f476e3c7e44580edb9fb23e06bec7fce12c
|
2bbb7ed69a64dd97b55a40d832b19fbc77e89b10
|
refs/heads/master
| 2022-09-16T14:20:24.345306
| 2022-09-02T06:20:05
| 2022-09-02T06:20:05
| 17,695,812
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,183
|
rd
|
plot.ensembleBMA.Rd
|
\name{plot.ensembleBMA}
\alias{plot.ensembleBMA}
\alias{plot.ensembleBMAgamma}
\alias{plot.ensembleBMAgamma0}
\alias{plot.ensembleBMAnormal}
\alias{plot.fitBMA}
\alias{plot.fitBMAgamma}
\alias{plot.fitBMAgamma0}
\alias{plot.fitBMAnormal}
\alias{plotBMAgamma}
\alias{plotBMAgamma0}
\alias{plotBMAnormal}
\title{
Plot the Predictive Distribution Function for ensemble forcasting models
}
\description{
Plots the Predictive Distribution Function (PDF)
of an ensemble forecasting model.
}
\usage{
\method{plot}{ensembleBMAgamma}( x, ensembleData, dates=NULL, ask=TRUE, ...)
\method{plot}{ensembleBMAgamma0}( x, ensembleData, dates=NULL, ask=TRUE, ...)
\method{plot}{ensembleBMAnormal}( x, ensembleData, dates=NULL, ask=TRUE, ...)
\method{plot}{fitBMAgamma}( x, ensembleData, dates=NULL, ...)
\method{plot}{fitBMAgamma0}( x, ensembleData, dates=NULL, ...)
\method{plot}{fitBMAnormal}( x, ensembleData, dates=NULL, ...)
}
\arguments{
\item{x}{
A model fit to ensemble forecasting data.
}
\item{ensembleData}{
An \code{ensembleData} object that includes ensemble forecasts,
verification observations and possibly dates.
Missing values (indicated by \code{NA}) are allowed. \\
This need not be the data used for the model \code{fit},
although it must include the same ensemble members.
}
\item{dates}{
The dates for which the PDF will be computed.
These dates must be consistent with \code{fit} and \code{ensembleData}.
The default is to use all of the dates in \code{fit}.
The dates are ignored if \code{fit} originates from \code{fitBMA},
which also ignores date information.
}
\item{ask}{
A logical value indicating whether or not the user should be prompted
for the next plot.
}
\item{\dots}{
Included for generic function compatibility.
}
}
\details{
This method is generic, and can be applied to any ensemble forecasting
model. \cr
The colored curves are the weighted PDFs of the ensemble members,
and the bold curve is the overall PDF. The vertical black line represents
the median forecast, and the dotted black lines represent the 0.1 and 0.9
quantiles. The vertical orange line is the verifying observation (if
any).\cr
Exchangeable members are represented in the plots by the weighted
group sum rather than by the individual weighted PDFs of each member.
}
\references{
A. E. Raftery, T. Gneiting, F. Balabdaoui and M. Polakowski,
Using Bayesian model averaging to calibrate forecast ensembles,
\emph{Monthly Weather Review 133:1155--1174, 2005}.
J. M. Sloughter, A. E. Raftery, T. Gneiting and C. Fraley,
Probabilistic quantitative precipitation forecasting
using Bayesian model averaging,
\emph{Monthly Weather Review 135:3209--3220, 2007}.
J. M. Sloughter, T. Gneiting and A. E. Raftery,
Probabilistic wind speed forecasting
using ensembles and Bayesian model averaging,
\emph{Journal of the American Statistical Association, 105:25--35, 2010}.
C. Fraley, A. E. Raftery, T. Gneiting,
Calibrating Multi-Model Forecast Ensembles
with Exchangeable and Missing Members using Bayesian Model Averaging,
\emph{Monthly Weather Review 138:190-202, 2010}.
C. Fraley, A. E. Raftery, T. Gneiting and J. M. Sloughter,
\code{ensembleBMA}: An \code{R} Package for Probabilistic Forecasting
using Ensemble and Bayesian Model Averaging,
Technical Report No. 516R, Department of Statistics, University of
Washington, 2007 (revised 2010).
}
\examples{
data(ensBMAtest)
ensMemNames <- c("gfs","cmcg","eta","gasp","jma","ngps","tcwb","ukmo")
obs <- paste("T2","obs", sep = ".")
ens <- paste("T2", ensMemNames, sep = ".")
tempTestData <- ensembleData( forecasts = ensBMAtest[,ens],
dates = ensBMAtest[,"vdate"],
observations = ensBMAtest[,obs],
station = ensBMAtest[,"station"],
forecastHour = 48,
initializationTime = "00")
\dontrun{# R check
tempTestFit <- ensembleBMAnormal( tempTestData, trainingDays = 30)
plot(tempTestFit, tempTestData)
}
}
\keyword{models}
% docclass is function
|
a217b93d007d9d0c460867b83ba0365d61a17bf3
|
d4663d9791edda1d4744f52e2fbe585677d90136
|
/Sentiment Analysis/[001]Twitter-Sentiment-Analysis/03-wordcloud.R
|
279d4458abc4ae383acbe532f2bf36ae16730d22
|
[] |
no_license
|
romele-stefano/R-scripts
|
1bb9af6dcc353c8f2e2a85248e3de85a2bb0c037
|
557daa981540f7e04e296527993a563323db924e
|
refs/heads/master
| 2020-06-22T05:05:34.278438
| 2019-01-29T13:44:16
| 2019-01-29T13:44:16
| 74,959,531
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,373
|
r
|
03-wordcloud.R
|
#################
#   WORDCLOUD   #
#################

# Build word clouds for the Trump and Clinton tweet texts.
# Relies on `text_trump` / `text_clinton` character vectors created upstream,
# and intentionally overwrites them with the cleaned text (later scripts may
# rely on the cleaned versions staying in the global environment).

### TRUMP ###
# Strip ampersands, URLs, and @-mentions.
# NOTE(review): "http.*" and "@.*" remove everything from the first match to
# the end of the string, not just the URL/mention -- confirm this is intended.
text_trump <- gsub("&", "", text_trump)
text_trump <- gsub("http.*", "", text_trump)
text_trump <- gsub("@.*", "", text_trump)

# Build and normalize the corpus.
# NOTE(review): newer tm versions expect content_transformer() around plain
# functions such as tolower -- confirm against the installed tm version.
corpus <- Corpus(VectorSource(text_trump))
# Convert to lower-case
corpus <- tm_map(corpus, tolower)
# Remove stopwords
corpus <- tm_map(corpus, function(x) removeWords(x, stopwords()))
# Convert corpus to a Plain Text Document
corpus <- tm_map(corpus, PlainTextDocument)
# display.brewer.all() to see possible colors!
col <- brewer.pal(8, "Paired")
# Arguments spelled out in full (the original relied on partial matching of
# max.word -> max.words) and TRUE/FALSE written explicitly instead of T/F.
wordcloud(corpus, min.freq = 5, scale = c(4, .7), rot.per = 0.25,
          random.color = TRUE, max.words = 250, random.order = FALSE, colors = col)

### CLINTON ###
# Same cleaning pipeline as above.
text_clinton <- gsub("&", "", text_clinton)
text_clinton <- gsub("http.*", "", text_clinton)
text_clinton <- gsub("@.*", "", text_clinton)

corpus <- Corpus(VectorSource(text_clinton))
# Convert to lower-case
corpus <- tm_map(corpus, tolower)
# Remove stopwords
corpus <- tm_map(corpus, function(x) removeWords(x, stopwords()))
# Convert corpus to a Plain Text Document
corpus <- tm_map(corpus, PlainTextDocument)
# display.brewer.all() to see possible colors!
col <- brewer.pal(8, "Paired")
wordcloud(corpus, min.freq = 5, scale = c(5, 1), rot.per = 0.25,
          random.color = TRUE, max.words = 100, random.order = FALSE, colors = col)
|
c4530d5de06d0c40d719c2af2aea53b92287c889
|
58b5653f2c3e582a3de4aa5059b33282e000e8cc
|
/man/draw.StudentT.Rd
|
7f736cf1d282153f856f7aad7416ff172e27c4a0
|
[] |
no_license
|
hayate0304/Rsymbulate
|
539c158af6d758ccf5254c5feaf440be26419089
|
3fc7fd0ca0e9c476e4da0f8382787ac5bbddd010
|
refs/heads/master
| 2020-03-25T18:57:41.909510
| 2019-01-24T04:47:27
| 2019-01-24T04:47:27
| 144,058,208
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 373
|
rd
|
draw.StudentT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{draw.StudentT}
\alias{draw.StudentT}
\title{A function that takes no arguments and
returns a single draw from the T distribution.}
\usage{
\method{draw}{StudentT}(self)
}
\description{
A function that takes no arguments and
returns a single draw from the T distribution.
}
|
44fa749fe368c8884bc3cae95a712a6e573bcc20
|
93d1fcc7758e5e99927be0529fb9d681db71e70c
|
/man/sparsify_simdat_r.Rd
|
27f3dce44f2d2f06b198ab11e773b93c69e9e9ae
|
[] |
no_license
|
psychmeta/psychmeta
|
ef4319169102b43fd87caacd9881014762939e33
|
b790fac3f2a4da43ee743d06de51b7005214e279
|
refs/heads/master
| 2023-08-17T20:42:48.778862
| 2023-08-14T01:22:19
| 2023-08-14T01:22:19
| 100,509,679
| 37
| 15
| null | 2023-08-14T01:06:53
| 2017-08-16T16:23:28
|
R
|
UTF-8
|
R
| false
| true
| 1,317
|
rd
|
sparsify_simdat_r.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_r.R
\name{sparsify_simdat_r}
\alias{sparsify_simdat_r}
\title{Create sparse artifact information in a "simdat_r_database" class object}
\usage{
sparsify_simdat_r(
data_obj,
prop_missing,
sparify_arts = c("rel", "u"),
study_wise = TRUE
)
}
\arguments{
\item{data_obj}{Object created by the "simdat_r_database" function.}
\item{prop_missing}{Proportion of studies in from which artifact information should be deleted.}
\item{sparify_arts}{Vector of codes for the artifacts to be sparsified: "rel" for reliabilities, "u" for u ratios, or c("rel", "u") for both.}
\item{study_wise}{Logical scalar argument determining whether artifact deletion should occur for all variables in a study (\code{TRUE}) or randomly across variables within studies (\code{FALSE}).}
}
\value{
A sparsified database
}
\description{
This function can be used to randomly delete artifacts from databases produced by the \code{\link{simulate_r_database}} function.
Deletion of artifacts can be performed in either a study-wise fashion for complete missingness within randomly selected studies or element-wise missingness for completely random deletion of artifacts in the database.
Deletion can be applied to reliability estimates and/or u ratios.
}
|
3b2cd8303486f473f0506ed57b585e8d062b3eef
|
ee1727efd8dc3873db7ab949bbe37d609c017a02
|
/man/generate_monthly_flow_replicates.Rd
|
f6e7371779464b0f845359b02b6e59ba373563f8
|
[
"BSD-2-Clause"
] |
permissive
|
pnnl/capratTX
|
09f189ee5c8d96df32848f10eb06b80b185f7d15
|
860e60d86713af7b86a571d7743a490dee1bcce7
|
refs/heads/master
| 2023-01-22T17:22:36.058629
| 2020-12-04T23:06:12
| 2020-12-04T23:06:12
| 304,059,832
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 498
|
rd
|
generate_monthly_flow_replicates.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/climate.R
\name{generate_monthly_flow_replicates}
\alias{generate_monthly_flow_replicates}
\title{generate_monthly_flow_replicates}
\usage{
generate_monthly_flow_replicates(reservoir, data_path)
}
\arguments{
\item{reservoir}{name of the reservoir}
\item{data_path}{path to data directory "ERCOT Reservoir Watershed Delineations and Inflow Scenarios"}
}
\description{
generate_monthly_flow_replicates
}
\details{
...
}
|
4fa04662a1156e2bfc588d905a9963fbc4c01661
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/GauPro/man/trend_LM.Rd
|
b055a5f6e399f9f25bbc2f3b131596f70f6f4839
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 482
|
rd
|
trend_LM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trend_LM.R
\docType{class}
\name{trend_LM}
\alias{trend_LM}
\title{Trend R6 class}
\format{\code{\link{R6Class}} object.}
\usage{
trend_LM
}
\value{
Object of \code{\link{R6Class}} with methods for fitting GP model.
}
\description{
Trend R6 class
}
\examples{
t1 <- trend_LM$new(D=2)
}
\keyword{Gaussian}
\keyword{data,}
\keyword{kriging,}
\keyword{process,}
\keyword{regression}
|
a1879da0dc81d095c99721b152344531920ee8d2
|
8eb62638876d3e7b29732022759ed226774c7e0d
|
/genetic_score/8_PRS_SNPassoc/getScore.R
|
b8eae5fb87cda956ed011c243610f0ba64693183
|
[] |
no_license
|
isglobal-brge/master_thesis
|
93b7d7f09ba8b495895ff152826bfdbbeb8159b9
|
57f27a0997c260604986a603157f87b12730db7c
|
refs/heads/master
| 2023-07-06T09:16:17.497634
| 2023-06-29T11:59:46
| 2023-06-29T11:59:46
| 146,269,866
| 0
| 6
| null | 2023-06-29T10:12:54
| 2018-08-27T08:34:33
|
HTML
|
UTF-8
|
R
| false
| false
| 999
|
r
|
getScore.R
|
# Compute a per-sample allele-dosage score over a set of selected SNPs.
#
# Args:
#   geno:    genotype data (samples x SNPs); columns are subset by `snp.sel`.
#   annot:   SNP annotation with `allele.1` / `allele.2` columns, indexed by
#            SNP name.
#   snp.sel: character vector of SNP names (columns of `geno`, rows of `annot`).
#
# Returns: numeric vector with one score per sample -- the row sum of the
#   per-SNP dosages, with dosages flipped (0 <-> 2) for SNPs whose observed
#   allele coding is reversed relative to the reference minor allele.
getScore <- function(geno, annot, snp.sel){
  x <- geno[, snp.sel]
  annot <- annot[snp.sel, ]
  # dscore.character() is defined elsewhere in this project; it attaches the
  # reference minor-allele table as the "MAFs" attribute.
  snp.maf <- dscore.character(snp.sel)
  maf <- attr(snp.maf, "MAFs")

  # Recode one genotype column to dosage, flipping 0 <-> 2 when flagged.
  # Assumes as.numeric(genotype) - 1 yields the dosage, with -1 marking a
  # missing genotype -- TODO confirm the genotype coding.
  ff <- function(i, x, flip){
    xx <- x[, i]
    flip.i <- flip[i]
    ans <- as.numeric(xx) - 1
    ans[ans == -1] <- NA
    out <- ans
    if (flip.i) {
      out[ans == 2] <- 0
      out[ans == 0] <- 2
    }
    out
  }

  # Complement single-letter alleles. Deliberately matches whole strings only,
  # so multi-character (indel) codes pass through unchanged.
  getComp <- function(x){
    out <- x
    out[x == "G"] <- "C"
    out[x == "C"] <- "G"
    out[x == "A"] <- "T"
    out[x == "T"] <- "A"
    out
  }

  maf.ref <- maf$minor_allele
  maf.obs <- cbind(annot$allele.1, annot$allele.2)
  # Compare each SNP's two observed alleles against the reference minor
  # allele on both the given strand and the complementary strand.
  eq1 <- sweep(maf.obs, 1, FUN = "==", maf.ref)
  eq2 <- sweep(maf.obs, 1, FUN = "==", getComp(maf.ref))
  eq <- cbind(eq1, eq2)
  id <- apply(eq, 1, function(x) which(x)[1])
  # Columns 1 and 3 mean allele.1 matched the minor allele -> flip dosages.
  flip <- id %in% c(1, 3)

  xx <- data.frame(lapply(seq_len(ncol(x)), ff, x = data.frame(x), flip = flip))
  colnames(xx) <- colnames(x)
  # Missing genotypes contribute 0 to the score (na.rm = TRUE, spelled out
  # instead of the original T shorthand).
  rowSums(xx, na.rm = TRUE)
}
|
9de26706c5dc1813e5b2f5dbbddda4eadbc9408c
|
387df2ef56aec161c1d835fe9dec44c265c8698d
|
/man/get_webservice_token.Rd
|
af2fe0a8b5c36445288eb2548da61642c13f1476
|
[
"MIT"
] |
permissive
|
Azure/azureml-sdk-for-r
|
072d4e72ad7667a848fc61d50655f7632b6c69cf
|
19301e106c0cec69dac0931e98af7e3f713b7bf7
|
refs/heads/master
| 2023-08-16T16:01:25.257198
| 2022-09-14T16:35:20
| 2022-09-14T16:35:20
| 199,087,253
| 102
| 47
|
NOASSERTION
| 2022-07-25T19:03:47
| 2019-07-26T22:29:14
|
R
|
UTF-8
|
R
| false
| true
| 1,287
|
rd
|
get_webservice_token.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webservice.R
\name{get_webservice_token}
\alias{get_webservice_token}
\title{Retrieve the auth token for a web service}
\usage{
get_webservice_token(webservice)
}
\arguments{
\item{webservice}{The \code{AksWebservice} object.}
}
\value{
An \code{AksServiceAccessToken} object.
}
\description{
Get the authentication token, scoped to the current user,
for a web service that was deployed with token-based authentication
enabled. Token-based authentication requires clients to use an Azure
Active Directory account to request an authentication token, which is
used to make requests to the deployed service. Only available for
AKS deployments.
In order to enable token-based authentication, set the
\code{token_auth_enabled = TRUE} parameter when you are creating or
updating a deployment (\code{aks_webservice_deployment_config()} for creation
or \code{update_aks_webservice()} for updating). Note that you cannot have both
key-based authentication and token-based authentication enabled.
Token-based authentication is not enabled by default.
To check if a web service has token-based auth enabled, you can
access the following boolean property from the Webservice object:
\code{service$token_auth_enabled}
}
|
0371b8c333be3707b5776aa90366325e2f776224
|
37d87e93626949ef5b98912a8467e3c22950ba07
|
/shiny/06-expressoes-reativas.R
|
0685be44b9ad1112c7575277c6a873912d59a46d
|
[
"MIT"
] |
permissive
|
curso-r/201906-dashboard
|
bb6b3bf036edc61e8ad15ded8bd8ce93408fbb47
|
559a9b96a8005ef26a73fcd9109b0d1b786faa46
|
refs/heads/master
| 2020-05-30T11:01:50.248573
| 2019-06-09T12:41:37
| 2019-06-09T12:41:37
| 189,687,929
| 4
| 0
|
MIT
| 2019-06-03T20:01:08
| 2019-06-01T03:56:10
|
HTML
|
UTF-8
|
R
| false
| false
| 2,227
|
r
|
06-expressoes-reativas.R
|
# Shiny dashboard course example: reactive expressions.
# Demonstrates how reactive() lets several outputs share one filtered dataset
# instead of repeating the filtering logic in every render function.
library(shiny)
library(shinydashboard)
library(dplyr)
# Pokemon dataset prepared elsewhere in the course repository.
dados <- readRDS("dados/pkmn.rds")
# Named choice list for the selectize input; labels are Title Case.
lista_pokemon <- as.list(dados$pokemon)
names(lista_pokemon) <- stringr::str_to_title(lista_pokemon)
ui <- dashboardPage(
  dashboardHeader(title = "Pokemon"),
  dashboardSidebar(
    selectizeInput(
      inputId = "pokemon",
      choices = lista_pokemon,
      label = "Selecione um Pokemon",
      selected = "bulbasaur"
    ),
    selectizeInput(
      inputId = "tipo",
      choices = unique(dados$tipo_1),
      label = "Selecione um tipo",
      selected = unique(dados$tipo_1)[1]
    )
  ),
  dashboardBody(
    fluidRow(
      # Left box: pokemon name as title plus its sprite image.
      box(
        width = 4,
        title = textOutput("a"),
        htmlOutput("img")
      ),
      # Right box: one value box per attribute.
      box(
        width = 6,
        title = "Atributos",
        valueBoxOutput("ataque", width = 12),
        valueBoxOutput("defesa", width = 12),
        valueBoxOutput("altura", width = 12),
        valueBoxOutput("altura_media", width = 12)
      )
    )
  )
)
server <- function(input, output) {
  # Reactive: the selected pokemon's row of `dados`, as a named list.
  # Shared by several outputs below so the filter runs only once per change.
  pokemon_dados <- reactive({
    d <- dados %>%
      filter(pokemon == input$pokemon) %>%
      as.list()
    d
  })
  # Reactive: all pokemon whose primary type matches the selected pokemon's.
  # Depends on pokemon_dados(), so it updates whenever the selection changes.
  dados_tipo <- reactive({
    d <- dados %>%
      filter(tipo_1 == pokemon_dados()$tipo_1)
    #browser()  # debugging leftover; uncomment to inspect the reactive context
    d
  })
  output$altura_media <- renderValueBox({
    valueBox(
      value = round(mean(dados_tipo()$altura), 2),
      subtitle = "Média da altura"
    )
  })
  # Sprite image, built from the url_imagem column.
  output$img <- renderText({
    url <- pokemon_dados()$url_imagem
    glue::glue("<img width = 100% src='https://raw.githubusercontent.com/phalt/pokeapi/master/data/Pokemon_XY_Sprites/{url}'>")
  })
  output$ataque <- renderValueBox({
    valueBox(
      value = pokemon_dados()$ataque,
      subtitle = "Ataque"
    )
  })
  output$defesa <- renderValueBox({
    valueBox(
      value = pokemon_dados()$defesa,
      subtitle = "Defesa"
    )
  })
  output$altura <- renderValueBox({
    valueBox(
      value = pokemon_dados()$altura,
      subtitle = "Altura"
    )
  })
  output$a <- renderText({input$pokemon})
}
shinyApp(ui, server)
# Exercise: add the attributes you added in the previous file's exercise,
# this time using the reactive expression.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.