blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9aab6431bdfd09744b1a5cdc76b78380344c725
|
ae4580e6f8626853ce5353fe89cac50cc506481d
|
/Unsupervised Learning/HW5/p3.R
|
bc0705d930dd8c1f2e509f7f29c15c4353636661
|
[] |
no_license
|
kunalkm12/Unsupervised-Learning
|
8900bcb6520a8d55d127fd7eef643b36b1c9b1eb
|
39c52261c7eb88fde80b862ce57e9ac8bb1255ae
|
refs/heads/main
| 2023-01-12T13:48:00.718475
| 2020-11-10T20:18:39
| 2020-11-10T20:18:39
| 308,265,182
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,331
|
r
|
p3.R
|
### p3.R - user-based collaborative filtering on the MovieLense ratings.
### For each user, the first unrated movie is located, the k most similar
### users who did rate that movie are selected, and a UBCF recommender
### trained on those users fills in the user's missing ratings.
### NOTE(review): `rm(list = ls())` was removed - wiping the caller's
### workspace from a script is an anti-pattern.
library(recommenderlab)

### Accessing the dataset
data(MovieLense)

### SPECIFICATION of k (neighbourhood size)
k <- 30

### Getting a feel of the matrix
getRatingMatrix(MovieLense)[1:10, 1:10]

### Verifying no. of rows and columns match values in the problem statement
### (generalized: dimensions are taken from the data instead of hard-coding
### 943 users x 1664 movies)
n_users <- nrow(MovieLense)
n_movies <- ncol(MovieLense)
n_users
n_movies

### Normalizing the ratings matrix
dats <- normalize(MovieLense)

### Visualizing both raw and normalized ratings. Limiting range for clarity
x11()
image(MovieLense[150:250, 150:250], main = "Raw Ratings")
x11()
image(dats[150:250, 150:250], main = "Normalized Ratings")

### Plotting histograms of the data
x11()
hist(getRatings(dats), breaks = 100, main = "Normalized Ratings Histogram")
x11()
hist(rowCounts(dats), breaks = 100, main = "User Ratings Histogram")
x11()
hist(colCounts(dats), breaks = 100, main = "Ratings per Movie Histogram")

### Precomputing values that are used on every iteration
kk <- as(MovieLense, "matrix")
ss <- similarity(MovieLense, method = "cosine")
sim <- as(ss, "matrix")
final <- matrix(0, n_users, n_movies)

## Iterating through all users
for (i in seq_len(n_users)) {
  ### First movie whose rating is missing for user i (vectorized; the
  ### original scanned with a for/break loop)
  missing <- which(is.na(kk[i, ]))
  if (length(missing) == 0) {
    ## BUG FIX: the original reused a stale `j` from the previous
    ## iteration when a user had no missing ratings; skip explicitly.
    final[i, ] <- kk[i, ]
    next
  }
  j <- missing[1]

  ### Users who have rated movie j
  users <- which(!is.na(kk[, j]))

  ### BUG FIX: the original shrank the global `k` permanently whenever a
  ### movie had fewer than k raters, corrupting all later iterations.
  ### Use a per-iteration neighbourhood size instead.
  k_i <- min(k, length(users))

  ### BUG FIX: order(-similar) returns positions WITHIN `users`, but the
  ### original used them to index MovieLense directly, picking the wrong
  ### users. Map back through `users` first.
  similar <- sim[i, users]
  top_users <- users[order(-similar)][seq_len(k_i)]

  ### User-based recommender trained on the k_i most similar raters of j
  model <- Recommender(MovieLense[top_users], method = "UBCF")

  ### Predicting the full rating vector for user i
  pred <- predict(model, MovieLense[i], type = "ratingMatrix")
  pre <- as(pred, "matrix")

  ### Fill missing entries with predictions, keep observed ratings
  na_idx <- is.na(kk[i, ])
  final[i, na_idx] <- pre[na_idx]
  final[i, !na_idx] <- kk[i, !na_idx]
}
|
e83101d23a86277932dd6251077eeee8302567dd
|
feb2e64dba1e0a88e9e157610305da70a60e1026
|
/Quiz2C7.R
|
8c9243120791cd42fe3597fcc4116b924264d20f
|
[] |
no_license
|
anncrawford01/RegressionModelsCrs7
|
8ce8a3d7dac6916d4383b9e953ac0cd5bbc3783a
|
954c590d461701f95ff98649c586ebc452b47e5d
|
refs/heads/master
| 2021-05-05T15:09:52.777394
| 2017-10-07T06:16:06
| 2017-10-07T06:16:06
| 103,148,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,730
|
r
|
Quiz2C7.R
|
## Regression Models Course 7 - Quiz 2 (same computations, restyled:
## attach()/detach() replaced with explicit data = / mtcars$ references)

## Q1 ### fit a simple linear regression and inspect it
## https://www.zoology.ubc.ca/~schluter/R/fit-model/
x <- c(0.61, 0.93, 0.83, 0.35, 0.54, 0.16, 0.91, 0.62, 0.62)
y <- c(0.67, 0.84, 0.6, 0.18, 0.85, 0.47, 1.1, 0.65, 0.36)
plot(x, y)
fit <- lm(y ~ x)
## Residuals vs fitted, Normal Q-Q, Scale-Location, Residuals vs Leverage
par(mfrow = c(2, 2))
plot(fit)
summary(fit)
#### test NULL hypothesis mean = 0 - read the p-value off the summary
summary(lm(y ~ x))

## Q2 ### residual standard deviation of the fit
summary(fit)$sigma

## Q3 ### confidence interval for mean mpg at the average weight
#http://www.r-tutor.com/elementary-statistics/simple-linear-regression/prediction-interval-linear-regression
summary(mtcars)
mpg.lm <- lm(mpg ~ wt, data = mtcars)
avgwt <- mean(mtcars$wt)
newdata = data.frame(wt = avgwt)
#interval type as "confidence", and use the default 0.95 confidence level.
predict(mpg.lm, newdata, interval = "confidence")
## method 2: centre the predictor so the intercept is the mean response
mpg.lm <- lm(mpg ~ wt, data = mtcars)
fit <- lm(mpg ~ I(wt - mean(wt)), data = mtcars)
confint(fit)

## Q4 ### wt is weight in units of 1000 lbs
#[, 6] wt Weight (1000 lbs)
?mtcars

## Q5 ### prediction interval for mpg at wt = 3 (3000 lbs)
mpg.lm <- lm(mpg ~ wt, data = mtcars)
avgwt <- mean(mtcars$wt)
newdata = data.frame(wt = 3)
#interval type as "confidence", and use the default 0.95 confidence level.
predict(mpg.lm, newdata, interval = "predict")
#slope <- fit$coefficients[2]
#intercept <-fit$coefficients[1]
#y = mx + b
#slope * 3 + intercept

## Q6 ### doubled slope CI from the centred fit
#68-95-99.7
mpg.lm <- lm(mpg ~ wt, data = mtcars)
fit <- lm(mpg ~ I(wt - mean(wt)), data = mtcars)
# take slope and multiply by 2
confint(fit)[2] * 2

## Q9 ### ??? residual SS over total SS, i.e. 1 - R^2
fit <- lm(mtcars$mpg ~ mtcars$wt)
num <- sum((mtcars$mpg - fitted.values(fit))^2)
## only intercept model gives the total sum of squares
denom <- sum((mtcars$mpg - mean(mtcars$mpg))^2)
num / denom
|
8671fa364683e49e7540fab7007ea233436031d4
|
1542b8ef5c6387facf4d49f8fd4f6b5ef5d8e9c0
|
/man/xGScore.Rd
|
a76dff0c0efbfe88e996754062d6ed6299351289
|
[] |
no_license
|
wuwill/XGR
|
7e7486614334b664a05e389cd646678c51d1e557
|
c52f9f1388ba8295257f0412c9eee9b7797c2029
|
refs/heads/master
| 2020-04-12T12:38:04.470630
| 2018-12-19T17:40:30
| 2018-12-19T17:40:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,756
|
rd
|
xGScore.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xGScore.r
\name{xGScore}
\alias{xGScore}
\title{Function to extract scores given a list of genomic regions}
\usage{
xGScore(data, format = c("chr:start-end", "data.frame", "bed",
"GRanges"), build.conversion = c(NA, "hg38.to.hg19", "hg18.to.hg19"),
GS.annotation = c("fitCons", "phastCons", "phyloP", "mcap", "cadd"),
scoring.scheme = c("mean", "median", "max", "min", "sum"),
verbose = T, RData.location = "http://galahad.well.ox.ac.uk/bigdata")
}
\arguments{
\item{data}{input genomic regions (GR). If formatted as "chr:start-end"
(see the next parameter 'format' below), GR should be provided as a
vector in the format of 'chrN:start-end', where N is either 1-22 or X,
start (or end) is genomic positional number; for example, 'chr1:13-20'.
If formatted as a 'data.frame', the first three columns correspond to
the chromosome (1st column), the starting chromosome position (2nd
column), and the ending chromosome position (3rd column). If the format
is indicated as 'bed' (browser extensible data), the same as
'data.frame' format but the position is 0-based offset from chromosome
position. If the genomic regions provided are not ranged but only the
single position, the ending chromosome position (3rd column) is allowed
not to be provided. The data could also be an object of 'GRanges' (in
this case, formatted as 'GRanges')}
\item{format}{the format of the input data. It can be one of
"data.frame", "chr:start-end", "bed" or "GRanges"}
\item{build.conversion}{the conversion from one genome build to
another. The conversions supported are "hg38.to.hg19" and
"hg18.to.hg19". By default it is NA (no need to do so)}
\item{GS.annotation}{which genomic scores (GS) annotations are used. It can
be 'fitCons' (the probability of fitness consequences for point
mutations; \url{http://www.ncbi.nlm.nih.gov/pubmed/25599402}),
'phastCons' (the probability that each nucleotide belongs to a
conserved element/negative selection [0,1]), 'phyloP' (conservation at
individual sites representing -log p-values under a null hypothesis of
neutral evolution, positive scores for conservation and negative scores
for acceleration), 'mcap' (eliminating a majority of variants with
uncertain significance in clinical exomes at high sensitivity:
\url{http://www.ncbi.nlm.nih.gov/pubmed/27776117}), and 'cadd'
(combined annotation dependent depletion for estimating relative levels
of pathogenicity of potential human variants:
\url{http://www.ncbi.nlm.nih.gov/pubmed/24487276})}
\item{scoring.scheme}{the method used to calculate scores spanning a
set of GR. It can be one of "mean", "median", "max", "min" and "sum"}
\item{verbose}{logical to indicate whether the messages will be
displayed in the screen. By default, it sets to true for display}
\item{RData.location}{the characters to tell the location of built-in
RData files. See \code{\link{xRDataLoader}} for details}
}
\value{
a GenomicRanges object
}
\description{
\code{xGScore} is supposed to extract scores given a list of genomic
regions. Scores for genomic regions/variants can be
constraint/conservation or impact/pathogenicity. It returns a GR
object.
}
\examples{
\dontrun{
# Load the XGR package and specify the location of built-in data
library(XGR)
RData.location <- "http://galahad.well.ox.ac.uk/bigdata"
# a) provide the genomic regions
## load ImmunoBase
ImmunoBase <- xRDataLoader(RData.customised='ImmunoBase',
RData.location=RData.location)
## get lead SNPs reported in AS GWAS
data <- ImmunoBase$AS$variant
# b) extract fitness consequence score
gr <- xGScore(data=data, format="GRanges", GS.annotation="fitCons",
scoring.scheme="mean", RData.location=RData.location)
}
}
\seealso{
\code{\link{xRDataLoader}}
}
|
dad6b016501fd96e6edfd0d06bb7b2d9a753ff7a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mlbstats/examples/obp.Rd.R
|
6c79ad83dd55fbe005e3a68687be5aa58f75725d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
obp.Rd.R
|
# Auto-extracted example code from the mlbstats package help page for obp().
library(mlbstats)
### Name: obp
### Title: Calculates on-base percentage
### Aliases: obp
### ** Examples
# Example call with five positional counting-stat arguments - presumably
# hits, walks, hit-by-pitch, at-bats, sacrifice flies in some order;
# TODO confirm against the mlbstats::obp documentation.
obp(150, 40, 2, 400, 5)
|
12e64d954a1836caabe4eb34be0002f2ae6b95b4
|
596d5cf92032d3afbd4ca595b0e27da40b1f2c78
|
/R/choropleth_GnmNum.R
|
802e22ee9b1e0bc9b05ba0dbceb78e2086489179
|
[] |
no_license
|
datasketch/hgchmaps-1
|
0c8c3b35597d580ac8cb84a26d2152314486019a
|
5505615c88dd045247e8fdd167d83ab0992e0efb
|
refs/heads/master
| 2023-06-13T02:25:31.072468
| 2021-06-15T12:35:43
| 2021-06-15T12:35:43
| 534,674,465
| 0
| 0
| null | 2022-09-09T14:22:41
| 2022-09-09T14:22:41
| null |
UTF-8
|
R
| false
| false
| 3,862
|
r
|
choropleth_GnmNum.R
|
#' choropleth chart Gnm Num
#'
#' @description
#' `hgch_choropleth_GnmNum()` creates a Highcharter choropleth map for a
#' particular data type: the first column must be a **geoname column** and
#' the second a **numeric column** (wider data works as long as its first
#' two columns meet this condition).
#'
#' @export
#' @family Gnm-Num plots
#' @section Ftype:
#' Gnm-Num
#' @examples
#' data <- sample_data("Gnm-Num", n = 30)
#' hgch_choropleth_GnmNum(data)
#'
#'
#' # if you want to calculate the average instead of the sum, you can use agg inside a function
#' hgch_choropleth_GnmNum(data,
#'                        agg = "mean")
#'
#'
hgch_choropleth_GnmNum <- function(data = NULL, ...) {
  opts <- dsvizopts::merge_dsviz_options(...)
  if (!is.null(data)) data[[1]] <- as_Gnm(data[[1]])
  l <- hgchmaps_prep(data = data, opts = opts, ftype = "Gnm-Num")

  # Data-label configuration assembled up front rather than inline.
  label_style <- list(
    fontSize = paste0(l$datalabel$dataLabels_size %||% 11, "px"),
    color = l$datalabel$dataLabels_color %||% "#222222",
    textShadow = "none",
    textOutline = ifelse(l$datalabel$dataLabels_text_outline,
                         "1px contrast", "none")
  )
  data_labels <- list(
    enabled = l$datalabel$dataLabels_show,
    format = l$datalabel$dataLabels_format_sample %||% "{point.value}",
    style = label_style
  )

  # Build the chart step by step (same calls the original piped together).
  chart <- highchart(type = "map")
  chart <- hc_add_series(
    chart,
    mapData = l$geoInfo,
    data = l$data,
    joinBy = l$by_col,
    borderColor = opts$theme$border_color,
    nullColor = opts$theme$na_color,
    showInLegend = FALSE,
    dataLabels = data_labels,
    events = list(click = l$shiny$clickFunction),
    cursor = l$shiny$cursor
  )
  chart <- hc_colorAxis(chart, stops = color_stops(colors = l$palette_colors))
  chart <- hc_tooltip(
    chart,
    useHTML = TRUE,
    formatter = JS("function () {return this.point.labels;}")
  )
  hc_add_theme(chart, hgch_theme(opts = l$theme))
}
#' choropleth chart Gnm
#'
#' @description
#' `hgch_choropleth_Gnm()` creates a Highcharter choropleth map for a
#' particular data type: the first column must be a **geoname column**
#' (wider data works as long as its first column meets this condition).
#'
#' @export
#' @family Gnm plots
#' @section Ftype:
#' Gnm
#' @examples
#' data <- sample_data("Gnm", n = 30)
#' hgch_choropleth_Gnm(data)
#'
#'
#' # if you want to calculate the average instead of the sum, you can use agg inside a function
#' hgch_choropleth_Gnm(data,
#'                     agg = "mean")
#'
#'
hgch_choropleth_Gnm <- function(data = NULL, ...) {
  opts <- dsvizopts::merge_dsviz_options(...)
  if (!is.null(data)) data[[1]] <- as_Gnm(data[[1]])
  l <- hgchmaps_prep(data = data, opts = opts, ftype = "Gnm")

  # Data-label configuration assembled up front rather than inline.
  label_style <- list(
    fontSize = paste0(l$datalabel$dataLabels_size %||% 11, "px"),
    color = l$datalabel$dataLabels_color %||% "#222222",
    textShadow = "none",
    textOutline = ifelse(l$datalabel$dataLabels_text_outline,
                         "1px contrast", "none")
  )
  data_labels <- list(
    enabled = l$datalabel$dataLabels_show,
    format = l$datalabel$dataLabels_format_sample %||% "{point.value}",
    style = label_style
  )

  # Build the chart step by step (same calls the original piped together).
  chart <- highchart(type = "map")
  chart <- hc_add_series(
    chart,
    mapData = l$geoInfo,
    data = l$data,
    joinBy = l$by_col,
    borderColor = opts$theme$border_color,
    nullColor = opts$theme$na_color,
    showInLegend = FALSE,
    dataLabels = data_labels,
    events = list(click = l$shiny$clickFunction),
    cursor = l$shiny$cursor
  )
  chart <- hc_colorAxis(chart, stops = color_stops(colors = l$palette_colors))
  chart <- hc_tooltip(
    chart,
    useHTML = TRUE,
    formatter = JS("function () {return this.point.labels;}")
  )
  hc_add_theme(chart, hgch_theme(opts = l$theme))
}
|
86a8fd472ca28312ddcb1046ecb37d139408f144
|
a495b873b0c82b31b5a75ca3547c9febf3af5ddc
|
/array/DNAm/analysis/EWAS/mlm.r
|
6a1274f467e7ffabf77214f58ba2c3803864dce6
|
[
"Artistic-2.0"
] |
permissive
|
ejh243/BrainFANS
|
192beb02d8aecb7b6c0dc0e59c6d6cf679dd9c0e
|
5d1d6113b90ec85f2743b32a3cc9a428bd797440
|
refs/heads/master
| 2023-06-21T17:42:13.002737
| 2023-06-20T12:35:38
| 2023-06-20T12:35:38
| 186,389,634
| 3
| 2
|
Artistic-2.0
| 2022-02-08T21:45:47
| 2019-05-13T09:35:32
|
R
|
UTF-8
|
R
| false
| false
| 3,782
|
r
|
mlm.r
|
##---------------------------------------------------------------------#
##
## Title: EWAS with mixed effects regression model
##
## Purpose of script: perform DNA methylation association analysis of
## schizophrenia vs controls testing for main and cell-specific effects.
## simulataneously testing for cell type differences
##
## Author: Eilis Hannon
##
## Date Created: 2022-07-08
##
##---------------------------------------------------------------------#
#----------------------------------------------------------------------#
# DEFINE ANALYSIS FUNCTION
#----------------------------------------------------------------------#
# Fit three nested mixed-effects models for one CpG site (`row` = a vector
# of methylation values across samples) and return 17 statistics: the main
# schizophrenia effect, the interaction (cell-specific) schizophrenia
# effects, and the cell-type effects.
# With lmerTest loaded, coefficient columns c(1,2,5) are Estimate,
# Std. Error and Pr(>|t|); anova(...)[2,8] is the LRT p-value column.
# NOTE(review): the random-effect grouping name "Indidivual.ID" looks
# misspelled, but it must match the QCmetrics column name - confirm before
# renaming anywhere.
runEWAS<-function(row,QCmetrics){
# bind the methylation vector onto the sample metadata as column `row`
pheno<-cbind(row,QCmetrics)
# full model: Phenotype x Cell.type interaction plus age/sex covariates,
# random intercepts for centre and individual
modelMLM<-lmer(row ~ Phenotype * Cell.type + CCDNAmAge + Sex + (1 | Tissue.Centre) + (1 | Indidivual.ID), REML = FALSE, data = pheno)
# null model without the interaction (LRT vs modelMLM tests cell-specific
# disease effects)
nullMLM<-lmer(row ~ Phenotype + Cell.type + CCDNAmAge + Sex + (1 | Tissue.Centre) + (1 | Indidivual.ID), REML = FALSE, data = pheno)
# null model without cell type (LRT vs nullMLM tests the cell-type effect)
nullCT<-lmer(row ~ Phenotype + CCDNAmAge + Sex + (1 | Tissue.Centre) + (1 | Indidivual.ID), REML = FALSE, data = pheno)
# extract case control main effect
return(c(summary(modelMLM)$coefficients["PhenotypeSchizophrenia",c(1,2,5)],
# extract cell specific case control effect
anova(modelMLM,nullMLM)[2,8],
summary(modelMLM)$coefficients["PhenotypeSchizophrenia:Cell.typeNeuN+",c(1,2,5)],
summary(modelMLM)$coefficients["PhenotypeSchizophrenia:Cell.typeSox10+",c(1,2,5)],
# extract cell type effect
anova(nullMLM, nullCT)[2,8],
summary(nullMLM)$coefficients["Cell.typeNeuN+",c(1,2,5)],
summary(nullMLM)$coefficients["Cell.typeSox10+",c(1,2,5)]))
}
#----------------------------------------------------------------------#
# LOAD PACKAGES
#----------------------------------------------------------------------#
library(lme4)
library(lmerTest)
#library(GenABEL)
library(doParallel)
library(devtools)
devtools::load_all(path = "../functionsR")
#----------------------------------------------------------------------#
# DEFINE PARAMETERS
#----------------------------------------------------------------------#
# Single expected command-line argument: the project data directory.
args<-commandArgs(trailingOnly = TRUE)
dataDir <- args[1]
normData<-file.path(dataDir, "/3_normalised/normalised.rdata")
resPath<-file.path(dataDir, "/4_analysis/EWAS")
#----------------------------------------------------------------------#
# LOAD AND PREPARE DATA
#----------------------------------------------------------------------#
setwd(dataDir)
load(normData)
## remove total samples and cell types with less than 20 samples
QCmetrics<-QCmetrics[which(QCmetrics$Cell.type != "Total"),]
nSample<-table(QCmetrics$Cell.type)
QCmetrics<-QCmetrics[QCmetrics$Cell.type %in% names(nSample[which(nSample > 19)]),]
# filter to schizophrenia and control only
QCmetrics<-QCmetrics[QCmetrics$Phenotype %in% c("Schizophrenia", "Control"),]
celltypeNormbeta<-celltypeNormbeta[,QCmetrics$Basename]
cellTypes<-unique(QCmetrics$Cell.type)
#----------------------------------------------------------------------#
# INITIATE PARALLEL ENV
#----------------------------------------------------------------------#
# detectCores() is documented to return NA on some platforms; fall back to
# two workers so makeCluster() never receives NA, and never below 1.
nCores<-detectCores()
if (is.na(nCores)) nCores <- 2L
cl <- makeCluster(max(1L, nCores-1))
registerDoParallel(cl)
clusterExport(cl, list("runEWAS", "lmer"))
# One row of 17 statistics per CpG site (see runEWAS for column meaning).
outtab<-matrix(data = parRapply(cl, celltypeNormbeta, runEWAS, QCmetrics), ncol = 17, byrow = TRUE)
# BUG FIX: the original never released the worker processes.
stopCluster(cl)
rownames(outtab)<-rownames(celltypeNormbeta)
colnames(outtab)<-c("SCZ_coeff", "SCZ_SE", "SCZ_P", "CellType_SCZ_P", "NeuN_SCZ_coeff", "NeuN_SCZ_SE","NeuN_SCZ_P", "SOX10_SCZ_coeff", "SOX10_SCZ_SE","SOX10_SCZ_P", "CellType_P","NeuN_coeff", "NeuN_SE", "NeuN_P", "SOX10_coeff", "SOX10_SE", "SOX10_P")
save(outtab, file = file.path(resPath, "MLM.rdata"))
|
d73c9fc277238417946e8444add9fb95a858ce50
|
1ee3625bc622a90c92617d2bb2711abff8e8c74f
|
/man/APA_stat.Rd
|
574ad7467e9f35a0e5959219b7f8eb41f13c6422
|
[] |
no_license
|
darrellpenta/APAstyler
|
d54a49cd328261b448db5afa0dabee4c0d4612c2
|
c895a13f0473d76efc15bd42d202c245fe36a021
|
refs/heads/master
| 2021-01-22T19:08:28.415784
| 2017-10-07T13:55:11
| 2017-10-07T13:55:11
| 85,164,023
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,117
|
rd
|
APA_stat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/APA_stat-function.R
\name{APA_stat}
\alias{APA_stat}
\title{Format a statistic}
\usage{
APA_stat(stat, pre_stat = NULL, post_stat = NULL, snip = FALSE,
digit = 2, ...)
}
\arguments{
\item{stat}{A number, supplied as a numeric or character value.}
\item{pre_stat}{A string to be prepended to your number. Default is \code{NULL}.}
\item{post_stat}{A string to be appended to your number. Default is \code{NULL}.}
\item{snip}{Do you want to snip a leading zero from a number bounded by 1? Defaults to \code{FALSE}.}
\item{digit}{How many significant digits (defaults to 2, which is suitable for many APA applications)? Passed to \code{\link[formattable]{formattable}}.}
\item{...}{Optional arguments to be passed to \code{\link[formattable]{formattable}} or \code{paste0}}
}
\value{
\code{stat} as a string with APA formatting and/or other options applied.
}
\description{
\code{APA_stat} allows you to append your own labels to a statistic.
}
\examples{
# lapply(runif(n = 100, min = 0.00003, max = .99), APA_p, rmd_format = TRUE)
}
|
b6fb84ba99381b5173bc639f401d7945e146b9f6
|
22a7b51fcfcbef8291962d0d70237dada281e71b
|
/App2/ui.R
|
e8af21fd590c1807789d2413410637771e212b06
|
[] |
no_license
|
meetnfx/WASP
|
a17c1ea689cc8cb948558347614a402f6f3dae98
|
952fcfcac97e18cf5cd449484d6d74211b0dedb9
|
refs/heads/master
| 2022-11-29T21:18:06.264545
| 2016-12-19T06:07:29
| 2016-12-19T06:07:29
| 283,109,422
| 0
| 1
| null | 2020-07-28T05:28:30
| 2020-07-28T05:28:29
| null |
UTF-8
|
R
| false
| false
| 801
|
r
|
ui.R
|
library(shiny)

# Shiny UI definition. Collects four numeric match-state inputs (target,
# runs made, wickets down, balls bowled - presumably a cricket chase;
# confirm against server.R) and displays one text output, "probability".
# The panels are built as named objects first, then composed - the
# resulting UI tree is identical to the original nested version.
input_panel <- sidebarPanel(
  numericInput("target",
               label = "Target", min = 0, max = 450, value = 300),
  numericInput("runs",
               label = "Runs Made", min = 0, max = 300, value = 110),
  numericInput("wickets",
               label = "Wickets Down", min = 0, max = 10, value = 0),
  numericInput("balls",
               label = "Balls Bowled", min = 1, max = 300, value = 120),
  submitButton("Submit")
)

result_panel <- mainPanel(
  verbatimTextOutput("probability")
)

shinyUI(fluidPage(
  titlePanel(""),
  sidebarLayout(input_panel, result_panel)
))
|
b3a0d64bea1a146b084cea6503c8b6293428b91b
|
223a47aefa299993a1ca5b92c0f031639c869310
|
/src/6_unpaid_carer_analysis.R
|
76715465bedc2878cdc0cd124f2417dada949d69
|
[
"MIT"
] |
permissive
|
jfontestad/Unpaid_Carers
|
98b1f9e94d2463953b171ceb22a32e750359f143
|
5a10d4ecc7b75bb8d628dd062dded13a94151380
|
refs/heads/main
| 2023-05-15T00:57:49.349940
| 2021-06-03T10:47:38
| 2021-06-03T10:47:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,232
|
r
|
6_unpaid_carer_analysis.R
|
#
# Project: Unpaid Carers during the pandemic
# Purpose: Preliminary Analysis
# Author: Anne Alarilla
# Date: 17/05/2021
#
# Set up ------------------------------------------------------------------
##Load library
library(tidyverse)
library(THFstyle)
library(ggplot2)
library(ggfittext)
library(scales)
library(janitor)
library(networkD3)
library(stringr)
library(survey)
library(ggtext)
library(plotly)
library(xlsx)
library(gtsummary)
library(sankey)
##Load Data
# Pre-processed inputs produced by earlier pipeline scripts (saved under
# data/care_type): health, demographics, caring status, service access.
all_h<- readRDS(here::here('data','care_type','health_all.rds'))
all_dem<- readRDS(here::here('data','care_type','demographics_all.rds'))
all<- readRDS(here::here('data','care_type','caring_pandemic_care_type.rds'))
all_s<- readRDS(here::here('data','care_type','services_all.rds'))
##Functions
# Strip doubled-punctuation runs (e.g. "..", "--") from the column names of
# `df`, then write the renamed data frame back to the caller's variable in
# the global environment. The side-effect interface (assign into
# .GlobalEnv via deparse/substitute) is preserved because existing call
# sites such as `clean_names(t2)` rely on it.
#
# @param df A data.frame whose names may contain double-punctuation runs.
# @return The renamed data frame, invisibly (global binding also updated).
clean_names <- function(df) {
  # sub() only replaces the first match per name, so it is applied twice to
  # cover names containing two separate double-punctuation runs - same
  # behavior as the original; the new.env() round-trip it used was
  # unnecessary and has been removed.
  new_names <- sub("[[:punct:]]{2}", "", colnames(df))
  new_names <- sub("[[:punct:]]{2}", "", new_names)
  names(df) <- new_names
  assign(deparse(substitute(df)), df, envir = .GlobalEnv)
  invisible(df)
}
# Contents ----------------------------------------------------------------
#Set up for excel output
#Combining the data set
#Survey design set up
#Sankey plot: Change in unpaid caring
#Women providing unpaid care and childcare
#Ethnicity and caring
#Caring within or outside household
#Caring outcomes
# Set up for excel output -------------------------------------------------
wb = createWorkbook()
# Final data and design variable ------------------------------------------------------
##Adding variables
##Demographics
# Derive two binary factors from the child-count variables; level order
# puts the "1+ child" category first.
all_dem <- all_dem %>%
mutate(resp_child=factor(case_when(child_u15 %in% c("One", "Two+") ~ 1,
child_u15== "None" ~ 2),levels=c(1,2),
labels=c("1+ child under 15 responsible for","No child under 15 responsible for")),
hh_child=factor(case_when(hh_child_u16 =="Atleast One" ~ 1,
hh_child_u16 == "None" ~ 2),levels=c(1,2),
labels=c("1+ child under 16 in the household","No child under 16 in the household")))
##Health and access to services
# NOTE(review): left_join() without `by =` joins on all shared columns
# (presumably pidp) - confirm the intended key.
all_h_s<- all_h %>%
left_join(all_s) %>%
mutate(mltc_short=case_when(mltc %in% c("None", "One")~ "None/One",
mltc=="2+"~ "2 or more"))
#Combining the data
final<-all %>%
select(pidp, carer, carer_pre,care_hours_pre,care_loc_cv,care_loc_change,care_hours, probit_lasso_wgt_t25, psu, strata) %>%
left_join(all_dem %>%
select(race_plus, sex_lab,age,hh_child_u16, hh_child,resp_child,child_u15,pidp)) %>%
left_join(all_h_s %>%
select(GHQ_cv,mltc,wait_for_NHS_treat, mltc_short, pidp))
# Survey design set up ----------------------------------------------------
# NOTE(review): `id=` partially matches svydesign's `ids` argument, and
# `survey.lonely.psu` is an option rather than a svydesign argument (it is
# absorbed by ...); the options() call below is what actually sets it.
uos_design<-svydesign(id= ~psu, strata= ~strata, survey.lonely.psu="adjust",
weights= ~probit_lasso_wgt_t25, data=final)
options(survey.lonely.psu="adjust")
#Sankey plot: Change in unpaid caring -------------------------------------------------------------
##Descriptives
# Weighted summary tables of pre-pandemic vs pandemic caring hours.
uos_design %>%
tbl_svysummary(include = c(care_hours_pre,care_hours),
type=everything()~"categorical") %>%
bold_labels()
uos_design %>%
tbl_svysummary(by="care_hours",include = c(care_hours,carer_pre),
type=everything()~"categorical") %>%
bold_labels()
##Sankey plot
# Weighted cross-tabulation of pre vs current caring level, with readable
# factor labels for the plot.
t1<-svytable(~care_hours_pre+care_hours, design=uos_design) %>%
as.data.frame() %>%
mutate(care_hours_pre=factor(care_hours_pre, levels=c("High Level Caring", "Low Level Caring", "No caring"),
labels=c("Providing 20+ hours of care pw","Providing <20 hours of care pw","Not providing care")),
care_hours=factor(care_hours, levels=c("High Level Caring", "Low Level Caring", "No caring"),
labels=c("Providing 20+ hours of care pw","Providing <20 hours of care pw","Not providing care")))
##For data labels
# Build node labels like "Not providing care (82.7%)_1".
# NOTE(review): paste0() has no `sep` argument, so sep='_1' is treated as
# one more string to concatenate - it appends the "_1"/"_2" suffix that
# str_sub(end=-3) strips off again when building node names below.
lab_1<-svytable(~care_hours_pre, design=uos_design) %>%
as.data.frame() %>%
mutate(care_hours_pre=factor(care_hours_pre, levels=c("High Level Caring", "Low Level Caring", "No caring"),
labels=c("Providing 20+ hours of care pw","Providing <20 hours of care pw","Not providing care")),
sum=sum(Freq), prop=paste0(round(Freq/sum*100,1),"%"),
In=case_when(care_hours_pre != "" ~ paste0(care_hours_pre, " (",prop,")",sep='_1'))) %>%
select(care_hours_pre, In)
lab_2<-svytable(~care_hours, design=uos_design) %>%
as.data.frame() %>%
mutate(care_hours=factor(care_hours, levels=c("High Level Caring", "Low Level Caring", "No caring"),
labels=c("Providing 20+ hours of care pw","Providing <20 hours of care pw","Not providing care")),
sum=sum(Freq), prop=paste0(round(Freq/sum*100,1),"%"),
Out=case_when(care_hours != "" ~ paste0(care_hours, " (",prop,")",sep='_2'))) %>%
select(care_hours, Out)
# Flow table for the sankey: source label, target label, weighted count.
sankey_plot_data<-t1 %>%
left_join(lab_1) %>%
left_join(lab_2) %>%
drop_na() %>%
select(In, Out, Freq) %>%
arrange(desc(In)) %>%
arrange(desc(Out))
# sankey_plot_data<-t1 %>%
# mutate(In=case_when(care_hours_pre != "" ~ paste0(care_hours_pre, sep='_1')),
# Out=case_when(care_hours != "" ~ paste0(care_hours, sep='_2'))) %>%
# drop_na() %>%
# select(In, Out, Freq) %>%
# arrange(desc(In)) %>%
# arrange(desc(Out))
# Unique node list (both sides of every flow); `name` drops the "_1"/"_2"
# suffix so source and target display labels read the same.
nodes<- sankey_plot_data %>%
select(In, Out) %>%
pivot_longer(c(In,Out), names_to="col_names", values_to= "name_match") %>%
select(-1) %>%
distinct() %>%
mutate(name=str_sub(name_match, end=-3))
nodes<-data.frame(nodes)
# networkD3 expects 0-based node indices, hence the -1.
sankey_plot_id<- sankey_plot_data %>%
mutate(IDin= match(In, nodes$name_match)-1,
IDout=match(Out, nodes$name_match)-1)
sankey_plot_id<-data.frame(sankey_plot_id)
# nodes<- nodes %>%
# mutate(ColourGroup=case_when(name== "Providing 20+ hours of care pw" ~ "#dd0031",
# name== "Providing <20 hours of care pw" ~ "#53a9cd",
# name=="Not providing care" ~ "#744284",
# ))
# d3 ordinal colour scale mapping the three ColourGroup keys to THF brand
# colours.
node_colour<-'d3.scaleOrdinal() .domain(["THF_red","THF_50pct_light_blue","THF_1_purple"])
.range(["#dd0031","#53a9cd","#744284"])'
# NOTE(review): these node names embed percentages hard-coded to the
# current data (4.5%/9%, 12.8%/22.8%, 82.7%/68.2%). If the data or weights
# change, nothing will match and ColourGroup becomes NA - fragile; match on
# the label prefix instead.
nodes<- nodes %>%
mutate(ColourGroup=case_when(name %in% c("Providing 20+ hours of care pw (4.5%)","Providing 20+ hours of care pw (9%)") ~ "THF_red",
name %in% c("Providing <20 hours of care pw (12.8%)", "Providing <20 hours of care pw (22.8%)") ~ "THF_50pct_light_blue",
name %in% c("Not providing care (82.7%)", "Not providing care (68.2%)")~ "THF_1_purple"))
# Render the sankey; iterations=0 keeps the node order as supplied.
sankeyNetwork(Links = sankey_plot_id, Nodes = nodes,
Source = "IDin", Target = "IDout",
Value = "Freq", NodeID = "name", sinksRight = FALSE, fontSize = 16, colourScale = node_colour,
NodeGroup = "ColourGroup", iterations=0, units="respondents", nodeWidth=25, fontFamily="Arial", height=750, width=780)
#
# fig <- plot_ly(
# type = "sankey",
# arrangement = "snap",
# orientation = "h",
#
# node = list(
# label = nodes$name,
# color = nodes$ColourGroup,
# pad = 15,
# thickness = 20,
# line = list(
# color = "black",
# width = 0.5
# )
# ),
#
# link = list(
# source = sankey_plot_id$IDin,
# target = sankey_plot_id$IDout,
# value = sankey_plot_id$Freq
# )
# )
# fig <- fig %>% layout(
# title = "",
# font = list(
# size = 12
# )
# )
#
# fig
#Saving data to excel sheet
# Append the weighted pre/post caring cross-tab (t1) to the workbook `wb`
# created in the set-up section; wb is written to disk elsewhere.
sheet = createSheet(wb, "Care Status")
addDataFrame(as.data.frame(t1), sheet=sheet, startColumn=1, row.names=FALSE)
# Women providing unpaid care and childcare ---------------------------------------------------------
##Breakdown of Sex, Caring and Childcare
##Descriptive
# Weighted summary of caring/childcare variables split by sex, flattened to
# a tibble with NA cells blanked for export.
# NOTE(review): as.tibble() is deprecated (use as_tibble()) and mutate_if()
# is superseded by across() - worth modernizing when next touched.
t2<-uos_design %>%
tbl_svysummary(by="sex_lab",include = c(carer, care_hours, sex_lab, hh_child, resp_child),
type=everything()~"categorical",
label= list(carer~"If unpaid carer (Nov 2020/Jan 2021)?",
care_hours~"Type of carer?",
hh_child~"Number of children in household?",
resp_child~"Number of children responsible for?")) %>%
add_p() %>%
as.tibble() %>%
mutate_if(is.character, ~replace(., is.na(.), ""))
# clean_names() strips doubled punctuation from t2's column names in place
# (global-environment side effect).
clean_names(t2)
uos_design %>%
tbl_svysummary(include = c(carer, care_hours, sex_lab, hh_child, resp_child))
uos_design %>%
tbl_svysummary(by = "care_hours", include = c(care_hours, sex_lab, hh_child,resp_child),
label=list(sex_lab~"Gender",
hh_child~"Number of children in household?",
resp_child~"Number of children responsible for?"))%>%
add_p() %>%
bold_labels()
# Same childcare-by-caring-level breakdown, run separately within each sex
# subset of the survey design.
fem<-subset(uos_design, sex_lab=="Female") %>%
tbl_svysummary(by="care_hours",include = c(care_hours, hh_child,resp_child),
type=everything()~"categorical",
label= list(care_hours~"Type of carer?",
hh_child~"Number of children in household?",
resp_child~"Number of children responsible for?")) %>%
add_p() %>%
bold_labels() %>%
as.tibble() %>%
# modify_spanning_header(c("stat_1", "stat_2", "stat_3")~"**Female**") %>%
mutate_if(is.character, ~replace(., is.na(.), ""))
male<-subset(uos_design, sex_lab=="Male") %>%
tbl_svysummary(by="care_hours",include = c(care_hours, hh_child,resp_child),
type=everything()~"categorical",
label= list(care_hours~"Type of carer?",
hh_child~"Number of children in household?",
resp_child~"Number of children responsible for?")) %>%
add_p() %>%
bold_labels() %>%
as.tibble() %>%
# modify_spanning_header(c("stat_1", "stat_2", "stat_3")~"**Female**") %>%
mutate_if(is.character, ~replace(., is.na(.), ""))
# Side-by-side female/male table for export.
t3<-cbind(fem,male)
clean_names(t3)
#Graph of sex and childcare by caring status
# tab_s<-svytable(~sex_lab+ hh_child +care_hours, design=uos_design) %>%
# as.data.frame() %>%
# # group_by(care_hours) %>%
# # mutate(sum_care=sum(Freq), prop_care=Freq/sum_care,) %>%
# # ungroup() %>%
# group_by(care_hours,sex_lab) %>%
# mutate(sum_sex=sum(Freq),prop_sex=Freq/sum_sex) %>%
# ungroup()
#
# tab_s %>%
# mutate(lab=case_when(care_hours=="Low Level Caring"~"Providing <20 hours of care",
# care_hours=="High Level Caring"~"Providing 20+ hours of care",
# care_hours=="No caring"~"Not providing care")) %>%
# ggplot(., aes(x = sex_lab, y = prop_sex*100)) +
# # geom_col(aes(color = hh_child, fill = hh_child), position = position_dodge(0.8), width = 0.7)+
# geom_bar(aes(color = hh_child, fill = hh_child), position ="stack", stat="identity")+
# theme_THF()+
# # scale_y_continuous(limits = c(0,100), breaks = seq(0, 100, by = 20))+
# facet_wrap(~lab)+
# scale_fill_THF()+
# scale_colour_THF()+
# labs(x= "", y="", title="")+
# theme(plot.title = element_text(size=14),
# legend.text=element_text(size=14), legend.position="bottom",
# axis.text.x=element_text(size=14, angle = 0, hjust=0.45),axis.text.y=element_text(size=14),
# strip.text=element_text(size=14))
#
# #Saving graph
# ggsave(here::here('outputs','care.png'),dpi=300,
# width = 10, height = 6.5)
# tab_sx<-svytable(~sex_lab+ resp_child +care_hours, design=uos_design) %>%
# as.data.frame() %>%
# group_by(care_hours) %>%
# mutate(sum_care=sum(Freq)) %>%
# ungroup() %>%
# group_by(care_hours,sex_lab) %>%
# mutate(sum_sex=sum(Freq),prop_sex=sum_sex/sum_care, prop_child=Freq/sum_sex) %>%
# ungroup()
#
# tab_sx %>%
# mutate(lab=case_when(care_hours=="Low Level Caring"~"Providing <20 hours of care",
# care_hours=="High Level Caring"~"Providing 20+ hours of care",
# care_hours=="No caring"~"Not providing care")) %>%
# ggplot(., aes(x = sex_lab, y = prop_sex))+
# # geom_col(aes(color = resp_child, fill = resp_child), position = position_dodge(0.8), width = 0.7)+
# geom_bar(aes(color = resp_child, fill = resp_child), position="stack", stat="identity")+
# theme_THF()+
# # scale_y_continuous(limits = c(0,100), breaks = seq(0, 100, by = 20))+
# facet_wrap(~lab)+
# scale_fill_THF()+
# scale_colour_THF()+
# labs(x= "", y="", title="")+
# theme(plot.title = element_text(size=14),
# legend.text=element_text(size=14), legend.position="bottom",
# axis.text.x=element_text(size=14, angle = 0, hjust=0.45),axis.text.y=element_text(size=14),
# strip.text=element_text(size=14))
#
## Caring status and childcare responsibility by sex
# Weighted cross-tab of sex x caring intensity x childcare responsibility.
# prop_care_sex      = share of each care status within a sex;
# prop_child_sex_care = share of each childcare category within a sex.
tab_sex_care <- svytable(~sex_lab + care_hours + resp_child, design = uos_design) %>%
  as.data.frame() %>%
  group_by(sex_lab, care_hours) %>%
  mutate(sum_sex_care = sum(Freq)) %>%
  ungroup() %>%
  group_by(sex_lab) %>%
  mutate(sum_sex = sum(Freq)) %>%
  group_by(resp_child, sex_lab) %>%
  mutate(sum_child_sex = sum(Freq)) %>%
  ungroup() %>%
  mutate(prop_care_sex = sum_sex_care/sum_sex, prop_child_sex_care = Freq/sum_sex) %>%
  # Suppress bar labels below 1% to avoid plot clutter.
  mutate(lab_prop = ifelse(prop_child_sex_care < 0.01, "", percent(prop_child_sex_care, 1)))
tab_sex_care

# Stacked bars of childcare responsibility by sex, faceted by care status.
tab_sex_care %>%
  mutate(lab = case_when(care_hours == "Low Level Caring" ~ "Providing <20 hours of care pw",
                         care_hours == "High Level Caring" ~ "Providing 20+ hours of care pw",
                         care_hours == "No caring" ~ "Not providing care")) %>%
  ggplot(., aes(x = sex_lab, y = prop_child_sex_care*100, label = lab_prop)) +
  geom_bar(aes(color = resp_child, fill = resp_child), position = "stack", stat = "identity") +
  # FIX: 'fill' is not a geom_text aesthetic (it only triggered an "unknown
  # aesthetics" warning); map 'group' instead so the labels stack in the
  # same order as the bars.
  geom_text(aes(group = resp_child, label = lab_prop), colour = "White",
            position = position_stack(vjust = 0.5)) +
  theme_THF() +
  scale_y_continuous(limits = c(0, 80), breaks = seq(0, 80, by = 10)) +
  facet_wrap(~lab) +
  scale_fill_THF() +
  scale_colour_THF() +
  labs(x = "", y = "% respondents", title = "") +
  theme(plot.title = element_text(size = 14),
        legend.text = element_text(size = 14), legend.position = "bottom",
        axis.text.x = element_text(size = 14, angle = 0, hjust = 0.45),
        axis.text.y = element_text(size = 14),
        strip.text = element_text(size = 14))

## Saving graph
ggsave(here::here('outputs', 'care_childcare_by_sex.png'), dpi = 300,
       width = 10, height = 6.5)
## Sex and childcare responsibility by caring status
# Proportions computed within each caring-intensity group.
tab_care <- svytable(~sex_lab + care_hours + resp_child, design = uos_design) %>%
  as.data.frame() %>%
  group_by(care_hours) %>%
  mutate(sum_care = sum(Freq)) %>%
  ungroup() %>%
  mutate(prop_care = Freq/sum_care,
         # Suppress bar labels below 1% to avoid plot clutter.
         lab_prop = ifelse(prop_care < 0.01, "", percent(prop_care, 1)))

tab_care %>%
  mutate(lab = case_when(care_hours == "Low Level Caring" ~ "Providing <20 hours of care pw",
                         care_hours == "High Level Caring" ~ "Providing 20+ hours of care pw",
                         care_hours == "No caring" ~ "Not providing care")) %>%
  ggplot(., aes(x = sex_lab, y = prop_care*100, label = lab_prop)) +
  geom_bar(aes(color = resp_child, fill = resp_child), position = "stack", stat = "identity") +
  # FIX: 'fill' is not a geom_text aesthetic; map 'group' instead so the
  # labels stack in the same order as the bars.
  geom_text(aes(group = resp_child, label = lab_prop), colour = "White",
            position = position_stack(vjust = 0.5)) +
  theme_THF() +
  scale_y_continuous(limits = c(0, 80), breaks = seq(0, 80, by = 10)) +
  facet_wrap(~lab) +
  scale_fill_THF() +
  scale_colour_THF() +
  labs(x = "", y = "% respondents", title = "") +
  theme(plot.title = element_text(size = 14),
        legend.text = element_text(size = 14), legend.position = "bottom",
        axis.text.x = element_text(size = 14, angle = 0, hjust = 0.45),
        axis.text.y = element_text(size = 14),
        strip.text = element_text(size = 14))

# Saving graph
ggsave(here::here('outputs', 'sex_childcare_by_care.png'), dpi = 300,
       width = 10, height = 6.5)

# Saving data to excel sheet
sheet <- createSheet(wb, "Sex")
addDataFrame(as.data.frame(t2), sheet=sheet, startColumn=1, row.names=FALSE)
sheet <- createSheet(wb, "Sex and Childcare")
addDataFrame(as.data.frame(t3), sheet=sheet, startColumn=1, row.names=FALSE)
# Ethnicity and caring -----------------------------------------------------------------
## Descriptive: caring status by ethnicity (survey-weighted), with p-values.
t4 <- uos_design %>%
  tbl_svysummary(by = "race_plus", include = c(race_plus, care_hours),
                 type = everything() ~ "categorical") %>%
  add_p() %>%
  bold_labels() %>%
  # FIX: as.tibble() is deprecated; use as_tibble().
  as_tibble() %>%
  mutate_if(is.character, ~replace(., is.na(.), ""))
clean_names(t4)

# tbl<-svytable(~race_plus+care_hours, design=uos_design)
# c<-svychisq(~race_plus+care_hours, uos_design_xw, statistic="adjWald")
# summary(tbl, statistic="adjWald")

# Weighted proportions of care status within each ethnic group.
tab_s <- svytable(~race_plus + care_hours, design = uos_design) %>%
  as.data.frame() %>%
  group_by(race_plus) %>%
  mutate(sum_care_hours = sum(Freq)) %>%
  ungroup() %>%
  mutate(prop_race = Freq/sum_care_hours, lab_prop = percent(prop_race, 1))

## Graph: stacked bars of care status within each ethnic group.
tab_s %>%
  mutate(lab = case_when(care_hours == "Low Level Caring" ~ "Providing <20 hours of care pw",
                         care_hours == "High Level Caring" ~ "Providing 20+ hours of care pw",
                         care_hours == "No caring" ~ "Not providing care")) %>%
  ggplot(., aes(x = race_plus, y = prop_race*100, label = lab_prop)) +
  # geom_col(aes(color = race, fill = race), position = position_dodge(0.8), width = 0.7)+
  geom_bar(aes(color = lab, fill = lab), position = "stack", stat = "identity") +
  # FIX: 'fill' is not a geom_text aesthetic; map 'group' instead so the
  # labels stack in the same order as the bars.
  geom_text(aes(group = lab, label = lab_prop), colour = "White",
            position = position_stack(vjust = 0.5)) +
  theme_THF() +
  # scale_y_continuous(limits = c(0,100), breaks = seq(0, 100, by = 20))+
  scale_fill_THF() +
  scale_colour_THF() +
  labs(x = "", y = "% of respondents", title = "") +
  theme(plot.title = element_text(size = 14),
        legend.text = element_text(size = 14), legend.position = "bottom",
        axis.text.x = element_text(size = 14, angle = 0, hjust = 0.45),
        axis.text.y = element_text(size = 14))

## Saving Graph
ggsave(here::here('outputs', 'ethnicity.png'), dpi = 300,
       width = 10, height = 6.5)

## Table for excel sheet
sheet <- createSheet(wb, "Ethnicity and Care status")
addDataFrame(as.data.frame(t4), sheet=sheet, startColumn=1, row.names=FALSE)
# Caring within or outside household -----------------------------------------------------------------
## Restrict the sample to carers only and order the caring-intensity factor.
w_all_2 <- final %>%
  filter(carer == "Yes") %>%
  mutate(care_hours = factor(care_hours, levels = c("Low Level Caring", "High Level Caring")))

## The carer-only subset needs its own survey design object.
uos_design_xw <- svydesign(id = ~psu, strata = ~strata,
                           weights = ~probit_lasso_wgt_t25, data = w_all_2)
# Avoid errors when subsetting leaves strata with a single PSU.
options(survey.lonely.psu="adjust")

## Descriptives: where care is provided, by caring intensity.
t5 <- uos_design_xw %>%
  tbl_svysummary(by = "care_hours", include = c(care_loc_cv, care_hours),
                 type = everything() ~ "categorical",
                 label = list(care_loc_cv ~ "Where they are caring?")) %>%
  add_p() %>%
  add_overall() %>%
  bold_labels() %>%
  # FIX: as.tibble() is deprecated; use as_tibble().
  as_tibble() %>%
  mutate_if(is.character, ~replace(., is.na(.), ""))
clean_names(t5)

# Weighted proportions of care location within each caring-intensity group.
tab_s <- svytable(~care_loc_cv + care_hours, design = uos_design_xw) %>%
  as.data.frame() %>%
  group_by(care_hours) %>%
  mutate(sum_care = sum(Freq)) %>%
  ungroup() %>%
  mutate(prop_all = Freq/sum_care)

## Graph: stacked bars of care location by caring intensity.
tab_s %>%
  mutate(lab = ifelse(care_hours == "Low Level Caring", "Providing <20 hours of care", "Providing 20+ hours of care")) %>%
  ggplot(., aes(x = lab, y = prop_all*100)) +
  geom_bar(aes(color = care_loc_cv, fill = care_loc_cv), position = "stack", stat = "identity") +
  theme_THF() +
  scale_y_continuous(limits = c(0, 100), breaks = seq(0, 100, by = 20)) +
  scale_fill_THF() +
  scale_colour_THF() +
  labs(x = "", y = "% of respondents", title = "") +
  theme(legend.text = element_text(size = 14), legend.position = "bottom",
        legend.direction = "vertical",
        axis.text.x = element_text(size = 14, angle = 0, hjust = 0.5),
        axis.text.y = element_text(size = 14))

## Saving graph
ggsave(here::here('outputs', 'care_proximity.png'), dpi = 300,
       width = 10, height = 6.5)

# tbl<-svytable(~care_loc_cv+care_hours, design=uos_design_xw)
# c<-svychisq(~care_loc_cv+care_hours, uos_design_xw)
#
# svychisq(~care_loc_cv+care_hours, uos_design_xw, statistic="adjWald")
# summary(tbl, statistic="adjWald")

## Saving excel sheet
sheet <- createSheet(wb, "Care location and Care status")
addDataFrame(as.data.frame(t5), sheet=sheet, startColumn=1, row.names=FALSE)
# Caring outcomes -----------------------------------------------------------------
## Descriptive: mental health / long-term conditions / NHS waiting, by caring
## status (survey-weighted, with overall column and p-values).
t6 <- uos_design %>%
  tbl_svysummary(by = "care_hours",
                 include = c(GHQ_cv, mltc_short, wait_for_NHS_treat, care_hours),
                 type = everything() ~ "categorical",
                 label = list(GHQ_cv ~ "If experiencing Depressive Symptoms (DS) in Nov 2020/Jan 2021?",
                              mltc_short ~ "Number of long term health conditions in Nov 2020/Jan 2021?",
                              # FIX: typo "wating" -> "waiting" in displayed label
                              wait_for_NHS_treat ~ "Since 1st Jan 2020, have you been waiting for NHS treatment?")) %>%
  add_p() %>%
  add_overall() %>%
  bold_labels() %>%
  # FIX: as.tibble() is deprecated; use as_tibble().
  as_tibble() %>%
  mutate_if(is.character, ~replace(., is.na(.), ""))
clean_names(t6)

## Long table of the three outcome measures, keeping only the "adverse"
## category of each, with proportions within each caring-status group.
df <- svytable(~GHQ_cv + care_hours, design = uos_design) %>%
  as.data.frame() %>%
  rename(Metric = GHQ_cv) %>%
  group_by(care_hours) %>%
  mutate(sum_care = sum(Freq)) %>%
  ungroup() %>%
  mutate(prop_all = Freq/sum_care) %>%
  bind_rows(svytable(~mltc_short + care_hours, design = uos_design) %>%
              as.data.frame() %>%
              rename(Metric = mltc_short) %>%
              group_by(care_hours) %>%
              mutate(sum_care = sum(Freq)) %>%
              ungroup() %>%
              mutate(prop_all = Freq/sum_care)) %>%
  bind_rows(svytable(~wait_for_NHS_treat + care_hours, design = uos_design) %>%
              as.data.frame() %>%
              rename(Metric = wait_for_NHS_treat) %>%
              group_by(care_hours) %>%
              mutate(sum_care = sum(Freq)) %>%
              ungroup() %>%
              mutate(prop_all = Freq/sum_care)) %>%
  filter(Metric %in% c("DS", "2 or more", "Yes")) %>%
  mutate(Metric_lab = case_when(Metric == "DS" ~ "Depressive symptoms Nov 2020/Jan 2021",
                                Metric == "2 or more" ~ "Two or more long term health conditions Nov 2020/Jan 2021",
                                # FIX: typo "wating" -> "waiting"
                                Metric == "Yes" ~ "Since 1st Jan 2020, been waiting for NHS treatment"),
         care_lab = factor(case_when(care_hours == "Low Level Caring" ~ "Providing <20 hrs of care",
                                     care_hours == "High Level Caring" ~ "Providing 20+ hrs of care",
                                     care_hours == "No caring" ~ "Not providing care"),
                           levels = c("Not providing care", "Providing <20 hrs of care", "Providing 20+ hrs of care")),
         lab_prop = percent(prop_all, 1))

# Graph: dodged bars of each outcome by caring status.
df %>%
  ggplot(., aes(x = care_lab, y = prop_all*100, label = lab_prop)) +
  geom_col(aes(color = Metric_lab, fill = Metric_lab), position = position_dodge(0.8), width = 0.7) +
  # FIX: 'fill' is not a geom_text aesthetic; map 'group' instead so the
  # labels dodge consistently with the bars.
  geom_text(aes(group = Metric_lab, label = lab_prop), colour = "White",
            position = position_dodge(width = 0.8), vjust = 1.5) +
  theme_THF() +
  scale_y_continuous(limits = c(0, 100), breaks = seq(0, 100, by = 20)) +
  scale_fill_THF() +
  scale_colour_THF() +
  labs(x = "", y = "% of respondents", title = "") +
  theme(legend.text = element_text(size = 14), legend.position = "bottom",
        legend.direction = "vertical",
        axis.text.x = element_text(size = 14, angle = 0, hjust = 0.5),
        axis.text.y = element_text(size = 14))

## Saving graph
ggsave(here::here('outputs', 'care_outcomes.png'), dpi = 300,
       width = 10, height = 6.5)

## Saving excel sheet
sheet <- createSheet(wb, "Care status and outcomes")
addDataFrame(as.data.frame(t6), sheet=sheet, startColumn=1, row.names=FALSE)

# Closing excel sheet -----------------------------------------------------
saveWorkbook(wb, here::here('outputs', 'Unpaid_Carer.xlsx'))
|
a76bbd0812b2592b40ae631de83382b93bcc248f
|
8e4c50756478359905823835ba31ce09be93a4f6
|
/man/repeatWiseAnalysis.Rd
|
77d10253cd182b592fb1ccb2726a210259f3d5a8
|
[] |
no_license
|
RamsinghLab/arkas
|
0ff25cfdd40363dc914a16c9d3a6369be6371058
|
cdceb435c4ac438f631d67250e44d391aece6a56
|
refs/heads/master
| 2021-05-24T06:28:51.123257
| 2017-05-24T20:05:41
| 2017-05-24T20:05:41
| 55,867,218
| 3
| 3
| null | 2016-08-26T17:10:36
| 2016-04-09T21:13:15
|
R
|
UTF-8
|
R
| false
| true
| 1,394
|
rd
|
repeatWiseAnalysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repeatWiseAnalysis.R
\name{repeatWiseAnalysis}
\alias{repeatWiseAnalysis}
\title{Downstream analysis of bundle-aggregated repeat elements}
\usage{
repeatWiseAnalysis(kexp, design = NULL, how = c("cpm", "tpm"),
p.cutoff = 0.05, fold.cutoff = 1, read.cutoff = 1,
species = c("Homo.sapiens", "Mus.musculus"), adjustBy = "holm")
}
\arguments{
\item{kexp}{a KallistoExperiment or SummarizedExperiment-like object}
\item{design}{a design matrix with 2nd coefficient as one to test}
\item{how}{whether to collapse by tpm or cpm}
\item{p.cutoff}{where to set the p-value cutoff for plots, etc. (0.05)}
\item{fold.cutoff}{where to set the log2-FC cutoff for plots, etc. (1==2x)}
\item{read.cutoff}{minimum read coverage (estimated) for a gene bundle}
\item{species}{which species? (Homo.sapiens and Mus.musculus are the two currently supported)}
\item{adjustBy}{character none, BH,BY, or holm for FDR procedures}
}
\value{
a list w/items design, voomed, fit, top, enriched,
Figures, scaledExprs, clusts, species,
features, ... (perhaps)
}
\description{
Downstream analysis of bundle-aggregated repeat elements
}
\details{
If no design matrix is found, the function will look in
exptData(kexp)$design. If that too is empty it fails.
}
|
137f4479342e2262e09ed5400337e4eced9e6c40
|
72d03ec10b4955bcc7daac5f820f63f3e5ed7e75
|
/input/gcam-data-system/emissions-processing-code/level2/L211.ag_nonco2.R
|
4cf7365427a064cc1d2b4bbbb82b8b48ec5504b6
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bgmishra/gcam-core
|
54daddc3d037571bf745c4cf0d54c0d7a77f493f
|
bbfb78aeb0cde4d75f307fc3967526d70157c2f8
|
refs/heads/master
| 2022-04-17T11:18:25.911460
| 2020-03-17T18:03:21
| 2020-03-17T18:03:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,675
|
r
|
L211.ag_nonco2.R
|
# Before we can load headers we need some paths defined. They
# may be provided by a system environment variable or just
# having already been set in the workspace
if( !exists( "EMISSPROC_DIR" ) ){
if( Sys.getenv( "EMISSIONSPROC" ) != "" ){
EMISSPROC_DIR <- Sys.getenv( "EMISSIONSPROC" )
} else {
# Fail fast: the whole script depends on the data-system location.
stop("Could not determine location of emissions data system. Please set the R var EMISSPROC_DIR to the appropriate location")
}
}
# Universal header file - provides logging, file support, etc.
# These headers define logstart/adddep/printlog/sourcedata/readdata/
# write_mi_data used throughout this script.
source(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
# Register this script and its header dependencies with the logger.
logstart( "L211.ag_nonco2.R" )
adddep(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
adddep(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
printlog( "Historical emissions in the aglu system" )
# -----------------------------------------------------------------------------
# 1. Read files
# Assumption files are sourced (R scripts defining constants) ...
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "COMMON_ASSUMPTIONS", "level2_data_names", extension = ".R" )
sourcedata( "MODELTIME_ASSUMPTIONS", "A_modeltime_data", extension = ".R" )
sourcedata( "AGLU_ASSUMPTIONS", "A_aglu_data", extension = ".R" )
sourcedata( "EMISSIONS_ASSUMPTIONS", "A_emissions_data", extension = ".R" )
# ... while mapping tables and level-1 outputs are read as data frames.
GCAM_region_names <- readdata( "COMMON_MAPPINGS", "GCAM_region_names")
basin_to_country_mapping <- readdata( "WATER_MAPPINGS", "basin_to_country_mapping" )
A_regions <- readdata( "EMISSIONS_ASSUMPTIONS", "A_regions" )
A_agSupplySector <- readdata( "AGLU_ASSUMPTIONS", "A_agSupplySector" )
L103.ag_Prod_Mt_R_C_Y_GLU <- readdata( "AGLU_LEVEL1_DATA", "L103.ag_Prod_Mt_R_C_Y_GLU" )
L205.AgCost_bio <- readdata( "AGLU_LEVEL2_DATA", "L205.AgCost_bio", skip = 4)
L113.ghg_tg_R_an_C_Sys_Fd_Yh <- readdata( "EMISSIONS_LEVEL1_DATA", "L113.ghg_tg_R_an_C_Sys_Fd_Yh" )
L115.nh3_tg_R_an_C_Sys_Fd_Yh <- readdata( "EMISSIONS_LEVEL1_DATA", "L115.nh3_tg_R_an_C_Sys_Fd_Yh" )
L121.nonco2_tg_R_awb_C_Y_GLU <- readdata( "EMISSIONS_LEVEL1_DATA", "L121.nonco2_tg_R_awb_C_Y_GLU", replace_GLU = T )
L122.ghg_tg_R_agr_C_Y_GLU <- readdata( "EMISSIONS_LEVEL1_DATA", "L122.ghg_tg_R_agr_C_Y_GLU", replace_GLU = T )
L123.bcoc_tgmt_R_awb_2000 <- readdata( "EMISSIONS_LEVEL1_DATA", "L123.bcoc_tgmt_R_awb_2000", replace_GLU = T )
A11.max_reduction <- readdata( "EMISSIONS_ASSUMPTIONS", "A11.max_reduction" )
A11.steepness <- readdata( "EMISSIONS_ASSUMPTIONS", "A11.steepness" )
# -----------------------------------------------------------------------------
# 2. Build tables for CSVs
#Sulfur emissions
printlog( "L211.AWBEmissions: AWB emissions in all regions" )
#Interpolate and add region name
#Note: interpolate takes ages, so I'm not using interpolate_and_melt
# Reshape agricultural-waste-burning (AWB) emissions from wide years to long.
L211.AWB <- melt( L121.nonco2_tg_R_awb_C_Y_GLU, id.vars = c( R_C_GLU, "Non.CO2" ), measure.vars = X_emiss_model_base_years,
variable.name = "xyear", value.name = "input.emissions" )
# Year columns are named like "X1990"; strip the leading X.
L211.AWB$year <- as.numeric( substr( L211.AWB$xyear, 2, 5 ) )
#Add region, supplysector, subsector and tech names
L211.AWB <- add_region_name( L211.AWB )
L211.AWB$AgSupplySector <- L211.AWB$GCAM_commodity
L211.AWB$AgSupplySubsector <- paste( L211.AWB$GCAM_commodity, L211.AWB$GLU, sep=crop_GLU_delimiter )
L211.AWB$AgProductionTechnology <- L211.AWB$AgSupplySubsector
#Format for csv file
L211.AWBEmissions <- L211.AWB[ c( names_AgTechYr, "Non.CO2", "input.emissions" ) ]
L211.AWBEmissions$input.emissions <- round( L211.AWBEmissions$input.emissions, digits_emissions )
printlog( "L211.AGREmissions: ag AGR emissions in all regions" )
# Same reshape/labelling for crop-production (AGR) GHG emissions.
L211.AGR <- melt( L122.ghg_tg_R_agr_C_Y_GLU, id.vars = c( R_C_GLU, "Non.CO2" ), measure.vars = X_emiss_model_base_years,
variable.name = "xyear", value.name = "input.emissions" )
L211.AGR$year <- as.numeric( substr( L211.AGR$xyear, 2, 5 ) )
#Add region, supplysector, subsector and tech names
L211.AGR <- add_region_name( L211.AGR )
L211.AGR$AgSupplySector <- L211.AGR$GCAM_commodity
L211.AGR$AgSupplySubsector <- paste( L211.AGR$GCAM_commodity, L211.AGR$GLU, sep=crop_GLU_delimiter )
L211.AGR$AgProductionTechnology <- L211.AGR$AgSupplySubsector
#Format for csv file
L211.AGREmissions <- L211.AGR[ c( names_AgTechYr, "Non.CO2", "input.emissions" ) ]
L211.AGREmissions$input.emissions <- round( L211.AGREmissions$input.emissions, digits_emissions )
printlog( "L211.AGRBioEmissions: bio AGR emissions in all regions" )
#Map in coefficients from assumption file
# Bioenergy N2O uses per-region coefficients rather than historical levels.
L211.AGRBio <- L205.AgCost_bio[ L205.AgCost_bio[[Y]] == ctrl_base_year, names_AgTechYr ]
L211.AGRBio$Non.CO2 <- "N2O_AGR"
L211.AGRBio$bio_N2O_coef <- A_regions$bio_N2O_coef[ match( L211.AGRBio$region, A_regions$region ) ]
printlog( "L211.AnAGREmissions: animal AGR emissions in all regions" )
#Note: interpolate takes ages, so I'm not using interpolate_and_melt
# Animal (livestock) GHG emissions, long format by system and feed.
L211.AN <- melt( L113.ghg_tg_R_an_C_Sys_Fd_Yh, id.vars = c( R, S_S_T, "Non.CO2" ), measure.vars = X_emiss_model_base_years,
variable.name = "xyear", value.name = "input.emissions" )
L211.AN$year <- as.numeric( substr( L211.AN$xyear, 2, 5 ) )
#Format for csv file
L211.AN <- add_region_name( L211.AN )
L211.AnEmissions <- L211.AN[ c( names_StubTechYr, "Non.CO2" ) ]
L211.AnEmissions$input.emissions <- round( L211.AN$input.emissions, digits_emissions )
printlog( "L211.AnNH3Emissions: animal NH3 emissions in all regions" )
#Note: interpolate takes ages, so I'm not using interpolate_and_melt
# NH3 may not have every base year, so intersect with the available columns.
L211.AN_NH3 <- melt( L115.nh3_tg_R_an_C_Sys_Fd_Yh, id.vars = c( R, S_S_T, "Non.CO2" ),
measure.vars = names( L115.nh3_tg_R_an_C_Sys_Fd_Yh )[ names( L115.nh3_tg_R_an_C_Sys_Fd_Yh ) %in% X_emiss_model_base_years ],
variable.name = "xyear", value.name = "input.emissions" )
L211.AN_NH3$year <- as.numeric( substr( L211.AN_NH3$xyear, 2, 5 ) )
#Format for csv file
L211.AN_NH3 <- add_region_name( L211.AN_NH3 )
L211.AnNH3Emissions <- L211.AN_NH3[ c( names_StubTechYr, "Non.CO2" ) ]
L211.AnNH3Emissions$input.emissions <- round( L211.AN_NH3$input.emissions, digits_emissions )
printlog( "L211.AWB_BCOC_EmissCoeff: BC / OC AWB emissions coefficients in all regions" )
#Add region name & replicate for all commodities & base years
# BC/OC use a single year-2000 emission factor applied to all base years.
L211.AWB_BCOC <- add_region_name( L123.bcoc_tgmt_R_awb_2000 )
L211.AWB_BCOC$AgSupplySector <- L211.AWB_BCOC[[C]]
L211.AWB_BCOC$AgSupplySubsector <- paste( L211.AWB_BCOC$AgSupplySector, L211.AWB_BCOC$GLU, sep = crop_GLU_delimiter )
L211.AWB_BCOC$AgProductionTechnology <- L211.AWB_BCOC$AgSupplySubsector
L211.AWB_BCOC <- repeat_and_add_vector( L211.AWB_BCOC, "year", model_base_years )
#Format for csv file
L211.AWB_BCOC_EmissCoeff <- L211.AWB_BCOC[ c( names_AgTechYr, "Non.CO2" ) ]
L211.AWB_BCOC_EmissCoeff$emiss.coef <- round( L211.AWB_BCOC$emfact, digits_emissions )
printlog( "L211.nonghg_max_reduction: maximum emissions coefficient reduction for ag technologies in all regions" )
# GDP-based emissions controls: max reduction and steepness per sector.
L211.nonghg_max_reduction <- rbind( L211.AWB_BCOC[ L211.AWB_BCOC[[Y]] == ctrl_base_year, c( names_AgTechYr, "Non.CO2" ) ],
L211.AWB[ L211.AWB[[Y]] == ctrl_base_year, c( names_AgTechYr, "Non.CO2" ) ] )
L211.nonghg_max_reduction$ctrl.name <- "GDP_control"
L211.nonghg_max_reduction$max.reduction <- A11.max_reduction$max.reduction[
match( L211.nonghg_max_reduction$AgSupplySector, A11.max_reduction$AgSupplySector ) ]
printlog( "L211.nonghg_steepness: steepness of reduction for energy technologies in all regions" )
L211.nonghg_steepness <- L211.nonghg_max_reduction[ c( names_AgTechYr, "Non.CO2" ) ]
L211.nonghg_steepness$ctrl.name <- "GDP_control"
L211.nonghg_steepness$steepness <- A11.steepness$steepness[
match( L211.nonghg_steepness$AgSupplySector, A11.steepness$AgSupplySector ) ]
# Drop regions that have no agriculture/land-use sector in the model.
L211.AnEmissions <- subset( L211.AnEmissions, !region %in% no_aglu_regions )
L211.AnNH3Emissions <- subset( L211.AnNH3Emissions, !region %in% no_aglu_regions )
printlog( "Rename to regional SO2" )
L211.AWBEmissions <- rename_SO2( L211.AWBEmissions, A_regions, TRUE )
L211.nonghg_max_reduction <- rename_SO2( L211.nonghg_max_reduction, A_regions, TRUE )
L211.nonghg_steepness <- rename_SO2( L211.nonghg_steepness, A_regions, TRUE )
# -----------------------------------------------------------------------------
# 3. Write all csvs as tables, and paste csv filenames into a single batch XML file
write_mi_data( L211.AWBEmissions, "OutputEmissionsAg", "EMISSIONS_LEVEL2_DATA", "L211.AWBEmissions", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
write_mi_data( L211.AGREmissions, "OutputEmissionsAg", "EMISSIONS_LEVEL2_DATA", "L211.AGREmissions", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
write_mi_data( L211.AnEmissions, "StbTechOutputEmissions", "EMISSIONS_LEVEL2_DATA", "L211.AnEmissions", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
write_mi_data( L211.AnNH3Emissions, "StbTechOutputEmissions", "EMISSIONS_LEVEL2_DATA", "L211.AnNH3Emissions", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
write_mi_data( L211.AGRBio, "OutputEmissCoeffAg", "EMISSIONS_LEVEL2_DATA", "L211.AGRBio", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
write_mi_data( L211.AWB_BCOC_EmissCoeff, "OutputEmissCoeffAg", "EMISSIONS_LEVEL2_DATA", "L211.AWB_BCOC_EmissCoeff", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
write_mi_data( L211.nonghg_max_reduction, "AgGDPCtrlMax", "EMISSIONS_LEVEL2_DATA", "L211.nonghg_max_reduction", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
write_mi_data( L211.nonghg_steepness, "AgGDPCtrlSteep", "EMISSIONS_LEVEL2_DATA", "L211.nonghg_steepness", "EMISSIONS_XML_BATCH", "batch_all_aglu_emissions.xml" )
logstop()
|
7e3b35a5567512d2f2d69d97ac5783828deef775
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/iopsych/examples/internal.indexMat.Rd.R
|
536a0f1e88838fb33e64550e3e66672109a19f9c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
internal.indexMat.Rd.R
|
# Auto-generated example script for the internal iopsych function .indexMat.
library(iopsych)
### Name: .indexMat
### Title: Finds rxx and rxy for a correlation matrix
### Aliases: .indexMat
### Keywords: internal
### ** Examples
# The package provides no runnable example for this internal function;
# this placeholder just prints a notice.
print("example needed")
|
eec4c051627fa88627885a97b6d6c5a8233bd50e
|
eac5313f1066c61b16667e3b4c1048fde0dde122
|
/local_run.R
|
272a080d0a18c59b51e34e0f8d168966f7add90d
|
[] |
no_license
|
liuyanguu/proper_names
|
b85bd52937b27cfe162e563362eb342cf232783a
|
15e2a32c706878bfe98252b0f6446248fe46f1db
|
refs/heads/main
| 2023-08-14T08:44:12.626466
| 2021-09-30T17:10:43
| 2021-09-30T17:10:43
| 412,153,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 712
|
r
|
local_run.R
|
library("shiny")
library("readxl")
library("cld2")
library("data.table")
library("DT")
library("here")
# NOTE(review): "Chinese" is a Windows-only locale name; confirm target
# platform before running elsewhere.
Sys.setlocale("LC_ALL","Chinese")
# Remove the Shiny upload size limit (-1 = unlimited).
options(shiny.maxRequestSize = -1)
# source("R/func.R")
# One-off data preparation kept for reference: builds the combined
# people + location proper-names table that proper_names.rds now stores.
# dt1 <- setDT(readxl::read_xlsx(here::here("data/people.xlsx"), col_types = "text"))
# head(dt1)
# dt1[, Category:= "人名"]
# dt2 <- setDT(readxl::read_xlsx(here::here("data/location.xlsx")))
# head(dt2)
# dt2[, Category:= "地名"]
# dt12 <- rbindlist(list(dt1, dt2))
# Load the previously combined proper-names table.
dt12 <- readRDS("data/proper_names.rds")
# Small subset used for quick testing of the app.
dt12sub <- dt12[1:1000,]
# Re-save outputs: RDS plus full and test Excel exports.
saveRDS(dt12, "data/proper_names.rds")
writexl::write_xlsx(dt12, "data/proper_names_combined.xlsx")
writexl::write_xlsx(dt12sub, "data/proper_names_combined_test.xlsx")
|
2c4a67da391d5a730f7d9dc1750f69032bfd0465
|
fe53aa17ab441378c19f8fbbcc3a7466fa086d03
|
/server.R
|
fb71e97e07561c9143479bd7cffcb98c3237f044
|
[] |
no_license
|
alexwhan/shinymagick
|
ddc908f6fe1721127a64e3bd627a6ccae9df7985
|
e3bf0e9d0be97095490f56aebaccdad77b42aad0
|
refs/heads/master
| 2021-01-19T04:15:03.726399
| 2016-07-28T06:57:50
| 2016-07-28T06:57:50
| 64,371,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 511
|
r
|
server.R
|
library(shiny)
# FIX: image_read()/image_write() below come from the magick package, which
# this file never loads. Attach it here so server.R works even when
# ui.R/global.R does not attach it for the shared session environment.
library(magick)

shinyServer(function(input, output) {

  # Returns a path to a temp JPEG copy of the uploaded image, or NULL
  # before any file has been uploaded.
  get_image <- reactive({
    if (is.null(input$img_input)) return(NULL)
    outfile <- tempfile(fileext = ".jpg")
    # Column 4 of the fileInput data frame is 'datapath' (the temp upload).
    # NOTE(review): input$img_input$datapath would be clearer and more
    # robust than positional indexing.
    image_write(image_read(input$img_input[[4]]), outfile)
    outfile
  })

  # Render the converted image; the temp file is deleted after rendering.
  output$img_output <- renderImage({
    list(src = get_image())
  }, deleteFile = TRUE)

  # Debug outputs showing the upload metadata.
  output$text <- renderText({
    length(input$img_input)
  })
  output$text2 <- renderText({
    input$img_input[[4]]
  })
})
|
9d736da0decaf8dd8df25c0440e6d4677a0703ae
|
03db6b2e77d001c85f871a20ad83ce98a01243fe
|
/R/sampleAA.R
|
19d9bdab82955a2281577065640a083ad01850d0
|
[] |
no_license
|
kmcalist682336/SimScalingRC
|
2ba27349ee30149f253d5ef8643f99a33af0522f
|
199bb645514abef9656fbe3db5b80e2f21dfa0bf
|
refs/heads/master
| 2020-03-21T09:50:58.225280
| 2018-06-24T04:18:39
| 2018-06-24T04:18:39
| 138,420,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,446
|
r
|
sampleAA.R
|
# Approximate log-likelihood of a proposed latent matrix A.
# Feature probabilities are obtained by projecting the normalized distance
# matrix of new.aa onto pi.star; zz is scored under a Bernoulli model
# (probability pis where zz == 1, 1 - pis where zz == 0).
approx.ll <- function(new.aa, zz, pi.star) {
  pis <- make.norm.dist.mat(new.aa) %*% pi.star
  lik <- zz * pis + (1 - zz) * (1 - pis)
  sum(log(lik))
}
#A function for sampling AA, the matrix of latent variables in the second layer of the model
#The key innovation in this model.
#Pretty fast right now. I spent a lot of time on this function, so I'm fine with it for now.
#Arguments: aa is p x L matrix of latent vars
#bb is d x L matrix of sedon lovel loadings
#rr is p x L binary matrix for text features
#mm is d x p matrix of augmented text data
#zz is p x K binary matrix of top level features
#pi.star is p x K matrix of top level local feature probabilities
#p is number of items
#L is the current number of features on the lower level
# Returns: an updated p x L matrix of latent variables.
# Strategy: column-by-column slice sampling. For each column i, the Gaussian
# part of the conditional is handled analytically (closed-form slice bounds),
# and the binary-layer likelihood (approx.ll) is handled by rejection with
# interval shrinking, capped at 100 shrink steps.
sample.aa <- function(aa,bb,rr,mm,zz,pi.star,p,L){
  #try an element by element slice sampler
  #set.seed(1234)
  new.aa <- aa
  for(i in 1:L){
    # Residual of mm after removing the contribution of all columns except i.
    mdj <- mm - (bb[,-i]%*%t(rr[,-i]*new.aa[,-i]))
    # Squared norm of loading column i.
    b2 <- apply(bb^2,2,sum)[i]
    # Conditional (unconstrained) Gaussian variance and mean for column i;
    # rr[,i] switches the prior-only case (rr == 0) on and off.
    uc.var <- 1/(1 + (rr[,i]*b2))
    bl <- matrix(ncol = p,rep(bb[,i],p))
    mbl <- bl*mdj
    mbl <- apply(mbl,2,sum)
    uc.mean <- uc.var*mbl*rr[,i]
    curr.vals <- new.aa[,i]
    # Slice under the Gaussian density: auxiliary level below the current
    # log-density, then the slice interval has a closed form.
    curr.ll <- dnorm(curr.vals, uc.mean, sqrt(uc.var), log = T)
    aux.uc <- curr.ll - rexp(p,1)
    range.p <- uc.mean + sqrt(-uc.var*((2*aux.uc) + log(2*pi*uc.var)))
    range.m <- uc.mean - sqrt(-uc.var*((2*aux.uc) + log(2*pi*uc.var)))
    range.l <- c()
    range.u <- c()
    for(j in 1:p){
      range.l[j] <- min(range.p[j],range.m[j])
      range.u[j] <- max(range.p[j],range.m[j])
    }
    # Slice level for the binary-layer likelihood of the whole column.
    curr.bn <- approx.ll(new.aa = new.aa, zz = zz, pi.star = pi.star)
    aux.bn <- curr.bn - rexp(1,1)
    st <- 0
    cc <- 0
    # Propose uniformly within the per-element intervals; accept when the
    # binary-layer log-likelihood clears the slice level, otherwise shrink
    # each element's interval toward the current value and retry.
    while(st == 0){
      cc <- cc + 1
      naa <- new.aa
      props <- c()
      for(j in 1:p){
        props[j] <- runif(1,range.l[j],range.u[j])
      }
      naa[,i] <- props
      eval.naa <- approx.ll(new.aa = naa, zz = zz, pi.star = pi.star)
      if(eval.naa > aux.bn){
        new.aa[,i] <- props
        st <- 1
      }else{
        # Safety cap: give up after 100 shrink iterations and keep the
        # current column values.
        if(cc == 100){
          st <- 1
        }
        diffs <- sign(props - new.aa[,i])
        for(j in 1:p){
          if(diffs[j] == -1){
            range.l[j] <- props[j]
          }else{
            range.u[j] <- props[j]
          }
        }
      }
    }
    #print(i)
  }
  return(new.aa)
}
|
057a84965ebcde5c2a87b32e92e54e0a0fbb1ba0
|
d55d27b0dca8c78ad752849b96eb7b849c933d85
|
/man/aov.ispd.imcs.Rd
|
3811fc8333a8f7b21370ffc2a5329bc84e110ea1
|
[] |
no_license
|
cran/ispd
|
f50e477b7388ec59818d98a09a4f823529741516
|
a155e56059cf19558b6de2725007a7b2ab701110
|
refs/heads/master
| 2020-06-22T00:23:25.961235
| 2019-08-19T09:20:03
| 2019-08-19T09:20:03
| 197,588,213
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 783
|
rd
|
aov.ispd.imcs.Rd
|
\name{aov.ispd.imcs}
\alias{aov.ispd.imcs}
\title{Analysis of variance of data from an incomplete split-plot design with incomplete blocks and complete main plots}
\description{This function performs analysis of variance of data from experiments using
an incomplete split-plot design for the situation when blocks are incomplete with
respect to main plot treatments and mainplots are complete with respect to subplot treatments}
\usage{aov.ispd.imcs(obs, block, mp, sp, y)}
\arguments{
\item{obs}{observation numbers}
\item{block}{block}
\item{mp}{main plot treatment}
\item{sp}{subplot treatment}
\item{y}{response variable}
}
\value{Returns ANOVA table of incomplete split-plot design}
\author{Baidya Nath Mandal <mandal.stat@gmail.com>}
\keyword{internal}
|
873f4c52486e0c10842e32b542677e33fc76e95a
|
440df580a0d0be2595b1f80b7f19f154f5aa5eee
|
/Plot.R
|
0a30da0b7c29039e22f7cca3fdb47c1145b6cecb
|
[] |
no_license
|
xywanggg/Data-Science-Foundations-using-R
|
0ecc781b6cb167aaa6d7179e098edecfe5f8fa6e
|
da4a4091832e40e04c43d87d8447a2bd48ee8a96
|
refs/heads/master
| 2022-11-26T06:11:09.066350
| 2020-07-27T22:47:43
| 2020-07-27T22:47:43
| 280,541,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,089
|
r
|
Plot.R
|
## principles of analytic graphics
#1 show comparisons
#2 show causality, mechanism, explanation
#3 show multivariate data
#4 integrate multiple modes of evidence
#5 describe and document the evidence
#6 content is king
# Exploratory base-graphics examples on a Texas PM2.5 dataset.
pollution <- read.csv("./data/pm_tx.csv")
head(pollution)
summary(pollution$PM2.5)

# boxplot
boxplot(pollution$PM2.5, col = "blue")
abline(h = 12)                               # national standard reference line
boxplot(PM2.5 ~ COUNTY, data = pollution, col = "pink")

# histogram
hist(pollution$PM2.5, col = "green", breaks = 50)
# FIX: column name is case-sensitive; 'pm2.5' does not exist, so rug()
# received NULL and errored. Use 'PM2.5' as everywhere else in the script.
rug(pollution$PM2.5)
abline(v = 12, lwd = 2)
abline(v = median(pollution$PM2.5), col = "red", lwd = 4)

# Two stacked histograms comparing counties.
par(mfrow = c(2, 1), mar = c(4, 4, 2, 1))
hist(subset(pollution, COUNTY == "Dallas")$PM2.5, col = "green")
hist(subset(pollution, COUNTY == "Ellis")$PM2.5, col = "green")

# barplot
barplot(table(pollution$COUNTY), col = "wheat", main = "number")

# scatterplot
with(pollution, plot(LATITUDE, PM2.5))  # col=COUNTY
abline(h = 12, lwd = 2, lty = 2)
par(mfrow = c(1, 2), mar = c(5, 4, 2, 1))
with(subset(pollution, COUNTY == "Dallas"), plot(LATITUDE, PM2.5, main = "Dallas"))
with(subset(pollution, COUNTY == "Ellis"), plot(LATITUDE, PM2.5, main = "Ellis"))
#########base plot
?par
#some important base graphics parameters
#pch: the plotting symbol (default is open circle)
#lty: the line type (default is solid line)
#lwd: the line width, specified as an integer multiple
#col: color
#xlab: character string for the x-axis label
#ylab: character string for the y-axis label
#las: the orientation of the axis labels on the plot
#bg: background color
#mar: the margin size
#oma: the outer margin size
#mfrow: number of plots per row, column(plots are filled row-wise)
#mfcol: number of plots per row, column(plots are filled column-wise)
#plot: make a scatterplot, or other type of plot
#lines: add lines to plot
#points: add points to a plot
#text: add text labels to a plot using specified x,y
#title: add annotations to x,y axis labels,title, subtitle, outer margin
#mtext: add arbitrary text to the margins (inner or outer) of the plot
#axis: adding axis ticks/labels
library(datasets)
with(airquality,plot(Wind,Ozone))
title(main="Ozone and Wind 1")
with(airquality,plot(Wind,Ozone,main="Ozone and Wind 2"))
with(subset(airquality,Month==5),points(Wind,Ozone,col="blue"))
with(airquality,plot(Wind,Ozone,main="Ozone and Wind 3",type="n"))
with(subset(airquality,Month==5),points(Wind,Ozone,col="blue"))
with(subset(airquality,Month!=5),points(Wind,Ozone,col="red"))
legend("topright",pch=1,col=c("blue","red"),legend=c("May","Other months"))
with(airquality,plot(Wind,Ozone,main="Ozone and Wind 4",pch=20))
model<-lm(Ozone~Wind,airquality)
abline(model,lwd=2)
par(mfrow=c(1,2))
with(airquality,{
plot(Wind,Ozone, main="Ozone and Wind 5")
plot(Solar.R,Ozone,main="Ozone and Solar Radiat 6")
})
par(mfrow=c(1,3),mar=c(4,4,2,1),oma=c(0,0,2,0))
with(airquality,{
plot(Wind,Ozone, main="Ozone and Wind 7")
plot(Solar.R,Ozone, main="Ozone and Solar Radiation 8")
plot(Temp,Ozone,main="Ozone and Temperature 9")
mtext("Ozone and Weather in New York City", outer=TURE)
})
|
f6d0218490bffe8ed5a1a141429912d1ba296711
|
c600e67f82df25deb8f81d6e1b4ffdf6dd372927
|
/R/convert2snafu.R
|
b7cfd80b37933388f5625da2140d5bfeff29d515
|
[] |
no_license
|
cran/SemNetCleaner
|
3e8675a984baa20e8d9f3b45f5e4095a10ac7f79
|
c9c8671684233f65e499db8082f5c387e76296f3
|
refs/heads/master
| 2021-11-20T18:16:48.827959
| 2021-09-16T13:00:02
| 2021-09-16T13:00:02
| 133,685,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,005
|
r
|
convert2snafu.R
|
#' Pathfinder Network
#'
#' @description Estimates a pathfinder network using the MST-Pathfinder
#' Network method from Quirin et al. (2008; see also Schvaneveldt, 1990)
#'
#' @param ... Matrix or data frame.
#' A clean response matrices
#'
#' @param category Character.
#' Category of verbal fluency data
#'
#' @return A .csv file formatted for SNAFU
#'
#' @details The format of the file has 7 columns:
#' \itemize{
#' \item{id}{Defaults to the row names of the inputted \code{data}}
#'
#' \item{listnum}{The list number for the fluency category. Defaults to 0.
#' Future implementations will allow more lists}
#'
#' \item{category}{The verbal fluency category that is input into the
#' \code{category} argument}
#'
#' \item{item}{The verbal fluency responses for every participant}
#'
#' \item{RT}{Response time. Currently not implemented. Defaults to 0}
#'
#' \item{RTstart}{Start of response time. Currently not implemented. Defaults to 0}
#'
#' \item{group}{Names of groups. Defaults to the names of the objects input into
#' the function (\code{...})}
#' }
#'
#' @examples
#' # Convert data to SNAFU
#' if(interactive())
#' {convert2snafu(open.clean, category = "animals")}
#'
#' @references
#' # For SNAFU, see:
#' Zemla, J. C., Cao, K., Mueller, K. D., & Austerweil, J. L. (2020).
#' SNAFU: The Semantic Network and Fluency Utility.
#' \emph{Behavior Research Methods}, 1-19.
#' https://doi.org/10.3758/s13428-019-01343-w
#'
#' @author Alexander Christensen <alexpaulchristensen@gmail.com>
#'
#' @importFrom utils write.table
#'
#' @export
# Convert data to SNAFU
# Updated 24.09.2020
# Convert one or more cleaned fluency response matrices to SNAFU's .csv format
# Updated 24.09.2020
#
# ...      : cleaned response matrices/data frames (rows = participants,
#            columns = successive responses, NA for unused cells)
# category : character, the verbal fluency category (e.g. "animals")
#
# Side effects: interactively prompts for an output directory and file name,
# then writes a .csv with columns id, listnum, category, item, RT, RTstart,
# group (group = the name of each object passed in `...`). Returns the
# written matrix invisibly.
convert2snafu <- function (..., category)
{
  # Data list
  data.list <- list(...)
  # Group names come from the caller's expressions; this works for a single
  # input as well as for several
  name <- as.character(substitute(list(...)))
  name <- name[-which(name == "list")]
  # Initialize snafu matrix
  snafu.mat <- matrix(0, nrow = 0, ncol = 7)
  colnames(snafu.mat) <- c("id", "listnum", "category", "item", "RT", "RTstart", "group")
  # BUG FIX: the original special-cased length(data.list) == 1 with an EMPTY
  # branch, so a single input (as in the documented example) produced an
  # empty file. The loop below handles any number of inputs, including one.
  for(i in seq_along(data.list))
  {
    # Target data
    target.data <- as.matrix(data.list[[i]])
    # IDs: zero-padded when numeric row names exist
    if(is.null(row.names(target.data)))
    {
      id <- paste("A", seq_len(nrow(target.data)), sep = "")
    }else{
      id <- paste("A", formatC(as.numeric(row.names(target.data)), digits = 2, format = "d", flag = 0), sep = "")
    }
    for(j in seq_len(nrow(target.data)))
    {
      # Target participant
      target.part <- target.data[j,]
      # Non-missing responses, in order of production
      item <- na.omit(target.part)
      # Target ID
      target.id <- rep(id[j], length(item))
      # List number (single-list data only, for now)
      listnum <- rep(0, length(item))
      # Category column (original local was misspelled `categorey`)
      category.rep <- rep(category, length(item))
      # RT / RTstart are not collected but SNAFU expects the columns
      RT <- rep(0, length(item))
      RTstart <- rep(0, length(item))
      # Group
      group <- rep(name[i], length(item))
      # Bind data
      target.mat <- cbind(target.id, listnum,
                          category.rep, item,
                          RT, RTstart, group)
      row.names(target.mat) <- NULL
      colnames(target.mat) <- colnames(snafu.mat)
      # Append snafu matrix
      snafu.mat <- rbind(snafu.mat, target.mat)
    }
  }
  # Choose directory (interactive)
  DIR <- easycsv::choose_dir()
  # Get file name (interactive)
  FILENAME <- readline("Name of file: ")
  # Set up path
  PATH <- paste(DIR, FILENAME, sep = "/")
  PATH <- gsub("\\\\", "/", PATH)
  PATH <- paste(PATH, "csv", sep = ".")
  write.table(snafu.mat, file = PATH,
              quote = FALSE, sep = ",", row.names = FALSE)
  # Message to user
  message(paste("SNAFU formatted file was saved in: "), PATH)
  invisible(snafu.mat)
}
|
db6e6f3114c9c3890745b4ea4ac588fe020becb6
|
fec3bf0fe08305ca687f871d444c78b327879420
|
/get_data.R
|
9edab84c76ee2ea9a0d9cecddafe7e5221bcf22c
|
[] |
no_license
|
GeorgyMakarov/rus_traffic_accidents
|
7eaddb2414ceefcfe057d93986833ba49c190bfa
|
7fa3a3c1ea9b03305365d155e04b00809788f363
|
refs/heads/master
| 2022-11-06T18:40:04.454698
| 2020-06-29T09:18:34
| 2020-06-29T09:18:34
| 260,383,742
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,324
|
r
|
get_data.R
|
# Prerequisite libraries
library(reshape2)
library(ggplot2)
library(dplyr)
library(tidyr)
library(lubridate)
library(XML)
library(readr)
library(stringi)
library(stringr)

# Read every .xml accident file and stack them into one data frame.
# NOTE(review): setwd() with an absolute, user-specific path is not portable;
# consider relative paths or file.path() -- left unchanged here.
files <- list.files("./raw_data")
files <- as.list(files)
setwd("./raw_data")
list_file <- list.files(pattern = "*.xml") %>%
  lapply(xmlToDataFrame, stringsAsFactors = F) %>% bind_rows
setwd("C:/Users/Георгий/Documents/GitHub/rus_traffic_accidents/")
# filter rows in the top of the table - they contain general info not needed
# for the analysis, because this info tells us about region - St. Pete, but we
# already have filtered St. Pete.
list_file <- list_file %>%
  filter(!(is.na(DTPV))) %>% select(-text)
list_file$date <- dmy(list_file$date)
list_file$time <- as.POSIXct(list_file$time, format = "%H:%M")
xmldf <- list_file %>% arrange(date, time, DTPV, district)
rm(list_file)
# check data summary
# need to transform to numeric: KTS, KUCH, POG, RAN
summary(xmldf)
# transform columns to numeric
xmldf$KTS <- as.numeric(xmldf$KTS)
xmldf$KUCH <- as.numeric(xmldf$KUCH)
xmldf$POG <- as.numeric(xmldf$POG)
xmldf$RAN <- as.numeric(xmldf$RAN)
# write data description file
# check for correlation between variables
# check for NA values in each column
sum(complete.cases(xmldf))
# split infoDtp to columns
## create column to describe the driving mode after the accident
# (text before the first x.y.z / x.y numeric marker in infoDtp)
driving_mode <- xmldf %>%
  separate(infoDtp, into = c("text1"),
           sep = "([:digit:]+\\.[:digit:]+\\.[:digit:]+)")
driving_mode <- driving_mode %>%
  separate(text1, into = c("text1"),
           sep = "([:digit:]+\\.[:digit:]+)") %>% select(text1)
xmldf <- cbind(xmldf, driving_mode)
xmldf$driving_mode <- xmldf$text1
xmldf <- xmldf %>% select(-text1)
## extract coordinates from infoDtp (the x.y.z-looking token)
coords_dtp <- xmldf %>%
  select(date, infoDtp) %>%
  extract(infoDtp, c("text1"),
          "([:digit:]+\\.[:digit:]+\\.[:digit:]+)", remove = TRUE) %>%
  select(text1)
xmldf <- cbind(xmldf, coords_dtp)
xmldf$coords_dtp <- xmldf$text1
xmldf <- xmldf %>% select(-text1)
## extract road type from infoDtp
road_type <- xmldf %>% select(infoDtp) %>%
  separate(infoDtp, into = c("text1", "text2"),
           sep = "([:digit:]+\\.[:digit:]+\\.[:digit:]+)") %>%
  select(text2)
road_type <- road_type %>%
  extract(text2, c("text1"), "([^A-z]+[:punct:])", remove = FALSE)
road_type <- road_type %>%
  extract(text1, c("text1"), "([:alnum:]+[:alnum:]+)", remove = TRUE)
xmldf$road_type <- road_type$text1
## read .csv files and create second dataset - use this dataset to obtain:
## latitude, longitude, road category, road condition, weather condition
## merging two datasets on artificial key: date time dtpv district
### read all the csv files
setwd("./csv_data")
list_file <- list.files(pattern = "*.csv") %>%
  lapply(read.csv, sep = ";", encoding = "UTF-8") %>% bind_rows
setwd("C:/Users/Георгий/Documents/GitHub/rus_traffic_accidents/")
raw_data <- data.frame(list_file)
### transform .csv file to split the columns
raw_data <- raw_data %>% filter(!is.na(Номер))
raw_data <- unique(raw_data)
raw_data <- raw_data %>% select(Номер, Дата, Время, Схема, Широта, Вид.ДТП,
                                Адрес, Дорога, Категория.дороги, Состояние.погоды.1,
                                Состояние.проезжей.части, Освещение)
raw_data$weather_cond <- paste(raw_data$Состояние.погоды.1,
                               raw_data$Состояние.проезжей.части)
raw_data$road_cond <- raw_data$Освещение
raw_data$date <- raw_data$Дата
raw_data$time <- raw_data$Время
raw_data$id <- raw_data$Схема
raw_data$latitude <- raw_data$Широта
# NOTE(review): `longitude` is filled from the accident-type column (Вид.ДТП),
# not from a longitude column; likewise `type` comes from the address column
# (Адрес) and `district` from the road column (Дорога). This looks like a
# column mis-mapping inherited from the raw export -- verify against the
# source csv layout before trusting these fields.
raw_data$longitude <- raw_data$Вид.ДТП
raw_data$road_cat <- raw_data$Категория.дороги
raw_data$type <- raw_data$Адрес
raw_data$district <- raw_data$Дорога
raw_data <- raw_data %>% select(date, time, type, district, latitude, longitude,
                                road_cat, road_cond, weather_cond)
raw_data$date <- dmy(raw_data$date)
raw_data <- raw_data %>% arrange(date, time, type, district)
raw_data$time <- as.POSIXct(raw_data$time, format = "%H:%M")
raw_data$key <- paste(raw_data$date, raw_data$time, raw_data$type, raw_data$district)
### make key in xml file
xmldf$key <- paste(xmldf$date, xmldf$time, xmldf$DTPV, xmldf$district)
### check if all rows match by the key
set_raw <- raw_data
set_xml <- xmldf
d1 <- data.frame(set_raw, set_xml)
# NOTE(review): data.frame() renames the duplicate column to key.1; the
# row-wise comparison below only makes sense if both frames have identical
# row order and row count -- confirm.
d1$check <- d1$key == d1$key.1
sum(d1$check == TRUE)
rm(set_raw, set_xml, d1)
rm(files, coords_dtp, driving_mode, road_type)
raw_data_sel <- raw_data %>% select(key.1 = key, latitude, longitude, road_cat,
                                    road_cond, weather_cond)
### merge two datasets (cbind relies on the row-order match checked above)
d2 <- cbind(xmldf, raw_data_sel)
rm(list_file, xmldf, raw_data, raw_data_sel)
### kick temporary columns
d2 <- d2 %>% select(dtpv = DTPV, date, time, district, kts = KTS, kuch = KUCH,
                    fatal = POG, injury = RAN, driving_mode, latitude,
                    longitude, road_cat, road_cond, weather_cond)
# save raw data
write.csv(d2, "raw_data.csv")
|
5204a2fdc66cc2debcb9eaf8aed7e60f6000f914
|
aa550db31fcc15f424a8468797c459048d50b8fd
|
/tests/testthat/test-assert.R
|
ebd3080a9abd3aceb37cad08671bdbf5131276f9
|
[] |
no_license
|
jeffreypullin/vctrs
|
5e23153a5195103b5287518389d067cb96092a11
|
035311f5ed1eb8cf9f0d7545372d72f90a31a7e0
|
refs/heads/master
| 2020-05-04T16:28:18.079491
| 2019-04-02T12:04:29
| 2019-04-03T07:51:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,741
|
r
|
test-assert.R
|
context("test-assert")
test_that("basic assert is idempotent", {
x <- new_vctr(1:4)
expect_true(vec_is(x))
expect_identical(vec_assert(x), x)
expect_identical(vec_assert(x), new_vctr(1:4))
expect_false(withVisible(vec_assert(x))$visible)
expect_true(vec_is(1:4))
expect_identical(vec_assert(1:4), 1:4)
})
test_that("asserting ptype", {
x <- new_vctr(1:4)
good <- new_vctr(integer())
expect_true(vec_is(x, good))
expect_error(vec_assert(x, good), NA)
# Is this the correct error message?
bad <- new_vctr(double())
expect_false(vec_is(x, bad))
expect_error(vec_assert(x, bad), class = "vctrs_error_assert_ptype")
})
test_that("asserting size", {
x <- new_vctr(1:4)
expect_true(vec_is(x, size = 4))
expect_error(vec_assert(x, size = 4), NA)
expect_false(vec_is(x, size = 5))
expect_error(vec_assert(x, size = 5), class = "vctrs_error_assert_size")
})
test_that("vec_assert() labels input", {
expect_error(
vec_assert(new_vctr(1:4), size = 5),
regexp = "`new_vctr\\(1:4\\)` must have",
class = "vctrs_error_assert_size"
)
expect_error(
vec_assert(new_vctr(1:4), size = 5, arg = "foobar"),
regexp = "`foobar` must have",
class = "vctrs_error_assert_size"
)
})
test_that("bare atomic vectors are vectors but not recursive", {
expect_true(vec_is_vector(TRUE))
expect_true(vec_is_vector(1L))
expect_true(vec_is_vector(1))
expect_true(vec_is_vector(1i))
expect_true(vec_is_vector("foo"))
expect_true(vec_is_vector(as.raw(1)))
})
test_that("S3 atomic vectors are vectors", {
expect_true(vec_is_vector(foobar(TRUE)))
expect_true(vec_is_vector(foobar(1L)))
expect_true(vec_is_vector(foobar(1)))
expect_true(vec_is_vector(foobar(1i)))
expect_true(vec_is_vector(foobar("foo")))
expect_true(vec_is_vector(foobar(as.raw(1))))
})
test_that("bare lists are recursive", {
expect_true(vec_is_vector(list()))
})
test_that("S3 lists are not vectors by default", {
expect_false(vec_is_vector(foobar()))
})
test_that("can override `vec_is_vector()` for S3 lists", {
scoped_bindings(.env = global_env(),
vec_proxy.vctrs_foobar = function(x) unclass(x)
)
expect_true(vec_is_vector(foobar()))
})
test_that("data frames and records are vectors", {
expect_true(vec_is_vector(mtcars))
expect_true(vec_is_vector(new_rcrd(list(x = 1, y = 2))))
})
test_that("non-vector base types are scalars", {
expect_identical(vec_typeof(quote(foo)), "scalar")
expect_identical(vec_typeof(pairlist("")), "scalar")
expect_identical(vec_typeof(function() NULL), "scalar")
expect_identical(vec_typeof(env()), "scalar")
expect_identical(vec_typeof(~foo), "scalar")
expect_identical(vec_typeof(base::`{`), "scalar")
expect_identical(vec_typeof(base::c), "scalar")
expect_identical(vec_typeof(expression()), "scalar")
expect_false(vec_is_vector(quote(foo)))
expect_false(vec_is_vector(pairlist("")))
expect_false(vec_is_vector(function() NULL))
expect_false(vec_is_vector(env()))
expect_false(vec_is_vector(~foo))
expect_false(vec_is_vector(base::`{`))
expect_false(vec_is_vector(base::c))
expect_false(vec_is_vector(expression()))
expect_false(vec_is(quote(foo)))
expect_false(vec_is(pairlist("")))
expect_false(vec_is(function() NULL))
expect_false(vec_is(env()))
expect_false(vec_is(~foo))
expect_false(vec_is(base::`{`))
expect_false(vec_is(base::c))
expect_false(vec_is(expression()))
expect_error(vec_assert(quote(foo)), "must be a vector")
expect_error(vec_assert(pairlist("")), "must be a vector")
expect_error(vec_assert(function() NULL), "must be a vector")
expect_error(vec_assert(env()), "must be a vector")
expect_error(vec_assert(~foo), "must be a vector")
expect_error(vec_assert(base::`{`), "must be a vector")
expect_error(vec_assert(base::c), "must be a vector")
expect_error(vec_assert(expression()), "must be a vector")
})
test_that("vec_assert() uses friendly type in error messages", {
# Friendly type will be generated in rlang in the future. Upstream
# changes should not cause CRAN failures.
skip_on_cran()
expect_error(vec_assert(function() NULL), "must be a vector, not a function")
})
test_that("vec_typeof() handles all types", {
for (i in seq_along(empty_types)) {
expect_identical(vec_typeof(!!empty_types[[i]]), !!names(empty_types)[[i]])
}
})
test_that("bare prototypes act as partial types", {
scoped_bindings(.env = global_env(),
vec_slice.vctrs_foobar = function(x, i) foobar(x[i])
)
expect_true(vec_is(foobar(1), dbl()))
expect_error(NA, object = vec_assert(foobar(1), dbl()))
})
test_that("data frames are always classified as such even when dispatch is off", {
expect_identical(vec_typeof_bare(mtcars), "dataframe")
})
|
01c6c8645367c457860d1268622db7f90514a729
|
c7d703db40b9639e2c440c2484975966aa081d1c
|
/man/opnmfR_test_ranksel.Rd
|
879f016acc2cc6ff53c8ab4a16b49f6861461620
|
[] |
no_license
|
kaurao/opnmfR
|
ef11922b141be3995c968de86197e7db7fbae5f5
|
5972f8db63df91776ba2116f827479e99292223c
|
refs/heads/master
| 2023-03-01T11:20:26.160096
| 2021-02-19T08:27:13
| 2021-02-19T08:27:13
| 324,676,654
| 1
| 0
| null | 2021-02-18T11:19:34
| 2020-12-27T03:07:17
|
R
|
UTF-8
|
R
| false
| true
| 1,183
|
rd
|
opnmfR_test_ranksel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opnmfR.R
\name{opnmfR_test_ranksel}
\alias{opnmfR_test_ranksel}
\title{Rank selection test on user provided data
Runs all three available rank selection methods.}
\usage{
opnmfR_test_ranksel(X = NULL, rs = NULL, W0 = "nndsvd", nrepeat = 1)
}
\arguments{
\item{X}{A matrix, if NULL the "iris" data is used (default NULL)}
\item{rs}{A vector of ranks to test for selection,
if rs=NULL then \code{1:nrow(X)} is used (default NULL)}
\item{W0}{A string or matrix for initialization (default "nndsvd")}
\item{nrepeat}{A number, number of iterations for rank selection,
i.e. number of permutations for \code{opnmfR_ranksel_perm}
and number of split-halves for \code{opnmfR_ranksel_splithalf} (default 1)}
}
\value{
A list with rank selection outputs from \code{opnmfR_ranksel_perm}, \code{opnmfR_ranksel_ooser}, and
\code{opnmfR_ranksel_splithalf}
}
\description{
Rank selection test on user provided data
Runs all three avilable rank selection methods.
}
\examples{
result <- opnmfR_test_ranksel()
}
\seealso{
\code{opnmfR_ranksel_perm}, \code{opnmfR_ranksel_ooser}, and
\code{opnmfR_ranksel_splithalf}
}
|
bea80c596e4632a183cc422c85bd5ceeaea21db5
|
db21b12d844b8323e1bdf8ef00c8e60e5f29c297
|
/R/seawifs.R
|
ad03ade04362d648adaa3f0958b22c4a8ee6d839
|
[] |
no_license
|
AustralianAntarcticDivision/future-krill
|
dc3546c4a957883a9f3c2f89a9153fe64d52e2cc
|
04993920f9dc9db14fcdc9001aa7381b6f197c2c
|
refs/heads/master
| 2020-08-31T09:26:33.233546
| 2020-01-09T04:58:42
| 2020-01-09T04:59:47
| 218,659,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,242
|
r
|
seawifs.R
|
library(SOmap)
#x <- SOmap_data$CCAMLR_SSMU
library(sf)
# Krill domain polygons
# NOTE(review): absolute cluster-specific path -- not portable.
x <- as(readRDS("/perm_storage/home/mdsumner/Git/future-krill/shapes/Area_48_Krill_Domain.rds"), "Spatial")
ex <- extent(x) + 1
## map seawifs and modis bins to ssmu polys
library(croc)
n_bins <- 2160
theprod <- "SeaWiFS"
sw_bins <- tibble::tibble(bin_num = crop_init(initbin(NUMROWS = n_bins), ex))
sw_bins[c("lon", "lat")] <- croc::bin2lonlat(sw_bins$bin_num, n_bins)
# Assign each bin centre to the polygon it falls inside (NA = outside all),
# then drop the unmatched bins
sw_bins$poly <- sp::over(SpatialPoints(as.matrix(sw_bins[c("lon", "lat")]),
                                       proj4string = CRS(projection(x))), as(x, "SpatialPolygons"))
sw_bins <- dplyr::filter(sw_bins, !is.na(poly))
## group daily files by month
library(raadtools)
library(dplyr)
files <- oc_sochla_files(product = theprod) %>% mutate(month = format(date, "%Y-%m")) %>% group_by(month)
library(furrr)
#plan(sequential)
plan(multicore)
library(purrr)
# Read one month's worth of daily files and average chl-a per polygon.
# NOTE(review): relies on the globals `sw_bins`, `files` and `theprod`
# defined above rather than taking them as arguments -- confirm this is
# intentional before reusing elsewhere.
read_sw <- function(x, ...) {
  future_map_dfr(x$date, ~read_oc_sochla(.x, bins = sw_bins, inputfiles = files, product = theprod)) %>% group_by(poly) %>%
    summarize(chla = mean(chla_johnson), nbins = n())
}
## takes about an hour for MODISA
sw_chla <- files %>% group_split() %>% future_map(read_sw)
saveRDS(sw_chla, "sw_chla.rds")
|
4efa83538e5001494ec3b7440968a420cec6d73f
|
1f0764b617f16a9a85347b4fb343504944c59249
|
/man/eigenSentences.Rd
|
e0bde8d23d110e2bd895a874624ec370191ef568
|
[] |
no_license
|
stnava/RKRNS
|
8eb5cd0cdff9f015289673a315f48f7d24dfc6b9
|
252269d7eab3d7db120f347f3cd113b104a000ae
|
refs/heads/master
| 2021-01-22T22:45:08.564502
| 2017-08-24T13:22:40
| 2017-08-24T13:22:40
| 21,015,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 940
|
rd
|
eigenSentences.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eigenSentences.R
\name{eigenSentences}
\alias{eigenSentences}
\title{Simple eigenSentence estimation function.}
\usage{
eigenSentences(wordembed, functiontoapply = sum, normalize = F,
eigsentbasislength = NA, sentencesIn = NA, eventdata = NA)
}
\arguments{
\item{wordembed}{the words and their vector embedding - a data frame with n
basis length columns}
\item{functiontoapply}{e.g. mean, max, min, a custom function --- needs to
work with apply}
\item{normalize}{normalize the magnitude of the eigsentences to be 1}
\item{eigsentbasislength}{less than or equal to the full basis length (100)}
}
\value{
eigensentence matrix is output
}
\description{
Applies a function to a matrix representation of a sentence to get an
eigensentence map.
}
\examples{
data(reuters_words,package="RKRNS")
esent<-eigenSentences( reuters_words )
}
\author{
Avants BB
}
|
1cdcf5522ac44cfe15a2e02c315a2ccd0356c276
|
100c3acce75579481f94d162ed803c094292e88c
|
/R/Repaso.R
|
544fe0c04b3ca2d1c3527051b33ac04dbd33d2f5
|
[] |
no_license
|
jcms2665/WorkshopR_2
|
a955a753ca0dad245d9f8fbe605655cb01c04712
|
47369a43119e4b421e83be59d7b81dcf9bc69a5a
|
refs/heads/master
| 2021-05-05T07:41:58.404780
| 2018-01-26T16:44:51
| 2018-01-26T16:44:51
| 118,878,345
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 6,858
|
r
|
Repaso.R
|
# Repaso de R
## Contenido
#1. Preliminares
#2. Objetos en R
#3. Paquetes y librerías
#4. Importar datos
#5. Gráficos
#1. Preliminares
# Comentarios
# Todo aquello que se escriba a la derecha del signo numeral (#) se
# coloreará de verde pálido y será tomado por R como un comentario.
## Ejecutar una instrucción: <Ctrl> + <R>
# Ubicar el cursor al inicio de la línea de comando o seleccionar un
# conjunto de líneas de comandos y oprimir las teclas <Ctrl> y <R>.
## La Consola
# El signo '>' al final de la consola significa que R está listo para
# ejecutar la siguiente tarea.
# Un signo de '+' al final es indicativo de que la instrucción
# ejecutada está incompleta.
## Operadores
# Aritméticos: +, -, *, / y ^.
# Relacionales: >, >=, <, <=, == y !=.
# Lógicos: & y |.
#2. Objetos en R
# Un objeto en R puede ser una tabla con datos, una base de datos,
# una variable o un valor.
# Con el operador '<-' se asigna un valor a un objeto. Los objetos
# aparecen en la ventana superior de la derecha.
## Objetos numéricos
x <- 2
## Objetos de caracteres
aqui <- "Chihuahua"
## Vector numérico
cm <- c(167, 172, 153, 164, 182, 147)
kg <- c(48, NA, 55, 63, 71, 49)
## Vector de caracteres
nivel <- c("A", "B", "C", "D", "E", "F")
## Matrices
mv <- matrix(cm, nrow=3, ncol=2)
mh <- matrix(cm, nrow=3, ncol=2, byrow=TRUE)
## Llamar a los objetos
mv
mh
## Factor
# Objeto que almacena el valor de una variable categórica.
sexo <- factor(c("H", "M", "M", "M", "H", "M"))
summary(sexo)
## Data frame
# Un 'data frame' es más general que una matriz. Las columnas pueden
# tener diferentes clases de objetos (numéricos, factores, etc).
datos <- data.frame(nivel, sexo,cm, kg)
View(datos)
## Borrar objetos del workspace
rm(x, aqui) # Sólo algunos objetos
rm(list = ls()) # Todos los objetos
#3. Paquetes y librerías
# En la Red existe un sin número de paquetes y están disponibles al
# público de manera gratuita. Para usar estos recursos hay que:
# 1o. Descargar e instalar el paquete de interés.
# 2o. Cargar el paquete a la sesión de trabajo.
# Ejemplo. Pirámide de población.
install.packages("pyramid")
library(pyramid)
## Carpeta de trabajo
getwd()
# Cambiar carpeta de trabajo
setwd("C:\\Users\\jmartinez\\Desktop\\teTra-Red-master\\teTra-Red-master\\data")
#4. Importar datos
# En la práctica es común encontrar/tener la información almacenada
# en varios formatos. Los más comunes son: dbf, csv, dta, sav y dat
# R puede cargar/abrir cualquier base de datos, no importa el
# formato; sólo se necesita la librería 'foreign'.
install.packages("foreign")
library(foreign)
enut <- read.dta("ENUT.dta")
## Guardar una base de datos o una tabla en formato *.RData.
save(enut, file = "ENUT2014.RData")
rm(list=ls())
# Para cargar los datos utilizamos la función 'load()'.
load("ENUT2014.RData")
# ¿Qué variables tiene la ENUT?
names(enut)
# p 7.3: "En general, ¿qué tan feliz diría que es usted?"
# Para cambiar el nombre a una variable usamos la función 'rename'
# (se encuentra en el paquete 'reshape').
install.packages("reshape")
library(reshape)
## Renombrar la variable p7_3
enut <- rename(enut, c(p7_3 = "felicidad"))
names(enut)
## Selección de variables
# La forma de acceder a las variables en R es mediante el nombre del
# base (objeto), seguido del signo "$" y el nombre de la variable.
# Desplegar los primeros valores de la variable 'edad'.
head(enut$edad)
## Crear una variable
# Tiempo dedicado a la limpieza del hogar
enut$limpiar <- enut$p6_5_2_2 + (enut$p6_5_2_3/60)
## Resumen de datos
## Tabla de frecuencias
# Distribución de los individuos según nivel de felicidad
table(enut$felicidad)
# 1 Nada; 2 Poco feliz; 3 Más o menos; 4 Feliz; y 5 Muy feliz
# Distribución incluyendo los valores perdidos
table(enut$felicidad, useNA = "always")
# Distribución de los individuos por 'felicidad' y 'sexo'
table(enut$felicidad, enut$sexo)
# Frecuencia relativa de los individuos por 'felicidad' y 'sexo'
# Por renglón (prop. hombres + prop. mujeres = 1)
prop.table(table(enut$felicidad, enut$sexo), 1)
# Por columna (prop. nada + ... + prop. muy feliz = 1)
prop.table(table(enut$felicidad, enut$sexo), 2)*100
## Función 'aggregate'
# Felicidad media por nivel de escolaridad (niv)
aggregate(enut$felicidad, by = list(enut$niv),
FUN = mean, na.rm = TRUE)
## Función 'summarySE'
install.packages("Rmisc")
library(Rmisc)
summarySE(enut, measurevar="limpiar", groupvars=c("sexo"),
na.rm = TRUE)
#4. Gráficos
load("data/ENUT2014.RData")
enut$limpiar <- enut$p6_5_2_2 + (enut$p6_5_2_3/60)
## De línea
# Ejemplo. Tiempo promedio dedicado a la limpieza del hogar por edad
limpieza <- aggregate(enut$limpiar, by = list(enut$edad),
FUN = mean, na.rm = TRUE)
head(limpieza)
names(limpieza) <- c("edad","media")
head(limpieza)
plot(limpieza$edad ,limpieza$media, type="l", xlab="Edad",
ylab="Tiempo promedio")
## Histogramas
# Ejemplo. Tiempo dedicado a cocinar
# Mujeres
hist(enut$p6_4_3_2[enut$sexo == 2], freq = FALSE,
ylab = "Frec. rel.", xlab = "Horas", breaks = 20,
ylim = c(0, 0.4), col = "purple")
# Hombres
hist(enut$p6_4_3_2[enut$sexo == 1], freq = FALSE,
ylab = "Frec. rel.", xlab = "Horas", breaks = 20,
ylim = c(0, 0.4), col = "cyan", add=TRUE)
## Gráfica de caja
boxplot(enut$limpiar ~ enut$sexo,
main = "Tiempo dedicado a limpiar")
enut$sexof <- factor(enut$sexo, levels = c(1,2),
labels = c("Hombres", "Mujeres"))
boxplot(enut$limpiar ~ enut$sexof,
main = "Tiempo dedicado a limpiar")
## Guardar en el escritorio las imágenes como un archivo *.png
getwd()
setwd("C:/Users/marius/Desktop")
png("Limpiar.png")
plot(limpieza$edad ,limpieza$media, type="l", xlab="Edad",
ylab="Tiempo promedio")
dev.off()
# Varias gráficas en una imagen
png("Arreglo de gráficas - 2 en 1.png", width = 700, height = 800)
par(mfrow = c(2,1))
boxplot(enut$escoacum ~ enut$p7_3,
main="Escolaridad por nivel de felicidad",
xlab="Nivel de felicidad", ylab="Años de escolaridad",
col="cyan")
plot(limpieza$edad ,limpieza$media, type="l",
main="Tiempo promedio dedicado a la \n limpieza del hogar por edad",
xlab="Edad", ylab="Media de felicidad")
par(mfrow = c(1,1))
dev.off()
|
5606d817d0869fa7c4e2991c3a6affb0a1dd4c09
|
066d43e9d84e29c05b9d3ab423665efda8008eb7
|
/R/map-utils.R
|
ebc56f555d890786f5824ea29783a78a52d4aa54
|
[] |
no_license
|
espinielli/italian-comuni-bot
|
18bd16d10f9ffcc5ada673df9ce7bb34ab65c8b6
|
96871013862bc8637f8d3443f2a9188a5771e78b
|
refs/heads/master
| 2021-06-15T01:23:42.403848
| 2021-02-14T13:27:31
| 2021-02-14T13:27:31
| 139,019,912
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,952
|
r
|
map-utils.R
|
# Fixed to ggmap:calc_zoom
# see https://github.com/dkahle/ggmap/pull/141
#
# Compute a web-map zoom level that covers the supplied extent. Accepts one
# of three input forms:
#   (a) a single make_bbox()-style named bounding box passed as `lon`;
#   (b) two length-2 numeric ranges `lon` and `lat`;
#   (c) a data frame in `data` plus unquoted column names for `lon`/`lat`
#       (resolved via non-standard evaluation with substitute()).
# NOTE: `adjust` must be an integer literal (e.g. 1L) when supplied --
# stopifnot(is.integer(adjust)) rejects a plain double such as 1.
calc_zoom_fix <- function (lon, lat, data, adjust = 0, f = 0.05)
{
  if (!missing(adjust))
    stopifnot(is.integer(adjust))
  if (missing(data)) {
    if (missing(lat)) {
      # Case (a): a named bounding box in make_bbox() format
      bbox <- lon
      errorString <- "if specifying a bounding box, the format should match that of make_bbox."
      if (length(bbox) != 4)
        stop(errorString, call. = FALSE)
      if (!all(names(bbox) == c("left", "bottom", "right",
                                "top")))
        stop(errorString, call. = FALSE)
      lon_range <- bbox[c("left", "right")]
      lat_range <- bbox[c("bottom", "top")]
    }
    else {
      # Case (b): explicit lon/lat ranges
      if (length(lon) != 2 || length(lat) != 2 || !is.numeric(lon) ||
          !is.numeric(lat))
        stop("if specifying ranges, they both must be of length 2 and numeric.")
      lon_range <- sort(lon)
      lat_range <- sort(lat)
    }
  }
  else {
    # Case (c): data frame + unquoted column names (NSE -- do not call this
    # branch with column names stored in variables)
    lon <- data[, deparse(substitute(lon))]
    lat <- data[, deparse(substitute(lat))]
    bbox <- ggmap::make_bbox(lon, lat, f = f)
    lon_range <- bbox[c("left", "right")]
    lat_range <- bbox[c("bottom", "top")]
  }
  lonlength <- diff(lon_range)
  latlength <- diff(lat_range)
  # Smallest zoom levels at which the lon/lat spans each fit the viewport
  zoomlon <- ceiling(log2(360 * 2/lonlength))
  zoomlat <- ceiling(log2(180 * 2/latlength))
  # FIXED: use min() instead of max() in order to include the whole bbox
  zoom <- min(zoomlon, zoomlat)
  zoom + adjust
}
# from https://gis.stackexchange.com/a/155495/76173
#
# Convert a ggmap object (a matrix of "#RRGGBB" colour strings carrying a
# "bb" bounding-box attribute) into a 3-layer RGB raster::RasterStack so it
# can be combined with other raster data.
ggmap_rast <- function(map){
  map_bbox <- attr(map, 'bb')
  # bb is stored as (ll.lat, ll.lon, ur.lat, ur.lon); extent() wants
  # (xmin, xmax, ymin, ymax), hence the reordering
  .extent <- raster::extent(as.numeric(map_bbox[c(2,4,1,3)]))
  my_map <- raster::raster(.extent, nrow= nrow(map), ncol = ncol(map))
  # Decode the colour strings into their red/green/blue components
  rgb_cols <- stats::setNames(as.data.frame(t(grDevices::col2rgb(map))),
                              c('red','green','blue'))
  # CONSISTENCY FIX: values<-, col2rgb and setNames were called unqualified
  # while every other call in this function uses an explicit namespace;
  # values() in particular fails unless the raster package is attached.
  red <- my_map
  raster::values(red) <- rgb_cols[['red']]
  green <- my_map
  raster::values(green) <- rgb_cols[['green']]
  blue <- my_map
  raster::values(blue) <- rgb_cols[['blue']]
  raster::stack(red, green, blue)
}
|
e103d6b7c45ea009e87b0d2de82018b448c7cb9b
|
0eac6f72fc988546ee57127b5741e3d12e2379a5
|
/man/craigsendi.Rd
|
836be112d1884ff7301c0aac494f15e1b1660321
|
[
"MIT"
] |
permissive
|
spedygiorgio/markovchain
|
4e70064a749f55d52bcdfffb7559e7027b161cc1
|
4eb1ec1b67f9231c129db5da3cc2ba51bd5f4121
|
refs/heads/master
| 2023-06-09T15:48:30.895373
| 2023-05-16T21:25:26
| 2023-05-16T21:25:26
| 31,481,152
| 111
| 58
|
NOASSERTION
| 2023-05-18T22:00:52
| 2015-02-28T23:54:38
|
R
|
UTF-8
|
R
| false
| true
| 904
|
rd
|
craigsendi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{craigsendi}
\alias{craigsendi}
\title{CD4 cells counts on HIV Infects between zero and six month}
\format{
The format is:
table [1:3, 1:3] 682 154 19 33 64 19 25 47 43
- attr(*, "dimnames")=List of 2
..$ : chr [1:3] "0-49" "50-74" "75-UP"
..$ : chr [1:3] "0-49" "50-74" "75-UP"
}
\source{
Estimation of the transition matrix of a discrete time Markov chain, Bruce A. Craig and Peter P. Sendi, Health Economics 11, 2002.
}
\usage{
data(craigsendi)
}
\description{
This is the table shown in the Craig and Sendi paper showing zero- and six-month CD4 cell counts in six brackets
}
\details{
Rows represent counts at the beginning, cols represent counts after six months.
}
\examples{
data(craigsendi)
csMc<-as(craigsendi, "markovchain")
steadyStates(csMc)
}
\references{
see source
}
\keyword{datasets}
|
ecbc96aa12bf1e7ce3d8ff0b0ee5dd9c74e65e68
|
0e3611663f54a79f65cf918d2e543dea6d42bb27
|
/src/core/assessments/bivariate/inactive/84_sc_iqr.R
|
bf082b74b5490f43d23915238572703fc03ff17c
|
[] |
no_license
|
imclab/cai
|
37e96c08d91340ebab5289fe80f6a3d091d08ea7
|
f8213ca990e664afd9ee68479ce2b5e89faa03b2
|
refs/heads/master
| 2020-12-26T00:07:36.286908
| 2014-07-13T00:22:01
| 2014-07-13T00:22:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
r
|
84_sc_iqr.R
|
# Copyright 2013, 2014 by Christopher L. Simons
# Registers a single-case (SC) IQR assessment under its name in the shared
# `assessments` list.
# NOTE(review): createSCAssessment() and `assessments` are defined elsewhere
# in the project; this script assumes both are already in scope when sourced.
assessment <- createSCAssessment("IQR", "IQR")
assessments[[assessment$name]] <- assessment
|
e399e29b83e63f3973ae3f1b756c9e3d3ff581b6
|
5db62e916fb7dc421067525da59a65af61e66e29
|
/test_code.R
|
e19e317bc53be2e0adab82b7226b55caa6b9ab99
|
[] |
no_license
|
alexandrashtein/test_250118
|
ab38f5fbcb0b379b20d1b22e2b77c0ff13d4ab97
|
085c00b88668342acce6a1251c53f9d6a603611c
|
refs/heads/master
| 2021-09-05T07:56:20.525046
| 2018-01-25T12:08:51
| 2018-01-25T12:08:51
| 118,907,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17
|
r
|
test_code.R
|
# Some code
# New
|
07fc4d06332cd591b5714e50eebc93805ccf8c86
|
0732213947df3a08c33d7414ea1e1dd461c142e7
|
/ch_RealSample/基金推薦/基金推薦_Hybrid.r
|
5ae0234fd4f2699e596615a13d566adf7fbcce67
|
[] |
no_license
|
arpitag1/R-for-data-science
|
ea3c580dd8288a1bb368c5aba14911785bec387b
|
f09fb8ebb9f44a0bec242126de8b3cdfe9ef5bc1
|
refs/heads/master
| 2022-02-08T23:37:06.525461
| 2017-09-29T06:51:16
| 2017-09-29T06:51:16
| null | 0
| 0
| null | null | null | null |
BIG5
|
R
| false
| false
| 11,319
|
r
|
基金推薦_Hybrid.r
|
# Fund recommendation -- collaborative filtering -- -------------------------
## Load packages
library(RODBC)
library(tidyverse)
library(recommenderlab)
library(reshape2)
library(data.table)
## Connect to the database
# SECURITY NOTE(review): the connection string hard-codes admin credentials
# (Uid=sa plus the password); move them to environment variables or a config
# file outside version control. String left unchanged here.
conn <- odbcDriverConnect("Driver=SQL Server;Server=dbm_public;Database=project2017;Uid=sa;Pwd=01060728;")
# Pre-computed fund (item) distance matrix and the user-item purchase matrix
load('./模型評估/fundsDistance.RData')
load('./模型評估/申購基金ui資料.RData')
fund_ids <- r_b_purchase@data@itemInfo$labels ## 2,235 funds in the U-I matrix
base_ids <- funds$基金代碼 ## 2,777 funds with content attributes
# Keep only the funds present in both sources
both_ids <- base_ids %in% fund_ids
both_ids2 <- fund_ids %in% base_ids
gower_mat <- as.matrix(gower_distance)
gower_mat <- gower_mat[both_ids,both_ids]
gower_mat %>% dim() # 2,170
rb_use <- r_b_purchase[,both_ids2] ## 45,350 * 2,170
rb_use <- rb_use[!rowCounts(rb_use)==0,] ## drop users with no purchases
rb_use <- rb_use[rowCounts(rb_use)>1,] ## keep users with more than 1 purchase
################################################
#### build model ######
################################################
set.seed(100)
# 80/20 random train/test split by user
which_train <- sample(x = c(TRUE, FALSE),
                      size = nrow(rb_use),
                      replace = TRUE,
                      prob = c(0.8, 0.2))
recc_data_train <-rb_use[which_train,]
recc_data_test <-rb_use[!which_train,]
# Item-based CF with Jaccard similarity on the binary purchase matrix
recc_model <- Recommender(data = recc_data_train,
                          method = "IBCF",
                          parameter = list(method = "Jaccard"))
dim(recc_model@model$sim)
image(recc_model@model$sim)
range(recc_model@model$sim)
###### Hybrid item similarity: weighted blend of content and CF ######
simIBCF <- as(recc_model@model$sim,"matrix")
simIContents <- 1- gower_mat  # distance -> similarity
weight.Content <- 0.5
sim_tot <- simIContents* weight.Content + simIBCF* (1 - weight.Content)
# image(sim_tot)
# Inject the blended similarity back into the fitted model
recc_model@model$sim <- as(sim_tot, "dgCMatrix")
################ evaluation ##############
eval_sets <- evaluationScheme(data = rb_use,
                              method = "cross-validation",
                              k = 3,
                              given = -1)
eval_prediction <- predict(object = recc_model,
                           newdata = getData(eval_sets,"known"),
                           n = 10, # items to recommend
                           type = "topNList")
eval_accuracy <- calcPredictionAccuracy(
  x = eval_prediction,
  data = getData(eval_sets, "unknown"),
  byUser = FALSE,
  given = 10)
eval_accuracy
evaluateModel <- function(recc_data,item_dist,
number_neighbors=20,
k=5,
weight.Content=0.5,
items_to_recommend=20){
#####################################################################
## recc_data : binaryRatingMatrix with transaction data(U-I matrix)
## item_dist : items matrix measure pairwise distance,
## number_neighbors :
## items_to_recommend: (20)
## k: default=5 , k-fold validation,
## weight.Content : default=0.5, weight for content
####################################################################
eval_sets <- evaluationScheme(data=recc_data,
method="cross-validation",
k=k, ## k:kfold validate
given=-1) ##
recc_model <- Recommender(data = getData(eval_sets, "train"),
method = "IBCF",
parameter = list(method = "Jaccard",
k = number_neighbors))
#### Hybrid similarity ####
sim_cf <- as(recc_model@model$sim, "matrix") ## similarity from CF model
sim_contents <- 1- item_dist ## similarity from CONTENT
sim_tot <- sim_contents*weight.Content + sim_cf*(1 - weight.Content)
recc_model@model$sim <- as(sim_tot, "dgCMatrix")
#### evaluate ####
eval_prediction <- predict(object = recc_model,
newdata=getData(eval_sets,"known"),
n=items_to_recommend,
type="topNList")
eval_accuracy <- calcPredictionAccuracy(x = eval_prediction,
data = getData(eval_sets,"unknown"),
byUser = F,
given = items_to_recommend)
print(eval_accuracy[c('precision','recall','TPR','FPR')])
return(eval_accuracy)
}
w <- seq(0,1,0.1)
w_index <- sapply(seq(1:10),function(x) paste0('weight_',x))
eva_resultLists<- lapply(w,function(x){
result <- evaluateModel(rb_use,gower_mat,weight.Content=x)
eva_resultLists <- tibble(eva = result)
})
eva_df <- sapply(eva_resultLists,'[[',"eva")[5:8,] %>% as_tibble()
colnames(eva_df) <- w
rownames(eva_df) <- c('prec','recall','TPR','FPR')
eva_df <- t(eva_df) # matrix
eva_df <- eva_df %>% as_tibble() %>% mutate(w=w)
ggplot(eva_df,aes(x=w,y=TPR)) + geom_smooth()
qplot(x=w,y=TPR,data=eva_df) + geom_smooth() +
scale_x_continuous(name="weight", limits=c(0, 1)) +
ggtitle('Weight of item-content') +
annotate("text", x=0.9, y=0.2, label= "item#: 20,\ncv: 5,\nnn: 20")
# qplot(x=FPR,y=TPR,data=eva_df)
#################################################################################
#### Predict results
#################################################################################
hybrid_reccList <- function(r_b,item_dist,weight.Content){
## 給出混合式IBCF推薦清單
recc_model <- Recommender(r_b,method = "IBCF")
sim_cf <- as(recc_model@model$sim, "matrix") ## similarity from CF model
sim_contents <- 1- item_dist ## similarity from CONTENT
sim_tot <- sim_contents*weight.Content + sim_cf*(1 - weight.Content)
recc_model@model$sim <- as(sim_tot, "dgCMatrix")
pred_result <- predict(recc_model,r_b,type="topNList",n=20)
return(pred_result)
}
predictList <- hybrid_reccList(rb_use,gower_mat,weight.Content=0.1)
df_t <- t(data.table::as.data.table(as(predictList,"list")))
itemNames <- sapply('基金',paste0,c(1:20))[,1]
df_HybridRecc <- df_t
colnames(df_HybridRecc) <- itemNames
df_upload <- as_data_frame(df_HybridRecc) %>% mutate(id = ids)
df_upload %>% head()
sqlSave(conn,
df_upload,
tablename="基金推薦_物品混合式_全清單",
rownames = F)
#################################################################################
# 資料: 申購基金 ----------------------------------------------------------------
#################################################################################
# sql_fund_purchase <- "select * from v_基金推薦_申購明細 where [申購登錄年] >= 2015"
# fund_purchase <- sqlQuery(conn,sql_fund_purchase)
# r_b_purchase <- getUIMatrix(fund_purchase) ## 稀疏矩陣,
# save(r_b_purchase,file="模型評估/申購基金ui資料.RData")
# 排除購買基金檔數 > 1, 2, 3, 4, 5 #
# r_b_purchase_gt2 <- r_b_purchase[rowCounts(r_b_purchase)>1]
# r_b_purchase_gt3 <- r_b_purchase[rowCounts(r_b_purchase)>2]
# r_b_purchase_gt4 <- r_b_purchase[rowCounts(r_b_purchase)>3]
# r_b_purchase_gt5 <- r_b_purchase[rowCounts(r_b_purchase)>4]
#
# # 評估算法 #
# ev5 <- evaluateAlgo(r_b_purchase_gt5)
# ev4 <- evaluateAlgo(r_b_purchase_gt4)
# ev3 <- evaluateAlgo(r_b_purchase_gt3)
# ev2 <- evaluateAlgo(r_b_purchase_gt2)
# ev <- evaluateAlgo(r_b_purchase)
#
# save(ev,ev2,ev3,file="模型評估/申購模型評估.RData")
#
# recommenderList_all <- recommenderList(r_b_purchase,'IBCF')
# recommenderList_gt2 <- recommenderList(r_b_purchase_gt2,findBestAlgo(ev2))
#
#
# ##########################################################################
# ### IBCF ,UBCF, POPULAR,
# ##########################################################################
# hot_model <- Recommender(r_b,method="POPULAR")
# pred_result <- predict(recommender_model,r_bex,type="topNList",n=20)
# hot_result <- predict(hot_model,r_b,type="topNList",n=20)
#
# pred_result_list <- as(pred_result,"list")
# hot_result_list <- as(hot_result,"list")
#
# ### best model --> UBCF --> 推薦
# df_t <- t(data.table::as.data.table(pred_result_list))
# itemNames <- sapply('item',paste0,c(1:20))[,1]
# df_exclude <- as.data.frame(df_t,stringsAsFactors = F);
# colnames(df_exclude) <- itemNames;
#
# ## 少於5次的 利用最熱銷產品推薦
# hot_dt_t <- t(as.data.table(hot_result_list))
#
# uid_include <- rownames(df_exclude)
# df_hot <- as.data.frame(hot_dt_t[!rownames(hot_dt_t) %in% uid_include,],
# stringsAsFactors = F)
# colnames(df_hot) <- itemNames
#################################################################################
# helper function ----------------------------------------------------------------
#################################################################################
# getUIMatrix <- function(fund) {
# ##### 從基金(申購/庫存)明細資料
# #### 整理成recommenderLab使用的rating_binaryMatrix (用戶-物品稀疏矩陣)
# fund1 <-
# fund %>%
# mutate(fundId=substr(基金中文名稱,1,3)) %>%
# group_by(身分證字號,fundId) %>%
# count() %>%
# ungroup() %>%
# arrange(desc(n)) %>%
# dcast(身分證字號~fundId,value.var="n")
#
# rownames(fund1) <-fund1$身分證字號
# fund1$身分證字號 <-NULL
#
# ### user - item matrix ###
#
# ui_trans <- as(data.matrix(fund1),"realRatingMatrix")
#
# r_b <- binarize(ui_trans,minRating=0.1) # 55798 * 2318
# r_b <- as(r_b,"binaryRatingMatrix")
# return(r_b)
# }
#
# evaluateAlgo <- function(r_b) {
# ## 評估算法結果
# ## ============
# ## params -- input :binary rating U-I sparse matrix
# ## --
#
# algorithms <- list(
# "random items" = list(name="RANDOM"),
# "popular items" = list(name="POPULAR"),
# "user-based CF" = list(name="UBCF",param=list(nn=50)),
# "item-based CF" = list(name="IBCF",param=list(k=50))
# # "SVD approx" = list(name="SVD",param=list(k=50)) ## can't work for binary case ....
# )
#
# scheme_rb_split <-evaluationScheme(r_b,method="split",train=0.9,k=1,given=-1)
# ev_result_split <-evaluate(scheme_rb_split,algorithms,type="topNList",n=c(1,3,5,10,20))
#
#
# # scheme_rb_cv <- evaluationScheme(r_bex,method="cross",k=4,given=-1) # cross
# # ev_resultEx_cross <- evaluate(scheme_rbex_cv,algorithms,type="topNList",
# # n=c(1,3,5,10,20))
#
# plot(ev_result_split,annotate=c(2,3))
# plot(ev_result_split,annotate=c(2,3),"prec/rec",legend="topleft")
#
# return(ev_result_split)
# }
#
# findBestAlgo <- function(ev){
# ### 透過check最大recall值,找出最佳模型 UBCF/IBCF/POPULAR/RANDOM ##
# lengthOfData <- dim(avg(ev$`user-based CF`))[1]
# ev_dataList <- avg(ev)
# recall_compare <- sapply(ev_dataList,`[[`,lengthOfData,'recall')
# best_model <- names(which.max(recall_compare))
# if (best_model=='popular items') {
# best_model <- 'popular'
# } else if (best_model=='user-based CF'){
# best_model <- 'UBCF'
# } else if (best_model=='item-based CF'){
# best_model <- 'IBCF'
# }
# return(best_model)
# }
#
# recommenderList <- function(r_b,best_model){
# ## 給出推薦清單
# recommender_model <- Recommender(r_b,method = best_model)
# print(paste0('best model :',best_model))
# pred_result <- predict(recommender_model,r_b,type="topNList",n=20)
# return(pred_result)
# }
#
#
#
#
|
b1935852f1e342cb864ae6904577c6ec3f971e13
|
50c21e2a446a39b2062fda951e0fd5519524709d
|
/ase_ys/src/st.16.ase.1.R
|
25aa85079a87a99a235d0eb16751f74a965d87c0
|
[
"BSD-2-Clause"
] |
permissive
|
orionzhou/misc
|
2e2990dca514464f8d82e10d89e044e237c729da
|
eef4799cb6d16fde8d37d0f896d209cc8d514366
|
refs/heads/main
| 2022-12-25T18:13:01.620988
| 2021-05-23T15:23:22
| 2021-05-23T15:23:22
| 239,606,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,764
|
r
|
st.16.ase.1.R
|
source('functions.R')
dirw = glue('{dird}/16_ase')
#{{{ functions
require(stats4)
require(multidplyr)
require(bbmle)
calc_bic <- function(i, p1, p2, h1, h2, disp, method='L-BFGS-B') {
#{{{
cat(i, "\n", sep = '')
#{{{ LLs
LL1 <- function(mu1, mu3) {
#{{{
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu1, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu3, log=T)
l = -sum(l1, l2, l3, l4)
ifelse(is.nan(l), 100, l)
#}}}
}
LL2 <- function(mu1, mu3, mu4) {
#{{{
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu1, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l = -sum(l1, l2, l3, l4)
ifelse(is.nan(l), 100, l)
#}}}
}
LL3 <- function(mu1, mu2, mu3) {
#{{{
mu4 = (mu2 / mu1) * mu3
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l = -sum(l1, l2, l3, l4)
ifelse(is.nan(l), 100, l)
#}}}
}
LL4 <- function(mu1, mu2, mu3) {
#{{{
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu3, log=T)
l = -sum(l1, l2, l3, l4)
ifelse(is.nan(l), 100, l)
#}}}
}
LL5 <- function(mu1, mu2, mu3, mu4) {
#{{{
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l = -sum(l1, l2, l3, l4)
ifelse(is.nan(l), 100, l)
#}}}
}
#}}}
size = 1 / disp
n_obs = length(p1)
m1 = round(mean(p1)); m2 = round(mean(p2)); m3 = round(mean(h1)); m4 = round(mean(h2))
p1 = round(p1); p2 = round(p2); h1 = round(h1); h2 = round(h2)
min_mu = 1e-2; max_mu = 1e8
fit1 = mle2(LL1, start = list(mu1=(m1+m2)/2, mu3=(m3+m4)/2),
lower = rep(min_mu, 2), upper=rep(max_mu, 2),
method = method)#, nobs = n_obs)
fit2 = mle2(LL2, start = list(mu1=(m1+m2)/2, mu3=m3, mu4=m4),
lower = rep(min_mu, 3), upper=rep(max_mu, 3),
method = method)#, nobs = n_obs)#, control = list(trace=3, maxit=1000))
fit3 = mle2(LL3, start = list(mu1=m1, mu2=m2, mu3=(m3+m4)/2),
lower = rep(min_mu, 3), upper=rep(max_mu, 3),
method = method)#, nobs = n_obs)
fit4 = mle2(LL4, start = list(mu1=m1, mu2=m2, mu3=(m3+m4)/2),
lower = rep(min_mu, 3), upper=rep(max_mu, 3),
method = method)#, nobs = n_obs)
fit5 = mle2(LL5, start = list(mu1=m1, mu2=m2, mu3=m3, mu4=m4),
lower = rep(min_mu, 4), upper=rep(max_mu, 4),
method = method)#, nobs = n_obs)
#coef(fitc)
bic = AICtab(fit1, fit2, fit3, fit4, fit5, k = log(n_obs), sort=F)
tb = as_tibble(bic) %>%
mutate(reg = c('conserved','unexpected','cis','trans','cis+trans')) %>%
arrange(dAIC)
tb$reg[1]
#}}}
}
calc_bic_2 <- function(i, p1,p2,h1,h2,bp1,bp2,bh1,bh2,disp, method='L-BFGS-B') {
#{{{
cat(i, "\n", sep = '')
#{{{ LLs
LL1 <- function(mu1,mu2,mu3,mu4,mu5,mu7) {
#{{{
mu6 = mu5 - mu1 + mu2
mu8 = mu7 - mu3 + mu4
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l5 = dnbinom(bp1, size=size, mu=mu5, log=T)
l6 = dnbinom(bp2, size=size, mu=mu6, log=T)
l7 = dnbinom(bh1, size=size, mu=mu7, log=T)
l8 = dnbinom(bh2, size=size, mu=mu8, log=T)
l = -sum(l1, l2, l3, l4, l5, l6, l7, l8)
ifelse(is.nan(l), 100, l)
#}}}
}
LL2 <- function(mu1,mu2,mu3,mu4,mu5,mu7,mu8) {
#{{{
mu6 = mu5 - mu1 + mu2
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l5 = dnbinom(bp1, size=size, mu=mu5, log=T)
l6 = dnbinom(bp2, size=size, mu=mu6, log=T)
l7 = dnbinom(bh1, size=size, mu=mu7, log=T)
l8 = dnbinom(bh2, size=size, mu=mu8, log=T)
l = -sum(l1, l2, l3, l4, l5, l6, l7, l8)
ifelse(is.nan(l), 100, l)
#}}}
}
LL3 <- function(mu1,mu2,mu3,mu4,mu5,mu6,mu7) {
#{{{
mu8 = (mu6-mu2) / (mu5-mu1) * (mu7-mu3) + mu4
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l5 = dnbinom(bp1, size=size, mu=mu5, log=T)
l6 = dnbinom(bp2, size=size, mu=mu6, log=T)
l7 = dnbinom(bh1, size=size, mu=mu7, log=T)
l8 = dnbinom(bh2, size=size, mu=mu8, log=T)
l = -sum(l1, l2, l3, l4, l5, l6, l7, l8)
ifelse(is.nan(l), 100, l)
#}}}
}
LL4 <- function(mu1,mu2,mu3,mu4,mu5,mu6,mu7) {
#{{{
mu8 = mu7 - mu3 + mu4
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l5 = dnbinom(bp1, size=size, mu=mu5, log=T)
l6 = dnbinom(bp2, size=size, mu=mu6, log=T)
l7 = dnbinom(bh1, size=size, mu=mu7, log=T)
l8 = dnbinom(bh2, size=size, mu=mu8, log=T)
l = -sum(l1, l2, l3, l4, l5, l6, l7, l8)
ifelse(is.nan(l), 100, l)
#}}}
}
LL5 <- function(mu1,mu2,mu3,mu4,mu5,mu6,mu7,mu8) {
#{{{
l1 = dnbinom(p1, size=size, mu=mu1, log=T)
l2 = dnbinom(p2, size=size, mu=mu2, log=T)
l3 = dnbinom(h1, size=size, mu=mu3, log=T)
l4 = dnbinom(h2, size=size, mu=mu4, log=T)
l5 = dnbinom(bp1, size=size, mu=mu5, log=T)
l6 = dnbinom(bp2, size=size, mu=mu6, log=T)
l7 = dnbinom(bh1, size=size, mu=mu7, log=T)
l8 = dnbinom(bh2, size=size, mu=mu8, log=T)
l = -sum(l1, l2, l3, l4, l5, l6, l7, l8)
ifelse(is.nan(l), 100, l)
#}}}
}
#}}}
size = 1 / disp
n_obs = length(p1)
m1 = round(mean(p1)); m2 = round(mean(p2)); m3 = round(mean(h1)); m4 = round(mean(h2))
m5 = round(mean(bp1)); m6 = round(mean(bp2)); m7 = round(mean(bh1)); m8 = round(mean(bh2))
p1 = round(p1); p2 = round(p2); h1 = round(h1); h2 = round(h2)
bp1 = round(bp1); bp2 = round(bp2); bh1 = round(bh1); bh2 = round(bh2)
min_mu = 1e-2; max_mu = 1e8
fit1 = mle2(LL1, start = list(mu1=m1,mu2=m2,mu3=m3,mu4=m4,mu5=m5,mu7=m7),
lower = rep(min_mu, 6), upper=rep(max_mu, 6),
method = method)#, nobs = n_obs)
fit2 = mle2(LL2, start = list(mu1=m1,mu2=m2,mu3=m3,mu4=m4,mu5=m5,mu7=m7,mu8=m8),
lower = rep(min_mu, 7), upper=rep(max_mu, 7),
method = method)#, nobs = n_obs)
fit3 = mle2(LL3, start = list(mu1=m1,mu2=m2,mu3=m3,mu4=m4,mu5=m5,mu6=m6,mu7=m7),
lower = rep(min_mu, 7), upper=rep(max_mu, 7),
method = method)#, nobs = n_obs)
fit4 = mle2(LL4, start = list(mu1=m1,mu2=m2,mu3=m3,mu4=m4,mu5=m5,mu6=m6,mu7=m7),
lower = rep(min_mu, 7), upper=rep(max_mu, 7),
method = method)#, nobs = n_obs)
fit5 = mle2(LL5, start = list(mu1=m1,mu2=m2,mu3=m3,mu4=m4,mu5=m5,mu6=m6,mu7=m7,mu8=m8),
lower = rep(min_mu, 8), upper=rep(max_mu, 8),
method = method)#, nobs = n_obs)
#coef(fitc)
bic = AICtab(fit1, fit2, fit3, fit4, fit5, k = log(n_obs), sort=F)
tb = as_tibble(bic) %>%
mutate(reg = c('conserved','unexpected','cis','trans','cis+trans')) %>%
arrange(dAIC)
tb$reg[[1]]
#}}}
}
#}}}
i=98
#p1=ti$rc.p1[[i]]; p2=ti$rc.p2[[i]]; h1=ti$rc.h1[[i]]; h2=ti$rc.h2[[i]]; disp=ti$disp[i]
n_cpu = 8
#{{{ prepare for parallel computing
n_thread = n_cpu
cluster = new_cluster(n_thread)
cluster_library(cluster, "tidyverse")
cluster_library(cluster, "stats4")
cluster_library(cluster, "bbmle")
cluster_copy(cluster, 'calc_bic')
#}}}
#{{{ run cis/trans tests
fi = glue('{dirw}/01.raw.rds')
ra = readRDS(fi)
tmt = ra$tmt
min_rc = 10
ti = tmt %>%
filter(trc.p1 + trc.p2 >= 2*min_rc, trc.h1+trc.h2 >= min_rc) %>%
mutate(i= 1:n())
tw = ti %>%# dplyr::slice(1:10) %>%
partition(cluster) %>%
mutate(reg = pmap_chr(list(i,rc.p1,rc.p2,rc.h1,rc.h2,disp), calc_bic)) %>%
collect()
to = tw %>% mutate(n.p1 = map_int(rc.p1, length), n.p2=map_int(rc.p2, length),
n.h1 = map_int(rc.h1, length), n.h2=map_int(rc.h2, length)) %>%
mutate(mrc.p1 = trc.p1/n.p1, mrc.p2 = trc.p2/n.p2,
mrc.h1 = trc.h1/n.h1, mrc.h2 = trc.h2/n.h2) %>%
mutate(prop.p = mrc.p1/(mrc.p1+mrc.p2), prop.h = mrc.h1/(mrc.h1+mrc.h2)) %>%
select(cond,cross,gid,mrc.p1,mrc.p2,mrc.h1,mrc.h2,prop.p,prop.h,reg)
fo = file.path(dirw, '05.modes.rds')
saveRDS(to, fo)
#}}}
|
3fbe5efffedae912cd05b2d31f30aa6d203e6a00
|
475cf3ba4a5e4fe5fca1a4bda10424c33dbac34a
|
/plot1.R
|
2693aa6ee88bdec042eab4b45c2e3c46d8dd746e
|
[] |
no_license
|
moolins12/ExData_Plotting1
|
f773d5adea0467ea819fcdb301bb4e9e108cf681
|
9dc932b8afbbbd1fee889eb1a4921c6666ec0da3
|
refs/heads/master
| 2022-05-01T11:23:19.574225
| 2022-04-25T12:46:50
| 2022-04-25T12:46:50
| 135,728,188
| 0
| 0
| null | 2018-06-01T16:33:56
| 2018-06-01T14:31:43
| null |
UTF-8
|
R
| false
| false
| 1,394
|
r
|
plot1.R
|
##### Exploratory Data Analysis - Course Project 1
### Plot 1
### Download and read in datafiles
filename <- "Course_Project_Data.zip"
if(!file.exists("Data")) {
create.dir("Data")
}
setwd("Data")
if (!file.exists(filename)) {
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, filename, method = "curl")
}
if(!file.exists("Data")) {
unzip(filename)
}
##### Look at the files
list.files()
library(data.table)
library(dplyr)
library(lubridate)
# Read in the dataset and look at the data
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
na.strings = "?")
head(power)
str(power)
# Convert each column to the proper class (c(Date, Time, numeric, ...))
power$Date <- dmy(as.character(power$Date))
power$Time <- hms(as.character(power$Time))
for (i in 3:9) {
power[ ,i] <- as.numeric(as.character(power[ ,i]))
}
str(power)
## Subset the data for 1/2/2007 to 2/2/2007
subpower <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")
head(subpower)
### Create plot 1 - histogram of Global Active Power
hist(subpower$Global_active_power, col = "red",
xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.copy(png, file = "plot1.png") ## Copy the histogram plot to a PNG file
dev.off() # Close the PNG device
|
bc893aec4b96aca9d47db4c2d5936bfe239d2c44
|
9fd92a3597adbc6a711d9ef5e63881477073e629
|
/man/A.frac.Rd
|
7cfd5c106b6f81ebdff3b15c99df29c18c741e09
|
[] |
no_license
|
ryamada22/SelfDecABP
|
1adc6675002de12880b2d12e64eca0cafb91f32f
|
6d1b945832a160091e703cde623547036dd6e251
|
refs/heads/master
| 2020-05-29T12:25:19.480839
| 2016-09-21T01:38:58
| 2016-09-21T01:38:58
| 68,680,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,037
|
rd
|
A.frac.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manual R codes.r
\name{A.frac}
\alias{A.frac}
\title{Calculate the Fraction of A-arm Selection of Two-armed Bernoulli-outcomes.}
\usage{
A.frac(x)
}
\arguments{
\item{x}{a vector of 4 elements including A$Success, A$Failure, B$Success, and B$Failure.}
}
\value{
the fraction of A-arm selected
}
\description{
Calculate the fraction of A_armed for every 2X2 table numerated in the serialTables () when N patients to be treated.
}
\details{
A and B are examples of two arms, which resulting in favorable outcomes (success) and unfavorable outcomes (failure).
}
\examples{
AS<-13
# the successful counts of A treatment
AF<-5
# the failures of A treatment
BS<-2
# the successful counts of B treatment
BF<-1
#the failures of B treatment
N<- AS+AF+BS+BF
A.frac(x=c(AS,AF,BS,BF))
library(combinat)
N <- 10
out <- serialTables(N)
ABSF<-sapply(out$Z, unlist)
A.fraction<-apply(ABSF,1,A.frac)
# the fraction of A-arm selected per table.
A.fraction
}
\keyword{NA}
|
d7e11d9addeae01d45b24d12b31b2c86fdc729d6
|
5ca959c15764a014400c9ed90cf393a9172e62f4
|
/3 - Scripts/Paper 1/results.R
|
5ef5f2ac7d8e5dc73a6e3751e76d31294148e0eb
|
[] |
no_license
|
mccorvie/BackToSchool2
|
08e8a6ae5a8004d7d400bc6b9e0caa15f7bd201b
|
6bc57c6a956022700c1d4d1656822f14bbc23fc0
|
refs/heads/master
| 2023-07-13T06:53:54.763632
| 2021-08-16T02:42:11
| 2021-08-16T02:42:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,392
|
r
|
results.R
|
#*************************** Code to replicate results and figures ************************#
# #
# #
# #
#******************************************************************************************#
####*********************************** SETUP ******************************************####
# set working directory
wd = "~/Dropbox/Schools/Public code/"
setwd(wd)
# source global options
source("global_options.R")
# update working directory
setwd("./4 - Output/Paper 1")
####*********************************** FUNCTIONS ******************************************####
#### CLEAN UP ####
prep_sims = function(sims, name_val = ""){
out = sims %>%
mutate(school = ifelse(grepl("Elem", name_val), "Elementary school", "High school"),
strategy = ifelse(notify, "Classroom quarantine", "Symptomatic isolation"),
strategy = ifelse(test, "Weekly screening", strategy),
strategy = ifelse(teacher_susp < 1, "Teacher vaccination", strategy),
seed_type = ifelse(start_type =="child", "Child", "Teacher"),
seed_type = ifelse(start_type =="mix", "Mix", seed_type),
attack_level = ifelse(attack.y==.01, "High", "Medium"),
attack_level = ifelse(attack.y==.03, "Low", attack_level),
attack_level = factor(attack_level, c("Low", "Medium", "High")),
tot = as.numeric(tot),
R0 = as.numeric(R0),
school.infs = as.numeric(adult_tot) + as.numeric(children_tot),
school.infs.symp = .6*as.numeric(adult_tot) +
.2*as.numeric(children_tot),
school.infs.symp2 = .8*as.numeric(adult_tot) +
.4*as.numeric(children_tot),
school.infs.perc = ifelse(tot==0, NA, school.infs.symp/tot),
source.asymp = as.numeric(source_asymp),
source.asymp.family = as.numeric(source_asymp_family_kids) + as.numeric(source_asymp_family_staff),
id2 = ifelse(id==1, "5-day", id),
id2 = ifelse(id==2, "Cohorting", id2),
id2 = ifelse(id==3, "1/2 class size", id2),
id2 = ifelse(id==4, "A/B", id2),
id2 = ifelse(type=="A/B" & total_days=="1", "A/B/C/D", id2),
id2 = ifelse(type=="A/B" & total_days=="2.2", "A/B (v2)", id2),
id2 = ifelse(type=="On/off" & total_days=="1", "On/off (1)", id2),
id2 = ifelse(type=="On/off" & total_days=="2", "On/off (2)", id2),
id2 = factor(id2, levels = rev(c("A/B/C/D", "On/off (1)", "On/off (2)", "A/B", "A/B (v2)", "1/2 class size", "Cohorting", "5-day"))),
strategy = factor(strategy, levels = c("Symptomatic isolation", "Classroom quarantine", "Teacher vaccination", "Weekly screening")))
return(out)
}
#### MEAN GRAPHS ####
graph_sims1 = function(sims, title = "", palette = pal, ymax = NA){
pal = palette
out = sims %>% filter(!is.na(id2)) %>%
group_by(id2, n_HH, disperse_transmission, dedens,
test, isolate, teacher_susp, notify, child_susp,
child_trans, start_type, school, strategy, attack_level) %>%
summarize(mean.new = mean(as.numeric(tot)),
mean.R0 = mean(as.numeric(R0)),
perc.zero = mean(as.numeric(tot==0)),
mt_5 = mean(as.numeric(tot>5)),
mt_5_avg = mean(as.numeric(tot)[as.numeric(tot)>5]),
fam_no_symp = mean(tot>0 & symp_kids==0)/mean(tot>0),
mean.R0 = mean(R0),
mean.school.infs = mean(as.numeric(school.infs)),
mean.school.infs.symp = mean(as.numeric(school.infs.symp)),
mean.school.infs.symp2 = mean(as.numeric(school.infs.symp2)),
mean.school.infs.perc = mean(as.numeric(school.infs.perc), na.rm = T),
mean.source.asymp = mean(source.asymp),
mean.source.asymp.family = mean(source.asymp.family),
### change when possib
mean.kids = mean(as.numeric(children_tot)),
mean.staff = mean(as.numeric(school_adult_tot)),
mean.family = mean(as.numeric(family_tot))) %>% ungroup()
a = ggplot(out,
aes(x = attack_level, y = mean.new, group = id2, col = id2)) +
scale_color_manual(name = "", values = pal) +
geom_line() +
geom_point(col = "white", size = 5) +
facet_grid(school~strategy) + theme_minimal(base_size = size) +
geom_text(size = font-1, aes(label = case_when(mean.new>10~round(mean.new), mean.new <10 & mean.new > 1~round(mean.new, 1), mean.new<1~round(mean.new, 1)))) + theme_opts + labs(x = "", y = "", title = title) + ylim(0, ymax)
b = ggplot(out,
aes(x = attack_level, y = mean.school.infs.symp, group = id2, col = id2)) +
scale_color_manual(name = "", values = pal) +
geom_line() +
geom_point(col = "white", size = 5) +
facet_grid(school~strategy) + theme_minimal(base_size = size) +
geom_text(size = font-1, aes(label = case_when(mean.school.infs.symp>10~round(mean.school.infs.symp),
mean.school.infs.symp <10 & mean.school.infs.symp > 1~round(mean.school.infs.symp, 1),
mean.school.infs.symp<1~round(mean.school.infs.symp, 2)))) + theme_opts + labs(x = "", y = "", title = title) + ylim(0, ymax)
d = ggplot(out %>% mutate(mean.R0 = mean.R0),
aes(x = attack_level, y = mean.R0, group = id2, col = id2)) +
scale_color_manual(name = "", values = pal) +
geom_line() +
geom_point(col = "white", size = 5) +
facet_grid(school~strategy) + theme_minimal(base_size = size) +
geom_text(size = font-1, aes(label = case_when(mean.R0>10~round(mean.R0),
mean.R0<10 & mean.R0> 1~round(mean.R0, 1),
mean.R0<1~round(mean.R0, 2)))) + theme_opts + labs(x = "", y = "", title = title) + ylim(0, ymax)
# make plots
out2 = out %>% gather(var, value, mean.kids, mean.staff, mean.family) %>%
mutate(var_cat = ifelse(var=="mean.kids", "Students","Staff"),
var_cat = ifelse(var=="mean.family", "Family", var_cat),
var_cat = factor(var_cat, levels = c("Students", "Staff", "Family")))
c = ggplot(out2,
aes(x = attack_level, y = value, group = id2, col = id2)) +
scale_color_manual(name = "", values = pal) +
geom_line() +
geom_point(col = "white", size = 5) +
facet_grid(var_cat~strategy, scales = "free") + theme_minimal(base_size = size) +
geom_text(size = font-1, aes(label = case_when(value>10~round(value),
value <10 & value>1~round(value, 1),
value<1~round(value, 2)))) + theme_opts + labs(x = "", y = "", title = title) + ylim(0, ymax)
return(list(a,b,c,out,out2,d))
}
#### STOCHASTIC GRAPHS ####
graph_sims2 = function(sims, title = ""){
  ## Boxplots of total transmissions per simulation run ("stochastic"
  ## view), restricted to the Classroom-quarantine / Weekly-screening
  ## strategies and the 5-day / A-B schedules.
  ## Args:
  ##   sims: simulation results; needs columns sim.x, strategy, id, id2,
  ##         tot, attack_level, school (schema assumed from usage — confirm).
  ##   title: plot title.
  ## Returns: a single ggplot object.
  out = sims %>% filter(id>0) %>% dplyr::group_by(sim.x, strategy, id) %>%
    ## Per (sim.x, strategy, id) group: 99.5th percentile used below to
    ## trim extreme runs; IQR/median feed the `outlier` flag.
    ## NOTE(review): `median = mean(tot)` computes the MEAN despite its
    ## name — confirm whether median() was intended.
    mutate(quant = quantile(tot, .995),
           IQR = IQR(tot),
           median = mean(tot),
           #tot = tot + rnorm(tot, mean = .05, sd = .1),
           #tot = ifelse(tot<0, 0, tot)
    ) %>% ungroup() %>% filter(tot < quant) %>%
    filter(strategy%in%c("Classroom quarantine", "Weekly screening") &
             id2 %in% c("5-day", "A/B")) %>%
    ## id.strategy levels are manually reordered so the legend pairs the
    ## two schedules within each strategy.
    mutate(grp = paste(as.numeric(id),as.numeric(attack_level), strategy),
           id.strategy = factor(paste(id2, "\n", strategy, sep = "")),
           id.strategy = factor(id.strategy, levels = levels(id.strategy)[c(3,4,1,2)]),
           outlier = tot > median + IQR*1.5)
  ## NOTE(review): `outlier` is computed but not used in the plot below.
  ## facet_wrap is immediately overridden by facet_grid — the wrap call
  ## appears to be dead; confirm before removing.
  out_fig = ggplot(out,
                   aes(x = attack_level, y = tot, group = grp, col = id.strategy)) +
    geom_boxplot(outlier.alpha = .04) +
    facet_wrap(school~attack_level, scales = "free") + facet_grid(school~.) +
    theme_minimal(base_size = size) +
    theme_opts + labs(x = "", y = "", title = title) +
    scale_color_manual(name = "", values = pal, breaks = c("5-day\nClassroom quarantine",
                                                           "5-day\nWeekly screening",
                                                           "A/B\nClassroom quarantine",
                                                           "A/B\nWeekly screening"))
  return(out_fig)
}
graph_sims3 = function(sims, n_teacher = 1, n_student = 1, ymax = NA){
  ## Cumulative-incidence summaries and three ggplot panels for the
  ## "dynamic" simulations.
  ## The Remote/Symptomatic-isolation runs are duplicated and relabelled
  ## under each other strategy (dyn_elem3..5) so every strategy panel has
  ## a remote baseline to compare against (used via scenario==3 below).
  ## Args:
  ##   sims: raw simulation output passed to prep_sims().
  ##   n_teacher, n_student: denominators used to convert counts to
  ##     per-person cumulative incidence.
  ##   ymax: upper y-axis limit for all plots (NA = free).
  ## Returns: list(plot_out, out, plot_out2, plot_out3).
  dyn_elem1 = prep_sims(sims, "Elem") %>% filter(type!="Remote")
  dyn_elem2 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation")
  dyn_elem3 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation") %>% mutate(strategy= "Classroom quarantine", notify = "TRUE")
  dyn_elem4 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation") %>% mutate(strategy= "Weekly screening", notify = "TRUE", test = "TRUE")
  dyn_elem5 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation") %>% mutate(strategy= "Teacher vaccination", teacher_susp = "0.33")
  dyn_elem = bind_rows(dyn_elem1, dyn_elem2, dyn_elem3, dyn_elem4, dyn_elem5)
  ## Average outcomes over simulation replicates within each scenario cell.
  out = dyn_elem %>% group_by(sim.y, id, id2, n_HH, disperse_transmission, dedens,
                              test, isolate, teacher_susp, notify, child_susp,
                              child_trans, start_type, school, strategy,
                              attack_level, adult_prob, scenario, type) %>%
    summarize(all.mean = mean(as.numeric(all)),
              all.median = median(as.numeric(all)),
              teachers.mean = mean(as.numeric(adult)),
              students.mean = mean(as.numeric(children)),
              family.mean = mean(as.numeric(family)),
              tot = mean(as.numeric(tot))) %>% ungroup() %>%
    mutate(grp = factor(paste(n_HH, disperse_transmission, dedens,
                              teacher_susp, child_susp, child_trans, start_type,
                              attack_level, adult_prob))) %>%
    group_by(grp) %>%
    ## Per-person incidence; the *_inc columns are the excess over the
    ## Remote baseline (scenario==3) within the same parameter group.
    ## Denominators: assumes 2 adults per teacher household and 3 people
    ## per student household (n_teacher*2 + n_student*3) — TODO confirm.
    mutate(All = (all.mean)/(n_teacher*2 + n_student*3),
           Staff = (teachers.mean)/n_teacher,
           Students = (students.mean)/n_student,
           Family = (family.mean)/(n_teacher + n_student*2),
           All_inc = All-All[scenario==3],
           Staff_inc = Staff-Staff[scenario==3],
           Students_inc = Students-Students[scenario==3],
           Family_inc = Family-Family[scenario==3],
           scenario = scenario,
           ## presumably rescales a 3-week probability to incidence per
           ## 100,000 — TODO confirm the /3*100000 conversion
           adult_prob = as.numeric(adult_prob)/3*100000,
           id_cat = ifelse(scenario==1, "5-day", "A/B"),
           id_cat = ifelse(scenario==3, "Remote", id_cat),
           n_HH = ifelse(n_HH=="0", 0, as.numeric(n_HH)-1),
           strategy = factor(strategy, levels = c("Symptomatic isolation", "Classroom quarantine",
                                                  "Teacher vaccination", "Weekly screening"))) %>%
    filter(attack_level!="Low")
  ## Long format for faceting; inc_pch marks cells whose excess over the
  ## remote baseline exceeds 0.01 (plotted as a point, else hidden).
  out2 = out %>% gather(var, value, All, Family, Staff, Students) %>%
    mutate(var = factor(var, levels = c("Students", "Staff", "Family", "All")),
           inc_pch = case_when(var=="All"~All_inc>.01, var =="Students"~Students_inc>.01,
                               var=="Staff"~Staff_inc>.01, var=="Family"~Family_inc>.01),
           inc_pch = ifelse(inc_pch==F, NA, inc_pch))
  ## Incidence vs community incidence, by strategy and subpopulation.
  plot_out = ggplot(out2,
                    aes(x = adult_prob, y = value, group = paste(var, attack_level, id_cat, strategy),
                        col = id_cat, lty = attack_level)) + geom_line() +
    geom_point(aes(pch = inc_pch)) +
    scale_shape_manual(values = c(20, NA), guide = F) +
    facet_grid(var~strategy) + theme_minimal() + theme_opts +
    labs(x = "", y = "") +
    scale_color_manual(name = "Strategy", values = c(pal[c(1,4)], "black")) +
    scale_linetype(name = "Prevention measures") +
    labs(title = "Cumulative incidence over 8 weeks") + ylim(0, ymax)
  ## Incidence vs out-of-session household mixing, medium attack level only.
  plot_out2 = ggplot(out2 %>% filter(id_cat%in%c("5-day", "A/B") & attack_level=="Medium" & adult_prob %in% c(10, 50, 100)),
                     aes(x = n_HH, y = value, group = paste(adult_prob, var, attack_level, id_cat, strategy),
                         col = as.factor(adult_prob), lty = id_cat)) + geom_line() +
    facet_grid(var~strategy) + theme_minimal() + theme_opts + geom_point() +
    labs(x = "Number of households mixed when school is out of session", y = "") +
    scale_color_manual(name = "Community incidence", values = c(pal[c(1,2,4)], "black")) +
    scale_linetype(name = "Strategy") +
    labs(title = "Cumulative incidence over 8 weeks") + ylim(0, ymax)
  ## Excess incidence (over remote) in long format.
  out3 = out %>% gather(var, value, All_inc, Family_inc, Staff_inc, Students_inc) %>%
    separate(var, into = c("var_cat", "junk"), sep = "_") %>%
    mutate(var_cat = factor(var_cat, levels = c("Students", "Staff", "Family", "All")))
  plot_out3 = ggplot(out3,
                     aes(x = adult_prob, y = value, group = paste(var_cat, attack_level, id_cat, strategy),
                         col = id_cat, lty = attack_level)) + geom_line() + geom_point() +
    facet_grid(var_cat~strategy) + theme_minimal() + theme_opts +
    labs(x = "", y = "") +
    scale_color_manual(name = "Strategy", values = c(pal[c(1,4)], "black")) +
    scale_linetype(name = "Prevention measures") +
    labs(title = "Cumulative incidence over 8 weeks") + ylim(0, ymax)
  return(list(plot_out, out, plot_out2, plot_out3))
}
graph_sims4 = function(sims, n_teacher = 1, n_student = 1, ymax = NA){
  ## Sensitivity-analysis variant of graph_sims3: same preparation and
  ## aggregation, but computes no *_inc baselines and returns only the
  ## household-mixing plot.
  ## NOTE(review): most of this body duplicates graph_sims3 — a shared
  ## helper would remove the duplication; confirm before refactoring.
  ## Returns: list(plot_out2) (a one-element list, matching graph_sims3's
  ## list return shape).
  dyn_elem1 = prep_sims(sims, "Elem") %>% filter(type!="Remote")
  dyn_elem2 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation")
  dyn_elem3 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation") %>% mutate(strategy= "Classroom quarantine", notify = "TRUE")
  dyn_elem4 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation") %>% mutate(strategy= "Weekly screening", notify = "TRUE", test = "TRUE")
  dyn_elem5 = prep_sims(sims, "Elem") %>% filter(type=="Remote" & strategy=="Symptomatic isolation") %>% mutate(strategy= "Teacher vaccination", teacher_susp = "0.33")
  dyn_elem = bind_rows(dyn_elem1, dyn_elem2, dyn_elem3, dyn_elem4, dyn_elem5)
  ## Average over replicates, then convert to per-person incidence.
  out = dyn_elem %>% group_by(sim.y, id, id2, n_HH, disperse_transmission, dedens,
                              test, isolate, teacher_susp, notify, child_susp,
                              child_trans, start_type, school, strategy,
                              attack_level, adult_prob, scenario, type) %>%
    summarize(all.mean = mean(as.numeric(all)),
              all.median = median(as.numeric(all)),
              teachers.mean = mean(as.numeric(adult)),
              students.mean = mean(as.numeric(children)),
              family.mean = mean(as.numeric(family)),
              tot = mean(as.numeric(tot))) %>% ungroup() %>%
    mutate(grp = factor(paste(n_HH, disperse_transmission, dedens,
                              teacher_susp, child_susp, child_trans, start_type,
                              attack_level, adult_prob))) %>%
    group_by(grp) %>%
    mutate(All = (all.mean)/(n_teacher*2 + n_student*3),
           Staff = (teachers.mean)/n_teacher,
           Students = (students.mean)/n_student,
           Family = (family.mean)/(n_teacher + n_student*2),
           scenario = scenario,
           adult_prob = as.numeric(adult_prob)/3*100000,
           id_cat = ifelse(scenario==1, "5-day", "A/B"),
           id_cat = ifelse(scenario==3, "Remote", id_cat),
           n_HH = ifelse(n_HH=="0", 0, as.numeric(n_HH)-1),
           strategy = factor(strategy, levels = c("Symptomatic isolation", "Classroom quarantine",
                                                  "Teacher vaccination", "Weekly screening"))) %>%
    filter(attack_level!="Low")
  out2 = out %>% gather(var, value, All, Family, Staff, Students) %>%
    mutate(var = factor(var, levels = c("Students", "Staff", "Family", "All")))
  ## Incidence vs out-of-session household mixing, medium attack level only.
  plot_out2 = ggplot(out2 %>% filter(id_cat%in%c("5-day", "A/B") & attack_level=="Medium" & adult_prob %in% c(10, 50, 100)),
                     aes(x = n_HH, y = value, group = paste(adult_prob, var, attack_level, id_cat, strategy),
                         col = as.factor(adult_prob), lty = id_cat)) + geom_line() +
    facet_grid(var~strategy) + theme_minimal() + theme_opts + geom_point() +
    labs(x = "Number of households mixed when school is out of session", y = "") +
    scale_color_manual(name = "Community incidence", values = c(pal[c(1,2,4)], "black")) +
    scale_linetype(name = "Strategy") +
    labs(title = "Cumulative incidence over 8 weeks") + ylim(0, ymax)
  return(list(plot_out2))
}
####*********************************** PROCESS DATA ******************************************####
## Each .RData file loads an object `sims` into the global workspace;
## prep_sims()/graph_sims*() then turn it into summary tables and plots.
## The a*/b* objects are reused in the RESULTS and figure sections below.
# Base Elementary School
name_val = "fig_output_Base Elem.RData"; load(name_val)
base_elem = prep_sims(sims %>% filter(start_type==1), name_val = "Elem")
a = graph_sims1(base_elem, title = "A")
a1 = graph_sims2(base_elem, title = "A")
# Base High School
name_val = "fig_output_Base HS.RData"; load(name_val)
base_hs = prep_sims(sims)
b = graph_sims1(base_hs, title = "B")
b1 = graph_sims2(base_hs, title = "B")
# Base Elementary School - mod
name_val = "fig_output_Base_Elem_MOD.RData"; load(name_val)
base_elem = prep_sims(sims %>% filter(start_type==1), name_val = "Elem")
a.mod = graph_sims1(base_elem, title = "A")
a.mod.symp = graph_sims1(base_elem %>% filter(start_symp=="1"), title = "A")
# Base High School - mod
name_val = "fig_output_Base_HS_MOD.RData"; load(name_val)
base_hs = prep_sims(sims)
b.mod = graph_sims1(base_hs, title = "B")
b.mod.symp = graph_sims1(base_hs %>% filter(start_symp=="1"), title = "B")
# ELEMENTARY SCHOOL SUPPLEMENTS
# equal inf
name_val = "fig_output_Elem_supp1.RData"
load(name_val)
elem_s1 = prep_sims(sims, name_val = "Elem")
# more schedules
name_val = "fig_output_Elem_supp2.RData"
load(name_val)
elem_s2 = prep_sims(sims, name_val = "Elem") %>% filter(total_days!=5)
# overdispersed transmission
name_val = "fig_output_Elem_supp3.RData"
load(name_val)
elem_s3 = prep_sims(sims, name_val = "Elem")
# teacher as index case
name_val = "fig_output_Elem_supp5.RData"
load(name_val)
elem_s5 = prep_sims(sims, name_val = "Elem")
# make graphs
esa = graph_sims1(elem_s1, title = "Children have equal infectiousness")
esb = graph_sims1(elem_s2, title = "With additional schedules", palette = pal2)
esc = graph_sims1(elem_s3, title = "Children have overdispersed transmission")
esd = graph_sims1(elem_s5, title = "Teacher as index case")
# HIGH SCHOOL SUPPLEMENTS
# less susceptible
name_val = "fig_output_HS_supp1.RData"; load(name_val)
high_s1 = prep_sims(sims)
# different schedules
name_val = "fig_output_HS_supp2.RData"; load(name_val)
high_s2 = prep_sims(sims)
# overdispersed transmission
name_val = "fig_output_HS_supp3.RData"; load(name_val)
high_s3 = prep_sims(sims)
hsa = graph_sims1(high_s1, title = "Adolescents less susceptible", ymax = 80)
hsb = graph_sims1(high_s2, title = "With different schedules", palette = pal2, ymax = 80)
hsc = graph_sims1(high_s3, title = "Adolescents have overdispersed transmission", ymax = 80)
# DYNAMIC ELEMENTARY
## The "Sens" runs are concatenated with the base runs (sims1) before plotting.
load("fig_output_Dynamic Elem.RData")
a3 = graph_sims3(sims, n_student = 638, n_teacher = 60, ymax = .3)
sims1 = sims
load("fig_output_Dynamic Elem Sens.RData")
a4 = graph_sims3(bind_rows(sims, sims1), n_student = 638, n_teacher = 60, ymax = .3)
# DYNAMIC HIGH SCHOOL
load("fig_output_Dynamic High.RData")
b3 = graph_sims3(sims, n_student = 1451, n_teacher = 124, ymax = .75)
sims1 = sims
load("fig_output_Dynamic High Sens.RData")
b4 = graph_sims4(bind_rows(sims, sims1), n_student = 1451, n_teacher = 124, ymax = 1)
####*********************************** RESULTS ******************************************####
## Interactive section: each expression prints (or stores in k/f/g) a
## number quoted in the manuscript text. a[[4]], b[[4]], es*[[4]] and
## hs*[[4]] are the summary data frames (`out`) returned in position 4
## of graph_sims1's result list; a3[[2]]/b3[[2]] are graph_sims3's `out`.
#### Impact of in-school mitigation ####
# Paragraph 1
a[[4]] %>% filter(id2=="5-day" & strategy=="Classroom quarantine") %>% dplyr::select(mean.new)
a[[4]] %>% filter(id2=="A/B" & strategy=="Classroom quarantine") %>% dplyr::select(mean.new)
max(a[[4]] %>% filter(attack_level=="High") %>% dplyr::select(mean.new))
# Paragraph 2
b[[4]] %>% filter(id2=="5-day" & strategy=="Classroom quarantine") %>% dplyr::select(mean.new)
#### Quarantine, teacher vaccination, and screening ####
# Paragraph 1
k = b[[4]] %>% filter(id2=="5-day" & attack_level=="Low") %>% dplyr::select(mean.new)
k$mean.new[3]/k$mean.new[2]
k = b[[4]] %>% filter(id2=="5-day" & attack_level=="High") %>% dplyr::select(mean.new)
k$mean.new[3]/k$mean.new[2]
## Ratio of Classroom quarantine vs Teacher vaccination, overall and for
## staff, averaged over all parameter cells (mean.new[1]/mean.new[2]
## relies on the row order within each group — NOTE(review): confirm the
## strategy ordering is stable).
a[[4]] %>% filter(strategy %in% c("Classroom quarantine", "Teacher vaccination")) %>%
  group_by(id2, n_HH, disperse_transmission, dedens, child_susp, child_trans, start_type, school, attack_level) %>%
  summarize(overall = mean.new[1]/mean.new[2],
            teachers = mean.staff[1]/mean.staff[2]) %>%
  ungroup() %>% summarize(mean(overall), mean(teachers))
b[[4]] %>% filter(strategy %in% c("Classroom quarantine", "Teacher vaccination")) %>%
  group_by(id2, n_HH, disperse_transmission, dedens, child_susp, child_trans, start_type, school, attack_level) %>%
  summarize(overall = mean.new[1]/mean.new[2],
            teachers = mean.staff[1]/mean.staff[2]) %>%
  ungroup() %>% summarize(mean(overall), mean(teachers))
esd[[4]] %>% filter(strategy %in% c("Classroom quarantine", "Teacher vaccination")) %>%
  group_by(id2, n_HH, disperse_transmission, dedens, child_susp, child_trans, start_type, school, attack_level) %>%
  summarize(overall = mean.new[1]/mean.new[2],
            teachers = mean.staff[1]/mean.staff[2]) %>%
  ungroup() %>% summarize(mean(overall), mean(teachers))
# Paragraph 2
a[[4]] %>% filter(id2=="5-day" & strategy=="Weekly screening") %>% dplyr::select(mean.new)
b[[4]] %>% filter(id2=="5-day" & strategy=="Weekly screening") %>% dplyr::select(mean.new)
## Share of transmissions occurring among students/staff/family.
a[[4]] %>% ungroup() %>% summarize(kids = mean(mean.kids)/mean(mean.new),
                                   staff = mean(mean.staff)/mean(mean.new),
                                   family = mean(mean.family)/mean(mean.new),
                                   tot = mean(mean.new))
b[[4]] %>% ungroup() %>% summarize(kids = mean(mean.kids)/mean(mean.new),
                                   staff = mean(mean.staff)/mean(mean.new),
                                   family = mean(mean.family)/mean(mean.new),
                                   tot = mean(mean.new))
#### Observability ####
## Fraction of all transmissions that are symptomatic (and hence visible).
a[[4]] %>% ungroup() %>% summarize(out = mean(mean.school.infs.symp/mean.new))
b[[4]] %>% ungroup() %>% summarize(out = mean(mean.school.infs.symp/mean.new))
a[[4]] %>% ungroup() %>% summarize(out = mean(mean.school.infs.symp2/mean.new))
b[[4]] %>% ungroup() %>% summarize(out = mean(mean.school.infs.symp2/mean.new))
#### Stochastic variation in secondary transmission ####
a[[4]] %>% filter(id2=="5-day" & strategy=="Classroom quarantine") %>% dplyr::select(perc.zero, mt_5, mt_5_avg)
b[[4]] %>% filter(id2=="5-day" & strategy=="Classroom quarantine") %>% dplyr::select(perc.zero, mt_5, mt_5_avg)
a[[4]] %>% filter(strategy=="Classroom quarantine") %>% dplyr::select(id2, attack_level, perc.zero, mt_5, mt_5_avg)
b[[4]] %>% filter(strategy=="Classroom quarantine") %>% dplyr::select(id2, attack_level, perc.zero, mt_5, mt_5_avg)
#### Transmissions over the course of the semester ####
f = a3[[2]] %>% filter(scenario!=3 & id_cat=="A/B") %>% group_by(adult_prob, attack_level) %>%
  summarize(mean(All), mean(All_inc<.01), mean(Staff_inc), mean(Staff_inc<.01))
f = a3[[2]] %>% filter(scenario!=3 & id_cat=="5-day" & attack_level == "Medium") %>%
  group_by(adult_prob, strategy) %>%
  summarize(mean(All), mean(All_inc), mean(All_inc<.01),
            mean(Staff), mean(Staff_inc), mean(Staff_inc<.01))
f = a3[[2]] %>% filter(scenario!=3 & id_cat=="5-day" & attack_level == "High") %>%
  group_by(adult_prob, strategy) %>%
  summarize(mean(All_inc<.01), mean(Staff_inc<.01), mean(Staff_inc))
g = b3[[2]] %>% filter(scenario!=3 & id_cat=="5-day" & attack_level == "Medium") %>%
  group_by(adult_prob, strategy) %>%
  summarize(mean(All_inc<.01), mean(Staff_inc<.01))
g = b3[[2]] %>% filter(scenario!=3 & id_cat=="5-day" & attack_level == "High") %>%
  group_by(adult_prob, strategy) %>%
  summarize(mean(All), mean(All_inc), mean(All_inc<.01), mean(Staff_inc<.01), mean(Staff_inc))
#### Sensitivity analyses ####
## Ratio of sensitivity-run means to the base-run means, cell by cell.
bind_rows(a[[4]] %>% mutate(val = "base"), esa[[4]] %>% mutate(val = "sens")) %>%
  group_by(id2, n_HH, disperse_transmission, dedens, start_type, school, attack_level, strategy) %>%
  arrange(id2, n_HH, disperse_transmission, dedens, start_type, school, attack_level, strategy) %>%
  summarize(out = mean.new[2]/mean.new[1]) %>% ungroup() %>% summarize(mean(out))
bind_rows(b[[4]] %>% mutate(val = "base"), hsa[[4]] %>% mutate(val = "sens")) %>%
  group_by(id2, n_HH, disperse_transmission, dedens, start_type, school, attack_level, strategy) %>%
  summarize(out = mean.new[2]/mean.new[1]) %>% ungroup() %>% summarize(mean(out))
a[[4]] %>% filter(id2=="5-day" & strategy=="Classroom quarantine") %>% dplyr::select(perc.zero, mt_5, mt_5_avg)
esc[[4]] %>% filter(id2=="5-day" & strategy=="Symptomatic isolation") %>% dplyr::select(perc.zero, mt_5, mt_5_avg)
hsc[[4]] %>% filter(id2=="5-day" & strategy=="Symptomatic isolation") %>% dplyr::select(perc.zero, mt_5, mt_5_avg)
b[[4]] %>% filter(id2=="5-day" & strategy=="Classroom quarantine") %>% dplyr::select(perc.zero, mt_5, mt_5_avg)
#### Save figures ####
## Write all figures into "Saved figures" via file.path() instead of
## setwd(), so the script leaves the working directory untouched.
## ggplot/ggarrange objects are wrapped in print() so the figures render
## even when this script is run via source()/Rscript (top-level
## auto-printing does not happen there, which left Fig2/Fig3 blank).
fig_dir <- "Saved figures"
jpeg(file.path(fig_dir, "Fig2.jpg"), width = 8, height = 6.5, units = "in", res = 500)
print(ggarrange(a[[1]], b[[1]], common.legend = TRUE, ncol = 1, legend = "right"))
dev.off()
jpeg(file.path(fig_dir, "Fig3.jpg"), width = 7, height = 6, units = "in", res = 500)
figure <- ggarrange(a1, b1, ncol = 1, common.legend = TRUE, legend = "right")
print(annotate_figure(figure, left = "Number of secondary transmissions", bottom = "School attack rate"))
dev.off()
jpeg(file.path(fig_dir, "Fig4.jpg"), width = 8, height = 6.5, units = "in", res = 500)
print(a3[[1]])
dev.off()
jpeg(file.path(fig_dir, "Fig5.jpg"), width = 8, height = 6.5, units = "in", res = 500)
print(b3[[1]])
dev.off()
|
35e608195e83f2ab09fd26deeff4337152a5762e
|
c9e51dd4ef1053ba79e2f4cf15ffbeff785db518
|
/man/run_glm_combo_formal.Rd
|
ea9236832f445587bf701a8fad91c068f13ce53f
|
[] |
no_license
|
jacob-ogre/section7.quality
|
3479908aa6bb2b600760729931905713419706ed
|
8c16ce0297b54eac0d957bf8f7c49ec88579eaa1
|
refs/heads/master
| 2021-01-20T00:41:42.170864
| 2017-10-20T19:29:14
| 2017-10-20T19:29:14
| 83,167,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 730
|
rd
|
run_glm_combo_formal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{run_glm_combo_formal}
\alias{run_glm_combo_formal}
\title{Run binomial GLM for overall quality of formal consultations}
\usage{
run_glm_combo_formal(x)
}
\arguments{
\item{x}{The 'formal' data.frame of formal consultations}
}
\value{
A list of results including:
\describe{
\item{mods}{Nine GLM (binomial) model objects}
\item{AICs}{AICc values for the nine models}
\item{summaries}{Results from \code{summary(mod)} for the nine models}
\item{aovs}{Analysis of Variance for the nine models}
}
}
\description{
Run binomial GLM for overall quality of formal consultations
}
\examples{
\dontrun{
run_glm_combo_formal(formal)
}
}
|
a1984253e00e4c200e2a8803aaefc6edd932f465
|
897ae677171a43d05f2f9ebe7bf09230b2064646
|
/R/input.check.R
|
7bacc364cbbdb72c403bc61b96401d5920b87846
|
[] |
no_license
|
cran/ACTCD
|
663aabf6200fc95e45e1098255ec7dded4143251
|
594f873d8591f137cede01739e791f989b910359
|
refs/heads/master
| 2021-01-10T21:23:27.917813
| 2018-04-23T04:25:33
| 2018-04-23T04:25:33
| 17,677,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
input.check.R
|
input.check <-
function(Y, Q, cluster.method="HACA", HACA.link="complete",
label.method="2a",perm=NULL)
{
## Validate the response matrix Y, the Q-matrix Q and the method options.
## On the first problem found a warning is signalled and checking stops
## (the warning message is returned); otherwise NULL is returned.
##
## Args:
## Y: binary response matrix (examinees x items); coerced via as.matrix().
## Q: binary Q-matrix (items x attributes); coerced via as.matrix().
## cluster.method: "Kmeans" or "HACA".
## HACA.link: accepted but not validated here.
## label.method: labelling method; method "1" has extra requirements.
## perm: permutation matrix, required when label.method == "1".
##################################################
# Check Y and Q #
##################################################
## Coerce both inputs to matrices before any dimension/value checks.
if (!is.matrix(Y)) Y <- as.matrix(Y)
if (!is.matrix(Q)) Q <- as.matrix(Q)
## Items in Y (columns) must line up with items in Q (rows).
if (ncol(Y) != nrow(Q)) {
return(warning("Item numbers in the response matrix are not equal to that in Q-matrix."))
}
## Both matrices must be strictly binary.
if (!all(Y %in% c(1, 0))) {
return(warning("Only 0 and 1 are allowed in the response matrix."))
}
if (!all(Q %in% c(1, 0))) {
return(warning("Only 0 and 1 are allowed in the Q-matrix."))
}
##################################################
# Check method #
##################################################
if (!cluster.method %in% c("Kmeans", "HACA")) {
return(warning("Only Kmeans or HACA can be used as cluster method options."))
}
## Label method "1" is restricted to 3 or 4 attributes and needs `perm`.
if (label.method == "1" && !ncol(Q) %in% c(3, 4)) {
return(warning('label method "1" is only available for 3 or 4 attributes.'))
}
if (label.method == "1" && is.null(perm)) {
return(warning('when label method "1" used, the "perm" is needed to be specified.'))
}
}
|
69b9695b3997ce5e07f7baacceca2ea32cf59df5
|
3f2c6d6bd10005be79120c2bf100602bda31dc0f
|
/man/infctMRoi.chkclosemz.Rd
|
75d15140d02278e4f22227ad40bf5569745cfd86
|
[] |
no_license
|
tonedivad/metaboGoS
|
e0913d188f832d455837b8199ddc8284c94eab56
|
f2038f34ddf1635d0e23ecccd8679a85615e36d2
|
refs/heads/master
| 2021-01-23T01:59:45.823999
| 2017-06-25T16:17:10
| 2017-06-25T16:17:10
| 85,953,526
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 664
|
rd
|
infctMRoi.chkclosemz.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mergeROI.r
\name{infctMRoi.chkclosemz}
\alias{infctMRoi.chkclosemz}
\title{Merge slices from the same sample within a single ROI}
\usage{
infctMRoi.chkclosemz(tmproi, indicatorVec, thrMZ = 0.001, dmz = 5e-04,
dppm = 2.5)
}
\arguments{
\item{tmproi}{Data frame of ROIs}
\item{thrMZ}{overlap in mz}
\item{dmz}{dmz}
\item{dppm}{dppm}
\item{indicatorVec}{Sample id indicator}
\item{mzmed}{mzmed}
\item{mzmin}{mzmin}
\item{mzmax}{mzmax}
}
\value{
list of entries to be merged in tmproi - may be null
}
\description{
Merge slices from the same sample within a single ROI
}
\keyword{internal}
|
c48c805595c18399e2602901992b50f847f5702e
|
37520057f8324bbddcebaa3276fdc5c7390bca14
|
/eda/no_beds_no_baths.R
|
90767a3437df049d874c059748cd6ed641d64736
|
[] |
no_license
|
JordanJamesSands/melbourne_housing
|
8aa9eac49f5f6ae25a2de4df5793d07a06d150b3
|
e00ed6b81c48b67f5b88bdba102902f5ead1e749
|
refs/heads/master
| 2020-04-29T10:29:44.704673
| 2019-04-30T06:54:41
| 2019-04-30T06:54:41
| 176,063,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 357
|
r
|
no_beds_no_baths.R
|
#explore properties without any bathrooms or bedrooms
## assumes a data frame `d` with columns nbedroom, nbathroom and ncar is
## already in the workspace — TODO confirm which script loads it.
no_beds = subset(d,nbedroom==0)
View(no_beds)
no_baths = subset(d,nbathroom==0)
View(no_baths)
## proportion of listings with zero bedrooms OR zero bathrooms
mean(d$nbedroom==0 | d$nbathroom==0,na.rm=TRUE)
#only 0.2 % fit this strange occurrence
#are they garages?
no_beds_baths = subset(d,nbathroom==0 & nbedroom==0)
## average car-space count for the zero-bed/zero-bath listings
mean(no_beds_baths$ncar)
#no they are not garages
|
0ca32e4b2e16c3fefb1c6338839d2cefb793e3fa
|
021cf7b0683c948a3936848b27b339dc429e8128
|
/main.R
|
47baae5d93e0d320150df20b736eb553cffb6caa
|
[] |
no_license
|
jdh009/Lesson4exercise4
|
ae6ac946e83540b1ec732a3df7aa7fbfc4733ab2
|
c923699fe8fead529e30f07edc1d85281aba0402
|
refs/heads/master
| 2016-08-07T06:33:19.516391
| 2015-01-09T03:42:30
| 2015-01-09T03:42:30
| 28,999,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,949
|
r
|
main.R
|
### Jeroen Roelofs
### January 08 2015
## Computes the NDVI change between a 1990 Landsat 5 scene and a 2014
## Landsat 8 scene: unpack archives, mask clouds, compute NDVI per year,
## difference them and write the result to disk.
# Load packages.
library(rgdal)
library(raster)
library(downloader)
# Source Scripts
source("R/Cloud2NA.R")
source("R/CalculateNDVI.R")
# Download data from source. Does not work for windows, got the files by direct download. Can skip this part if needed
# download('https://www.dropbox.com/s/i1ylsft80ox6a32/LC81970242014109-SC20141230042441.tar.gz?dl=0', "data/LC81970242014109-SC20141230042441.tar", quiet = T, mode = "wb")
# download('https://www.dropbox.com/s/akb9oyye3ee92h3/LT51980241990098-SC20150107121947.tar.gz?dl=0', "data/LT51980241990098-SC20150107121947.tar", quiet = T, mode = "wb")
# Unpackage data.
untar("data/LC81970242014109-SC20141230042441.tar", exdir = 'data/LC81970242014109-SC20141230042441/')
untar("data/LT51980241990098-SC20150107121947.tar", exdir = 'data/LT51980241990098-SC20150107121947/')
# Load data Landsat 5 (1990).
## Landsat 5: band 3 = red, band 4 = NIR; cfmask = cloud mask layer.
Landsat5b3 <- raster("data/LT51980241990098-SC20150107121947//LT51980241990098KIS00_sr_band3.tif")
Landsat5b4 <- raster("data/LT51980241990098-SC20150107121947//LT51980241990098KIS00_sr_band4.tif")
Landsat5Clouds <- raster("data/LT51980241990098-SC20150107121947//LT51980241990098KIS00_cfmask.tif")
# Load data Landsat 8 (2014).
## Landsat 8: band 4 = red, band 5 = NIR (band numbering shifted vs Landsat 5).
Landsat8b4 <- raster("data/LC81970242014109-SC20141230042441//LC81970242014109LGN00_sr_band4.tif")
Landsat8b5 <- raster("data/LC81970242014109-SC20141230042441//LC81970242014109LGN00_sr_band5.tif")
Landsat8Clouds <- raster("data/LC81970242014109-SC20141230042441//LC81970242014109LGN00_cfmask.tif")
#preprocessing cloud removal
# Remove surface water and replace 'clear Land by NA'
## cfmask value 0 means clear; set it to NA so only cloud/shadow/water
## pixels remain flagged in the mask.
Landsat5Clouds[Landsat5Clouds == 0] <- NA
Landsat8Clouds[Landsat8Clouds == 0] <- NA
#Remove clouds from images
#Landsat5 (CF = Cloud Free)
Landsat5b3CF <- overlay(x = Landsat5b3, y = Landsat5Clouds, fun = cloud2NA)
Landsat5b4CF <- overlay(x = Landsat5b4, y = Landsat5Clouds, fun = cloud2NA)
#Landsat8 (CF = Cloud Free)
Landsat8b4CF <- overlay(x = Landsat8b4, y = Landsat8Clouds, fun = cloud2NA)
Landsat8b5CF <- overlay(x = Landsat8b5, y = Landsat8Clouds, fun = cloud2NA)
#Calculating NDVI for 1990 & 2014
Landsat5NDVI <- CalculateNDVI(Landsat5b3CF, Landsat5b4CF)
Landsat8NDVI <- CalculateNDVI(Landsat8b4CF, Landsat8b5CF)
#Test plot NDVI
# plot(Landsat5NDVI)
# plot(Landsat8NDVI)
# NDVI Change between 1990 & 2014 and Cutting to the overlapping extent
# Warning message: In Landsat5NDVI - Landsat8NDVI : Raster objects have different extents. Result for their intersection is returned
## Note the sign: 1990 minus 2014, so positive values mark NDVI decline by 2014.
NDVIchanges <- Landsat5NDVI - Landsat8NDVI
# Another way to calculate the extent but with more code and one step extra
# extent <- intersect(Landsat5b3, Landsat8b4)
## extent <- intersect(Landsat5b4, Landsat8b5)
## extent <- intersect(Landsat8b4, Landsat5b3)
## extent <- intersect(Landsat8b5, Landsat5b4)
# extent(extent)
# Plot result
plot(NDVIchanges)
#Write to disk
writeRaster(NDVIchanges, filename = "Output/NDVIchanges", overwrite = TRUE)
|
2c11439a3a6715538baf95825904a23f881585fd
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/lawstat/R/lorenz.curve.R
|
4727fd99538723d0bf760ed0f25049b147214f25
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| false
| 3,209
|
r
|
lorenz.curve.R
|
`lorenz.curve` <-
function(data, weight=NULL, mul=FALSE, plot.it=TRUE,
         main=NULL, xlab=NULL, ylab=NULL, xlim=c(0,1), ylim=c(0,1), ... )
{
### Plot the Lorenz curve ("Lorenz Single" by default, "Lorenz Multiple"
### when mul = TRUE) annotated with the relative mean deviation, the
### Gini index and L(1/2).
###
### `data` is either a data frame with 2 columns (first column treated
### as the data vector, second as the weight vector) or a single-column
### vector, in which case `weight` must be supplied separately.
### Called for its plot side effect; returns NULL invisibly.
  ### Check length of data and weight vectors ###
  if (any(is.na(data))) {
    stop("NAs in data. Please try again.")
  }
  if (is.vector(data) & !(is.list(data) & length(data) > 1) | (is.data.frame(data) & length(data) == 1)) {
    ## Single-column data: a separate weight vector is mandatory.
    if (is.null(weight)) {
      stop("A single-column weight vector is required. Please try again.")
    }
    if (!(is.vector(weight)) | (is.list(weight) & length(weight) > 1)) {
      stop("The weight input is not a single-column vector. Please try again.")
    }
    if (any(is.na(weight))) {
      stop("NAs in the weight vector. Please try again.")
    }
    dframe <- data.frame(data, weight)
    ## Keep the caller's expressions as column names (used as axis labels).
    names(dframe)[1] <- deparse(substitute(data))
    names(dframe)[2] <- deparse(substitute(weight))
  } else {
    dframe <- data.frame(data)[1:2]
  }
  if (any(is.na(dframe[, 1])) | is.factor(dframe[, 1]) | is.character(dframe[, 1])) {
    stop("The first column contains invalid input. Please try again.")
  }
  if (any(is.na(dframe[, 2])) | is.factor(dframe[, 2]) | is.character(dframe[, 2])) {
    stop("The second column contains invalid input. Please try again.")
  }
  ### Process the data vector based on weighting ###
  if (mul) {
    ## "Lorenz Multiple": a weight > 1 contributes `weight` copies of
    ## its weight/data ratio, otherwise a single value.
    vv <- NULL
    for (k in seq_len(nrow(dframe))) {
      if (dframe[k, 2] > 1) {
        vv <- c(vv, rep(dframe[k, 2] / dframe[k, 1], dframe[k, 2]))
      } else {
        vv <- c(vv, dframe[k, 2] / dframe[k, 1])
      }
    }
    if (is.null(main)) main <- "Lorenz Curve Multiple"
  } else {
    vv <- dframe[, 2] / dframe[, 1]
    if (is.null(main)) main <- "Lorenz Curve"
  }
  nn <- length(vv)
  relative.mean.dev <- 1 / nn * sum(abs(vv - mean(vv))) / mean(vv)
  ### gini index ###
  ## Sum of absolute differences over all ordered pairs, vectorized with
  ## outer(); same value as the original O(n^2) double loop, much faster.
  d <- sum(abs(outer(vv, vv, "-")))
  gini <- d / nn / (nn - 1) / 2 / mean(vv)
  case <- sort(vv)
  tot <- sum(case)
  qs <- case / tot
  ## Cumulative shares, prefixed with 0 so the curve starts at the origin.
  y <- cumsum(c(0, qs))
  x <- seq(0, 1, 1 / (length(qs)))
  if (plot.it) {
    ## Single-panel layout; restore the device's par() settings on exit
    ## (the original modified par(mfrow) even when not plotting and
    ## never restored it).
    op <- par(mfrow = c(1, 1))
    on.exit(par(op), add = TRUE)
    if (is.null(xlab)) xlab <- paste("Cumulative fraction of", names(dframe)[1])
    if (is.null(ylab)) ylab <- paste("Cumulative fraction of", names(dframe)[2])
    plot(x, y, type = "l", xaxs = "i", yaxs = "i", main = main, xlab = xlab,
         ylab = ylab, xlim = xlim, ylim = ylim, ...)  ## Lorenz curve
    abline(0, 1, ...)                                 ## line of perfect equality
    segments(x, x, x, y)                              ## shade the area between the two
    legend(0.05, 0.8, "rel.mean.de=", bty = "n", cex = 0.6)
    legend(0.18, 0.8, round(relative.mean.dev, 3), bty = "n", cex = 0.6)
    legend(0.05, 0.75, "gini.index=", bty = "n", cex = 0.6)
    legend(0.18, 0.75, round(gini, 3), bty = "n", cex = 0.6)
    legend(0.05, 0.65, "L(1/2)=", bty = "n", cex = 0.6)
    legend(0.18, 0.65, round(median(y), 3), bty = "n", cex = 0.6)
  }
  invisible(NULL)
}
|
4219054b1057a8d244968ee90c5260b24772eaf1
|
85e89b63736bd186d0dac6b515fe4de60e9c3821
|
/R/CodesFun.R
|
1d69cd11f4e9885c092616a5621428b6e17c4a34
|
[] |
no_license
|
dtbinh/RQDA
|
cbad809817296860b2b0093a90ad80e8e1f3fb22
|
d840c6feccc7f98128ff668555b48d3c6fe074d7
|
refs/heads/master
| 2020-06-14T01:43:06.239102
| 2016-11-24T10:52:27
| 2016-11-24T10:52:27
| 75,526,174
| 1
| 0
| null | 2016-12-04T08:33:19
| 2016-12-04T08:33:19
| null |
UTF-8
|
R
| false
| false
| 47,933
|
r
|
CodesFun.R
|
addcode <- function(name,conName="qdacon",assignenv=.rqda,...) {
  ## Insert a new free code named `name` into the freecode table of the
  ## open project, unless a code with that name already exists.
  ## Args:
  ##   name: code name; an empty string is silently ignored.
  ##   conName: name of the DBI connection object looked up in assignenv.
  ##   assignenv: environment holding the connection (defaults to .rqda,
  ##     which also supplies `owner`).
  ## Returns: NULL invisibly; called for its database side effect.
  if (name != "") {
    con <- get(conName, assignenv)
    maxid <- dbGetQuery(con, "select max(id) from freecode")[[1]]
    nextid <- if (is.na(maxid)) 1L else maxid + 1L
    ## Escape single quotes so a name such as "don't" cannot break the
    ## string-built SQL below (the original interpolated `name` raw,
    ## which failed -- or allowed injection -- on names containing ').
    safeName <- gsub("'", "''", name)
    write <- FALSE
    if (nextid == 1) {
      ## Empty table: nothing to collide with.
      write <- TRUE
    } else {
      dup <- dbGetQuery(con, sprintf("select name from freecode where name='%s'", safeName))
      if (nrow(dup) == 0) write <- TRUE
    }
    if (write) {
      dbGetQuery(con, sprintf("insert into freecode (name, id, status,date,owner)
                               values ('%s', %i, %i,%s, %s)",
                              safeName, nextid, 1L, shQuote(date()), shQuote(.rqda$owner)))
    }
  }
}
CodeNamesUpdate <- function(CodeNamesWidget=.rqda$.codes_rqda,sortByTime=TRUE,decreasing = FALSE,...)
## Refresh the code-list widget with the names of all active (status=1)
## free codes, sorted alphabetically or, when sortByTime, by their
## creation date via OrderByTime(). Shows a message dialog when no
## project is open. Returns nothing useful; called for its GUI side effect.
{
  if (is_projOpen()){
    freecode <- RQDAQuery("select name, id,date from freecode where status=1 order by lower(name)")
    codeName <- freecode$name
    if (nrow(freecode)!=0) {
      ## SQLite returns UTF-8 text; mark it so non-ASCII names display correctly.
      Encoding(codeName) <- "UTF-8"
      if (sortByTime){
        codeName <- codeName[OrderByTime(freecode$date,decreasing=decreasing)]
      }
    }
    ## Assignment fails when the widget has been destroyed; ignore that case.
    tryCatch(CodeNamesWidget[] <- codeName, error=function(e){})
  } else gmessage(gettext("Cannot update Code List in the Widget. Project is closed already.\n", domain = "R-RQDA"),con=TRUE)
}
CodeNamesWidgetUpdate <- function(CodeNamesWidget=.rqda$.codes_rqda,sortByTime=TRUE,decreasing = FALSE,CodeId=NULL,...)
  ## CodeNamesWidgetUpdate is the alternative function of CodeNamesUpdate, should be used afterwards
  ## Like CodeNamesUpdate, but can restrict the display to the codes whose
  ## ids are listed in `CodeId`.
  ## NOTE(review): unlike CodeNamesUpdate, `codeName` is only assigned when
  ## the query returns rows, so with an empty freecode table the widget
  ## assignment errors and is silently swallowed by tryCatch — confirm
  ## whether the widget should be cleared in that case.
{
  if (is_projOpen()){
    freecode <- dbGetQuery(.rqda$qdacon, "select name, id,date from freecode where status=1 order by lower(name)")
    if (nrow(freecode)!=0) {
      if (!is.null(CodeId)) {freecode <- freecode[freecode$id %in% CodeId,]}
      codeName <- freecode$name
      ## SQLite returns UTF-8 text; mark it so non-ASCII names display correctly.
      Encoding(codeName) <- "UTF-8"
      if (sortByTime){
        codeName <- codeName[OrderByTime(freecode$date,decreasing=decreasing)]
      }
    }
    tryCatch(CodeNamesWidget[] <- codeName, error=function(e){})
  } else gmessage(gettext("Cannot update Code List in the Widget. Project is closed already.\n", domain = "R-RQDA"),con=TRUE)
}
mark <- function(widget,fore.col=.rqda$fore.col,back.col=NULL,addButton=FALSE,buttonLabel="",codingTable="coding"){
  ## Apply the coding colour scheme to the currently selected text in a
  ## gtext widget and return the selection's anchor-adjusted character
  ## positions, suitable for storing in `codingTable`.
  ## modified so can change fore.col and back.col easily
  ## Args:
  ##   widget: the gtext widget holding the open file.
  ##   fore.col/back.col: tag names to apply; NULL skips that tag.
  ##   addButton/buttonLabel: when TRUE, insert "label<" / ">label"
  ##     anchors around the selection.
  ## Returns: list(start, end, text) for a non-empty selection, with
  ##   start/end corrected for anchors; NULL (invisibly) otherwise.
  index <- sindex(widget,includeAnchor=TRUE,codingTable=codingTable)
  startI <- index$startI ## start and end iter
  endI <- index$endI
  selected <- index$seltext
  ## Selection text comes from GTK as UTF-8; mark it explicitly.
  Encoding(selected) <- "UTF-8"
  startN <- index$startN # translate iter pointer to number
  endN <- index$endN
  if (selected != ""){## only when selected text chunk is not "", apply the color scheme.
    buffer <- slot(widget,"widget")@widget$GetBuffer()
    if(addButton) {
      InsertAnchor(widget,sprintf("%s<",buttonLabel),index=startN,handler=TRUE)
      InsertAnchor(widget,sprintf(">%s",buttonLabel),index=endN + 1)
    }
    ## Re-fetch the iters from the marks: inserting anchors above may
    ## have shifted the raw offsets, marks track their positions.
    startIter <- buffer$GetIterAtMark(index$startMark)$iter
    endIter <- buffer$GetIterAtMark(index$endMark)$iter
    if (!is.null(fore.col)){ ## when col is NULL, it is skipped
      buffer$ApplyTagByName(fore.col,startIter,endIter)## make use of property of gtext().
    }
    if (!is.null(back.col)){
      buffer$ApplyTagByName(sprintf("%s.background",back.col),startIter,endIter)
    }
    startN <- index$startN
    endN <- index$endN
    ## Convert widget offsets to file offsets by subtracting the number
    ## of anchor characters before each position.
    startN <- startN - countAnchorsWithFileName(to=startN,codingTable=codingTable)
    endN <- endN - countAnchorsWithFileName(to=endN,codingTable=codingTable)
    ##startN <- startN - countAnchors(.rqda$.openfile_gui,from=0,to=startN)
    ##endN <- endN - countAnchors(.rqda$.openfile_gui,from=0,to=endN)
    return(list(start=startN,end=endN,text=selected))
  }
}
markRange <- function(widget,from,to,rowid,fore.col=.rqda$fore.col,back.col=NULL,addButton=FALSE,buttonLabel="",buttonCol=.rqda$codeMark.col,codingTable="coding"){
  ## Highlight an already-coded text chunk (identified by its file
  ## offsets `from`/`to` and its `rowid` in `codingTable`) in the open
  ## file's gtext widget, optionally inserting a "<label>" anchor button.
  ## File offsets are translated to widget offsets by counting the other
  ## codings' start anchors and annotation anchors that precede them.
  ## Called for its GUI side effect; returns NULL.
  if (from != to){
    FileName <- tryCatch(svalue(.rqda$.root_edit),error=function(e){})
    if (!is.null(FileName)){
      Fid <- RQDAQuery(sprintf("select id from source where status =1 and name='%s'",enc(FileName)))$id
      idx <- RQDAQuery(sprintf("select selfirst,selend,rowid from %s where fid=%i and status=1", codingTable, Fid))
      if (nrow(idx)!=0) idx <- idx[idx$rowid!=rowid,c("selfirst","selend")] ## exclude itself
      anno <- RQDAQuery(sprintf("select position,rowid from annotation where status=1 and fid=%s",Fid))
      ## Shift offsets by the number of anchors inserted before them.
      allidx <- c(idx$selfirst,anno$position)
      if (!is.null(allidx)){
        from <- from + sum(allidx <= from)
        to <- to + sum(allidx <= to)
      }
      buffer <- slot(widget,"widget")@widget$GetBuffer()
      startIter <- buffer$GetIterAtOffset(from)$iter
      endIter <- buffer$GetIterAtOffset(to)$iter
      ## Named marks "<rowid>.1"/"<rowid>.2" track the chunk boundaries
      ## even after later insertions move the raw offsets.
      buffer$CreateMark(sprintf("%s.1",rowid),where=startIter)
      buffer$CreateMark(sprintf("%s.2",rowid),where=endIter)
      buffer <- slot(widget,"widget")@widget$GetBuffer()
      if(addButton) {
        InsertAnchor(widget,sprintf("<%s>",buttonLabel),index=from,label.col=buttonCol,
                     handler=TRUE, EndMarkName=sprintf("%s.2", rowid))
      }
      ## Re-resolve the marks (anchor insertion may have moved offsets)
      ## before applying the colour tags.
      m1 <- buffer$GetMark(sprintf("%s.1", rowid))
      startIter <- buffer$GetIterAtMark(m1)$iter
      m2 <- buffer$GetMark(sprintf("%s.2", rowid))
      endIter <- buffer$GetIterAtMark(m2)$iter
      if (!is.null(fore.col)) buffer$ApplyTagByName(fore.col,startIter,endIter)
      if (!is.null(back.col)) buffer$ApplyTagByName(sprintf("%s.background",back.col),startIter,endIter)
    }}}
## Remove highlight tags between character offsets `min` and `max` of the
## text buffer shown in `widget`.  The three clear.* flags select which tags
## are removed: the foreground-colour tag, the background-colour tag, and the
## "underline" tag used for codings that carry a memo.
ClearMark <- function(widget,min=0, max, clear.fore.col=TRUE,clear.back.col=FALSE, clear.underline=TRUE){
  txtBuffer <- slot(widget, "widget")@widget$GetBuffer()
  ## translate offsets back to iterators
  iterFrom <- txtBuffer$GetIterAtOffset(min)$iter
  iterTo <- txtBuffer$GetIterAtOffset(max)$iter
  if (clear.fore.col) {
    txtBuffer$RemoveTagByName(.rqda$fore.col, iterFrom, iterTo)
  }
  if (clear.back.col) {
    txtBuffer$RemoveTagByName(sprintf("%s.background", .rqda$back.col), iterFrom, iterTo)
  }
  if (clear.underline) {
    txtBuffer$RemoveTagByName("underline", iterFrom, iterTo)
  }
}
## Highlight a set of text chunks in the gtext widget `W`.
## `index` is a data frame with one row per chunk; column 1 holds the start
## offset and column 2 the end offset (widget offsets, anchors included).
## A NULL colour argument skips that tag.
HL <- function(W,index,fore.col=.rqda$fore.col,back.col=NULL){
  txtBuffer <- slot(W, "widget")@widget$GetBuffer()
  for (r in seq_len(nrow(index))) {
    chunkStart <- txtBuffer$GetIterAtOffset(index[r, 1])$iter
    chunkEnd <- txtBuffer$GetIterAtOffset(index[r, 2])$iter
    if (!is.null(fore.col)) {
      txtBuffer$ApplyTagByName(fore.col, chunkStart, chunkEnd)
    }
    if (!is.null(back.col)) {
      txtBuffer$ApplyTagByName(sprintf("%s.background", back.col), chunkStart, chunkEnd)
    }
  }
}
## Describe the current selection of the text widget.
## Returns a list with the start/end iterators (startI/endI), anonymous marks
## at both ends (startMark/endMark), the selected text (seltext), and the
## start/end character offsets (startN/endN).  With includeAnchor=FALSE the
## offsets are converted to plain-text (DB) positions by subtracting the
## number of in-text anchors that precede them (countAnchorsWithFileName).
sindex <- function(widget=.rqda$.openfile_gui,includeAnchor=TRUE,codingTable="coding"){
  txtBuffer <- slot(widget, "widget")@widget$GetBuffer()
  sel <- txtBuffer$GetSelectionBounds()
  iterStart <- sel$start
  iterEnd <- sel$end
  selText <- txtBuffer$GetText(iterStart, iterEnd)
  ## anonymous marks keep the positions valid across buffer edits
  mkStart <- txtBuffer$CreateMark(mark.name = NULL, where = iterStart)
  mkEnd <- txtBuffer$CreateMark(mark.name = NULL, where = iterEnd)
  posStart <- iterStart$GetOffset()
  posEnd <- iterEnd$GetOffset()
  if (!includeAnchor) {
    posStart <- posStart - countAnchorsWithFileName(to = posStart, codingTable = codingTable)
    posEnd <- posEnd - countAnchorsWithFileName(to = posEnd, codingTable = codingTable)
  }
  list(startI = iterStart, endI = iterEnd, startN = posStart, endN = posEnd,
       startMark = mkStart, endMark = mkEnd, seltext = selText)
}
## Insert a clickable in-text label (GtkLabel inside a GtkEventBox attached
## at a child anchor) into the text widget at character offset `index`.
## With handler=TRUE the label reacts to mouse clicks:
##   left click  - clear existing highlights, highlight the coded segment
##                 from the anchor to the mark `EndMarkName`, remember the
##                 coding's rowid for the Unmark button, and underline the
##                 segment when the coding carries a memo;
##   right click - open the coding-memo editor window for that coding.
## EndMarkName is the name of a GtkTextMark ("<rowid>.2") at the end of the
## coded segment; NULL disables both click actions.
InsertAnchor <- function(widget,label,index,label.col="gray90",
                         handler=FALSE, EndMarkName=NULL) {
  ## EndMarkName is a gtk mark for end position of highlight
  lab <- gtkLabelNew(label)
  labelEvBox <- gtkEventBoxNew()
  if (isTRUE(handler)) labelEvBox$ModifyBg("normal", gdkColorParse(label.col)$color)
  labelEvBox$Add(lab)
  buffer <- slot(widget,"widget")@widget$GetBuffer()
  if (isTRUE(handler)){
    ## NOTE: the handler closes over `anchor`, which is assigned below after
    ## the anchor is actually created in the buffer.
    button_press <-function(widget,event,W, codeName = label){
      if (attr(event$type,"name")== "GDK_BUTTON_PRESS" && event$button==1) {
        ## action for left click
        if (!is.null(EndMarkName)){
          ## start of the coded segment = position of this anchor
          Iter <- gtkTextBufferGetIterAtChildAnchor(buffer,anchor)$iter
          Offset <- Iter$GetOffset()
          maxidx <- buffer$GetBounds()$end$GetOffset()
          ClearMark(W,min=0,max=maxidx)
          m <- buffer$GetMark(EndMarkName)
          gtkTextMarkSetVisible(m,TRUE) ## useful when a coding end with space
          Offset2 <- buffer$GetIterAtMark(m)$iter$GetOffset()
          HL(W=W, index=data.frame(Offset,Offset2))
          ## buffer$createTag("underline", underline = "single")
          ## should be created when a file is opened
          ## EndMarkName is "<rowid>.2"; strip the suffix to get the rowid
          rowid <- gsub(".2$","",EndMarkName)
          assign("selectedRowid", rowid, envir=.codingEnv)
          enabled(button$UnMarB1) <- TRUE
          memo <- RQDAQuery(sprintf("select memo from coding where rowid=%s",rowid))$memo
          ## underline the segment when the coding has a memo
          if (!is.na(memo) && memo!="") {
            buffer$ApplyTagByName("underline",Iter,buffer$GetIterAtMark(m)$iter)
          }
        }
      }
      if (attr(event$type,"name")== "GDK_BUTTON_PRESS" && event$button==3) {
        ## action for right click
        if (!is.null(EndMarkName)) {
          rowid <- gsub(".2$","",EndMarkName)
          prvcontent <- RQDAQuery(sprintf("select memo from coding where rowid=%s",rowid))[1,1]
          tryCatch(dispose(.rqda$.codingmemo),error=function(e) {})
          ## Close the coding memo first, then open a new one
          title <- sprintf("Coding Memo:%s",codeName)
          .codingmemo <- gwindow(title=title,getOption("widgetCoordinate"),width=600,height=400)
          assign(".codingmemo",.codingmemo, envir=.rqda)
          .codingmemo <- get(".codingmemo",envir=.rqda)
          .codingmemo2 <- gpanedgroup(horizontal = FALSE, container=.codingmemo)
          ## "Save" writes the memo text back to the coding table
          .codingMemoSaveButton <- gbutton(gettext("Save Coding Memo", domain = "R-RQDA"),container=.codingmemo2,action=list(rowid=rowid),handler=function(h,...){
            newcontent <- svalue(.rqda$.cdmemocontent)
            newcontent <- enc(newcontent,encoding="UTF-8") ## take care of double quote.
            RQDAQuery(sprintf("update coding set memo='%s' where rowid=%s",newcontent,rowid=h$action$rowid))
            enabled(.rqda$".codingMemoSaveButton") <- FALSE
          })## end of save memo button
          enabled(.codingMemoSaveButton) <- FALSE
          assign(".codingMemoSaveButton",.codingMemoSaveButton,envir=.rqda)
          assign(".cdmemocontent",gtext(container=.codingmemo2,font.attr=c(sizes="large")),envir=.rqda)
          if (is.na(prvcontent)) prvcontent <- ""
          Encoding(prvcontent) <- "UTF-8"
          if (prvcontent=="") assign("NewCodingMemo",TRUE,envir=.rqda)
          W <- get(".cdmemocontent",envir=.rqda)
          add(W,prvcontent,font.attr=c(sizes="large"),do.newline=FALSE)
          ## any edit of the memo re-enables the save button
          gSignalConnect(W@widget@widget$GetBuffer(), "changed",
                         function(h,...){
                           enabled(.rqda$".codingMemoSaveButton") <- TRUE
                         })
        }
      }
    }
    gSignalConnect(labelEvBox, "button-press-event",button_press,data=widget)
  }
  ## create the child anchor at `index` and attach the event box to it
  iter <- gtkTextBufferGetIterAtOffset(buffer,index)$iter
  anchorcreated <- buffer$createChildAnchor(iter)
  iter$BackwardChar()
  anchor <- iter$getChildAnchor()
  anchor <- gtkTextIterGetChildAnchor(iter)
  widget@widget@widget$addChildAtAnchor(labelEvBox, anchor)
}
## Delete the in-text label anchor whose GtkLabel text equals `label`,
## scanning character by character from offset `index` in the given
## direction.  The scan stops at the first character that carries no child
## anchor, so only a contiguous run of anchors adjacent to `index` is
## examined.  Returns (invisibly) TRUE when a matching anchor was removed.
DeleteButton <- function(widget,label,index,direction=c("backward","forward")){
  buffer <- slot(widget,"widget")@widget$GetBuffer()
  direction <- match.arg(direction)
  ## an anchor occupies one character; when searching backward start at the
  ## character just before `index`
  if (direction=="backward") index <- index - 1
  iter <- gtkTextBufferGetIterAtOffset(buffer,index)$iter
  stop <- FALSE
  isRemove <- FALSE
  while (!stop) {
    Anchor <- iter$getChildAnchor()
    if (!is.null(Anchor)){
      ## label text of the widget embedded at this anchor
      lab <- Anchor$GetWidgets()[[1]][["child"]]$GetLabel()
      Encoding(lab) <- "UTF-8"
      if (lab==label){
        ## delete the single character that holds the anchor
        iterEnd <- gtkTextIterGetOffset(iter)
        iterEnd <- gtkTextBufferGetIterAtOffset(buffer,iterEnd+1)$iter
        gtkTextBufferDelete(buffer,iter,iterEnd)
        stop <- TRUE
        isRemove <- TRUE
      }
      ## advance; stop at the buffer boundary
      if (direction=="backward") if (! iter$BackwardChar()) stop <- TRUE
      if (direction=="forward") if (! iter$ForwardChar()) stop <- TRUE
    } else {stop <- TRUE}
  }
  invisible(isRemove)
}
## Count the child anchors (embedded label widgets) between character
## offsets `from` and `to` of the text buffer shown in `widget`.
## Walks the buffer one character at a time; see countAnchorsWithFileName()
## for a faster DB-based equivalent.
countAnchors <- function(widget=.rqda$.openfile_gui,to,from=0){
  txtBuffer <- slot(widget, "widget")@widget$GetBuffer()
  it <- txtBuffer$GetIterAtOffset(from)$iter
  n <- 0
  pos <- from
  while (pos < to) {
    if (!is.null(it$GetChildAnchor())) {
      n <- n + 1
    }
    it$ForwardChar()
    pos <- it$GetOffset()
  }
  n
}
## testing
## g<-gtext("testing widget of text.",container=T)
## InsertAnchor(g,"button",8)
## Count the in-text anchors located at or before widget offset `to` for the
## file `fileName`.  Same purpose as countAnchors(), but derives the anchor
## positions from the database (coding start positions plus annotation
## positions) instead of walking the text buffer, which is much faster.
## Each anchor shifts later anchors by one character, hence the rank() term;
## ties.method="first" keeps the shifts integral when positions coincide.
countAnchorsWithFileName <- function(to,fileName=enc(svalue(.rqda$.root_edit),encoding="UTF-8"),codingTable="coding")
{
  fid <- RQDAQuery(sprintf("select id from source where status=1 and name='%s'",fileName))$id
  ## since 0.2-0 only one code label is inserted per coding, so only
  ## selfirst matters
  starts <- RQDAQuery(sprintf("select selfirst from %s where status=1 and fid=%s", codingTable, fid))
  notes <- RQDAQuery(sprintf("select position from annotation where status=1 and fid=%s",fid))$position
  anchorPos <- c(unlist(starts), notes)
  if (is.null(anchorPos)) {
    return(0)
  }
  shifted <- anchorPos + rank(anchorPos, ties.method="first")
  sum(shifted <= to) ## note the equal sign
}
## testIt <- function(){ ## test the reliability of countAnchorsWithFileName().
## a <- sindex(incl=T)
## ans <- data.frame(correct=c(countAnchors(to=a$startN),countAnchors(to=a$endN)),wrong=c(countAnchorsWithFileName(to=a$startN),countAnchorsWithFileName(to=a$endN)))
## ans
## }
## Retrieve and display all codings of the code currently selected in
## CodeNameWidget.  A new window lists every coding with its file name and
## character positions, plus three inline buttons per coding:
##   "Back"   - open the source file and highlight this coding in place;
##   "Recode" - apply the code now selected in the Codes widget to the same
##              text segment;
##   "Unmark" - deactivate the coding (status=-1) and strike it through.
## Fid: optional vector of file ids to restrict the retrieval to.
## order: sort codings by file name (fname), file id (ftime) or coding time.
## codingTable: DB table holding the codings ("coding" or "coding2").
retrieval <- function(Fid=NULL,order=c("fname","ftime","ctime"),CodeNameWidget=.rqda$.codes_rqda, codingTable="coding")
## retrieval is rewritten in rev 134
{
  currentCode2 <- svalue(CodeNameWidget)
  if (length(currentCode2)!=0){
    currentCode <- enc(currentCode2,"UTF-8")
    Encoding(currentCode2) <- "UTF-8"
    currentCid <- dbGetQuery(.rqda$qdacon,sprintf("select id from freecode where name= '%s' ",currentCode))[1,1]
    order <- match.arg(order)
    order <- switch(order,
                    fname="order by source.name",
                    ftime="order by source.id",
                    ctime="")
    ## fetch the codings, optionally restricted to the files in Fid
    if (is.null(Fid)){
      retrieval <- RQDAQuery(sprintf("select cid,fid, selfirst, selend, seltext,%s.rowid, source.name,source.id from %s,source where %s.status=1 and cid=%i and source.id=fid %s",codingTable,codingTable,codingTable,currentCid,order))
    } else {
      retrieval <- RQDAQuery(sprintf("select cid,fid, selfirst, selend, seltext, %s.rowid,source.name,source.id from %s,source where %s.status=1 and cid=%i and source.id=fid and fid in (%s) %s",codingTable, codingTable, codingTable, currentCid, paste(Fid,collapse=","), order))
    }
    if (nrow(retrieval)==0) gmessage(gettext("No Coding associated with the selected code.", domain = "R-RQDA"),container=TRUE) else {
      fid <- unique(retrieval$fid)
      retrieval$fname <-""
      Nfiles <- length(fid)
      Ncodings <- nrow(retrieval)
      ## window title: '<n> retrieved coding(s): "<code>" from <m> file(s)'
      if(Ncodings == 1){
        title <- sprintf(ngettext(Nfiles,
                                  "1 retrieved coding: \"%s\" from %i file",
                                  "1 retrieved coding: \"%s\" from %i files", domain = "R-RQDA"),
                         currentCode2,Nfiles)
      } else {
        title <- sprintf(ngettext(Nfiles,
                                  "%i retrieved codings: \"%s\" from %i file",
                                  "%i retrieved codings: \"%s\" from %i files", domain = "R-RQDA"),
                         Ncodings,currentCode2,Nfiles)
      }
      ## close any previous retrieval window of this code, then open a new
      ## one next to the main window
      tryCatch(eval(parse(text=sprintf("dispose(.rqda$.codingsOf%s)",currentCid))),error=function(e){})
      wnh <- size(.rqda$.root_rqdagui) ## size of the main window
      .gw <- gwindow(title=title, parent=c(wnh[1]+10,2),
                     width = min(c(gdkScreenWidth()- wnh[1]-20,getOption("widgetSize")[1])),
                     height = min(c(wnh[2],getOption("widgetSize")[2]))
                     )
      mainIcon <- system.file("icon", "mainIcon.png", package = "RQDA")
      .gw@widget@widget$SetIconFromFile(mainIcon)
      assign(sprintf(".codingsOf%s",currentCid),.gw,envir=.rqda)
      .retreivalgui <- gtext(container=.gw)
      font <- pangoFontDescriptionFromString(.rqda$font)
      gtkWidgetModifyFont(.retreivalgui@widget@widget,font)
      .retreivalgui@widget@widget$SetPixelsBelowLines(5) ## set the spacing
      .retreivalgui@widget@widget$SetPixelsInsideWrap(5) ## so the text looks more comfortable.
      ## .retreivalgui <- gtext(container=.gw)
      ## resolve file names; drop (and deactivate) codings of deleted files
      for (i in fid){
        FileName <- dbGetQuery(.rqda$qdacon,sprintf("select name from source where status=1 and id=%i",i))[['name']]
        if (!is.null(FileName)){
          Encoding(FileName) <- "UTF-8"
          retrieval$fname[retrieval$fid==i] <- FileName
        } else {
          retrieval <- retrieval[retrieval$fid!=i,]
          RQDAQuery(sprintf("update %s set status=0 where fid=%i",codingTable, i))
        }
      }
      Encoding(retrieval$seltext) <- Encoding(retrieval$fname) <- "UTF-8"
      ## helper function
      ## Build the "Back" click handler for one coding: open its file and
      ## highlight the segment between the marks "<rowid>.1" / "<rowid>.2".
      ComputeCallbackFun <- function(FileName,rowid){
        CallBackFUN <- function(widget,event,...){
          ViewFileFunHelper(FileName,hightlight=FALSE)
          textView <- .rqda$.openfile_gui@widget@widget
          buffer <- textView$GetBuffer()
          mark1 <- gtkTextBufferGetMark(buffer,sprintf("%s.1",rowid))
          if(is.null(mark1)){
            ## The coding was deleted by pressing the Unmark button
            ## in the Condings view widget
            gmessage(gettext("Coding not found."), type="warning")
            return(invisible(NULL))
          }
          gtkTextViewScrollToMark(textView,mark1,0)
          iter1 <- buffer$GetIterAtMark(mark1)$iter
          idx1 <- gtkTextIterGetOffset(iter1)
          mark2 <- buffer$GetMark(sprintf("%s.2", rowid))
          gtkTextMarkSetVisible(mark2,TRUE)
          iter2 <- buffer$GetIterAtMark(mark2)$iter
          idx2 <- gtkTextIterGetOffset(iter2)
          HL(.rqda$.openfile_gui, data.frame(idx1,idx2), fore.col = .rqda$fore.col, back.col = NULL)
        }
        CallBackFUN
      } ## end of ComputeCallbackFun
      ## Build the "Recode" click handler: code the same text segment with
      ## the code currently selected in the Codes widget.
      ComputeRecodeFun <- function(rowid){
        RecodeFun <- function(widget, event, ...){
          SelectedCode <- svalue(.rqda$.codes_rqda)
          if (length(SelectedCode)!=0){
            Encoding(SelectedCode) <- "UTF-8"
            SelectedCode2 <- enc(SelectedCode, encoding="UTF-8")
            currentCid <- dbGetQuery(.rqda$qdacon, sprintf("select id from freecode where name='%s'",SelectedCode2))$id
            DAT <- RQDAQuery(sprintf("select * from coding where rowid=%s", rowid))
            ## DAT will be empty if the user has Unmarked the coding and clicked
            ## on the "Clean project" button.
            if(length(DAT$cid) == 0){
              gmessage(gettext("Coding not found."), type="warning")
              return(invisible(NULL))
            }
            DAT$seltext <- enc(DAT$seltext)
            ## skip when this segment is already coded with the selected code
            Exists <- RQDAQuery(sprintf("select * from coding where cid=%s and selfirst=%s and selend=%s and status=1", currentCid, DAT$selfirst, DAT$selend))
            if (nrow(Exists)==0) {
              success <- is.null(try(RQDAQuery(sprintf("insert into %s (cid,fid, seltext, selfirst, selend, status, owner, date) values (%s, %s, '%s', %s, %s, %s, '%s', '%s') ",
                                                       codingTable, currentCid, DAT$fid, DAT$seltext, DAT$selfirst, DAT$selend, 1, .rqda$owner,
                                                       as.character(date()))),silent=TRUE))
              if (success){
                gmessage(sprintf(gettext("Code \"%s\" applied to this text segment.\n"), SelectedCode2))
              } else {
                gmessage(gettext("Cannot recode this text segment."), type="warning")
              }
            } else {
              gmessage(sprintf(gettext("Text segment already coded as \"%s\""),
                               SelectedCode2), type="warning") }
          }
        }
        RecodeFun
      } ## end of ComputeRecodeFun
      ## Build the "Unmark" click handler: deactivate the coding in the DB
      ## and strike through its entry (offsets sO .. sO+nB) in this window.
      ComputeUnMarkFun <- function(rowid, sO, nB){
        UnmarkFun <- function(widget, event, ...){
          RQDAQuery(sprintf("update %s set status=-1 where rowid=%s", .rqda$codingTable, rowid))
          # Better than striking through the text would be to reload the Codings
          # View widget and put the cursor at the same position because the
          # "Back", "Recode" and "Unmark" buttons would be recomputed.
          buffer$ApplyTagByName("strkthrgh",
                                buffer$GetIterAtOffset(sO)$iter,
                                buffer$GetIterAtOffset(sO + nB)$iter)
          freq <- RQDAQuery(sprintf("select count(cid) as freq from coding where status=1 and cid=%s", currentCid))$freq
          ## This crashes R:
          ## names(CodeNameWidget) <- sprintf(gettext("Selected code id is %s__%s codings", domain = "R-RQDA"),currentCid, freq)
        }
        UnmarkFun
      } ## end of ComputeUnMarkFun
      buffer <- .retreivalgui@widget@widget$GetBuffer()
      buffer$createTag("red", foreground = "red")
      buffer$createTag("strkthrgh", strikethrough = TRUE)
      iter <- buffer$getIterAtOffset(0)$iter
      ## write one entry per coding: red meta line, Back/Recode/Unmark
      ## buttons, then the coded text
      apply(retrieval,1, function(x){
        metaData <- sprintf("%s [%i:%i]",x[['fname']],as.numeric(x[['selfirst']]),as.numeric(x[['selend']]))
        ## buffer$InsertWithTagsByName(iter, metaData,"x-large","red")
        ## remember where this entry starts and how long it is, for the
        ## strike-through applied by the Unmark handler
        sOffset <- iter$GetOffset()
        nBytes <- nchar(paste(metaData, x[['seltext']]), type = "chars") + 8
        buffer$InsertWithTagsByName(iter, metaData,"red")
        iter$ForwardChar()
        buffer$Insert(iter, "\n")
        anchorcreated <- buffer$createChildAnchor(iter)
        iter$BackwardChar()
        anchor <- iter$getChildAnchor()
        lab <- gtkLabelNew(gettext("Back", domain = "R-RQDA"))
        widget <- gtkEventBoxNew()
        widget$Add(lab)
        gSignalConnect(widget, "button-press-event",
                       ComputeCallbackFun(x[["fname"]],as.numeric(x[["rowid"]])))
        .retreivalgui@widget@widget$addChildAtAnchor(widget, anchor)
        iter$ForwardChar()
        buffer$Insert(iter, " ")
        buffer$createChildAnchor(iter)
        iter$BackwardChar()
        anchor_recode <- iter$getChildAnchor()
        lab_recode <- gtkLabelNew(gettext("Recode", domain = "R-RQDA"))
        widget_recode <- gtkEventBoxNew()
        widget_recode$Add(lab_recode)
        gSignalConnect(widget_recode, "button-press-event",
                       ComputeRecodeFun(as.numeric(x[["rowid"]])))
        .retreivalgui@widget@widget$addChildAtAnchor(widget_recode, anchor_recode)
        iter$ForwardChar()
        buffer$Insert(iter, " ")
        buffer$createChildAnchor(iter)
        iter$BackwardChar()
        anchor_unmark <- iter$getChildAnchor()
        lab_unmark<- gtkLabelNew(gettext("Unmark", domain = "R-RQDA"))
        widget_unmark <- gtkEventBoxNew()
        widget_unmark$Add(lab_unmark)
        gSignalConnect(widget_unmark, "button-press-event",
                       ComputeUnMarkFun(as.numeric(x[["rowid"]]), sOffset, nBytes))
        .retreivalgui@widget@widget$addChildAtAnchor(widget_unmark, anchor_unmark)
        widget$showAll()
        iter$ForwardChar()
        buffer$insert(iter, "\n")
        buffer$InsertWithTagsByName(iter, x[['seltext']])
        buffer$insert(iter, "\n\n")
      }
      )## end of apply
      buffer$PlaceCursor(buffer$getIterAtOffset(0)$iter)
    }
  }
}
## Export the codings of user-selected codes to an HTML file.
## file: output path; Fid: optional vector of file ids (defaults to all
## coded files); order: sort order of codings within each code; append:
## when TRUE the HTML header is not written; codingTable: DB table holding
## the codings ("coding" or "coding2").
## The user picks one or more codes from a dialog; an anchor index is
## written first so each code's section can be reached via links.
ExportCoding <- function(file="Exported Codings.html",Fid=NULL,order=c("fname","ftime","ctime"),append=FALSE,codingTable="coding")
{
  ## Write the codings of a single code to `file` (helper).
  ExportCodingOfOneCode <- function(file,currentCode,Fid,order=c("fname","ftime","ctime"),append=TRUE){
    if (length(currentCode)!=0){
      currentCid <- dbGetQuery(.rqda$qdacon,sprintf("select id from freecode where name= '%s' ",enc(currentCode)))[1,1]
      order <- match.arg(order)
      order <- switch(order,
                      fname="order by source.name",
                      ftime="order by source.id",
                      ctime="")
      ## FIX: the join condition was hard-coded as "source.id=coding.fid",
      ## which broke the query whenever codingTable != "coding"; it now uses
      ## the parameterized table name.
      retrieval <- RQDAQuery(sprintf("select cid,fid, selfirst, selend, seltext, %s.rowid,source.name,source.id from %s,source where %s.status=1 and cid=%i and source.id=%s.fid and fid in (%s) %s",codingTable,codingTable,codingTable,currentCid,codingTable, paste(Fid,collapse=","), order))
      if (nrow(retrieval)==0) gmessage(sprintf(gettext("No Coding associated with the '%s'.", domain = "R-RQDA"),currentCode),container=TRUE) else {
        fid <- unique(retrieval$fid)
        retrieval$fname <-""
        ## resolve file names; drop (and deactivate) codings of deleted files
        for (i in fid){
          FileName <- dbGetQuery(.rqda$qdacon,sprintf("select name from source where status=1 and id=%i",i))[['name']]
          if (!is.null(FileName)){
            ## FIX: set the encoding only when a file name was found;
            ## Encoding(NULL) <- "UTF-8" is an error (matches retrieval()).
            Encoding(FileName) <- "UTF-8"
            retrieval$fname[retrieval$fid==i] <- FileName
          } else {
            retrieval <- retrieval[retrieval$fid!=i,]
            RQDAQuery(sprintf("update %s set status=0 where fid=%i",codingTable,i))
          }
        }
        Nfiles <- length(unique(retrieval$fname))
        Ncodings <- nrow(retrieval)
        Encoding(retrieval$seltext) <- "UTF-8"
        ## section header: "<n> Coding(s) of "<code>" from <m> file(s)."
        if (nrow(retrieval)==1) {
          cat("<hr><p align='center'><b><font color='blue' size='+2'>",
              sprintf(ngettext(Nfiles,
                               "%i Coding of <a id='%s'>\"%s\"</a> from %s file.",
                               "%i Coding of <a id='%s'>\"%s\"</a> from %s files.", domain = "R-RQDA"),
                      Ncodings,currentCode,currentCode,Nfiles),
              "</b></font><hr><p align='left'>", sep="",file=file,append=append)
        } else {
          cat("<hr><p align='center'><b><font color='blue' size='+2'>",
              sprintf(ngettext(Nfiles,
                               "%i Codings of <a id='%s'>\"%s\"</a> from %s file.",
                               "%i Codings of <a id='%s'>\"%s\"</a> from %s files.", domain = "R-RQDA"),
                      Ncodings,currentCode,currentCode,Nfiles),
              "</b></font><hr><p align='left'>", sep="",file=file,append=append)
        }
        retrieval$seltext <- gsub("\\n", "<p>", retrieval$seltext)
        ## one paragraph per coding with a "Back" link to the index
        apply(retrieval,1, function(x){
          metaData <- sprintf("<b><font color='red'> %s [%s:%s] </font></b><br><br>",x[['fname']],x[['selfirst']],x[['selend']])
          cat(metaData,file=file,append=TRUE)
          cat(x[['seltext']],file=file,append=TRUE)
          cat(sprintf("<br><a href='#%s+b'>", currentCode), gettext("Back", domain = "R-RQDA"), "<a><br><br>", sep="",file=file,append=TRUE)
        }
        )## end of apply
      }}}## end of export helper function
  if (is.null(Fid)) Fid <- GetFileId(type="coded")
  ## only offer codes that actually have codings in the selected files
  allcodes <- RQDAQuery(sprintf("select freecode.name from freecode, %s where freecode.status=1 and freecode.id=%s.cid and %s.status=1 and %s.fid in (%s) group by freecode.name",
                                codingTable,codingTable,codingTable,codingTable,
                                paste(shQuote(Fid),collapse=",")))$name
  if (!is.null(allcodes)){
    Encoding(allcodes) <- "UTF-8"
    CodeList <- gselect.list(allcodes, multiple = TRUE, title = "Select one or more codes.")
    ## FIX: guard against a zero-length selection (cancelled dialog), which
    ## made `CodeList != ""` a zero-length operand of ||.
    if (length(CodeList)>1 || (length(CodeList)==1 && CodeList!="")) {
      file=file(file,open="w",encoding="UTF-8")
      if (!append){
        cat("<HEAD><META HTTP-EQUIV='CONTENT-TYPE' CONTENT='text/html; charset=UTF-8'><TITLE>Codings created by RQDA.</TITLE><META NAME='AUTHOR' CONTENT='RQDA'>",file=file,append=append)
      }
      cat(sprintf("Created by <a href='http://rqda.r-forge.r-project.org/'>RQDA</a> at %s<br><br>\n",Sys.time()),file=file,append=TRUE)
      ## index of anchors linking to each code's section
      for (i in CodeList){
        cat(sprintf("<a id='%s+b' href='#%s'>%s<a><br>",i,i,i),file=file,append=TRUE)
      }
      for (i in seq_along(CodeList)){
        ExportCodingOfOneCode(file=file,currentCode=CodeList[i],Fid=Fid,order=order,append=TRUE)
      }
      close(file)
    }
  }}
## Handler for clicking a code name in the Codes widget.
## Updates the widget caption with the code id and its number of codings,
## enables the Mark/Unmark buttons when a file is open, and re-highlights in
## the open file only the text chunks coded with the selected code.
## codingTable: DB table holding the codings ("coding" or "coding2").
ClickHandlerFun <- function(CodeNameWidget,buttons=c("MarCodB1","UnMarB1"),codingTable="coding"){
  ## CodeNameWidget=.rqda$.codes_rqda
  con <- .rqda$qdacon
  SelectedCode <- currentCode <- svalue(CodeNameWidget)
  if (length(SelectedCode)!=0) {
    SelectedCode <- currentCode <- enc(currentCode,encoding="UTF-8")
    currentCid <- dbGetQuery(con,sprintf("select id from freecode where name='%s'",SelectedCode))[,1]
    ## FIX: count codings in the table actually being displayed; this was
    ## hard-coded to "coding", giving a wrong count when codingTable="coding2".
    freq <- RQDAQuery(sprintf("select count(cid) as freq from %s where status=1 and cid=%s", codingTable, currentCid))$freq
    names(CodeNameWidget) <- sprintf(gettext("Selected code id is %s__%s codings", domain = "R-RQDA"),currentCid, freq)
    if (exists(".root_edit",envir=.rqda) && isExtant(.rqda$.root_edit)) { ## a file is open
      for (i in buttons) {
        b <- get(i,envir=button)
        enabled(b) <- TRUE
      }
      SelectedFile <- svalue(.rqda$.root_edit)
      SelectedFile <- enc(SelectedFile,encoding="UTF-8")
      currentFid <- RQDAQuery(sprintf("select id from source where name='%s'",SelectedFile))[,1]
      ## following code: Only mark the text chunk according to the current code.
      ## idx1: codings of the selected code in the current file
      idx1 <- dbGetQuery(con,sprintf("select selfirst, selend from %s where
cid=%i and fid=%i and status=1",codingTable, currentCid, currentFid))
      ## idx2: all codings of the current file (their starts give the anchor
      ## positions used for offset shifting)
      idx2 <- dbGetQuery(con, sprintf("select selfirst, selend from %s where fid=%i and status=1",codingTable, currentFid))
      if (nrow(idx2)>0) {
        ClearMark(.rqda$.openfile_gui,min=0,max=max(as.numeric(idx2$selend))+2*nrow(idx2),clear.fore.col = TRUE, clear.back.col =FALSE)
      }
      if (nrow(idx1)>0) {
        ##allidx <- unlist(idx2)
        anno <- RQDAQuery(sprintf("select position from annotation where status=1 and fid=%s",currentFid))$position
        allidx <- c(idx2[,1],anno) ## since 0.2-0, only one code label is added to file widget.
        ## shift DB offsets by the number of anchors preceding them
        addidx <- data.frame(selfirst=apply(outer(allidx,idx1$selfirst,"<="),2,sum),
                             selend=apply(outer(allidx,idx1$selend,"<="),2,sum))
        idx1 <- idx1+addidx
        HL(.rqda$.openfile_gui,index=idx1,fore.col=.rqda$fore.col,back.col=NULL)
      }
    }# end of mark text chunk
  }
}
## Highlight only those codings of the currently opened file that carry a
## non-empty memo.  DB offsets are first shifted by the number of in-text
## anchors (code labels + annotation marks) preceding them.  All errors in
## the GTK part are silently swallowed (best-effort highlighting).
HL_CodingWithMemo <- function(codingTable="coding"){
  if (is_projOpen(envir=.rqda,conName="qdacon")){
    SelectedFile <- tryCatch(svalue(.rqda$.root_edit),error=function(e){})
    if (!is.null(SelectedFile)) {
      SelectedFile <- enc(SelectedFile,encoding="UTF-8")
      currentFid <- RQDAQuery(sprintf("select id from source where name='%s'",SelectedFile))[,1]
      tryCatch({
        widget <- .rqda$.openfile_gui
        idx <- RQDAQuery(sprintf("select selfirst, selend,memo from %s where fid=%i and status=1",codingTable, currentFid))
        if (nrow(idx)!=0){
          ## clear past the largest possible shifted offset
          ClearMark(widget,min=0,max=max(as.numeric(idx$selend))+2*nrow(idx),clear.fore.col = TRUE, clear.back.col =FALSE)
          anno <- RQDAQuery(sprintf("select position from annotation where status=1 and fid=%s",currentFid))$position
          ## allidx <- unlist(idx[,c("selfirst","selend")])
          ## anchor positions: coding start labels plus annotation marks
          allidx <- c(idx[,c("selfirst")],anno)
          ## shift DB offsets by the number of anchors preceding them
          addidx <- data.frame(selfirst=apply(outer(allidx,idx$selfirst,"<="),2,sum),
                               selend=apply(outer(allidx,idx$selend,"<="),2,sum))
          idx[,c("selfirst","selend")] <- idx[,c("selfirst","selend")] + addidx
          ## keep only codings with a non-empty memo
          idx1 <- idx[(idx$memo!="") & (!is.na(idx$memo)),c("selfirst","selend")]
          HL(widget,index=idx1,fore.col=.rqda$fore.col,back.col=NULL)
        }
      },error=function(e){}) # end of mark text chunk
    }}}
## Highlight every coding of the currently opened file.
## The DB stores plain-text offsets; each in-text anchor (code label or
## annotation mark) shifts later positions by one character, so the offsets
## are corrected with a rank() term before highlighting.
HL_AllCodings <- function(codingTable="coding") {
  if (is_projOpen(envir=.rqda,conName="qdacon")) {
    SelectedFile <- tryCatch(svalue(.rqda$.root_edit),error=function(e){NULL})
    if (!is.null(SelectedFile)) {
      currentFid <- RQDAQuery(sprintf("select id from source where name='%s'",enc(SelectedFile,"UTF-8")))[,1]
      idx <- RQDAQuery(sprintf("select selfirst,selend from %s where fid=%i and status=1",codingTable,currentFid))
      if (nrow(idx) != 0){
        anno <- RQDAQuery(sprintf("select position from annotation where status=1 and fid=%s",currentFid))$position
        ## FIX: use ties.method="first" so the shifts stay integral when
        ## several codings/annotations share an offset (the default "average"
        ## produced fractional offsets); this matches countAnchorsWithFileName.
        idx1 <- c(idx$selfirst,anno)
        idx1 <- idx1 + rank(idx1, ties.method="first")
        idx2 <- c(idx$selend,anno)
        idx2 <- idx2 + rank(idx2, ties.method="first")
        idx <-data.frame(idx1,idx2)
        ClearMark(.rqda$.openfile_gui ,0 , max(idx2))
        HL(.rqda$.openfile_gui,index=idx)
      }
    }
  }
}
##addAnnoTable <- function(){
## tryCatch(
## RQDAQuery("create table annotation (fid integer,position integer,annotation text, owner text, date text,dateM text, ##status integer)"),error=function(e){})
##} ##RQDAQuery("drop table annotation")
## Next available rowid for `table`: max(rowid)+1, or 1 for an empty table
## (where the SQL aggregate yields NULL, read back as NA).
NextRowId <- function(table){
  nextid <- RQDAQuery(sprintf("select max(rowid)+1 as nextid from %s",table))$nextid
  if (is.na(nextid)) {
    nextid <- 1
  }
  nextid
}
## Insert a yellow "[Annotation]" label into the open-file widget at offset
## `AnchorPos` (defaults to `index`, the DB plain-text position of the
## annotation).  Clicking the label reopens the annotation editor for
## `rowid`.  A mark named "<rowid>.3" is created at the anchor so it can be
## removed later (see DeleteAnnotationAnchorByMark).
InsertAnnotation <- function (index,fid,rowid,label=gettext("[Annotation]", domain = "R-RQDA"),AnchorPos=NULL)
{
  widget=.rqda$.openfile_gui
  lab <- gtkLabelNew(label)
  ## `label` is reused as the event box that wraps the GtkLabel
  label <- gtkEventBoxNew()
  label$ModifyBg("normal", gdkColorParse("yellow")$color)
  label$Add(lab)
  buffer <- slot(widget, "widget")@widget$GetBuffer()
  button_press <- function(widget, event,moreArgs) {
    ## clicking the label opens the existing annotation for editing
    openAnnotation(New=FALSE,pos=moreArgs$pos,fid=moreArgs$fid,rowid=moreArgs$rowid)
    enabled(button$savAnnB) <- FALSE
  }
  gSignalConnect(label, "button-press-event", button_press,data = list(pos=index,fid=fid,rowid=rowid))
  if (is.null(AnchorPos)) AnchorPos <- index
  iter <- gtkTextBufferGetIterAtOffset(buffer, AnchorPos)$iter
  ## named mark so the anchor can be located and deleted later
  buffer$CreateMark(mark.name=sprintf("%s.3",rowid),where=iter)
  anchorcreated <- buffer$createChildAnchor(iter)
  iter$BackwardChar()
  anchor <- iter$getChildAnchor()
  anchor <- gtkTextIterGetChildAnchor(iter)
  widget@widget@widget$addChildAtAnchor(label, anchor)
} ## end of helper widget
## Remove the "[Annotation]" anchor associated with the mark `markname`
## ("<rowid>.3") from the open-file widget.  The anchor occupies the single
## character immediately before the mark, so that one character is deleted.
DeleteAnnotationAnchorByMark <- function(markname){
  buffer <- .rqda$.openfile_gui@widget@widget$GetBuffer()
  mark <- buffer$GetMark(markname)
  ## (a redundant, discarded GetIterAtMark call was removed here)
  offset2 <- buffer$GetIterAtMark(mark)$iter$GetOffset()
  offset1 <- offset2 - 1
  iter2 <- buffer$GetIterAtOffset(offset2)$iter
  iter1 <- buffer$GetIterAtOffset(offset1)$iter
  buffer$Delete(iter1,iter2)
}
## Open the annotation editor for the annotation of file `fid` at plain-text
## position `pos` (DB row `rowid`).  New=TRUE means no annotation exists yet
## at that position: the first save inserts the in-text anchor and a new DB
## row.  Saving empty text deletes the annotation (removes the anchor,
## blanks and deactivates the DB row).
## AnchorPos: widget offset for the anchor (defaults to `pos`).
openAnnotation <- function(New=TRUE,pos,fid,rowid,AnchorPos=NULL){
  ## only one annotation editor at a time
  tryCatch(dispose(.rqda$.annotation),error=function(e) {})
  wnh <- size(.rqda$.root_rqdagui)
  .annotation <- gwindow(title=ngettext(1, "Annotation", "Annotations", domain = "R-RQDA"),parent=c(wnh[1]+10,2), # ngettext avoid update_pkg_po() crash.
                         width = min(c(gdkScreenWidth()- wnh[1]-20,getOption("widgetSize")[1])),
                         height = min(c(wnh[2],getOption("widgetSize")[2]))
                         )
  mainIcon <- system.file("icon", "mainIcon.png", package = "RQDA")
  .annotation@widget@widget$SetIconFromFile(mainIcon)
  assign(".annotation",.annotation, envir=.rqda)
  .annotation2 <- gpanedgroup(horizontal = FALSE, container=.annotation)
  savAnnB <-
    gbutton(gettext("Save Annotation", domain = "R-RQDA"),container=.annotation2,handler=function(h,...){
      newcontent <- svalue(W)
      newcontent <- enc(newcontent,encoding="UTF-8")
      if (newcontent != ""){
        if (New) {
          ## first save of a fresh annotation: insert anchor + DB row
          if (is.null(AnchorPos)) AnchorPos <- pos
          InsertAnnotation(index=pos,fid=fid,rowid=rowid,AnchorPos=AnchorPos)
          RQDAQuery(sprintf("insert into annotation (fid,position,annotation,owner,date,status) values (%i,%i,'%s','%s','%s',1)", fid,pos,newcontent,.rqda$owner,date()))
          New <<- FALSE ## note the replacement <<-
        } else {
          ## RQDAQuery(sprintf("update annotation set annotation='%s' where fid=%i and position=%s and status=1", newcontent,fid,pos))
          RQDAQuery(sprintf("update annotation set annotation='%s' where rowid=%s and status=1", newcontent,rowid))
        }
      } else {## action for empty new content.
        ## empty text deletes the annotation: drop the anchor, blank and
        ## deactivate the DB row
        tryCatch(DeleteAnnotationAnchorByMark(sprintf("%s.3",rowid)),error=function(e){})
        ## RQDAQuery(sprintf("update annotation set annotation='%s' where fid=%i and position=%s and status=1", newcontent,fid,pos))
        RQDAQuery(sprintf("update annotation set annotation='%s' where rowid=%s and status=1", newcontent,rowid))
        ## RQDAQuery(sprintf("update annotation set status=0 where fid=%i and position=%s and status=1",fid,pos))
        RQDAQuery(sprintf("update annotation set status=0 where rowid=%s and status=1",rowid))
      }
      enabled(savAnnB) <- FALSE
    }
    )## end of save button
  enabled(savAnnB) <- FALSE
  assign("savAnnB", savAnnB, envir=button)
  assign(".annotationContent",gtext(container=.annotation2,font.attr=c(sizes="large")),envir=.rqda)
  ## prvcontent <- RQDAQuery(sprintf("select annotation from annotation where fid=%i and position=%s and status=1",fid,pos))[1,1]
  ## pre-fill the editor with the existing annotation text, if any
  prvcontent <- RQDAQuery(sprintf("select annotation from annotation where rowid=%s and status=1",rowid))[1,1]
  if (is.null(prvcontent) || is.na(prvcontent)) prvcontent <- ""
  Encoding(prvcontent) <- "UTF-8"
  W <- get(".annotationContent",envir=.rqda)
  ## any edit re-enables the save button
  gSignalConnect(W@widget@widget$GetBuffer(), "changed",
                 function(h,...){
                   mbut <- get("savAnnB",envir=button)
                   enabled(mbut) <- TRUE
                 }
                 )##
  add(W,prvcontent,font.attr=c(sizes="large"),do.newline=FALSE)
}
## Menu/toolbar handler: open the annotation editor at the start of the
## current selection (or cursor position) of the opened file.  Reuses an
## existing annotation at that plain-text position, otherwise prepares a
## new one with the next free rowid.
Annotation <- function(...){
  if (is_projOpen(envir=.rqda,conName="qdacon")) {
    W <- tryCatch( get(".openfile_gui",envir=.rqda), error=function(e){})
    ## get the widget for file display. If it does not exist, then return NULL.
    pos <- tryCatch(sindex(W,includeAnchor=FALSE),error=function(e) {}) ## sindex() fails when no file is open
    if (is.null(pos)) {gmessage(gettext("Open a file first!", domain = "R-RQDA"),container=TRUE)}
    else {
      ## the anchor goes at the widget offset (anchors included); the DB
      ## stores the plain-text offset (pos$startN, anchors excluded)
      AnchorPos <- sindex(W,includeAnchor=TRUE)$startN
      SelectedFile <- svalue(.rqda$.root_edit)
      SelectedFile <- enc(SelectedFile,encoding="UTF-8")
      currentFid <- RQDAQuery(sprintf("select id from source where name='%s'",SelectedFile))[,1]
      idx <- RQDAQuery(sprintf("select fid, annotation,rowid from annotation where fid=%i and position=%s and status=1",currentFid,pos$startN))
      New <- ifelse(nrow(idx)==0,TRUE,FALSE)
      if (nrow(idx)==0) rowid <- NextRowId("annotation") else rowid <- idx$rowid
      openAnnotation(New=New,pos=pos$startN,fid=currentFid,rowid=rowid,AnchorPos=AnchorPos)
    }
  }
}
## Restrict the Codes widget to codes that have at least one coding in the
## files selected by `condition` (see GetFileId).  Returns the code names
## invisibly; does nothing without an open project or matching files.
CodeWithCoding <- function(condition = c("unconditional", "case", "filecategory","both"),
                           codingTable="coding"){
  if (!is_projOpen(envir=.rqda, conName="qdacon")) {
    return(invisible(NULL))
  }
  condition <- match.arg(condition)
  fid <- GetFileId(condition, "coded")
  if (length(fid) == 0) {
    return(invisible(NULL))
  }
  codeNames <- unlist(RQDAQuery(sprintf("select name from freecode where status=1 and id in (select cid from %s where status=1 and fid in (%s) group by cid)",
                                        codingTable, paste(shQuote(fid), collapse=","))))
  Encoding(codeNames) <- "UTF-8"
  .rqda$.codes_rqda[] <- codeNames
  invisible(codeNames)
}
## Restrict the Codes widget to codes that have NO coding in the files
## selected by `condition` (complement of CodeWithCoding).  Returns the code
## names invisibly; does nothing without an open project or matching files.
CodeWithoutCoding <- function(condition = c("unconditional", "case", "filecategory","both"),
                              codingTable="coding"){
  if (!is_projOpen(envir=.rqda, conName="qdacon")) {
    return(invisible(NULL))
  }
  condition <- match.arg(condition)
  fid <- GetFileId(condition, "coded")
  if (length(fid) == 0) {
    return(invisible(NULL))
  }
  codeNames <- unlist(RQDAQuery(sprintf("select name from freecode where status=1 and id not in
(select cid from %s where status=1 and fid in (%s) group by cid)",
                                        codingTable, paste(shQuote(fid), collapse=","))))
  Encoding(codeNames) <- "UTF-8"
  .rqda$.codes_rqda[] <- codeNames
  invisible(codeNames)
}
## Add the code(s) selected in `Widget` to one or more code categories
## chosen from a selection dialog.  Only categories that do not yet contain
## all of the selected codes are offered.  New membership rows are inserted
## into the treecode table; the Codes-of-Category widget is refreshed when
## updateWidget is TRUE.
AddToCodeCategory <- function (Widget = .rqda$.codes_rqda, updateWidget = TRUE)
{
  codename2 <- svalue(Widget)
  codename <- enc(codename2)
  query <- dbGetQuery(.rqda$qdacon, sprintf("select id, name from freecode where name in(%s) and status=1",
                                            paste("'", codename, "'", sep = "", collapse = ",")))
  cid <- query$id
  Encoding(query$name) <- "UTF-8"
  ## categories that do not already contain (all of) the selected code(s)
  CodeCat <- RQDAQuery(sprintf("select name, catid from codecat where status=1 and catid not in (select catid from treecode where status=1 and cid in (%s) group by catid)", paste("'", cid, "'", sep = "", collapse = ",")))
  if (nrow(CodeCat) == 0) {
    gmessage(gettext("Add Code Category First.", domain = "R-RQDA"), container=TRUE)
  }
  else {
    Encoding(CodeCat$name) <- "UTF-8"
    Selecteds <- gselect.list(CodeCat$name, multiple = TRUE,x=getOption("widgetCoordinate")[1])
    if (length(Selecteds) > 0 && Selecteds != "") {
      Encoding(Selecteds) <- "UTF-8"
      for (Selected in Selecteds) {
        CodeCatid <- CodeCat$catid[CodeCat$name %in% Selected]
        exist <- dbGetQuery(.rqda$qdacon, sprintf("select cid from treecode where status=1 and cid in (%s) and catid=%i", paste("'", cid, "'", sep = "", collapse = ","), CodeCatid)) ## this check is unnecessary
        if (nrow(exist) != length(cid)) {
          ## insert only the codes not yet members of the category
          DAT <- data.frame(cid = cid[!cid %in% exist$cid],
                            catid = CodeCatid, date = date(), dateM = date(),
                            memo = "", status = 1, owner=.rqda$owner)
          success <- dbWriteTable(.rqda$qdacon, "treecode",
                                  DAT, row.name = FALSE, append = TRUE)
          if (success && updateWidget) {
            UpdateCodeofCatWidget()
          }
          if (!success)
            gmessage(sprintf(gettext("Fail to write to code category of %s", domain = "R-RQDA"),
                             Selected))
        }
      }
    }
  }
}
## c2InfoFun <- function(){
## con <- .rqda$qdacon
## if (is_projOpen(envir=.rqda,conName="qdacon")) {
## W <- tryCatch(get(".openfile_gui",envir=.rqda), error=function(e){})
## ## get the widget for file display. If it does not exist, then return NULL.
## sel_index <- tryCatch(sindex(W,includeAnchor=FALSE),error=function(e) {})
## ## if the not file is open, it doesn't work.
## if (is.null(sel_index)) {gmessage(gettext("Open a file first!", domain = "R-RQDA"),container=TRUE)}
## else {
## CodeTable <- dbGetQuery(con,"select id,name from freecode where status==1")
## SelectedFile <- svalue(.rqda$.root_edit); Encoding(SelectedFile) <- "UTF-8" ##file title
## currentFid <- dbGetQuery(con,sprintf("select id from source where name=='%s'",SelectedFile))[,1]
## codings_index <- RQDAQuery(sprintf("select rowid, cid, fid, selfirst, selend from coding where fid==%i ", currentFid))
## ## should only work with those related to current code and current file.
## rowid <- codings_index$rowid[(codings_index$selfirst >= sel_index$startN) &
## (codings_index$selend <= sel_index$endN)
## ] ## determine which codes correspond to the selection
## cid <- codings_index$cid[codings_index$rowid %in% rowid]
## Codes <- CodeTable$name[CodeTable$id %in% cid]
## ## should not use data frame as x, otherwise, svalue(c2infoWidget) is a factor rather than a character
## if (length(Codes)!=0){
## Encoding(Codes) <- "UTF-8"
## tryCatch(dispose(.rqda$.c2info),error=function(e){})
## gw <- gwindow(title="Associted code-list.",heigh=min(33*length(Codes),600),parent=.rqda$.openfile_gui)
## c2infoWidget <- gtable(Codes,container=gw)
## assign(".c2info",gw,envir=.rqda)
## addhandlerdoubleclick(c2infoWidget,handler=function(h,...) retrieval2(CodeNameWidget=c2infoWidget))
## addHandlerClicked(c2infoWidget,handler <- function(h,...){ClickHandlerFun(CodeNameWidget=c2infoWidget)})
## }
## }}}
## InsertAnchor <- function(widget,label,index,handler=FALSE,label.col="gray90",
## forward=TRUE){ ## forward is used only when handler is TRUE
## ## rev 233
## lab <- gtkLabelNew(label)
## label <- gtkEventBoxNew()
## if (isTRUE(handler)) label$ModifyBg("normal", gdkColorParse(label.col)$color)
## label$Add(lab)
## buffer <- slot(widget,"widget")@widget$GetBuffer()
## if (isTRUE(handler)){
## button_press <-function(widget,event,W){
## Iter <- gtkTextBufferGetIterAtChildAnchor(buffer,anchor)$iter
## Offset <- Iter$GetOffset()
## label <- lab$GetLabel()
## if (forward) {
## label <- gsub("<$","",label)
## Succeed <- FALSE
## while (!Succeed){
## if (! Iter$ForwardChar()) Succeed <- TRUE
## Anchor <- Iter$getChildAnchor()
## if (!is.null(Anchor)){
## lab <- Anchor$GetWidgets()[[1]][["child"]]$GetLabel()##Anchor is event box.
## lab <- gsub("^>","",lab)
## if (lab==label){
## Succeed <- TRUE
## maxidx <- buffer$GetBounds()$end$GetOffset()
## ClearMark(W,min=0,max=maxidx)
## Offset2 <- Iter$GetOffset()
## HL(W=W, index=data.frame(Offset,Offset2))
## }}}} else {
## label <- gsub("^>","",label)
## Succeed <- FALSE
## while (!Succeed){
## if (! Iter$BackwardChar()) Succeed <- TRUE
## Anchor <- Iter$getChildAnchor()
## if (!is.null(Anchor)){
## lab <- Anchor$GetWidgets()[[1]][["child"]]$GetLabel()
## lab <- gsub("<$","",lab)
## if (lab==label){
## Succeed <- TRUE
## maxidx <- buffer$GetBounds()$end$GetOffset()
## ClearMark(W,min=0,max=maxidx)
## Offset2 <- Iter$GetOffset()
## HL(W=W, index=data.frame(Offset2,Offset)) ## note the offset2 comes first
## }}}}
## }
## gSignalConnect(label, "button-press-event",button_press,data=widget)}
## iter <- gtkTextBufferGetIterAtOffset(buffer,index)$iter
## anchorcreated <- buffer$createChildAnchor(iter)
## iter$BackwardChar()
## anchor <- iter$getChildAnchor()
## anchor <- gtkTextIterGetChildAnchor(iter)
## widget@widget@widget$addChildAtAnchor(label, anchor)
## }
|
8f4420c7127c33d154eafb97fdfea7c273b7a471
|
0551b7a697f099dda594df1cfcad16d2969f1c8f
|
/DataCleaningScripts/old_counts.R
|
bbb5c5ef90b93fbdc7709d6705561c0225bfb0b7
|
[
"CC0-1.0"
] |
permissive
|
weecology/EvergladesWadingBird
|
82c8b7e2d5179433813c430498bcddeb440b352b
|
46ab55289a0c4b5ed326ddb8b802c810009a3504
|
refs/heads/main
| 2023-08-31T05:20:50.368672
| 2023-08-29T18:07:56
| 2023-08-29T18:07:56
| 238,316,459
| 3
| 2
|
CC0-1.0
| 2023-09-11T21:19:55
| 2020-02-04T22:04:09
|
R
|
UTF-8
|
R
| false
| false
| 3,873
|
r
|
old_counts.R
|
## Used to clean count data 1994 - 2020 into standard long format
## Done one year at a time, format is different every year
## G. Yenni
source("~/EvergladesWadingBird/DataCleaningScripts/clean_counts.R")
# All original data files containing count data were pulled into a separate directory
# Original files:
# [1] "2000 appendix.xls"
# [2] "2001 appendix .xls"
# [3] "2002 appendix.xls"
# [4] "2004 appendix.xls"
# [5] "2004 raw survey data all colonies_Found20130128 (Autosaved).xls"
# [6] "94.xlsx"
# [7] "96appendix.xls"
# [8] "97colappend.xls"
# [9] "98 appendix.xls"
# [10] "99 Appendix.xls"
# [11] "Aerial Photo Counts 2005.xls"
# [12] "Aerial Photo Counts 2006.xls"
# [13] "Aerial Transect Data 2005.xls"
# [14] "Aerial Transect Data 2006.xls"
# [15] "Aerial Transect Data 2007.xls"
# [16] "Aerial Transect Data 2008.xls"
# [17] "Aerial Transect Data 2009.xls"
# [18] "Alley North drone counts_2019.xlsx"
# [19] "Breeding Birds 20031.xls"
# [20] "Flight survey data_2013.xls"
# [21] "Flight survey data_2015.xlsx"
# [22] "Flight survey data_2016.xlsx"
# [23] "Flight survey data_2017.xlsx"
# [24] "Flight Survey Data_2018.xlsx"
# [25] "Ground Survey Data 2007.xls"
# [26] "Ground Survey Data 2008.xls"
# [27] "ground survey data 2013.xls"
# [28] "ground survey data 2014.xlsx"
# [29] "ground survey data 2015.xlsx"
# [30] "ground survey data 2017.xlsx"
# [31] "Ground Survey Data_2019.xlsx"
# [32] "Ground Surveys 2005.xls"
# [33] "Ground Surveys 2006.xls"
# [34] "Ground Surveys 2009.xls"
# [35] "Photo Count Data_2018.xlsx"
# [36] "Photo Counts_2017.xlsx"
# [37] "Picture_Counts_2013.xls"
# [38] "Picture_Counts_2015.xlsx"
# [39] "Picture_Counts_2016.xlsx"
# [40] "2021_WB PeakCounts_Preliminary.xls"
# [41] "Preliminary Max Counts_2020.xlsx"
# [42] "Wading Bird_2019_Final Nest Numbers_working draft.xlsx"
## Build the cleaned tables and write them out.
## NOTE(review): `files` is an alphabetical recursive listing of a local
## download folder, so files[41]/files[33] are positional and fragile —
## confirm they still point at the intended 2020/2021 count workbooks.
files <- list.files("~/Downloads/countdata", full.names = TRUE, recursive = TRUE)
dat20 <- clean_count_data(files[41], 2020)
dat21 <- clean_count_data(files[33], 2021)
## `counts`, `species` and `colonies` are not created in this script —
## presumably they come from sourcing clean_counts.R above; verify before running.
counts <- counts %>% dplyr::arrange(counts)
# quote = 9: quote only column 9 in the output CSV
write.csv(counts, "Counts/maxcounts.csv", row.names = FALSE, na = "", quote = 9)
species <- species %>% dplyr::arrange(species)
# quote = 5:25: quote columns 5 through 25
write.csv(species, "SiteandMethods/species_list.csv", row.names = FALSE, na = "", quote = 5:25)
colonies <- colonies %>% dplyr::arrange(colony)
# quote = c(7,8): quote columns 7 and 8
write.csv(colonies, "SiteandMethods/colonies.csv", row.names = FALSE, na = "", quote = c(7,8))
|
4f79cdd3e3ec062f1c7016434248bf17717ff74c
|
34289a04a4dd4088079d4598faee0d3d4e41fea0
|
/Script/eplot.r
|
908e58b1a46ff43f367bd3fb18a0295af6255fad
|
[
"MIT"
] |
permissive
|
david-beauchesne/Predict_interactions
|
6034004897860ced2ed47d4ba503330a5374a7b0
|
bcddde0b04325a7c8a64467d4adcf8f13d7208c5
|
refs/heads/master
| 2020-05-21T20:44:17.918693
| 2018-06-27T18:40:12
| 2018-06-27T18:40:12
| 65,501,383
| 7
| 1
| null | 2016-08-12T14:35:17
| 2016-08-11T21:03:01
|
R
|
UTF-8
|
R
| false
| false
| 232
|
r
|
eplot.r
|
# Creating an empty plot
## Set up an empty plotting canvas: no box, annotations, axes, or points —
## only the coordinate system [xmin, xmax] x [ymin, ymax] is established so
## that subsequent low-level calls (points(), lines(), text(), ...) can draw.
eplot <- function(x = 1, y = 1, xmin = 0, xmax = 1, ymin = 0, ymax = 1) {
  plot(
    x = x,
    y = y,
    type = "n",   # draw nothing
    bty = "n",    # no box around the plot region
    ann = FALSE,  # no titles or axis labels
    xaxt = "n",   # suppress x axis
    yaxt = "n",   # suppress y axis
    bg = "grey",  # kept from the original; has no visible effect with type = "n"
    xlim = c(xmin, xmax),
    ylim = c(ymin, ymax)
  )
}
|
d0d6d950fe380c2d758e8009ecd2273a0f59045b
|
ebbe08d58a57ae2e9d308a12df500e1e0ef8d098
|
/scRef/codes/scripts/figure1.R
|
86d491e6ea375c651afb6b97fc85ae1aadab6234
|
[
"MIT"
] |
permissive
|
Drizzle-Zhang/bioinformatics
|
a20b8b01e3c6807a9b6b605394b400daf1a848a3
|
9a24fc1107d42ac4e2bc37b1c866324b766c4a86
|
refs/heads/master
| 2022-02-19T15:57:43.723344
| 2022-02-14T02:32:47
| 2022-02-14T02:32:47
| 171,384,799
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,653
|
r
|
figure1.R
|
## Figure 1: UMAP scatter plots of the PBMC3k dataset, once coloured by a
## coarse 4-group annotation (+ "Unassigned"), and once after randomly
## relabelling a subset of Cell 1/Cell 2 cells as "Unassigned".
library(Seurat)
library(ggplot2)
library(SeuratData)
data("pbmc3k")
pbmc3k.tag <- pbmc3k$seurat_annotations
table(pbmc3k.tag)
## Collapse the Seurat annotations into four coarse groups + "Unassigned";
## '0' marks cells not covered by any rule below (they are dropped later).
new.tag <- rep('0', length(pbmc3k.tag))
new.tag[pbmc3k.tag %in% c('Naive CD4 T', 'Memory CD4 T', 'CD8 T')] <- 'Cell 1'
new.tag[pbmc3k.tag %in% c('CD14+ Mono', 'FCGR3A+ Mono')] <- 'Cell 2'
new.tag[pbmc3k.tag %in% c('NK')] <- 'Cell 3'
new.tag[pbmc3k.tag %in% c('B')] <- 'Cell 4'
new.tag[pbmc3k.tag %in% c('Platelet', 'DC')] <- 'Unassigned'
pbmc <- CreateSeuratObject(counts = pbmc3k@assays$RNA@counts[, new.tag != '0'])
pbmc@meta.data$new.tag <- new.tag[new.tag != '0']
## Randomly relabel up to 200 sampled cells from Cell 1/Cell 2 as "Unassigned".
## NOTE(review): no set.seed(), so the sample differs between runs —
## confirm whether reproducibility matters here.
cell_ids <- colnames(pbmc@assays$RNA@counts)
cell_ids_sample <- sample(cell_ids, 200)
unknown.tag <- new.tag[new.tag != '0']
unknown.tag[(cell_ids %in% cell_ids_sample) & (unknown.tag %in% c('Cell 1', 'Cell 2'))] <- 'Unassigned'
pbmc@meta.data$unknown.tag <- unknown.tag
## Standard Seurat pipeline down to a 2-D UMAP embedding.
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
pbmc <- FindVariableFeatures(pbmc, selection.method = "vst", nfeatures = 2000)
pbmc <- ScaleData(pbmc)
pbmc <- RunPCA(pbmc, features = VariableFeatures(object = pbmc))
pbmc <- RunUMAP(pbmc, dims = 1:10)
## Fix: hue_pal() lives in the scales package, which is never attached here
## (Seurat/ggplot2 only *import* it), so the bare call would fail with
## "could not find function \"hue_pal\"". Use the namespaced form.
plot.1 <-
    DimPlot(pbmc, reduction = "umap", label = F, pt.size = 0.15, group.by = 'new.tag') +
    labs(x = 'Dim-1', y = 'Dim-2') +
    scale_color_manual(values = c(scales::hue_pal()(4), 'DimGray'),
                       breaks = c(names(table(pbmc$new.tag)))) +
    theme_bw() +
    theme(axis.text = element_text(size = 9),
          panel.grid = element_blank(),
          axis.title = element_text(size = 12),
          legend.text = element_text(size = 11))
plot.2 <-
    DimPlot(pbmc, reduction = "umap", label = F, pt.size = 0.15, group.by = 'unknown.tag') +
    labs(x = 'Dim-1', y = 'Dim-2') +
    scale_color_manual(values = c(scales::hue_pal()(4), 'DimGray'),
                       breaks = c(names(table(pbmc$unknown.tag)))) +
    theme_bw() +
    theme(axis.text = element_text(size = 9),
          panel.grid = element_blank(),
          axis.title = element_text(size = 12),
          legend.text = element_text(size = 11))
ggsave(plot = plot.1, path = '/home/zy/scRef/figure', filename = 'scatter_4.png',
       units = 'cm', height = 10, width = 14)
ggsave(plot = plot.2, path = '/home/zy/scRef/figure', filename = 'scatter_unassigned_4.png',
       units = 'cm', height = 10, width = 14)
|
f27b19bc00b626f96d6f9a6877b2a024a0fcb1ff
|
da928003b54ed1c4e016eef8ff663069554bf925
|
/R/api/ffa_league.R
|
4169051a178bb50967ad54a03134f59caea0137b
|
[] |
no_license
|
GiulSposito/DudesFantasyFootball
|
c3c412a697f073c48aec61ad637d0e9a79ea3007
|
eccdd274cac718de2c7c099a6637b8bf8cdb2804
|
refs/heads/master
| 2021-09-21T14:03:25.540083
| 2021-09-08T16:14:52
| 2021-09-08T16:14:52
| 153,347,513
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,653
|
r
|
ffa_league.R
|
source("./R/api/ffa_api.R")
## Fetch the league settings payload for `.leagueId` from the FFA API.
ffa_league_settings <- function(.authToken, .leagueId){
  query_params <- list(
    "appKey" = "internalemailuse",
    "leagueId" = .leagueId
  )
  ffa_api(.path = "/v2/league/settings",
          .query = query_params,
          .auth = .authToken)
}
# return the teams of a league
## Return the teams of a league.
## Fix: the result was captured in an unused local (`players <-`), which
## also made the function return its value invisibly.
ffa_league_teams <- function(.authToken, .leagueId){
  ffa_api(
    .path = "/v2/league/teams",
    .query = list(
      "appKey" = "internalemailuse",
      "leagueId" = .leagueId
    ),
    .auth = .authToken)
}
# return the matchups of a league for a given week (with rosters, forcing playoffs)
## Return the matchups of a league for week `.week`, including each team's
## roster and forcing playoff brackets to be included.
## Fix: the result was captured in an unused local (`players <-`), which
## also made the function return its value invisibly.
ffa_league_matchups <- function(.authToken, .leagueId, .week){
  ffa_api(
    .path = "/v2/league/matchups",
    .query = list(
      "appKey" = "internalemailuse",
      "leagueId" = .leagueId,
      "week" = .week,
      "includeRosters" = 1,
      "forcePlayoffs" = 1
    ),
    .auth = .authToken)
}
# return the standings of a league for a given week
## Return the standings of a league for week `.week`.
## Fix: the result was captured in an unused local (`players <-`), which
## also made the function return its value invisibly.
ffa_league_standings <- function(.authToken, .leagueId, .week){
  ffa_api(
    .path = "/v2/league/standings",
    .query = list(
      "appKey" = "internalemailuse",
      "week" = .week,
      "leagueId" = .leagueId
    ),
    .auth = .authToken)
}
# return a single team's roster for a given week
## Return one team's data (roster) for week `.week`. Uses the same
## /v2/league/teams endpoint as ffa_league_teams(), narrowed by teamId/week.
## Fix: the result was captured in an unused local (`players <-`), which
## also made the function return its value invisibly.
ffa_league_team_roster <- function(.authToken, .leagueId, .teamId, .week){
  ffa_api(
    .path = "/v2/league/teams",
    .query = list(
      "appKey" = "internalemailuse",
      "leagueId" = .leagueId,
      "teamId" = .teamId,
      "week" = .week
    ),
    .auth = .authToken)
}
# return the matchup recap of a team for a given week
## Return the matchup recap for team `.teamId` in week `.week`.
## Fix: the result was captured in an unused local (`players <-`), which
## also made the function return its value invisibly.
ffa_league_matchups_recap <- function(.authToken, .leagueId, .week,.teamId){
  ffa_api(
    .path = "/v2/league/team/matchuprecap",
    .query = list(
      "appKey" = "internalemailuse",
      "leagueId" = .leagueId,
      "week" = .week,
      "teamId" = .teamId
    ),
    .auth = .authToken)
}
## Flatten a matchup-recap API response into a one-row tibble.
##
## Returns NULL when the response carries no content. The away/home team
## id, name and coach points become wide columns (teamId.away, name.home,
## ...); paragraphs and league notes are kept as list-columns.
ffa_extractRecap <- function(recapResp){
  if (length(recapResp$content)==0) return(NULL)
  ## Fix: dropped the trailing `%>% return()` — the pipeline is already the
  ## function's last expression, and piping into return() is non-idiomatic.
  tibble(
    team = c("away","home"),
    teamId = recapResp$content$teams$id,
    name = recapResp$content$teams$name,
    coachPoints = recapResp$content$teams$coach_points
  ) %>%
    pivot_wider(names_from="team", values_from=c(teamId, name, coachPoints), names_sep=".") %>%
    mutate(
      title = recapResp$content$title,
      week = recapResp$content$week_num,
      paragraphs = list(tibble(recapResp$content$paragraphs)),
      # NOTE: "leagueHighligths" is misspelled but kept — renaming the
      # column would break downstream consumers.
      leagueHighligths = list(tibble(recapResp$content$league_notes))
    )
}
# extract the teams table from a league "teams" response
## Extract the teams of a league response into a tibble, one row per team,
## with integer id columns.
ffa_extractTeams <- function(teamsResp){
  # extract teams
  ## Fix: dropped the trailing `%>% return()` — the pipeline is already the
  ## function's last expression.
  teamsResp$content$games[[1]]$leagues[[1]]$teams %>%
    # turn the list of teams into a tibble
    tibble(team=.) %>%
    unnest_wider(team) %>%
    # coerce id columns to integer
    mutate(across(c(teamId, ownerUserId), as.integer))
}
# extract each team and its roster
## Extract each team and its roster from a league-matchups API response:
## one row per team, with list-columns holding the roster, the weekly
## statistics and the season statistics.
ffa_extractTeamsFromMatchups <- function(leagueMatchupsResp){
  # extract teams and rosters
  leagueMatchupsResp$content$games[[1]]$leagues[[1]]$teams %>%
    # turn the list of teams into a tibble, one row per team
    tibble(team=.) %>%
    unnest_wider(team) %>%
    # coerce id columns to integer
    mutate(across(c(teamId, ownerUserId), as.integer)) %>%
    # turn each team's roster list into a tibble
    mutate( rosters = map(rosters, function(r){
      r[[1]] %>%
        bind_rows(.id="slotPosition") %>%
        as_tibble() %>%
        mutate(across(rosterSlotId:playerId,as.integer)) %>%
        return()
    })) %>%
    # turn the weekly statistics into a tibble
    # NOTE(review): the season key "2021" is hard coded — confirm it is
    # updated (or parameterised) for other seasons.
    mutate( week.stats = map(stats, function(.stat){
      .stat$week$`2021` %>%
        tibble(week=names(.), week.stats=.) %>%
        unnest_wider(week.stats) %>%
        mutate( week = as.integer(week) ) %>%
        # teams with no "pts" entry get 0 points
        mutate( pts = ifelse("pts" %in% names(.), as.numeric(pts), as.numeric(0)) ) %>%
        return()
    })) %>%
    # turn the season statistics into a tibble
    mutate( season.stats = map(stats, function(.stat){
      .stat$season %>% tibble(season.stats=.) %>%
        unnest_wider(season.stats)
    }))
}
# extract the matchups
## Extract the matchups of a league response into a tibble, one row per
## matchup, widening the away/home team records into prefixed columns.
ffa_extractMatchups <- function(leagueMatchupsResp){
  # extract matchups
  ## Fix: dropped the trailing `%>% return()` — the pipeline is already the
  ## function's last expression.
  leagueMatchupsResp$content$games[[1]]$leagues[[1]]$matchups %>%
    tibble(matchups=.) %>%
    unnest_wider(matchups) %>%
    unnest_wider(awayTeam, names_sep=".") %>%
    unnest_wider(homeTeam, names_sep=".") %>%
    # coerce week and team id columns to integer
    mutate(across(c(week, ends_with("teamId")), as.integer))
}
|
1e5b134edb204eb5f62356ef178d6d52638ddd7f
|
c0edfdfa9df200f6a5bc124d37d83bc26850b011
|
/inst/Shiny/MSMplus/joint_lab_separate/4.predict/server_visit.R
|
9c190bb3818a26fb2af07a9a7a74cdbbbef222e4
|
[
"MIT"
] |
permissive
|
nskourlis/rpkg
|
394ed58cc5859a3777406c5e05869ad7f9c8788b
|
2944993edce3c3c3f49fe27b6b34f0ca7b36927a
|
refs/heads/master
| 2023-06-11T01:19:26.177563
| 2021-06-24T16:18:19
| 2021-06-24T16:18:19
| 378,363,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 98,251
|
r
|
server_visit.R
|
###### Show and hide and tick inputs ####
## Countdown value used to keep the tick-input panel visible briefly after
## startup, regardless of the radio-button state.
timerv <- reactiveVal(1.5)
## Toggle the axis-tick options panel with the "Show axis tick options"
## radio button; invalidateLater(1000, session) re-runs this roughly once
## per second so the countdown below advances.
observeEvent(c(input$showtickvis,invalidateLater(1000, session)), {
  if(input$showtickvis=="No"){
    hide("tickinputvisit")
  }
  if(input$showtickvis=="Yes"){
    show("tickinputvisit")
  }
  isolate({
    ## Decrement the countdown on each tick; while it is still above 1 the
    ## panel is forced visible even when the radio says "No".
    ## NOTE(review): timerv starts at 1.5 and drops to 0.5 on the first
    ## tick, so the `timerv() > 1` branch appears unreachable after the
    ## first decrement — confirm the intended startup-grace behaviour.
    timerv(timerv()-1)
    if(timerv()>1 & input$showtickvis=="No")
    {
      show("tickinputvisit")
    }
  })
})
################################################
## Presence flags (1/0) for the optional components of the uploaded
## predictions: visit probabilities, their ratios (`visitr`), and their
## differences (`visitd`).
existvisit <- reactive({
  if (length(myjson2()$visit) != 0) 1 else 0
})

existvisitratio <- reactive({
  if (length(myjson2()$visitr) != 0) 1 else 0
})

existvisitdiff <- reactive({
  if (length(myjson2()$visitd) != 0) 1 else 0
})
output$pagevisit <- renderUI({
  ## Build the "Visit probabilities" page.
  ## Fix: the original repeated the whole page layout four times (one
  ## branch per combination of existvisitdiff()/existvisitratio()); the
  ## branches differed only in whether the "Differences" and "Ratios" tabs
  ## hold a plot or a "Not applicable" placeholder. The two variable tabs
  ## are now built once, conditionally. This also removes small accidental
  ## inconsistencies between the duplicated branches (some omitted
  ## shinyjs::useShinyjs(), which is harmless to include everywhere).
  if (is.null(myjson2())) return("Provide the json file with the predictions")
  if (existvisit() == 0) {
    ## No visit predictions in the upload: placeholder page.
    return(fluidRow(
      tags$style(type="text/css",
                 ".shiny-output-error { visibility: hidden; }",
                 ".shiny-output-error:before { visibility: hidden; }"
      ),
      column(12,
             shinyjs::useShinyjs(),
             output$loginpagevisit <- renderUI({h1("Non applicable")})
      )
    ))
  }
  ## "Differences" tab: plot when difference data exist, placeholder otherwise.
  ## (print("Not applicable") also echoes to the console, as in the original.)
  diffTab <- if (existvisitdiff() == 1) {
    tabPanel(h2("Differences"), value = "#panel4vis",
             plotlyOutput("visit_diff", height="600px", width = "100%"),
             uiOutput("shouldloadvis4"))
  } else {
    tabPanel(h2("Differences"), value = "#panel4vis", print("Not applicable"))
  }
  ## "Ratios" tab likewise.
  ratioTab <- if (existvisitratio() == 1) {
    tabPanel(h2("Ratios"), value = "#panel5vis",
             plotlyOutput("visit_ratio", height="600px", width = "100%"),
             uiOutput("shouldloadvis5"))
  } else {
    tabPanel(h2("Ratios"), value = "#panel5vis", print("Not applicable"))
  }
  fluidRow(
    tags$style(type="text/css",
               ".shiny-output-error { visibility: hidden; }",
               ".shiny-output-error:before { visibility: hidden; }"
    ),
    column(2,
           shinyjs::useShinyjs(),
           h1("Visit probabilities"),
           ## The faceting control is hidden on the combined tab (#panel3vis).
           conditionalPanel(condition="input.tabsvis =='#panel1vis'||input.tabsvis =='#panel2vis'||input.tabsvis=='#panel4vis'||input.tabsvis =='#panel5vis'",
                            uiOutput("facetvis")
           ),
           uiOutput("confvis")
    ),
    column(2,
           br(),
           p(""),
           radioButtons("showtickvis", "Show axis tick options",
                        choices = list("No" = "No",
                                       "Yes" = "Yes"), selected = "No"),
           uiOutput("tickinputvisit")
    ),
    column(7,
           tabsetPanel(id = "tabsvis",
                       tabPanel(h2("By state"), value = "#panel1vis", plotlyOutput("visit_state" , height="600px", width = "100%"), uiOutput("shouldloadvis1")),
                       tabPanel(h2("By covariate pattern"), value = "#panel2vis", plotlyOutput("visit_cov" , height="600px", width = "100%"), uiOutput("shouldloadvis2")),
                       tabPanel(h2("By state and covariate pattern"), value = "#panel3vis", plotlyOutput("visit_both" , height="600px", width = "100%"), uiOutput("shouldloadvis3")),
                       diffTab,
                       ratioTab
           )
    )
  )
})
## Disable the visit tab when the uploaded predictions contain no element
## whose name starts with "Visit" (JSON upload and CSV upload respectively).
observeEvent(input$json2, {
  uploadedNames <- names(fromJSON(input$json2$datapath, flatten=TRUE))
  visitCount <- length(which(startsWith(uploadedNames, 'Visit')))
  if (visitCount == 0) {
    js$disableTab("mytab_vis")
  }
})
observeEvent(input$csv2, {
  uploadedNames <- names(read.table(input$csv2$datapath, header=TRUE, sep=","))
  visitCount <- length(which(startsWith(uploadedNames, 'Visit')))
  if (visitCount == 0) {
    js$disableTab("mytab_vis")
  }
})
#output$displayvisit <- renderUI({
# radioButtons("displayvisit", "Change labels of states and covariate patterns",
# c("same" = "same", "change"= "change"))
#})
## Radio control choosing whether the visit plots are faceted into grids.
output$facetvis <- renderUI({
  gridChoices <- c("No", "Yes")
  radioButtons(
    inputId = "facetvis",
    label = "Display graph in grids",
    choices = gridChoices,
    selected = "No"
  )
})
## Confidence-interval toggle: a Yes/No control when CI data are present in
## the upload, otherwise a No-only control plus an explanatory string.
output$confvis <- renderUI({
  hasCi <- length(myjson2()$ci_visit) != 0
  if (hasCi) {
    radioButtons("confvis", "Confidence intervals",
                 c("No" = "ci_no",
                   "Yes" = "ci_yes"))
  } else {
    ## print() returns its argument (the string goes into the UI) and, as
    ## in the original, also echoes it to the R console.
    tagList(
      radioButtons("confvis", "Confidence intervals", c("No" = "ci_no")),
      print("Confidence interval data were not provided")
    )
  }
})
##################################################
###### Will appear conditionally##################
###################################################
#Create the reactive input of covariates
#output$covarinputvis <- renderUI({
#
# if (is.null(myjson2())) return()
#
# if (input$displayvis=="same") return()
#
# else {
#
# item_list <- list()
# item_list[[1]] <- h2("Covariate patterns")
#
# v=vector()
# for (i in 1:length(myjson2()$cov$atlist)) {
# v[i]=myjson2()$cov$atlist[i]
# }
#
# default_choices_cov=v
# for (i in seq(length(myjson2()$cov$atlist))) {
# item_list[[i+1]] <- textInput(paste0('covvis', i),default_choices_cov[i], labels_cov()[i])
# }
#
# do.call(tagList, item_list)
# }
#})
#
#labels_covvis<- reactive ({
#
# if (input$displayvis=="same") {labels_cov()}
#
# else {
#
# myList<-vector("list",length(myjson2()$cov$atlist))
# for (i in 1:length(myjson2()$cov$atlist)) {
# myList[[i]]= input[[paste0('covvis', i)]][1]
# }
# final_list=unlist(myList, recursive = TRUE, use.names = TRUE)
# final_list
# }
#})
#
##Create the reactive input of states
#
#output$statesinputvis <- renderUI({
#
# if (input$displayvis=="same") return()
#
# else {
#
# item_list <- list()
# item_list[[1]] <- h2("States")
# default_choices_state=vector()
#
# title_choices_state=vector()
# for (i in 1:length(myjson2()$P)) {
# title_choices_state[i]=paste0("State"," ",input$select,selectend()[i])
# }
# for (i in 1:length(myjson2()$P)) {
#
# item_list[[1+i]] <- textInput(paste0('statevis',i),title_choices_state[i], labels_state()[i])
#
# }
# do.call(tagList, item_list)
# }
#})
#
#labels_statevis<- reactive ({
#
# if (input$displayvis=="same") {labels_state()}
# else {
#
# myList<-vector("list",length(myjson2()$P))
# for (i in 1:length(myjson2()$P)) {
#
# myList[[i]]= input[[paste0('statevis', i)]][1]
#
# }
# final_list=unlist(myList, recursive = TRUE, use.names = TRUE)
# final_list
# }
#})
##################################################################################
###################################################################################
## Reshape the visit-probability point estimates into a wide data frame:
## one probability column per covariate pattern (named via labels_cov()),
## plus `timevar` and an integer `state` index; states are stacked rowwise.
data_V <- reactive ({
  if(is.null(myjson2()))
    return()
  #Will give a certain shape to the probability data so that we have the
  #covariate patterns as variables and the states as groups
  timevar=as.data.frame(myjson2()$timevar)
  names(timevar)[1]<- "timevar"
  ## NOTE(review): `v` is built but never used below — confirm it can go.
  v=vector()
  for (i in 1:length(myjson2()$cov$atlist)) {
    v[i]=myjson2()$cov$atlist[i]
  }
  ## Different variable of probabilities for each covariate pattern
  vis=list()
  if (length(myjson2()$visit)==0) {return()}
  for(i in 1:length(myjson2()$visit)) {
    vis[[i]]=as.data.frame(t(data.frame(myjson2()$visit[i])))
    colnames(vis[[i]]) <-labels_cov()
  }
  for(i in 1:length(myjson2()$visit)) {
    vis[[i]]=as.data.frame(cbind(vis[[i]], timevar ,state=rep(i,nrow(vis[[i]] )) ))
  }
  # Append the probabilities datasets of the different states
  ## NOTE(review): the loop below assumes at least two states — `2:n` runs
  ## backwards when n == 1; confirm inputs always have >= 2 states.
  data_vis=list()
  data_vis[[1]]=vis[[1]]
  for (u in 2:(length(myjson2()$visit))) {
    data_vis[[u]]=rbind(vis[[u]],data_vis[[(u-1)]])
  }
  datav=data_vis[[length(myjson2()$visit)]]
  datav
})
## Same reshaping for the upper confidence bounds (`visit_uci`).
data_V_uci <- reactive ({
  if(is.null(myjson2())) return()
  #Will give a certain shape to the probability data so that we have the
  #covariate patterns as variables and the states as groups
  timevar=as.data.frame(myjson2()$timevar)
  names(timevar)[1]<- "timevar"
  v=vector()
  for (i in 1:length(myjson2()$cov$atlist)) {v[i]=myjson2()$cov$atlist[i]}
  ## Different variable of probabilities for each covariate pattern
  vis_uci=list()
  if (length(myjson2()$visit_uci)==0) {return()}
  for(i in 1:length(myjson2()$visit_uci)) {
    vis_uci[[i]]=as.data.frame(t(data.frame(myjson2()$visit_uci[i])))
    colnames(vis_uci[[i]]) <-labels_cov()
  }
  for(i in 1:length(myjson2()$visit_uci)) {
    vis_uci[[i]]=as.data.frame(cbind(vis_uci[[i]], timevar ,state=rep(i,nrow(vis_uci[[i]] )) ))
  }
  # Append the probabilities datasets of the different states
  data_V_uci=list()
  data_V_uci[[1]]=vis_uci[[1]]
  for (u in 2:(length(myjson2()$visit_uci))) {
    data_V_uci[[u]]=rbind(vis_uci[[u]],data_V_uci[[(u-1)]])
  }
  datav_uci=data_V_uci[[length(myjson2()$visit_uci)]]
  datav_uci
})
## Same reshaping for the lower confidence bounds (`visit_lci`).
data_V_lci <- reactive ({
  if(is.null(myjson2())) return()
  #Will give a certain shape to the probability data so that we have the
  #covariate patterns as variables and the states as groups
  timevar=as.data.frame(myjson2()$timevar)
  names(timevar)[1]<- "timevar"
  v=vector()
  for (i in 1:length(myjson2()$cov$atlist)) {v[i]=myjson2()$cov$atlist[i]}
  ## Different variable of probabilities for each covariate pattern
  vis_lci=list()
  if (length(myjson2()$visit_lci)==0) {return()}
  for(i in 1:length(myjson2()$visit_lci)) {
    vis_lci[[i]]=as.data.frame(t(data.frame(myjson2()$visit_lci[i])))
    colnames(vis_lci[[i]]) <-labels_cov()
  }
  for(i in 1:length(myjson2()$visit_lci)) {
    vis_lci[[i]]=as.data.frame(cbind(vis_lci[[i]], timevar ,state=rep(i,nrow(vis_lci[[i]] )) ))
  }
  # Append the probabilities datasets of the different states
  data_V_lci=list()
  data_V_lci[[1]]=vis_lci[[1]]
  for (u in 2:(length(myjson2()$visit_lci))) {
    data_V_lci[[u]]=rbind(vis_lci[[u]],data_V_lci[[(u-1)]])
  }
  datav_lci=data_V_lci[[length(myjson2()$visit_lci)]]
  datav_lci
})
## Add a readable ordered factor `state_fac` (labelled via labels_state())
## to data_V(), mapping each integer state index to its display label.
data_V_st<-reactive ({
  datanew=data_V()
  datanew$state_fac=ordered(c(rep("NA",nrow(datanew))), levels = labels_state() )
  for (o in 1:(length(myjson2()$visit))) {
    for (g in 1:nrow(datanew)) {
      if (datanew$state[g]==o) {datanew$state_fac[g]=labels_state()[o] }
    }
  }
  datanew
})
## Same labelling for the upper-CI frame.
data_V_st_uci<-reactive ({
  datanew=data_V_uci()
  datanew$state_fac=ordered(c(rep("NA",nrow(datanew))), levels = labels_state() )
  for (o in 1:(length(myjson2()$visit_uci))) {
    for (g in 1:nrow(datanew)) {
      if (datanew$state[g]==o) {datanew$state_fac[g]=labels_state()[o] }
    }
  }
  datanew
})
## Same labelling for the lower-CI frame.
data_V_st_lci<-reactive ({
  datanew=data_V_lci()
  datanew$state_fac=ordered(c(rep("NA",nrow(datanew))), levels = labels_state() )
  for (o in 1:(length(myjson2()$visit_lci))) {
    for (g in 1:nrow(datanew)) {
      if (datanew$state[g]==o) {datanew$state_fac[g]=labels_state()[o] }
    }
  }
  datanew
})
## Melt the wide per-covariate columns of data_V_st() into long format:
## a single probability column `V`, with `state`/`state_factor` and
## `cov`/`cov_factor` identifying the subgroup of each row.
data_V_d <- reactive ({
  ### Make one variable of probabilities so now, states and covariate patterns
  ### define subgroups of the dataset
  dlist=list()
  for (d in 1:length(myjson2()$cov$atlist)) {
    ## Columns picked positionally: d-th covariate column, then the last
    ## three columns of data_V_st() (timevar, state, state_fac).
    dlist[[d]]=cbind.data.frame(data_V_st()[,d],data_V_st()[,ncol(data_V_st())-2],data_V_st()[,ncol(data_V_st())-1],data_V_st()[,ncol(data_V_st())],rep(d,length(data_V_st()[,d])) )
    dlist[[d]][,6] <- rep(colnames(data_V_st())[d],length(data_V_st()[,d]))
    colnames(dlist[[d]]) <- c("V","timevar","state","state_factor","cov","cov_factor")
  }
  d_all_v <- bind_rows(dlist, .id = "column_label")
  d_all_v
})
## Same melting for the upper confidence bounds.
data_V_d_uci <- reactive ({
  ### Make one variable of probabilities so now, states and covariate patterns
  ### define subgroups of the dataset
  dlist=list()
  for (d in 1:length(myjson2()$cov$atlist)) {
    dlist[[d]]=cbind.data.frame(data_V_st_uci()[,d],data_V_st_uci()[,ncol(data_V_st_uci())-2],
                                data_V_st_uci()[,ncol(data_V_st_uci())-1],
                                data_V_st_uci()[,ncol(data_V_st_uci())],rep(d,length(data_V_st_uci()[,d])) )
    dlist[[d]][,6] <- rep(colnames(data_V_st_uci())[d],length(data_V_st_uci()[,d]))
    colnames(dlist[[d]]) <- c("V","timevar","state","state_factor","cov","cov_factor")
  }
  d_all_v_uci <- bind_rows(dlist, .id = "column_label")
  d_all_v_uci
})
## Same melting for the lower confidence bounds.
data_V_d_lci <- reactive ({
  ### Make one variable of probabilities so now, states and covariate patterns
  ### define subgroups of the dataset
  dlist=list()
  for (d in 1:length(myjson2()$cov$atlist)) {
    dlist[[d]]=cbind.data.frame(data_V_st_lci()[,d],data_V_st_lci()[,ncol(data_V_st_lci())-2],
                                data_V_st_lci()[,ncol(data_V_st_lci())-1],
                                data_V_st_lci()[,ncol(data_V_st_lci())],rep(d,length(data_V_st_lci()[,d])) )
    dlist[[d]][,6] <- rep(colnames(data_V_st_lci())[d],length(data_V_st_lci()[,d]))
    colnames(dlist[[d]]) <- c("V","timevar","state","state_factor","cov","cov_factor")
  }
  d_all_v_lci <- bind_rows(dlist, .id = "column_label")
  d_all_v_lci
})
## Numeric controls for the visit plots: x-axis range/ticks, y-axis
## tick/range, and text sizes (defaulting from the global input$textsize).
## Fixes: removed an unused local vector of colour names (which also
## contained the invalid colour name "rosybrow2"); hoisted the repeated
## data_V_d()$timevar min/max computation; replaced the item_list/do.call
## accumulation with a direct tagList().
output$tickinputvisit <- renderUI({
  if (is.null(myjson2())) return()
  times <- data_V_d()$timevar
  tmax <- max(times)
  tagList(
    h2("Provide x axis range and ticks"),
    numericInput("startvx", "Start x at:", value = min(times), min = 0, max = tmax),
    ## max(times / 10) == max(times) / 10, so the original default is kept.
    numericInput("stepvx", "step:", value = tmax / 10, min = 0, max = tmax),
    numericInput("endvx", "End x at:", value = tmax, min = 0, max = tmax),
    numericInput("stepvy", "step at y axis:", value = 0.2, min = 0.001, max = 1),
    numericInput("endvy", "End y at:", value = 1, min = 0, max = 1),
    numericInput("textsizevis", h2("Legends size"), value = input$textsize, min = 5, max = 30),
    numericInput("textfacetvis", h2("Facet title size"), value = input$textsize - 3, min = 5, max = 30)
  )
})
## Build the polygon coordinates for shaded confidence ribbons: x runs
## forward (upper bound) then backward (lower bound), with matching y
## values, plus the frame (state) and colour (covariate) groupings.
## NOTE(review): the second halves of `x` and `y_central` take their row
## order from data_V_d() while `y` orders by data_V_d_uci() — presumably
## all three frames share the same row order, but confirm; a mismatch
## would silently mis-pair the ribbon coordinates.
data_V_ci<- reactive ({
  x=c( data_V_d()[order(data_V_d()$timevar,data_V_d()$state,data_V_d()$cov),]$timevar,
       data_V_d_lci()[order(-data_V_d()$timevar,data_V_d()$state,data_V_d()$cov),]$timevar )
  y_central=c( data_V_d()[order(data_V_d()$timevar,data_V_d()$state,data_V_d()$cov),]$V,
               data_V_d()[order(-data_V_d()$timevar,data_V_d()$state,data_V_d()$cov),]$V )
  y=c( data_V_d_uci()[order(data_V_d_uci()$timevar,data_V_d_uci()$state,data_V_d_uci()$cov),]$V,
       data_V_d_lci()[order(-data_V_d_uci()$timevar,data_V_d_uci()$state,data_V_d_uci()$cov),]$V )
  frameto=c(as.character(data_V_d_uci()[order(-data_V_d_uci()$timevar,data_V_d_uci()$state,data_V_d_uci()$cov),]$state_factor),
            as.character(data_V_d_lci()[order(-data_V_d_lci()$timevar,data_V_d_lci()$state,data_V_d_lci()$cov),]$state_factor) )
  covto=c( data_V_d_uci()[order(-data_V_d_uci()$timevar,data_V_d_uci()$state,data_V_d_uci()$cov),]$cov_factor,
           data_V_d_lci()[order(-data_V_d_lci()$timevar,data_V_d_lci()$state,data_V_d_lci()$cov),]$cov_factor )
  data=data.frame(x,y,frameto,covto,y_central)
  data
})
######################################
#output$shouldloadvis1 <- renderUI({
# if (is.null((myjson2()))) return()
# downloadButton(outputId = "downplotvis1", label = h2("Download the plot"))
#})
  datavis1_re <- reactive ({
    # Plot 1: probability of visit over time. Animation frame = state,
    # colour = covariate pattern. Four variants selected by the user:
    #   confvis  "ci_no"/"ci_yes" -> without/with confidence bands
    #   facetvis "No"/"Yes"       -> animated plotly vs faceted ggplot
    # Returns a plotly object in every branch.
    ####### Plot 1 frame is state, factor is cov ########################
    if (input$confvis=="ci_no") {
      # --- no CI, animated plotly: one frame per state ---
      if (input$facetvis=="No") {
        vis_state= plot_ly(data_V_d(),alpha=0.5) %>%
          add_lines(
            x=data_V_d()$timevar,y=data_V_d()$V,
            frame=factor(as.factor(data_V_d()$state_factor),levels = labels_state()),
            color=factor(as.factor(data_V_d()$cov_factor),levels = labels_cov()),
            colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)],
            mode="lines",
            line=list(simplify=FALSE,color = labels_colour_cov()) ,
            text = 'Select or deselect lines by clicking on the legend',
            hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                  "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>") )
        # axes/ticks are driven by the numeric inputs rendered above
        # (startvx/stepvx/endvx/stepvy); the zero-width grey rect is a
        # placeholder shape the user can drag (shapePosition = TRUE)
        vis_state = vis_state %>%
          layout(title=list(text="Probability of visit for each covariate pattern among states",y=0.95),
                 font= list(family = "times new roman", size = input$textsizevis, color = "black"),
                 margin = list(l = 50, r = 50, b = 30, t = 70),
                 xaxis=list(title=list(text="Time since entry",y=0.2),
                            dtick = input$stepvx,
                            tick0 = input$startvx,
                            range=c(input$startvx,input$endvx),
                            ticklen = 5,
                            tickwidth = 2,
                            tickcolor = toRGB("black"),
                            tickmode = "linear"),
                 yaxis =list(title= "Probability of visit",rangemode = "nonnegative",
                             dtick = input$stepvy,
                             ticklen = 5,
                             tickwidth = 2,
                             tickcolor = toRGB("black")),
                 shapes = list(
                   list(type = "rect",
                        fillcolor = "grey",
                        line = list(color = "grey"),
                        opacity = 0.8,
                        x0 = 0, x1 =0, xref = "x", y0 = 0, y1 = 1, yref = "y") ) )%>%
          animation_opts(frame = 1000, transition = 0, redraw = FALSE)%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
      }
      # --- no CI, faceted ggplot (one panel per state), converted to plotly ---
      if (input$facetvis=="Yes") {
        data_plot=data_V_d()
        vis_state = ggplot(data_plot)
        vis_state = ggplot(data_plot,aes(x=timevar, y=V, color=factor(as.factor(cov_factor),levels=labels_cov()), group=1,
                                         text=paste("Select or deselect lines by clicking on the legend",
                                                    "<br>Time: ", timevar,
                                                    "<br>Probability of visit: ", V,
                                                    "<br>Covariate pattern: ", factor(as.factor(cov_factor),levels=labels_cov()))))
        vis_state = vis_state+geom_line(aes(x=timevar, y=V, color= factor(as.factor(cov_factor),levels=labels_cov())))+
          scale_colour_manual( values =labels_colour_cov(),labels = labels_cov() )
        # "compare" pins the facets to two rows; "present" lets ggplot choose
        if (input$aimtype=="compare") { vis_state = vis_state+ facet_wrap(~ factor(as.factor(state_factor),levels = labels_state()), nrow=2)}
        else if (input$aimtype=="present") {vis_state = vis_state+ facet_wrap(~factor(as.factor(state_factor),levels = labels_state()))}
        vis_state = vis_state + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$stepvx ))) +
          scale_y_continuous(breaks=c(seq(0,input$endvy,by=input$stepvy )))
        vis_state = vis_state +labs(title="Probability of visit for each covariate pattern among states", x="Time since entry", y="Probability of visit")
        vis_state = vis_state + labs(color = "Covariate\npatterns")+ labs(fill = "Covariate\npatterns")
        vis_state = vis_state +theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
                                     legend.title = element_text(color="black", size= input$textsizevis-5),
                                     legend.text=element_text(size= input$textsizevis-6),
                                     plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
                                     legend.margin = margin(1.5, 1, 1, 1, "cm"),
                                     legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
                                     axis.title.y = element_text(size= input$textsizevis-5),
                                     axis.title.x = element_text(size= input$textsizevis-5),
                                     axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
        # tooltip = "text" shows only the custom hover text built in aes()
        vis_state = ggplotly(vis_state, tooltip = "text")%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        vis_state
      }
    }
    else if (input$confvis=="ci_yes") {
      # --- with CI, animated plotly: line trace + "tozerox" ribbon trace
      #     built from the out-and-back coordinates in data_V_ci() ---
      if (input$facetvis=="No") {
        vis_state <- plot_ly()
        vis_state <- add_trace(vis_state, line=list(simplify=FALSE,color = labels_colour_cov()),
                               mode="lines", type = "scatter",
                               x=data_V_ci()$x, y=data_V_ci()$y_central,
                               frame=factor(as.factor(data_V_ci()$frameto),levels = labels_state()),
                               colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)],
                               color=factor(as.factor(data_V_ci()$covto) ,levels = labels_cov()),
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
        # second trace: the CI band itself (transparent outline, hidden legend)
        vis_state <- add_trace(vis_state, fill = "tozerox",
                               line=list(dash = "solid", color = "transparent", width = 1.8897637),
                               mode = "lines", type = "scatter",
                               x=data_V_ci()$x, y=data_V_ci()$y,
                               frame=factor(as.factor(data_V_ci()$frameto),levels = labels_state()),
                               colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)],
                               color=factor(as.factor(data_V_ci()$covto),levels = labels_cov()),
                               showlegend = FALSE,
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
        vis_state = vis_state %>%
          layout(title=list(text="Probability of visit for each covariate pattern among states",y=0.95),
                 font= list(family = "times new roman", size = input$textsizevis, color = "black"),
                 margin = list(l = 50, r = 50, b = 30, t = 70),
                 xaxis=list(title=list(text="Time since entry",y=0.2),
                            dtick = input$stepvx,
                            tick0 = input$startvx,
                            range=c(input$startvx,input$endvx),
                            ticklen = 5,
                            tickwidth = 2,
                            tickcolor = toRGB("black"),
                            tickmode = "linear"),
                 yaxis =list(title= "Probability of visit",rangemode = "nonnegative",
                             dtick = input$stepvy,
                             ticklen = 5,
                             tickwidth = 2,
                             tickcolor = toRGB("black")),
                 shapes = list(
                   list(type = "rect",
                        fillcolor = "grey",
                        line = list(color = "grey"),
                        opacity = 0.8,
                        x0 = 0, x1 =0, xref = "x", y0 = 0, y1 = 1, yref = "y") ) )%>%
          animation_opts(frame = 1000, transition = 0, redraw = FALSE)%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
      }
      # --- with CI, faceted ggplot: line + geom_ribbon band per panel ---
      if (input$facetvis=="Yes") {
        V_lci= data_V_d_lci()$V
        V_uci= data_V_d_uci()$V
        # assumes lci/uci rows align one-to-one with data_V_d() rows -- TODO confirm
        data_plot=cbind(data_V_d(),V_lci,V_uci)
        vis_state=ggplot(data_plot)
        vis_state=ggplot(data_plot,aes(x=timevar, y=V, color= factor(as.factor(cov_factor),levels=labels_cov()), group=1,
                                       text=paste("Select or deselect lines by clicking on the legend",
                                                  "<br>Time: ", timevar,
                                                  "<br>Probability of visit: ", V,
                                                  "<br>Covariate pattern: ", factor(as.factor(cov_factor),levels=labels_cov()))))+
          scale_colour_manual( values =labels_colour_cov(),labels = labels_cov() )
        vis_state=vis_state+geom_line(aes(x=timevar, y=V, fill= factor(as.factor(cov_factor),levels=labels_cov())))
        vis_state=vis_state+ geom_ribbon(aes(ymin = V_lci, ymax =V_uci,fill=factor(as.factor(cov_factor),levels=labels_cov())),alpha=0.4)+
          scale_fill_manual( values =labels_colour_cov(),labels = labels_cov() )
        if (input$aimtype=="compare") { vis_state = vis_state+ facet_wrap(~factor(as.factor(state_factor),levels = labels_state()), nrow=2)}
        else if (input$aimtype=="present") {vis_state = vis_state+ facet_wrap(~factor(as.factor(state_factor),levels = labels_state()))}
        vis_state = vis_state + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$stepvx ))) +
          scale_y_continuous(breaks=c(seq(0,input$endvy,by=input$stepvy )))
        vis_state = vis_state +labs(title="Probability of visit at each state", x="Time since entry", y="Probability of visit")
        vis_state = vis_state + labs(color = "Covariate\npatterns")+ labs(fill = "Covariate\npatterns")
        vis_state = vis_state +theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
                                     legend.title = element_text(color="black", size= input$textsizevis-5),
                                     legend.text=element_text(size= input$textsizevis-6),
                                     plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
                                     legend.margin = margin(1.5, 1, 1, 1, "cm"),
                                     legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
                                     axis.title.y = element_text(size= input$textsizevis-5),
                                     axis.title.x = element_text(size= input$textsizevis-5),
                                     axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
        vis_state = ggplotly(vis_state, tooltip = "text")%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        vis_state
      }
    }
    # vis_state was assigned in exactly one branch above; return it
    vis_state
  })
  # Render plot 1 and expose it as a PNG download via the plotly image API
  # (plotly_IMAGE requires valid plotly account credentials at runtime).
  output$visit_state <- renderPlotly ({ datavis1_re() })
  output$downplotvis1 <- downloadHandler(
    filename = function(){paste("vis1",'.png',sep='')},
    content = function(file){
      plotly_IMAGE( datavis1_re(),width = 1400, height = 1100, format = "png", scale = 2, out_file = file )
    }
  )
#####################################
#output$shouldloadvis2 <- renderUI({
# if (is.null((myjson2()))) return()
# downloadButton(outputId = "downplotvis2", label = h2("Download the plot"))
#})
datavis2_re <- reactive ({
if (input$confvis=="ci_no") {
if (input$facetvis=="No") {
vis_cov= plot_ly(data_V_d(),alpha=0.5) %>%
add_lines(
x=data_V_d()$timevar,y=data_V_d()$V,
frame=factor(as.factor(data_V_d()$cov_factor),levels = labels_cov()),
color=factor(as.factor(data_V_d()$state_factor),levels = labels_state()),
colors=labels_colour_state()[1:length(myjson2()$P)],
mode="lines",
line=list(simplify=FALSE,color = labels_colour_state()) ,
text = 'Select or deselect lines by clicking on the legend',
hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
"%{xaxis.title.text}: %{x:,}<br>","<extra></extra>") )
vis_cov = vis_cov %>%
layout(title=list(text="Probability of visit for each state among covariate patterns",y=0.95),
font= list(family = "times new roman", size = input$textsizevis, color = "black"),
margin = list(l = 50, r = 50, b = 30, t = 70),
xaxis=list(title=list(text="Time since entry",y=0.2),
dtick = input$stepvx,
tick0 = input$startvx,
range=c(input$startvx,input$endvx),
ticklen = 5,
tickwidth = 2,
tickcolor = toRGB("black"),
tickmode = "linear"),
yaxis =list(title= "Probability of visit",rangemode = "nonnegative",
dtick = input$stepvy,
ticklen = 5,
tickwidth = 2,
tickcolor = toRGB("black")),
shapes = list(
list(type = "rect",
fillcolor = "grey",
line = list(color = "grey"),
opacity = 0.8,
x0 = 0, x1 =0, xref = "x", y0 = 0, y1 = 1, yref = "y") ) )%>%
config(
toImageButtonOptions = list(
format = "png",
width = 1200,
height = 900,scale=input$figscale
), edits = list(
annotationPosition = TRUE,
annotationTail = TRUE,
annotationText = TRUE,
axisTitleText=TRUE,
colorbarTitleText=TRUE,
legendPosition=TRUE,
legendText=TRUE,
shapePosition=TRUE,
titleText=TRUE
) ,queueLength=10
)
if (input$smooth=="No") {
vis_cov= vis_cov %>%
animation_opts(frame = 1000, transition = 0, redraw = FALSE)
}
}
if (input$facetvis=="Yes") {
data_plot=data_V_d()
vis_cov = ggplot(data_plot)
vis_cov = ggplot(data_plot,aes(x=timevar, y=V, color= factor(as.factor(state_factor),levels=labels_state()), group=1,
text=paste("Select or deselect lines by clicking on the legend",
"<br>Time: ", timevar,
"<br>Probability of visit: ", V,
"<br>State: ", factor(as.factor(state_factor),levels=labels_state()))))
vis_cov = vis_cov+geom_line(aes(x=timevar, y=V, color= factor(as.factor(state_factor),levels=labels_state())))+
scale_colour_manual( values =labels_colour_state(),labels = labels_state() )
if (input$aimtype=="compare") {vis_cov = vis_cov+ facet_wrap(~factor(as.factor(cov_factor),levels = labels_cov()),nrow=2)}
else if (input$aimtype=="present") {vis_cov = vis_cov+ facet_wrap(~factor(as.factor(cov_factor),levels = labels_cov())) }
vis_cov = vis_cov + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$endvx ))) +
scale_y_continuous(breaks=c(seq(0,input$endvy,by=input$stepvy )))
vis_cov = vis_cov +labs(title="Probability of visit for each state among covariate patterns", x="Time since entry", y="Probability of visit")
vis_cov = vis_cov + labs(color = "States")+ labs(fill = "States")
vis_cov = vis_cov+theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
legend.title = element_text(color="black", size= input$textsizevis-5),
legend.text=element_text(size= input$textsizevis-6),
plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
legend.margin = margin(1.5, 1, 1, 1, "cm"),
legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
axis.title.y = element_text(size= input$textsizevis-5),
axis.title.x = element_text(size= input$textsizevis-5),
axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
vis_cov = ggplotly(vis_cov, tooltip = "text")%>%
config(
toImageButtonOptions = list(
format = "png",
width = 1200,
height = 900,scale=input$figscale
), edits = list(
annotationPosition = TRUE,
annotationTail = TRUE,
annotationText = TRUE,
axisTitleText=TRUE,
colorbarTitleText=TRUE,
legendPosition=TRUE,
legendText=TRUE,
shapePosition=TRUE,
titleText=TRUE
) ,queueLength=10
)
vis_cov
}
}
else if (input$confvis=="ci_yes") {
if (input$facetvis=="No") {
vis_cov <- plot_ly()
vis_cov <- add_trace(vis_cov, line=list(simplify=FALSE,color = labels_colour_cov()),
mode="lines", type = "scatter",
x=data_V_ci()$x, y=data_V_ci()$y_central,
frame=factor(as.factor(data_V_ci()$covto),levels = labels_cov()),
colors=labels_colour_state()[1:length(myjson2()$P)],
color=factor(as.factor(data_V_ci()$frameto) ,levels = labels_state()),
text = 'Select or deselect lines by clicking on the legend',
hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
"%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
vis_cov <- add_trace(vis_cov, fill = "tozerox",
line=list(dash = "solid", color = "transparent", width = 1.8897637),
mode = "lines", type = "scatter",
x=data_V_ci()$x, y=data_V_ci()$y,
frame=factor(as.factor(data_V_ci()$covto),levels = labels_cov()),
colors=labels_colour_state()[1:length(myjson2()$P)],
color=factor(as.factor(data_V_ci()$frameto) ,levels = labels_state()),
showlegend = FALSE,
text = 'Select or deselect lines by clicking on the legend',
hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
"%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
vis_cov = vis_cov %>%
layout(title=list(text="Probability of visit for each state among covariate patterns",y=0.95),
font= list(family = "times new roman", size = input$textsizevis, color = "black"),
margin = list(l = 50, r = 50, b = 30, t = 70),
xaxis=list(title=list(text="Time since entry",y=0.2),
dtick = input$stepvx,
tick0 = input$startvx,
range=c(input$startvx,input$endvx),
ticklen = 5,
tickwidth = 2,
tickcolor = toRGB("black"),
tickmode = "linear"),
yaxis =list(title= "Probability of visit",rangemode = "nonnegative",
dtick = input$stepvy,
ticklen = 5,
tickwidth = 2,
tickcolor = toRGB("black")),
shapes = list(
list(type = "rect",
fillcolor = "grey",
line = list(color = "grey"),
opacity = 0.8,
x0 = 0, x1 =input$area, xref = "x", y0 = 0, y1 = 1, yref = "y") ) )%>%
config(
toImageButtonOptions = list(
format = "png",
width = 1200,
height = 900,scale=input$figscale
), edits = list(
annotationPosition = TRUE,
annotationTail = TRUE,
annotationText = TRUE,
axisTitleText=TRUE,
colorbarTitleText=TRUE,
legendPosition=TRUE,
legendText=TRUE,
shapePosition=TRUE,
titleText=TRUE
) ,queueLength=10
)
if (input$smooth=="No") {
vis_cov= vis_cov %>%
animation_opts(frame = 1000, transition = 0, redraw = FALSE)
}
vis_cov
}
if (input$facetvis=="Yes") {
V_lci= data_V_d_lci()$V
V_uci= data_V_d_uci()$V
data_plot=cbind(data_V_d(),V_lci,V_uci)
vis_cov=ggplot(data_plot)
vis_cov=ggplot(data_plot,aes(x=timevar, y=V, color=factor(as.factor(state_factor),levels=labels_state()), group=1,
text=paste("Select or deselect lines by clicking on the legend",
"<br>Time: ", timevar,
"<br>Probability of visit: ", V,
"<br>State: ", factor(as.factor(state_factor),levels=labels_state()))))+
scale_colour_manual( values =labels_colour_state(),labels = labels_state() )
vis_cov=vis_cov+geom_line(aes(x=timevar, y=V, fill= factor(as.factor(state_factor),levels=labels_state())))
vis_cov=vis_cov+ geom_ribbon(aes(ymin = V_lci, ymax =V_uci,fill=factor(as.factor(state_factor),levels=labels_state())),alpha=0.4)+
scale_fill_manual( values =labels_colour_state(),labels = labels_state() )
if (input$aimtype=="compare") {vis_cov = vis_cov+ facet_wrap(~factor(as.factor(cov_factor),levels = labels_cov()),nrow=2)}
else if (input$aimtype=="present") {vis_cov = vis_cov+ facet_wrap(~factor(as.factor(cov_factor),levels = labels_cov())) }
vis_cov = vis_cov + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$stepvx )))
vis_cov = vis_cov +labs(title="Probability of visit for each state among covariate patterns", x="Time since entry", y="Probability of visit")
vis_cov = vis_cov + labs(color = "States")+ labs(fill = "States")
vis_cov = vis_cov+theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
legend.title = element_text(color="black", size= input$textsizevis-5),
legend.text=element_text(size= input$textsizevis-6),
plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
legend.margin = margin(1.5, 1, 1, 1, "cm"),
legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
axis.title.y = element_text(size= input$textsizevis-5),
axis.title.x = element_text(size= input$textsizevis-5),
axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
vis_cov = ggplotly(vis_cov, tooltip = "text")%>%
config(
toImageButtonOptions = list(
format = "png",
width = 1200,
height = 900,scale=input$figscale
), edits = list(
annotationPosition = TRUE,
annotationTail = TRUE,
annotationText = TRUE,
axisTitleText=TRUE,
colorbarTitleText=TRUE,
legendPosition=TRUE,
legendText=TRUE,
shapePosition=TRUE,
titleText=TRUE
) ,queueLength=10
)
vis_cov
}
}
vis_cov
})
  # Render plot 2 and expose it as a PNG download via the plotly image API
  # (plotly_IMAGE requires valid plotly account credentials at runtime).
  output$visit_cov <- renderPlotly ({ datavis2_re() })
  output$downplotvis2 <- downloadHandler(
    filename = function(){paste("vis2",'.png',sep='')},
    content = function(file){
      plotly_IMAGE( datavis2_re(),width = 1400, height = 1100, format = "png", scale = 2, out_file = file )
    }
  )
##########################################
#output$shouldloadvis3 <- renderUI({
# if (is.null((myjson2()))) return()
# downloadButton(outputId = "downplotvis3", label = h2("Download the plot"))
#})
  datavis3_re <- reactive ({
    # Plot 3: all states and covariate patterns on one static plot --
    # colour encodes covariate pattern, linetype encodes state (no
    # animation frames, no faceting). confvis toggles the CI ribbon.
    # Returns a plotly object.
    if (input$confvis=="ci_no") {
      ####### Plot 3 f factor is state and cov ########################
      v_cov_state = plot_ly(data_V_d(),alpha=0.5) %>%
        add_lines(
          x=data_V_d()$timevar,y=data_V_d()$V,
          color = as.factor(data_V_d()$cov_factor),
          fill =as.factor(data_V_d()$state_factor),
          linetype=as.factor(data_V_d()$state_factor),
          mode="lines",
          line=list(simplify=FALSE),
          text = 'Select or deselect lines by clicking on the legend',
          hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>")
        )
    }
    else if (input$confvis=="ci_yes") {
      # central line trace plus a "tozerox" ribbon trace, both built from
      # the out-and-back coordinates prepared by data_V_ci()
      v_cov_state <- plot_ly()
      v_cov_state <- add_trace(v_cov_state, line=list(simplify=FALSE),
                               mode="lines", type = "scatter",
                               x=data_V_ci()$x, y=data_V_ci()$y_central,
                               color=as.factor(data_V_ci()$covto),
                               fill=as.factor(data_V_ci()$frameto),
                               linetype=as.factor(data_V_ci()$frameto),
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
      v_cov_state <- add_trace(v_cov_state, fill = "tozerox",
                               line=list(dash = "solid", color = "transparent", width = 1.8897637),
                               mode = "lines", type = "scatter",
                               x=data_V_ci()$x, y=data_V_ci()$y,
                               color=as.factor(data_V_ci()$covto),
                               fill=as.factor(data_V_ci()$frameto),
                               linetype=as.factor(data_V_ci()$frameto),
                               showlegend = FALSE,
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>")
      )
    }
    # shared layout/config; the assignment's value is the reactive's result
    v_cov_state=v_cov_state %>%
      layout(title=list(text="Probability of state visit",y=0.95),
             font= list(family = "times new roman", size = input$textsizevis, color = "black"),
             margin = list(l = 50, r = 50, b = 30, t = 70),
             xaxis=list(title=list(text="Time since entry",y=0.2),
                        dtick = input$stepvx,
                        tick0 = input$startvx,
                        range=c(input$startvx,input$endvx),
                        ticklen = 5,
                        tickwidth = 2,
                        tickcolor = toRGB("black"),
                        tickmode = "linear"),
             yaxis =list(title= "Probability of visit",rangemode = "nonnegative",
                         dtick = input$stepvy,
                         ticklen = 5,
                         tickwidth = 2,
                         tickcolor = toRGB("black")),
             shapes = list(
               list(type = "rect",
                    fillcolor = "grey",
                    line = list(color = "grey"),
                    opacity = 0.8,
                    x0 = 0, x1 =0, xref = "x", y0 = 0, y1 = 1, yref = "y") ) )%>%
      config(
        toImageButtonOptions = list(
          format = "png",
          width = 1200,
          height = 900,scale=input$figscale
        ), edits = list(
          annotationPosition = TRUE,
          annotationTail = TRUE,
          annotationText = TRUE,
          axisTitleText=TRUE,
          colorbarTitleText=TRUE,
          legendPosition=TRUE,
          legendText=TRUE,
          shapePosition=TRUE,
          titleText=TRUE
        ) ,queueLength=10
      )
  })
  # Render plot 3 and expose it as a PNG download via the plotly image API
  # (plotly_IMAGE requires valid plotly account credentials at runtime).
  output$visit_both <- renderPlotly ({ datavis3_re() })
  output$downplotvis3 <- downloadHandler(
    filename = function(){paste("vis3",'.png',sep='')},
    content = function(file){
      plotly_IMAGE( datavis3_re(),width = 1400, height = 1100, format = "png", scale = 2, out_file = file )
    }
  )
##############################################################################################
############### Diff #######################################################################
  data_V_diff1 <- reactive ({
    # Wide-format differences in visit probabilities vs the reference
    # covariate pattern: one column per contrast ("X vs ref"), plus
    # timevar, state (integer) and state_fac (label), with one block of
    # rows per state stacked together.
    visit_diff=list()
    timevar=as.data.frame(myjson2()$timevar)
    names(timevar)[1]<- "timevar"
    v_diff= vector()
    # contrast labels: every non-reference pattern vs the first pattern
    # NOTE(review): uses myjson2()$atlist here but myjson2()$cov$atlist
    # elsewhere (e.g. data_V_diff2) -- confirm both paths are equivalent.
    if (length(myjson2()$atlist)>1) {
      for (i in 2:length(myjson2()$atlist)) {
        v_diff[i-1]= paste0(labels_cov()[i]," vs ",labels_cov()[1])
      }
    }
    if (length(myjson2()$atlist)>2) {
      # 3+ patterns: visitd[[i]] holds several contrasts; transpose so
      # rows = time points and columns = contrasts
      for(i in 1:length(myjson2()$visitd)) {
        visit_diff[[i]]=as.data.frame(t(data.frame(myjson2()$visitd[i])))
        colnames(visit_diff[[i]]) <- v_diff
      }
      for(i in 1:length(myjson2()$visitd)) {
        visit_diff[[i]]=as.data.frame(cbind(visit_diff[[i]], timevar ,state=rep(i,nrow(visit_diff[[i]] )) ))
      }
    }
    else {
      # exactly 2 patterns: a single contrast column per state
      for (i in 1:length(myjson2()$visitd)) {
        visit_diff[[i]]=as.data.frame(myjson2()$visitd[[i]][,1])
      }
      for (i in 1:length(myjson2()$visitd)) {
        # NOTE(review): ncol(...) of a one-column frame is 1, so `state`
        # relies on as.data.frame recycling to full length -- presumably
        # nrow was intended; behavior appears equivalent. TODO confirm.
        visit_diff[[i]]=as.data.frame(c(visit_diff[[i]], timevar ,state=rep(i,ncol(as.data.frame(myjson2()$visitd[[i]][,1])) )) )
        colnames(visit_diff[[i]])[1:(length(myjson2()$atlist)-1)] <- v_diff
      }
    }
    # Append the probabilities datasets of the different states
    data_visitd=list()
    data_visitd[[1]]=visit_diff[[1]]
    for (u in 2:(length(myjson2()$visitd))) {
      data_visitd[[u]]=rbind(visit_diff[[u]],data_visitd[[(u-1)]])
    }
    datavd=data_visitd[[length(myjson2()$visitd)]]
    # attach the display label for each state's integer code
    datavd$state_fac=c(rep("NA",nrow(datavd)))
    for (o in 1:(length(myjson2()$visitd))) {
      for (g in 1:nrow(datavd)) {
        if (datavd$state[g]==o) {datavd$state_fac[g]=labels_state()[o]}
      }
    }
    datavd
  })
  data_V_diff1_uci <- reactive ({
    # Same wide-format reshaping as data_V_diff1(), applied to the upper
    # confidence limits of the differences (myjson2()$visitd_uci).
    V_diff_uci=list()
    timevar=as.data.frame(myjson2()$timevar)
    names(timevar)[1]<- "timevar"
    v_diff= vector()
    # contrast labels: every non-reference pattern vs the first pattern
    if (length(myjson2()$atlist)>1) {
      for (i in 2:length(myjson2()$atlist)) {
        v_diff[i-1]= paste0(labels_cov()[i]," vs ",labels_cov()[1])
      }
    }
    if (length(myjson2()$atlist)>2) {
      # 3+ patterns: transpose so rows = time points, columns = contrasts
      for(i in 1:length(myjson2()$visitd_uci)) {
        V_diff_uci[[i]]=as.data.frame(t(data.frame(myjson2()$visitd_uci[i])))
        colnames(V_diff_uci[[i]]) <- v_diff
      }
      for(i in 1:length(myjson2()$visitd_uci)) {
        V_diff_uci[[i]]=as.data.frame(cbind(V_diff_uci[[i]], timevar ,state=rep(i,nrow(V_diff_uci[[i]] )) ))
      }
    }
    else {
      # exactly 2 patterns: a single contrast column per state
      for (i in 1:length(myjson2()$visitd_uci)) {
        V_diff_uci[[i]]=as.data.frame(myjson2()$visitd_uci[[i]][,1])
      }
      for (i in 1:length(myjson2()$visitd_uci)) {
        V_diff_uci[[i]]=as.data.frame(c(V_diff_uci[[i]], timevar ,state=rep(i,ncol(as.data.frame(myjson2()$visitd_uci[[i]][,1])) )) )
        colnames(V_diff_uci[[i]])[1:(length(myjson2()$atlist)-1)] <- v_diff
      }
    }
    # Append the probabilities datasets of the different states
    data_visitd_uci=list()
    data_visitd_uci[[1]]=V_diff_uci[[1]]
    for (u in 2:(length(myjson2()$visitd_uci))) {
      data_visitd_uci[[u]]=rbind(V_diff_uci[[u]],data_visitd_uci[[(u-1)]])
    }
    datavd_uci=data_visitd_uci[[length(myjson2()$visitd_uci)]]
    # attach the display label for each state's integer code
    datavd_uci$state_fac=c(rep("NA",nrow(datavd_uci)))
    for (o in 1:(length(myjson2()$visitd_uci))) {
      for (g in 1:nrow(datavd_uci)) {
        if (datavd_uci$state[g]==o) {datavd_uci$state_fac[g]=labels_state()[o]}
      }
    }
    datavd_uci
  })
  data_V_diff1_lci <- reactive ({
    # Same wide-format reshaping as data_V_diff1(), applied to the lower
    # confidence limits of the differences (myjson2()$visitd_lci).
    V_diff_lci=list()
    timevar=as.data.frame(myjson2()$timevar)
    names(timevar)[1]<- "timevar"
    v_diff= vector()
    # contrast labels: every non-reference pattern vs the first pattern
    if (length(myjson2()$atlist)>1) {
      for (i in 2:length(myjson2()$atlist)) {
        v_diff[i-1]= paste0(labels_cov()[i]," vs ",labels_cov()[1])
      }
    }
    if (length(myjson2()$atlist)>2) {
      # 3+ patterns: transpose so rows = time points, columns = contrasts
      for(i in 1:length(myjson2()$visitd_lci)) {
        V_diff_lci[[i]]=as.data.frame(t(data.frame(myjson2()$visitd_lci[i])))
        colnames(V_diff_lci[[i]]) <- v_diff
      }
      for(i in 1:length(myjson2()$visitd_lci)) {
        V_diff_lci[[i]]=as.data.frame(cbind(V_diff_lci[[i]], timevar ,state=rep(i,nrow(V_diff_lci[[i]] )) ))
      }
    }
    else {
      # exactly 2 patterns: a single contrast column per state
      for (i in 1:length(myjson2()$visitd_lci)) {
        V_diff_lci[[i]]=as.data.frame(myjson2()$visitd_lci[[i]][,1])
      }
      for (i in 1:length(myjson2()$visitd_lci)) {
        V_diff_lci[[i]]=as.data.frame(c(V_diff_lci[[i]], timevar ,state=rep(i,ncol(as.data.frame(myjson2()$visitd_lci[[i]][,1])) )) )
        colnames(V_diff_lci[[i]])[1:(length(myjson2()$atlist)-1)] <- v_diff
      }
    }
    # Append the probabilities datasets of the different states
    data_visitd_lci=list()
    data_visitd_lci[[1]]=V_diff_lci[[1]]
    for (u in 2:(length(myjson2()$visitd_lci))) {
      data_visitd_lci[[u]]=rbind(V_diff_lci[[u]],data_visitd_lci[[(u-1)]])
    }
    datavd_lci=data_visitd_lci[[length(myjson2()$visitd_lci)]]
    # attach the display label for each state's integer code
    datavd_lci$state_fac=c(rep("NA",nrow(datavd_lci)))
    for (o in 1:(length(myjson2()$visitd_lci))) {
      for (g in 1:nrow(datavd_lci)) {
        if (datavd_lci$state[g]==o) {datavd_lci$state_fac[g]=labels_state()[o]}
      }
    }
    datavd_lci
  })
data_V_diff2<- reactive ({
dlist=list()
for (d in 1:(length(myjson2()$cov$atlist)-1)) {
dlist[[d]]=cbind.data.frame(data_V_diff1()[,d],data_V_diff1()[,ncol(data_V_diff1())-2],data_V_diff1()[,ncol(data_V_diff1())-1],
data_V_diff1()[,ncol(data_V_diff1())],rep(d,length(data_V_diff1()[,d])) )
dlist[[d]][,6] <- rep(colnames(data_V_diff1())[d],length(data_V_diff1()[,d]))
colnames(dlist[[d]]) <- c("V","timevar","state","state_factor","cov","cov_factor")
}
d_all_Vd <- bind_rows(dlist, .id = "column_Vabel")
d_all_Vd
})
data_V_diff2_uci<- reactive ({
dlist=list()
for (d in 1:(length(myjson2()$cov$atlist)-1)) {
dlist[[d]]=cbind.data.frame(data_V_diff1_uci()[,d],data_V_diff1_uci()[,ncol(data_V_diff1_uci())-2],
data_V_diff1_uci()[,ncol(data_V_diff1_uci())-1],
data_V_diff1_uci()[,ncol(data_V_diff1_uci())],rep(d,length(data_V_diff1_uci()[,d])) )
dlist[[d]][,6] <- rep(colnames(data_V_diff1_uci())[d],length(data_V_diff1_uci()[,d]))
colnames(dlist[[d]]) <- c("V","timevar","state","state_factor","cov","cov_factor")
}
d_all_Vd_uci <- bind_rows(dlist, .id = "column_Vabel")
d_all_Vd_uci
})
data_V_diff2_lci<- reactive ({
dlist=list()
for (d in 1:(length(myjson2()$cov$atlist)-1)) {
dlist[[d]]=cbind.data.frame(data_V_diff1_lci()[,d],data_V_diff1_lci()[,ncol(data_V_diff1_lci())-2],
data_V_diff1_lci()[,ncol(data_V_diff1_lci())-1],
data_V_diff1_lci()[,ncol(data_V_diff1_lci())],rep(d,length(data_V_diff1_lci()[,d])) )
dlist[[d]][,6] <- rep(colnames(data_V_diff1_lci())[d],length(data_V_diff1_lci()[,d]))
colnames(dlist[[d]]) <- c("V","timevar","state","state_factor","cov","cov_factor")
}
d_all_Vd_lci <- bind_rows(dlist, .id = "column_Vabel")
d_all_Vd_lci
})
data_V_diff_ci<- reactive ({
x=c( data_V_diff2()[order(data_V_diff2()$timevar,data_V_diff2()$state,data_V_diff2()$cov),]$timevar,
data_V_diff2()[order(-data_V_diff2()$timevar,data_V_diff2()$state,data_V_diff2()$cov),]$timevar )
y_central=c( data_V_diff2()[order(data_V_diff2()$timevar,data_V_diff2()$state,data_V_diff2()$cov),]$V,
data_V_diff2()[order(-data_V_diff2()$timevar,data_V_diff2()$state,data_V_diff2()$cov),]$V )
y=c( data_V_diff2_uci()[order(data_V_diff2_uci()$timevar,data_V_diff2_uci()$state,data_V_diff2_uci()$cov),]$V,
data_V_diff2_lci()[order(-data_V_diff2_lci()$timevar,data_V_diff2_lci()$state,data_V_diff2_lci()$cov),]$V )
frameto=c(as.character(data_V_diff2_uci()[order(-data_V_diff2_uci()$timevar,data_V_diff2_uci()$state,data_V_diff2_uci()$cov),]$state_factor),
as.character(data_V_diff2_lci()[order(-data_V_diff2_lci()$timevar,data_V_diff2_lci()$state,data_V_diff2_lci()$cov),]$state_factor) )
covto=c( data_V_diff2_uci()[order(-data_V_diff2_uci()$timevar,data_V_diff2_uci()$state,data_V_diff2_uci()$cov),]$cov_factor,
data_V_diff2_lci()[order(-data_V_diff2_lci()$timevar,data_V_diff2_lci()$state,data_V_diff2_lci()$cov),]$cov_factor )
data=data.frame(x,y,frameto,covto,y_central)
data
})
#output$shouldloadvis4 <- renderUI({
# if (is.null((myjson2()))) return()
# downloadButton(outputId = "downplotvis4", label = h2("Download the plot"))
#})
# Figure builder: "differences in visit probabilities" of each covariate
# pattern vs the reference pattern, per state.  The layout is driven by two
# UI inputs:
#   input$confvis  -- "ci_no": point estimates only; "ci_yes": with CI band.
#   input$facetvis -- "No":  animated plotly, one animation frame per state;
#                     "Yes": ggplot faceted by state, converted via ggplotly().
# Every branch yields a plotly object (V_state_d).
datavis4_re <- reactive ({
  # Fully blank axis spec, used only by the "not applicable" placeholder.
  ax <- list( title = "",zeroline = FALSE,showline = FALSE, showticklabels = FALSE, showgrid = FALSE)
  # Nothing to difference with a single covariate pattern or a single state.
  if (length(myjson2()$visitd) == 0| myjson2()$Nats==1 ) {
    V_state_d= plot_ly() %>%
      layout(title=list(text="Not applicable- Only one covariate pattern specified",y=0.95),xaxis=ax, yaxis=ax)
    V_state_d
  }
  else {
    if (input$confvis=="ci_no") {
      if (input$facetvis=="No") {
        # Animated plotly: one frame per state, one colored line per pattern.
        V_state_d= plot_ly(data_V_diff2(),alpha=0.5) %>%
          add_lines(
            x=data_V_diff2()$timevar,y=data_V_diff2()$V,
            frame=factor(as.factor(data_V_diff2()$state_factor),levels=labels_state()),
            color=as.factor(data_V_diff2()$cov_factor),
            # NOTE(review): 1:length(x)-1 parses as (1:n)-1 = 0:(n-1); the 0
            # index is dropped in subsetting so this equals 1:(n-1) only by
            # accident.  Intended form is 1:(length(x)-1).
            colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)-1],
            mode="lines",
            line=list(simplify=FALSE),
            text = 'Select or deselect lines by clicking on the legend',
            hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                  "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>")
          ) %>%
          layout(title=list(text="Difference in visit probabilities among covariate patterns (compared to reference)",y=0.95),
                 font= list(family = "times new roman", size = input$textsizevis, color = "black"),
                 margin = list(l = 50, r = 50, b = 30, t = 70),
                 xaxis=list(title=list(text="Time since entry",y=0.2),
                            dtick = input$stepvx,
                            tick0 = input$startvx,
                            range=c(input$startvx,input$endvx),
                            ticklen = 5,
                            tickwidth = 2,
                            tickcolor = toRGB("black"),
                            tickmode = "linear"),
                 yaxis =list(title= "Difference in visit probabilities",
                             dtick = input$stepvy,
                             ticklen = 5,
                             tickwidth = 2,
                             tickcolor = toRGB("black")),
                 # Zero-width grey rectangle at x = 0: vertical reference mark.
                 shapes = list(
                   list(type = "rect",
                        fillcolor = "grey",
                        line = list(color = "grey"),
                        opacity = 0.8,
                        x0 = 0, x1 =0, xref = "x", y0 = 0, y1 = 1, yref = "y") )
          ) %>%
          animation_opts(frame = 1000, transition = 0, redraw = FALSE)%>%
          # Mode-bar export settings and which plot elements stay hand-editable.
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        V_state_d
      }
      if (input$facetvis=="Yes") {
        data_plot=data_V_diff2()
        # First ggplot() call is immediately overwritten; kept as in original.
        V_state_d = ggplot(data_plot)
        V_state_d = ggplot(data_plot,aes(x=timevar, y=V, color= factor(as.factor(cov_factor) )))
        # text= feeds the ggplotly hover tooltip (tooltip = "text" below).
        V_state_d = V_state_d+geom_line(aes(x=timevar, y=V, color= factor(as.factor(cov_factor) ), group=1,
                                            text=paste("Select or deselect lines by clicking on the legend",
                                                       "<br>Time: ", timevar,
                                                       "<br>Differences in probability of visit: ", V,
                                                       "<br>Covariate pattern: ", factor(as.factor(cov_factor) ))))+
          scale_colour_manual( values =labels_colour_cov(),labels = labels_cov() )
        # Facet by state; the "compare" aim forces a two-row facet grid.
        if (input$aimtype=="compare") { V_state_d = V_state_d+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()), nrow=2)}
        else if (input$aimtype=="present") {V_state_d = V_state_d+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()))}
        # NOTE(review): by=input$endvx makes the break step equal to the axis
        # end, so at most two breaks appear -- input$stepvx may be intended.
        V_state_d = V_state_d + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$endvx )))
        V_state_d = V_state_d +labs(title="Differences in probability of visit among covariate patterns (compared to reference)",
                                    x="Time since entry", y="Differences in probability of visit")
        V_state_d = V_state_d + labs(color = "Covariate\npatterns")+ labs(fill = "Covariate\npatterns")
        # Text sizes scale off the user-chosen base size input$textsizevis.
        V_state_d = V_state_d +theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
                                     legend.title = element_text(color="black", size= input$textsizevis-5),
                                     legend.text=element_text(size= input$textsizevis-6),
                                     plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
                                     legend.margin = margin(1.5, 1, 1, 1, "cm"),
                                     legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
                                     axis.title.y = element_text(size= input$textsizevis-5),
                                     axis.title.x = element_text(size= input$textsizevis-5),
                                     axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
        V_state_d = ggplotly(V_state_d, tooltip = "text")%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        V_state_d
      }
    }
    else if (input$confvis=="ci_yes") {
      if (input$facetvis=="No") {
        V_state_d <- plot_ly()
        # Central-estimate lines, one per covariate pattern.
        V_state_d <- add_trace(V_state_d, line=list(simplify=FALSE),
                               mode="lines", type = "scatter",
                               x=data_V_diff_ci()$x, y=data_V_diff_ci()$y_central,
                               frame=factor(as.factor(data_V_diff_ci()$frameto),levels=labels_state()),
                               colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)-1],
                               color=as.factor(data_V_diff_ci()$covto),
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>") )
        # Confidence band: data_V_diff_ci()$y holds upper bounds forward then
        # lower bounds reversed, so the filled trace forms a closed ribbon.
        V_state_d <- add_trace(V_state_d, fill = "tozerox",
                               line=list(dash = "solid", color = "transparent", width = 1.8897637),
                               mode = "lines", type = "scatter",
                               x=data_V_diff_ci()$x, y=data_V_diff_ci()$y,
                               frame=factor(as.factor(data_V_diff_ci()$frameto),levels=labels_state()),
                               colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)-1],
                               color=as.factor(data_V_diff_ci()$covto),
                               showlegend = FALSE,
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
        V_state_d= V_state_d %>%
          layout(title=list(text="Differences in probability of visit among covariate patterns (compared to reference)",y=0.95),
                 font= list(family = "times new roman", size = input$textsizevis, color = "black"),
                 margin = list(l = 50, r = 50, b = 30, t = 70),
                 xaxis=list(title=list(text="Time since entry",y=0.2),
                            dtick = input$stepvx,
                            tick0 = input$startvx,
                            range=c(input$startvx,input$endvx),
                            ticklen = 5,
                            tickwidth = 2,
                            tickcolor = toRGB("black"),
                            tickmode = "linear"),
                 yaxis =list(title= "Differences in probability of visit",
                             dtick = input$stepvy,
                             ticklen = 5,
                             tickwidth = 2,
                             tickcolor = toRGB("black")),
                 shapes = list(
                   list(type = "rect",
                        fillcolor = "grey",
                        line = list(color = "grey"),
                        opacity = 0.8,
                        x0 = 0, x1 =0, xref = "x", y0 = 0, y1 = 1, yref = "y") ) )%>%
          animation_opts(frame = 1000, transition = 0, redraw = FALSE)%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        V_state_d
      }
      if (input$facetvis=="Yes") {
        V_lci= data_V_diff2_lci()$V
        V_uci= data_V_diff2_uci()$V
        # Assumes the lci/uci frames are row-aligned with data_V_diff2() --
        # TODO confirm upstream ordering guarantees this.
        data_plot=cbind(data_V_diff2(),V_lci,V_uci)
        V_state_d=ggplot(data_plot)
        V_state_d=ggplot(data_plot,aes(x=timevar, y=V, color= factor(as.factor(cov_factor) )))+
          scale_colour_manual( values =labels_colour_cov(),labels = labels_cov() )
        # NOTE(review): fill= is not a geom_line() aesthetic; the line is still
        # coloured via the colour mapping above.
        V_state_d=V_state_d+geom_line(aes(x=timevar, y=V, fill= factor(as.factor(cov_factor) ), group=1,
                                          text=paste("Select or deselect lines by clicking on the legend",
                                                     "<br>Time: ", timevar,
                                                     "<br>Differences in probability of visit: ", V,
                                                     "<br>Covariate pattern: ", factor(as.factor(cov_factor) ))))
        V_state_d=V_state_d+ geom_ribbon(aes(ymin = V_lci, ymax =V_uci,fill=factor(as.factor(cov_factor) )),alpha=0.4)+
          scale_fill_manual( values =labels_colour_cov(),labels = labels_cov() )
        if (input$aimtype=="compare") { V_state_d = V_state_d+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()), nrow=2)}
        else if (input$aimtype=="present") {V_state_d = V_state_d+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()) )}
        V_state_d = V_state_d + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$endvx )))
        V_state_d = V_state_d +labs(title="Differences in probability of visit among covariate patterns (compared to reference)",
                                    x="Time since entry", y="Differences in probability of visit")
        V_state_d = V_state_d + labs(color = "Covariate\npatterns")+ labs(fill = "Covariate\npatterns")
        V_state_d = V_state_d +theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
                                     legend.title = element_text(color="black", size= input$textsizevis-5),
                                     legend.text=element_text(size= input$textsizevis-6),
                                     plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
                                     legend.margin = margin(1.5, 1, 1, 1, "cm"),
                                     legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
                                     axis.title.y = element_text(size= input$textsizevis-5),
                                     axis.title.x = element_text(size= input$textsizevis-5),
                                     axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
        V_state_d = ggplotly(V_state_d, tooltip = "text")%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
      }
    }
    # Last branch assigned wins; returned as the reactive's value.
    V_state_d
  }
})
# Render the difference figure in the UI and offer it as a PNG download.
output$visit_diff <- renderPlotly ({ datavis4_re() })
output$downplotvis4 <- downloadHandler(
  filename = function() {
    paste0("vis4", ".png")
  },
  content = function(file) {
    plotly_IMAGE(datavis4_re(), width = 1400, height = 1100,
                 format = "png", scale = 2, out_file = file)
  }
)
######################################################################################
#####################################################################################
# Long-format data for visit-probability RATIOS: each non-reference covariate
# pattern vs the reference, per state.  myjson2()$visitr holds one matrix (or
# vector, when there is a single comparison) of ratios per state.
#
# Fixes vs the original:
#  * seq_len() instead of 1:length() (safe for empty input);
#  * the rbind() accumulation loop (which indexed [[0]] and errored when only
#    one state exists) is replaced by do.call(rbind, rev(...)) -- rev()
#    preserves the original reverse-state row order;
#  * the O(rows x states) double loop assigning state labels is replaced by
#    direct vectorised indexing (states are 1-based indices by construction).
data_V_ratio1 <- reactive ({
  timevar <- as.data.frame(myjson2()$timevar)
  names(timevar)[1] <- "timevar"
  # Comparison labels: "<pattern i> vs <reference pattern>".
  v_ratio <- vector()
  if (length(myjson2()$atlist) > 1) {
    for (i in 2:length(myjson2()$atlist)) {
      v_ratio[i-1] <- paste0(labels_cov()[i], " vs ", labels_cov()[1])
    }
  }
  n_states <- length(myjson2()$visitr)
  visit_ratio <- list()
  if (length(myjson2()$atlist) > 2) {
    # Several comparisons: transpose so rows are time points, columns are
    # comparisons, then attach time and the (1-based) state index.
    for (i in seq_len(n_states)) {
      visit_ratio[[i]] <- as.data.frame(t(data.frame(myjson2()$visitr[i])))
      colnames(visit_ratio[[i]]) <- v_ratio
      visit_ratio[[i]] <- as.data.frame(cbind(visit_ratio[[i]], timevar,
                                              state = rep(i, nrow(visit_ratio[[i]]))))
    }
  }
  else {
    # Single comparison: each state's entry collapses to one column; the
    # scalar state index is recycled to the frame's length by as.data.frame().
    for (i in seq_len(n_states)) {
      visit_ratio[[i]] <- as.data.frame(myjson2()$visitr[[i]][,1])
      visit_ratio[[i]] <- as.data.frame(c(visit_ratio[[i]], timevar,
                                          state = rep(i, ncol(as.data.frame(myjson2()$visitr[[i]][,1])))))
      colnames(visit_ratio[[i]])[1:(length(myjson2()$atlist)-1)] <- v_ratio
    }
  }
  # Stack per-state frames, last state first (matches the original row order).
  datalr <- do.call(rbind, rev(visit_ratio))
  # Map numeric state index to its display label.  (Rows whose state lacks a
  # label become NA rather than the original's literal "NA" string.)
  datalr$state_fac <- labels_state()[datalr$state]
  datalr
})
# Upper-CI analogue of data_V_ratio1(): long-format upper confidence bounds of
# the visit-probability ratios, built from myjson2()$visitr_uci.
# Same fixes as data_V_ratio1(): seq_len() for safe iteration, a single
# do.call(rbind, rev(...)) that preserves the reverse-state row order (and
# works with one state, where the old loop indexed [[0]]), and vectorised
# state-label assignment instead of an O(rows x states) double loop.
data_V_ratio1_uci <- reactive ({
  timevar <- as.data.frame(myjson2()$timevar)
  names(timevar)[1] <- "timevar"
  # Comparison labels: "<pattern i> vs <reference pattern>".
  v_ratio <- vector()
  if (length(myjson2()$atlist) > 1) {
    for (i in 2:length(myjson2()$atlist)) {
      v_ratio[i-1] <- paste0(labels_cov()[i], " vs ", labels_cov()[1])
    }
  }
  n_states <- length(myjson2()$visitr_uci)
  L_ratio_uci <- list()
  if (length(myjson2()$atlist) > 2) {
    # Several comparisons: rows = time points, columns = comparisons.
    for (i in seq_len(n_states)) {
      L_ratio_uci[[i]] <- as.data.frame(t(data.frame(myjson2()$visitr_uci[i])))
      colnames(L_ratio_uci[[i]]) <- v_ratio
      L_ratio_uci[[i]] <- as.data.frame(cbind(L_ratio_uci[[i]], timevar,
                                              state = rep(i, nrow(L_ratio_uci[[i]]))))
    }
  }
  else {
    # Single comparison: one column per state; scalar state index recycled.
    for (i in seq_len(n_states)) {
      L_ratio_uci[[i]] <- as.data.frame(myjson2()$visitr_uci[[i]][,1])
      L_ratio_uci[[i]] <- as.data.frame(c(L_ratio_uci[[i]], timevar,
                                          state = rep(i, ncol(as.data.frame(myjson2()$visitr_uci[[i]][,1])))))
      colnames(L_ratio_uci[[i]])[1:(length(myjson2()$atlist)-1)] <- v_ratio
    }
  }
  # Stack per-state frames, last state first (original row order preserved).
  datalr_uci <- do.call(rbind, rev(L_ratio_uci))
  # Vectorised state-index -> label mapping.
  datalr_uci$state_fac <- labels_state()[datalr_uci$state]
  datalr_uci
})
# Lower-CI analogue of data_V_ratio1(): long-format lower confidence bounds of
# the visit-probability ratios, built from myjson2()$visitr_lci.
# Same fixes as data_V_ratio1(): seq_len() for safe iteration, a single
# do.call(rbind, rev(...)) that preserves the reverse-state row order (and
# works with one state, where the old loop indexed [[0]]), and vectorised
# state-label assignment instead of an O(rows x states) double loop.
data_V_ratio1_lci <- reactive ({
  timevar <- as.data.frame(myjson2()$timevar)
  names(timevar)[1] <- "timevar"
  # Comparison labels: "<pattern i> vs <reference pattern>".
  v_ratio <- vector()
  if (length(myjson2()$atlist) > 1) {
    for (i in 2:length(myjson2()$atlist)) {
      v_ratio[i-1] <- paste0(labels_cov()[i], " vs ", labels_cov()[1])
    }
  }
  n_states <- length(myjson2()$visitr_lci)
  L_ratio_lci <- list()
  if (length(myjson2()$atlist) > 2) {
    # Several comparisons: rows = time points, columns = comparisons.
    for (i in seq_len(n_states)) {
      L_ratio_lci[[i]] <- as.data.frame(t(data.frame(myjson2()$visitr_lci[i])))
      colnames(L_ratio_lci[[i]]) <- v_ratio
      L_ratio_lci[[i]] <- as.data.frame(cbind(L_ratio_lci[[i]], timevar,
                                              state = rep(i, nrow(L_ratio_lci[[i]]))))
    }
  }
  else {
    # Single comparison: one column per state; scalar state index recycled.
    for (i in seq_len(n_states)) {
      L_ratio_lci[[i]] <- as.data.frame(myjson2()$visitr_lci[[i]][,1])
      L_ratio_lci[[i]] <- as.data.frame(c(L_ratio_lci[[i]], timevar,
                                          state = rep(i, ncol(as.data.frame(myjson2()$visitr_lci[[i]][,1])))))
      colnames(L_ratio_lci[[i]])[1:(length(myjson2()$atlist)-1)] <- v_ratio
    }
  }
  # Stack per-state frames, last state first (original row order preserved).
  datalr_lci <- do.call(rbind, rev(L_ratio_lci))
  # Vectorised state-index -> label mapping.
  datalr_lci$state_fac <- labels_state()[datalr_lci$state]
  datalr_lci
})
# Stack the per-comparison ratio columns of data_V_ratio1() into long format:
# one row per (time, state, comparison) with columns V, timevar, state,
# state_factor, cov (comparison index) and cov_factor (comparison label).
# Fix vs the original: seq_len() instead of 1:(n-1), which iterated over
# c(1, 0) when no non-reference pattern exists; the source reactive is read
# once per evaluation.
data_V_ratio2<- reactive ({
  wide <- data_V_ratio1()
  last <- ncol(wide)
  n_rows <- nrow(wide)
  dlist <- lapply(seq_len(length(myjson2()$cov$atlist) - 1), function(d) {
    # Columns: comparison d's ratio, then timevar, state, state_fac.
    block <- cbind.data.frame(wide[, d], wide[, last - 2], wide[, last - 1],
                              wide[, last], rep(d, n_rows))
    block[, 6] <- rep(colnames(wide)[d], n_rows)
    colnames(block) <- c("V", "timevar", "state", "state_factor", "cov", "cov_factor")
    block
  })
  bind_rows(dlist, .id = "column_Vabel")
})
# Upper-CI analogue of data_V_ratio2(): long-format upper bounds of the
# ratios, one row per (time, state, comparison).  Uses seq_len() instead of
# the unsafe 1:(n-1) and reads the source reactive once.
data_V_ratio2_uci<- reactive ({
  wide <- data_V_ratio1_uci()
  last <- ncol(wide)
  n_rows <- nrow(wide)
  dlist <- lapply(seq_len(length(myjson2()$cov$atlist) - 1), function(d) {
    block <- cbind.data.frame(wide[, d], wide[, last - 2], wide[, last - 1],
                              wide[, last], rep(d, n_rows))
    block[, 6] <- rep(colnames(wide)[d], n_rows)
    colnames(block) <- c("V", "timevar", "state", "state_factor", "cov", "cov_factor")
    block
  })
  bind_rows(dlist, .id = "column_Vabel")
})
# Lower-CI analogue of data_V_ratio2(): long-format lower bounds of the
# ratios, one row per (time, state, comparison).  Uses seq_len() instead of
# the unsafe 1:(n-1) and reads the source reactive once.
data_V_ratio2_lci<- reactive ({
  wide <- data_V_ratio1_lci()
  last <- ncol(wide)
  n_rows <- nrow(wide)
  dlist <- lapply(seq_len(length(myjson2()$cov$atlist) - 1), function(d) {
    block <- cbind.data.frame(wide[, d], wide[, last - 2], wide[, last - 1],
                              wide[, last], rep(d, n_rows))
    block[, 6] <- rep(colnames(wide)[d], n_rows)
    colnames(block) <- c("V", "timevar", "state", "state_factor", "cov", "cov_factor")
    block
  })
  bind_rows(dlist, .id = "column_Vabel")
})
# Build the polygon-style data for the ratio plot's confidence band:
# x runs forward (upper bound) then backward (lower bound) so plotly's
# fill = "tozerox" traces a closed ribbon.  y_central carries the point
# estimate for the same forward/backward ordering.
# NOTE(review): frameto/covto use the DESCENDING-time ordering for both
# halves, exactly as in the original; this matches y only because the
# state/cov cycle repeats identically at every time point -- confirm upstream.
data_V_ratio_ci<- reactive ({
  ctr <- data_V_ratio2()
  uci <- data_V_ratio2_uci()
  lci <- data_V_ratio2_lci()
  ctr_asc  <- ctr[order(ctr$timevar, ctr$state, ctr$cov), ]
  ctr_desc <- ctr[order(-ctr$timevar, ctr$state, ctr$cov), ]
  uci_asc  <- uci[order(uci$timevar, uci$state, uci$cov), ]
  uci_desc <- uci[order(-uci$timevar, uci$state, uci$cov), ]
  lci_desc <- lci[order(-lci$timevar, lci$state, lci$cov), ]
  data.frame(
    x = c(ctr_asc$timevar, ctr_desc$timevar),
    y = c(uci_asc$V, lci_desc$V),
    frameto = c(as.character(uci_desc$state_factor),
                as.character(lci_desc$state_factor)),
    covto = c(uci_desc$cov_factor, lci_desc$cov_factor),
    y_central = c(ctr_asc$V, ctr_desc$V)
  )
})
#output$shouldloadvis5 <- renderUI({
# if (is.null((myjson2()))) return()
# downloadButton(outputId = "downplotvis5", label = h2("Download the plot"))
#})
# Figure builder: RATIOS of visit probabilities of each covariate pattern vs
# the reference pattern, per state.  Mirrors datavis4_re(); layout driven by:
#   input$confvis  -- "ci_no": point estimates only; "ci_yes": with CI band.
#   input$facetvis -- "No":  animated plotly, one animation frame per state;
#                     "Yes": ggplot faceted by state, converted via ggplotly().
# Every branch yields a plotly object (V_state_r).
datavis5_re <- reactive ({
  # Fully blank axis spec, used only by the "not applicable" placeholder.
  ax <- list( title = "",zeroline = FALSE,showline = FALSE, showticklabels = FALSE, showgrid = FALSE)
  # Nothing to ratio with a single covariate pattern or a single state.
  if (length(myjson2()$visitr) == 0| myjson2()$Nats==1 ) {
    V_state_r= plot_ly() %>%
      layout(title=list(text="Not applicable- Only one covariate pattern specified",y=0.95),xaxis=ax, yaxis=ax)
    V_state_r
  }
  else {
    if (input$confvis=="ci_no") {
      if (input$facetvis=="No") {
        # Animated plotly: one frame per state, one colored line per pattern.
        V_state_r= plot_ly(data_V_ratio2(),alpha=0.5) %>%
          add_lines(
            x=data_V_ratio2()$timevar,y=data_V_ratio2()$V,
            frame=factor(as.factor(data_V_ratio2()$state_factor),levels=labels_state()),
            color=as.factor(data_V_ratio2()$cov_factor),
            # NOTE(review): 1:length(x)-1 parses as (1:n)-1 = 0:(n-1); the 0
            # index is dropped in subsetting so this equals 1:(n-1) by accident.
            colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)-1],
            mode="lines",
            line=list(simplify=FALSE),
            text = 'Select or deselect lines by clicking on the legend',
            hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                  "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>")
          ) %>%
          layout(title=list(text="Ratios in visit probabilities among covariate patterns (compared to reference)",y=0.95),
                 font= list(family = "times new roman", size = input$textsizevis, color = "black"),
                 margin = list(l = 50, r = 50, b = 30, t = 70),
                 xaxis=list(title=list(text="Time since entry",y=0.2),
                            dtick = input$stepvx,
                            tick0 = input$startvx,
                            range=c(input$startvx,input$endvx),
                            ticklen = 5,
                            tickwidth = 2,
                            tickcolor = toRGB("black"),
                            tickmode = "linear"),
                 yaxis =list(title= "Ratios in visit probabilities",
                             dtick = input$stepvy,
                             ticklen = 5,
                             tickwidth = 2,
                             tickcolor = toRGB("black")),
                 # Zero-width grey rectangle at x = 0: vertical reference mark.
                 shapes = list(
                   list(type = "rect",
                        fillcolor = "grey",
                        line = list(color = "grey"),
                        opacity = 0.8,
                        x0 = 0, x1 =0, xref = "x", y0 = 0, y1 = 1, yref = "y") )
          ) %>%
          animation_opts(frame = 1000, transition = 0, redraw = FALSE)%>%
          # Mode-bar export settings and which plot elements stay hand-editable.
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        V_state_r
      }
      if (input$facetvis=="Yes") {
        data_plot=data_V_ratio2()
        # First ggplot() call is immediately overwritten; kept as in original.
        V_state_r = ggplot(data_plot)
        # NOTE(review): the tooltip label "Ratio of length of stay" looks like
        # a copy-paste from the length-of-stay section; this panel shows visit
        # probability ratios.  Confirm and relabel.
        V_state_r = ggplot(data_plot,aes(x=timevar, y=V, color= factor(as.factor(cov_factor) ), group=1,
                                         text=paste("Select or deselect lines by clicking on the legend",
                                                    "<br>Time: ", timevar,
                                                    "<br>Ratio of length of stay: ", V,
                                                    "<br>Covariate pattern: ", factor(as.factor(cov_factor) ))))
        V_state_r = V_state_r+geom_line(aes(x=timevar, y=V, color= factor(as.factor(cov_factor) )))+
          scale_colour_manual( values =labels_colour_cov(),labels = labels_cov() )
        # Facet by state; the "compare" aim forces a two-row facet grid.
        if (input$aimtype=="compare") { V_state_r = V_state_r+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()), nrow=2)}
        else if (input$aimtype=="present") {V_state_r = V_state_r+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()))}
        # NOTE(review): by=input$endvx makes the break step equal to the axis
        # end, so at most two breaks appear -- input$stepvx may be intended.
        V_state_r = V_state_r + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$endvx )))
        V_state_r = V_state_r +labs(title="Ratios in visit probabilities among covariate patterns (compared to reference)", x="Time since entry", y="Ratios in visit probabilities")
        V_state_r = V_state_r + labs(color = "Covariate\npatterns")+ labs(fill = "Covariate\npatterns")
        # Text sizes scale off the user-chosen base size input$textsizevis.
        V_state_r = V_state_r +theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
                                     legend.title = element_text(color="black", size= input$textsizevis-5),
                                     legend.text=element_text(size= input$textsizevis-6),
                                     plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
                                     legend.margin = margin(1.5, 1, 1, 1, "cm"),
                                     legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
                                     axis.title.y = element_text(size= input$textsizevis-5),
                                     axis.title.x = element_text(size= input$textsizevis-5),
                                     axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
        V_state_r = ggplotly(V_state_r, tooltip = "text")%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        V_state_r
      }
    }
    else if (input$confvis=="ci_yes") {
      if (input$facetvis=="No") {
        V_state_r <- plot_ly()
        # Central-estimate lines, one per covariate pattern.
        V_state_r <- add_trace(V_state_r, line=list(simplify=FALSE),
                               mode="lines", type = "scatter",
                               x=data_V_ratio_ci()$x, y=data_V_ratio_ci()$y_central,
                               frame=factor(as.factor(data_V_ratio_ci()$frameto),levels=labels_state()),
                               colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)-1],
                               color=as.factor(data_V_ratio_ci()$covto) ,
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
        # Confidence band: data_V_ratio_ci()$y holds upper bounds forward then
        # lower bounds reversed, so the filled trace forms a closed ribbon.
        V_state_r <- add_trace(V_state_r, fill = "tozerox",
                               line=list(dash = "solid", color = "transparent", width = 1.8897637),
                               mode = "lines", type = "scatter",
                               x=data_V_ratio_ci()$x, y=data_V_ratio_ci()$y,
                               frame=factor(as.factor(data_V_ratio_ci()$frameto),levels=labels_state()),
                               colors=labels_colour_cov()[1:length(myjson2()$cov$atlist)-1],
                               color=as.factor(data_V_ratio_ci()$covto),
                               showlegend = FALSE,
                               text = 'Select or deselect lines by clicking on the legend',
                               hovertemplate = paste("<b>%{text}</b><br><br>", "%{yaxis.title.text}: %{y:,}<br>",
                                                     "%{xaxis.title.text}: %{x:,}<br>","<extra></extra>"))
        V_state_r= V_state_r %>%
          layout(title=list(text="Ratios in visit probabilities among covariate patterns (compared to reference)",y=0.95),
                 font= list(family = "times new roman", size = input$textsizevis, color = "black"),
                 margin = list(l = 50, r = 50, b = 30, t = 70),
                 xaxis=list(title=list(text="Time since entry",y=0.2),
                            dtick = input$stepvx,
                            tick0 = input$startvx,
                            range=c(input$startvx,input$endvx),
                            ticklen = 5,
                            tickwidth = 2,
                            tickcolor = toRGB("black"),
                            tickmode = "linear"),
                 yaxis =list(title= "Ratios in visit probabilities",
                             dtick = input$stepvy,
                             ticklen = 5,
                             tickwidth = 2,
                             tickcolor = toRGB("black")),
                 # NOTE(review): x1 = input$area here, while the analogous
                 # difference plot uses x1 = 0 -- confirm which is intended.
                 shapes = list(
                   list(type = "rect",
                        fillcolor = "grey",
                        line = list(color = "grey"),
                        opacity = 0.8,
                        x0 = 0, x1 =input$area, xref = "x", y0 = 0, y1 = 1, yref = "y") ) )%>%
          animation_opts(frame = 1000, transition = 0, redraw = FALSE)%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
        V_state_r
      }
      if (input$facetvis=="Yes") {
        V_lci= data_V_ratio2_lci()$V
        V_uci= data_V_ratio2_uci()$V
        # Assumes the lci/uci frames are row-aligned with data_V_ratio2() --
        # TODO confirm upstream ordering guarantees this.
        data_plot=cbind(data_V_ratio2(),V_lci,V_uci)
        V_state_r=ggplot(data_plot)
        # NOTE(review): same "Ratio of length of stay" tooltip copy-paste as
        # in the ci_no facet branch above.
        V_state_r=ggplot(data_plot,aes(x=timevar, y=V, color= factor(as.factor(cov_factor) ), group=1,
                                       text=paste("Select or deselect lines by clicking on the legend",
                                                  "<br>Time: ", timevar,
                                                  "<br>Ratio of length of stay: ", V,
                                                  "<br>Covariate pattern: ", factor(as.factor(cov_factor) ))))+
          scale_colour_manual( values =labels_colour_cov(),labels = labels_cov() )
        # NOTE(review): fill= is not a geom_line() aesthetic; the line is still
        # coloured via the colour mapping above.
        V_state_r=V_state_r+geom_line(aes(x=timevar, y=V, fill= factor(as.factor(cov_factor) )))
        V_state_r=V_state_r+ geom_ribbon(aes(ymin = V_lci, ymax =V_uci,fill=factor(as.factor(cov_factor) )),alpha=0.4)+
          scale_fill_manual( values =labels_colour_cov(),labels = labels_cov() )
        if (input$aimtype=="compare") { V_state_r = V_state_r+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()), nrow=2)}
        else if (input$aimtype=="present") {V_state_r = V_state_r+ facet_wrap(~factor(as.factor(state_factor),levels=labels_state()),)}
        V_state_r = V_state_r + scale_x_continuous(breaks=c(seq(input$startvx,input$endvx,by=input$endvx )))
        V_state_r = V_state_r +labs(title="Ratios in visit probabilities among covariate patterns (compared to reference)", x="Time since entry", y="Ratios in visit probabilities")
        V_state_r = V_state_r + labs(color = "Covariate\npatterns")+ labs(fill = "Covariate\npatterns")
        V_state_r = V_state_r +theme(title = element_text(size = input$textsizevis-4), strip.text = element_text(size=input$textfacetvis),
                                     legend.title = element_text(color="black", size= input$textsizevis-5),
                                     legend.text=element_text(size= input$textsizevis-6),
                                     plot.margin = unit(x=c(1.5,1.5,1.5,1.5),units="cm"),
                                     legend.margin = margin(1.5, 1, 1, 1, "cm"),
                                     legend.justification = "center",legend.box.spacing = unit(0.2, "cm"),
                                     axis.title.y = element_text(size= input$textsizevis-5),
                                     axis.title.x = element_text(size= input$textsizevis-5),
                                     axis.text.x = element_text( size=input$textsizevis-6),axis.text.y = element_text( size=input$textsizevis-6))
        V_state_r = ggplotly(V_state_r, tooltip = "text")%>%
          config(
            toImageButtonOptions = list(
              format = "png",
              width = 1200,
              height = 900,scale=input$figscale
            ), edits = list(
              annotationPosition = TRUE,
              annotationTail = TRUE,
              annotationText = TRUE,
              axisTitleText=TRUE,
              colorbarTitleText=TRUE,
              legendPosition=TRUE,
              legendText=TRUE,
              shapePosition=TRUE,
              titleText=TRUE
            ) ,queueLength=10
          )
      }
    }
    # Last branch assigned wins; returned as the reactive's value.
    V_state_r
  }
})
# Render the ratio figure in the UI and offer it as a PNG download.
output$visit_ratio <- renderPlotly ({ datavis5_re() })
output$downplotvis5 <- downloadHandler(
  filename = function() {
    paste0("vis5", ".png")
  },
  content = function(file) {
    plotly_IMAGE(datavis5_re(), width = 1400, height = 1100,
                 format = "png", scale = 2, out_file = file)
  }
)
|
5cec6071d77ef99b0597c011e1852ce973db5070
|
4bbbc253e2640fa5cfd1f1e947ddcfb8a9aee849
|
/Script/Main.R
|
4b753b059594d3ae6fffda4a11cdf3fcdb9d5d27
|
[] |
no_license
|
Hjacer/Govhack2021_Ken_Behrens_Collective
|
750de10a960dee971c30799701a1d0da032b3fae
|
78b00bfc35e2d8fe0594487885fb14a001affd13
|
refs/heads/main
| 2023-07-14T14:39:33.335067
| 2021-08-22T06:53:00
| 2021-08-22T06:53:00
| 398,726,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,702
|
r
|
Main.R
|
# Main.R -- load the wellbeing measures workbook, aggregate values per
# indicator/measure/category/period, classify each measure as point-in-time
# or time-series, and save a line plot per time-series measure.
library('openxlsx')
library('data.table')
library('tidyverse')
library('ggplot2')
library(lubridate)

# NOTE(review): this is a github.com web URL, not a local path or a raw-file
# URL; read.xlsx()/ggsave() cannot read/write through it.  Confirm it should
# point at a local checkout of the repository.
project_path <- "https://github.com/Hjacer/Govhack2021_Ken_Behrens_Collective/"
script_path <- paste0(project_path, 'Script/')
output <- paste0(project_path, 'Output/')
out_plot_ts <- paste0(output, 'Time series Plots/')
out_plot_pit <- paste0(output, 'Point in time plots/')
input <- paste0(project_path, 'Input/')

file_name1 <- "All_Wellbeing_Measures.xlsx"
wellbeing_data <- read.xlsx(paste0(input, file_name1))
setDT(wellbeing_data)

# Parse the period end date; rows that fail to parse are dropped.
wellbeing_data[, PeriodEndDate2 := as.Date(PeriodEndDate, format = "%m/%d/%Y %H:%M:%S")]
wellbeing_data <- wellbeing_data[!is.na(PeriodEndDate2)]

domain_list <- unique(wellbeing_data$Domain)

# Accumulators: all time-series measures and all point-in-time measures.
domain_ts_data_all <- data.table()
domain_pit_data_all <- data.table()
for (domain_name in domain_list) {
  print(domain_name)
  # BUG FIX: the original stripped "/" from domain_name BEFORE filtering, so
  # any domain containing "/" matched zero rows.  Filter on the real name and
  # keep a sanitised copy only for building file names.
  domain_data <- wellbeing_data[Domain == domain_name, ]
  domain_label <- gsub("/", '', domain_name)
  # Sum values over everything except the grouping descriptors below.
  group_cols <- c('Indicator','Measure','CategoryName','CategoryOption','Type','Unit','PeriodEndDate2')
  domain_data_sum <- domain_data[, .(Value_Tot = sum(Value)), by = group_cols]
  setDT(domain_data_sum)
  # ACT-level rows carry no category option; label them explicitly.
  domain_data_sum[is.na(CategoryOption) & Type == 'ACT', CategoryOption := 'ACT']
  for (measure in unique(domain_data_sum$Measure)) {
    print(measure)
    # Same fix as above: filter on the real measure name; sanitise for files.
    subdata <- domain_data_sum[Measure == measure, ]
    measure_label <- gsub("/", '', measure)
    if (length(unique(subdata$PeriodEndDate2)) == 1) {
      # A single reporting period: point-in-time measure.
      print('Point in time data')
      subdata[, Domain := domain_name]
      domain_pit_data_all <- rbind(domain_pit_data_all, subdata)
    }
    else {
      # Multiple periods: time-series measure -- accumulate and plot.
      print('Time series data')
      subdata[, Domain := domain_name]
      domain_ts_data_all <- rbind(domain_ts_data_all, subdata)
      g <- ggplot(subdata, aes(x = PeriodEndDate2, y = Value_Tot,
                               group = CategoryOption, color = CategoryOption)) +
        geom_line() + ggtitle(label = domain_name, subtitle = measure)
      print(g)
      ggsave(filename = paste0(out_plot_ts, 'Plot ', domain_label, '-', measure_label, '.png'), plot = g)
    }
  }
}
|
f5b45ff5f72634a3ded81f5e25a90c0106eacf85
|
92ba3ed47ef15dfb096003cfb959a32852b686b9
|
/AvI_metaanalysis_slopes__24_03_19.R
|
b19cf86b2e4455892804ed50174d7c24d745105c
|
[] |
no_license
|
rajwhitlock/Invasive-abundance-native-impact
|
2c4caef8e3a03a0848a2dc97d4c2cc42011e47c0
|
df6874876de4024deb5a10e681fd73ddde89777d
|
refs/heads/master
| 2020-04-30T12:31:52.135892
| 2019-03-26T07:59:55
| 2019-03-26T07:59:55
| 176,828,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 67,524
|
r
|
AvI_metaanalysis_slopes__24_03_19.R
|
#Meta-analysis code and analysis framework written by Raj Whitlock (RW) 08/01/2017
#Code last updated by Bethany Bradley and RW 03/24/2019
#Overview: this code allows raw data pre-processing to calculate "slopes" effect sizes, meta-analysis of processed effect-size data and figure creation from model output
#You will need raw input files "AvI_data_sheet.txt" and "AvI_attribute_sheet.txt" as well as the R script 'Bar_plot_error_bars.R'. Place the datasets in your working directory
##################################################################
##################################################################
##################################################################
##################################################################
# Load the bar-plot helper used for the figures produced downstream.
# NOTE(review): placeholder paths -- edit to the local checkout before running.
source("/Directory/path/to/script/Bar_plot_error_bars.R")
setwd("/Directory/path/to/working_directory")
#load necessary libraries
library(MASS)
library(MCMCglmm)
library(metafor)
# SECTION 1. The data
# Raw abundance/impact data and study-level attributes (tab-delimited files
# expected in the working directory set above).
full_data <- read.delim("AvI_data_sheet.txt")
aie_db <- read.delim("AvI_attribute_sheet.txt", header=T)
##################################################################
##################################################################
##################################################################
# SECTION 2. Needed utility functions:
# This is a function to calculate approximate Bayesian p-vales from posterior samples from MCMCglmm models, following Hadfield JD (2010) MCMC methods for multi-response generalized linear mixed models: the MCMCglmm R package. Journal of Statistical Software 33, 1–22.
# Approximate Bayesian two-sided p-value from a vector of posterior samples,
# following Hadfield (2010, J. Stat. Softw. 33): twice the smaller proportion
# of draws falling on either side of zero. Returned as a character string;
# when no draws cross zero the value is reported as "<0.001".
pMCMC <- function(data) {
  n_pos <- sum(data > 0, na.rm = TRUE)
  n_neg <- sum(data < 0, na.rm = TRUE)
  p <- 2 * min(n_pos, n_neg) / length(data)
  if (p == 0) {
    return("<0.001")
  }
  as.character(p)
}
##################################################################
##################################################################
##################################################################
# SECTION 3. Data pre-processing to calculate effect sizes
# First, define some "columns to pass" (c2p) through data pre-processing that are cut out and joined back into the effect size dataset prior to analysis
c2p <- c(which(names(data)=="Article_ID"),which(names(data)=="Study_ID"),which(names(data)=="Art_stud"))
##################################################################
##################################################################
####################START OF FUNCTION CREATING "SLOPES" EFFECT SIZES########
# Function to process the raw data and extract regression coefficients and their standard error (effect sizes). The argument columns.to.pass specifies article-level variates that should be retained, excluding the unique article identifier, which will be retained automatically
# Convert the raw data into per-study "slopes" effect sizes: for each
# Article_ID x Study_ID combination, regress the (optionally rescaled) native
# response on the centred (optionally rescaled) invader abundance and return
# the regression coefficients and their standard errors.
#
#   data             raw input data; must contain Article_ID, Study_ID,
#                    Abundance_Invader and Response columns
#   columns.to.pass  integer positions of study-level columns to retain and
#                    join back in after effect-size calculation (the first two
#                    are assumed to be Article_ID and Study_ID)
#   rsc.x, rsc.y     rescale x / y to [0, 1] before fitting (default TRUE);
#                    the x data are always centred
#
# Returns one row per study: Int/Lin/Pol estimates (.u) and standard errors
# (.s), plus cor1.2, the within-study correlation of the linear and polynomial
# predictors (NA when only a linear model could be fitted).
avi.es.calc2 <- function(data, columns.to.pass, rsc.x = TRUE, rsc.y = TRUE) {
  # BUG FIX: the original body indexed the global `c2p` here instead of the
  # columns.to.pass argument, silently ignoring what the caller supplied.
  avi.cov <- data[, columns.to.pass]
  # Keep one row of study-level covariates per Article_ID x Study_ID.
  avi.cov2 <- split(avi.cov, f = list(avi.cov[, 1], avi.cov[, 2]), drop = TRUE)
  avi.cov3 <- lapply(avi.cov2, function(x) {
    x[1, ]
  })
  avi.cov <- do.call(rbind, avi.cov3)
  xx <- split(data, list(data$Article_ID, data$Study_ID), drop = TRUE)
  yy <- lapply(xx, function(y) {
    n <- dim(y)[1]
    # Rescale invader abundance to (0, 1) (shifting first if negative values
    # occur), then centre; or just centre when rsc.x is FALSE.
    if (rsc.x) {
      if (sign(min(y$Abundance_Invader)) == -1) {
        offset.x <- min(y$Abundance_Invader)
      } else {
        offset.x <- 0
      }
      inv.recentred <- (y$Abundance_Invader - offset.x) / (max(y$Abundance_Invader) - offset.x)
      inv.recentred <- inv.recentred - mean(inv.recentred)
    } else {
      inv.recentred <- y$Abundance_Invader - mean(y$Abundance_Invader)
    }
    # Rescale the response to (0, 1) when rsc.y is TRUE.
    if (rsc.y) {
      if (sign(min(y$Response)) == -1) {
        offset.y <- min(y$Response)
      } else {
        offset.y <- 0
      }
      y.resp <- (y$Response - offset.y) / (max(y$Response) - offset.y)
    } else {
      y.resp <- y$Response
    }
    #mev <- 1/(n-3)
    # With > 3 rows AND > 3 distinct abundance values, fit a second-order RAW
    # polynomial (raw coefficients keep the model matrix consistently scaled
    # across studies, so effect sizes are comparable and can be synthesised);
    # otherwise fall back to a straight line and leave the polynomial terms NA.
    if (n > 3 && length(unique(y$Abundance_Invader)) > 3) {
      m1 <- lm(y.resp ~ poly(inv.recentred, 2, raw = TRUE))
      a.1 <- summary(m1)$coef[1, 1]  # intercept estimate
      a.2 <- summary(m1)$coef[1, 2]  # intercept SE
      a.3 <- summary(m1)$coef[2, 1]  # linear estimate
      a.4 <- summary(m1)$coef[2, 2]  # linear SE
      a.5 <- summary(m1)$coef[3, 1]  # polynomial estimate
      a.6 <- summary(m1)$coef[3, 2]  # polynomial SE
      # within-study correlation of the linear and polynomial predictors
      c.m1 <- cov2cor(vcov(m1))[3, 2]
    } else {
      m1 <- lm(y.resp ~ inv.recentred)
      a.1 <- summary(m1)$coef[1, 1]
      a.2 <- summary(m1)$coef[1, 2]
      a.3 <- summary(m1)$coef[2, 1]
      a.4 <- summary(m1)$coef[2, 2]
      a.5 <- NA  # no polynomial slope estimated
      a.6 <- NA  # no polynomial slope estimated
      c.m1 <- NA
    }
    return(data.frame("Article_ID" = as.character(y$Article_ID[1]),
                      "Study_ID" = as.character(y$Study_ID[1]),
                      "Int.u" = a.1, "Int.s" = a.2,
                      "Lin.u" = a.3, "Lin.s" = a.4,
                      "Pol.u" = a.5, "Pol.s" = a.6,
                      "cor1.2" = c.m1))
  })
  yy2 <- do.call(rbind, yy)
  # Join the per-study effect sizes back onto the retained covariates
  # (merged on the shared Article_ID / Study_ID columns).
  res <- merge(avi.cov, yy2)
  return(res)
}
##################################################################
####################END OF FUNCTION CREATING SLOPES EFFECT SIZES########
##################################################################
# SECTION 4. Now run the function and create the effect size dataset:
# SECTION 4 (body): build the effect-size dataset.
# BUG FIX: the raw data live in `full_data` (Section 1); the original call
# passed `data`, which is base R's data() function, not the raw dataset.
avi.ES.s <- avi.es.calc2(full_data, columns.to.pass = c2p, rsc.x = TRUE, rsc.y = TRUE)
# Database internal join of effect estimates with article-level covariates.
avi.ES.s <- merge(avi.ES.s, aie_db)
# Remove studies where polynomial slopes could not be estimated (< 4 data
# points). Two studies removed: BRAN2010_1 and TERA2011_2.
# NOTE: subsetting with a logical condition rather than -which() so that the
# step is a no-op (instead of emptying the whole data frame) if no rows match.
avi.ES.s <- avi.ES.s[!is.na(avi.ES.s$Pol.u), ]
# Two studies with problematic polynomial slopes identified:
# BRAN2010_3 (only two response values, poly.u = 60) and
# TERA2011_4 (poly.u = -464, only five lines in study). Exclude both.
# %in% (never NA) is used instead of == so NA Art_stud values cannot leak in.
avi.ES.s <- avi.ES.s[!(avi.ES.s$Art_stud %in% "BRAN2010_3"), ]
avi.ES.s <- avi.ES.s[!(avi.ES.s$Art_stud %in% "TERA2011_4"), ]
##################################################################
##################################################################
##################################################################
# SECTION 5. Meta-analysis models. The code in this section specifies models that can be run on the effect size data, and code for producing figures from model output. Take care, the correspondence between figure numbers indicated in this code and figure numbers in the manuscript and its supplementary materials may need checking
# now for the analysis, random effects model with article level random effects
#We need this prior: uniform prior on the standard deviation of the random effects, for both residual variance (R structure), and study level variance (G structure)
prior <- list(R=list(V = 1e-10, nu = -1), G = list(G1 = list(V = 1e-10, nu = -1)))
##################################################################
##################################################################
## Analysis 1: Global meta-analysis ## (Not included in manuscript)
# random effects meta-analysis with additional random effects for study, no fixed effects bar the intercept
# Sampler settings retain (110000 - 10000) / 100 = 1000 posterior draws per
# model; later code (split(..., 1:1000)) relies on exactly 1000 draws.
ms1a <- MCMCglmm(Int.u ~ 1, random = ~ Article_ID, mev = avi.ES.s$Int.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES.s)
ms1b <- MCMCglmm(Lin.u ~ 1, random = ~ Article_ID, mev = avi.ES.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES.s)
ms1c <- MCMCglmm(Pol.u ~ 1, random = ~ Article_ID, mev = avi.ES.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES.s)
#mev is a vector of measurement error variance for each data point
# NOTE(review): Int.s / Lin.s / Pol.s hold standard errors (summary()$coef[, 2]
# in avi.es.calc2), while mev is described as a measurement-error VARIANCE —
# confirm whether these values should be squared before being passed in.
# Effect size, direction and statistical significance of linear term
summary(ms1b)
# Effect size, direction and statistical significance of polynomial term
summary (ms1c)
# Build the fitted functional response curve and its 95% credible zone from
# the global meta-analysis. Predictions are made on the centred x scale
# (-0.5, 0.5); the plotting code later shifts x by +0.5 back onto the
# original rescaled (0, 1) abundance scale.
ix <- (-500:500)/1000
# One row per posterior draw: (intercept, linear, polynomial) coefficients.
ms1.sol <- cbind(ms1a$Sol[, "(Intercept)"],
                 ms1b$Sol[, "(Intercept)"],
                 ms1c$Sol[, "(Intercept)"])
# Split the matrix into a list of per-draw coefficient triples.
ms1.sol <- split(ms1.sol, 1:1000)
# res columns: lower 95% HPD bound, upper 95% HPD bound, posterior-mean curve.
res <- matrix(NA, length(ix), 3)
for (i in seq_along(ix)) {
  x_i <- ix[i]
  # Evaluate the quadratic implied by every posterior draw at this x value.
  draws <- vapply(ms1.sol, function(b) b[3] * x_i^2 + b[2] * x_i + b[1], numeric(1))
  # The expected curve is summarised by the mean of the predicted draws.
  res[i, ] <- c(HPDinterval(mcmc(draws)), mean(draws))
}
######## Proportional change in native responses ###################################
######## over the typical range in invasive species' ###############################
######## abundance investigated in the literature ##################################
# note: changes are on average over meta-analysed studies, the response in individual studies varies.
# BUG FIX: ix <- (-500:500)/1000 has 1001 elements, so the top of the modelled
# range is the FINAL row of res (row 1001), not row 1000 as originally indexed.
res[nrow(res), 3] - res[1, 3]
# On average, there was a 23.2% decrease in native responses over the typical range in invasive species abundance
# (percentage quoted from the original run; may shift marginally with the corrected endpoint)
##########################################################
# plot the functional response curve and 95% credible zone
## Figure X: Slopes Global #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
# Empty frame on the original (0, 1) abundance scale (predictions were made on
# the centred scale, hence the +0.5 shift applied to ix).
plot((ix+0.5),res[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
# Shaded 95% credible zone: lower bound traced forwards, upper bound reversed.
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res[,1],res[length(ix):1,2]),border = NA, col = "darkgrey")
# Posterior-mean functional response curve on top of the zone.
lines((ix+0.5),res[,3],type="l", lwd = 2)
##################################################################
##################################################################
## Analysis 2: Community vs. Population level response
##################################################################
#avi.ES.s <- avi.ES.s[-which (avi.ES.s$Study_type == "Spatial"),] #double check to make sure results still hold when spatial studies excluded. They do.
avi.ES2.s <- avi.ES.s[-which(avi.ES.s$Response_type == "Other"),]
# Fixed effect Multi_spp_resp contrasts SINGLE-species (population) against
# MULTIPLE-species (community) responses; the model intercept is the
# community (MULTIPLE) level, per the Multi_spp_respSINGLE contrast below.
ms2a <- MCMCglmm(Int.u ~ Multi_spp_resp, random = ~ Article_ID, mev = avi.ES2.s$Int.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES2.s)
ms2b <- MCMCglmm(Lin.u ~ Multi_spp_resp, random = ~ Article_ID, mev = avi.ES2.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES2.s)
ms2c <- MCMCglmm(Pol.u ~ Multi_spp_resp, random = ~ Article_ID, mev = avi.ES2.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES2.s)
summary(ms2a); summary(ms2b); summary(ms2c)
# Community level intercept term is significantly greater than from zero p < 0.001
# Population level intercept term is significantly lower than community level intercept term p < 0.001
# Community level linear term is significantly less than zero p < 0.001
# Population level linear term does not differ significantly from community level linear term p = 0.282
# Community level polynomial term does not differ significantly from zero p = 0.330
# Population level polynomial term is significantly greater than community level polynomial term p = 0.026
## Plot the functional response curve (and 95% credible zone) ##########################
########################################################################################
# extract posteriors for the average regression coefficients (intercept, x, x^2)
# Columns 1-3: Population level (intercept + SINGLE contrast) for
# intercept/linear/polynomial; columns 4-6: Community level (model intercept).
ms2.sol <- cbind(
ms2a$Sol[, "(Intercept)"] + ms2a$Sol[, "Multi_spp_respSINGLE"],
ms2b$Sol[, "(Intercept)"] + ms2b$Sol[, "Multi_spp_respSINGLE"],
ms2c$Sol[, "(Intercept)"] + ms2c$Sol[, "Multi_spp_respSINGLE"],
ms2a$Sol[, "(Intercept)"],
ms2b$Sol[, "(Intercept)"],
ms2c$Sol[, "(Intercept)"])
# Estimate for Population, linear term
mean(mcmc(ms2.sol)[,2])
# Estimate for Population, polynomial term
mean(mcmc(ms2.sol)[,3])
# Estimate for Community, linear term
mean(mcmc(ms2.sol)[,5])
# Estimate for Community, polynomial term
mean(mcmc(ms2.sol)[,6])
# p-value for Population, linear term, comparing to zero
pMCMC(mcmc(ms2.sol)[,2])
# <0.001
# p-value for Population, polynomial term, comparing to zero
pMCMC(mcmc(ms2.sol)[,3])
# =0.002
# p-value for Community, linear term, comparing to zero
pMCMC(mcmc(ms2.sol)[,5])
# <0.001
# p-value for Community, polynomial term, comparing to zero
pMCMC(mcmc(ms2.sol)[,6])
# =0.330
# split by row, for lapply convenience later...
ms2.sol <- split(ms2.sol,1:1000)
ix <- (-500:500)/1000
# Shared helper: evaluate the quadratic curve implied by each posterior draw
# at every x in ix, and summarise the draws per x as
# (lower 95% HPD, upper 95% HPD, posterior mean).
# idx gives the positions of the (intercept, linear, polynomial) coefficients
# within each element of sol. Replaces near-identical copy-pasted loops.
curve_credible_zone <- function(sol, ix, idx) {
  zone <- matrix(NA, length(ix), 3)
  for (i in seq_along(ix)) {
    draws <- vapply(sol, function(b) {
      b[idx[3]] * ix[i]^2 + b[idx[2]] * ix[i] + b[idx[1]]
    }, numeric(1))
    # expected functional curve is summarised using the mean of the draws
    zone[i, ] <- c(HPDinterval(mcmc(draws)), mean(draws))
  }
  zone
}
####### (i) Population level predictions and credible zone ###############################
res2a <- curve_credible_zone(ms2.sol, ix, idx = c(1, 2, 3))
####### (ii) Community level predictions and credible zone ###############################
res2b <- curve_credible_zone(ms2.sol, ix, idx = c(4, 5, 6))
######## Proportional change in native responses ###################################
######## over the typical range in invasive species' ###############################
######## abundance investigated in the literature ##################################
# note: changes are on average over meta-analysed studies, the response in individual studies varies.
# BUG FIX: ix has 1001 elements, so each curve's endpoint is its FINAL row
# (1001), not row 1000 as originally indexed.
res2a[nrow(res2a), 3] - res2a[1, 3]
# There was a 19.9% decrease in population-level native responses over the typical range in invasive species abundance investigated in the literature
res2b[nrow(res2b), 3] - res2b[1, 3]
# There was a 24.6% decrease in community-level native responses over the typical range in invasive abundance investigated in the literature
# (percentages quoted from the original run; may shift marginally with the corrected endpoint)
##########################################################
## Figure 2b: Slopes Population #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
# Posterior-mean population-level curve with shaded 95% credible zone,
# shifted back to the original (0, 1) abundance scale via +0.5.
plot((ix+0.5),res2a[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Population response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res2a[,1],res2a[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res2a[,3],type="l", lwd = 2)
## Figure 2d: Slopes Community #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
# Same presentation for the community-level curve (res2b).
plot((ix+0.5),res2b[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Community response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res2b[,1],res2b[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res2b[,3],type="l", lwd = 2)
##################################################################
## Analysis 3: Community vs. Population by trophic level
##################################################################
# Keep only the unambiguous trophic relationships (drops the mixed/unknown categories).
avi.ES3.s <- avi.ES2.s[-c(which(avi.ES2.s$Trophic_level=="Above/Intra"),which(avi.ES2.s$Trophic_level=="Below/Intra"),which(avi.ES2.s$Trophic_level=="Mixed"),which(avi.ES2.s$Trophic_level=="Unknown")),]
# remove empty levels (clean up)
avi.ES3.s <- droplevels (avi.ES3.s)
ms4a <- MCMCglmm(Int.u ~ Trophic_level + Multi_spp_resp, random = ~ Article_ID, mev = avi.ES3.s$Int.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
ms4b <- MCMCglmm(Lin.u ~ Trophic_level + Multi_spp_resp, random = ~ Article_ID, mev = avi.ES3.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
ms4c <- MCMCglmm(Pol.u ~ Trophic_level + Multi_spp_resp, random = ~ Article_ID, mev = avi.ES3.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
summary(ms4b)
# The linear term for Trophic category Above is significantly less than zero p < 0.001
# The linear term for Intra is significantly greater than that for Above p = 0.002
# The linear term for Below is significantly greater than that for Above p < 0.001
# set reference level of Trophic_level to intra, to examine intra vs. below
# NOTE(review): these releveling steps use NUMERIC ref indices, which index the
# factor's CURRENT level ordering — each relevel changes that ordering, so the
# same numeric ref means a different level at each step. Named references
# (e.g. ref = "Intra") would be self-documenting and order-independent; verify
# each step reinstates the intended reference before interpreting summaries.
avi.ES3.s$Trophic_level <- relevel (avi.ES3.s$Trophic_level, ref = 3)
#model m4b2
m4b2 <- MCMCglmm(Lin.u ~ Trophic_level + Multi_spp_resp, random = ~ Article_ID, mev = avi.ES3.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
summary(m4b2)
# Linear effect size for intra is significantly lower than (more negative) than that for below p < 0.001
# VERY IMPORTANT: Reset the Trophic_level variable to original indexing
avi.ES3.s$Trophic_level <- relevel (avi.ES3.s$Trophic_level, ref = 2)
summary(ms4c)
# The polynomial term for Trophic category Above is significantly greater than zero p = 0.002
# The polynomial term for Intra is significantly lower than that for Above p = 0.018
# The polynomial term for Below is significantly lower than that for Above p = 0.026
# set reference level of Trophic_level to intra, to examine intra vs. below
avi.ES3.s$Trophic_level <- relevel (avi.ES3.s$Trophic_level, ref = 3)
#model m4c2
m4c2 <- MCMCglmm(Pol.u ~ Trophic_level + Multi_spp_resp, random = ~ Article_ID, mev = avi.ES3.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
summary(m4c2)
# Polynomial effect size for intra does not differ significantly from that for below p = 0.586
# VERY IMPORTANT: Reset the Trophic_level variable to original indexing
avi.ES3.s$Trophic_level <- relevel (avi.ES3.s$Trophic_level, ref = 2)
## Plot the functional response curve (and 95% credible zone) ##########################
########################################################################################
# extract posteriors for the average regression coefficients (intercept, x, x^2)
# 18 columns = {Population (+SINGLE contrast), Community} x
# {Above (model intercept), Intra, Below} x {intercept, linear, polynomial};
# the exact ordering is spelled out in the numbered list below.
ms4.sol <- cbind(
ms4a$Sol[, "(Intercept)"] + ms4a$Sol[, "Multi_spp_respSINGLE"],
ms4b$Sol[, "(Intercept)"] + ms4b$Sol[, "Multi_spp_respSINGLE"],
ms4c$Sol[, "(Intercept)"] + ms4c$Sol[, "Multi_spp_respSINGLE"],
ms4a$Sol[, "(Intercept)"] + ms4a$Sol[, "Multi_spp_respSINGLE"]+ ms4a$Sol[, "Trophic_levelIntra"],
ms4b$Sol[, "(Intercept)"] + ms4b$Sol[, "Multi_spp_respSINGLE"]+ ms4b$Sol[, "Trophic_levelIntra"],
ms4c$Sol[, "(Intercept)"] + ms4c$Sol[, "Multi_spp_respSINGLE"]+ ms4c$Sol[, "Trophic_levelIntra"],
ms4a$Sol[, "(Intercept)"] + ms4a$Sol[, "Multi_spp_respSINGLE"] + ms4a$Sol[, "Trophic_levelBelow"],
ms4b$Sol[, "(Intercept)"] + ms4b$Sol[, "Multi_spp_respSINGLE"] + ms4b$Sol[, "Trophic_levelBelow"],
ms4c$Sol[, "(Intercept)"] + ms4c$Sol[, "Multi_spp_respSINGLE"] + ms4c$Sol[, "Trophic_levelBelow"],
ms4a$Sol[, "(Intercept)"],
ms4b$Sol[, "(Intercept)"],
ms4c$Sol[, "(Intercept)"],
ms4a$Sol[, "(Intercept)"]+ ms4a$Sol[, "Trophic_levelIntra"],
ms4b$Sol[, "(Intercept)"]+ ms4b$Sol[, "Trophic_levelIntra"],
ms4c$Sol[, "(Intercept)"]+ ms4c$Sol[, "Trophic_levelIntra"],
ms4a$Sol[, "(Intercept)"] + ms4a$Sol[, "Trophic_levelBelow"],
ms4b$Sol[, "(Intercept)"] + ms4b$Sol[, "Trophic_levelBelow"],
ms4c$Sol[, "(Intercept)"] + ms4c$Sol[, "Trophic_levelBelow"])
# Estimates for 18 regression term effect sizes
apply(mcmc(ms4.sol), 2, mean)
# Ordering of estimates is as follows, these contain information to add to plots
# 1 Population, Above, Intercept
# 2 Population, Above, Linear
# 3 Population, Above, Polynomial
# 4 Population, Intra, Intercept
# 5 Population, Intra, Linear
# 6 Population, Intra, Polynomial
# 7 Population, Below, Intercept
# 8 Population, Below, Linear
# 9 Population, Below, Polynomial
# 10 Community, Above, Intercept
# 11 Community, Above, Linear
# 12 Community, Above, Polynomial
# 13 Community, Intra, Intercept
# 14 Community, Intra, Linear
# 15 Community, Intra, Polynomial
# 16 Community, Below, Intercept
# 17 Community, Below, Linear
# 18 Community, Below, Polynomial
# p-values for 18 regression term effect sizes, comparing to zero
apply(mcmc(ms4.sol), 2, pMCMC)
# Ordering is as in comments immediately preceding, some of these p-values to be added to plots as star symbols
# split by row, for lapply convenience later...
ms4.sol <- split(ms4.sol,1:1000)
ix <- (-500:500)/1000
# Shared helper for the six credible-zone computations below: evaluate the
# quadratic curve implied by each posterior draw at every x in ix, and
# summarise the draws per x as (lower 95% HPD, upper 95% HPD, posterior mean).
# idx gives the positions of the (intercept, linear, polynomial) coefficients
# within each element of sol (see the numbered ms4.sol column list above).
# Replaces six near-identical copy-pasted loops.
curve_credible_zone <- function(sol, ix, idx) {
  zone <- matrix(NA, length(ix), 3)
  for (i in seq_along(ix)) {
    draws <- vapply(sol, function(b) {
      b[idx[3]] * ix[i]^2 + b[idx[2]] * ix[i] + b[idx[1]]
    }, numeric(1))
    # expected functional curve is summarised using the mean of the draws
    zone[i, ] <- c(HPDinterval(mcmc(draws)), mean(draws))
  }
  zone
}
####### (i) Population, Above: predictions and credible zone #############################
res4a <- curve_credible_zone(ms4.sol, ix, idx = c(1, 2, 3))
####### (ii) Population, Intra: predictions and credible zone ###########################
res4b <- curve_credible_zone(ms4.sol, ix, idx = c(4, 5, 6))
####### (iii) Population, Below: predictions and credible zone ##########################
res4c <- curve_credible_zone(ms4.sol, ix, idx = c(7, 8, 9))
####### (iv) Community, Above: predictions and credible zone ############################
res4d <- curve_credible_zone(ms4.sol, ix, idx = c(10, 11, 12))
####### (v) Community, Intra: predictions and credible zone #############################
res4e <- curve_credible_zone(ms4.sol, ix, idx = c(13, 14, 15))
####### (vi) Community, Below: predictions and credible zone #############################
res4f <- curve_credible_zone(ms4.sol, ix, idx = c(16, 17, 18))
######## Proportional change in native responses ###################################
######## over the typical range in invasive species' ###############################
######## abundance investigated in the literature ##################################
# note: changes are on average over meta-analysed studies, the response in individual studies varies.
# BUG FIX: ix has 1001 elements, so each curve's endpoint is its FINAL row
# (1001), not row 1000 as originally indexed.
res4a[nrow(res4a), 3] - res4a[1, 3]
# Where invasive species occupied a higher trophic level, there was a 44.0% decrease in population-level native responses over the typical range in invasive abundance investigated in the literature
res4b[nrow(res4b), 3] - res4b[1, 3]
# Where invasive species occupied the same trophic level, there was a 19.7% decrease in population-level native responses over the typical range in invasive abundance investigated in the literature
res4c[nrow(res4c), 3] - res4c[1, 3]
# Where invasive species occupied a lower trophic level, there was a 0.5% increase in population-level native responses over the typical range in invasive abundance investigated in the literature
res4d[nrow(res4d), 3] - res4d[1, 3]
# Where invasive species occupied a higher trophic level, there was a 52.0% decrease in community-level native responses over the typical range in invasive abundance investigated in the literature
res4e[nrow(res4e), 3] - res4e[1, 3]
# Where invasive species occupied the same trophic level, there was a 27.8% decrease in community-level native responses over the typical range in invasive abundance investigated in the literature
res4f[nrow(res4f), 3] - res4f[1, 3]
# Where invasive species occupied a lower trophic level, there was a 7.6% decrease in community-level native responses over the typical range in invasive abundance investigated in the literature
# (percentages quoted from the original run; may shift marginally with the corrected endpoint)
##########################################################
# Shared helper for Figures 3a-3f: draw the posterior-mean curve (column 3 of
# `zone`) with its shaded 95% credible zone (columns 1-2) on the original
# (0, 1) abundance scale (+0.5 shift from the centred prediction grid).
# Uses the global prediction grid `ix`. Replaces six near-identical
# copy-pasted plotting stanzas.
plot_credible_zone <- function(zone, ylab) {
  par(mfrow = c(1, 1))
  par(mar = c(5, 5, 2, 2))
  plot((ix + 0.5), zone[, 3], type = "n", xlim = c(0, 1), ylim = c(0, 1),
       ylab = ylab, xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
  polygon(x = c(ix, ix[length(ix):1]) + 0.5,
          y = c(zone[, 1], zone[length(ix):1, 2]), border = NA, col = "darkgrey")
  lines((ix + 0.5), zone[, 3], type = "l", lwd = 2)
}
## Figure 3a: Population response for invader at higher trophic #######################################
plot_credible_zone(res4a, "Population response")
## Figure 3b: Population response for invader at same trophic #######################################
plot_credible_zone(res4b, "Population response")
## Figure 3c: Population response for invader at lower trophic #######################################
plot_credible_zone(res4c, "Population response")
## Figure 3d: Community response for invader at higher trophic #######################################
plot_credible_zone(res4d, "Community response")
## Figure 3e: Community response for invader at same trophic #######################################
plot_credible_zone(res4e, "Community response")
## Figure 3f: Community response for invader at lower trophic #######################################
plot_credible_zone(res4f, "Community response")
##################################################################
## Figure 4: Slopes by diversity metric
##################################################################
# Community (multi-species) responses only, restricted to diversity metrics
# (Richness / Diversity / Evenness — see coefficient names below); abundance
# and "Other" response types are dropped.
avi.ES4.s <- avi.ES.s[which(avi.ES.s$Multi_spp_resp == "MULTIPLE"),]
avi.ES4.s <- avi.ES4.s[-c(which(avi.ES4.s$Response_type=="Abundance"),which(avi.ES4.s$Response_type=="Other")),]
# remove empty levels (clean up)
avi.ES4.s <- droplevels (avi.ES4.s)
ms29a <- MCMCglmm(Int.u ~ Response_type, random = ~ Article_ID, mev = avi.ES4.s$Int.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES4.s)
ms29b <- MCMCglmm(Lin.u ~ Response_type, random = ~ Article_ID, mev = avi.ES4.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES4.s)
ms29c <- MCMCglmm(Pol.u ~ Response_type, random = ~ Article_ID, mev = avi.ES4.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES4.s)
summary(ms29b)
# The linear term for diversity responses is significantly less than zero p < 0.001
# The linear term for evenness is not significantly different from that for diversity p = 0.278
# The linear term for richness is significantly greater than that for diversity p = 0.036
# set reference level of Trophic_level to intra, to examine intra vs. below
# NOTE(review): the comment above looks copy-pasted from Analysis 3 — this
# line actually relevels Response_type (to its 3rd level).
avi.ES4.s$Response_type <- relevel (avi.ES4.s$Response_type, ref = 3)
#model m29b2
m29b2 <- MCMCglmm(Lin.u ~ Response_type, random = ~ Article_ID, mev = avi.ES4.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES4.s)
summary(m29b2)
# Linear effect size for evenness is significantly lower than (more negative) than that for richness below p = 0.004
# VERY IMPORTANT: Reset the Trophic_level variable to original indexing
# NOTE(review): this "reset" (and the identical one at the end of this
# section) relevels avi.ES3.s$Trophic_level — Analysis 3's variable — NOT
# avi.ES4.s$Response_type, which is therefore never reset. The numeric
# ref = 2 a few lines below consequently indexes the already-releveled
# Response_type ordering; confirm which level was actually intended as the
# reference for m29c2 before interpreting its summary.
avi.ES3.s$Trophic_level <- relevel (avi.ES3.s$Trophic_level, ref = 2)
summary(ms29c)
# The polynomial term for diversity responses is not significantly different from zero p = 0.888
# The polynomial term for evenness is not significantly different from that for diversity p = 0.200
# The polynomial term for richness is not significantly different from that for diversity p = 0.188
# set reference level of Trophic_level to intra, to examine intra vs. below
avi.ES4.s$Response_type <- relevel (avi.ES4.s$Response_type, ref = 2)
#model m29c2
m29c2 <- MCMCglmm(Pol.u ~ Response_type, random = ~ Article_ID, mev = avi.ES4.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES4.s)
summary(m29c2)
# Polynomial effect size for evenness is significantly greater than that for richness p = 0.012
# VERY IMPORTANT: Reset the Trophic_level variable to original indexing
avi.ES3.s$Trophic_level <- relevel (avi.ES3.s$Trophic_level, ref = 2)
# Combine the fixed-effect posterior draws into absolute effect sizes per
# response type: Diversity is the model reference level (intercept alone);
# Richness and Evenness are obtained by adding their contrast coefficients.
# Columns run (intercept, linear, polynomial) within each response type.
ms29.sol <- cbind(ms29a$Sol[, "(Intercept)"] + ms29a$Sol[, "Response_typeRichness"],
                  ms29b$Sol[, "(Intercept)"] + ms29b$Sol[, "Response_typeRichness"],
                  ms29c$Sol[, "(Intercept)"] + ms29c$Sol[, "Response_typeRichness"],
                  ms29a$Sol[, "(Intercept)"],
                  ms29b$Sol[, "(Intercept)"],
                  ms29c$Sol[, "(Intercept)"],
                  ms29a$Sol[, "(Intercept)"]+ ms29a$Sol[, "Response_typeEvenness"],
                  ms29b$Sol[, "(Intercept)"]+ ms29b$Sol[, "Response_typeEvenness"],
                  ms29c$Sol[, "(Intercept)"]+ ms29c$Sol[, "Response_typeEvenness"])
# Estimates for 9 regression term effect sizes
apply(mcmc(ms29.sol), 2, mean)
# Ordering of estimates is as follows, these contain information to add to plots
# 1 Richness, Intercept
# 2 Richness, Linear
# 3 Richness, Polynomial
# 4 Diversity, Intercept
# 5 Diversity, Linear
# 6 Diversity, Polynomial
# 7 Evenness, Intercept
# 8 Evenness, Linear
# 9 Evenness, Polynomial
# p-values for 9 regression term effect sizes, comparing to zero
# (pMCMC is a helper defined earlier in the file -- presumably a two-sided
# MCMC p-value; confirm against its definition.)
apply(mcmc(ms29.sol), 2, pMCMC)
# Ordering is as in comments immediately preceding, some of these p-values to be added to plots as star symbols
# split by row, for lapply convenience later...
# (1000 rows = retained posterior samples: (110000 - 10000) / 100)
ms29.sol <- split(ms29.sol,1:1000)
# Standardised invasive-abundance axis: 1001 points spanning [-0.5, 0.5]
# (shifted to [0, 1] when plotted later).
ix <- (-500:500)/1000
# Posterior-predicted quadratic response curve and its 95% credible zone.
# Replaces three copy-pasted loops with one helper.
#
# sol_list: list of posterior draws (each a numeric vector of effect sizes,
#           as produced by split(ms29.sol, 1:1000)).
# ix:       abundance values at which to evaluate the curve.
# first:    position of the intercept term within each draw; the linear and
#           polynomial terms sit at first + 1 and first + 2.
#
# Returns a length(ix) x 3 matrix: lower CI, upper CI, posterior mean.
credible_zone <- function(sol_list, ix, first) {
  res <- matrix(NA_real_, length(ix), 3)
  for (i in seq_along(ix)) {
    # quadratic: poly * x^2 + linear * x + intercept, for every posterior draw
    preds <- vapply(sol_list, function(y) {
      y[first + 2] * ix[i]^2 + y[first + 1] * ix[i] + y[first]
    }, numeric(1))
    res[i, 1:2] <- HPDinterval(mcmc(preds))  # 95% highest posterior density
    # expected functional curve is summarised using the posterior mean
    res[i, 3] <- mean(preds)
  }
  res
}
####### (i) Richness: predictions and credible zone #############################
res29a <- credible_zone(ms29.sol, ix, 1)
####### (ii) Diversity: predictions and credible zone ###########################
res29b <- credible_zone(ms29.sol, ix, 4)
####### (iii) Evenness: predictions and credible zone ###########################
res29c <- credible_zone(ms29.sol, ix, 7)
######## Proportional change in native responses ###################################
######## over the typical range in invasive species' ###############################
######## abundance investigated in the literature ##################################
# note: changes are on average over meta-analysed studies, the response in individual studies varies.
# FIX: ix <- (-500:500)/1000 has 1001 points, so the prediction matrices have
# 1001 rows; the original hard-coded row 1000 stopped one grid step short of
# the upper end of the range.  nrow() spans the full range (the percentages
# quoted below were computed with the original indexing and may shift slightly).
res29a[nrow(res29a), 3] - res29a[1, 3]
# There was a 10.9% decrease in native species richness over the typical range in invasive abundance investigated in the literature
res29b[nrow(res29b), 3] - res29b[1, 3]
# There was a 23.4% decrease in native species diversity over the typical range in invasive abundance investigated in the literature
res29c[nrow(res29c), 3] - res29c[1, 3]
# There was a 29.8% decrease in native species evenness over the typical range in invasive abundance investigated in the literature
##########################################################
# Figures 4a-c share one pattern: the abundance axis ix is shifted by +0.5 to
# map onto [0, 1]; polygon() draws the 95% credible envelope (lower bounds
# forward, upper bounds reversed); lines() overlays the posterior-mean curve.
## Figure 4a: Slopes for richness analyses #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
par(pty="s") #square!!
plot((ix+0.5),res29a[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res29a[,1],res29a[length(ix):1,2]),border = NA, col = "red")
lines((ix+0.5),res29a[,3],type="l", lwd = 2)
###
## Figure 4b: Slopes for diversity analyses #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
par(pty="s") #square!!
plot((ix+0.5),res29b[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2, new = F)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res29b[,1],res29b[length(ix):1,2]),border = NA, col = "cyan")
lines((ix+0.5),res29b[,3],type="l", lwd = 2)
## Figure 4c: Slopes for evenness analyses #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
par(pty="s") #square!!
plot((ix+0.5),res29c[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res29c[,1],res29c[length(ix):1,2]),border = NA, col = "blue")
lines((ix+0.5),res29c[,3],type="l", lwd = 2)
##################################################################
## Analysis S3.3: Recipient habitat/ ecosystem by trophic level
##################################################################
# Intercept / linear / polynomial effect-size models with additive habitat
# and trophic-level fixed effects; article as random effect; per-study
# sampling variance via mev.  `prior` is defined earlier in the file.
ms10a <- MCMCglmm(Int.u ~ Inv_habitat + Trophic_level, random = ~ Article_ID, mev = avi.ES3.s$Int.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
ms10b <- MCMCglmm(Lin.u ~ Inv_habitat + Trophic_level, random = ~ Article_ID, mev = avi.ES3.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
ms10c <- MCMCglmm(Pol.u ~ Inv_habitat + Trophic_level, random = ~ Article_ID, mev = avi.ES3.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
summary(ms10b)
# The linear term for Aquatic studies is significantly less than zero p < 0.001
# Neither the linear term for terrestrial nor marine studies are significantly different from that for aquatic studies (p = 0.328; p = 0.376)
# set reference level of Inv_habitat to terrestrial, to examine terr vs. marine
avi.ES3.s$Inv_habitat <- relevel (avi.ES3.s$Inv_habitat, ref = 3)
#model m10b2
m10b2 <- MCMCglmm(Lin.u ~ Inv_habitat + Trophic_level, random = ~ Article_ID, mev = avi.ES3.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
summary(m10b2)
# Linear effect size for terrestrial studies is significantly greater (less negative) than that for marine studies p = 0.024
# VERY IMPORTANT: Reset the Inv_habitat variable to original indexing
# (ref = 2 presumably restores "aquatic" as the reference level, consistent
# with the aquatic-referenced contrasts above -- depends on the level order
# established earlier in the file; confirm.)
avi.ES3.s$Inv_habitat <- relevel (avi.ES3.s$Inv_habitat, ref = 2)
summary(ms10c)
# The polynomial term for Aquatic studies is significantly less than zero p < 0.001
# Neither the polynomial term for terrestrial nor marine studies are significantly different from that for aquatic studies (p = 0.112; p = 0.152)
# set reference level of Inv_habitat to terrestrial, to examine terr vs. marine
avi.ES3.s$Inv_habitat <- relevel (avi.ES3.s$Inv_habitat, ref = 3)
#model m10c2
m10c2 <- MCMCglmm(Pol.u ~ Inv_habitat + Trophic_level, random = ~ Article_ID, mev = avi.ES3.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES3.s)
summary(m10c2)
# Polynomial effect size for terrestrial studies not significantly different from that for marine studies p = 0.878
# VERY IMPORTANT: Reset the Inv_habitat variable to original indexing
avi.ES3.s$Inv_habitat <- relevel (avi.ES3.s$Inv_habitat, ref = 2)
# Absolute effect sizes for all 9 habitat-by-trophic-level combinations:
# Aquatic/Above is the reference cell (intercept alone); other cells add the
# relevant habitat and/or trophic-level contrast coefficients.  Columns run
# (intercept, linear, polynomial) within each combination -- see the numbered
# list after the apply() call for the full ordering.
ms10.sol <- cbind(ms10a$Sol[, "(Intercept)"] + ms10a$Sol[, "Inv_habitatterrestrial"],
                  ms10b$Sol[, "(Intercept)"] + ms10b$Sol[, "Inv_habitatterrestrial"],
                  ms10c$Sol[, "(Intercept)"] + ms10c$Sol[, "Inv_habitatterrestrial"],
                  ms10a$Sol[, "(Intercept)"],
                  ms10b$Sol[, "(Intercept)"],
                  ms10c$Sol[, "(Intercept)"],
                  ms10a$Sol[, "(Intercept)"]+ ms10a$Sol[, "Inv_habitatmarine"],
                  ms10b$Sol[, "(Intercept)"]+ ms10b$Sol[, "Inv_habitatmarine"],
                  ms10c$Sol[, "(Intercept)"]+ ms10c$Sol[, "Inv_habitatmarine"],
                  ms10a$Sol[, "(Intercept)"] + ms10a$Sol[, "Inv_habitatterrestrial"] + ms10a$Sol[, "Trophic_levelIntra"],
                  ms10b$Sol[, "(Intercept)"] + ms10b$Sol[, "Inv_habitatterrestrial"] + ms10b$Sol[, "Trophic_levelIntra"],
                  ms10c$Sol[, "(Intercept)"] + ms10c$Sol[, "Inv_habitatterrestrial"] + ms10c$Sol[, "Trophic_levelIntra"],
                  ms10a$Sol[, "(Intercept)"] + ms10a$Sol[, "Trophic_levelIntra"],
                  ms10b$Sol[, "(Intercept)"] + ms10b$Sol[, "Trophic_levelIntra"],
                  ms10c$Sol[, "(Intercept)"] + ms10c$Sol[, "Trophic_levelIntra"],
                  ms10a$Sol[, "(Intercept)"]+ ms10a$Sol[, "Inv_habitatmarine"] + ms10a$Sol[, "Trophic_levelIntra"],
                  ms10b$Sol[, "(Intercept)"]+ ms10b$Sol[, "Inv_habitatmarine"] + ms10b$Sol[, "Trophic_levelIntra"],
                  ms10c$Sol[, "(Intercept)"]+ ms10c$Sol[, "Inv_habitatmarine"] + ms10c$Sol[, "Trophic_levelIntra"],
                  ms10a$Sol[, "(Intercept)"] + ms10a$Sol[, "Inv_habitatterrestrial"] + ms10a$Sol[, "Trophic_levelBelow"],
                  ms10b$Sol[, "(Intercept)"] + ms10b$Sol[, "Inv_habitatterrestrial"] + ms10b$Sol[, "Trophic_levelBelow"],
                  ms10c$Sol[, "(Intercept)"] + ms10c$Sol[, "Inv_habitatterrestrial"] + ms10c$Sol[, "Trophic_levelBelow"],
                  ms10a$Sol[, "(Intercept)"] + ms10a$Sol[, "Trophic_levelBelow"],
                  ms10b$Sol[, "(Intercept)"] + ms10b$Sol[, "Trophic_levelBelow"],
                  ms10c$Sol[, "(Intercept)"] + ms10c$Sol[, "Trophic_levelBelow"],
                  ms10a$Sol[, "(Intercept)"]+ ms10a$Sol[, "Inv_habitatmarine"] + ms10a$Sol[, "Trophic_levelBelow"],
                  ms10b$Sol[, "(Intercept)"]+ ms10b$Sol[, "Inv_habitatmarine"] + ms10b$Sol[, "Trophic_levelBelow"],
                  ms10c$Sol[, "(Intercept)"]+ ms10c$Sol[, "Inv_habitatmarine"] + ms10c$Sol[, "Trophic_levelBelow"])
# Estimates for 27 regression term effect sizes
apply(mcmc(ms10.sol), 2, mean)
# Ordering of estimates is as follows, these contain information to add to plots
# 1 Terrestrial, Above, Intercept
# 2 Terrestrial, Above, Linear
# 3 Terrestrial, Above, Polynomial
# 4 Aquatic, Above, Intercept
# 5 Aquatic, Above, Linear
# 6 Aquatic, Above, Polynomial
# 7 Marine, Above, Intercept
# 8 Marine, Above, Linear
# 9 Marine, Above, Polynomial
# 10 Terrestrial, Intra, Intercept
# 11 Terrestrial, Intra, Linear
# 12 Terrestrial, Intra, Polynomial
# 13 Aquatic, Intra, Intercept
# 14 Aquatic, Intra, Linear
# 15 Aquatic, Intra, Polynomial
# 16 Marine, Intra, Intercept
# 17 Marine, Intra, Linear
# 18 Marine, Intra, Polynomial
# 19 Terrestrial, Below, Intercept
# 20 Terrestrial, Below, Linear
# 21 Terrestrial, Below, Polynomial
# 22 Aquatic, Below, Intercept
# 23 Aquatic, Below, Linear
# 24 Aquatic, Below, Polynomial
# 25 Marine, Below, Intercept
# 26 Marine, Below, Linear
# 27 Marine, Below, Polynomial
# p-values for 27 regression term effect sizes, comparing to zero
apply(mcmc(ms10.sol), 2, pMCMC)
# Ordering is as in comments immediately preceding, some of these p-values to be added to plots as star symbols
# split by row, for lapply convenience later...
# (1000 rows = retained posterior samples: (110000 - 10000) / 100)
ms10.sol <- split(ms10.sol,1:1000)
# Standardised invasive-abundance axis: 1001 points spanning [-0.5, 0.5].
ix <- (-500:500)/1000
# Posterior-predicted quadratic response curve and its 95% credible zone.
# Replaces nine copy-pasted loops with one helper.
#
# sol_list: list of posterior draws (each a numeric vector of effect sizes,
#           as produced by split(ms10.sol, 1:1000)).
# ix:       abundance values at which to evaluate the curve.
# first:    column of the intercept term within each draw; the linear and
#           polynomial terms sit at first + 1 and first + 2.
#
# Returns a length(ix) x 3 matrix: lower CI, upper CI, posterior mean.
credible_zone <- function(sol_list, ix, first) {
  res <- matrix(NA_real_, length(ix), 3)
  for (i in seq_along(ix)) {
    # quadratic: poly * x^2 + linear * x + intercept, for every posterior draw
    preds <- vapply(sol_list, function(y) {
      y[first + 2] * ix[i]^2 + y[first + 1] * ix[i] + y[first]
    }, numeric(1))
    res[i, 1:2] <- HPDinterval(mcmc(preds))  # 95% highest posterior density
    # expected functional curve is summarised using the posterior mean
    res[i, 3] <- mean(preds)
  }
  res
}
####### (i) Terrestrial, above: predictions and credible zone ###########################
res10a <- credible_zone(ms10.sol, ix, 1)
####### (ii) Aquatic, above: predictions and credible zone ###########################
res10b <- credible_zone(ms10.sol, ix, 4)
####### (iii) Marine, above: predictions and credible zone ###########################
res10c <- credible_zone(ms10.sol, ix, 7)
####### (iv) Terrestrial, intra: predictions and credible zone #########################
res10d <- credible_zone(ms10.sol, ix, 10)
####### (v) Aquatic, intra: predictions and credible zone ###########################
res10e <- credible_zone(ms10.sol, ix, 13)
####### (vi) Marine, intra: predictions and credible zone ###########################
res10f <- credible_zone(ms10.sol, ix, 16)
####### (vii) Terrestrial, below: predictions and credible zone #########################
res10g <- credible_zone(ms10.sol, ix, 19)
####### (viii) Aquatic, below: predictions and credible zone ###########################
res10h <- credible_zone(ms10.sol, ix, 22)
####### (ix) Marine, below: predictions and credible zone ###########################
res10i <- credible_zone(ms10.sol, ix, 25)
######## Proportional change in native responses ###################################
######## over the typical range in invasive species' ###############################
######## abundance investigated in the literature ##################################
# note: changes are on average over meta-analysed studies, the response in individual studies varies.
# FIX: the prediction matrices have length(ix) == 1001 rows; the original
# hard-coded row 1000 stopped one grid step short of the upper end of the
# range.  nrow() spans the full range (the percentages quoted below were
# computed with the original indexing and may shift slightly).
res10a[nrow(res10a), 3] - res10a[1, 3]
# Where the invasive species was at a higher trophic level, there was a 54.9% decrease in terrestrial native responses over the typical range in invasive abundance investigated in the literature
res10b[nrow(res10b), 3] - res10b[1, 3]
# Where the invasive species was at a higher trophic level, there was a 45.7% decrease in aquatic native responses over the typical range in invasive abundance investigated in the literature
res10c[nrow(res10c), 3] - res10c[1, 3]
# Where the invasive species was at a higher trophic level, there was a 35.0% decrease in marine native responses over the typical range in invasive abundance investigated in the literature
res10d[nrow(res10d), 3] - res10d[1, 3]
# Where the invasive species was at the same trophic level, there was a 30.0% decrease in terrestrial native responses over the typical range in invasive abundance investigated in the literature
res10e[nrow(res10e), 3] - res10e[1, 3]
# Where the invasive species was at the same trophic level, there was a 20.8% decrease in aquatic native responses over the typical range in invasive abundance investigated in the literature
res10f[nrow(res10f), 3] - res10f[1, 3]
# Where the invasive species was at the same trophic level, there was a 10.1% decrease in marine native responses over the typical range in invasive abundance investigated in the literature
res10g[nrow(res10g), 3] - res10g[1, 3]
# Where the invasive species was at a lower trophic level, there was a 11.4% decrease in terrestrial native responses over the typical range in invasive abundance investigated in the literature
res10h[nrow(res10h), 3] - res10h[1, 3]
# Where the invasive species was at a lower trophic level, there was a 2.2% decrease in aquatic native responses over the typical range in invasive abundance investigated in the literature
res10i[nrow(res10i), 3] - res10i[1, 3]
# Where the invasive species was at a lower trophic level, there was a 8.5% increase in marine native responses over the typical range in invasive abundance investigated in the literature
##########################################################
# Figures S3.3a-i share one pattern: abundance axis ix shifted by +0.5 onto
# [0, 1]; polygon() draws the 95% credible envelope (lower bounds forward,
# upper bounds reversed); lines() overlays the posterior-mean curve.
## Figure S3.3a: Terrestrial response for invader at higher trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10a[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Terr)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10a[,1],res10a[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10a[,3],type="l", lwd = 2)
## Figure S3.3d: Aquatic response for invader at higher trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10b[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Aqua)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10b[,1],res10b[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10b[,3],type="l", lwd = 2)
## Figure S3.3g: Marine response for invader at higher trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10c[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Mar)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10c[,1],res10c[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10c[,3],type="l", lwd = 2)
## Figure S3.3b: Terrestrial response for invader at same trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10d[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Terr)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10d[,1],res10d[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10d[,3],type="l", lwd = 2)
## Figure S3.3e: Aquatic response for invader at same trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10e[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Aqua)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10e[,1],res10e[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10e[,3],type="l", lwd = 2)
## Figure S3.3h: Marine response for invader at same trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10f[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Mar)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10f[,1],res10f[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10f[,3],type="l", lwd = 2)
## Figure S3.3c: Terrestrial response for invader at lower trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10g[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Terr)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10g[,1],res10g[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10g[,3],type="l", lwd = 2)
## Figure S3.3f: Aquatic response for invader at lower trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10h[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response (Aqua)", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10h[,1],res10h[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10h[,3],type="l", lwd = 2)
## Figure S3.3i: Marine response for invader at lower trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res10i[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res10i[,1],res10i[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res10i[,3],type="l", lwd = 2)
##################################################################
## Analysis S3.4: Invasive plants vs. animals by trophic level
##################################################################
# Drop bacterial invaders so the kingdom contrast is plant vs. animal only.
avi.ES5.s <- avi.ES3.s[-which(avi.ES3.s$Inv_kingdom =="Bacteria"),]
avi.ES5.s <- droplevels(avi.ES5.s)
# Intercept / linear / polynomial effect-size models with additive kingdom
# and trophic-level fixed effects; article as random effect; per-study
# sampling variance via mev.  `prior` is defined earlier in the file.
ms19a <- MCMCglmm(Int.u ~ Inv_kingdom + Trophic_level, random = ~ Article_ID, mev = avi.ES5.s$Int.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES5.s)
ms19b <- MCMCglmm(Lin.u ~ Inv_kingdom + Trophic_level, random = ~ Article_ID, mev = avi.ES5.s$Lin.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES5.s)
ms19c <- MCMCglmm(Pol.u ~ Inv_kingdom + Trophic_level, random = ~ Article_ID, mev = avi.ES5.s$Pol.s, prior = prior, nitt = 110000, burnin = 10000, thin = 100, verbose = F, data = avi.ES5.s)
summary(ms19b)
# Linear effect size for invasive animals is significantly less than zero (p < 0.001)
# Linear effect sizes of invasive plants and animals do not differ (p = 0.186)
summary(ms19c)
# Polynomial effect size for invasive animals is significantly greater than zero (p < 0.001)
# Polynomial effect sizes significantly lower in invasive plants than invasive animals (p = 0.036)
# Absolute effect sizes for all 6 kingdom-by-trophic-level combinations:
# Animal/Above is the reference cell (intercept alone); other cells add the
# relevant kingdom and/or trophic-level contrast coefficients.  Columns run
# (intercept, linear, polynomial) within each combination -- see the numbered
# list after the apply() call for the full ordering.
ms19.sol <- cbind(ms19a$Sol[, "(Intercept)"],
                  ms19b$Sol[, "(Intercept)"],
                  ms19c$Sol[, "(Intercept)"],
                  ms19a$Sol[, "(Intercept)"] + ms19a$Sol[, "Trophic_levelIntra"],
                  ms19b$Sol[, "(Intercept)"] + ms19b$Sol[, "Trophic_levelIntra"],
                  ms19c$Sol[, "(Intercept)"] + ms19c$Sol[, "Trophic_levelIntra"],
                  ms19a$Sol[, "(Intercept)"] + ms19a$Sol[, "Trophic_levelBelow"],
                  ms19b$Sol[, "(Intercept)"] + ms19b$Sol[, "Trophic_levelBelow"],
                  ms19c$Sol[, "(Intercept)"] + ms19c$Sol[, "Trophic_levelBelow"],
                  ms19a$Sol[, "(Intercept)"] + ms19a$Sol[, "Inv_kingdomPlant"],
                  ms19b$Sol[, "(Intercept)"] + ms19b$Sol[, "Inv_kingdomPlant"],
                  ms19c$Sol[, "(Intercept)"] + ms19c$Sol[, "Inv_kingdomPlant"],
                  ms19a$Sol[, "(Intercept)"] + ms19a$Sol[, "Trophic_levelIntra"]+ ms19a$Sol[, "Inv_kingdomPlant"],
                  ms19b$Sol[, "(Intercept)"] + ms19b$Sol[, "Trophic_levelIntra"]+ ms19b$Sol[, "Inv_kingdomPlant"],
                  ms19c$Sol[, "(Intercept)"] + ms19c$Sol[, "Trophic_levelIntra"]+ ms19c$Sol[, "Inv_kingdomPlant"],
                  ms19a$Sol[, "(Intercept)"] + ms19a$Sol[, "Trophic_levelBelow"]+ ms19a$Sol[, "Inv_kingdomPlant"],
                  ms19b$Sol[, "(Intercept)"] + ms19b$Sol[, "Trophic_levelBelow"]+ ms19b$Sol[, "Inv_kingdomPlant"],
                  ms19c$Sol[, "(Intercept)"] + ms19c$Sol[, "Trophic_levelBelow"]+ ms19c$Sol[, "Inv_kingdomPlant"])
# Estimates for 18 regression term effect sizes
apply(mcmc(ms19.sol), 2, mean)
# Ordering of estimates is as follows, these contain information to add to plots
# 1 Animal, Above, Intercept
# 2 Animal, Above, Linear
# 3 Animal, Above, Polynomial
# 4 Animal, Intra, Intercept
# 5 Animal, Intra, Linear
# 6 Animal, Intra, Polynomial
# 7 Animal, Below, Intercept
# 8 Animal, Below, Linear
# 9 Animal, Below, Polynomial
# 10 Plant, Above, Intercept
# 11 Plant, Above, Linear
# 12 Plant, Above, Polynomial
# 13 Plant, Intra, Intercept
# 14 Plant, Intra, Linear
# 15 Plant, Intra, Polynomial
# 16 Plant, Below, Intercept
# 17 Plant, Below, Linear
# 18 Plant, Below, Polynomial
# p-values for 18 regression term effect sizes, comparing to zero
# (pMCMC is a helper defined earlier in the file -- presumably a two-sided
# MCMC p-value; confirm against its definition.)
apply(mcmc(ms19.sol), 2, pMCMC)
# Ordering is as in comments immediately preceding, some of these p-values to be added to plots as star symbols
# split by row, for lapply convenience later...
# (1000 rows = retained posterior samples: (110000 - 10000) / 100)
ms19.sol <- split(ms19.sol,1:1000)
# Standardised invasive-abundance axis: 1001 points spanning [-0.5, 0.5].
ix <- (-500:500)/1000
# Posterior-predicted quadratic response curve and its 95% credible zone.
# Replaces six copy-pasted loops with one helper.
#
# sol_list: list of posterior draws (each a numeric vector of effect sizes,
#           as produced by split(ms19.sol, 1:1000)).
# ix:       abundance values at which to evaluate the curve.
# first:    column of the intercept term within each draw; the linear and
#           polynomial terms sit at first + 1 and first + 2.
#
# Returns a length(ix) x 3 matrix: lower CI, upper CI, posterior mean.
credible_zone <- function(sol_list, ix, first) {
  res <- matrix(NA_real_, length(ix), 3)
  for (i in seq_along(ix)) {
    # quadratic: poly * x^2 + linear * x + intercept, for every posterior draw
    preds <- vapply(sol_list, function(y) {
      y[first + 2] * ix[i]^2 + y[first + 1] * ix[i] + y[first]
    }, numeric(1))
    res[i, 1:2] <- HPDinterval(mcmc(preds))  # 95% highest posterior density
    # expected functional curve is summarised using the posterior mean
    res[i, 3] <- mean(preds)
  }
  res
}
####### (i) Animal, above: predictions and credible zone ###########################
res19a <- credible_zone(ms19.sol, ix, 1)
####### (ii) Animal, intra: predictions and credible zone ###########################
res19b <- credible_zone(ms19.sol, ix, 4)
####### (iii) Animal, below: predictions and credible zone ###########################
res19c <- credible_zone(ms19.sol, ix, 7)
####### (iv) Plant, above: predictions and credible zone ###########################
res19d <- credible_zone(ms19.sol, ix, 10)
####### (v) Plant, intra: predictions and credible zone ###########################
res19e <- credible_zone(ms19.sol, ix, 13)
####### (vi) Plant, below: predictions and credible zone ###########################
res19f <- credible_zone(ms19.sol, ix, 16)
######## Proportional change in native responses ###################################
######## over the typical range in invasive species' ###############################
######## abundance investigated in the literature ##################################
# note: changes are on average over meta-analysed studies, the response in individual studies varies.
# FIX: the prediction matrices have length(ix) == 1001 rows; the original
# hard-coded row 1000 stopped one grid step short of the upper end of the
# range.  nrow() spans the full range (the percentages quoted below were
# computed with the original indexing and may shift slightly).
res19a[nrow(res19a), 3] - res19a[1, 3]
# Where the invasive species was at a higher trophic level, there was a 47.3% decrease in native animal responses over the typical range in invasive abundance investigated in the literature
res19b[nrow(res19b), 3] - res19b[1, 3]
# Where the invasive species was at the same trophic level, there was a 18.2% decrease in native animal responses over the typical range in invasive abundance investigated in the literature
res19c[nrow(res19c), 3] - res19c[1, 3]
# Where the invasive species was at a lower trophic level, there was a 1.3% increase in native animal responses over the typical range in invasive abundance investigated in the literature
res19d[nrow(res19d), 3] - res19d[1, 3]
# Where the invasive species was at a higher trophic level, there was a 58.0% decrease in native plant responses over the typical range in invasive abundance investigated in the literature
res19e[nrow(res19e), 3] - res19e[1, 3]
# Where the invasive species was at the same trophic level, there was a 28.9% decrease in native plant responses over the typical range in invasive abundance investigated in the literature
res19f[nrow(res19f), 3] - res19f[1, 3]
# Where the invasive species was at a lower trophic level, there was a 9.4% decrease in native plant responses over the typical range in invasive abundance investigated in the literature
##########################################################
# Figures S3.4 share one pattern: abundance axis ix shifted by +0.5 onto
# [0, 1]; polygon() draws the 95% credible envelope; lines() overlays the
# posterior-mean curve.
# NOTE(review): res19d (Plant, Above) is never plotted -- the figures jump
# from res19c to res19e.  Possibly intentional (an invasive plant at a higher
# trophic level than the natives may not occur in the data) -- confirm.
## Figure S3.4a: Invasive animal at higher trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res19a[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res19a[,1],res19a[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res19a[,3],type="l", lwd = 2)
## Figure S3.4b: Invasive animal at same trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res19b[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res19b[,1],res19b[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res19b[,3],type="l", lwd = 2)
## Figure S3.4c: Invasive animal at lower trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res19c[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res19c[,1],res19c[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res19c[,3],type="l", lwd = 2)
## Figure S3.4d: Invasive plant at same trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res19e[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res19e[,1],res19e[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res19e[,3],type="l", lwd = 2)
## Figure S3.4e: Invasive plant at lower trophic #######################################
par(mfrow=c(1,1))
par(mar = c(5,5,2,2))
plot((ix+0.5),res19f[,3],type="n",xlim=c(0,1),ylim=c(0,1), ylab = "Native response", xlab = "Invasive abundance", cex.lab = 2, cex.axis = 2)
polygon(x = c(ix,ix[length(ix):1])+0.5, y = c(res19f[,1],res19f[length(ix):1,2]),border = NA, col = "darkgrey")
lines((ix+0.5),res19f[,3],type="l", lwd = 2)
##################################################################
##################################################################
##################################################################
# SECTION 6. function to look see how the sign of the raw data is distributed, both for invasive and native, run the hashed code following the function
# loc.xy <- function (data) {
#
# xx <- split(data,list(data$Article_ID,data$Study_ID),drop=T)
# yy <- lapply(xx, function(y){
# c(sign(min(y$Response)), sign(max(y$Response)), sign(min(y$Abundance_Invader)), sign(max(y$Abundance_Invader)))
# })
#
# yy2 <- do.call(rbind,yy)
#
# return (yy2)
#
# }
#
# test <- loc.xy(data)
# table(test[,1],test[,2]) #min, max signs for native response
# table(test[,3],test[,4]) #min, max signs for invader abundance
##################################################################
##################################################################
##################################################################
|
35c1cac39f62c8b7a090608b381e2bf5b1a88260
|
510172d8e8aa68d23d38e63271aa07f0e3f97660
|
/code/ensemble/process_ensemble.R
|
3f6c902afa5f3e7d334a2857bd5a21bf9bc6415e
|
[
"MIT"
] |
permissive
|
nickreich/covid19-scenario-modeling-hub
|
bef5bf197aaafe04f697a0dfb84e66cf91162a08
|
f36a437c3591e62416e468af1d92e3b6b6752e95
|
refs/heads/master
| 2023-08-16T07:46:27.869612
| 2021-09-27T21:26:13
| 2021-09-27T21:26:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,458
|
r
|
process_ensemble.R
|
# Library and System -----------------------------------------------------------
# Installation
# install.packages(c("purrr", "lubridate", "dplyr", "vroom", "stringr"))
# Library
library(purrr) # for list management;
# for "flatten", "reduce"
library(lubridate) # for date management;
# for "epiweek", "year"
library(dplyr) # for data frame management;
# for "mutate", "bind_rows", "%>%", "filter"
################################## FUNCTIONS ###################################
# Evaluate path and create them if necessary, also create a variable in the
# environment with the variable name = "name_path" and value = "path"
eval_path <- function(name_path, path) {
  # Publish `path` under the name `name_path` up the chain of enclosing
  # environments (the global environment for a top-level definition), so
  # later script code can refer to it by name.
  assign(name_path, path, inherits = TRUE)
  # Make sure the directory exists on disk before anything writes into it.
  if (!dir.exists(path)) {
    dir.create(path)
  }
}
# Calculate ensemble: use the median of the median (50% quantile) projections
# for each model, and the 50/95% confidence interval for the ensemble should be
# the median of the 50/95% quantiles
#
# Args:
#   df            data frame of model projections; must arrive already grouped
#                 by the caller (summarise() collapses one row per group)
#   quantile      numeric quantile level to keep (compared to the `quantile`
#                 column)
#   type          value written into the `type` column of the result
#   quantile_name value written into the `quantile` column of the result
#
# `!!quantile` / `!!type` unquote the *function arguments* so they are not
# captured as the identically named data-frame columns inside the data mask.
# `model_projection_date` back-computes the projection date by subtracting
# 7 days per horizon week encoded in the `target` string (e.g. "4 wk ahead").
ensemble_quantile <- function(df, quantile, type, quantile_name) {
  df %>%
    dplyr::filter(quantile == !!quantile) %>%
    dplyr::summarise(value = median(value, na.rm = TRUE)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(type := !!type,
                  quantile := !!quantile_name,
                  model_projection_date = target_end_date - (
                    as.numeric(gsub("[^[:digit:]]", "", target)) * 7))
}
# Read a model output file in CSV or ZIP format.
#
# Args:
#   path: path to a ".csv" file, or a ".zip" archive whose first entry is the
#         CSV to read (extracted files are removed afterwards).
#
# Returns: a data frame parsed by vroom with "", "NA", "NaN" treated as NA.
read_files <- function(path) {
  file <- basename(path)
  # FIX: anchor the literal dot ("\\.csv$"); the previous ".csv$" pattern also
  # matched names such as "foocsv".
  if (grepl("\\.csv$", file)) {
    df <- vroom::vroom(path, delim = ",", na = c("", "NA", "NaN"))
  } else if (grepl("\\.zip$", file)) {
    file_name <- unzip(path, list = TRUE)[, "Name", TRUE]
    # Extracts into the current working directory; removed again below.
    unzip(path)
    df <- vroom::vroom(file_name[1], delim = ",", na = c("", "NA", "NaN"))
    file.remove(file_name)
  } else {
    # FIX: previously fell through and failed with the cryptic
    # "object 'df' not found"; fail with an informative message instead.
    stop("Unsupported file format: ", file, call. = FALSE)
  }
  df
}
# Read all the files in a path containing the model ordered by folder.
# Aggregate them all in one data frame and add two columns: the model name and
# the model name pasted with the scenario name.
#
# Args:
#   path: character vector of model directories, each holding csv/zip files.
#
# Returns: one data frame row-binding every file's rows with a non-missing
# `quantile`, plus `model_name` and `model_s` columns.
list_model <- function(path) {
  # FIX: iterate over the `path` argument; the original closed over the global
  # `name_model`, silently ignoring its own input.
  lst_model <- lapply(path, function(x) {
    list_files <- grep("csv|zip", dir(x, full.names = TRUE), value = TRUE)
    lst_ds <- lapply(list_files, function(y) {
      df <- read_files(y)
      df <- dplyr::filter(df, !is.na(quantile)) %>%
        dplyr::mutate(
          # model name follows the "YYYY-MM-DD-" prefix in the file name
          model_name = gsub(".{4}-.{2}-.{2}-|.csv|.zip", "", basename(y)),
          # add column with scenario and model information
          model_s = paste(model_name, scenario_name, sep = " - "))
      df
    })
    setNames(lst_ds, basename(list_files))
  })
  lst_model <- purrr::flatten(lst_model)
  purrr::reduce(lst_model, rbind)
}
# Prerequisite -----------------------------------------------------------------
# Create Path Variables and folders (if necessary)
eval_path("path_model", "data-processed/")
#################################### MODELS ####################################
# Download and save model from GitHub Scenario Hub repository:
# (all model folders except the "Ensemble" output folder itself)
name_model <- grep("Ensemble$", dir(path_model, full.names = TRUE),
                   value = TRUE, invert = TRUE)
# Read, process all:
# Process:
#  - Add week information
#  - Add column with model name and with model name - scenario (for plot)
df_model <- list_model(name_model)
# Filter Ensemble:
#  - Select only model with all quantiles information (23 quantile levels)
#  - Filter date that does not contain the projection of all selected models
df_model_ensemble <- df_model %>%
  dplyr::group_by(scenario_id, scenario_name, target, target_end_date, location,
                  model_s) %>%
  dplyr::mutate(sel = ifelse(length(quantile) == 23, 1, 0)) %>%
  dplyr::filter(sel == 1) %>%
  dplyr::ungroup(model_s) %>%
  # round_date: back-compute the projection date from the horizon in `target`
  dplyr::mutate(round_date = target_end_date - (
    as.numeric(gsub("[^[:digit:]]", "", target)) * 7)) %>%
  # recode to round number (round 1 starts 2021-01-02; everything else round 2)
  dplyr::mutate(round_date = ifelse(round_date == "2021-01-02", 1, 2))
# For each round, flag (sel == 1) the target dates covered by fewer models
# than the round's maximum, i.e. dates missing some model's projection.
sel_df <- df_model_ensemble %>%
  dplyr::ungroup() %>%
  dplyr::select(target_end_date, model_name, round_date) %>%
  dplyr::distinct() %>%
  dplyr::group_by(target_end_date, round_date) %>%
  dplyr::summarise(n_model = dplyr::n(), .groups = "drop") %>%
  dplyr::group_by(round_date) %>%
  dplyr::mutate(sel = ifelse(n_model == max(n_model), 0 ,1))
round_n <- unique(sel_df$round_date)
sel_df <- dplyr::filter(sel_df, sel == 1) %>% dplyr::ungroup()
# Drop, per round, the incompletely covered target dates, then re-group for
# the quantile-wise summarise inside ensemble_quantile().
df_model_ensemble <- lapply(round_n, function (x) {
  df_ens <- dplyr::filter(dplyr::ungroup(df_model_ensemble), round_date == x)
  sel_ndate <- sel_df[which(sel_df$round_date == x), "target_end_date", TRUE]
  df_ens <- df_ens[which(!(df_ens$target_end_date %in% sel_ndate)), ]
}) %>% dplyr::bind_rows() %>%
  dplyr::group_by(scenario_id, scenario_name, target, target_end_date, location)
# Calculate Ensemble: one median-based summary per quantile level
df_ensemble <- lapply(unique(df_model_ensemble$quantile), function(x) {
  ensemble_quantile(df_model_ensemble, x, "quantile", x)
}) %>% dplyr::bind_rows()
# Write output by round information (in CSV or ZIP depending on the size of
# the output): files over ~100 MB are zipped and the raw CSV removed.
lapply(unique(df_ensemble$model_projection_date), function(x) {
  df <- dplyr::filter(df_ensemble, grepl(x, model_projection_date))
  name_file <- paste0(path_model, "Ensemble/", x, "-Ensemble.csv")
  vroom::vroom_write(df, name_file, delim = ",")
  if (file.size(name_file) / 1e6 > 100) {
    wd0 <- getwd()
    setwd(dirname(name_file))
    zip(gsub(".csv$", "", basename(name_file)), basename(name_file))
    setwd(wd0)
    file.remove(name_file)
  }
})
|
59bd2fb353ef432329e2d6610172182e3e20f6de
|
fe612f81a3118bf3ebef644bae3281bd1c156442
|
/man/h2o.listTimezones.Rd
|
c9aa99b73aa65e9556c27d7278295ead8ed99f81
|
[] |
no_license
|
cran/h2o
|
da1ba0dff5708b7490b4e97552614815f8d0d95e
|
c54f9b40693ae75577357075bb88f6f1f45c59be
|
refs/heads/master
| 2023-08-18T18:28:26.236789
| 2023-08-09T05:00:02
| 2023-08-09T06:32:17
| 20,941,952
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 307
|
rd
|
h2o.listTimezones.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frame.R
\name{h2o.listTimezones}
\alias{h2o.listTimezones}
\title{List all of the Time Zones Acceptable by the H2O cluster.}
\usage{
h2o.listTimezones()
}
\description{
List all of the Time Zones Acceptable by the H2O cluster.
}
|
0d7f9018ac68c175e9364f6dcec3f7932dbac781
|
42567bc0d579a357d8340c6b3b6f6205726367ba
|
/assignment3/assignment3.R
|
79df8b0c088693837fc8f06b70ca0833a170be14
|
[] |
no_license
|
koallen/skku-data-science
|
ba89fc6084e46db84d65789acd14ee29dcfdbd44
|
65a5d1293502016780a755dc1c682b7317cb2244
|
refs/heads/master
| 2021-01-09T06:47:09.970391
| 2016-07-05T05:54:53
| 2016-07-05T05:54:53
| 38,341,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,065
|
r
|
assignment3.R
|
# Data Science and Management
# Assignment 3
#
# Liu Siyuan
# 15/07/2015
#
# Regression
#
# the dataset 'phones' (MASS) is used here
library(MASS)
data(phones)
# plot original data and do linear regression of calls on year
plot(phones$year, phones$calls)
ye.model <- lm(calls~year, data=phones)
phones$pred <- predict(ye.model, phones)
# plot the linear regression line (coefficients: intercept, slope)
abline(ye.model$coefficients[1], ye.model$coefficients[2])
# plot the predicted values
points(phones$year, phones$pred, col="blue", pch=16)
#
# K-means Clustering
#
# NOTE(review): k-means on raw (unscaled) mtcars is dominated by the
# large-range columns (disp, hp) -- presumably the point of naming it "bad".
bad <- kmeans(mtcars, centers=2)
plot(mtcars$mpg, mtcars$hp, col=bad$cluster, pch=16, xlab="MPG", ylab="Horsepower")
#
# SPAM
#
library(ElemStatLearn)
library(caret)
# 90/10 train/test split (no set.seed, so the split is not reproducible)
sub <- sample(nrow(spam), floor(nrow(spam) * 0.9))
train <- spam[sub,]
test <- spam[-sub,]
# column 58 is the class label; the remaining columns are features
xTrain <- train[,-58]
yTrain <- train$spam
xTest <- test[,-58]
yTest <- test$spam
# train a spam filter model (naive Bayes, 10-fold cross-validation)
model <- train(xTrain, yTrain, 'nb', trControl=trainControl(method='cv', number=10))
# display the results as proportions of predicted vs. true classes
prop.table(table(predict(model$finalModel, xTest)$class, yTest))
|
f4077b7160481b59c3dc188d04d93d9cbfc33dfb
|
7a659c0d9d85442acc3aef13958e38fa1964df15
|
/man/calc.DESeq2.L2FC.Rd
|
3d57795bee1a764ff45c58ace5e52869939f6591
|
[] |
no_license
|
christensensm/COMPOSE
|
f6fd8a4ba2eb920fddbdf3672000aa1918ee61c8
|
fcce69bc70fd64c7ef5992e884e47788014f3b03
|
refs/heads/master
| 2021-06-21T22:03:31.328698
| 2020-12-21T18:18:21
| 2020-12-21T18:18:21
| 160,730,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,173
|
rd
|
calc.DESeq2.L2FC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc.DESeq2.L2FC.R
\name{calc.DESeq2.L2FC}
\alias{calc.DESeq2.L2FC}
\title{Fold-change calculation for CRISPR screen}
\usage{
calc.DESeq2.L2FC(
countsTable,
metadata,
meta.idnum = NULL,
include.batch = F,
p.cutoff = 0.05,
save = F,
verbose = T
)
}
\arguments{
\item{countsTable}{input matrix containing normalized gRNA counts with gRNA ids as row names}
\item{metadata}{input dataframe containing sample names and other identifiers or data}
\item{meta.idnum}{vector containing the column numbers in metadata that represent (1) Cell line, (2) replicates, and (3) condition}
\item{include.batch}{logical to include replicates in the model}
\item{p.cutoff}{numeric - specified p-value cut-off}
\item{save}{logical - do you want to save the fold-change table to csv}
\item{verbose}{TRUE/FALSE}
}
\value{
matrix containing Log2 fold-changes for each comparison
}
\description{
Calculates log2 fold-changes between samples of a specified variable using input of a count matrix and metadata dataframe
}
\examples{
L2FC <- calc.DESeq2.L2FC(countsTable, design.table, save = T)
...
}
|
6f1f29bf4d59b7fa071e5e7dd12a8dab6d36e569
|
7b2eff9c39e1f62dbab32a00d3e7fe1369c8c6dd
|
/R/lefse_rawdata.R
|
fb7890d8c5fcbea547054e9ef73175c430fb2744
|
[] |
no_license
|
ZhonghuiGai/gglefse
|
de1b1650d745b95ee15c2f11011829e0175a9cc1
|
7e13b30d53b731f02c6b0a779954160f8831c608
|
refs/heads/main
| 2023-08-21T20:43:00.356744
| 2021-10-13T02:03:31
| 2021-10-13T02:03:31
| 393,857,495
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,998
|
r
|
lefse_rawdata.R
|
#' Prepare data for downstream Lefse analysis using asv.table, taxa info and the grouping metadata
#'
#' @param asv.path the path of ASVs_table obtained using usearch
#' @param taxa.path the path of ASV_taxa info obtained suing usearch
#' @param group.path the grouping metadata (two columns: sample id, group)
#' @param lefse the logical value for which data to save; if TRUE, writes the
#'   LEfSe-formatted table (group row on top), otherwise the plain merged table
#'
#' @return a txt text file ("lefse.txt") for lefse analysis, written as a
#'   side effect; the function itself returns the value of write.table
#'   invisibly
#' @export
#'
#' @author ZHonghuiGai
#' @examples
#' lefse_rawdata(asv.path = "ASVs_norm.txt", taxa.path = "ASV_tax2.txt", group.path = "group.txt", lefse = TRUE)
lefse_rawdata <- function(asv.path, taxa.path, group.path, lefse = TRUE){
  # step 1 import the ASV table (first column = ASV id, used as row names)
  ASV <- read.delim(asv.path)
  colnames(ASV)[1] <- "ASV"
  rownames(ASV) <- ASV[, 1]
  # step 2 import the taxa table; LEfSe expects "|" as the lineage separator
  taxa <- read.delim(taxa.path, header = FALSE)
  colnames(taxa) <- c("ASV", "class")
  rownames(taxa) <- taxa[, 1]
  taxa$class <- gsub(";", "|", taxa$class)
  # step 3 merge data: align ASV rows to the taxa table's row order
  stopifnot(nrow(ASV) == nrow(taxa))
  stopifnot(all(rownames(ASV) %in% rownames(taxa)))
  ASV <- ASV[rownames(taxa), ]
  if (all(rownames(ASV) ==rownames(taxa))) {
    ASV.taxa <- data.frame(class = taxa$class, ASV[, -1])
    rownames(ASV.taxa) <- NULL
  }
  # step 4 import the grouping metadata and reorder its rows to match the
  # sample columns of ASV.taxa (column 1 is skipped: it holds the lineages)
  grp <- read.delim(group.path, header = FALSE)
  rownames(grp) <- grp[, 1]
  grp <- grp[colnames(ASV.taxa)[-1], ]
  stopifnot(length(colnames(ASV.taxa)[-1]) == length(grp[, 1]))
  stopifnot(colnames(ASV.taxa)[-1] %in% grp[, 1])
  rownames(grp) <- NULL
  # step 5 substitute the first row of ASV.taxa with the grouping information
  # (rbind of a character row coerces every column to character -- intended,
  # since the output is a text table)
  ASV.taxa.lefse <- rbind(colnames(ASV.taxa), ASV.taxa)
  colnames(ASV.taxa.lefse) <- NULL
  ASV.taxa.lefse[1, ] <- c("class", grp[, 2])
  if (lefse) {
    write.table(ASV.taxa.lefse, "lefse.txt", quote = FALSE,
                sep = "\t", col.names = FALSE, row.names = FALSE)
  } else {
    write.table(ASV.taxa, "lefse.txt", quote = FALSE,
                sep = "\t", col.names = FALSE, row.names = FALSE)
  }
}
|
14096e7a34eceb96fa0475f43541d987469fac3d
|
8d5d4ec19ff50496c6ee023221d28b23a1c1594a
|
/test_dummy_proofing.R
|
1e0fe4acf7864d3e1804fa02d94047426fbdd4e9
|
[
"MIT"
] |
permissive
|
RMHogervorst/templates
|
61b2a3054d715ac6e63d0ce9b71b923013710c64
|
bb8194878a1e7705ee7c8b36db7493769cb231cb
|
refs/heads/master
| 2020-05-22T01:38:52.339664
| 2018-04-04T04:54:51
| 2018-04-04T04:54:51
| 59,126,035
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,842
|
r
|
test_dummy_proofing.R
|
# This is a template for dummyproofing your functions
# based primararly on https://github.com/jtleek/rpackages
#######################
# i expect that you use testthat
#
# Goals ####
# That your error messages catch what they should
# That your outputs are what you expect them to be
# That you get reasonable answers for reasonable inputs
# When you know what the answer should be - you get it
# self contained (don't rely on code outside, except the function you are testing)
# isolated: don't rely on other testcases (although I do declare at top of script and test it further on)
# unique: test what has not been tested
# useful: focus on edge cases and inputs where the function may behave erratically or wildly
# Mock out all external services and state
# Don’t unit-test configuration settings
# Name your unit tests clearly and consistently
# When you find a bug, first write a test for it and then fix the bug.
# examples #####
## input proofing for every function
context("input proofing")
# correct values
# FIX: the description string used nested unescaped double quotes
# ("correct input of "function" displays..."), which made the whole file
# unparseable; quote the inner word with single quotes instead.
test_that("correct input of 'function' displays correct result", {
  # expect_equal( "your function bla bla bla" , "answer") # should be identical
})
# wrong input types: null, numbered, character, factor, data frame, matrix, list, function # choose what makes sense
test_that("wrong input values throw informative error", {
  # expect_error(object, regexp = )
})
## output proofing
test_that("output is of the type expected", {
  # FIX: this placeholder was left uncommented and errored on the undefined
  # `object`/`type`; keep it commented like the other template stubs.
  # expect_type(object, type)
})
# results what should be
### add standard cases, most expected input types
### edge cases
# infinity / zero / 1 / 1000 times more then usual
### random inputs
#use random generator
### Tip: try to break your functions: enormous files, inf and -inf as input, Null or NA as input.
### numeric vs integer, vs numeric but in character coding. etc.
|
51e65d533ba6f49750de93d0ffbe08ca9d3147c6
|
6e51b924f87575b662160b34f4a903d1fc7fa0e2
|
/RcppExports.R
|
7e197d055008032fa660cbde5f7e8639f459bbe4
|
[
"Apache-2.0"
] |
permissive
|
dominikj2/RoI_Align_3D
|
c19823f8e4984c639509283713246076582ac843
|
f4e990924cdf395f29f0169b82b1fb5c518cf8b1
|
refs/heads/main
| 2023-07-18T13:42:34.078399
| 2021-08-23T01:22:53
| 2021-08-23T01:22:53
| 398,936,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
RcppExports.R
|
# Auto-generated Rcpp binding (RcppExports.R -- do not edit by hand).
# Forwards its arguments to the compiled C++ routine registered under
# '_torch_cpp_contrib_RoI_Align_3D' in the 'torchpkg' shared library.
# NOTE(review): argument semantics (voxel grid, RoI boxes, per-box batch
# index) are enforced on the C++ side and are not visible here -- confirm
# against the C++ source before relying on them.
cpp_contrib_RoI_Align_3D <- function(Voxel_Space, boxes, box_index) {
  .Call('_torch_cpp_contrib_RoI_Align_3D', PACKAGE = 'torchpkg', Voxel_Space, boxes, box_index)
}
|
595d97875a3efe9288e25be01239e8971b519c2b
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/2766_0/rinput.R
|
384ced2c326b10d355e1e8af62a15960df2637ad
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read the Newick tree from "2766_0.txt",
# remove its root with ape::unroot(), and write the unrooted tree back out.
library(ape)
testtree <- read.tree("2766_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2766_0_unrooted.txt")
|
19c9e0072f1624f8d4116b578f05929b709e7962
|
43ecaba0b376d125f5b9c78a1a0f480003a744d1
|
/R/is_dist_units.R
|
4ff5c4cd994242d49775ac816fc71bc33766dfda
|
[
"MIT"
] |
permissive
|
elipousson/overedge
|
fc26a11ebbf2bdda30d5a88a35c6bbbd67bc176e
|
27dd49ed496601efdc25f1cb56c9d40e2f02155a
|
refs/heads/main
| 2023-05-23T05:56:13.443240
| 2022-08-10T21:28:44
| 2022-08-10T21:28:44
| 449,945,031
| 13
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,736
|
r
|
is_dist_units.R
|
#' General utility functions for working with distance units objects
#'
#' - [is_dist_units]: Is x a distance unit object?
#' - [is_diff_dist]: What is the difference between x and y distance?
#' - [is_same_dist]: Is x the same distance as y? or does the bbox of x and bbox of y have the same x, y, or diagonal distance?
#' - [is_shorter], is_longer: Is x shorter or longer than y?
#' - [is_same_area]: do x and y have the same area?
#'
#' There are two additional functions that support these utility functions:
#'
#' - [get_dist_units]: Get the distance units from x (if x is a sf or units
#' objects or a character string from [dist_unit_options])
#' - [as_dist_units]: Convert x to units using [units::as_units]
#'
#' @name is_dist_units
#' @param x,y objects to check
#' @family dist
#' @export
is_dist_units <- function(x) {
  # TRUE iff x is a units object whose unit string is a recognised distance
  # or area unit.
  # FIX: wrap the %in% test in any(); get_dist_units() can return zero or
  # several matches, and a non-scalar right-hand side of `&&` errors in
  # R >= 4.3 (and yielded NA on length-0 results before that).
  is_units(x) && any(get_dist_units(x) %in% c(dist_unit_options, area_unit_options))
}
#' @name diff_dist
#' @rdname is_dist_units
#' @param units For [is_diff_dist], if x and y are both not units objects, use
#'   units; default to `NULL`.
#' @export
#' @importFrom cli cli_alert_danger
is_diff_dist <- function(x, y, units = NULL) {
  x_is_units <- is_units(x)
  y_is_units <- is_units(y)
  # Scalar dispatch: a plain if/else chain replaces dplyr::case_when(), which
  # is designed for vectors and pulled in an unnecessary dependency here.
  which_is_units <-
    if (x_is_units && !y_is_units) {
      "x"
    } else if (!x_is_units && y_is_units) {
      "y"
    } else if (x_is_units && y_is_units) {
      "xy"
    } else {
      "neither"
    }
  if ((which_is_units == "neither") && is.null(units)) {
    # NOTE(review): this only warns; the "neither" branch below then calls
    # as_units() with units = NULL and fails -- confirm intended behavior.
    cli::cli_alert_danger("No units could be determined for x or y.")
  }
  # diff(c(a, b)) returns b - a, i.e. the y-minus-x difference in shared units.
  switch(which_is_units,
    "x" = diff(c(x, as_dist_units(y, units = x))),
    "y" = diff(c(as_dist_units(x, units = y), y)),
    "xy" = diff(c(x, y)),
    "neither" = diff(c(as_units(x, units = units), as_units(y, units = units)))
  )
}
#' @name is_same_dist
#' @rdname is_dist_units
#' @param dist type of distance to compare if x and y are `sf`, `sfc`, or `bbox`
#'   objects; "diagdist", "xdist", "ydist". defaults to `NULL`.
#' @param diff If `TRUE`, return results from [is_diff_dist] or [is_diff_area];
#'   if `FALSE`, return logical indicator; defaults to `FALSE`
#' @param ... Additional parameters passed to all.equal
#' @export
#' @importFrom sf st_area
is_same_dist <- function(x, y, dist = NULL, diff = FALSE, ...) {
  if (is.character(dist) && is_sf(x, ext = TRUE) && is_sf(y, ext = TRUE)) {
    x <- as_bbox(x)
    # FIX: previously `as_bbox(x)`, which silently compared x against itself
    # and made every bbox comparison trivially "same".
    y <- as_bbox(y)
    dist <- match.arg(dist, c("diagdist", "xdist", "ydist"))
    # Reduce each bbox to the single requested distance (units kept via
    # drop = FALSE).
    x <-
      switch(dist,
        "diagdist" = sf_bbox_diagdist(x, drop = FALSE),
        "xdist" = sf_bbox_xdist(x, drop = FALSE),
        "ydist" = sf_bbox_ydist(x, drop = FALSE)
      )
    y <-
      switch(dist,
        "diagdist" = sf_bbox_diagdist(y, drop = FALSE),
        "xdist" = sf_bbox_xdist(y, drop = FALSE),
        "ydist" = sf_bbox_ydist(y, drop = FALSE)
      )
  }
  if (diff) {
    return(is_diff_dist(x, y))
  }
  # TRUE when the difference is (numerically) zero; otherwise all.equal
  # returns a character description of the mismatch.
  all.equal(as.numeric(is_diff_dist(x, y)), 0, ...)
}
#' @name is_longer
#' @rdname is_dist_units
#' @export
is_longer <- function(x, y) {
  # x is longer than y when the y-minus-x difference is negative... note:
  # is_diff_dist returns y - x, so a positive value means y exceeds x; the
  # original comparison is preserved verbatim.
  delta <- as.numeric(is_diff_dist(x, y))
  delta > 0
}
#' @name is_shorter
#' @rdname is_dist_units
#' @export
is_shorter <- function(x, y) {
  # Mirror of is_longer(): sign of the is_diff_dist() difference, other side.
  delta <- as.numeric(is_diff_dist(x, y))
  delta < 0
}
#' @name get_dist_units
#' @rdname is_dist_units
#' @param null.ok If null.ok is `TRUE`, allow x to return a `NULL` value; if
#'   `FALSE`, error on `NULL` values.
#' @export
#' @importFrom cli cli_abort
#' @importFrom sf st_crs
get_dist_units <- function(x, null.ok = TRUE) {
  if (is.null(x) && null.ok) {
    return(x)
  } else if (is.null(x)) {
    # FIX: typo in the user-facing message ("chracter" -> "character").
    cli::cli_abort(
      "{.var units} must be a unit character string, a unit class object, or a sf object with a valid coordinate reference system."
    )
  }
  # sf objects: take the unit name from the coordinate reference system.
  if (is_sf(x)) {
    return(sf::st_crs(x)$units_gdal)
  }
  # units objects whose numerator is purely distance units (and which are not
  # area units overall, e.g. "m/s"): report the numerator unit(s).
  if (is_units(x) && all(as.character(units(x)[["numerator"]]) %in% dist_unit_options) && !(as.character(units(x)) %in% area_unit_options)) {
    return(as.character(units(x)[["numerator"]]))
  }
  # Any other units object (including areas): report the full unit string.
  if (is_units(x)) {
    return(as.character(units(x)))
  }
  # Character input: keep only recognised distance/area unit names
  # (may return character(0) when nothing matches).
  if (is.character(x)) {
    return(x[x %in% c(dist_unit_options, area_unit_options)])
  }
  # NOTE(review): unhandled types (e.g. bare numerics) fall through and
  # return NULL invisibly -- confirm this is intended.
}
#' @name as_dist_units
#' @rdname is_dist_units
#' @export
#' @importFrom sf st_crs
#' @importFrom units as_units
as_dist_units <- function(x, units = NULL, null.ok = FALSE) {
  # Normalise `units` first: it may be a character string, a units object,
  # or an sf object whose CRS carries the units.
  units <- get_dist_units(units, null.ok = null.ok)
  if (!is.null(units)) {
    units <- match.arg(units, c(dist_unit_options, area_unit_options))
  } else if (null.ok) {
    # No target units and NULL allowed: hand the input back unchanged.
    return(x)
  }
  if (is.numeric(x) && !is_dist_units(x)) {
    # Plain numbers are simply stamped with the target units.
    units::as_units(x, units)
  } else if (cli_yeah("Did you mean to convert {.var x} to {.val {units}}?")) {
    # NOTE(review): cli_yeah() prompts the user, so this branch is only
    # reachable interactively; declining makes the function return NULL
    # invisibly -- confirm intended.
    convert_dist_units(
      dist = x,
      to = units
    )
  }
}
#' @name is_diff_area
#' @rdname is_dist_units
#' @param union If `TRUE`, union objects before comparing area with
#'   [is_diff_area()] or [is_same_area()], defaults to `TRUE`.
#' @export
#' @importFrom sf st_union st_area
is_diff_area <- function(x, y, units = NULL, union = TRUE) {
  # NOTE(review): `units` is currently unused; kept for interface stability.
  if (union) {
    x <- sf::st_union(x)
    y <- sf::st_union(y)
  }
  x_area <- sf::st_area(x)
  y_area <- sf::st_area(y)
  # FIX: was diff(x_area, y_area), which passes y_area as the `lag` argument
  # of diff() and is not an area difference at all. Combine then diff,
  # mirroring is_diff_dist(): returns y_area - x_area.
  diff(c(x_area, y_area))
}
#' @name is_same_area
#' @rdname is_dist_units
#' @export
is_same_area <- function(x, y, units = NULL, union = TRUE, diff = FALSE, ...) {
  # Either surface the raw area difference, or test it against zero.
  if (diff) {
    return(is_diff_area(x, y, units = units, union = union))
  }
  area_delta <- as.numeric(is_diff_area(x, y, union = union))
  all.equal(area_delta, 0, ...)
}
#' Test whether x is a units-class object (from the units package).
#' @noRd
is_units <- function(x) {
  is_class(x, "units")
}
#' Test whether two units objects carry the same unit string.
#' NOTE(review): compares the printed unit representation, so equivalent but
#' differently written units may compare as different -- confirm acceptable.
#' @noRd
is_same_units <- function(x, y) {
  as.character(units(x)) == as.character(units(y))
}
|
2beae3b6fbc14f1c46a44edabdab6616a31f9f23
|
a76e6b446f784d30e8e0eb761b816d92cf056934
|
/R/rs_compare_groups.r
|
b0e3b99829bb796039a6aa35b5bc13c1ed073218
|
[] |
no_license
|
tilltnet/ratingScaleSummary
|
55879033b905cc5370f10d82cb44e040eb94c680
|
c792c882eec2cef0cd0e8983518cf5f48bde6085
|
refs/heads/master
| 2021-01-11T19:48:45.545975
| 2017-01-19T22:59:24
| 2017-01-19T22:59:24
| 79,402,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,245
|
r
|
rs_compare_groups.r
|
#' Compare Medians of a questionnaire battery of equally coded ordinal/
#' categorial variables
#'
#' @description Returns a \emph{data.frame} where each row represents one variable
#' and the columns show the median values of the comparison groups. An additional
#' colum shows the significance levels, based on the \emph{Kruskal-Wallis Rank
#' Sum Test}.
#'
#' @param df dataframe containing the rating scale variables
#' @param groups vector defining groups/ independent variable
#' @param item_labels optional character vector of variable labels
#' @param w Optional weight vector.
#' @param ... additional options for gmedian(), e.g. percentile, scale_intervall or w(eight).
#' @examples
#' # Create sample data.
#' df <- data.frame(replicate(6,sample(1:7, 100, replace = T)))
#'
#' # Compare medians between groups accross several variables.
#' g <- sample(c("G1", "G2", "G3"), 100, replace = T)
#' res_comp <- rs_compare_groups(df = df, groups = g)
#' @export
rs_compare_groups <- function(df, groups, item_labels = NULL, w = NULL, ...) {
  # Coerce the grouping variable to factor so levels() is well defined.
  if(!is.factor(groups)) {
    warning("'groups' is not a factor. Converting to factor.")
    groups <- factor(groups)
  }
  av_names <- names(df)
  # One subset of `df` per group level, named by level (replaces a manual
  # for-loop with an unused counter).
  subsets_ <- lapply(levels(groups), function(lev_) subset(df, groups == lev_))
  names(subsets_) <- levels(groups)
  # Column-bind one median column per group: gmedian() is applied to every
  # rating-scale variable, optionally weighted by the `w` column of each
  # subset. The leading [-1] drops plyr::ldply's .id column.
  if(is.null(w)) {
    comp_df <- do.call(cbind, lapply(subsets_,
                                     FUN = function(x) plyr::ldply(x[av_names],
                                                                   function(y) gmedian(y, ...))[-1]))
  } else {
    comp_df <- do.call(cbind, lapply(subsets_,
                                     FUN = function(x) plyr::ldply(x[av_names],
                                                                   function(y) gmedian(y, w = x[[w]], ...))[-1]))
  }
  names(comp_df) <- levels(groups)
  rownames(comp_df) <- av_names
  if(!is.null(item_labels)) {
    # FIX: previously `item_labels[low_:high_]`, indexing with undefined
    # variables and erroring whenever labels were supplied.
    rownames(comp_df) <- item_labels
  }
  # Append the significance column (Kruskal-Wallis, via create_sig_col) and
  # sort rows by the first group's medians.
  comp_df <- cbind(comp_df, create_sig_col(df[av_names], groups))
  comp_df[order(comp_df[1]),]
}
|
90fbffb11612bb66cad633cccaad66cabccc15bd
|
33b6d6594a378b009170401675babc913cf3ec8a
|
/man/make_tidy_df.Rd
|
1de9b376d8f9612ed63f04f1a91d84af11399c34
|
[] |
no_license
|
berthetclement/covid
|
0bcf19101eac6be3100950c4fcb825bac7f6e37e
|
8e4fad6924e191f55f84d80e2a52cb2556bb4c7a
|
refs/heads/master
| 2023-01-23T10:48:30.002524
| 2020-11-23T13:34:14
| 2020-11-23T13:34:14
| 294,462,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 545
|
rd
|
make_tidy_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_tools.R
\name{make_tidy_df}
\alias{make_tidy_df}
\title{Structuration des donnees.}
\usage{
make_tidy_df(data)
}
\arguments{
\item{data}{Objet de type data frame (time series cases/deaths/recovered).}
}
\value{
Data.frame au format tidy.
}
\description{
Mise au format "tidy".
L'objectif est de faciliter les jointures.
}
\seealso{
Other data tools:
\code{\link{data_covid}()},
\code{\link{data_recod_dom}()},
\code{\link{data_recod}()}
}
\concept{data tools}
|
c132a3e05657811a64ac7da913d7ef1567c1d37e
|
fc0f27dfd49ab477995b68a4d9e0acbfcf193eb0
|
/man/timeBlock.append.Rd
|
8e9fdac1bbf339491749b1e517c31b6e1c358891
|
[] |
no_license
|
EpidemiologyDVM/contact
|
c96d8a22375d20d7c881965d7dbcd1270ebc147a
|
a048a21ca6474cde0d241871353e523d73d3341f
|
refs/heads/master
| 2021-01-14T23:30:11.405964
| 2020-02-24T22:06:47
| 2020-02-24T22:06:47
| 242,796,232
| 0
| 0
| null | 2020-02-24T17:16:01
| 2020-02-24T17:16:01
| null |
UTF-8
|
R
| false
| true
| 2,308
|
rd
|
timeBlock.append.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/timeBlock.append.R
\name{timeBlock.append}
\alias{timeBlock.append}
\title{Append TimeBlock Information to a Data Frame}
\usage{
timeBlock.append(x = NULL, dateTime = NULL, blockLength = 10,
blockUnit = "mins")
}
\arguments{
\item{x}{Data frame containing dateTime information, and to which block
information will be appended. if NULL, dateTime input relies solely on
the dateTime argument.}
\item{dateTime}{Vector of length nrow(x) or singular character data,
detailing the relevant colname in x, that denotes what dateTime
information will be used. If argument == NULL, the function assumes a
column with the colname "dateTime" exists in x. Defaults to NULL.}
\item{blockLength}{Numerical. Describes the number blockUnits within each
temporal block. Defaults to 10.}
\item{blockUnit}{Character string taking the values, "secs," "mins,"
"hours," "days," or "weeks." Defaults to "hours."}
}
\value{
Appends the following columns to \code{x}.
\item{block}{Integer ID describing unique blocks of time of pre-specified
length.}
\item{block.start}{The timepoint in \code{x} at which the \code{block}
begins.}
\item{block.end}{The timepoint in \code{x} at which the \code{block}
ends.}
\item{numBlocks}{Integer describing the total number of time blocks
observed within \code{x} at which the \code{block}}
}
\description{
Appends "block," "block.start," "block.end," and "numBlocks" columns to an
input data frame (x) with a dateTime (see dateTime.append) column. This
allows users to "block" data into blockLength-blockUnit-long
(e.g., 10-min-long) temporal blocks. If x == NULL, the function output
will be a data frame with "dateTime" and block-related columns.
}
\details{
This is a sub-function that can be found in the contactDur functions.
}
\examples{
data("calves")
calves.dateTime<-contact::datetime.append(calves, date = calves$date,
time = calves$time) #add dateTime identifiers for location fixes.
calves.block<-contact::timeBlock.append(x = calves.dateTime,
dateTime = calves.dateTime$dateTime, blockLength = 10,
blockUnit = "mins")
head(calves.block) #see that block information has been appended.
}
\keyword{data-processing}
\keyword{sub-function}
|
65503c3699ab31bd1cde3524c48d495383ae37dd
|
9f273727d61fea30a468dc1e5f8c7b6fd14ed0eb
|
/HUD_GenMatching1.3_9_24.R
|
a6d82f52e0291bd000f33b96661eb5db4ca29617
|
[] |
no_license
|
checono/christine_code
|
3f9ec0cb8c6bdf67800d0aca1e8fa1120f4f5d85
|
3ba222656405a8c12bbe8eb3e56cd8932606071c
|
refs/heads/master
| 2021-01-12T00:05:27.566184
| 2017-01-11T19:57:25
| 2017-01-11T19:57:25
| 78,672,198
| 0
| 0
| null | 2017-01-11T19:32:41
| 2017-01-11T19:30:44
| null |
UTF-8
|
R
| false
| false
| 5,611
|
r
|
HUD_GenMatching1.3_9_24.R
|
#--------------Set up-----------------------------------------------------------------------------------------------------------------------------------------------#
install.packages("sas7bdat")
install.packages("Matching", dependencies=TRUE)
install.packages("dplyr")
install.packages("stats")
#initiate sas7bdat library for reading in foreign file
library("sas7bdat")
library("Matching")
library("dplyr")
library("stats")
#read in SAS DS with the read.sas7bsdat() function
radDS = read.sas7bdat('K:/RAD_Evaluation_2014-2017/Task15/Data/rad_balanced1b.sas7bdat')
dim(radDS) #6488 43
checkVarsALL <- data.frame(sapply(radDS, function(x,y) sum(is.na(x))), y=colnames(radDS))#for entire sample
write.matrix(checkVarsALL ,'~/GetNACounts.csv', sep=",")
FortyThreeNA_OrLess<- data.frame(checkVarsALL[which(checkVarsALL[1]<=43),])
dim(FortyThreeNA_OrLess) # 35; includes RAD
write.matrix(FortyThreeNA_OrLess ,'~/CH_RAD-Vars_43NAs_OrLess.csv', sep=",")
#cross tab; control (i.e., non-RAD) versus RAD
xtabs(~ RAD, data=radDS)
#Subset the data frame to select only cols w/ <=43 NAs; omit NAs from the remaining cols
radDS <- radDS %>%
select(RAD, Project_ID, p_Elder, m_TTP, p_Black, p_Disabled,p_Single, m_adj_inc, m_members,m_BRs,m_age,st,ZIP_c,TOTAL_UNIT_CNT, ACC_UNIT_CNT, cap_fund,PASS_Score,bldg_age,scattered,p_ACC,p_Elder_dev,
vacancy_rt,pers_per_BR,PHA_size_code,Vacant_Rate,Percent_renters_GRAPI_35__or_mor,Percent_White_Asian,Percent_Black,Percent_HISPANIC,
Poverty_rate,LFPR_,U_,Mean_household_income,Median_household_income, Overcrowd_rate_1, Overcrowd_rate_1_5) %>%
na.omit() %>%
mutate_each(funs(as.numeric))
dim(radDS) #5799 36
#--------------Model Set A (no logit p-values) ---------------------------------------------------------------------------------------------------------------------#
#A ==NO logit p-values are included
print("Model set A: No p-values are included; variables included = those from David's original code")
# Covariate subset used for matching in Model A (IDs + treatment + covariates).
ModelA <- radDS %>%
select(Project_ID, RAD, m_adj_inc, p_Black, p_Disabled, p_Elder, p_Single, PASS_Score, m_BRs, vacancy_rt, Overcrowd_rate_1, Percent_Black, Percent_HISPANIC, Vacant_Rate) %>%
na.omit() %>%
mutate_each(funs(as.numeric))
dim(ModelA) #5799 14
#check to see how many RAD and Non-RAD remain based on the above variable selection
xtabs(~ RAD, data=ModelA)
#separate matrix by treatment and all other variables
x <- ModelA %>% select(-RAD, -Project_ID) %>% as.matrix()
#check dimensions
dim(x) #5799 12
#establish balanceMatrix for the actual model
BalanceMat <- as.matrix(x)
# Genetic search for the covariate weighting matrix: 4 controls per treated
# unit (M=4), with ties and replacement allowed. pop.size=7000 makes this the
# expensive step of the script.
genA <- GenMatch(Tr = ModelA$RAD, X = x, BalanceMatrix = BalanceMat, M=4, pop.size=7000, ties=TRUE, replace=TRUE)
#Generate output
#sink file so that output can later be retrieved
sink(file='~/CH_RAD-GenMatchingOutput_1.3_9_24.txt')
#running MATCH is not necessary since I do not care about causal inference; this matching is for analytic purposes
#run the match WITHOUT an outcome that uses genMatch weighting matrix; kept all parameters - e.g., 4 controls per treatment, deterministic ties, replacement
mgenA <- Match(Tr=ModelA$RAD, X=x, M=4, replace=TRUE, ties=TRUE, Weight=3, Weight.matrix=genA)
summary(mgenA)
# Balance diagnostics for the genetic-matching solution.
mbgenA <- MatchBalance(ModelA$RAD ~ ModelA$m_adj_inc+ ModelA$p_Black + ModelA$p_Disabled+ ModelA$p_Elder + ModelA$p_Single + ModelA$PASS_Score + ModelA$m_BRs + ModelA$vacancy_rt + ModelA$Overcrowd_rate_1 + ModelA$Percent_Black + ModelA$Percent_HISPANIC + ModelA$Vacant_Rate, data=ModelA, match.out=mgenA, nboots=1000)
# Comparison match using Mahalanobis-distance weighting (Weight=2), no
# genetic weighting matrix.
mlobA <-Match(Tr=ModelA$RAD, X=x, M=4, replace=TRUE, ties=TRUE, Weight=2)
summary(mlobA)
mblobA<- MatchBalance(ModelA$RAD ~ ModelA$m_adj_inc+ ModelA$p_Black + ModelA$p_Disabled+ ModelA$p_Elder + ModelA$p_Single + ModelA$PASS_Score + ModelA$m_BRs + ModelA$vacancy_rt + ModelA$Overcrowd_rate_1 + ModelA$Percent_Black + ModelA$Percent_HISPANIC + ModelA$Vacant_Rate, data=ModelA, match.out=mlobA, nboots=1000)
# Write the matched-pair listing to a separate output file.
sink(file='~/CH_RAD-GenMatchingOutput_1.3_9_24_PAIRS.txt')
length(mgenA$index.treated)
length(mgenA$index.control)
# Helper: build a matrix of Project_IDs with one row per matched RAD property;
# column 1 is the treated (RAD) property, columns 2-5 are its 4 matched
# controls. Assumes the Match() call used M=4 controls per treated unit.
# The number of pairs is derived from the match object instead of being
# hard-coded (the original hard-coded 201 and its comment claimed 220).
build_pairs_matrix <- function(match_obj, id_lookup = radDS$Project_ID) {
  # Use the full name index.treated rather than relying on `$` partial
  # matching of `index.treat` as the original did.
  n_pairs <- length(match_obj$index.treated) / 4
  # Match() repeats each treated index once per control; reshape column-major
  # (same as the original dim<- assignment) and transpose to one row per pair.
  controls <- t(matrix(as.numeric(match_obj$index.control), nrow = 4, ncol = n_pairs))
  treated <- t(matrix(as.numeric(match_obj$index.treated), nrow = 4, ncol = n_pairs))
  idx <- cbind(treated[, 1], controls)
  pairs <- matrix("", nrow = n_pairs, ncol = 5)
  for (j in 1:5) {
    pairs[, j] <- as.character(id_lookup[idx[, j]])
  }
  pairs
}
# Genetic-matching (mgenA) pairs: 4 controls for each matched RAD property.
dfA <- build_pairs_matrix(mgenA)
print(dfA)
#----------------------------------------------------------------------------------------
# Mahalanobis-weighted match (mlobA) pairs.
# BUG FIX: the original indexed radDS$Project_ID with an undefined object
# `ds2`; the index matrix must come from the mlobA match (formerly `dsA2`).
dfA2 <- build_pairs_matrix(mlobA)
print(dfA2)
sink()
|
180114fac4e3a7cf590ed6bb91655aec4f004af1
|
8450cd8d46322e46964eaeabb27e299d5d417ca6
|
/inst/R_old/plotBactRes.R
|
8cb4dbcc9ad668e5e80b359d774ae103b13b54a4
|
[] |
no_license
|
WillFox/TBsim
|
0251b444b8247796ed11d56283344b88e4329099
|
d304c5957dd1199e2ad08ba00fe054b8c6e30366
|
refs/heads/master
| 2020-07-07T04:07:23.609484
| 2018-07-05T21:52:44
| 2018-07-05T21:52:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,088
|
r
|
plotBactRes.R
|
#===========================================================================
# plotBactRes.R
# Plot resistant bacteria - as totals or per compartment
#
# John Fors, UCSF
# Oct 10, 2014
#===========================================================================
# Plot resistant-bacteria load over time, either summed across compartments
# (isSummary == 1, one panel per drug) or faceted per compartment and drug
# (isSummary == 0).
#
# Args:
#   isSummary       - 1: sum the median load across compartments per drug/day;
#                     0: facet by compartment and drug
#   isFromDrugStart - 1: drop observations at/before drug start and relabel
#                     the x axis accordingly
#
# Relies on the global `folder` (simulation output directory), the helpers
# readHeaderFile()/readBactRes(), and ggplot2 being attached by the caller.
plotBactRes <- function(isSummary, isFromDrugStart){
  # read header file (run metadata); only nTime/drugStart/drugNames are used
  # below, but all fields are unpacked for reference
  list <- readHeaderFile(folder)
  timeStamp <- list[[1]]
  nTime <- list[[2]]
  nSteps <- list[[3]]
  drugStart <- list[[4]]
  nPatients <- list[[5]]
  isResistance <- list[[6]]
  isImmuneKill <- list[[7]]
  isGranuloma <- list[[8]]
  isPersistance <- list[[9]]
  diseaseText <- list[[10]]
  nDrugs <- list[[11]]
  doseText <- list[[12]]
  outcomeIter <- list[[13]]
  drugNames <- list[[14]]
  # Read the resistant-bacteria results (parallel vectors).
  output <- readBactRes(folder, "bactRes.txt", "bactRes")
  times <- output[[1]]
  drugs <- output[[2]]
  compartments <- output[[3]]
  values <- output[[4]]
  # build dataframe 1
  df1 <- data.frame(times, drugs, compartments, values)
  colnames(df1) <- c("Day", "Drug", "Compartment", "Median")
  # Combine data into single data frame
  # apply log transform
  if (isSummary==0){
    # BUG FIX: the original passed `rm.na=TRUE` to data.frame(), which is not
    # an argument of data.frame() and silently created a stray fifth column
    # (left unnamed by the 4-name colnames assignment). It is removed here.
    yset <- data.frame(df1$Day, df1$Drug, df1$Compartment, log10(df1$Median))
    colnames(yset) <- c("time", "Drug", "Compartment", "Median")
    # apply compartment labels (compartment codes 1-4 index into compNames)
    compNames <- c("Extracellular", "Intracellular", "Extracell Granuloma", "Intracell Granuloma")
    yset$Compartment <- compNames[yset$Compartment]
    yset$Compartment <- factor(yset$Compartment,
                               levels = c("Extracellular", "Intracellular", "Extracell Granuloma", "Intracell Granuloma"))
  }
  # if summary across all compartments then create sum
  if (isSummary==1){
    # BUG FIX: aggregate() previously referenced `dfc`, which is never defined
    # in this function; the per-compartment frame built above is `df1`.
    df1Agg <- aggregate(Median ~ Day + Drug, df1, FUN=sum, na.rm=TRUE)
    yset <- data.frame(df1Agg$Day, df1Agg$Drug, log10(df1Agg$Median))
    colnames(yset) <- c("time", "Drug", "Median")
  }
  # filter out data before drugStart
  if (isFromDrugStart==1){
    yset <- yset[yset$time>drugStart,]
  }
  # filter out to have 1/5 of points, for more smooth curve
  #yset <- yset[seq(1, nrow(yset), 5), ]
  # apply drug names (drug codes index into drugNames from the header)
  yset$Drug <- drugNames[yset$Drug]
  xlabel <- "Time after infection (Days)"
  if (isFromDrugStart==1) {
    xlabel <- "Time after drug start (Days)"
  }
  ylabel <- "Bacterial load [log(CFU/ml)]"
  titleText <- "Total Resistant Bacteria "
  #labx <- c(seq(0, nTime, by = 60))
  #namesx <- labx
  dev.new()
  pl <- ggplot(data = yset, aes(x = time)) +
    #geom_ribbon(aes(ymin=Q1, ymax=Q3), alpha=0.2) +
    geom_line(aes(y=Median), colour="blue", size=0.5) +
    theme(plot.title = element_text(size=16, face="bold", vjust=2)) +
    #scale_y_continuous(breaks = laby, labels = namesy) +
    #scale_x_continuous(breaks = labx, labels = namesx) +
    xlab(xlabel) +
    ylab(ylabel) +
    ggtitle(titleText)
  if (isSummary==0){
    pl <- pl + facet_grid(Drug ~ Compartment, scales="free_y")
  }
  if (isSummary==1){
    pl <- pl + facet_wrap(~Drug, nrow=1, scales="free_y")
  }
  print(pl)
}
|
95d6e0af67f47103e7f3d9029e9a817be2008432
|
903da089f3ac659f7295a2b1d351981394e8bcdc
|
/man/mc.wlinreg.Rd
|
d25343cdb59f3be9a2347b9374ca602b20466722
|
[] |
no_license
|
cran/mcr
|
1f27b59cda2a87be199a8f6534bec6882154b042
|
069b879be631491ed07a54a0f348b1adbebf7867
|
refs/heads/master
| 2023-02-06T22:38:44.942104
| 2023-01-26T21:00:19
| 2023-01-26T21:00:19
| 17,697,375
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 928
|
rd
|
mc.wlinreg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcLinReg.r
\name{mc.wlinreg}
\alias{mc.wlinreg}
\title{Calculate Weighted Ordinary Linear Regression
and Estimate Standard Errors}
\usage{
mc.wlinreg(X, Y)
}
\arguments{
\item{X}{measurement values of reference method.}
\item{Y}{measurement values of test method.}
}
\value{
a list with elements.
\item{b0}{intercept.}
\item{b1}{slope.}
\item{se.b0}{respective standard error of intercept.}
\item{se.b1}{respective standard error of slope.}
\item{xw}{weighted average of reference method values.}
}
\description{
The regression weights are taken as the reciprocals of the squared reference-method
values; consequently, the calculation cannot be carried out when any reference value is zero.
}
\references{
Neter J., Wassermann W., Kunter M.
Applied Statistical Models.
Richard D. Irwin, Inc., 1985.
}
|
396d3fb2b91760a0a1a298b69fdeba4180be8224
|
af1d09f6444857362b9fc2d863d5d543faeb8ab1
|
/man/av_complexity.Rd
|
290eebbe9e9e48b44b93f2e5deaf1e44e9ff3efa
|
[
"Apache-2.0"
] |
permissive
|
franzbischoff/tsmp
|
aaa754fa031ff8cf20de90231d96429c51ff68ec
|
13f796dfe9d25f6111b1bbc50779fd0c89da67d5
|
refs/heads/master
| 2021-06-08T16:23:24.541370
| 2020-03-03T16:55:01
| 2020-03-03T16:55:01
| 226,805,705
| 6
| 0
|
NOASSERTION
| 2020-03-03T15:13:56
| 2019-12-09T06:58:45
|
R
|
UTF-8
|
R
| false
| true
| 1,458
|
rd
|
av_complexity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotations.R
\name{av_complexity}
\alias{av_complexity}
\title{Computes the annotation vector that favors complexity}
\usage{
av_complexity(.mp, data, dilution_factor = 0, apply = FALSE)
}
\arguments{
\item{.mp}{a Matrix Profile object.}
\item{data}{a \code{vector} or a column \code{matrix} of \code{numeric}.}
\item{dilution_factor}{a \code{numeric}. (Default is \code{0}). Larger numbers means more dilution.}
\item{apply}{logical. (Default is \code{FALSE}). Applies the Annotation Vector over the Matrix Profile.
Use with caution.}
}
\value{
Returns the input \code{.mp} object with an embedded annotation vector.
}
\description{
Computes the annotation vector that favors complexity
}
\examples{
data <- mp_test_data$train$data[1:1000]
w <- 50
mp <- tsmp(data, window_size = w, verbose = 0)
av <- av_complexity(mp, apply = TRUE)
}
\references{
\itemize{
\item Dau HA, Keogh E. Matrix Profile V: A Generic Technique to Incorporate Domain
Knowledge into Motif Discovery. In: Proceedings of the 23rd ACM SIGKDD International Conference
on Knowledge Discovery and Data Mining - KDD '17. New York, New York, USA: ACM Press; 2017. p.
125-34.
}
}
\seealso{
Other Annotation vectors:
\code{\link{av_apply}()},
\code{\link{av_hardlimit_artifact}()},
\code{\link{av_motion_artifact}()},
\code{\link{av_stop_word}()},
\code{\link{av_zerocrossing}()}
}
\concept{Annotation vectors}
|
70c51c2573bd6262b9efe434bc5bff0698f38e5e
|
ec8a8287a6c6fddaae2a9fa9b56f18312708af1f
|
/Desktop/Assignment2JS/cachematrix.R
|
56c06d6ef2cd865dc1f12187d6d79c7eef91de6e
|
[] |
no_license
|
jsigman1/Assignment2JS
|
bc17ba8b49d3e703c160f91636b3645ab88919c0
|
c432e67edc400a04a68856e99c0daab7e6dfebb3
|
refs/heads/master
| 2020-12-24T09:00:30.577292
| 2016-11-09T18:23:38
| 2016-11-09T18:23:38
| 73,309,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 860
|
r
|
cachematrix.R
|
## First function sets and gets matrix, then sets and finds inverse of matrix
## Creates a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions:
##   set(y)      - replace the stored matrix and invalidate the cached inverse
##   get()       - return the stored matrix
##   setInv(inv) - store a computed inverse in the cache (returns it invisibly-ish)
##   getInv()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    # BUG FIX: must assign with <<- so the cache in the enclosing environment
    # is cleared; the original `i <- NULL` only created a local variable and
    # left a stale inverse cached after the matrix changed.
    i <<- NULL
  }
  get <- function() x
  setInv <- function(inv) {
    i <<- inv
    return(i)
  }
  getInv <- function() i
  list(set = set, get = get,
       setInv = setInv,
       getInv = getInv)
}
## Function returns inverse of matrix, but checks to see if inverse has been returned first, always assumes matrix is inversible.
## Returns the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached it is returned directly; otherwise it is
## computed with solve() and stored in the cache. Assumes x$get() is an
## invertible matrix; extra arguments are passed through to solve().
cacheSolve <- function(x, ...) {
  i <- x$getInv()
  if (!is.null(i)) {
    message("Getting cached data!")
    return(i)
  }
  datmat <- x$get()
  i <- solve(datmat, ...)
  # BUG FIX: the original called x$set(i), which overwrote the stored matrix
  # with its inverse (and, once set() is fixed, also cleared the cache). The
  # inverse must be stored via setInv() so get() keeps returning the matrix.
  x$setInv(i)
  i
}
|
4e7e2c8c38ae29aeb97e361904cda47a98edcfd6
|
50d98feafa15c290ef429ae307a20a5ca5e21018
|
/demo/demo_leverage_exposure_constraint.R
|
da32be563b17806368a3dc9d0af1664c23a3ffcd
|
[] |
no_license
|
IanMadlenya/PortfolioAnalytics
|
cebdcc7017b095d78853b4b6a526f04becb4a28e
|
0bb92fcb424a2f3d3ad88a3910752a05aa2e2e0d
|
refs/heads/master
| 2021-01-21T05:15:07.988245
| 2014-06-09T15:33:44
| 2014-06-09T15:33:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,278
|
r
|
demo_leverage_exposure_constraint.R
|
# Examples for solving optimization problems with a leverage exposure constraint
library(PortfolioAnalytics)
# EDHEC hedge-fund index returns shipped with PortfolioAnalytics; use the
# first 10 series as the asset universe.
data(edhec)
R <- edhec[, 1:10]
funds <- colnames(R)
# Set up an initial portfolio object with basic constraints
init.portf <- portfolio.spec(assets=funds)
# Add an objective to maximize mean return per unit expected shortfall
init.portf <- add.objective(portfolio=init.portf, type="return", name="mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name="ES")
# The leverage_exposure constraint type is supported for random, DEoptim, pso,
# and GenSA solvers. The following examples use DEoptim for solving the
# optimization problem.
# Dollar neutral portfolio with max 2:1 leverage constraint.
# The weight_sum band [-0.01, 0.01] approximates the sum-to-zero equality,
# which stochastic solvers cannot satisfy exactly.
dollar.neutral.portf <- init.portf
dollar.neutral.portf <- add.constraint(portfolio=dollar.neutral.portf,
type="weight_sum",
min_sum=-0.01, max_sum=0.01)
dollar.neutral.portf <- add.constraint(portfolio=dollar.neutral.portf,
type="box", min=-0.5, max=0.5)
dollar.neutral.portf <- add.constraint(portfolio=dollar.neutral.portf,
type="leverage_exposure", leverage=2)
# Run optimization
dollar.neutral.opt <- optimize.portfolio(R=R, portfolio=dollar.neutral.portf,
optimize_method="DEoptim",
search_size=2000)
dollar.neutral.opt
# Leveraged portfolio with max 1.6:1 leverage constraint; weights sum to
# (approximately) 1 this time.
leveraged.portf <- init.portf
leveraged.portf <- add.constraint(portfolio=leveraged.portf,
type="weight_sum",
min_sum=0.99, max_sum=1.01)
leveraged.portf <- add.constraint(portfolio=leveraged.portf,
type="box", min=-0.3, max=0.8)
leveraged.portf <- add.constraint(portfolio=leveraged.portf,
type="leverage_exposure", leverage=1.6)
# Run optimization
leveraged.opt <- optimize.portfolio(R=R, portfolio=leveraged.portf,
optimize_method="DEoptim",
search_size=2000)
# Auto-print the result object (interactive/demo usage).
leveraged.opt
|
834fd6120dd7a4906b55b390244e1b9140c5ec28
|
152bc5dff8535503f6d61aa481c5a5d4ea08289b
|
/wk3/hw3p1.R
|
700ee151ee494ebe20a5770b7ca78dfcfe677d14
|
[] |
no_license
|
franciszxlin/MSCFRiskManagement1
|
122abcda9d48b7da69c81769ee4e3f5305b5fc14
|
5dc86fb3b6f9a5cdef90894d37a86a28f57ac3af
|
refs/heads/master
| 2021-07-22T09:12:44.236573
| 2017-10-31T18:29:13
| 2017-10-31T18:29:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,314
|
r
|
hw3p1.R
|
# Homework 3 problem 1
# Part (a)
# Draw a sample from the (unit-scale) Frechet distribution by inverse-transform
# sampling: if U ~ Uniform(0, 1), then (log(1/U))^(-1/alpha) has the Frechet
# c.d.f. F(x) = exp(-x^(-alpha)).
# Inputs:
#   n   - size of the sample to draw
#   alp - the constant alpha (shape) parameter of the Frechet c.d.f.
# Output: a numeric vector containing the generated Frechet sample
genFrechetDist <- function(n, alp) {
  unif_sample <- runif(n)
  (log(1 / unif_sample))^(-1 / alp)
}
hold_test<-genFrechetDist(1000,2) # I used choice of alpha = 2 parameter for the Frechet distribution
# Part (c)
# Draw a sample from the Pareto distribution by inverse-transform sampling:
# if U ~ Uniform(0, 1), then c*(1-U)^(-1/alpha) - c follows the (shifted)
# Pareto distribution with shape alpha and scale c, starting at 0.
# Inputs:
#   n   - size of the sample to draw
#   alp - the specified alpha (shape) parameter
#   c   - the specified c (scale) parameter
# Output: a numeric vector containing the generated Pareto sample
genParetoDist <- function(n, alp, c) {
  unif_sample <- runif(n)
  c * (1 - unif_sample)^(-1 / alp) - c
}
# Simulate N sample maxima of n Pareto(alpha, c) draws, scale them, and
# compare their distribution to the Frechet sample via a Q-Q plot.
N<-1000
n<-250
alpha<-2
c<-1
hold_scaled_max=numeric(N)
for (i in 1:N)
{
hold_max<-max(genParetoDist(n,alpha,c))
# NOTE(review): the scaling divisor is 1000^(1/2) although each maximum is
# taken over n = 250 draws; for alpha = 2 the natural scaling is n^(1/alpha)
# = 250^(1/2). Possibly intentional (matching the Frechet sample of size
# 1000 above) — confirm against the assignment statement.
hold_scaled_max[i]=hold_max/(1000^(1/2))
}
length(hold_scaled_max)
head(hold_scaled_max)
# Q-Q plot of scaled Pareto maxima against the Frechet sample, with the
# y = x reference line.
plot(sort(hold_scaled_max), sort(hold_test))
lines(sort(hold_test),sort(hold_test))
|
bf7f7d0d9534ad564f2e115775557ceaf2cd0659
|
20ce64eef4b8b3b0e6a62b5dbd639cfb1758db47
|
/ch02_Statistical_Learning/shiny/auto_hist/App.R
|
e89fa46ac134df81259f58903cf78ded98db43e6
|
[] |
no_license
|
GucciTheCarpenter/ISLR_labs
|
d87da85762b63d9e00ea6669f99f92a97bd246bb
|
72132ecae7a9ffc4ec8bfcc9df1efe4516d9f01c
|
refs/heads/master
| 2021-01-10T17:40:19.863721
| 2016-03-31T09:00:30
| 2016-03-31T09:00:30
| 50,114,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,494
|
r
|
App.R
|
# Shiny app: histogram / bar chart explorer for the Auto data set.
# NOTE(review): library(shiny) is preferred over require() for a hard
# dependency; require() returns FALSE instead of erroring when missing.
require(shiny)
Auto <- read.csv('../../../data_sets/Auto.csv')
# UI: a feature selector (first 7 columns) and a bin-count input on the left,
# the histogram on the right.
ui <- fluidPage(
titlePanel('Auto'),
sidebarLayout(
sidebarPanel(
h4('Choose feature & bins'),
br(),
# NOTE(review): `selected` is passed the whole mpg column rather than the
# choice string "mpg"; shiny falls back to the first choice. Presumably
# "mpg" was intended — confirm.
selectInput('f',
'Feature',
names(Auto)[1:7],
selected = Auto$mpg),
numericInput('bins',
'Bins',
value = 8,
min = 3,
max = 23,
step = 5
),
h6("NOTE: not all bin values will result in a modified plot."),
br()
),
mainPanel(
h4('Histogram / Bar Chart'),
plotOutput(outputId = "hist")
)
)
)
# Server: render a histogram of the selected column. Factor columns are
# coerced to numeric via character first; note the `bins` input is ignored
# on that branch (hist() picks its own breaks).
server <- function(input, output){
output$hist <- renderPlot({
bins <- input$bins
# NOTE(review): inherits(Auto[, input$f], "factor") is the robust idiom;
# class()=="factor" breaks for multi-class objects. Left unchanged here.
if(class(Auto[,input$f]) == "factor") {
hist(as.numeric(as.character(Auto[,input$f])),
xlab = as.character(input$f),
main = '',
col = 'lightblue')
} else {
hist(Auto[,input$f],
breaks = bins,
xlab = as.character(input$f),
main = '',
col = 'lightblue'
)
}
})
}
shinyApp(ui = ui, server = server)
|
544dc583bd0763ee1e5c4cee0c35a5a852c11d06
|
f9252ab1aab8bf9db53b12c10246343552aec7f0
|
/tests/testthat/testAst.R
|
16b615ae58f542441c8305cea3e22e8863d1b6f4
|
[] |
no_license
|
cran/rethinker
|
e1ac739b9cb8c0f15331587c4a2ade43def67289
|
001eea06f489680a59c0668ca70630bc3efd82df
|
refs/heads/master
| 2021-06-03T10:06:13.156582
| 2017-11-13T10:20:29
| 2017-11-13T10:20:29
| 48,087,317
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,347
|
r
|
testAst.R
|
# Unit tests for the rethinker query AST builder: each test pins the exact
# JSON wire format produced by toJSON() on a built query.
library(rjson);
context("Abstract Syntax Tree");
# Chained terms serialize to nested [termcode, [args]] arrays.
test_that("Basics",{
expect_identical(
toJSON(r()$db("a")$table("b")$add()$query),
"[24,[[15,[[14,[\"a\"]],\"b\"]]]]");
expect_identical(
toJSON(r()$db("a")$table("b")$query),
"[15,[[14,[\"a\"]],\"b\"]]");
});
# R functions become FUNC terms; invalid function placements must error.
test_that("Functions",{
Q<-r()$funcall(function(x) x,777)$query;
expect_identical(toJSON(Q),
"[64,[[69,[[2,[1]],[10,[1]]]],777]]")
expect_error(r()$filter(function() r()$add(1))$query);
expect_error(r()$filter(list(a=function(x) x))$query,
"Functions can only exist as direct term arguments.");
})
# Vectors are wrapped in a MAKE_ARRAY (code 2) term.
test_that("Make array appears",{
Q<-r()$db(c("a","b","c"))$query;
expect_identical(toJSON(Q),"[14,[[2,[\"a\",\"b\",\"c\"]]]]");
})
test_that("Expressions",{
Q<-r()$add(r()$add(1,2),4)$query;
expect_identical(toJSON(Q),"[24,[[24,[1,2]],4]]");
})
test_that("Implicit var throws",{
expect_error(r()$row("a"),"Implicit");
})
# Nested R lists map to JSON objects / MAKE_ARRAY terms recursively.
test_that("Complex list nesting maps as it should",{
Q1<-r()$insert(list(a=list(list(a=3))))$query
expect_identical(toJSON(Q1),"[56,[{\"a\":[2,[{\"a\":3}]]}]]")
Q2<-r()$insert(list(a=list(list(a=list(r()$monday())))))$query
expect_identical(toJSON(Q2),"[56,[{\"a\":[2,[{\"a\":[2,[[107,[]]]]}]]}]]")
})
test_that("Single element list is an array",{
Q<-r()$insert(list(777))$query;
expect_identical(toJSON(Q),"[56,[[2,[777]]]]");
})
|
c40e5a9c68ce096da367f51a93646aaf85c0dbd6
|
f72d96e618ace7455424a5bf5da9064f0ac37b30
|
/data/deaths.R
|
259a5ca8cc337f3d28877012d6107b864dc3cf74
|
[] |
no_license
|
yikouyi312/mortality
|
27e317fdee872a40e081de55259b42c0a95a6795
|
556c1d8568695822e4d139a8c9d51ae9e47e1038
|
refs/heads/master
| 2023-07-26T12:35:00.168409
| 2021-09-04T00:57:25
| 2021-09-04T00:57:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 792
|
r
|
deaths.R
|
# Aggregate county-level COVID death counts to 3-digit ZIP (zip3) areas using
# a county-to-zip3 relationship file with population weights (POPPT), then
# save the result in both wide and long form (long only is written out).
library(tidyverse)
library(feather)
zip3_rel <- read_feather("zip3_rel.feather")
# Split the 5-digit county FIPS into state (2) + county (3) components so it
# can be joined against the relationship file.
deaths <- read_csv("covid_deaths_usafacts.csv") %>%
mutate(countyFIPS = as.character(countyFIPS)) %>%
mutate(countyFIPS = str_pad(countyFIPS, 5, pad = "0")) %>%
mutate(state = str_sub(countyFIPS, 1, 2), county = str_sub(countyFIPS, 3, 5)) %>%
select(!c(`County Name`, State, StateFIPS, countyFIPS)) %>%
relocate(state, county)
# Population-weighted average of each daily column within each zip3.
deaths_zip3_wide <- deaths %>%
inner_join(zip3_rel, by = c("state", "county")) %>%
group_by(zip3) %>%
summarize(across(`2020-01-22`:`2021-07-12`, ~ sum(.x * POPPT / sum(POPPT))))
# Reshape wide date columns into (zip3, date, deaths) rows.
deaths_zip3 <- deaths_zip3_wide %>%
pivot_longer(-1, names_to = "date", values_to = "deaths") %>%
mutate(date = parse_date(date))
write_feather(deaths_zip3, "deaths_zip3.feather")
|
bc01cce8765ce526b7b8e93841b775552100d84d
|
d57908fd05d972c9d1c9d89e51c41771022a3d35
|
/Code/Code_art_rem_sen_2020/Code_r/func_obj_l_L_mu_produto_biv.r
|
821e8860fc3484c8bd1cc84598c8ad8dcc9f9451
|
[] |
no_license
|
anderborba/ufal_mack
|
b7be15ec5a025359d27386cf644100c153b7c05e
|
a9aee623e40f309afd946e3bd3960b25654377ac
|
refs/heads/master
| 2023-09-01T20:37:26.185653
| 2023-08-23T18:55:20
| 2023-08-23T18:55:20
| 99,868,996
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,759
|
r
|
func_obj_l_L_mu_produto_biv.r
|
# Author: AAB  date: 2018-09-12  version 1.0
# The function receives parameters, taking `param` as the function variable,
# L as the number of looks, m (< L) as the number of image channels,
# N as the image size and z as the image values (Wishart distribution);
# the subroutine defines the objective function l(j) described in eq. (5)
# of the NHFC_2014 paper.
# NOTE(review): relies on the globals matdf1, matdf2, z1, z2, N and L being
# defined in the calling environment — confirm they are set before use.
#func_obj_l_L_mu_produto_biv <- function(param){
func_obj_l_L_mu_produto_biv <- function(param){
  # Candidate split point j: rows 1..j use the "left" parameters (matdf1),
  # rows j+1..N use the "right" parameters (matdf2).
  j <- param
  s1e <- matdf1[j,1]
  s2e <- matdf1[j,2]
  rhoe <- matdf1[j,3]
  #
  s1d <- matdf2[j,1]
  s2d <- matdf2[j,2]
  rhod <- matdf2[j,3]
  #
  # Left-segment log-likelihood terms (modified Bessel function of the
  # second kind, order L-1, from the bivariate Wishart density).
  soma1 <- sum(z1[1: j])
  soma2 <- sum(z2[1: j])
  c1 <- 1.0 / (s1e * s2e)^0.5
  c2 <- rhoe / (1 - rhoe^2)
  soma3 <- sum(log(besselK(2 * L * (z1[1: j] * z2[1: j])^0.5 * c1 * c2, L - 1)))
  #
  aux1 <- log(1 - rhoe^2)
  aux2 <- (L - 1) * log(rhoe)
  aux3 <- 0.5 * (L + 1) * log(s1e)
  aux4 <- 0.5 * (L + 1) * log(s2e)
  aux5 <- (L / (s1e * (1 - rhoe^2))) * soma1 / j
  aux6 <- (L / (s2e * (1 - rhoe^2))) * soma2 / j
  aux7 <- soma3 / j
  a1 <- -aux1 - aux2 - aux3 - aux4 - aux5 - aux6 + aux7
  #
  # Right-segment log-likelihood terms (same structure, rows j+1..N).
  soma1 <- sum(z1[(j + 1): N])
  soma2 <- sum(z2[(j + 1): N])
  c1 <- 1.0 / (s1d * s2d)^0.5
  c2 <- rhod / (1 - rhod^2)
  soma3 <- sum(log(besselK(2 * L * (z1[(j + 1): N] * z2[(j + 1): N])^0.5 * c1 * c2, L - 1)))
  #
  aux1 <- log(1 - rhod^2)
  aux2 <- (L - 1) * log(rhod)
  aux3 <- 0.5 * (L + 1) * log(s1d)
  aux4 <- 0.5 * (L + 1) * log(s2d)
  aux5 <- (L / (s1d * (1 - rhod^2))) * soma1 / (N - j)
  aux6 <- (L / (s2d * (1 - rhod^2))) * soma2 / (N - j)
  aux7 <- soma3 / (N - j)
  a2 <- -aux1 - aux2 - aux3 - aux4 - aux5 - aux6 + aux7
  #
  # Weighted sum of the two segment log-likelihoods.
  func_obj_l_L_mu_produto_biv <- (j * a1 + (N - j) * a2)
  return(func_obj_l_L_mu_produto_biv)
}
|
9e2f706060a8f23d69afbc588bcb52e1fdbac435
|
e530188423158c74d8487cc11a9e7110e11134cb
|
/R/item_move.R
|
818bc6eeac32e270cb896ed08da009a213002a38
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
dblodgett-usgs/sbtools
|
dca8c2cd70eb402ba506a5d54c900072a133eec7
|
da21bc9815c1972534dcca133cde3492733cbb3a
|
refs/heads/main
| 2023-05-10T03:08:06.333294
| 2023-05-01T11:19:15
| 2023-05-01T11:19:15
| 227,438,050
| 0
| 0
|
CC0-1.0
| 2019-12-11T18:55:35
| 2019-12-11T18:55:34
| null |
UTF-8
|
R
| false
| false
| 896
|
r
|
item_move.R
|
#' Move item from one folder to another
#'
#' @export
#' @template manipulate_item
#' @param id_new Folder/item to move \code{id} to. A ScienceBase ID or something
#' that can be coerced to a SB item ID by \code{\link{as.sbitem}}
#'
#' @return An object of class \code{sbitem}. Same as \code{id}, but with new
#' parent id
#'
#' @examples \dontrun{
#' # create 1st folder
#' (fold1 <- folder_create(user_id(), "bear123"))
#' (res <- item_create(fold1, "item-to-move"))
#'
#' # create 2nd folder
#' (fold2 <- folder_create(user_id(), "bear456"))
#'
#' # move item in 1st folder to 2nd folder
#' (res2 <- item_move(res, fold2))
#'
#' # test identical
#' identical(res2$parentId, fold2$id)
#' }
item_move <- function(sb_id, id_new, ..., session = current_session()) {
  # Coerce both inputs to sbitem objects, then re-parent the first under
  # the second via a parentId update.
  moving_item <- as.sbitem(sb_id)
  destination <- as.sbitem(id_new)
  item_update(moving_item, list(parentId = destination$id), ..., session = session)
}
|
15645dbddf3abf6bc2f99d60a989ed6805f05f94
|
df9c306238e105d36561c94aaed36dc6073492b7
|
/scripts/figure_04.R
|
7b9f40e77b23c2e9ab6f3e4374c9942477edde86
|
[] |
no_license
|
cmcninch/bulliform_cell_gwas
|
743b630556ef71f6305037f5963f243594a193d3
|
59c55411ded101dc4ea849bf965202dd43606736
|
refs/heads/master
| 2020-08-17T10:02:19.067425
| 2019-10-16T22:01:12
| 2019-10-16T22:01:12
| 215,649,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,544
|
r
|
figure_04.R
|
### Load libraries ###
##################################################
library(tidyverse)
library(data.table)
library(poppr)
library(ape)
library(lme4)
library(multcompView)
library(genetics)
library(GenomicRanges)
library(LDheatmap)
library(mapLD)
library(grid)
library(grDevices)
### Load data and format ###
##################################################
# SNP map: keep marker name, chromosome, physical position.
snp_positions <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/snps/MAF_MissRate_Filtered_WiDiv_SNPs.map") %>%
dplyr::select(V2, V1, V4)
colnames(snp_positions) <- c("SNP", "Chromosome", "Position")
# Genotype matrix: first column is the taxa label, remaining columns are
# numeric SNP calls (0 = hom. major, 1 = het, 2 = hom. minor; see below).
all_snps <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/snps/MAF_MissRate_Filtered_WiDiv_SNPs.xmat")
taxa <- all_snps$taxa
all_snps <- all_snps[, -1]
# Per-SNP allele annotation used to translate numeric calls into genotypes.
snp_info <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/snps/snp_general_info.txt") %>%
dplyr::select(`Site Name`, `Physical Position`, `Major Allele`, `Minor Allele`)
colnames(snp_info) <- c("SNP", "Position", "Major_Allele", "Minor_Allele")
# SNPs within the Zm00001d007372 gene model.
Zm00001d007372_snps <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/snps/Zm00001d007372_SNP_Positions.csv")
### Create functions ###
##################################################
# Compute genotype BLUPs for `trait` across multiple environments: fits a
# mixed model with genotype, location, genotype-by-location, and
# rep-within-location random effects, then returns the genotype random
# effects as a data frame with columns Taxa and <new_trait_name>.
blup_multiple_enviro_func <- function(df, trait, new_trait_name){
  model <- lmer(get(trait) ~ (1 | SNP_Genotype_Name) + (1 | Location) + (1 | SNP_Genotype_Name:Location) + (1 | Rep:Location), data = df)
  genotype_effects <- ranef(model)$SNP_Genotype_Name
  blups <- rownames_to_column(genotype_effects, var = "Taxa")
  colnames(blups) <- c("Taxa", new_trait_name)
  blups
}
### Compute BLUPs ###
##################################################
### Trait entry-means ###
# Convert bulliform cell field counts to counts per mm of leaf width, then
# average replicate images within each genotype/location/rep/row entry.
bulliform_ff_entry_means <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/trait_data/Bulliform_Cell_Field_Counts.csv") %>%
mutate(Bulliform_FF = Bulliform_Cell_Field_Count/1040 * 245) %>% # height of image is 1040 pixels; 245 pixels = 1mm
group_by(SNP_Genotype_Name, Location, Rep, Row) %>%
summarise(Bulliform_FF = mean(Bulliform_FF, na.rm = TRUE)) %>%
drop_na()
### BLUPs ###
# Genotype BLUPs combining both field locations.
bff_both_blups <- blup_multiple_enviro_func(df = bulliform_ff_entry_means, trait = "Bulliform_FF", new_trait_name = "BFF_Both_Locations")
### LD at serine carboxypeptidase1 ###
##################################################
snps <- Zm00001d007372_snps$SNP
### Compute LD ###
# Long-format genotype calls for the gene's SNPs: translate numeric calls
# (0/1/2) into "A/B"-style genotypes, then split into the two alleles.
# Heterozygous calls (SNP_Call == 1) get NA alleles for the LD computation.
snp_calls <- cbind(data.frame(Taxa = taxa),
as.data.frame(all_snps)[, which(colnames(all_snps) %in% snps)]) %>%
gather("SNP", "SNP_Call", 2:ncol(.)) %>%
left_join(., snp_info, by = "SNP") %>%
mutate(Genotype = ifelse(SNP_Call == 0, paste(Major_Allele, Major_Allele, sep = "/"),
ifelse(SNP_Call == 2, paste(Minor_Allele, Minor_Allele, sep = "/"),
paste(Major_Allele, Minor_Allele, sep = "/")))) %>%
mutate(Allele1 = gsub('\\/.*', '', Genotype),
Allele2 = gsub('.*\\/', '', Genotype)) %>%
mutate(Allele1 = ifelse(SNP_Call != 1, Allele1, NA),
Allele2 = ifelse(SNP_Call != 1, Allele2, NA))
# Pairwise LD estimates via mapLD (no gene annotation or graph output).
temp_LD <- mapLD(SNPdata = as.data.frame(dplyr::select(snp_calls, Allele1, Allele2, Taxa, SNP)),
locusID.col = 'SNP',
subjectID.col = 'Taxa',
allele.cols = c('Allele1', 'Allele2'),
WhichGene = NA,
outgraph = NA)
### Graphing LD heatmaps ###
# One genotype object per SNP (taxa x SNP wide table) for LDheatmap; physical
# positions are parsed from the trailing "_<position>" of each SNP name.
snp_calls <- dplyr::select(snp_calls, Taxa, SNP, Genotype) %>%
spread(SNP, Genotype) %>%
column_to_rownames(var = "Taxa") %>%
mutate_all(., .funs = genotype)
MyHeatmap <- LDheatmap(snp_calls,
as.numeric(gsub('.*\\_', '', colnames(snp_calls))),
LDmeasure = "r",
title = NULL,
flip = TRUE,
add.map = FALSE,
SNP.name = snps,
color = c("#FF0000FF", "#FF4900FF", "#FF9200FF", "#FFDB00FF", "#FFFFE6FF"))
### Haplotypes at serine carboxypeptidase1 ###
##################################################
# Two-SNP haplotypes from the calls at rs2_230052852 and rs2_230053236.
# Hap_Code 1-3 are the three homozygous-call combinations; code 4 (anything
# else, e.g. heterozygous calls) is dropped.
snp_df <- as.data.frame(all_snps)[, c(1, which(colnames(all_snps) %in% c("rs2_230052852", "rs2_230053236")))] %>%
mutate(Haplotype = paste(rs2_230052852, rs2_230053236, sep = "_")) %>%
mutate(Hap_Code = ifelse(Haplotype == "0_0", 1,
ifelse(Haplotype == "2_0", 2,
ifelse(Haplotype == "0_2", 3, 4))),
Taxa = taxa) %>%
filter(Hap_Code != 4)
# Attach the trait BLUPs and test for haplotype differences (ANOVA + Tukey
# HSD, summarised as compact letter display for the plot).
trait_snp_df <- left_join(snp_df, bff_both_blups, by = "Taxa")
aov_model <- aov(BFF_Both_Locations ~ Haplotype, data = trait_snp_df)
hsd <- TukeyHSD(aov_model, ordered = FALSE, conf.level = 0.95)
hsd_letters <- as.data.frame(multcompLetters(hsd$Haplotype[,4])[[1]]) %>%
rownames_to_column(var = "Haplotype") %>%
mutate(Y_Coord = c(0.7, 0.8, 0.5))
colnames(hsd_letters) <- c("Haplotype", "Letter", "Y_Coord")
### Graphing Haplotype bff blup distributions ###
# Boxplot + dotplot of BLUPs per haplotype, annotated with the HSD letters.
ggplot(trait_snp_df, aes(x = Haplotype, y = BFF_Both_Locations)) +
geom_boxplot() +
geom_dotplot(binaxis = "y", binwidth = 0.02, stackdir = "center", aes(fill = Haplotype)) +
theme_bw() +
theme(axis.title = element_text(size = "24"),
axis.text = element_text(size = "20"),
panel.border = element_rect(size = 4),
legend.position = "none") +
labs(y = "BFF BLUP") +
geom_text(data = hsd_letters,
mapping = aes(label = Letter,
x = Haplotype,
y = Y_Coord),
size = 12)
|
90186b6c571957174ea9029b329f27164bacbf82
|
7ae3892dc8325f65537f3c9ade61f85f8eff9c29
|
/R/qgg_utility_functions.R
|
66fa41beb60e2d80fd9d78a95437640ecf23a965
|
[] |
no_license
|
vplagnol/qgg
|
f26c2bf859c3742124df7959741537643b7d5079
|
1c7c193948613e976f3247b8464529eabcd4e84b
|
refs/heads/master
| 2020-04-16T07:17:34.849187
| 2019-04-13T10:49:42
| 2019-04-13T10:49:42
| 165,380,653
| 0
| 0
| null | 2019-04-13T10:49:43
| 2019-01-12T11:35:32
|
R
|
UTF-8
|
R
| false
| false
| 5,423
|
r
|
qgg_utility_functions.R
|
#' @export
#'
# Rank-based AUC (area under the ROC curve) for a 0/1 outcome.
# yobs:  binary observed outcome (0 = control, 1 = case)
# ypred: predicted scores (higher score = more case-like)
# Returns the probability that a random case outranks a random control.
# Ties in ypred are broken by original order (not averaged).
auc <- function(yobs=NULL, ypred=NULL) {
  n0 <- length(yobs[yobs==0])
  n1 <- length(yobs[yobs==1])
  y <- cbind(yobs, ypred)
  # Keep matrix shape explicitly; order() is stable for tied scores.
  y <- y[order(y[,2], decreasing=TRUE), , drop = FALSE]
  y <- cbind(y, seq(from=nrow(y), to=1))
  # BUG FIX: drop = FALSE keeps the case subset a matrix even when there is
  # exactly one case; the original `y[y[,1]==1,][,3]` collapsed to a vector
  # and errored with "incorrect number of dimensions" in that situation.
  rd <- mean(y[y[,1]==1, , drop = FALSE][,3])
  (1/n0)*(rd-(n1/2)-(1/2))
}
#' @export
#'
# Nagelkerke's pseudo R-squared for a logistic regression of the binary
# outcome `yobs` on the single predictor `ypred`: the Cox-Snell R2 of the
# likelihood-ratio test rescaled to a maximum of 1.
rnag <- function(yobs=NULL,ypred=NULL) {
  null_model <- glm(yobs ~ 1, family = binomial(link = 'logit'))
  full_model <- glm(yobs ~ 1 + ypred, family = binomial(link = 'logit'))
  n_obs <- length(yobs)
  # Likelihood-ratio chi-square for adding ypred (second row of the
  # sequential deviance table).
  lr_stat <- anova(full_model)$Deviance[2]
  loglik_null <- as.numeric(logLik(null_model))
  (1 - exp(-lr_stat / n_obs)) / (1 - exp(-(-2 * loglik_null) / n_obs))
}
#' @export
#'
# Prediction-accuracy summary. Regresses predictions on observations and
# reports: Pearson correlation, R2 of that calibration regression, Nagelkerke
# R2 and AUC (binary traits only, NA otherwise), calibration intercept and
# slope, and the mean squared prediction error. All values rounded to 3 d.p.
acc <- function(yobs=NULL,ypred=NULL,typeoftrait="quantitative") {
  calib <- lm(ypred ~ yobs)
  r2 <- summary(calib)$r.squared
  pa <- cor(ypred, yobs)
  mspe <- sum((ypred - yobs)^2) / length(yobs)
  intercept <- calib$coef[1]
  slope <- calib$coef[2]
  aurc <- NA
  r2nag <- NA
  if (typeoftrait == "binary") {
    aurc <- auc(yobs = yobs, ypred = ypred)
    r2nag <- rnag(yobs = yobs, ypred = ypred)
  }
  res <- round(c(pa, r2, r2nag, aurc, intercept, slope, mspe), 3)
  names(res) <- c("Corr", "R2", "Nagel R2", "AUC", "intercept", "slope", "MSPE")
  res
}
#' @export
#'
# Fast ordinary least squares via the normal equations.
# y:    numeric response vector
# X:    design matrix (include an intercept column yourself)
# sets: optional named list; each element is a vector of coefficient indices
#       to test jointly with an F-test
# Returns a list with coef, se, stat (t), p, ftest (NULL if sets is NULL,
# otherwise one row per set: F-stat, dfq, dfe, p) and fitted values yhat.
fastlm <- function (y=NULL, X=NULL, sets=NULL) {
  XX <- crossprod(X)
  XXi <- chol2inv(chol(XX))      # (X'X)^-1 via Cholesky
  Xy <- crossprod(X, y)
  coef <- crossprod(XXi, Xy)
  rownames(coef) <- colnames(X)
  yhat <- crossprod(t(X), coef)  # fitted values X %*% coef
  sse <- sum((y - yhat)^2)
  dfe <- length(y) - ncol(X)
  se <- sqrt(sse / dfe) * sqrt(diag(XXi))
  stat <- coef / se
  p <- 2 * pt(-abs(stat), df = dfe)
  names(se) <- colnames(X)
  sigma_e <- sse / dfe
  ftest <- NULL
  if (!is.null(sets)) {
    # BUG FIX: the original looped over 1:nsets with `nsets` undefined,
    # so any call with `sets` supplied failed; iterate over the list itself.
    for (i in seq_along(sets)) {
      rws <- sets[[i]]
      dfq <- length(rws)
      # Wald statistic for the joint null that all coefficients in rws are 0.
      q <- crossprod(coef[rws, ], crossprod(solve(XXi[rws, rws] * sigma_e), coef[rws, ]))
      pfstat <- pf(q / dfq, dfq, dfe, lower.tail = FALSE)
      ftest <- rbind(ftest, c(q / dfq, dfq, dfe, pfstat))
    }
    colnames(ftest) <- c("F-stat", "dfq", "dfe", "p")
    rownames(ftest) <- names(sets)
  }
  list(coef = coef, se = se, stat = stat, p = p, ftest = ftest, yhat = yhat)
}
# Pairs-panel helper: prints the squared correlation ("R2=...") in the centre
# of a scatterplot panel; intended for use as a panel function with pairs().
# Resets the panel's user coordinates to the unit square so (0.5, 0.5) is
# always the centre regardless of data range.
panel.cor <- function(x, y, ...) {
par(usr = c(0, 1, 0, 1))
txt <- paste("R2=",as.character(format(cor(x, y)**2, digits=2)))
text(0.5, 0.5, txt, cex = 1, col=1)
}
# Mask the strictly upper triangle of a (correlation) matrix with NA, keeping
# the lower triangle and diagonal intact.
get_lower_tri <- function(cormat){
  masked <- cormat
  masked[upper.tri(masked)] <- NA
  masked
}
# Mask the strictly lower triangle of a (correlation) matrix with NA, keeping
# the upper triangle and diagonal intact.
get_upper_tri <- function(cormat){
  masked <- cormat
  masked[lower.tri(masked)] <- NA
  masked
}
# Reorder a correlation matrix by hierarchical clustering, using (1 - r) / 2
# as the pairwise distance, so highly correlated variables end up adjacent.
reorder_cormat <- function(cormat){
  dist_mat <- as.dist((1 - cormat) / 2)
  leaf_order <- hclust(dist_mat)$order
  cormat[leaf_order, leaf_order]
}
#' @export
#'
# Heatmap of an arbitrary statistics matrix, written to a TIFF file.
# Rows are ordered by row sums, columns by the sums of absolute values.
# Requires reshape2 (melt) and ggplot2 to be attached by the caller.
# NOTE(review): the tiff device is not closed via on.exit(), so an error
# between tiff() and dev.off() leaves the device open — consider hardening.
hmmat <- function(df=NULL,xlab="Cols",ylab="Rows",title=NULL,fname=NULL) {
rowOrder <- order(rowSums(df))
colOrder <- order(colSums(abs(df)))
melted_df <- melt(df[rowOrder,colOrder], na.rm = TRUE)
colnames(melted_df)[1:2] <- c(ylab,xlab)
tiff(file=fname,res = 300, width = 2800, height = 2200,compression = "lzw")
hmplot <- ggplot(melted_df, aes_string(y=ylab,x=xlab)) +
ggtitle(title) +
geom_tile(aes(fill = value)) +
scale_fill_gradient2(low = "blue", high = "red", mid = "white", midpoint = 0, space = "Lab", name="Statistics") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, size = 8, hjust = 1)) +
coord_fixed()
print(hmplot)
dev.off()
}
#' @export
#'
# Correlation heatmap of the columns of `df`, written to a TIFF file.
# Correlations are rounded to 2 d.p., reordered by hierarchical clustering
# (reorder_cormat), and only the upper triangle is drawn (get_upper_tri).
# Requires reshape2 (melt) and ggplot2 to be attached by the caller.
hmcor <- function(df=NULL,fname=NULL) {
cormat <- round(cor(df),2)
cormat <- reorder_cormat(cormat)
melted_cormat <- melt(get_upper_tri(cormat), na.rm = TRUE)
colnames(melted_cormat)[1:2] <- c("Study1","Study2")
tiff(file = fname,res = 300, width = 2800, height = 2200,compression = "lzw")
hmplot <- ggplot(melted_cormat, aes(Study2, Study1, fill = value)) +
geom_tile(color = "white") +
scale_fill_gradient2(low = "blue", high = "red", mid = "white", midpoint = 0, limit = c(-1,1), space = "Lab", name="Pearson\nCorrelation") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, vjust = 1, size = 12, hjust = 1)) +
geom_text(aes(Study2, Study1, label = value), color = "black", size = 4) +
coord_fixed()
print(hmplot)
dev.off()
}
# y <- c(5,8,6,2,3,1,2,4,5) #dependent/observation
# x <- c(-1,-1,-1,0,0,0,1,1,1) #independent
# d1 <- as.data.frame(cbind(y=y,x=x))
#
# model <- glm(y~x, data=d1, family = poisson(link="log"))
# summary(model)
#
# X <- cbind(1,x)
#
# #write an interatively reweighted least squares function with log link
# glmfunc.log <- function(d,betas,iterations=1)
# {
# X <- cbind(1,d[,"x"])
# for(i in 1:iterations) {
# z <- as.matrix(betas[1]+betas[2]*d[,"x"]+((d[,"y"]-exp(betas[1]+betas[2]*d[,"x"]))/exp(betas[1]+betas[2]*d[,"x"])))
# W <- diag(exp(betas[1]+betas[2]*d[,"x"]))
# betas <- solve(t(X)%*%W%*%X)%*%t(X)%*%W%*%z
# print(betas)
# }
# return(list(betas=betas,Information=t(X)%*%W%*%X))
# }
#
# #run the function
# model <- glmfunc.log(d=d1,betas=c(1,0),iterations=10)
|
035b25eb4e20818e16e0c4cb89c7c07a1a43920a
|
66587ef188a45a2074acd1e14e88fd62da412be6
|
/R Code zum Lernen/imputation.r
|
e0ef3b952eaff0e70eddb98cbdc54a7997b5c1f1
|
[] |
no_license
|
malsch/R-Code-Snippets
|
51ff6cf08afb4659289a2862b80ad9204ec26d53
|
8c72c688bd777faba8bb44c8c0500b576ecd6002
|
refs/heads/master
| 2021-03-12T20:46:49.494334
| 2013-03-18T07:22:11
| 2013-03-18T07:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 225
|
r
|
imputation.r
|
# impute missing values
# Mean-impute the four numeric iris columns using e1071::impute().
# NOTE(review): `irissample` is assumed to exist in the workspace; it is not
# created in this file.
library(e1071) # recommended from http://horicky.blogspot.com.au/2012/05/predictive-analytics-data-preparation.html
fixIris1 <- impute(irissample[,1:4], what='mean')
# alternative in discussion: Amelia
|
9648f84c26c200ac1d9f932bb13f71f39392f6d8
|
31c951d364bd64c1e983cb9400d1f257d1b6e35a
|
/Plot3.R
|
44b1cbbdfc141c587d4288fe2d5bd7884fd9da7b
|
[] |
no_license
|
charlestsang/ExData_Plotting1
|
0eb9c1638500c393dea46ace1ea28ba808a4e10e
|
14a0810888a58ca3f6cc0a6ef763174dca0efd17
|
refs/heads/master
| 2021-01-19T09:08:56.588547
| 2017-04-09T21:20:12
| 2017-04-09T21:20:12
| 87,732,791
| 0
| 0
| null | 2017-04-09T19:12:57
| 2017-04-09T19:12:57
| null |
UTF-8
|
R
| false
| false
| 1,127
|
r
|
Plot3.R
|
# Exploratory Data Analysis, Plot 3: sub-metering time series for 2007-02-01
# and 2007-02-02 from the household power consumption data.
getwd()
setwd('/Users/Charlestsang/Dropbox/Data_Science_Coursera/Course4_Wk1/Course4_Wk1_Assignment')
dir()
#Load dataset as data
data <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
#Observation on the dataset
str(data)
# The task only uses data from the dates 2007-02-01 and 2007-02-02; create a subset correspondingly.
sub_data <- subset(data, Date %in% c("1/2/2007","2/2/2007"))
sub_data$Date <- as.Date(sub_data$Date, format="%d/%m/%Y")
#Plot 3
# Combine date and time into a POSIXct timestamp for the x axis.
dtime <- paste(as.Date(sub_data$Date), sub_data$Time)
sub_data$Datetime <- as.POSIXct(dtime)
# NOTE(review): the y-axis label below looks copy-pasted from plot 2; the
# assignment's plot 3 labels this axis "Energy sub metering" — confirm.
with(sub_data, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Global_active_power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#Copy to png file
# NOTE(review): dev.copy() from the screen device can distort legend/layout;
# plotting directly to png() is the more reliable pattern.
dev.copy(png, file="./plot3.png", height=480, width=480)
dev.off()
|
6bd6b44626bd58f07162ac3687f6d916c648de73
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rsimsum/examples/simsum.Rd.R
|
af8233166f588454c68ab91eaa79879bb3460273
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
r
|
simsum.Rd.R
|
library(rsimsum)
### Name: simsum
### Title: Analyses of simulation studies including Monte Carlo error
### Aliases: simsum
### ** Examples
data("MIsim")
# Summarise simulation results for estimand "b" (true value 0.5), grouped by
# method, with complete-case analysis ("CC") as the reference method.
s <- simsum(data = MIsim, estvarname = "b", true = 0.5, se = "se", methodvar = "method", ref = "CC")
# If `ref` is not specified, the reference method is inferred
s <- simsum(data = MIsim, estvarname = "b", true = 0.5, se = "se", methodvar = "method")
|
9497fedd67162b0e5ff4263dbd13ccf0a2de2a02
|
7d29aba8439e5c673eb8da4e3bd7829b0ae416fd
|
/man/PAL.Rd
|
fdaf7e508ecaa35a5e196fa539d20842cc5c2ae7
|
[] |
no_license
|
cran/ITRSelect
|
3e5d60be33685b7e8a5d7c3e5e721b749120a225
|
cc008e6031cce24cf92bb4519bcfd64fbf2d42e4
|
refs/heads/master
| 2020-03-14T13:19:57.601976
| 2018-09-24T03:20:10
| 2018-09-24T03:20:10
| 131,630,693
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,612
|
rd
|
PAL.Rd
|
\name{PAL}
\alias{PAL}
\alias{PAL.fit}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Penalized A-learning for optimal dynamic treatment regime
}
\description{
Selects important variables that are involved in the optimal treatment regime based on
penalized A-learning estimating equation. This function can be applied to two-stage
studies where treatments are sequentially assigned at two different time points.
}
\usage{
PAL(formula, data, subset, na.action, IC = c("BIC", "CIC", "VIC"),
lambda.list = exp(seq(-3.5, 2, 0.1)), refit = TRUE, control = PAL.control(...),
model = TRUE, y = TRUE, a1 = TRUE, x1 = TRUE, a2 = TRUE, x2 = TRUE, ...)
PAL.fit(y, x1, x2 = NULL, a1, a2 = NULL, IC = c("BIC", "CIC", "VIC"),
lambda.list = exp(seq(-3.5, 2, 0.1)), refit = TRUE,
control = PAL.control())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{formula}{A symbolic description of the model to be fitted(of type y ~ x1 | a1 or
y ~ x1 | a1 | x2 | a2. Details are given 'Details').
}
\item{data}{An optional list or environment containing variables in \code{formula}.
}
\item{subset, na.action}{Arguments controlling formula processing via \code{\link{model.frame}}.
}
\item{IC}{Information criterion used in determining the regularization parameter. See 'Details'.
}
\item{lambda.list}{A list of regularization parameter values. Default is \code{exp(seq(-3.5, 2, 0.1))}.
}
\item{refit}{After variable selection, should the coefficients be refitted using A-learning
estimating equation? Default is TRUE.
}
\item{control}{A list of control argument via \code{\link{PAL.control}}.
}
\item{model}{A logical value indicating whether \emph{model frame} should be included
as a component of the return value.
}
\item{y, a1, x1, a2, x2}{For \code{PAL}: logical values indicating whether the response,
the first and second treatments, the baseline and intermediate covariates should be included
as a component of the return value.
For \code{PAL.fit}: y is the response vector (the larger the better), a1 and a2 are the first
and second treatments patients receive, x1 and x2 are the design matrices consisting of
patients' baseline covariates and intermediate covariates.
}
\item{\dots}{Argument passed to \code{\link{PAL.control}}.
}
}
\details{Penalized A-learning is developed to select important variables involved in the optimal
individualized treatment regime. An individualized treatment regime is a function that maps patients
covariates to the space of available treatment options. The method can be applied to both single-stage
and two-stage studies.
PAL applied the Dantzig selector on the A-learning estimating equation for variable selection. The
regularization parameter in the Dantzig selector is chosen according to the information criterion.
Specifically, we provide a Bayesian information criterion (BIC), a concordance information criterion
(CIC) and a value information criterion (VIC). For illustration of these information criteria, consider
a single-stage study. Assume the data is summarized as \eqn{(Y_i, A_i, X_i), i=1,...,n} where \eqn{Y_i}
is the response of the \eqn{i}-th patient, \eqn{A_i} denotes the treatment that patient receives and
\eqn{X_i} is the corresponding baseline covariates. Let \eqn{\hat{\pi}_i} and \eqn{\hat{h}_i} denote the
estimated propensity score and baseline mean of the \eqn{i}-th patient. For any linear treatment regime
\eqn{I(x^T \beta>c)}, BIC is defined as
\deqn{BIC=-n\log\left( \sum_{i=1}^n (A_i-\hat{\pi}_i)^2 (Y_i-\hat{h}_i-A_i c-A_i X_i^T \beta)^2 \right)-\|\beta\|_0 \kappa_B,}
where \eqn{\kappa_B=\{\log (n)+\log (p+1) \}/\code{kappa}} and \code{kappa} is the model complexity penalty used in the function \code{PAL.control}.
VIC is defined as
\deqn{VIC=\sum_{i=1}^n \left(\frac{A_i d_i}{\hat{\pi}_i}+\frac{(1-A_i) (1-d_i)}{1-\hat{\pi}_i} \right)\{Y_i-\hat{h}_i-A_i (X_i^T \beta+c)\}+
\{\hat{h}_i+\max(X_i^T \beta+c,0)\}-\|\beta\|_0 \kappa_V,}
where \eqn{d_i=I(X_i^T \beta>-c)} and \eqn{\kappa_V=n^{1/3} \log^{2/3} (p) \log (\log (n))/\code{kappa}}.
CIC is defined as
\deqn{CIC=\sum_{i\neq j} \frac{1}{n} \left( \frac{(A_i-\hat{\pi}_i) \{Y_i-\hat{h}_i\} A_j}{\hat{\pi}_i (1-\hat{\pi}_i) \hat{\pi}_j}-
\frac{(A_j-\hat{\pi}_j) \{Y_j-\hat{h}_j\} A_i}{\hat{\pi}_j (1-\hat{\pi}_j) \hat{\pi}_i} \right) I(X_i^T \beta> X_j^T \beta)
-\|\beta\|_0 \kappa_C,}
where \eqn{\kappa_C=\log (p) \log_{10}(n) \log(\log_{10}(n))/\code{kappa}}.
Under certain conditions, it can be shown that CIC and VIC is consistent as long as either the estimated
propensity score or the estimated baseline is consistent.
For single-stage study, the formula should be specified as y ~ x1 | a1 where y is the response vector (y
should be specified in such a way that a larger value of y indicates better clinical outcomes), x1 is
patient's baseline covariates and a1 is the treatment that patient receives.
For two-stage study, the formula should be specified as y ~ x1 | a1 | x2 | a2 where y is the response
vector, a1 and a2 the vectors of patients' first and second treatments, x1 and x2 are the design matrices
consisting of patients' baseline covariates and intermediate covariates.
\code{PAL} standardizes the covariates and includes an intercept in the estimated individualized treatment
regime by default. For single-stage study, the estimated treatment regime is given by \eqn{I(\code{x1}^T \code{beta1.est}>0)}.
For two-stage study, the estimated regime is given by \eqn{\code{a1}=I(x1^T \code{beta1.est}>0)} and \eqn{\code{a2}=I(\code{x}^T \code{beta2.est}>0)}
where \code{x=c(x1, a1, x2)}.
}
\value{
\item{beta2.est}{Estimated coefficients in the second decision rule.}
\item{beta1.est}{Estimated coefficients in the first decision rule.}
\item{pi2.est}{Estimated propensity score at the second stage.}
\item{pi1.est}{Estimated propensity score at the first stage.}
\item{h2.est}{Estimated baseline function at the second stage.}
\item{h1.est}{Estimated baseline function at the first stage.}
\item{alpha2.est}{Regression coefficients in the estimated propensity score at the second stage.}
\item{alpha1.est}{Regression coefficients in the estimated propensity score at the first stage.}
\item{theta2.est}{Regression coefficients in the estimated baseline function at the second stage.}
\item{theta1.est}{Regression coefficients in the estimated baseline function at the first stage.}
\item{model}{The full model frame (if \code{model = TRUE}).}
\item{y}{Response vector (if \code{y = TRUE}).}
\item{x1}{Baseline covariates (if \code{x1 = TRUE}).}
\item{a1}{A vector of first treatment (if \code{a1 = TRUE}).}
\item{x2}{Intermediate covariates (if \code{x2 = TRUE}).}
\item{a2}{A vector of second treatment (if \code{a2 = TRUE}).}
}
\references{
Shi, C. and Fan, A. and Song, R. and Lu, W. (2018) High-Dimensional A-Learning for Optimal
Dynamic Treatment Regimes. \emph{Annals of Statistics,} \bold{ 46:} 925-957.
Shi, C. and Song, R. and Lu, W. (2018) Concordance and Value Information Criteria for
Optimal Treatment Decision. \emph{Under review}.
}
\author{
Chengchun Shi and Ailin Fan
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{PAL.control}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\donttest{
## single-stage study
set.seed(12345)
n <- 200
p <- 1000
X <- matrix(rnorm(n*p), nrow=n, ncol=p)
A <- rbinom(n, 1, 0.5)
CX <- (X[,1] + X[,2])
h <- 1 + X[,1] * X[,3]
Y <- h + A*CX + 0.5*rnorm(n)
result <- PAL(Y~X|A)
## two-stage study
set.seed(12345*2)
n <- 200
p <- 1000
X1 <- matrix(rnorm(n*p), nrow=n, ncol=p)
A1 <- rbinom(n, 1, 0.5)
X2 <- X1[,1] + A1 + 0.5*rnorm(n)
A2 <- rbinom(n, 1, 0.5)
Y <- A2*(A1 + X2) + A1*X1[,1] + 0.5*rnorm(n)
result <- PAL(Y~X1|A1|X2|A2)
}
## single-stage study
set.seed(12345)
n <- 50
p <- 20
X <- matrix(rnorm(n*p), nrow=n, ncol=p)
A <- rbinom(n, 1, 0.5)
CX <- (X[,1] + X[,2])
h <- 1 + X[,1] * X[,3]
Y <- h + A*CX + 0.5*rnorm(n)
result <- PAL(Y~X|A)
## two-stage study
set.seed(12345*2)
n <- 50
p <- 20
X1 <- matrix(rnorm(n*p), nrow=n, ncol=p)
A1 <- rbinom(n, 1, 0.5)
X2 <- X1[,1] + A1 + 0.5*rnorm(n)
A2 <- rbinom(n, 1, 0.5)
Y <- A2*(A1 + X2) + A1*X1[,1] + 0.5*rnorm(n)
result <- PAL(Y~X1|A1|X2|A2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Penalized A-learning}
|
a71a77d97fa4234626730b60e9c3480e4afd4742
|
83d08b682b35e1fdc2fdba3b4caedf07de87e72f
|
/data_preparation.R
|
75e7d12af847d2a73d22c870042782b24c15f152
|
[] |
no_license
|
ZenCO/Data-Visualization-Lesson
|
3372703e5ac2018ab2b3ffda49a7bcfdcda7794d
|
3431eaf40bd116330a9ab89d29fef49180611e3a
|
refs/heads/master
| 2020-05-23T19:49:48.240103
| 2019-05-14T22:14:19
| 2019-05-14T22:14:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 961
|
r
|
data_preparation.R
|
# This file will prepare the data
# Data was originally found on data.world
# http://www.eia.gov/dnav/pet/pet_crd_crpdn_adc_mbbl_m.htm
library(dplyr)
library(tidyr)
library(stringr)
library(zoo)
getData = function(){
  # Read the raw EIA monthly crude-oil production export and return a tidy
  # data frame with one row per (state, month): columns Location, Month,
  # Year, Date, ThousandBarrel.
  data = read.csv('Monthly Crude Oil Production by State 1981 - Nov 2016.csv')
  # Drop the non-data rows the export embeds in the Date column.
  data = data[data$Date != 'Back to Contents',]
  data = data[data$Date != 'Sourcekey',]
  data = data[data$Date != '',]
  #Convert data
  # Wide -> long (one row per state column), then split "Mon-YY" labels
  # into Month and Year.
  df = data.frame(data %>% gather(Location,ThousandBarrel, -Date))
  df = data.frame(df %>% separate(Date,c('Month','Year'),"-"))
  # zoo::as.yearmon parses the "Mon YY" month labels into proper dates.
  df$Date = as.Date(as.yearmon(paste(df$Month,df$Year)))
  # Strip the long EIA column-name suffix, leaving just the state name.
  df$Location = as.character(sub('.Field.Production.of.Crude.Oil..Thousand.Barrels.','',df$Location))
  df$ThousandBarrel = as.numeric(df$ThousandBarrel)
  # Keep only rows whose Location matches a US state name.
  States = data.frame(Location = as.character(state.name))
  df = merge(df,States,by='Location')
  # Order months chronologically rather than alphabetically.
  df$Month = factor(df$Month, levels = month.abb)
  return(df)
}
|
840e22984513398f89241357abb9c30f6a5f39d9
|
e40c58f8d40e6dca14c26d7ddf9f437642c15b81
|
/man/rapport.inputs.Rd
|
5175d9dd7caa440925b0f7306e73c8f8e412e750
|
[] |
no_license
|
cran/rapport
|
304e9930d913db4590abbce26a184de89a09c044
|
18a98122f53c25a8d1cdc30dd7f5a464499a477e
|
refs/heads/master
| 2021-06-03T23:38:32.485410
| 2021-04-11T20:50:02
| 2021-04-11T20:50:02
| 17,699,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,670
|
rd
|
rapport.inputs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/template.R
\name{rapport.inputs}
\alias{rapport.inputs}
\alias{tpl.inputs}
\title{Template Inputs}
\usage{
rapport.inputs(fp, use.header = FALSE)
}
\arguments{
\item{fp}{a template file pointer (see \code{rapport:::rapport.read} for details)}
\item{use.header}{a logical value indicating whether the header section is provided in \code{h} argument}
}
\description{
Displays summary for template inputs (if any). Note that as of version \code{0.5}, \code{rapport} template inputs should be defined using YAML syntax. See \code{deprecated-inputs} for details on old input syntax. The following sections describe new YAML input definition style.
}
\details{
\strong{Introduction}
The full power of \code{rapport} comes into play with \emph{template inputs}. One can match inputs against dataset variables or custom \code{R} objects. The inputs provide means of assigning \code{R} objects to \code{symbol}s in the template evaluation environment. Inputs themselves do not handle only the template names, but also provide an extensive set of rules that each dataset variable/user-provided \code{R} object has to satisfy. The new YAML input specification takes advantage of \code{R} class system. The input attributes should resemble common \code{R} object attributes and methods.
Inputs can be divided into two categories:
\itemize{
\item \emph{dataset inputs}, i.e. the inputs that refer to a named element of an \code{R} object provided in the \code{data} argument in the \code{rapport} call. Currently, \code{rapport} supports only \code{data.frame} objects, but that may change in the (near) future.
\item \emph{standalone inputs} - the inputs that do not depend on the dataset. The user can just provide an \code{R} object of an appropriate class (and other input attributes) to match a \emph{standalone} input.
}
\strong{General input attributes}
Following attributes are available for all inputs:
\itemize{
\item \code{name} (character string, required) - input name. It acts as an identifier for a given input, and is required as such. Template cannot contain duplicate names. \code{rapport} inputs currently have custom naming conventions - see \code{\link{guess.input.name}} for details.
\item \code{label} (character string) - input label. It can be blank, but it's useful to provide input label as \code{rapport} helpers use that information in plot labels and/or exported HTML tables. Defaults to empty string.
\item \code{description} (character string) - similar to \code{label}, but should contain long description of given input.
\item \code{class} (character string) - defines an input class. Currently supported input classes are: \code{character}, \code{complex}, \code{factor}, \code{integer}, \code{logical}, \code{numeric} and \code{raw} (all atomic vector classes are supported). Class attribute should usually be provided, but it can also be \code{NULL} (default) - in that case the input class will be guessed based on matched \code{R} object's value.
\item \code{required} (logical value) - does the input require a value? Defaults to \code{FALSE}.
\item \code{standalone} (logical value) - indicates that the input depends on a dataset. Defaults to \code{FALSE}.
\item \code{length} (either an integer value or a named list with integer values) - provides a set of rules for input value's length. \code{length} attribute can be defined via:
\itemize{
\item an integer value, e.g. \code{length: 10}, which sets restriction to exactly 10 vectors or values.
\item named list with \code{min} and/or \code{max} attributes nested under \code{length} attribute. This will define a range of values in which input length must must fall. Note that range limits are inclusive. Either \code{min} or \code{max} attribute can be omitted, and they will default to \code{1} and \code{Inf}, respectively.
}
\strong{IMPORTANT!} Note that \code{rapport} treats input length in a bit different manner. If you match a subset of 10 character vectors from the dataset, input length will be \code{10}, as you might expect. But if you select only one variable, length will be equal to \code{1}, and not to the number of vector elements. This stands both for standalone and dataset inputs. However, if you match a character vector against a standalone input, length will be stored correctly - as the number of vector elements.
\item \code{value} (a vector of an appropriate class). This attribute only exists for standalone inputs. Provided value must satisfy rules defined in \code{class} and \code{length} attributes, as well as any other class-specific rules (see below).
}
\strong{Class-specific attributes}
\emph{character}
\itemize{
\item \code{nchar} - restricts the number of characters of the input value. It accepts the same attribute format as \code{length}. If \code{NULL} (default), no checks will be performed.
\item \code{regexp} (character string) - contains a string with regular expression. If non-\code{NULL}, all strings in a character vector must match the given regular expression. Defaults to \code{NULL} - no checks are applied.
\item \code{matchable} (logical value) - if \code{TRUE}, \code{options} attribute must be provided, while \code{value} is optional, though recommended. \code{options} should contain values to be chosen from, just like \code{<option>} tag does when nested in \code{<select>} HTML tag, while \code{value} must contain a value from \code{options} or it can be omitted (\code{NULL}). \code{allow_multiple} will allow values from \code{options} list to be matched multiple times. Note that unlike previous versions of \code{rapport}, partial matching is not performed.
}
\emph{numeric}, \emph{integer}
\itemize{
\item \code{limit} - similar to \code{length} attribute, but allows only \code{min} and \code{max} nested attributes. Unlike \code{length} attribute, \code{limit} checks input values rather than input length. \code{limit} attribute is \code{NULL} by default and the checks are performed only when \code{limit} is defined (non-\code{NULL}).
}
\emph{factor}
\itemize{
\item \code{nlevels} - accepts the same format as \code{length} attribute, but the check is performed rather on the number of factor levels.
\item \code{matchable} - \emph{ibid} as in character inputs (note that in previous versions of \code{rapport} matching was performed against factor levels - well, not any more, now we match against values to make it consistent with \code{character} inputs).
}
}
\seealso{
{
\code{\link{rapport.meta}}
\code{\link{rapport.info}}
}
}
|
11164e05782a74013bc23b852b4badf1519f2bca
|
32c105a13c42333c8359ef19c610e7e0fca7ed8d
|
/codes/Arthur/modelisation_cout.R
|
b463225e876cfe09d8072a3614739f40594bec45
|
[] |
no_license
|
YohannLeFaou/Pricing-Game-2017
|
7e68d3e84c440ce952eba74c5c8f5fc789e5ff27
|
8be758368743ac65e844ef9f277b432a69916ffa
|
refs/heads/master
| 2021-01-11T18:35:27.366199
| 2017-04-29T13:08:44
| 2017-04-29T13:08:44
| 79,576,502
| 0
| 1
| null | 2017-04-29T13:08:45
| 2017-01-20T16:37:24
|
R
|
UTF-8
|
R
| false
| false
| 3,453
|
r
|
modelisation_cout.R
|
library(caret)
library(MASS)
load("train_cout.RData")
Score_GLM<-function(dataX,dataY,controle_methode,controle_nombre_fold,controle_nombre_repetition=1,fit_famille,fit_metrique = "RMSE",seed=2017){
  # Cross-validated scoring of a GLM via caret::train(method = "glm").
  # Returns the per-fold resampling results (GLM.fit$resample).
  #dataX : the predictors
  #dataY : the target variable
  #controle_methode : control parameter; accepts "cv", "repeatedcv", "LOOCV"
  #controle_nombre_fold : control parameter; number of folds (segments) used for the k-fold cross-validation
  #controle_nombre_repetition : control parameter; number of repetitions of the k-fold cross-validation (defaults to 1)
  #fit_famille : glm() family parameter; accepts any value supported by glm()
  #fit_metrique : evaluation metric, set to RMSE to evaluate a regression. For classification use "Accuracy"
  #seed : random seed for reproducibility of the results (defaults to 2017)
  set.seed(seed)
  # inTraining <- createDataPartition(dataY, p = .75, list = FALSE)
  # training <- data.frame(dataX[ inTraining,])
  # testing <- data.frame(dataX[-inTraining,])
  # Y_training <- dataY[ inTraining]
  # Y_testing <- dataY[-inTraining]
  fitControl <- trainControl(# resampling scheme, e.g. k-fold CV
    method = controle_methode,
    number = controle_nombre_fold, # number of folds of the k-fold CV
    repeats = controle_nombre_repetition, # how many times the CV is repeated
    allowParallel = F)
  GLM.fit <- train(x = dataX,
                   y = dataY,
                   method = "glm",
                   family = fit_famille,
                   metric = fit_metrique,
                   trControl = fitControl)
  # GLM.fit <- train(training,Y_training,
  #                 method = "glm",
  #                 family = fit_famille,
  #                 metric = fit_metrique,
  #                 trControl = fitControl)
  # res<-data.frame(Fold = as.character(GLM.fit$resample$Resample),
  #                 RMSE = as.numeric(GLM.fit$resample$RMSE),
  #                 Rsquared = as.numeric(GLM.fit$resample$Rsquared))
  return(GLM.fit$resample)
}
#--------------------------------------------------------------------------------------------------------------------------------------------------------------
# Gaussian GLM on claim cost (cout) using dummy-coded categorical predictors,
# followed by stepwise AIC model selection in both directions.
fit <- glm(formula = cout ~ .,
           data = train_cout[,c("cout",setdiff(x_var_quali_cout_dummy, c(modes_quali_var_cout, var_dummy_delete_cout)))],
           family = "gaussian")
step <- stepAIC(fit, direction="both")
# NOTE(review): step$model is the model *frame* (the data used for fitting),
# not the fitted model object; if the selected model itself was intended,
# save `step` instead -- TODO confirm.
best<-step$model
save(best,file="meilleur_aic_cout.RData")
# Gamma GLM with log link, restricted to strictly positive costs
# (the Gamma family is undefined for zero/negative responses).
fit.gamma <- glm(formula = cout ~ .,
                 data = train_cout[train_cout$cout>0,c("cout",setdiff(x_var_quali_cout_dummy, c(modes_quali_var_cout, var_dummy_delete_cout)))],
                 family = Gamma(link="log"))
step <- stepAIC(fit.gamma, direction="both")
#var_num<-sapply(train_cout[1,1:50],is.numeric)
#bis<-train_cout[,1:50]
# glm.fit <- glm(cout ~.,
#                data = bis[,var_num],
#                family = "gaussian")
# Repeated 2-fold cross-validation (3 repetitions) of the Gaussian GLM,
# scored by RMSE via Score_GLM defined above.
Scores<-Score_GLM(dataX = train_cout[,c("cout",setdiff(x_var_quali_cout_dummy, c(modes_quali_var_cout, var_dummy_delete_cout)))],
                  dataY = train_cout$cout,
                  controle_methode = "repeatedcv",
                  controle_nombre_fold = 2,
                  controle_nombre_repetition = 3,
                  fit_famille = "gaussian",
                  fit_metrique = "RMSE",
                  seed=2017)
Scores
|
fc5d788bce491a9a310bbcfc96dcbfb2d81ca622
|
9ddd623471e8174ade5b9921dbc1cb1da731e115
|
/man/rank_char.Rd
|
41f6ebe341dd04ee28670d0d51a7b083f990ea82
|
[] |
no_license
|
zackarno/koborg
|
2eba2f837b51a494b7efcb8d491e800de6ec70d9
|
6312bb3ab0b59b96f91812b90f5afd224d599b04
|
refs/heads/master
| 2022-09-13T17:11:09.884337
| 2020-05-27T09:45:22
| 2020-05-27T09:45:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 585
|
rd
|
rank_char.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_rank.R
\name{rank_char}
\alias{rank_char}
\title{Rank character constructor}
\usage{
rank_char(
x = character(),
relevant = NA,
choice_names = NA,
choice_labels = NA,
q_name = NA,
label = NA,
constraint = NA,
max_rank = na_length(choice_names),
position_sep = "/"
)
}
\description{
`rank_char()` constructs a rank character vector, each value in the vector corresponds to the ordered
options for that survey row in string format. Can be constructed from a character vector itself.
}
|
99c794a95b8e393436a96526a6766feba5e9c8f6
|
d102c6ec4db9f2a932fcb6b424773469dbf989fc
|
/venn_diagram.R
|
8a6df0f930a9f584697d854ba417e3f100e1d8d9
|
[
"MIT"
] |
permissive
|
Jokendo-collab/R_codes
|
94fae0fa004fdc1d012386a4329b8ccacacd816c
|
35a90d4e218c535bfb47c42929708aa1a137b0b5
|
refs/heads/master
| 2022-04-11T17:46:39.383799
| 2020-03-07T18:28:13
| 2020-03-07T18:28:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,636
|
r
|
venn_diagram.R
|
# Venn diagrams: word overlap between artists (VennDiagram package) and
# protein overlap between growth phases (limma::vennDiagram).
install.packages('VennDiagram')
setwd("/home/javan/Downloads")
dir()
# Libraries
BiocManager::install("limma")
install.packages("tidyverse")
install.packages("hrbrthemes")
install.packages("tm")
install.packages("proustr")
library(tidyverse)
library(hrbrthemes)
library(tm)
library(proustr)
library(limma)
# Fixed: VennDiagram was installed above but never attached, so the
# venn.diagram() call below failed with "could not find function".
library(VennDiagram)
data <- read.table("venn_diagram.txt",header = T,sep = '\t')
View(data)
# Make the plot
venn.diagram(
  x = list(
    data %>% filter(artist=="booba") %>% select(word) %>% unlist() ,
    data %>% filter(artist=="nekfeu") %>% select(word) %>% unlist() ,
    data %>% filter(artist=="georges-brassens") %>% select(word) %>% unlist()
  ),
  category.names = c("Booba (1995)" , "Nekfeu (663)" , "Brassens (471)"),
  filename = 'IMG/venn.png',
  output = TRUE ,
  imagetype="png" ,
  height = 480 ,
  width = 480 ,
  resolution = 300,
  compression = "lzw",
  lwd = 1,
  col=c("#440154ff", '#21908dff', '#fde725ff'),
  fill = c(alpha("#440154ff",0.3), alpha('#21908dff',0.3), alpha('#fde725ff',0.3)),
  cex = 0.5,
  fontfamily = "sans",
  cat.cex = 0.3,
  cat.default.pos = "outer",
  cat.pos = c(-27, 27, 135),
  cat.dist = c(0.055, 0.055, 0.085),
  cat.fontfamily = "sans",
  cat.col = c("#440154ff", '#21908dff', '#fde725ff'),
  rotation = 1
)
# NOTE(review): attach() is redundant here -- every column below is accessed
# with data$ anyway; consider removing it.
attach(data)
colnames(data)
Early_log <- data$Early.log
Mid_log <- data$Mid.log
early_stationary <- data$early.stationary
Stationary <- data$Stationary
Late_stationary <- data$Late.stationary
#Single_Colony <- data$Single.Colony
bind_data <- cbind(Early_log,Mid_log,early_stationary,Stationary,Late_stationary)#,Single_Colony)
# Count set memberships and draw the limma-style Venn diagram.
a <- vennCounts(bind_data)
a
vennDiagram(a)
|
5fd41a65a21dc300a1b149ed9363d2df6a4555e6
|
0898f23284c280dc10c4526bd45b80068eb3a97c
|
/fbs.codes/fun.DU.CCBS.R
|
5c6bf612d2757b4263510ce63098c9e47bd5ba16
|
[] |
no_license
|
AdamNewb/fbs.compiler
|
9f171c478a04bc51972ee4ed82329cff40bd2493
|
4d65ccc669ef3d4b6fedd6da59662cfcb1fbcfa8
|
refs/heads/master
| 2021-01-01T15:19:11.425472
| 2014-12-09T18:50:38
| 2014-12-09T18:50:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,552
|
r
|
fun.DU.CCBS.R
|
#' Function to get cereal food from the CCBS database of EST
#'
#' This function imports CCBS data in the form of FCL area coded csv files
#' and then consolidates into a dataframe of cereal food availabilities. These quantities
#' inform FBS cereal food. Feed is also collected for reference.
#' @param sYr - from which year to begin processing
#' @param lYr - from which year to end processing
#' @param XCBSpath - path where CCBS csv files are located
#' @param XCBSmap - file by which to map CCBS to FBS cereals
#' @param XCBSele = vector of CCBS elements
#' @param XCBSelex = vector of CCBS element reference of relevance,
#' @param XCBSref = vector of CCBS reference elements
#' @param XCBSid = vector of CCBS reference IDs
#' @param XCBSitems = CCBS item to extract
#' @param XCBSareacode = CCBS area codes to extract
#' @param XCBSareaname = CCBS area names to extract
#' @keywords CCBS, food
#' @export
#' @examples
#' fun.DU.CCBS()
fun.DU.CCBS = function(sYr, lYr,
                       XCBSpath,
                       XCBSmap,
                       XCBSele,
                       XCBSelex,
                       XCBSref,
                       XCBSid,
                       XCBSitems,
                       XCBSareacode,
                       XCBSareaname){
  # Fixed: setwd() is a global side effect -- remember the caller's working
  # directory and restore it on exit, even on error.
  old_wd <- getwd()
  setwd(XCBSpath)
  on.exit(setwd(old_wd), add = TRUE)
  # Fixed: list.files(pattern=) expects a regular expression, not a glob;
  # "*.csv" was a glob. Anchor on the ".csv" extension instead.
  XCBSfiles <- list.files(pattern = "\\.csv$")
  # Stack all per-area CSV files into one data frame.
  XCBSdata <- do.call("rbind",
                      lapply(XCBSfiles, function(x) read.csv(x, stringsAsFactors = FALSE)))
  # Year column labels, e.g. "X2000" ... "X2010"
  # (vectorized; previously grown with c() inside a loop).
  XCBSYears <- paste0("X", sYr:lYr)
  # Keep only the requested elements and the reference + year columns.
  XCBSdata <- XCBSdata[XCBSdata$ELEM.DES %in% XCBSele == TRUE,
                       c(XCBSref, XCBSYears)]
  # If aggregate China (351) is missing, build it from mainland China (41),
  # Chinese Taipei (214), Hong Kong (96) and Macao (128).
  if(351 %in% XCBSdata$COUN.CODE == FALSE ){
    ChnMeta <- XCBSdata[XCBSdata$COUN.CODE == 41, c(1:4)]
    a <- which(colnames(XCBSdata) == paste0("X", sYr))
    b <- which(colnames(XCBSdata) == paste0("X", lYr))
    ChnData <- XCBSdata[XCBSdata$COUN.CODE == 41, c(a:b)] +
      XCBSdata[XCBSdata$COUN.CODE == 214, c(a:b)] +
      XCBSdata[XCBSdata$COUN.CODE == 96, c(a:b)] +
      XCBSdata[XCBSdata$COUN.CODE == 128, c(a:b)]
    ChnMeta["COUN.CODE"] <- "351"
    ChnMeta["COUN.DES"] <- "China"
    Chn <- cbind(ChnMeta, ChnData)
    XCBSdata <- rbind(XCBSdata, Chn)
  }
  # Map CCBS commodities onto FBS cereals, then reshape to long format.
  mergedXCBS <- merge(XCBSdata, XCBSmap,
                      by = c(XCBSitems), all = FALSE)
  mergedXCBS <- mergedXCBS[c(-1)]
  lXCBS <- melt(mergedXCBS, id.vars = c(XCBSid),
                variable.name = "Year")
  lXCBS$value <- as.numeric(as.character(lXCBS$value))
  # Strip the leading "X" from the year labels and convert to integer.
  lXCBS$Year <- substr(lXCBS$Year,2,5)
  lXCBS$Year <- as.integer(as.numeric(lXCBS$Year))
  colnames(lXCBS)[colnames(lXCBS) == XCBSareacode] <- "AreaCode"
  colnames(lXCBS)[colnames(lXCBS) == XCBSareaname] <- "AreaName"
  # One row per area/commodity/year, one column per CCBS element.
  castXCBS <- dcast(lXCBS, AreaCode + AreaName + Commodity + Year ~ ELEM.DES,
                    value.var = "value",
                    fun.aggregate = mean)
  XCBS.DU <- castXCBS
  return(XCBS.DU)
}
|
b1869d6d858ba48e50ed61666a5af5cd1a4e3148
|
920d09a034d18fc1fa7e8add14a6fe0fc4472f8e
|
/src/data/get_teams.R
|
d436a01961087fe1811ac63bb8cc3193b0cd6a17
|
[] |
no_license
|
zmalosh/SoccerDashboard
|
b7eca92380f2818f60e2568e0c437e5e022b61a6
|
807024c67d6378d100ef0be71bcb55c229109846
|
refs/heads/master
| 2020-08-29T02:06:10.507249
| 2020-01-05T19:12:08
| 2020-01-05T19:12:08
| 217,889,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,137
|
r
|
get_teams.R
|
source('requirements.R')
source('src/data/get_api_football_json_from_url.R')
# Fetch the teams of one league from api-football, with an optional
# 24-hour on-disk CSV cache.
#
# leagueId:   numeric api-football league id
# allowCache: when TRUE, reuse a cached CSV younger than 24 hours and
#             refresh the cache after a live fetch.
# Returns a data frame with one row per team.
get_teams_by_league <- function(leagueId, allowCache = TRUE){
  localPath <- paste0(getwd(), '/data/raw/teams/teams_', str_pad(leagueId, 4, pad = '0'), '.csv')
  cacheExpirationMin <- 24 * 60
  if(allowCache){
    if(!dir.exists(dirname(localPath))){
      # Fixed: recursive = TRUE so the whole data/raw/teams tree is created
      # even when the parent directories do not exist yet; dir.create()
      # without it fails and the later write_csv() would error.
      dir.create(dirname(localPath), recursive = TRUE, showWarnings = FALSE)
    }
    # NOTE(review): ctime is the inode change time; mtime may be a better
    # freshness signal, but the two coincide right after write_csv().
    if(file.exists(localPath) && (file.info(localPath)$ctime + (cacheExpirationMin * 60)) > Sys.time()){
      # Explicit column types so the cached CSV round-trips losslessly.
      cols <- cols(
        TeamId = col_double(),
        TeamName = col_character(),
        TeamCode = col_character(),
        Country = col_character(),
        LogoUrl = col_character(),
        FoundedYear = col_double(),
        VenueName = col_character(),
        VenueSurface = col_character(),
        VenueAddress = col_character(),
        VenueCity = col_character(),
        VenueCapacity = col_double(),
        LeagueId = col_double()
      )
      teams <- read_csv(localPath, col_types = cols)
      return (teams)
    }
  }
  # Copy so mutate() below can reference it without colliding with a column.
  inputLeagueId <- leagueId
  url <- paste0('https://api-football-v1.p.rapidapi.com/v2/teams/league/', leagueId)
  json <- get_api_football_json_from_url(url)
  teams <- json$teams
  # Rename the API fields to this project's column naming convention.
  teams <- teams %>%
    mutate(TeamId = team_id,
           TeamName = name,
           TeamCode = code,
           LogoUrl = logo,
           Country = country,
           FoundedYear = founded,
           VenueName = venue_name,
           VenueSurface = venue_surface,
           VenueAddress = venue_address,
           VenueCity = venue_city,
           VenueCapacity = venue_capacity,
           LeagueId = inputLeagueId
    ) %>%
    select(TeamId, TeamName, TeamCode, Country,
           LogoUrl, FoundedYear, VenueName,
           VenueSurface, VenueAddress, VenueCity,
           VenueCapacity, LeagueId)
  if(allowCache){
    write_csv(teams, localPath)
  }
  return (teams)
}
# Fetch the teams of every known league and stack them into one data frame.
#
# Fixed: the previous version hard-coded get_teams_by_league(1) for the
# first league (wrong whenever row 1's league_id != 1) and grew the result
# with rbind() inside a loop (quadratic copying). Every league id is now
# taken from the leagues table and the per-league frames are bound once.
get_all_teams <- function(allowCache = TRUE){
  source('src/data/get_leagues.R')
  leagues <- get_leagues()
  teamsByLeague <- lapply(leagues$league_id,
                          function(leagueId) get_teams_by_league(leagueId, allowCache = allowCache))
  teams <- do.call(rbind, teamsByLeague)
  return (teams)
}
|
493c8bf20363371e7e1153bbc523cc3ddc93c563
|
2b7066f30b070930a3e325f9a2f3abbf774da88c
|
/man/print.summary.roll.Rd
|
33bb71b0d37ac0e29edbb09d731678610f203875
|
[] |
no_license
|
clagett/dieroller
|
cc1e896f648af366669bbf31fa26c263fb12e40a
|
21f0c2af629d09753262637e77a2668675b89d09
|
refs/heads/master
| 2020-03-14T01:30:20.126938
| 2018-04-28T06:15:30
| 2018-04-28T06:15:30
| 131,378,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 253
|
rd
|
print.summary.roll.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rolls.R
\name{print.summary.roll}
\alias{print.summary.roll}
\title{summary.roll}
\usage{
\method{print}{summary.roll}(x, ...)
}
\description{
print method for summary.roll
}
|
6b612eb25154d2d94c8e4144f8160bd500db1640
|
c04220321b8886ea49930bc7fd7ea0ab24462eb5
|
/charclust/man/objcharac.Rd
|
dd12d653aedaf688fc7bb36db231498b967286cd
|
[] |
no_license
|
andlg/Package_R_Caracterisation_Classes_Clustering
|
185fa4e7f2b3a2831ec653f7218e45c0a9fd4b06
|
62b66ac9e4196d80ec11208b581467879130c62a
|
refs/heads/main
| 2023-01-29T04:39:49.892309
| 2020-12-08T19:36:10
| 2020-12-08T19:36:10
| 317,900,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,047
|
rd
|
objcharac.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/object.R
\name{objcharac}
\alias{objcharac}
\title{objcharac}
\usage{
objcharac(df, var_act, var_illus = NULL, var_grp)
}
\arguments{
\item{df}{a dataframe with all the data}
\item{var_act}{a dataframe with active variables}
\item{var_illus}{a dataframe with illustrative variables}
\item{var_grp}{a dataframe or a list with the clusters}
}
\value{
an object of type objcharac
\describe{
\item{$data}{dataframe}
\item{$act}{dataframe with active variables for clustering}
\item{$illus}{dataframe with illustrative variables, default=NULL}
\item{$grp}{clusters for each observations}
}
}
\description{
objcharac
}
\examples{
\dontrun{
library(charclust)
data(iris)
obj <- objcharac(iris, iris[,-5], NULL, iris$Species)
-----------------
data(insertion_master)
data.illu <- insertion_master[,c(1:6,12)]
data.act <-insertion_master[,7:11]
res.kmeans<-kmeans(data.act,centers=3,nstart=5)
obj2 <- objcharac(insertion_master, data.act, data.illu, res.kmeans$cluster)
}
}
|
6dd56eaf588b09653adbcebcb0d8f8e515d25c5b
|
7641a5bb8a685bc2ae38a0fa32615dd7539f6442
|
/man/timeDot.Rd
|
c0949ecb87847a6c6b5fd659d4cf6ddef6eb3685
|
[] |
no_license
|
grantg012/XCTrackerCpp4
|
9e0c1b0d72b76a8e25f3e9f6adbf590928ec00fc
|
757b41a22ad37714c0d8bd4146e17c0a6cecba13
|
refs/heads/master
| 2021-01-19T18:48:28.924742
| 2018-08-12T02:31:38
| 2018-08-12T02:31:38
| 88,383,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 365
|
rd
|
timeDot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{timeDot}
\alias{timeDot}
\title{Appends word to the prefix "Time."}
\usage{
timeDot(word)
}
\arguments{
\item{word}{A string of the word to append}
}
\value{
The concatenation of "Time." and word.
}
\description{
Appends word to the prefix "Time."
}
|
e725c108ed9ef0f822f909a7d27bdbef2535bd9e
|
bd56d216f8ea8c7a5edab6986c168f8a405d9bb7
|
/R/asDataFrame.R
|
7f01c46bd271f2c04318a14ee13896b4cf88b1ca
|
[] |
no_license
|
cran/datarobot
|
2b00acb58e93740848508666cf5b29e45fab4f94
|
2466c294c9827923d727d77393522e101fc087aa
|
refs/heads/master
| 2023-08-23T18:48:30.147248
| 2023-08-07T20:00:02
| 2023-08-07T21:30:43
| 67,064,589
| 3
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,187
|
r
|
asDataFrame.R
|
#' DataRobot S3 object methods for R's generic as.data.frame function
#'
#' These functions extend R's generic as.data.frame function to the
#' DataRobot S3 object classes listOfBlueprints, listOfFeaturelists,
#' listOfModels, and projectSummaryList.
#'
#' All of the DataRobot S3 `listOf' class objects have relatively
#' complex structures and are often easier to work with as dataframes.
#' The methods described here extend R's generic as.data.frame function
#' to convert objects of these classes to convenient dataframes. For
#' objects of class listOfBlueprints and listOfFeaturelists or objects
#' of class listOfModels and projectSummaryList with simple = FALSE,
#' the dataframes contain all information from the original S3 object.
#' The default value simple = TRUE provides simpler dataframes for
#' objects of class listOfModels and projectSummaryList.
#'
#' If simple = TRUE (the default), this method returns a dataframe with
#' one row for each model and the following columns: modelType, expandedModel
#' (constructed from modelType and processes from the listOfModels elements),
#' modelId, blueprintId, featurelistName, featurelistId, samplePct, and the
#' metrics validation value for projectMetric. If simple = FALSE, the method
#' returns a complete dataframe with one row for each model and columns
#' constructed from all fields in the original listOfModels object
#'
#' @param x S3 object to be converted into a dataframe.
#' @param row.names character. Optional. Row names for the dataframe returned by
#' the method.
#' @param optional logical. Optional. If TRUE, setting row
#' names and converting column names to syntactic names: see help for
#' \code{make.names} function.
#' @param simple logical. Optional. if TRUE (the default), a
#' simplified dataframe is returned for objects of class listOfModels
#' or projectSummaryList.
#' @param \dots list. Additional optional parameters to be passed to the
#' generic as.data.frame function (not used at present).
#' @return A dataframe containing some or all of the data from the
#' original S3 object; see Details.
#' @name as.data.frame
NULL
#' @rdname as.data.frame
#' @export
as.data.frame.listOfBlueprints <- function(x, row.names = NULL,
                                           optional = FALSE, ...) {
  # Empty input: return a zero-row frame with the expected character columns.
  if (length(x) == 0) {
    return(data.frame(projectId = character(), modelType = character(),
                      expandedModel = character(),
                      blueprintId = character(),
                      stringsAsFactors = FALSE))
  }
  # Build one single-row frame per blueprint, then combine once; the old
  # code rbind-ed inside the loop, which is quadratic in the list length.
  rows <- lapply(x, function(bp) {
    # expandedModel joins the model type with its processing steps using
    # "::"; union() keeps the model type from appearing twice.
    data.frame(projectId = bp$projectId,
               modelType = bp$modelType,
               expandedModel = paste(union(bp$modelType, bp$processes),
                                     collapse = "::"),
               blueprintId = bp$blueprintId,
               stringsAsFactors = FALSE)
  })
  # unname() so element names of x cannot leak into row names or collide
  # with rbind.data.frame's own argument names.
  sumFrame <- do.call(rbind.data.frame, unname(rows))
  if (!is.null(row.names)) {
    rownames(sumFrame) <- row.names
  }
  sumFrame
}
#' @rdname as.data.frame
#' @export
as.data.frame.listOfFeaturelists <- function(x, row.names = NULL,
                                             optional = FALSE, ...) {
  # Empty input: zero-row frame with the expected columns; the features
  # column is a plain list (the AsIs wrapper from I() is stripped).
  if (length(x) == 0) {
    emptyFrame <- data.frame(featurelistId = character(),
                             projectId = character(),
                             features = I(list()),
                             name = character(),
                             stringsAsFactors = FALSE)
    class(emptyFrame$features) <- "list"
    return(emptyFrame)
  }
  # Convert each featurelist to a data frame and combine in a single step
  # instead of rbind-ing inside a loop (quadratic growth).
  rows <- lapply(x, as.data.frame, stringsAsFactors = FALSE)
  sumFrame <- do.call(rbind.data.frame, unname(rows))
  if (!is.null(row.names)) {
    rownames(sumFrame) <- row.names
  }
  sumFrame
}
#' @rdname as.data.frame
#' @export
as.data.frame.listOfModels <- function(x, row.names = NULL,
                                       optional = FALSE, simple = TRUE, ...) {
  # 'simple' selects between the compact layout (8 columns) and the full
  # layout (all fields plus every metric per partition).
  if (!is.logical(simple)) { stop("simple must be TRUE or FALSE") }
  nList <- length(x)
  # Empty input: zero-row frame with the simple-layout columns.
  if (nList == 0) {
    return(data.frame(modelType = character(),
                      expandedModel = character(),
                      modelId = character(),
                      blueprintId = character(),
                      featurelistName = character(),
                      featurelistId = character(),
                      samplePct = numeric(),
                      validationMetric = numeric(),
                      stringsAsFactors = FALSE))
  }
  outFrame <- NULL
  if (isTRUE(simple)) {
    # Simple layout: one row per model with identifying fields plus the
    # validation value of the project's chosen metric.
    for (i in 1:nList) {
      element <- x[[i]]
      modelType <- element$modelType
      # expandedModel = model type plus its processing steps joined by "::";
      # union() avoids repeating the model type.
      components <- union(modelType, element$processes)
      expandedModel <- paste(components, collapse = "::")
      modelId <- element$modelId
      blueprintId <- element$blueprintId
      # Placeholder strings are substituted when a model has no single
      # featurelist (the fields come back NULL).
      featurelistName <- element$featurelistName
      if (is.null(featurelistName)) { featurelistName <- "Multiple featurelists" }
      featurelistId <- element$featurelistId
      if (is.null(featurelistId)) { featurelistId <- "Multiple featurelist ids" }
      samplePct <- element$samplePct
      if (is.null(samplePct)) { samplePct <- NA }
      # Look up the project metric's validation score; NA when that metric
      # is absent from the model's metrics list.
      metricToReturn <- element$projectMetric
      allMetrics <- element$metrics
      metricIndex <- which(names(allMetrics) == metricToReturn)
      if (length(metricIndex) > 0) {
        validationMetric <- allMetrics[[metricIndex]]$validation
      } else {
        validationMetric <- NA
      }
      upFrame <- data.frame(modelType = modelType,
                            expandedModel = expandedModel,
                            modelId = modelId, blueprintId = blueprintId,
                            featurelistName = featurelistName,
                            featurelistId = featurelistId,
                            samplePct = samplePct,
                            validationMetric = validationMetric,
                            stringsAsFactors = FALSE)
      outFrame <- rbind.data.frame(outFrame, upFrame)
    }
    if (!is.null(row.names)) {
      rownames(outFrame) <- row.names
    }
  } else {
    # Full layout: identifying fields, project fields, and every metric for
    # the validation / crossValidation / holdout partitions, with column
    # names suffixed by the partition name.
    for (i in 1:nList) {
      element <- x[[i]]
      modelType <- element$modelType
      components <- union(modelType, element$processes)
      expandedModel <- paste(components, collapse = "::")
      modelId <- element$modelId
      blueprintId <- element$blueprintId
      featurelistName <- element$featurelistName
      if (is.null(featurelistName)) { featurelistName <- "Multiple featurelists" }
      featurelistId <- element$featurelistId
      if (is.null(featurelistId)) { featurelistId <- "Multiple featurelist ids" }
      samplePct <- element$samplePct
      if (is.null(samplePct)) { samplePct <- NA }
      modelCategory <- element$modelCategory
      projectName <- element$projectName
      projectId <- element$projectId
      projectTarget <- element$projectTarget
      projectMetric <- element$projectMetric
      firstFrame <- data.frame(modelType = modelType,
                               expandedModel = expandedModel,
                               modelId = modelId, blueprintId = blueprintId,
                               featurelistName = featurelistName,
                               featurelistId = featurelistId,
                               samplePct = samplePct,
                               modelCategory = modelCategory,
                               projectName = projectName,
                               projectId = projectId,
                               projectTarget = projectTarget,
                               projectMetric = projectMetric,
                               stringsAsFactors = FALSE)
      # One single-row frame of metric values per partition, produced by
      # BuildMetricFrame() below.
      validFrame <- BuildMetricFrame(element, "validation")
      colnames(validFrame) <- paste(colnames(validFrame), "validation",
                                    sep = ".")
      crossFrame <- BuildMetricFrame(element, "crossValidation")
      colnames(crossFrame) <- paste(colnames(crossFrame),
                                    "crossValidation", sep = ".")
      holdFrame <- BuildMetricFrame(element, "holdout")
      colnames(holdFrame) <- paste(colnames(holdFrame), "holdout", sep = ".")
      secondFrame <- cbind.data.frame(validFrame, crossFrame, holdFrame)
      upFrame <- cbind.data.frame(firstFrame, secondFrame)
      outFrame <- rbind.data.frame(outFrame, upFrame)
    }
    if (!is.null(row.names)) {
      rownames(outFrame) <- row.names
    }
  }
  outFrame
}
# Builds a one-row dataframe holding, for every metric in model$metrics, the
# value recorded for the requested evaluation partition ("validation",
# "crossValidation", or "holdout"). Columns are named after the metrics.
BuildMetricFrame <- function(model, evaluation) {
  metrics <- model$metrics
  # Extract the requested partition's value from each metric and bind the
  # single-column frames together in one step; the old code grew the frame
  # with cbind inside a loop.
  pieces <- lapply(metrics, function(oneMetric) {
    data.frame(oneMetric[which(names(oneMetric) == evaluation)],
               stringsAsFactors = FALSE)
  })
  # unname() so metric names cannot collide with cbind.data.frame's own
  # argument names; column names are set explicitly afterwards (this, like
  # the original, assumes each metric contributes exactly one column).
  metricFrame <- do.call(cbind.data.frame, unname(pieces))
  colnames(metricFrame) <- names(metrics)
  metricFrame
}
#' Convert the project summary list to a dataframe
#'
#' If simple = TRUE (the default), this method returns a dataframe with
#' one row for each model and the following columns: projectName, projectId,
#' created, fileName, target, targetType, positiveClass, metric,
#' autopilotMode, stage, maxTrainPct, and holdoutUnlocked.
#' If simple = FALSE, a dataframe is constructed from all elements of
#' projectSummaryList.
#'
#' @rdname as.data.frame
#' @export
as.data.frame.projectSummaryList <- function(x, row.names = NULL,
                                             optional = FALSE,
                                             simple = TRUE, ...) {
  if (!is.logical(simple)) { stop("simple must be TRUE or FALSE") }
  # Core project fields reported in both the simple and the full layout.
  coreFrame <- data.frame(projectName = x$projectName,
                          projectId = x$projectId,
                          created = x$created, fileName = x$fileName,
                          target = x$target, targetType = x$targetType,
                          positiveClass = x$positiveClass, metric = x$metric,
                          autopilotMode = x$autopilotMode, stage = x$stage,
                          maxTrainPct = x$maxTrainPct,
                          holdoutUnlocked = x$holdoutUnlocked,
                          stringsAsFactors = FALSE)
  if (isTRUE(simple)) {
    result <- coreFrame
  } else {
    # Full layout appends the partition and advanced-options fields.
    result <- cbind.data.frame(coreFrame, x$partition, x$advancedOptions,
                               stringsAsFactors = FALSE)
  }
  if (!is.null(row.names)) {
    rownames(result) <- row.names
  }
  result
}
#' @rdname as.data.frame
#' @export
as.data.frame.listOfDataRobotPredictionDatasets <- function(x, row.names = NULL,
                                                            optional = FALSE, ...) {
  # Empty input: zero-row frame with the expected columns.
  if (length(x) == 0) {
    return(data.frame(numColumns = numeric(),
                      name = character(),
                      created = character(),
                      projectId = character(),
                      numRows = numeric(),
                      id = character(),
                      forecastPoint = numeric(),
                      predictionStartDate = numeric(),
                      predictionEndDate = numeric(),
                      stringsAsFactors = FALSE))
  }
  # BUG FIX: the old loop always converted x[[1]], so every output row
  # repeated the first dataset; each element is now converted individually.
  rows <- lapply(x, function(dataset) {
    # Patch NULL values to NA so as.data.frame keeps the column.
    dataset <- lapply(dataset, function(y) if (is.null(y)) { NA } else { y })
    as.data.frame(dataset, stringsAsFactors = FALSE)
  })
  sumFrame <- do.call(rbind.data.frame, unname(rows))
  if (!is.null(row.names)) {
    rownames(sumFrame) <- row.names
  }
  sumFrame
}
|
dc9e48b032108739234c213e45fced98cd41e193
|
c1a38a900066e7a679e5f77108fc29fe4f5e4b15
|
/man/xmu_safe_run_summary.Rd
|
e7437b7669300c6f4a29085b6db19ff0eac2f2b6
|
[] |
no_license
|
qingwending/umx
|
3024a3cc8bb64c244d79b530add3797309a659fd
|
810c93657407058efb75ae7e46d102375f7bcf93
|
refs/heads/master
| 2020-06-20T11:34:51.982246
| 2019-07-14T14:14:38
| 2019-07-14T14:14:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,624
|
rd
|
xmu_safe_run_summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu.R
\name{xmu_safe_run_summary}
\alias{xmu_safe_run_summary}
\title{Safely run and summarize a model}
\usage{
xmu_safe_run_summary(model1, model2 = NULL, autoRun = TRUE,
tryHard = c("no", "yes", "mxTryHard", "mxTryHardOrdinal",
"mxTryHardWideSearch"), summary = !umx_set_silent(silent = TRUE),
show = c("none", "raw", "std", "list of column names"),
comparison = TRUE)
}
\arguments{
\item{model1}{The model to attempt to run and summarize.}
\item{model2}{Optional second model to compare with model1.}
\item{autoRun}{Whether to run or not (default = TRUE) Options are FALSE and "if needed".}
\item{tryHard}{Default ('no') uses normal mxRun. "yes" uses mxTryHard. Other options: "mxTryHardOrdinal", "mxTryHardWideSearch"}
\item{summary}{Whether to print model summary (default = autoRun).}
\item{show}{What to print in summary (default "none") (alternatives: "raw", "std", "list of column names")}
\item{comparison}{Toggle to allow not making comparison, even if second model is provided (more flexible in programming).}
}
\value{
\itemize{
\item \code{\link[=mxModel]{mxModel()}}
}
}
\description{
The main benefit is that it returns the model, even if it can't be run.
The function will run the model if requested, wrapped in \code{\link[=tryCatch]{tryCatch()}} to avoid throwing an error.
If summary = TRUE then \code{\link[=umxSummary]{umxSummary()}} is requested (again, wrapped in try).
\emph{note}: If autoRun is logical, then it over-rides summary to match autoRun. This is useful for easy use umxRAM and twin models.
}
\examples{
m1 = umxRAM("tim", data = mtcars,
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(v.m. = c("wt", "disp", "mpg"))
)
m2 = umxModify(m1, "wt_to_mpg")
# Summary ignored if run is false
xmu_safe_run_summary(m1, autoRun = FALSE, summary = TRUE)
# Run, no summary
xmu_safe_run_summary(m1, autoRun = TRUE, summary = FALSE)
# Default summary is just fit string
xmu_safe_run_summary(m1, autoRun = TRUE, summary = TRUE)
# Show std parameters
xmu_safe_run_summary(m1, autoRun = TRUE, summary = TRUE, show = "std")
# Run + Summary + comparison
xmu_safe_run_summary(m1, m2, autoRun = TRUE, summary = TRUE)
# Run + Summary + no comparison
xmu_safe_run_summary(m1, m2, autoRun = TRUE, summary = TRUE, show = "std", comparison= FALSE)
}
\seealso{
\itemize{
\item \code{\link[=mxTryHard]{mxTryHard()}}
}
Other xmu internal not for end user: \code{\link{umxModel}},
\code{\link{umx}}, \code{\link{xmuHasSquareBrackets}},
\code{\link{xmuLabel_MATRIX_Model}},
\code{\link{xmuLabel_Matrix}},
\code{\link{xmuLabel_RAM_Model}}, \code{\link{xmuMI}},
\code{\link{xmuMakeDeviationThresholdsMatrices}},
\code{\link{xmuMakeOneHeadedPathsFromPathList}},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}},
\code{\link{xmuMaxLevels}}, \code{\link{xmuMinLevels}},
\code{\link{xmuPropagateLabels}},
\code{\link{xmu_assemble_twin_supermodel}},
\code{\link{xmu_check_levels_identical}},
\code{\link{xmu_clean_label}},
\code{\link{xmu_dot_make_paths}},
\code{\link{xmu_dot_make_residuals}},
\code{\link{xmu_dot_maker}},
\code{\link{xmu_dot_move_ranks}},
\code{\link{xmu_dot_rank_str}},
\code{\link{xmu_lavaan_process_group}},
\code{\link{xmu_make_mxData}},
\code{\link{xmu_make_top_twin}},
\code{\link{xmu_model_needs_means}},
\code{\link{xmu_name_from_lavaan_str}},
\code{\link{xmu_set_sep_from_suffix}},
\code{\link{xmu_simplex_corner}},
\code{\link{xmu_start_value_list}},
\code{\link{xmu_starts}}
}
\concept{xmu internal not for end user}
|
a9dc510f92d70c58d6697d8afe819b1a7c6a2cc3
|
18347ef9bc1f489e63e83cf03338b7211d21b7c8
|
/man/match.Rd
|
fba3b3083988d81c56e1bf1f84b03649a2e0847d
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
stan-dev/posterior
|
cd1e0778f5b930b7ef97b9c1f09167f162fb9d7e
|
55e92336c2984be1a2487cdd489552a07e273d70
|
refs/heads/master
| 2023-08-18T07:53:15.023052
| 2023-08-07T08:13:36
| 2023-08-07T08:13:36
| 212,145,446
| 105
| 20
|
NOASSERTION
| 2023-08-07T08:13:37
| 2019-10-01T16:30:28
|
R
|
UTF-8
|
R
| false
| true
| 2,023
|
rd
|
match.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rvar-.R
\name{match}
\alias{match}
\alias{match.default}
\alias{match.rvar}
\alias{\%in\%}
\title{Value Matching}
\usage{
match(x, table, ...)
\method{match}{default}(x, ...)
\method{match}{rvar}(x, ...)
x \%in\% table
}
\arguments{
\item{x}{(multiple options) the values to be matched. Can be:
\itemize{
\item A base vector: see \code{\link[base:match]{base::match()}}
\item An \link{rvar}
}}
\item{table}{(vector) the values to be matched against.}
\item{...}{
Arguments passed on to \code{\link[base:match]{base::match}}
\describe{
\item{\code{nomatch}}{the value to be returned in the case when no match is
found. Note that it is coerced to \code{integer}.}
\item{\code{incomparables}}{a vector of values that cannot be matched. Any
value in \code{x} matching a value in this vector is assigned the
\code{nomatch} value. For historical reasons, \code{FALSE} is
equivalent to \code{NULL}.}
}}
}
\value{
When \code{x} is a base vector, a vector of the same length as \code{x}.
When \code{x} is an \link{rvar}, an \link{rvar} the same shape as \code{x}.
}
\description{
Generic version of \code{\link[base:match]{base::match()}}. For base vectors, returns a vector of the
positions of (first) matches of its first argument in its second. For \link{rvar}s,
returns an \link{rvar} of the matches.
}
\details{
For more information on how match behaves with base vectors, see \code{\link[base:match]{base::match()}}.
When \code{x} is an \link{rvar}, the draws of \code{x} are matched against \code{table} using
\code{\link[base:match]{base::match()}}, and the result is returned as an \link{rvar}.
The implementation of \code{\%in\%} here is identical to \code{base::\%in\%}, except
it uses the generic version of \code{match()} so that non-base vectors (such
as \link{rvar}s) are supported.
}
\examples{
x <- rvar(c("a","b","b","c","d"))
x \%in\% c("b","d")
# for additional examples, see base::match()
}
|
71ba12b5e088e844efc9277bd8802e78e271e2ac
|
04405e66b5736b252ecb49aecf4503929c76e35f
|
/man/attribute.project.Rd
|
fda35e740dfab03d2ad6023b394e3e3cc15b7583
|
[] |
no_license
|
fridde/OilSandsTools
|
eb1029dc004750320d247e9041c3a426484148cc
|
3dcb49cd5a9288bb2104418fad242392b941edaa
|
refs/heads/master
| 2021-01-19T00:12:50.576487
| 2013-03-20T13:58:16
| 2013-03-20T13:58:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,161
|
rd
|
attribute.project.Rd
|
\name{attribute.project}
\alias{attribute.project}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
attribute.project(project.values.string, columns.to.include = "")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{project.values.string}{
%% ~~Describe \code{project.values.string} here~~
}
\item{columns.to.include}{
%% ~~Describe \code{columns.to.include} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (project.values.string, columns.to.include = "")
{
if (length(columns.to.include) == 1 && columns.to.include ==
"") {
columns.to.include = names(attribution.table)
columns.to.include = columns.to.include[columns.to.include !=
"Main.Compilation"]
}
values.to.test = rep.int(FALSE, nrow(attribution.table))
for (ii in 1:nrow(attribution.table)) {
values.to.test[ii] = all(project.values.string == paste(attribution.table[ii,
columns.to.include]))
}
if (length(which(values.to.test)) == 1) {
Main.Compilation = attribution.table$Main.Compilation[which(values.to.test)]
}
else {
Main.Compilation = NA
}
return(Main.Compilation)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
d0b8212ccb2b9b4e371e2d6117f7bc599cf1620e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/expss/examples/vlookup.Rd.R
|
b5deb838d71138c60694bc1dd3c5cefe617eb1eb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,767
|
r
|
vlookup.Rd.R
|
library(expss)
### Name: vlookup
### Title: Look up values in dictionary.
### Aliases: vlookup vlookup_df add_columns .add_columns
### ** Examples
# with data.frame
dict = data.frame(num=1:26, small=letters, cap=LETTERS, stringsAsFactors = FALSE)
rownames(dict) = paste0('rows', 1:26)
identical(vlookup_df(1:3, dict), dict[1:3,]) # should be TRUE
vlookup(c(45,1:3,58), dict, result_column='cap')
vlookup_df(c('z','d','f'), dict, lookup_column = 'small')
vlookup_df(c('rows7', 'rows2', 'rows5'), dict, lookup_column = 'row.names')
# with vector
dict=1:26
names(dict) = letters
vlookup(c(2,4,6), dict, result_column='row.names')
# The same results
vlookup(c(2,4,6), dict, result_column='rownames')
vlookup(c(2,4,6), dict, result_column='names')
# example for 'add_columns' from base 'merge'
authors = sheet(
surname = c("Tukey", "Venables", "Tierney", "Ripley", "McNeil"),
nationality = c("US", "Australia", "US", "UK", "Australia"),
deceased = c("yes", rep("no", 4))
)
books = sheet(
surname = c("Tukey", "Venables", "Tierney",
"Ripley", "Ripley", "McNeil", "R Core"),
title = c("Exploratory Data Analysis",
"Modern Applied Statistics ...",
"LISP-STAT",
"Spatial Statistics", "Stochastic Simulation",
"Interactive Data Analysis",
"An Introduction to R")
)
add_columns(books, authors)
# Just for fun. Examples borrowed from Microsoft Excel.
# It is not the R way of doing things.
# Example 2
ex2 = utils::read.table(header = TRUE, text = "
Item_ID Item Cost Markup
ST-340 Stroller 145.67 0.30
BI-567 Bib 3.56 0.40
DI-328 Diapers 21.45 0.35
WI-989 Wipes 5.12 0.40
AS-469 Aspirator 2.56 0.45
", stringsAsFactors = FALSE)
# Calculates the retail price of diapers by adding the markup percentage to the cost.
vlookup("DI-328", ex2, 3) * (1 + vlookup("DI-328", ex2, 4)) # 28.9575
# Calculates the sale price of wipes by subtracting a specified discount from
# the retail price.
(vlookup("WI-989", ex2, "Cost") * (1 + vlookup("WI-989", ex2, "Markup"))) * (1 - 0.2) # 5.7344
A2 = ex2[1, "Item_ID"]
A3 = ex2[2, "Item_ID"]
# If the cost of an item is greater than or equal to $20.00, displays the string
# "Markup is nn%"; otherwise, displays the string "Cost is under $20.00".
ifelse(vlookup(A2, ex2, "Cost") >= 20,
paste0("Markup is " , 100 * vlookup(A2, ex2, "Markup"),"%"),
"Cost is under $20.00") # Markup is 30%
# If the cost of an item is greater than or equal to $20.00, displays the string
# Markup is nn%"; otherwise, displays the string "Cost is $n.nn".
ifelse(vlookup(A3, ex2, "Cost") >= 20,
paste0("Markup is: " , 100 * vlookup(A3, ex2, "Markup") , "%"),
paste0("Cost is $", vlookup(A3, ex2, "Cost"))) #Cost is $3.56
# Example 3
ex3 = utils::read.table(header = TRUE, text = "
ID Last_name First_name Title Birth_date
1 Davis Sara 'Sales Rep.' 12/8/1968
2 Fontana Olivier 'V.P. of Sales' 2/19/1952
3 Leal Karina 'Sales Rep.' 8/30/1963
4 Patten Michael 'Sales Rep.' 9/19/1958
5 Burke Brian 'Sales Mgr.' 3/4/1955
6 Sousa Luis 'Sales Rep.' 7/2/1963
", stringsAsFactors = FALSE)
# If there is an employee with an ID of 5, displays the employee's last name;
# otherwise, displays the message "Employee not found".
if_na(vlookup(5, ex3, "Last_name"), "Employee not found") # Burke
# Many employees
if_na(vlookup(1:10, ex3, "Last_name"), "Employee not found")
# For the employee with an ID of 4, concatenates the values of three cells into
# a complete sentence.
paste0(vlookup(4, ex3, "First_name"), " ",
vlookup(4, ex3, "Last_name"), " is a ",
vlookup(4, ex3, "Title")) # Michael Patten is a Sales Rep.
|
25460338381aeb1abb539c65184a030548d5210a
|
44d0ba82f86729e2f6966f107911d16c99bb6722
|
/R/scoreVars.R
|
cda6076fe38f0debbf9955bac100c2b2ad357650
|
[] |
permissive
|
isglobal-brge/nlOmicAssoc
|
6f51e329f820dad39173e7632ef6d36c037d737f
|
8bd77d15c1ce426afb9594c824678632d3ab9816
|
refs/heads/master
| 2021-01-20T01:14:17.247921
| 2019-02-21T12:21:40
| 2019-02-21T12:21:40
| 89,244,014
| 0
| 1
|
MIT
| 2018-09-14T08:08:36
| 2017-04-24T13:34:13
|
R
|
UTF-8
|
R
| false
| false
| 468
|
r
|
scoreVars.R
|
#' Score the variables of the selected_vars slot in an nlAssoc object
#'
#' Each variable's score is the number of probes that selected it divided by
#' the number of tested probes, so many null results lower the score.
#'
#' @param res an nlAssoc object as obtained by nlOmicAssoc()
#' @return A table of per-variable selection frequencies, sorted in
#'   decreasing order; invisibly NULL when \code{res} is NULL.
#'
#' @export scoreVars
scoreVars <- function (res)
{
  if (!(is.null(res))) {
    nProbes <- length(res$selected_vars)
    # Count how often each variable appears across probes, most frequent
    # first, and normalise by the number of probes.
    sort(table(unlist(res$selected_vars)), decreasing = TRUE) / nProbes
  }
}
|
d597cd0c1b7228ca490168af09e81770cd6b60ce
|
7dd4aacc963aa0ed128f79cd65812343dc1838fb
|
/corresp.R
|
5d8eea959f7f1b7def6372af262896e9f39ad40c
|
[] |
no_license
|
abraham314/multivgit
|
4004ac28b211728d3a3a35989a967767c6ee3be0
|
cdb45853ae34a171a3a0d29bf2fd365a91ef590b
|
refs/heads/master
| 2020-05-27T07:01:58.011353
| 2017-05-11T13:02:14
| 2017-05-11T13:02:14
| 82,531,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
corresp.R
|
# Chi-square test of independence between hair and eye colour using the
# built-in HairEyeColor dataset (a Hair x Eye x Sex contingency table).
datasets::HairEyeColor
help("HairEyeColor",package = "datasets")
summary(datasets::HairEyeColor)
# Collapse the Sex dimension by summing over the Hair (1) and Eye (2) margins.
Haireye<-apply(HairEyeColor,c(1,2),sum)
Haireye
test<-chisq.test(Haireye)
# Density of the chi-square distribution with df = 9 for reference
# (matches the test's degrees of freedom for a 4 x 4 table).
plot(function(x) dchisq(x,df=9),xlim=c(0,150))
# Observed counts vs expected counts under independence.
test$observed
test$expected
|
f08f42929d078477bfdcdc0f89aa4d1d7e39eae4
|
00de211efeabe8582a307c517f83ed0f3dcd9e98
|
/plot3.R
|
11c6bda5565dcc9edf12c2d29a89ed9e99d7c94f
|
[] |
no_license
|
macjoan/ExData_Plotting1
|
ff91a22f1b71177d67cae6aa32972885f199c79b
|
bdc78ce47bd05e579e7692bcb507112ddce5d2b6
|
refs/heads/master
| 2021-01-04T06:53:35.450800
| 2020-02-24T04:36:38
| 2020-02-24T04:36:38
| 240,438,048
| 0
| 0
| null | 2020-02-14T05:50:40
| 2020-02-14T05:50:39
| null |
UTF-8
|
R
| false
| false
| 1,277
|
r
|
plot3.R
|
# plot3.R -- line chart of the three energy sub-metering series for
# 2007-02-01 and 2007-02-02, written to plot3.png.
# Read the power-consumption file and keep only the two target dates
# ("?" marks missing values in this dataset).
powerDF <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
powerDF <- subset(powerDF, powerDF$Date=="1/2/2007" | powerDF$Date == "2/2/2007")
# Convert the Date and Time columns to Date and date-time types.
powerDF$Date <- as.Date(powerDF$Date, format="%d/%m/%Y")
powerDF$Time <- strptime(powerDF$Time, format="%H:%M:%S")
# strptime() with a time-only format fills in the current date, so re-stamp
# each row's Time with its actual date before plotting.
powerDF[powerDF$Date=="2007-02-01", "Time"] <- format(powerDF[powerDF$Date=="2007-02-01", "Time"],
                                                      "2007-02-01 %H:%M:%S")
powerDF[powerDF$Date=="2007-02-02", "Time"] <- format(powerDF[powerDF$Date=="2007-02-02", "Time"],
                                                      "2007-02-02 %H:%M:%S")
# Make plot 3: draw an empty frame first, then one line per series.
png("plot3.png", width=480, height=480)
plot(powerDF$Time, powerDF$Sub_metering_1, type="n", xlab="", ylab="Energy sub metering")
lines(powerDF$Time, powerDF$Sub_metering_1)
lines(powerDF$Time, powerDF$Sub_metering_2, col = "red")
lines(powerDF$Time, powerDF$Sub_metering_3, col = "blue")
legend("topright", lty=1, col=c("black","red","blue"),
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
title(main="Energy sub-metering")
dev.off()
|
ed765dece92a5987dac6ce8f6f5f1e7495522990
|
774fb176051d79ed83955ec2bc98b83363fa23da
|
/plot4.R
|
9e3d4dc801a9d16bca4c0ee643985881e159b567
|
[] |
no_license
|
git-mhaque/ExData_Plotting1
|
185a2cd6a731e670014669877f4665719ac1cdef
|
95ed8663b2f531db93c89bef6bb401e5c8c8138c
|
refs/heads/master
| 2021-01-18T05:26:14.414152
| 2015-02-05T13:02:44
| 2015-02-05T13:02:44
| 30,299,404
| 0
| 0
| null | 2015-02-04T13:13:26
| 2015-02-04T13:13:25
| null |
UTF-8
|
R
| false
| false
| 1,299
|
r
|
plot4.R
|
# plot4.R -- draws a 2x2 panel of power-consumption plots for 2007-02-01 and
# 2007-02-02 and writes it to plot4.png.

# Read the raw data (semicolon separated) and keep the two target days.
data <- read.csv("household_power_consumption.txt", sep = ";", header = TRUE)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
sub <- data[data$Date >= as.Date("2007-02-01") & data$Date <= as.Date("2007-02-02"), ]

# Plot 4: four panels on a transparent background.
png(file = "plot4.png")
par(bg = "transparent")
par(mfrow = c(2, 2))
with(sub, {
  # Rebuild full timestamps once from the separate Date and Time columns
  # (the old code recomputed this for every panel).
  datetime <- as.POSIXlt(paste(Date, Time))
  # Panel 1: global active power over time.
  plot(datetime, as.numeric(as.character(Global_active_power)),
       type = "l", xlab = "", ylab = "Global Active Power")
  # Panel 2: voltage over time.
  plot(datetime, as.numeric(as.character(Voltage)),
       type = "l", xlab = "datetime", ylab = "Voltage")
  # Panel 3: the three sub-metering series.
  plot(datetime, as.numeric(as.character(Sub_metering_1)),
       type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
  lines(datetime, as.numeric(as.character(Sub_metering_2)), col = "red")
  lines(datetime, as.numeric(as.character(Sub_metering_3)), col = "blue")
  # BUG FIX: legend colours now match the drawn lines (2 = red, 3 = blue);
  # the old call listed them as blue/red and passed a stray empty argument.
  legend("topright", lty = c("solid", "solid", "solid"),
         col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  # Panel 4: global reactive power over time.
  plot(datetime, as.numeric(as.character(Global_reactive_power)),
       type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
dev.off()
|
7e03b2dacf14234f3826500e7fb9dbe0a7f83c8c
|
3286ce572728b201ad5c8ee17211b7d8e78b380f
|
/Waldron_2016-07-15_CSAMA_metaanalysis/curatedMetagenomicData_MAE.R
|
daf59d72be01081dea8e72e0ac2f7adcb7490bfd
|
[
"CC0-1.0"
] |
permissive
|
Tatisgua/presentations
|
07b3b0584330171a9c090a559102d9ebd8bf041a
|
abb16713c23aacd9f00473f0028f25ee3f513c32
|
refs/heads/master
| 2021-04-12T03:26:42.020884
| 2017-07-31T03:47:08
| 2017-07-31T03:47:08
| 125,815,912
| 1
| 0
| null | 2018-03-19T07:08:21
| 2018-03-19T07:08:21
| null |
UTF-8
|
R
| false
| false
| 663
|
r
|
curatedMetagenomicData_MAE.R
|
## curatedMetagenomicData and ExperimentHub demo
## requires bioc-devel (2.4)
library(ExperimentHub)
# Open the hub and look up the curatedMetagenomicData resources.
eh = ExperimentHub()
myquery = query(eh, "curatedMetagenomicData")
View(mcols(myquery))
# Interactive selection of hub records (returns the chosen subset).
candela.eh = display(myquery)
## candela.ids = c("EH2", "EH6", "EH7")
# Download each selected resource from the hub by id.
candela.list = lapply(names(candela.eh), function(id) candela.eh[[id]])
names(candela.list) = candela.eh$title
library(MultiAssayExperiment)
# Combine the downloaded experiments; pData is taken from the second
# element -- NOTE(review): this assumes all elements share that pData.
mae = MultiAssayExperiment(ExperimentList = candela.list,
                           pData = pData(candela.list[[2]]))
experiments(mae)
rownames(mae)
colnames(mae)
# Three-index subsetting: rows, columns (samples), experiments.
mae[1:6, , ]
mae[, pData(mae)$gender == "male", ]
mae[, , 1:2]
## Subsetting by CharacterList
|
140254e7eea483b9d7b84c186e79c4a677d15536
|
b2135b13e71233c9d9c0bc712b44e98a4b0aceb2
|
/plot3.R
|
b1d5e75558fdeb3444b50f7d9536af7bb53a64f5
|
[] |
no_license
|
yunshun26/ExData_Plotting1
|
65897c99b150afc68f91bba04cbf53d424b09b18
|
4b3799b16eb1876e9ae83d29e43765a34ffc5ffb
|
refs/heads/master
| 2021-01-15T13:29:45.517646
| 2017-08-12T14:51:19
| 2017-08-12T14:51:19
| 99,675,455
| 0
| 0
| null | 2017-08-08T09:29:37
| 2017-08-08T09:29:36
| null |
UTF-8
|
R
| false
| false
| 1,892
|
r
|
plot3.R
|
#-----------------------------------------------------------------------------
# THIS R SCRIPT GENERATES PLOT 3 AND SAVES IT TO FILE IN PNG FORMAT
#-----------------------------------------------------------------------------
## Set library and current working directory
## NOTE(review): hard-coded setwd() makes the script machine-specific.
library(sqldf)
setwd("C:/Users/catherine/projects/ExData_Plotting1")
## Read only the rows for Dates 1/2/2007 and 2/2/2007 via an SQL filter.
## NOTE(review): stringsAsFactors=F uses the reassignable alias F for FALSE.
file="./household_power_consumption.txt"
pdata <- read.csv.sql(file, sql="select * from file where Date in ('1/2/2007',
        '2/2/2007')", sep=";", header=TRUE, stringsAsFactors=F, colClasses=
        c("character","character","numeric","numeric","numeric",
        "numeric","numeric","numeric","numeric"))
#------------------------------------------------------------------------------
# Plot 3 - Scatterplots of Energy sub metering vs DateTime
#------------------------------------------------------------------------------
## Remove incomplete observations
pdata <- pdata[complete.cases(pdata),]
## Combine Date and Time fields into a single POSIXct DateTime column.
pdata$Date <- as.Date(pdata$Date,"%d/%m/%Y")
pdata$DateTime <- paste(pdata$Date, pdata$Time)
pdata$DateTime <- as.POSIXct(pdata$DateTime)
## Sub_metering_1 plot (black line)
with(pdata, plot(DateTime, Sub_metering_1, type="l", ylim=c(0,38), xlab="",
     ylab="Energy sub metering"))
## Sub_metering_2 plot (red line)
points(pdata$DateTime, pdata$Sub_metering_2, type="l", col="red")
## Sub_metering_3 plot (blue line)
points(pdata$DateTime, pdata$Sub_metering_3, type="l", col="blue")
## Legend
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       lty=c(1,1,1),col=c("black","red","blue"))
#-----------------------------------------------------------------------------
# Copy the on-screen plot to a PNG file and close the device.
#-----------------------------------------------------------------------------
dev.copy(png, "plot3.png", width=480, height=480)
dev.off()
|
0095a67ae7a847cf831836bc3038fab60c735ebb
|
1ec02088714f94bfcf8e6d20cd4f7153b5e60427
|
/processed_data/preprocessing_scripts/R_functions/visualize_data.R
|
cc654e90e80590598ff3a7f19130800e59477d70
|
[
"MIT"
] |
permissive
|
kristabh/gaze-following-analysis
|
0592472328034d90baf769e48b12671b573b4767
|
e84238f827a2f7e00983dd123c37c96e33fc9c1a
|
refs/heads/master
| 2022-02-25T10:15:27.106172
| 2022-02-08T22:42:05
| 2022-02-08T22:42:05
| 171,480,702
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,715
|
r
|
visualize_data.R
|
## make plots to inspect data quality
library(ggplot2)
library(RColorBrewer)
## Plot raw gaze traces per participant and per trial so data quality can be
## inspected by eye, writing all plots for one lab into a single multi-page
## PDF under '~/Gaze following/visualizations/'.
##
## Argument x is a positional list of: raw sample-level data `d`,
## fixation-level `output_data`, the right/left/face AOI corner matrices,
## screen and stimulus dimensions (px), the tracker sample rate (Hz), and the
## lab label used in the output file name. (Layout assumed from usage here --
## confirm against the preprocessing caller.)
## Returns the value of dev.off(); called for its PDF side effect.
visualize_data <- function(x){
  d <- x[[1]]            # raw samples: one row per tracker sample
  output_data <- x[[2]]  # fixations: Start/End in ms, mean_x, Trial, ...
  aoir <- x[[3]]         # right target AOI corner coordinates
  aoil <- x[[4]]         # left target AOI corner coordinates
  aoif <- x[[5]]         # face AOI corner coordinates
  SCREEN_RES_HEIGHT_PX <- x[[6]]  # unpacked for completeness; not used below
  SCREEN_RES_WIDTH_PX <- x[[7]]   # not used below
  STIM_HEIGHT_PX <- x[[8]]        # not used below
  STIM_WIDTH_PX <- x[[9]]         # not used below
  SAMPLERATE <- x[[10]]  # samples per second
  LAB <- x[[11]]         # lab label for the output file name
  wd <- '~/Gaze following/visualizations/'
  p <- list()
  for(pp in unique(d$PP_NAME)){
    fd <- output_data[output_data$PP_NAME == pp,]
    rd <- d[d$PP_NAME == pp,]
    # Within-trial time in seconds: number the samples of each consecutive
    # TRIAL_INDEX run from 1, then divide by the sample rate.
    # (Fixed: rle() returns component 'lengths'; '$len' relied on partial
    # matching.)
    rd$time <- as.vector(unlist(sapply(rle(rd$TRIAL_INDEX)$lengths, function(n) seq_len(n)))) / SAMPLERATE
    p[[pp]] <- list()
    for(trial in unique(fd$Trial)){
      fdt <- fd[fd$Trial == trial,]
      rdt <- rd[rd$TRIAL_INDEX == trial,]
      colourCount <- nrow(fdt)  # one colour per detected fixation
      getPalette <- colorRampPalette(brewer.pal(9, "Paired"))
      colY <- head(brewer.pal(11, "RdYlBu"), 2)  # trace colours for Y signals
      colX <- tail(brewer.pal(11, "RdYlBu"), 2)  # trace colours for X signals
      # Time runs downward (ylim reversed). Shaded rectangles mark the face
      # AOI (first 4 s), the two target AOIs (green on the side matching
      # fdt$congruent -- presumably the gazed-at side; confirm), and one
      # rectangle per fixation coloured by onset order.
      p[[pp]][[trial]] <- ggplot(data = rdt, aes(x = LX, y = time)) +
        ylim(tail(rdt$time, 1), 0) + xlim(aoil[1,1], aoir[3,1]) +
        annotate('rect', xmin=aoif[1,1], xmax=aoif[3,1], ymin=-Inf, ymax=4, fill = 'blue', alpha = .2) +
        annotate('rect', xmin=aoil[1,1], xmax=aoil[3,1], ymin=4, ymax=Inf, fill = ifelse(fdt$congruent[1] == 'L', 'green', 'red')
                 , alpha = .2) +
        annotate('rect', xmin=aoir[1,1], xmax=aoir[3,1], ymin=4, ymax=Inf, fill = ifelse(fdt$congruent[1] == 'R', 'green', 'red')
                 , alpha = .2) +
        annotate('rect', xmin=fdt$mean_x - 25, xmax=fdt$mean_x + 25, ymin=fdt$Start/1000, ymax=fdt$End/1000,
                 fill = getPalette(colourCount), alpha = .7, col = 'black') +
        geom_path(data = rdt, aes(x = LX, col = colX[1]), na.rm = TRUE) +
        geom_path(data = rdt, aes(x = RX, col = colX[2]), na.rm = TRUE) +
        geom_path(data = rdt, aes(x = LY, col = colY[1]), na.rm = TRUE) +
        geom_path(data = rdt, aes(x = RY, col = colY[2]), na.rm = TRUE) +
        labs(title = paste('Participant =', pp, ' ~ Trial =', fdt$TRIAL_NAME[1]),
             x = 'X- and Y-coordinates', y = 'Time (seconds)') +
        scale_color_manual(values=c(colX, colY),
                           breaks=c(colX, colY),
                           name="Raw data\nsignal",
                           labels=c("Left eye X", "Right eye X", "Left eye Y", "Right eye Y")) +
        theme_minimal()
    }
  }
  # Render every stored plot onto one multi-page PDF for this lab.
  pdf(paste0(wd, 'visualize_', LAB, '.pdf'), onefile = TRUE)
  for(pp in unique(d$PP_NAME)){
    # plot() draws a ggplot object onto the open device; the lapply() value
    # is deliberately discarded (it was previously assigned to an unused 'f').
    if(length(p[[pp]]) > 0) invisible(lapply(p[[pp]], function(pl) if(!is.null(pl)) plot(pl)))
  }
  dev.off()
}
|
5a711d6117efee7d4e13da23cc888629a02d3324
|
b6b746c44ea977f62d8bb7c98137b477b931ca64
|
/man/pWNMT.Rd
|
fb08667986146f9a73458753851c4ea0ba72319a
|
[] |
no_license
|
cran/NSM3
|
77f776ba5f934652800ecb0c3fbc8f87a7428571
|
7318aa2e0e6bf4f2badf8d0ae014f297650347f4
|
refs/heads/master
| 2022-08-30T00:39:28.420513
| 2022-08-16T21:40:02
| 2022-08-16T21:40:02
| 17,681,120
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,262
|
rd
|
pWNMT.Rd
|
\name{pWNMT}
\alias{pWNMT}
\title{
Wilcoxon, Nemenyi, McDonald-Thompson
}
\description{
Function to compute the P-value for the observed Wilcoxon, Nemenyi, McDonald-Thompson R statistic.
}
\usage{
pWNMT(x,b=NA,trt=NA,method=NA, n.mc=10000, standardized=FALSE)
}
\arguments{
\item{x}{Either a matrix or a vector containing the data.}
\item{b}{If x is a vector, b is a required vector of block labels. Otherwise, not used.}
\item{trt}{If x is a vector, trt is a required vector of treatment labels. Otherwise, not used.}
\item{method}{Either "Exact", "Monte Carlo" or "Asymptotic", indicating the desired distribution. When method=NA, "Exact" will be used if the number of permutations is 10,000 or less. Otherwise, "Monte Carlo" will be used.}
\item{n.mc}{
If method="Monte Carlo", the number of Monte Carlo samples used to estimate the distribution. Otherwise, not used.
}
\item{standardized}{If TRUE, divide the observed statistic by (nk(k+1)/12)^0.5 before returning.}
}
\details{
The data entry is intended to be flexible, so that the data can be entered in either of two ways. The following are equivalent:
\code{pWNMT(x=matrix(c(1,2,3,4,5,6),ncol=2,byrow=T))}
\code{pWNMT(x=c(1,2,3,4,5,6),b=c(1,1,2,2,3,3),trt=c(1,2,1,2,1,2))}
}
\value{
Returns a list of class "NSM3Ch7MCp" containing the following components:
\item{k}{number of treatments}
\item{n}{number of blocks}
\item{obs.stat}{the observed R* statistic for each of the k*(k-1)/2 comparisons}
\item{p.val}{upper tail P-value corresponding to each observed R statistic}
}
\author{
Grant Schneider
}
\examples{
##Hollander-Wolfe-Chicken Example 7.3 Rounding First Base
RoundingTimes<-matrix(c(5.40, 5.50, 5.55, 5.85, 5.70, 5.75, 5.20, 5.60, 5.50, 5.55, 5.50, 5.40,
5.90, 5.85, 5.70, 5.45, 5.55, 5.60, 5.40, 5.40, 5.35, 5.45, 5.50, 5.35, 5.25, 5.15, 5.00, 5.85,
5.80, 5.70, 5.25, 5.20, 5.10, 5.65, 5.55, 5.45, 5.60, 5.35, 5.45, 5.05, 5.00, 4.95, 5.50, 5.50,
5.40, 5.45, 5.55, 5.50, 5.55, 5.55, 5.35, 5.45, 5.50, 5.55, 5.50, 5.45, 5.25, 5.65, 5.60, 5.40,
5.70, 5.65, 5.55, 6.30, 6.30, 6.25),nrow = 22,byrow = TRUE,dimnames = list(1 : 22,
c("Round Out", "Narrow Angle", "Wide Angle")))
pWNMT(RoundingTimes,n.mc=2500)
}
\keyword{Wilcoxon}
\keyword{Nemenyi}
\keyword{McDonald-Thompson}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.