blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8fce621ec05200bccd7efa73c31085614c86f2b9
|
26648108b95b0b50e5cc6170ef103c8bfc463078
|
/inst/design-based-spatial-autocorrelation-test.R
|
0e276d622d64d62ef0194c0cbf1a38262ac2e519
|
[] |
no_license
|
pbs-assess/gfsynopsis
|
773a49e69735432a451adaabd87f39927c7f60b2
|
0ac1a42e96791a77b0a7f77c8914c83b3e814451
|
refs/heads/master
| 2023-08-17T08:21:02.676898
| 2023-07-27T22:30:04
| 2023-07-27T22:30:04
| 122,661,487
| 11
| 2
| null | 2023-07-17T21:49:45
| 2018-02-23T19:04:12
|
TeX
|
UTF-8
|
R
| false
| false
| 3,539
|
r
|
design-based-spatial-autocorrelation-test.R
|
# Are design-based bootstrapped survey calculations affected by
# spatial autocorrelation?
#
# Short answer: I think not. Obvious in retrospect.
library(dplyr)
library(ggplot2)
# An 11 x 11 regular survey grid over the unit square; each cell receives
# N repeat samples jittered uniformly within the cell.
x <- seq(0, 1, 0.1)
y <- seq(0, 1, 0.1)
N <- 10
survey_sample <- seq(1, N)
grid1 <- expand.grid(x = x, y = y) %>% as_tibble()
plot(grid1)
# Scenario 1: spatially independent observations (iid normal noise).
grid <- expand.grid(x = x, y = y, survey_sample = survey_sample) %>%
group_by(x, y) %>%
mutate(
sampling_x = x + runif(N, 0, 0.1),
sampling_y = y + runif(N, 0, 0.1)
) %>%
mutate(value = rnorm(N, 0, 0.2))
# Sampled values overlaid on the grid-cell corners (black open circles).
ggplot(grid, aes(sampling_x, sampling_y)) +
geom_point(aes(colour = value)) +
scale_color_viridis_c() +
geom_point(data = grid1, aes(x = x, y = y, colour = NA),
colour = 'black', pch = 21, size = 3)
# Scenario 2: spatially autocorrelated field.  grouping_code identifies the
# grid cell, which acts as the stratum for the design-based estimator below.
grid <- expand.grid(x = x, y = y, survey_sample = survey_sample) %>%
mutate(grouping_code = paste(x, y, sep = '-')) %>%
group_by(x, y) %>%
mutate(
sampling_x = x + runif(N, 0, 1/N),
sampling_y = y + runif(N, 0, 1/N)
) %>% ungroup()
# Matern (nu = 1) spatial random-field parameters.
# NOTE(review): the RandomFields package has been archived on CRAN; this
# script requires a previously installed copy to run.
sigma_O <- 0.3
kappa <- 8
rf_omega <- RandomFields::RMmatern(nu = 1, var = sigma_O^2, scale = 1 / kappa)
grid$omega_s <- suppressMessages(
RandomFields::RFsimulate(model = rf_omega,
x = grid$sampling_x, y = grid$sampling_y)$variable1)
# Observed values: lognormal noise around the simulated spatial field.
grid <- grid %>%
mutate(value = exp(rnorm(nrow(grid), omega_s, 0.1)))
ggplot(grid, aes(sampling_x, sampling_y)) +
geom_point(aes(colour = omega_s)) +
scale_color_gradient2()
ggplot(grid, aes(sampling_x, sampling_y)) +
geom_point(aes(colour = value)) +
scale_color_viridis_c()
# Design-based index: mean density within each cell, then sum across cells.
group_by(grid, grouping_code) %>%
summarise(density = mean(value)) %>%
summarise(biomass = sum(density)) %>%
pull(biomass)
# Design-based biomass index for a (re)sampled survey data frame: average
# `value` within each grid cell (grouping_code), then sum those cell means.
# The `i` argument lets boot::boot() pass resampled row indices; it defaults
# to all rows so the function doubles as the point estimator.
calc_bio <- function(dat, i = seq_len(nrow(dat))) {
  resampled <- dat[i, , drop = FALSE]
  cell_means <- summarise(group_by(resampled, grouping_code),
    density = mean(value))
  total <- summarise(cell_means, biomass = sum(density))
  pull(total, biomass)
}
# Stratified nonparametric bootstrap of the design-based biomass index.
#
# Each grid cell (grouping_code) is a stratum, so resampling happens within
# cells, mirroring design-based survey variance estimation.  Returns a
# one-row tibble with the bootstrap mean/median, 95% percentile interval,
# CV, and the point estimate from the original data.
#
# The superseded dplyr::do() wrapper was removed (dplyr marks do() as
# superseded); the computation is now written directly.  This assumes an
# ungrouped `dat`, which is true for every caller in this script.
boot_biomass <- function(dat, reps = 10) {
  dat <- mutate(dat, grouping_code = as.factor(grouping_code))
  b <- boot::boot(dat, statistic = calc_bio, strata = dat$grouping_code,
    R = reps)
  # boot.ci() warns for small R; the warning is expected here.
  suppressWarnings(bci <- boot::boot.ci(b, type = "perc"))
  dplyr::tibble(
    mean_boot = mean(b$t),
    median_boot = median(b$t),
    lwr = bci$percent[[4]],
    upr = bci$percent[[5]],
    cv = sd(b$t) / mean(b$t),
    biomass = calc_bio(dat)
  )
}
# Run the stratified bootstrap on the autocorrelated data set.
out <- boot_biomass(grid, reps = 50)
head(as.data.frame(out))
# "True" biomass: the same design-based calculation applied to the noiseless
# field exp(omega_s) instead of the noisy observations.
select(grid, -value) %>%
mutate(omega_s = exp(omega_s)) %>%
rename(value = omega_s) %>%
calc_bio()
#######
# One full simulation replicate: simulate a fresh autocorrelated field,
# compute the known ("true") biomass from the field itself, then bootstrap
# the noisy observations.  Relies on the globals x, y, N, survey_sample,
# rf_omega, calc_bio and boot_biomass defined earlier in this script.
check_bio <- function() {
grid <- expand.grid(x = x, y = y, survey_sample = survey_sample) %>%
mutate(grouping_code = paste(x, y, sep = '-')) %>%
group_by(x, y) %>%
mutate(
sampling_x = x + runif(N, 0, 1/N),
sampling_y = y + runif(N, 0, 1/N)
) %>% ungroup()
grid$omega_s <- suppressMessages(
RandomFields::RFsimulate(model = rf_omega,
x = grid$sampling_x, y = grid$sampling_y)$variable1)
# Lognormal observation noise.  NOTE(review): the -(0.3^2)/2 term looks like
# a lognormal bias correction, but it is applied outside exp() -- confirm
# that this (rather than exp(rnorm(...) - 0.3^2/2)) is intended.
grid <- grid %>%
mutate(value = exp(rnorm(nrow(grid), omega_s, 0.3)) - (0.3^2)/2)
true_biomass <- select(grid, -value) %>%
mutate(omega_s = exp(omega_s)) %>%
rename(value = omega_s) %>%
calc_bio()
out <- boot_biomass(grid, reps = 25)
data.frame(out, true_biomass = true_biomass)
}
# Repeat the simulation 20 times and plot each replicate's bootstrap
# percentile interval against its known biomass, to eyeball interval
# coverage.  (seq(1:20) was a redundant wrapper around 1:20; seq_len() is
# the idiomatic, length-safe replacement, likewise for the row ids.)
out <- purrr::map_df(seq_len(20), function(x) check_bio())
out %>%
mutate(id = seq_len(nrow(out))) %>%
ggplot(aes(id, true_biomass)) +
geom_point() +
geom_linerange(aes(ymin = lwr, ymax = upr))
|
9e359a333f936a945f9a559df4195d34e6ae1fde
|
db8eeb68541dba916fa0ab9567fe9199d95bdb6a
|
/man/chargeHydropathyPlot.Rd
|
095b5c1f8d18ffbb478f13b8f42a2c1cbdc98a3b
|
[] |
no_license
|
alptaciroglu/idpr
|
f26544ffe869854a0fd636fcf7a2fa85a41efeed
|
e5f7838d27fb9ada1b10d6a3f0261a5fa8588908
|
refs/heads/master
| 2023-01-27T20:40:33.186127
| 2020-12-05T21:57:55
| 2020-12-05T21:57:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,865
|
rd
|
chargeHydropathyPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chargeHydropathyPlot.R
\name{chargeHydropathyPlot}
\alias{chargeHydropathyPlot}
\title{Charge-Hydropathy Plot}
\usage{
chargeHydropathyPlot(
sequence,
displayInsolubility = TRUE,
insolubleValue = 0.7,
proteinName = NA,
customPlotTitle = NA,
pH = 7,
pKaSet = "IPC_protein",
...
)
}
\arguments{
\item{sequence}{amino acid sequence (or pathway to a fasta file)
as a character string. Supports multiple sequences / files, as a
character vector of strings. Additionally, this supports a single protein
as character vectors. Multiple proteins are not supported as a character
vector of single characters.}
\item{displayInsolubility}{logical value, TRUE by default.
This adds (or removes when FALSE) the vertical line
separating collapsed proteins and insoluble proteins}
\item{insolubleValue}{numerical value. 0.7 by default.
Ignored when \code{displayInsolubility = FALSE}. Plots the vertical line
\eqn{<H> = insolubleValue}.}
\item{proteinName, customPlotTitle}{optional character string. NA by default.
Used to either add the name of the protein to the plot title when there
is only one protein, or to create a custom plot title for the output.}
\item{pH}{numeric value, 7.0 by default.
The environmental pH is used to calculate residue charge.}
\item{pKaSet}{pKa set used for charge calculations. See
\code{\link{netCharge}} for additional details}
\item{...}{additional arguments to be passed to
\link[idpr:netCharge]{idpr::netCharge()},
\link[idpr:meanScaledHydropathy]{idpr::meanScaledHydropathy()} or
\code{\link[ggplot2]{ggplot}}}
}
\value{
Graphical values of Charge-Hydropathy Plot
}
\description{
This function calculates the average net charge <R> and the average
scaled hydropathy <H> and visualizes the data. There are known boundaries
on the C-H plot that separate extended and collapsed proteins. \cr
This was originally described in Uversky et al. (2000)\cr
\url{https://doi.org/10.1002/1097-0134(20001115)41:3<415::AID-PROT130>3.0.CO;2-7}
. \cr
The plot returned is based on the charge-hydropathy plot from
Uversky (2016) \url{https://doi.org/10.1080/21690707.2015.1135015}. \cr
See Uversky (2019) \url{https://doi.org/10.3389/fphy.2019.00010} for
additional information and a recent review on the topic.
This plot has also been referred to as a "Uversky Plot".
}
\section{Plot Colors}{
For users who wish to keep a common aesthetic, the following colors are
used in the returned plot. \cr
\itemize{
\item Point(s) = "chocolate1" or "#ff7f24"
\item Lines = "black"}
}
\examples{
#Amino acid sequences can be character strings
aaString <- "ACDEFGHIKLMNPQRSTVWY"
#Amino acid sequences can also be character vectors
aaVector <- c("A", "C", "D", "E", "F",
"G", "H", "I", "K", "L",
"M", "N", "P", "Q", "R",
"S", "T", "V", "W", "Y")
#Alternatively, .fasta files can also be used by providing
##The path to the file as a character string
chargeHydropathyPlot(sequence = aaString)
chargeHydropathyPlot( sequence = aaVector)
#This function also supports multiple sequences
#only as character strings or .fasta files
multipleSeq <- c("ACDEFGHIKLMNPQRSTVWY",
"ACDEFGHIK",
"LMNPQRSTVW")
chargeHydropathyPlot(sequence = multipleSeq)
#since it is a ggplot, we can add additional annotations or themes
chargeHydropathyPlot(
sequence = multipleSeq) +
ggplot2::theme_void()
chargeHydropathyPlot(
sequence = multipleSeq) +
ggplot2::geom_hline(yintercept = 0,
color = "red")
#choosing the pKa set used for calculations
chargeHydropathyPlot(
sequence = multipleSeq,
pKaSet = "EMBOSS")
}
\references{
Kozlowski, L. P. (2016). IPC – Isoelectric Point Calculator. Biology
Direct, 11(1), 55. \url{https://doi.org/10.1186/s13062-016-0159-9} \cr
Kyte, J., & Doolittle, R. F. (1982). A simple method for
displaying the hydropathic character of a protein.
Journal of molecular biology, 157(1), 105-132. \cr
Uversky, V. N. (2019). Intrinsically Disordered Proteins and Their
“Mysterious” (Meta)Physics. Frontiers in Physics, 7(10).
\url{https://doi.org/10.3389/fphy.2019.00010} \cr
Uversky, V. N. (2016). Paradoxes and wonders of intrinsic disorder:
Complexity of simplicity. Intrinsically Disordered Proteins, 4(1),
e1135015. \url{https://doi.org/10.1080/21690707.2015.1135015} \cr
Uversky, V. N., Gillespie, J. R., & Fink, A. L. (2000).
Why are “natively unfolded” proteins unstructured under physiologic
conditions?. Proteins: structure, function, and bioinformatics, 41(3),
415-427.
\url{https://doi.org/10.1002/1097-0134(20001115)41:3<415::AID-PROT130>3.0.CO;2-7}
}
\seealso{
\code{\link{netCharge}} and
\code{\link{meanScaledHydropathy}}
for functions used to calculate values.
}
|
d47dfbd5ff6d7ece464e01f0d3daa4c11f9bd514
|
629d8c6ef6c86d475ac3730623dd943e872e2b90
|
/MammalAbundance/Code/Analysis_All_Years.R
|
4bd592e80195a45ca6ca5de38f31a40774e7d094
|
[
"MIT"
] |
permissive
|
djhocking/Small_Mammal_Synchrony
|
773b895b98ded209ca67156b53aa330739063d33
|
e999a31d01ff924489397d72afe094a08349d195
|
refs/heads/master
| 2021-01-13T00:15:58.384006
| 2016-02-15T15:10:51
| 2016-02-15T15:10:51
| 51,762,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,164
|
r
|
Analysis_All_Years.R
|
######################################################
# Small Mammal Abundance
# White Mountain National Forest
# Daniel J. Hocking
# with Ryan Stephens, Becca Rowe, Mariko Yamasaki
# 2013
######################################################
# Fit the Poisson 100-trap model (dm100p, defined in the sourced script) to
# each species' capture data.  The species objects (NAIN, MIPI, ...) are
# assumed to be loaded by the sourced scripts; each fit writes MCMC
# diagnostics to a PDF and a parameter summary table to CSV.
source('Code/Poisson_100.R')
dm100p.NAIN <- dm100p(NAIN, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/NAIN_P_Diagnostic.pdf", outfile2 = "Output/NAIN_Table.csv") #
dm100p.MIPI <- dm100p(MIPI, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/MIPI_P_Diagnostic.pdf", outfile2 = "Output/MIPI_Table.csv") #
dm100p.MYGA <- dm100p(MYGA, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/MYGA_P_Diagnostic.pdf", outfile2 = "Output/MYGA_Table.csv") #
dm100p.PELE <- dm100p(PELE, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/PELE_P_Diagnostic.pdf", outfile2 = "Output/PELE_Table.csv") #
dm100p.PEMA <- dm100p(PEMA, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/PEMA_P_Diagnostic.pdf", outfile2 = "Output/PEMA_Table.csv") #
dm100p.SOCI <- dm100p(SOCI, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/SOCI_P_Diagnostic.pdf", outfile2 = "Output/SOCI_Table.csv") #
dm100p.SODI <- dm100p(SODI, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/SODI_P_Diagnostic.pdf", outfile2 = "Output/SODI_Table.csv") #
dm100p.ZAHU <- dm100p(ZAHU, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/ZAHU_P_Diagnostic.pdf", outfile2 = "Output/ZAHU_Table.csv") #
dm100p.SOFU <- dm100p(SOFU, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/SOFU_P_Diagnostic.pdf", outfile2 = "Output/SOFU_Table.csv") #
dm100p.SOHO <- dm100p(SOHO, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/SOHO_P_Diagnostic.pdf", outfile2 = "Output/SOHO_Table.csv") #
dm100p.SOPA <- dm100p(SOPA, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/SOPA_P_Diagnostic.pdf", outfile2 = "Output/SOPA_Table.csv") #
dm100p.SYCO <- dm100p(SYCO, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/SYCO_P_Diagnostic.pdf", outfile2 = "Output/SYCO_Table.csv") #
dm100p.BLBR <- dm100p(BLBR, n.burn = 50000, n.it = 50000, n.thin = 25, outfile = "Output/BLBR_P_Diagnostic.pdf", outfile2 = "Output/BLBR_Table.csv") #
# Extract abundance summaries for each fitted model via the sourced helper.
source('Code/helper_functions.R')
abund.NAIN <- abund(model = dm100p.NAIN, output = "Output/N_NAIN.csv", sep = ",")
abund.MIPI <- abund(model = dm100p.MIPI, output = "Output/N_MIPI.csv", sep = ",")
abund.MYGA <- abund(model = dm100p.MYGA, output = "Output/N_MYGA.csv", sep = ",")
abund.PELE <- abund(model = dm100p.PELE, output = "Output/N_PELE.csv", sep = ",")
abund.PEMA <- abund(model = dm100p.PEMA, output = "Output/N_PEMA.csv", sep = ",")
abund.SOCI <- abund(model = dm100p.SOCI, output = "Output/N_SOCI.csv", sep = ",")
abund.SODI <- abund(model = dm100p.SODI, output = "Output/N_SODI.csv", sep = ",")
abund.ZAHU <- abund(model = dm100p.ZAHU, output = "Output/N_ZAHU.csv", sep = ",")
abund.SOFU <- abund(model = dm100p.SOFU, output = "Output/N_SOFU.csv", sep = ",")
abund.SOHO <- abund(model = dm100p.SOHO, output = "Output/N_SOHO.csv", sep = ",")
abund.SOPA <- abund(model = dm100p.SOPA, output = "Output/N_SOPA.csv", sep = ",")
abund.SYCO <- abund(model = dm100p.SYCO, output = "Output/N_SYCO.csv", sep = ",")
abund.BLBR <- abund(model = dm100p.BLBR, output = "Output/N_BLBR.csv", sep = ",")
##################### Extra ###############
#-----------NAIN ---------okay - some autocorr------
# prepare data for this model
# Add random year effect?
# Take out things that correlate with year
# Exploratory model variants (short chains) for NAIN/MYGA: N-mixture,
# Poisson, overdispersed, zero-inflated, and autoregressive-gamma forms.
# The nmixp/dmp/dm100* fitting functions come from the sourced scripts.
nmixp.nain1 <- nmixp(NAIN, n.burn = 1000, n.it = 1000, n.thin = 1, K = 1, outfile = "Output/NAIN_diagnostics_nmixp1.pdf")
nmixp.nain2 <- nmixp(NAIN, n.burn = 1000, n.it = 1000, n.thin = 1, K = 2, outfile = "Output/NAIN_diagnostics_nmixp2.pdf")
dmp.nain <- dmp(NAIN, n.burn = 1000, n.it = 1000, n.thin = 1, outfile = "Output/NAIN_diagnostics.pdf")
dmp.myga <- dmp(MYGA, n.burn = 1000, n.it = 1000, n.thin = 1, outfile = "Output/MYGA_diagnostics.pdf")
dm100p.nain <- dm100p(NAIN, n.burn = 1000, n.it = 1000, n.thin = 1, outfile = "Output/NAIN_diagnostics.pdf")
dm100pod.nain <- dm100pod(NAIN, n.burn = 1000, n.it = 3000, n.thin = 1)
dm100zip.nain <- dm100zip(NAIN, n.burn = 3000, n.it = 5000, n.thin = 3)
dm100zipod.nain <- dm100zipod(NAIN, n.burn = 3000, n.it = 5000, n.thin = 3)
dm100pgama.nain <- dm100pgama(NAIN, n.burn = 3000, n.it = 3000, n.thin = 1, outfile = "NAIN_diagnostics.pdf")
dm100pgama.myga <- dm100pgama(MYGA, n.burn = 1000, n.it = 3000, n.thin = 3, outfile = "MYGA_diagnostics.pdf")
dm100p.year.myga <- dm100p.year(MYGA, n.burn = 1000, n.it = 3000, n.thin = 3, outfile = "Output/MYGA_diagnostics.pdf")
# MCMC trace/diagnostic plots for selected parameters of each variant.
library(ggmcmc)
ggs_traceplot(ggs(dm100p.nain[ , c("p0", "p.precip","p.trap","a.N", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")]))
ggmcmc(ggs(dm100p.nain[ , c("p0", "p.precip","p.trap","a.N", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")]), file = "dm100p_NAIN.pdf")
plot(dm100p.nain[ , c("p0", "p.precip","p.trap","a.N", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")]) # "gam.a",
plot(dm100pod.nain[ , c("p0", "p.precip","p.trap","a.N", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site", "sigma.delta")])
plot(dm100zip.nain[ , c("p0", "p.precip","p.trap","a.N", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam.a", "gam1", "sigma.site", "omega")])
plot(dm100zipod.nain[ , c("p0", "p.precip","p.trap","a.N", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam.a", "gam1", "sigma.site", "sigma.delta", "omega")])
par(mfrow=c(1,1))
# Convergence/autocorrelation diagnostics on the dm100.list.nain and
# dm3.list.nain mcmc.list objects (created elsewhere -- not in this file).
summary(dm100.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.hard100", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")])
autocorr.plot(dm100.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.hard100", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")])
acfplot(dm100.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.hard100", "b.soft100", "b.age100", "b.stream100", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")])
# Check fit
# NOTE(review): each iteration overwrites bayesP.nain, so only the last
# chain's Bayesian p-value survives -- confirm whether averaging across
# chains was intended.
for(i in 1:3) bayesP.nain <- mean(dm3.list.nain[, "fit.new",][[i]] > dm3.list.nain[, "fit",][[i]]) #
print(bayesP.nain, dig = 3) #
par(mfrow=c(1,1))
plot(as.matrix(dm3.list.nain[, "fit",]), as.matrix(dm3.list.nain[, "fit.new",])) #
abline(0, 1, col = 'red')
print(gelman.diag(x=dm3.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.drainage", "b.stems", "b.softwood", "b.hardwood", "b.herb","b.cwd", "b.litter", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")]), dig=3) #
gelman.diag(dm3.list.nain) # multivariate psrf = can't run
geweke.diag(dm3.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.drainage", "b.stems", "b.softwood", "b.hardwood", "b.herb","b.cwd", "b.litter", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")])
heidel.diag(dm3.list.nain)
rejectionRate(dm3.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.drainage", "b.stems", "b.softwood", "b.hardwood", "b.herb","b.cwd", "b.litter", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")])
rejectionRate(dm3.list.nain)
accept.nain <- 1 - rejectionRate(dm3.list.nain)
HPDinterval(dm3.list.nain)
# Calculate and organize summaries for publication tables--------------
# Posterior quantiles, means, and SDs per parameter, assembled into a
# labelled summary table and written to CSV.
Quants.nain <- apply(as.matrix(dm3.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.drainage", "b.stems", "b.softwood", "b.hardwood", "b.herb","b.cwd", "b.litter", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")]), 2, FUN = quantile, probs = c(0.025, 0.5, 0.975))
Means.nain <- apply(as.matrix(dm3.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.drainage", "b.stems", "b.softwood", "b.hardwood", "b.herb","b.cwd", "b.litter", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")]), 2, FUN = mean)
SDs.nain <- apply(as.matrix(dm3.list.nain[ , c("p0", "p.precip","p.trap","a.N", "b.drainage", "b.stems", "b.softwood", "b.hardwood", "b.herb","b.cwd", "b.litter", "b.trap", "b.year2", "b.year3", "b.elev", "sigma.gam0", "gam1", "sigma.site")]), 2, FUN = sd)
nain.variables <- c("p-intercept", "Precip", "Trap Type", "N-intercept", "Drainage", "Number Stems", "Softwood basal", "Hardwood basal", "Herbaceous", "CWD", "Litter", "Trap Type", "Year (1996)", "Year (1997)", "Elevation", "Random gamma SD", "Autoreg N", "Site SD")
nain.summary <- data.frame(nain.variables, Means.nain, SDs.nain, Quants.nain["2.5%", ], Quants.nain["50%", ], Quants.nain["97.5%", ])
colnames(nain.summary) <- c("Variable", "Mean", "SD", "2.5%", "Median", "97.5%")
write.table(nain.summary, file = "nain_Summary.csv", sep = ",", col.names = NA, row.names = TRUE)
# Assemble posterior abundance summaries: for each of the 108 sites (i) and
# the two sessions (j), pull the MCMC draws of N[i,j] and store the median
# and 95% credible interval (columns 1-3 for session 1, 4-6 for session 2).
N.nain <- matrix(NA, 108, 6)
for(i in 1:108){
for(j in 1:2){
foo <- apply(as.matrix(dm100p.nain[, c(paste("N[", i,",", j, "]", sep = ""))]), 2, FUN= quantile, probs = c(0.5, 0.025, 0.975))
foo <- as.integer(foo)
for(k in 1:3){
if(j == 1) {
N.nain[i,k] <- foo[k]
}
if(j == 2){
N.nain[i,k+3] <- foo[k]
}
}
}
}
# Observed total counts per site for comparison against the estimated Ns.
ymax1 <- apply(as.matrix(NAIN[,2:9, 1]),1,sum)
ymax2 <- apply(NAIN[,10:17, 2],1,sum)
colnames(N.nain) <- c("Median1", "CI_2.5", "CI_97.5", "Median2", "CI_2.5", "CI_97.5")
cbind(ymax1, N.nain[ ,"Median1"], ymax2, N.nain[, "Median2"])
write.table(N.nain, file = "N_nain.csv", col.names = NA, row.names = TRUE, sep = ",")
# BUG FIX: there is no "Median" column (they are "Median1"/"Median2"), so
# the original range()/mean() calls errored; summarise across both sessions.
range(N.nain[, c("Median1", "Median2")]) #
mean(N.nain[, c("Median1", "Median2")]) #
#---------------Figures------------------
# Abundance
# Elevation effects
plot(Data$elev, N.nain[ , 2])
# Predict abundance over an elevation gradient by standardizing to the
# fitting scale and back-transforming the linear predictor with exp().
# NOTE(review): the "alpha.lam"/"beta1.lam" names differ from the dm100p
# parameter names used above -- confirm which model these summaries index.
elev.range <- seq(from = min(Data$elev), to = 2025, length.out = 1000)
elev.sd <- sd(Data$elev)
elev.mean <- mean(Data$elev)
elev.range.std <- (elev.range - elev.mean)/elev.sd
nain.elev.nbar <- exp(Means.nain["alpha.lam"] + Means.nain["beta1.lam"]*elev.range.std)
nain.elev.LCI <- exp(Quants.nain[1, "alpha.lam"] + Quants.nain[1, "beta1.lam"]*elev.range.std)
nain.elev.UCI <- exp(Quants.nain[3, "alpha.lam"] + Quants.nain[3, "beta1.lam"]*elev.range.std)
# NOTE(review): three graphics devices (bitmap/tiff/postscript) are opened
# back-to-back but only one dev.off() follows -- presumably two of these
# lines are meant to be commented out depending on the desired format.
bitmap("Plot_nain_Elev.tiff", height = 12, width = 17, units = 'cm', type = "tifflzw", res = 300)
tiff("Plot_nain_Elev.tiff", height = 12, width = 17, units = 'cm', compression = "lzw", res = 300)
postscript("Plot_nain_Elev-courier.eps", width = 8, height = 8, horizontal = FALSE, onefile = FALSE, paper = "special", colormodel = "cmyk", family = "Courier")
par(mar=c(3.5,3.5,1,1), mgp=c(2,0.7,0), tck=-0.01)
plot(elev.range, nain.elev.nbar, type = 'n', xlab = 'Elevation (m)', ylab = expression(paste(italic(D.)," ",italic(wrighti), " abundance")))
polygon(c(elev.range, rev(elev.range)), c(nain.elev.UCI, rev(nain.elev.LCI)), col = 'light gray', border = NA)
lines(elev.range, nain.elev.nbar)
dev.off()
# Slope
# Slope effect on abundance (same standardize/back-transform approach as
# the elevation figure above).
slope.range <- seq(from = min(Data$Slope), to = max(Data$Slope), length.out = 1000)
slope.sd <- sd(Data$Slope)
slope.mean <- mean(Data$Slope)
slope.range.std <- (slope.range - slope.mean)/slope.sd
nain.slope.nbar <- exp(Means.nain["alpha.lam"] + Means.nain["beta3.lam"]*slope.range.std)
nain.slope.LCI <- exp(Quants.nain[1, "alpha.lam"] + Quants.nain[1, "beta3.lam"]*slope.range.std)
nain.slope.UCI <- exp(Quants.nain[3, "alpha.lam"] + Quants.nain[3, "beta3.lam"]*slope.range.std)
# NOTE(review): three devices opened, one dev.off() -- presumably the device
# lines are toggled by commenting; confirm before batch runs.
bitmap("Plot_nain_slope.tiff", height = 12, width = 17, units = 'cm', type = "tifflzw", res = 300)
tiff("Plot_nain_slope.tiff", height = 12, width = 17, units = 'cm', compression = "lzw", res = 300)
postscript("Plot_nain_slope.eps", width = 8, height = 8, horizontal = FALSE, onefile = FALSE, paper = "special", colormodel = "cmyk", family = "Times")
par(mar=c(3.5,3.5,1,1), mgp=c(2,0.7,0), tck=-0.01)
plot(slope.range, nain.slope.nbar, type = 'n', xlab = 'Slope', ylab = expression(paste(italic(D.)," ",italic(wrighti), " abundance")))
polygon(c(slope.range, rev(slope.range)), c(nain.slope.UCI, rev(nain.slope.LCI)), col = 'light gray', border = NA)
lines(slope.range, nain.slope.nbar)
dev.off()
# Combined
# Two-panel figure: slope (left) and elevation (right) effects together.
bitmap("Plot_nain_abund.tiff", height = 12, width = 17, units = 'cm', type = "tifflzw", res = 300)
tiff("Plot_nain_abund.tiff", height = 12, width = 17, units = 'cm', compression = "lzw", res = 300)
postscript("Plot_nain_abund.eps", width = 8, height = 8, horizontal = FALSE, onefile = FALSE, paper = "special", colormodel = "cmyk", family = "Times")
par(mfrow = c(1,2), mar=c(3.5,3.5,1,1), mgp=c(2,0.7,0), tck=-0.01)
plot(slope.range, nain.slope.nbar, type = 'n', xlab = 'Slope', ylab = expression(paste(italic(D.)," ",italic(wrighti), " abundance")))
polygon(c(slope.range, rev(slope.range)), c(nain.slope.UCI, rev(nain.slope.LCI)), col = 'light gray', border = NA)
lines(slope.range, nain.slope.nbar)
plot(elev.range, nain.elev.nbar, type = 'n', ylim = c(0, 150), xlab = 'Elevation (m)', ylab = '')
polygon(c(elev.range, rev(elev.range)), c(nain.elev.UCI, rev(nain.elev.LCI)), col = 'light gray', border = NA)
lines(elev.range, nain.elev.nbar)
dev.off()
par(mfrow = c(1,1))
# Detection
# Precip effects
# Detection probability vs. covariates, back-transformed from the logit
# scale with a hand-written inverse-logit: 1 / (1 + exp(-eta)).
precip.range <- seq(from = min(Precip, na.rm = T), to = max(Precip, na.rm = T), length.out = 1000)
precip.sd <- sd(as.matrix(Precip), na.rm = TRUE)
precip.mean <- mean(as.matrix(Precip), na.rm = TRUE)
precip.range.std <- (precip.range - precip.mean)/precip.sd
nain.precip.nbar <- 1 / (1 + exp(-1*(Means.nain["alpha.p"] + Means.nain["beta3.p"]*precip.range.std)))
nain.precip.LCI <- 1 / (1 + exp(-1 * (Quants.nain[1, "alpha.p"] + Quants.nain[1, "beta3.p"]*precip.range.std)))
nain.precip.UCI <- 1 / (1 + exp(-1 * (Quants.nain[3, "alpha.p"] + Quants.nain[3, "beta3.p"]*precip.range.std)))
# Humidity effects
RH.range <- seq(from = min(RH, na.rm = T), to = max(RH, na.rm = T), length.out = 1000)
RH.sd <- sd(as.matrix(RH), na.rm = TRUE)
RH.mean <- mean(as.matrix(RH), na.rm = TRUE)
RH.range.std <- (RH.range - RH.mean)/RH.sd
nain.RH.nbar <- 1 / (1 + exp(-1 * (Means.nain["alpha.p"] + Means.nain["beta10.p"]*RH.range.std)))
nain.RH.LCI <- 1 / (1 + exp(-1 * (Quants.nain[1, "alpha.p"] + Quants.nain[1, "beta10.p"]*RH.range.std)))
nain.RH.UCI <- 1 / (1 + exp(-1* (Quants.nain[3, "alpha.p"] + Quants.nain[3, "beta10.p"]*RH.range.std)))
# Temperature effects
# NOTE(review): the point estimate includes the quadratic beta2.p term but
# the CI curves omit it, and the temperature curves are never plotted in the
# figure below -- confirm whether this panel was dropped intentionally.
Temp.range <- seq(from = min(Temp, na.rm = T), to = max(Temp, na.rm = T), length.out = 1000)
Temp.sd <- sd(as.matrix(Temp), na.rm = TRUE)
Temp.mean <- mean(as.matrix(Temp), na.rm = TRUE)
Temp.range.std <- (Temp.range - Temp.mean)/Temp.sd
nain.Temp.nbar <- 1 / (1 + exp(-1 * (Means.nain["alpha.p"] + Means.nain["beta1.p"]*Temp.range.std + Means.nain["beta2.p"]*Temp.range.std^2)))
nain.Temp.LCI <- 1 / (1 + exp(-1 * (Quants.nain[1, "alpha.p"] + Quants.nain[1, "beta1.p"]*Temp.range.std)))
nain.Temp.UCI <- 1 / (1 + exp(-1* (Quants.nain[3, "alpha.p"] + Quants.nain[3, "beta1.p"]*Temp.range.std)))
# Combined
# Two-panel detection figure: precipitation (left) and humidity (right).
# NOTE(review): three devices opened, one dev.off() -- presumably two of
# the device lines are meant to be commented out.
bitmap("Plot_nain_detection.tiff", height = 12, width = 17, units = 'cm', type = "tifflzw", res = 300)
tiff("Plot_nain_detection.tiff", height = 12, width = 17, units = 'cm', compression = "lzw", res = 300)
postscript("Plot_nain_detection.eps", width = 8, height = 8, horizontal = FALSE, onefile = FALSE, paper = "special", colormodel = "cmyk", family = "Times")
par(mfrow = c(1,2), mar=c(3.5,3.5,1,1), mgp=c(2,0.7,0), tck=-0.01)
plot(precip.range, nain.precip.nbar, type = 'n', ylim = c(0, 0.6), xlab = '24-hour precipitation', ylab = expression(paste(italic(D.)," ",italic(wrighti), " detection probability")))
polygon(c(precip.range, rev(precip.range)), c(nain.precip.UCI, rev(nain.precip.LCI)), col = 'light gray', border = NA)
lines(precip.range, nain.precip.nbar)
plot(RH.range, nain.RH.nbar, type = 'n', ylim = c(0, 0.6), xlab = 'Relative humidity', ylab = '')
polygon(c(RH.range, rev(RH.range)), c(nain.RH.UCI, rev(nain.RH.LCI)), col = 'light gray', border = NA)
lines(RH.range, nain.RH.nbar)
dev.off()
par(mfrow = c(1,1))
|
3cc503dc206a8ae36075d745ca0cc1c1e8685c2a
|
c5d59c940c47ec7219df2e3bf24d1463fcf9a42e
|
/ROSE2_callSuper.R
|
54032d4bb5b1dcd3350683ff74714814366c2ebf
|
[
"MIT"
] |
permissive
|
linlabcode/pipeline
|
5dd8ecd5f0f499e6fd1b45c6f2875a2e9a2270c7
|
396ff17a67c9323024b9448665529142b6aa18be
|
refs/heads/master
| 2023-07-19T18:11:14.456745
| 2022-04-26T17:57:43
| 2022-04-26T17:57:43
| 60,634,937
| 4
| 10
|
NOASSERTION
| 2018-12-17T10:14:39
| 2016-06-07T18:01:22
|
Python
|
UTF-8
|
R
| false
| false
| 19,969
|
r
|
ROSE2_callSuper.R
|
#============================================================================
#==============SUPER-ENHANCER CALLING AND PLOTTING FUNCTIONS=================
#============================================================================
#This function calculates the cutoff by sliding a diagonal line and finding where it is tangential (or as close as possible)
# Compute the super-enhancer signal cutoff for a ranked signal vector.
# The vector is sorted ascending, negative (control-exceeds-signal) values
# are clamped to zero, and a line with the curve's average slope is slid
# along the ranking; the cutoff is the y-value at the point of tangency
# (minimum number of points below the line).  Returns a list with the
# absolute cutoff plus its fold over the median and mean.  Extra arguments
# (...) are passed through to plot() when drawPlot is TRUE.
calculate_cutoff <- function(inputVector, drawPlot=TRUE,...){
inputVector <- sort(inputVector)
inputVector[inputVector<0]<-0 #set those regions with more control than ranking equal to zero
slope <- (max(inputVector)-min(inputVector))/length(inputVector) #This is the slope of the line we want to slide. This is the diagonal.
xPt <- floor(optimize(numPts_below_line,lower=1,upper=length(inputVector),myVector= inputVector,slope=slope)$minimum) #Find the x-axis point where a line passing through that point has the minimum number of points below it. (ie. tangent)
y_cutoff <- inputVector[xPt] #The y-value at this x point. This is our cutoff.
if(drawPlot){ #if TRUE, draw the plot
plot(1:length(inputVector), inputVector,type="l",...)
b <- y_cutoff-(slope* xPt)
abline(v= xPt,h= y_cutoff,lty=2,col=8)
points(xPt,y_cutoff,pch=16,cex=0.9,col=2)
abline(coef=c(b,slope),col=2)
title(paste("x=",xPt,"\ny=",signif(y_cutoff,3),"\nFold over Median=",signif(y_cutoff/median(inputVector),3),"x\nFold over Mean=",signif(y_cutoff/mean(inputVector),3),"x",sep=""))
axis(1,sum(inputVector==0),sum(inputVector==0),col.axis="pink",col="pink") #Number of regions with zero signal
}
return(list(absolute=y_cutoff,overMedian=y_cutoff/median(inputVector),overMean=y_cutoff/mean(inputVector)))
}
# Accessory to calculate_cutoff(): counts how many elements of myVector lie
# on or below the straight line of the given slope that passes through the
# point (x, myVector[x]).
numPts_below_line <- function(myVector,slope,x){
  intercept <- myVector[x] - slope * x
  positions <- seq_along(myVector)
  lineHeights <- slope * positions + intercept
  sum(myVector <= lineHeights)
}
# Write a stitched-enhancer table as a UCSC BED-style track file.
# Columns: chrom, start, stop, region ID, plus a rank-based score when
# `score` has one value per row.  When splitSuper is TRUE, a second track
# containing only the rows in `superRows` (the super-enhancers) is appended
# to the same file using `superColor`.
convert_stitched_to_bed <- function(inputStitched,trackName,trackDescription,outputFile,splitSuper=TRUE,score=c(),superRows=c(),baseColor="0,0,0",superColor="255,0,0"){
outMatrix <- matrix(data="",ncol=4+ifelse(length(score)==nrow(inputStitched),1,0),nrow=nrow(inputStitched))
outMatrix[,1] <- as.character(inputStitched$CHROM)
outMatrix[,2] <- as.character(inputStitched$START)
outMatrix[,3] <- as.character(inputStitched$STOP)
outMatrix[,4] <- as.character(inputStitched$REGION_ID)
if(length(score)==nrow(inputStitched)){
# Convert signal to a descending rank (1 = strongest region).
score <- rank(score,ties.method="first")
score <- length(score)-score+1 #Stupid rank only does smallest to largest.
outMatrix[,5] <- as.character(score)
}
# Timestamped description; newlines become tabs for the one-line header.
trackDescription <- paste(trackDescription,"\nCreated on ",format(Sys.time(), "%b %d %Y"),collapse="",sep="")
trackDescription <- gsub("\n","\t", trackDescription)
tName <- gsub(" ","_",trackName)
# Track header line (overwrites the file), then region rows appended.
cat('track name="', tName,'" description="', trackDescription,'" itemRGB=On color=',baseColor,"\n",sep="",file=outputFile)
write.table(file= outputFile,outMatrix,sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE,append=TRUE)
if(splitSuper==TRUE){
cat("\ntrack name=\"Super_", tName,'" description="Super ', trackDescription,'" itemRGB=On color=', superColor,"\n",sep="",file=outputFile,append=TRUE)
write.table(file= outputFile,outMatrix[superRows,],sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE,append=TRUE)
}
}
# Write the super-enhancer table to `outputFile` with a '#'-commented,
# timestamped description header.  When `additionalData` is a matrix with
# one row per super-enhancer it is bound on as extra columns and the table
# is re-sorted by enhancerRank; a row-count mismatch only warns and the
# extra data is dropped.
writeSuperEnhancer_table <- function(superEnhancer,description,outputFile,additionalData=NA){
description <- paste("#",description,"\nCreated on ",format(Sys.time(), "%b %d %Y"),collapse="",sep="")
description <- gsub("\n","\n#",description)
cat(description,"\n",file=outputFile)
if(is.matrix(additionalData)){
if(nrow(additionalData)!=nrow(superEnhancer)){
warning("Additional data does not have the same number of rows as the number of super enhancers.\n--->>> ADDITIONAL DATA NOT INCLUDED <<<---\n")
}else{
superEnhancer <- cbind(superEnhancer,additionalData)
superEnhancer = superEnhancer[order(superEnhancer$enhancerRank),]
}
}
write.table(file=outputFile,superEnhancer,sep="\t",quote=FALSE,row.names=FALSE,append=TRUE)
}
#============================================================================
#============================HELPER FUNCTIONS================================
#============================================================================
#http://stackoverflow.com/questions/9837766/r-plot-circle-with-radius-1-and-angle-0-2pi-in-polar-coordinates
# Draw a circle of radius `rad` centred at (x, y) on the current plot, as a
# polygon with `nvert` vertices; extra arguments are passed to polygon().
circle <- function(x, y, rad = 1, nvert = 500, ...){
  theta <- seq(0, 2*pi, length.out = nvert)
  polygon(x + rad * cos(theta), y + rad * sin(theta), ...)
}
# Euclidean magnitude (vector norm) of each (x, y) pair.
#
# Replaces the original element-by-element loop that grew the result with
# c(): sqrt() and ^ are vectorized in R, so this is both simpler and O(n).
# The vectorized form also returns numeric(0) for empty input instead of
# the loop's 1:length(x) misindexing, and recycles x/y like standard R ops.
magnitude <- function(x,y){
  sqrt(x^2 + y^2)
}
# Look up the refseq IDs (column 2) for every row of transcribedTable whose
# gene-name column (column 3) equals geneName.
#
# Replaces the original grow-in-a-loop implementation with a vectorized
# subset.  For an unmatched gene this returns character(0) rather than the
# loop's NULL; callers only concatenate the result, so usage is unchanged.
geneToRefseq <- function(geneName,transcribedTable){
  matchingRows <- which(transcribedTable[,3] == geneName)
  return(as.character(transcribedTable[matchingRows, 2]))
}
#get the row by enhancer ID for enhancer tables that are sorted uniquely
enhancerIDToRow <- function(enhancerID, targetTable) {
  # Row index (or indices) in targetTable whose first column matches the ID.
  which(targetTable[, 1] == enhancerID)
}
#gets genes associated w/ an enhancer by ID
getEnhancerGenes <- function(enhancerID, enhancerTable) {
  # Pool the comma-separated gene lists stored in columns 7-9 of the enhancer
  # table for the given enhancer ID and return the unique gene names.
  # (Columns 7-9 presumably hold overlapping/proximal/closest gene lists as
  # produced by the ROSE gene mapper -- confirm against the table writer.)
  #
  # Replaces the original triplicated copy-paste (one block per column) and
  # grow-a-vector pattern with a single loop over the three columns.
  row <- enhancerIDToRow(enhancerID, enhancerTable)
  enhancerGenes <- unlist(lapply(7:9, function(col) {
    unlist(strsplit(as.character(enhancerTable[row, col]), ","))
  }))
  unique(enhancerGenes)
}
getRefseqIDs <- function(enhancerIDList, enhancerTable, transcribedTable) {
  # Map a set of enhancer IDs to the RefSeq IDs of all their associated genes.
  # Order follows the input enhancer list, then each enhancer's gene order,
  # and duplicates are kept -- matching the original nested-loop behaviour.
  #
  # Functional rewrite of the original quadratic c()-append loops; returns
  # NULL when nothing matches (as the original c() accumulator did).
  unlist(lapply(enhancerIDList, function(enhancerID) {
    enhancerGenes <- getEnhancerGenes(enhancerID, enhancerTable)
    unlist(lapply(enhancerGenes, geneToRefseq, transcribedTable = transcribedTable))
  }))
}
#============================================================================
#===================SUPER-ENHANCER CALLING AND PLOTTING======================
#============================================================================
#============================================================================
#==============================INPUT ARGUMENTS===============================
#============================================================================
# Positional command-line arguments. commandArgs() without trailingOnly=TRUE
# returns the full invocation vector, so indexing elements 6-9 assumes a fixed
# number of leading interpreter arguments (e.g. "R --no-save outFolder
# enhancerFile enhancerName wceName < script") -- TODO confirm against the
# pipeline wrapper that launches this script.
args <- commandArgs()
print('THESE ARE THE ARGUMENTS')
print(args)
#ARGS
outFolder = args[6]      # directory where plots and tables are written
enhancerFile = args[7]   # stitched enhancer region map (tab-delimited)
enhancerName = args[8]   # sample/analysis name used in output file names
wceName = args[9]        # control (WCE) sample name, or 'NONE' for no background
#============================================================================
#================================WORKSHOPPING================================
#============================================================================
# Hard-coded inputs kept commented out for interactive debugging.
# setwd('/Volumes/grail/projects/newRose/')
# enhancerFile = 'roseTest/MM1S_H3K27AC_DMSO_peaks_6KB_STITCHED_TSS_DISTAL_ENHANCER_REGION_MAP.txt'
# outFolder = 'roseTest/'
# #wceName = 'MM1S_WCE_DMSO_MERGED.hg18.bwt.sorted.bam'
# wceName = 'NONE'
# enhancerName = 'MM1S_H3K27AC_DMSO_peaks'
#============================================================================
#================================DATA INPUT==================================
#============================================================================
#Read enhancer regions with closestGene columns
stitched_regions <- read.delim(file= enhancerFile,sep="\t")
#perform WCE subtraction. Using pipeline table to match samples to proper background.
# Column 7 is taken as the ranking signal; its column name encodes the factor
# (e.g. "MM1S_H3K27AC_...") from which the prefix is split off.
rankBy_factor = colnames(stitched_regions)[7]
prefix = unlist(strsplit(rankBy_factor,'_'))[1]
if(wceName == 'NONE'){
  # No background control: rank directly on the signal column.
  rankBy_vector = as.numeric(stitched_regions[,7])
}else{
  # Background present: column 8 is assumed to be the control track;
  # subtract it from the signal before ranking.
  wceName = colnames(stitched_regions)[8]
  print('HERE IS THE WCE NAME')
  print(wceName)
  rankBy_vector = as.numeric(stitched_regions[,7])-as.numeric(stitched_regions[,8])
}
#SETTING NEGATIVE VALUES IN THE rankBy_vector to 0
rankBy_vector[rankBy_vector < 0] <- 0
#============================================================================
#======================SETTING ORIGINAL ROSE CUTOFFS=========================
#============================================================================
#FIGURING OUT THE CUTOFF
# calculate_cutoff() is defined earlier in this file; it returns (at least)
# $absolute, the signal threshold above which enhancers are called "super".
cutoff_options <- calculate_cutoff(rankBy_vector, drawPlot=FALSE,xlab=paste(rankBy_factor,'_enhancers'),ylab=paste(rankBy_factor,' Signal','- ',wceName),lwd=2,col=4)
#These are the super-enhancers
superEnhancerRows <- which(rankBy_vector> cutoff_options$absolute)
typicalEnhancers = setdiff(1:nrow(stitched_regions),superEnhancerRows)
enhancerDescription <- paste(enhancerName," Enhancers\nCreated from ", enhancerFile,"\nRanked by ",rankBy_factor,"\nUsing cutoff of ",cutoff_options$absolute," for Super-Enhancers",sep="",collapse="")
#============================================================================
#========================MAKING SUPER HOCKEY STICK===========================
#============================================================================
#MAKING HOCKEY STICK PLOT
# Enhancers plotted ranked by increasing signal (the classic ROSE "hockey
# stick"); dashed lines mark the cutoff and the count of super-enhancers.
plotFileName = paste(outFolder,enhancerName,'_Plot_points.png',sep='')
png(filename=plotFileName,height=600,width=600)
signalOrder = order(rankBy_vector,decreasing=TRUE)
if(wceName == 'NONE'){
  plot(length(rankBy_vector):1,rankBy_vector[signalOrder], col='red',xlab=paste(rankBy_factor,'_enhancers'),ylab=paste(rankBy_factor,' Signal'),pch=19,cex=2)
}else{
  # Same plot, but the y-axis label records the subtracted background.
  plot(length(rankBy_vector):1,rankBy_vector[signalOrder], col='red',xlab=paste(rankBy_factor,'_enhancers'),ylab=paste(rankBy_factor,' Signal','- ',wceName),pch=19,cex=2)
}
abline(h=cutoff_options$absolute,col='grey',lty=2)
abline(v=length(rankBy_vector)-length(superEnhancerRows),col='grey',lty=2)
lines(length(rankBy_vector):1,rankBy_vector[signalOrder],lwd=4, col='red')
text(0,0.8*max(rankBy_vector),paste(' Cutoff used: ',cutoff_options$absolute,'\n','Super-Enhancers identified: ',length(superEnhancerRows)),pos=4)
dev.off()
#============================================================================
#======================SETTING STRETCH ROSE CUTOFFS==========================
#============================================================================
#FIGURING OUT THE CUTOFF
# Same cutoff procedure as above, but applied to enhancer *lengths*
# (column 6 of the stitched table) to call "stretch" enhancers.
stretch_vector = abs(as.numeric(stitched_regions[,6]))
stretch_cutoff_options <- calculate_cutoff(stretch_vector, drawPlot=FALSE,xlab=paste(rankBy_factor,'_enhancers'),ylab=paste(rankBy_factor,' enhancer lengths'),lwd=2,col=4)
#These are the stretch-enhancers
stretchEnhancerRows <- which(stretch_vector > stretch_cutoff_options$absolute)
typicalStretchEnhancers = setdiff(1:nrow(stitched_regions), stretchEnhancerRows)
stretchEnhancerDescription <- paste(enhancerName," Enhancers\nCreated from ", enhancerFile,"\nRanked by ",rankBy_factor," lengths\nUsing cutoff of ", stretch_cutoff_options$absolute," for Stretch-Enhancers",sep="",collapse="")
#============================================================================
#=========================MAKING STRETCH ROSE PLOTS==========================
#============================================================================
#MAKING HOCKEY STICK PLOT
# Length-based hockey stick: enhancers ranked by increasing length.
plotFileName = paste(outFolder,enhancerName,'_Plot_points_stretch.png',sep='')
png(filename=plotFileName,height=600,width=600)
signalOrder = order(stretch_vector,decreasing=TRUE)
plot(length(stretch_vector):1, stretch_vector[signalOrder], col='red',xlab=paste(rankBy_factor,'_enhancers'),ylab=paste(rankBy_factor,' lengths (bp)'),pch=19,cex=2)
abline(h=stretch_cutoff_options$absolute,col='grey',lty=2)
abline(v=length(stretch_vector)-length(stretchEnhancerRows),col='grey',lty=2)
lines(length(stretch_vector):1, stretch_vector[signalOrder],lwd=4, col='red')
text(0,0.8*max(stretch_vector),paste(' Cutoff used: ',stretch_cutoff_options$absolute,'\n','Stretch-Enhancers identified: ',length(stretchEnhancerRows)),pos=4)
dev.off()
#============================================================================
#================================MAKING PANEL PLOTS==========================
#============================================================================
#MAKING NEW HOCKEY STICK PLOT
# Three-panel figure: (1) signal hockey stick, (2) super-vs-stretch scatter,
# (3) length hockey stick.
plotFileName = paste(outFolder,enhancerName,'_Plot_panel.png',sep='')
png(filename=plotFileName,height=600,width=1200)
par(mfrow= c(1,3))
#FIRST THE HOCKEY
signalOrder = order(rankBy_vector,decreasing=TRUE)
enhancerOrder = signalOrder
plot(length(rankBy_vector):1, rankBy_vector[enhancerOrder], col='red',xlab='Enhancers ranked by increasing signal',ylab='Enhancer signal (total rpm)',lwd=2,type='l')
# Super-enhancers highlighted as large red points, the rest as small grey ones.
points(length(rankBy_vector):(length(rankBy_vector)-length(superEnhancerRows)+1),rankBy_vector[enhancerOrder[1:length(superEnhancerRows)]],pch=19,cex=1,col='red')
points((length(rankBy_vector)-length(superEnhancerRows)):1,rankBy_vector[enhancerOrder[(length(superEnhancerRows)+1):length(enhancerOrder)]],pch=19,cex=0.75,col='grey')
abline(h=cutoff_options$absolute,col=rgb(0.3,0.3,0.3),lty=2)
abline(v=length(rankBy_vector)-length(superEnhancerRows),col=rgb(0.3,0.3,0.3),lty=2)
text(0,0.8*max(rankBy_vector),paste(' Cutoff used: ',cutoff_options$absolute,'\n','Super-Enhancers identified: ',length(superEnhancerRows)),pos=4)
#THEN THE SCATTER
# Scatter of combined magnitude vs skew for the union of super and stretch
# enhancers; enhancers that are both ("super stretch") are highlighted.
allSEs = union(superEnhancerRows,stretchEnhancerRows)
superStretch = intersect(superEnhancerRows,stretchEnhancerRows)
enhMagnitude = magnitude(stretch_vector[allSEs]/max(stretch_vector),rankBy_vector[allSEs]/max(rankBy_vector))
# Skew = signed distance of the (length, signal) point from the diagonal:
# negative when signal dominates ("more super"), positive when length does.
m = as.matrix(cbind(stretch_vector[allSEs]/max(stretch_vector),rankBy_vector[allSEs]/max(rankBy_vector)))
mDiag = apply(m,1,sum)/2
mDist = sqrt(2*(m[,1]-mDiag)^2)
mDist[which(m[,2] > m[,1])] <- mDist[which(m[,2] > m[,1])]*-1
plot(mDist, enhMagnitude,cex=0.75,col='grey',ylim =c(-.05,1),xlim = c(-0.5,0.5),xlab='Enhancer skew',ylab='Enhancer combined magnitude')
# Positions (within allSEs) of enhancers that are both super and stretch.
ssSubset = c()
for(x in 1:length(allSEs)){
  if(length(which(superStretch == allSEs[x])) > 0){
    ssSubset = c(ssSubset,x)
  }
}
points(mDist[ssSubset],enhMagnitude[ssSubset],pch=19,cex=1,col='red')
abline(h=0)
abline(v=0)
text(0,-.05,"MORE SUPER",pos=2)
text(0,-.05,"MORE STRETCH",pos=4)
legend(-.5,.95,c(paste(length(superStretch),'SUPER AND STRETCH')),pch=19,col='red')
#THEN STRETCH
signalOrder = order(stretch_vector,decreasing=FALSE)
enhancerOrder = signalOrder
plot(1:length(stretch_vector), stretch_vector[rev(enhancerOrder)], col='red',xlab='Enhancers ranked by decreasing length',ylab='Enhancer length (bm)',lwd=2,type='l')
points(1:length(stretchEnhancerRows), stretch_vector[enhancerOrder[length(stretch_vector):(length(stretch_vector)-length(stretchEnhancerRows)+1)]],pch=19,cex=1,col='red')
points(length(stretchEnhancerRows):length(stretch_vector), stretch_vector[enhancerOrder[(length(typicalStretchEnhancers)+1):1]],pch=19,cex=0.75,col='grey')
abline(h=stretch_cutoff_options$absolute,col=rgb(0.3,0.3,0.3),lty=2)
abline(v=length(stretchEnhancerRows),col=rgb(0.3,0.3,0.3),lty=2)
text(length(stretch_vector),0.8*max(stretch_vector),paste(' Cutoff used: ',stretch_cutoff_options$absolute,'\n','Stretch-Enhancers identified: ',length(stretchEnhancerRows)),pos=2)
dev.off()
#============================================================================
#============================WRITING SUPER OUTPUT============================
#============================================================================
#Writing a bed file
# convert_stitched_to_bed() is defined earlier in this file; it writes a UCSC
# track, optionally splitting out the super rows into a second track.
bedFileName = paste(outFolder,enhancerName,'_Enhancers_withSuper.bed',sep='')
convert_stitched_to_bed(stitched_regions,paste(rankBy_factor,"Enhancers"), enhancerDescription,bedFileName,score=rankBy_vector,splitSuper=TRUE,superRows= superEnhancerRows,baseColor="0,0,0",superColor="255,0,0")
#This matrix is just the super_enhancers
true_super_enhancers <- stitched_regions[superEnhancerRows,]
# Two extra columns for the tables: rank (1 = strongest signal) and a 0/1
# super-enhancer flag.
additionalTableData <- matrix(data=NA,ncol=2,nrow=nrow(stitched_regions))
colnames(additionalTableData) <- c("enhancerRank","isSuper")
additionalTableData[,1] <- nrow(stitched_regions)-rank(rankBy_vector,ties.method="first")+1
additionalTableData[,2] <- 0
additionalTableData[superEnhancerRows,2] <- 1
#Writing enhancer and super-enhancer tables with enhancers ranked and super status annotated
enhancerTableFile = paste(outFolder,enhancerName,'_AllEnhancers.table.txt',sep='')
writeSuperEnhancer_table(stitched_regions, enhancerDescription,enhancerTableFile, additionalData= additionalTableData)
superTableFile = paste(outFolder,enhancerName,'_SuperEnhancers.table.txt',sep='')
writeSuperEnhancer_table(true_super_enhancers, enhancerDescription,superTableFile, additionalData= additionalTableData[superEnhancerRows,])
#============================================================================
#============================WRITING STRETCH ROSE============================
#============================================================================
#Writing a bed file
bedFileName = paste(outFolder,enhancerName,'_Enhancers_withStretch.bed',sep='')
convert_stitched_to_bed(stitched_regions,paste(rankBy_factor,"Enhancers"), enhancerDescription,bedFileName,score= stretch_vector,splitSuper=TRUE,superRows= stretchEnhancerRows,baseColor="0,0,0",superColor="255,0,0")
#This matrix is just the super_enhancers
true_stretch_enhancers <- stitched_regions[stretchEnhancerRows,]
# Rank by length this time, with a 0/1 stretch flag.
additionalTableData <- matrix(data=NA,ncol=2,nrow=nrow(stitched_regions))
colnames(additionalTableData) <- c("enhancerRank","isStretch")
additionalTableData[,1] <- nrow(stitched_regions)-rank(stretch_vector,ties.method="first")+1
additionalTableData[,2] <- 0
additionalTableData[stretchEnhancerRows,2] <- 1
#Writing enhancer and stretch-enhancer tables with enhancers ranked and stretch status annotated
enhancerTableFile = paste(outFolder,enhancerName,'_AllEnhancers_Length.table.txt',sep='')
writeSuperEnhancer_table(stitched_regions, enhancerDescription,enhancerTableFile, additionalData= additionalTableData)
stretchTableFile = paste(outFolder,enhancerName,'_StretchEnhancers.table.txt',sep='')
writeSuperEnhancer_table(true_stretch_enhancers, enhancerDescription,stretchTableFile, additionalData= additionalTableData[stretchEnhancerRows,])
#============================================================================
#================================WRITING 2D ROSE=============================
#============================================================================
#Writing a bed file
# "Super stretch" = intersection of super and stretch calls (superStretch is
# computed in the panel-plot section above).
bedFileName = paste(outFolder,enhancerName,'_Enhancers_withSuperStretch.bed',sep='')
convert_stitched_to_bed(stitched_regions,paste(rankBy_factor,"Enhancers"), enhancerDescription,bedFileName,score= stretch_vector,splitSuper=TRUE,superRows= superStretch,baseColor="0,0,0",superColor="255,0,0")
#This matrix is just the super_enhancers
true_superStretch_enhancers <- stitched_regions[superStretch,]
print(length(superStretch))
print(dim(true_superStretch_enhancers))
# Rank by combined (length, signal) magnitude for the 2D table.
additionalTableData <- matrix(data=NA,ncol=2,nrow=nrow(stitched_regions))
colnames(additionalTableData) <- c("enhancerRank","isSuperStretch")
enhMagnitude = magnitude(stretch_vector/max(stretch_vector),rankBy_vector/max(rankBy_vector))
additionalTableData[,1] <- nrow(stitched_regions)-rank(enhMagnitude,ties.method="first")+1
additionalTableData[,2] <- 0
additionalTableData[superStretch,2] <- 1
#Writing enhancer and superStretch-enhancer tables with enhancers ranked and superStretch status annotated
enhancerTableFile = paste(outFolder,enhancerName,'_AllEnhancers_SuperStretch.table.txt',sep='')
writeSuperEnhancer_table(stitched_regions, enhancerDescription,enhancerTableFile, additionalData= additionalTableData)
superStretchTableFile = paste(outFolder,enhancerName,'_SuperStretchEnhancers.table.txt',sep='')
writeSuperEnhancer_table(true_superStretch_enhancers, enhancerDescription,superStretchTableFile, additionalData= additionalTableData[superStretch,])
|
30c7f2f84f7e872b9cd87c5376fa08e684a27402
|
91022a1967b40e85ae44a728da2d996def736a61
|
/R/streamClient.R
|
d646fcdcc5159a6e50690c8ffed29e0b22f467f8
|
[] |
no_license
|
arbuzovv/tcsinvest
|
0e4d7a7aad2e98d8ace8a5ae8840cbb12cb24fe5
|
6df27cea5dd53595ef4e87da6ccfabcb671b9ae6
|
refs/heads/master
| 2023-07-14T03:19:56.310556
| 2021-08-18T21:28:02
| 2021-08-18T21:28:02
| 392,618,079
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,131
|
r
|
streamClient.R
|
#' @title Create stream client
#'
#' @description this function creates client for streaming
#'
#' @param token token from Tinkoff account
#' @details As described by the official Tinkoff Investments documentation. If you want live trading, use sandbox=FALSE with live token
#' @note Not for the faint of heart. All profits and losses related are yours and yours alone. If you don't like it, write it yourself.
#' @author Vyacheslav Arbuzov
#' @seealso \code{\link{streamStatus}} \code{\link{streamClose}} \code{\link{streamSubscribe}}
#' @import websocket
#' @export
streamClient <- function(token = '')
{
  # Build (but do not open) a websocket client for the Tinkoff streaming API,
  # authorised with the supplied bearer token. Connect later via $connect().
  auth_headers <- list("Authorization" = paste("Bearer", token))
  client <- WebSocket$new(
    "wss://api-invest.tinkoff.ru/openapi/md/v1/md-openapi/ws",
    headers = auth_headers,
    autoConnect = FALSE
  )
  # Lifecycle callbacks simply report status to the console.
  client$onOpen(function(event) {
    cat("Connection opened\n")
  })
  client$onClose(function(event) {
    cat("Client disconnected with code ", event$code,
        " and reason ", event$reason, "\n", sep = "")
  })
  client$onError(function(event) {
    cat("Client failed to connect: ", event$message, "\n")
  })
  return(client)
}
|
01d28fd8845501c6f1c9948549e145e4c00bc311
|
73ddf6aee285774a76a365f5144dafc3afae8ba8
|
/R/SILOMorton.R
|
ed7376593960c89e3e133f9bef74a10c1f18e300
|
[] |
no_license
|
matt-s-gibbs/swtools
|
d702fa44255c14b646d99b29bbe2296f6786ea2f
|
356e8df9e86e7c17e2d4e219352a2b6ea11adbde
|
refs/heads/master
| 2023-05-27T23:01:20.044583
| 2023-05-24T07:50:54
| 2023-05-24T07:50:54
| 125,963,455
| 4
| 1
| null | 2018-04-05T04:26:33
| 2018-03-20T05:06:29
|
R
|
UTF-8
|
R
| false
| false
| 3,567
|
r
|
SILOMorton.R
|
#' Plot the quality codes of the input data for Morton's Evap calculations
#'
#' Produces a tile plot displaying the quality codes for variables that are input to the
#' calculation of Morton's evaporation equations, being maximum and minimum temperature, solar radiation and vapor pressure (derived from wet bulb temperature).
#' Evaporation is also plotted, if the site has pan observations.
#'
#' @param SILO a list of sites with SILO data, as created by SILOLoad()
#' @param filename optional, filename to write a plot of the quality codes to, including extension (e.g. png). Filename can include full path or sub folders.
#'
#' @return a ggplot geom_tile plot of the quality codes of the inputs to Morton's evaporation calculations
#'
#' @examples
#' \dontrun{
#' X<-SILOLoad(c("24001","24002","24003"),path="./SWTools/extdata")
#' p<-SILOMortonQualityCodes(X)
#' }
#'
#' @export
SILOMortonQualityCodes<-function(SILO,filename=NULL)
{
  #lookup table to relate quality code to what it means
  lookup<-data.frame(Code=c(0,23,13,15,35,25,75,26),
                     Quality=c("Station data, as supplied by Bureau",
                               "Nearby station, data from BoM",
                               "Deaccumulated using nearby station",
                               "Deaccumulated using interpolated data",
                               "interpolated from daily observations using anomaly interpolation method",
                               "interpolated daily observations",
                               "interpolated long term average",
                               "synthetic pan evaporation"))
  #colours to shade codes, green to red, derived from
  #rev(RColorBrewer::brewer.pal(7,"RdYlGn"))
  cols<-c("#1A9850", "#91CF60", "#D9EF8B", "#FFFFBF", "#FEE08B", "#FC8D59", "#D73027")
  cols<-c(cols,"#2b83ba") #add 8th item for span from a different colour palette
  names(cols)<-lookup$Quality
  #pull out the quality code column for each dataset in the list
  # Each SILO list element carries a zoo time series ($tsd) whose S* columns
  # hold the quality codes for the corresponding variables.
  my.data<-NULL
  for(i in 1:length(SILO))
  {
    temp<-zoo::fortify.zoo(SILO[[i]]$tsd)
    temp<-temp %>% dplyr::select(Date=.data$Index,Tmax=.data$Smx,TMin=.data$Smn,Radn=.data$Ssl,VP=.data$Svp,Evap=.data$Sev) %>%
      dplyr::mutate(Station=SILO[[i]]$Station,
                    Site=SILO[[i]]$Site)
    my.data<-rbind(my.data,temp)
  }
  # Long format: one row per Date x Station x Variable, keyed "Station-Variable".
  my.data<-my.data %>% tidyr::gather("Variable","Code",-.data$Date,-.data$Station,-.data$Site) %>%
    dplyr::mutate(ID=paste(.data$Station,.data$Variable,sep="-"))
  #Add the interpretation for each quality code
  my.data<-my.data %>% dplyr::left_join(lookup,by="Code")
  #fix the factor order so the are in order from best to worst, not alphabetical
  suppressWarnings(my.data$Quality<-forcats::fct_relevel(my.data$Quality,as.character(lookup$Quality)))
  #generate the plot
  p<-ggplot2::ggplot(my.data)+
    ggplot2::geom_tile(ggplot2::aes(x=.data$Date, y=factor(.data$ID),fill = factor(.data$Quality)))+
    ggplot2::scale_fill_manual(values = cols, name='Quality Code' )+
    ggplot2::theme_bw()+
    ggplot2::ylab("Station-Varible")+
    ggplot2::xlab("Date")+
    ggplot2::theme(legend.position = "top") +
    #ggplot2::guides(fill = ggplot2::guide_legend(nrow = length(unique(my.data$Code)))) +
    ggplot2::guides(fill = ggplot2::guide_legend(ncol=2)) +
    ggplot2::theme(text = ggplot2::element_text(size = 10))
  # NOTE(review): compression= is forwarded by ggsave to the graphics device
  # and is a tiff() argument; the docs above suggest png filenames -- confirm
  # this combination works for non-tiff extensions.
  if(!is.null(filename)) ggplot2::ggsave(filename,p,width=19,height=19,units="cm",compression="lzw",dpi = 1000)
  return(p)
}
|
26f99129b746cd6e71845431798e6308f51700b7
|
28c0bb9cf47bc8a8f629b389ba62c1808fd34691
|
/man/verhulst.update.Rd
|
deecca7fc3f7548bd8e9bb228977499f5d59dae7
|
[] |
no_license
|
gcostaneto/ZeBook
|
836e3dc8ab80de9ecce782e809606f4d647f30c0
|
b892a7e80a233b1c468526307eb5f7b49d95514d
|
refs/heads/master
| 2020-05-14T08:41:25.584061
| 2018-11-09T16:40:03
| 2018-11-09T16:40:03
| 181,727,649
| 1
| 0
| null | 2019-04-16T16:33:07
| 2019-04-16T16:33:07
| null |
UTF-8
|
R
| false
| true
| 581
|
rd
|
verhulst.update.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verhulst.model.r
\name{verhulst.update}
\alias{verhulst.update}
\title{The Verhulst (logistic) model - calculate change for one day}
\usage{
verhulst.update(Y, a, k)
}
\arguments{
\item{Y}{: state variable Y(t=day)}
\item{a}{: growth rate}
\item{k}{: capacity}
}
\value{
state variable at Y(t=day+1)
}
\description{
The Verhulst (logistic) model - calculate change for one day
}
\seealso{
\code{\link{verhulst.model}} for the integration loop function of the Verhulst model.
}
|
da2bf8cff6bb29ceab80f2bfbf6cc8a028819803
|
c166067186df88673a62d111dd8187267ba6b7fa
|
/R/gts.r
|
ec27e7b71b73e32498bf9c3d1356845b9401fce1
|
[] |
no_license
|
psmits/preserve
|
886552cbe93e2d16ff7157c622505b15780e4a39
|
15ee35bf0d56778ddfa9dd3b67071cfc36c91ac3
|
refs/heads/master
| 2020-04-15T11:53:31.574495
| 2019-01-17T21:04:17
| 2019-01-17T21:04:17
| 30,316,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,553
|
r
|
gts.r
|
# stage order from young to old
# Geological time scale stage names, ordered youngest -> oldest within each
# period, concatenated into `gts` (Cenozoic through Ordovician).
# NOTE(review): some spellings differ from the ICS chart ("Masstrichtian" vs
# Maastrichtian, "Eifilian" vs Eifelian) -- presumably they match the spelling
# used in the occurrence data these strings are compared against elsewhere;
# confirm before "correcting" them.
cenozoic <- c('Calabrian', 'Gelasian', 'Piacenzian', 'Zanclean', 'Messinian',
              'Tortonian', 'Serravallian', 'Langhian', 'Burdigalian',
              'Aquitanian', 'Chattian', 'Rupelian', 'Priabonian', 'Bartonian',
              'Lutetian', 'Ypresian', 'Thanetian', 'Selandian', 'Danian')
mesozoic <- c('Masstrichtian', 'Campanian', 'Santonian', 'Coniacian',
              'Turonian', 'Cenomanian', 'Albian', 'Aptian', 'Barremian',
              'Hauterivian', 'Valanginian', 'Berriasian', 'Tithonian',
              'Kimmeridgian', 'Oxfordian', 'Callovian', 'Bathonian',
              'Bajocian', 'Aalenian', 'Toarcian', 'Pliensbachian',
              'Sinemurian','Hettangian', 'Rhaetian', 'Norian', 'Carnian',
              'Ladinian', 'Anisian', 'Olenekian', 'Induan')
permian <- c('Changhsingian', 'Wuchiapingian', 'Capitanian', 'Wordian',
             'Roadian', 'Kungurian', 'Artinskian', 'Sakmarian', 'Asselian')
carboniferous <- c('Gzhelian', 'Kasimovian', 'Moscovian', 'Bashkirian',
                   'Serpukhovian', 'Visean', 'Tournaisian')
devonian <- c('Famennian', 'Frasnian', 'Givetian', 'Eifilian', 'Emsian',
              'Pragian', 'Lochkovian')
silurian <- c('Ludfordian', 'Gorstian', 'Homerian', 'Sheinwoodian',
              'Telychian', 'Aeronian', 'Rhuddanian')
ordovician <- c('Hirnantian', 'Katian', 'Sandbian', 'Darriwilian',
                'Dapingian', 'Floian', 'Tremadocian')
# Full stage sequence, youngest first.
gts <- c(cenozoic, mesozoic, permian, carboniferous,
         devonian, silurian, ordovician)
|
e25d35481d32531bc21af10ff814e5a202181913
|
82a166776f39db4ca236f5e22040f39b2f9dad8f
|
/R/JKalman.R
|
242a87da693715e5db804ee1f63e48376931c8f7
|
[] |
no_license
|
cran/ctsem
|
10397ed03d5f9d430dd096e991ec7762e08ddb20
|
af8a5eb855940a014d8f64720f01493d8ba292f5
|
refs/heads/master
| 2023-04-06T17:35:50.263992
| 2023-03-26T16:10:02
| 2023-03-26T16:10:02
| 40,494,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,364
|
r
|
JKalman.R
|
# NOTE(review): this entire chunk is disabled (1==99 is FALSE) -- it is
# scratch/workshop code sketching a pure-R Kalman filter mirroring ctsem's
# Stan implementation. Several pieces are incomplete or would error if
# enabled; see the NOTEs inline. Left byte-identical.
if(1==99){
  # Build a small 2-latent ctsem model and fit it to the bundled test data,
  # purely to obtain a populated standata list (s) and raw parameter vector.
  checkm<-ctModel(
    type='stanct',
    n.latent=2,n.TDpred=1,n.TIpred=1,n.manifest=2,
    MANIFESTVAR=matrix(c('merror',0,0,'merror'),2,2),
    MANIFESTMEANS=0,
    DRIFT=c('dr1','dr12','dr21||||TI1','dr22'),
    DIFFUSION=c('diff11',0,'diff21','diff22||||TI1'),
    CINT=matrix(c('cint1||||TI1','cint2||||TI1'),ncol=1),
    LAMBDA=diag(2),tipredDefault=FALSE)
  ctstantestfit<-ctStanFit(ctsem::ctstantestdat,checkm,cores=1,control=list(max_treedepth=4),
    optimize = F,optimcontrol=list(finishsamples=20,stochastic=F),nopriors=FALSE)
  s=ctstantestfit$standata
  pars <- ctstantestfit$stanfit$rawest
  # Draft Kalman-filter log-likelihood over the standata object `s`.
  Jkalman <- function(s, pars){
    ms <- data.frame(s$matsetup)
    mv <- data.frame(s$matvalues)
    #preallocate
    # System/state containers sized from the standata dimensions.
    II <- diag(1,s$nlatentpop)
    yprior <- rep(NA, s$nmanifest)
    ycov <- matrix(NA, s$nmanifest,s$nmanifest)
    ychol <- matrix(NA, s$nmanifest,s$nmanifest)
    state <- rep(NA,s$nlatentpop)
    statecov <- matrix(NA, s$nlatentpop,s$nlatentpop)
    T0VAR <- T0cov <- JA <- matrix(NA, s$nlatentpop,s$nlatentpop)
    A <- dtA <- matrix(NA, s$nlatent,s$nlatent)
    T0MEANS <- CINT <- dtb <- matrix(NA, s$nlatent)
    TDPREDEFFECT <- matrix(NA, s$nlatent, s$ntdpred)
    JTD <- matrix(NA, s$nlatentpop, s$ntdpred)
    G <- matrix(NA, s$nlatent,s$nlatent)
    Q <- matrix(NA, s$nlatent,s$nlatent)
    dtQ <- matrix(NA, s$nlatent,s$nlatent)
    Qinf <- matrix(NA, s$nlatent,s$nlatent)
    di <- 1:s$nlatent   # indices of the "dynamic" latent states
    LAMBDA <- matrix(NA, s$nmanifest,s$nlatent)
    JLAMBDA <- matrix(NA, s$nmanifest, s$nlatentpop)
    MANIFESTMEANS <- matrix(NA, s$nmanifest)
    MANIFESTVAR <- Mcov <- matrix(NA,s$nmanifest,s$nmanifest)
    K <- matrix(NA, s$nlatent,s$nmanifest)
    ll <- rep(NA, s$ndatapoints)
    # NOTE(review): t0step uses local <- (not <<-), so these assignments have
    # no effect outside the function -- presumably a bug in this draft.
    t0step <- function(){
      state[di] <- T0MEANS
      statecov <- T0cov
    }
    # Time-update (prediction) step over an interval dt.
    # NOTE(review): references JdtA, but the helper below assigns dtJA.
    dynamicstep <- function(){
      state <<- dtA %*% state[di] + dtb
      statecov <<- JdtA %*% statecov %*% t(JdtA)
      statecov[di,di] <<- statecov[di,di] + dtQ
    }
    # Add time-dependent predictor effects at observation i.
    # NOTE(review): `tdpreds` is never defined in this scope.
    tdpredstep <- function(){
      state[di] <<- state[di] + TDPREDEFFECT %*% tdpreds[i,]
      statecov <<- JTD %*% statecov %*% t(JTD)
    }
    # Measurement update for the observed indices oi.
    # NOTE(review): the Kalman gain formula looks wrong (should presumably be
    # statecov %*% t(JLAMBDA) %*% solve(ycov)), and the state update uses the
    # raw observation rather than the innovation -- unfinished draft.
    stateupdate <- function(){
      yprior[oi,] <<- LAMBDA[oi, ] %*% state[di] + MANIFESTMEANS[oi]
      ycov[oi,oi] <<- JLAMBDA[oi, ] %*% statecov %*% t(JLAMBDA[oi, oi]) + Mcov[oi,oi]
      K[,oi] <<- ycov[oi,oi] %*% t(JLAMBDA[oi,]) %*% solve(ycov[oi,oi])
      state <<- state + K[,oi] %*% y[i,]
      statecov <<- (II - K %*% JLAMBDA) %*% statecov
    }
    # NOTE(review): `di` is not a dnorm argument and trace() is not base R;
    # this would error if executed.
    loglik <- function(){
      return( dnorm(di = y[i,oi] - yprior[oi], log=TRUE) + log(trace(ychol[oi,oi])))
    }
    # Discrete-time system matrices from the continuous-time ones.
    dtAfunc <- function(dt) dtA<<-expm::expm(A * dt)
    dtJAfunc <- function(dt) dtJA<<-expm::expm(JA * dt)
    dtbfunc <- function() dtb <<- Ainv %*% (dtA-II[di,di]) %*% CINT
    # NOTE(review): `%di%` is not defined anywhere visible -- presumably %x%
    # (Kronecker product) was intended for the asymptotic-diffusion solve.
    Qinffunc <- function(){
      Ahatch<<-A %di% II[di,di] + II[di,di] %di% A
      Qinf<<-matrix(-solve(Ahatch , Q), nrow=nrow(A))
    }
    dtQfunc <- function(Qinf, dtJA) dtQ <<- Qinf - (dtJA %*% Qinf %*% t(dtJA ))
    # Stub: intended to populate system matrices from ms/mv for a given
    # "when" phase; the body is empty in this draft.
    fillSysMats<-function(when){
      for(ri in 1:nrow(ms)){
        if(ms$when[ri]==when){
          if(when > 0 || ms$indvarying[ri]>0 || ms$tipred[ri] >0 || si==1){
          }
        }
      }
    }
    tformState<-function(){
    }
    # Main filtering loop over observations.
    # NOTE(review): `ndatapoints` is unqualified (probably s$ndatapoints),
    # dtAfunc()/dtJAfunc need dt passed, and y is never defined.
    si <- t0check <- 0
    for(i in 1:ndatapoints){
      if(s$subject[i] != si) t0check <- 0 else t0check <- t0check + 1
      si <- s$subject[i]
      if(t0check > 0) dt <- s$time[i]-prevtime
      prevtime <- s$time[i]
      #t0 setup
      if(t0check==0){
        fillSysMats(when=0)
        fillSysMats(when=1)
        t0step()
      }
      if(t0check > 0){
        fillSysMats(when=2)
        dtAfunc()
        dtbfunc()
        Qinffunc()
        dtQfunc()
        dynamicstep()
      }
      if(s$ntdpred > 0){
        fillSysMats(when=3)
        tdpredstep()
      }
      oi <- s$which
      if(length(oi) > 0){
        stateupdate()
        ll[i]<-loglik()
      }
    }
    return(sum(ll,na.rm=TRUE))
  }
}
|
65f4d701a902bebe448cbf30accd9f9cf950c759
|
432a02b2af0afa93557ee16176e905ca00b653e5
|
/LBNL/Cepheid/extract_sample_patient_data.R
|
e373ea48f55a52044d7b27588211c4e807f04059
|
[] |
no_license
|
obigriffith/analysis-projects-R
|
403d47d61c26f180e3b5073ac4827c70aeb9aa6b
|
12452f9fc12c6823823702cd4ec4b1ca0b979672
|
refs/heads/master
| 2016-09-10T19:03:53.720129
| 2015-01-31T19:45:05
| 2015-01-31T19:45:05
| 25,434,074
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,303
|
r
|
extract_sample_patient_data.R
|
#Set working directory and filenames for Input/output
# NOTE(review): hard-coded, machine-specific Windows paths -- this one-off
# analysis script only runs as-is on the original author's machine.
setwd("C:/Users/Obi/Documents/My Dropbox/Projects/Cepheid/intellectual_property/RFRS/")
datafile="C:/Users/Obi/Documents/My Dropbox/Projects/Cepheid/processing/processed_final2/test_survival/combined/ALL_gcrma.txt" #combined (standardCDF + customCDF)
#Read in data (expecting a tab-delimited file with header line and rownames)
data_import=read.table(datafile, header = TRUE, na.strings = "NA", sep="\t")
header=colnames(data_import)
#Get predictor variables
# Probe IDs making up the optimized 17-probe RFRS predictor signature.
top17opt_probes=c("204767_s_at","10682_at","201291_s_at","9133_at","1164_at","208079_s_at","23224_at","55435_at","23220_at","201461_s_at","202709_at","57122_at","23405_at","201483_s_at","29127_at","204416_x_at","10628_at")
top17opt_data=data_import[data_import[,1]%in%top17opt_probes,]
# Transpose so rows are samples and columns are probes; expression values
# start at column 4 of the input table.
predictor_data=t(top17opt_data[,4:length(header)]) #Top20 optimized list
predictor_names=top17opt_data[,3] #gene symbol
colnames(predictor_data)=predictor_names
#Extract sample data for one patient to use as test case
#GSM36893 - predicted high risk (RFRS=0.810)
sample_predictor_data=as.data.frame(t(predictor_data["GSM36893.CEL",]))
rownames(sample_predictor_data)="GSM36893"
write.table(sample_predictor_data, file="patient_data.txt", col.names=NA, quote=FALSE, sep="\t")
|
46ee565ff850bc23242233cd23f1fa4f0671fdf5
|
9434187932d623555696d14e7c9431e41e4f4f42
|
/install_R_dependencies.R
|
89a6eae3b84df118eef716e8753e912efe361444
|
[
"MIT"
] |
permissive
|
Oshlack/MINTIE
|
0852c4aaa2f5d845746e18d191c88a4572c9ac17
|
4072e6d8dd4e39e57c93cf91cdeca28c9fe468fa
|
refs/heads/master
| 2022-12-11T20:29:30.955043
| 2022-10-20T04:22:37
| 2022-10-20T04:22:37
| 200,984,645
| 26
| 7
|
MIT
| 2022-10-20T04:22:38
| 2019-08-07T06:14:27
|
Python
|
UTF-8
|
R
| false
| false
| 924
|
r
|
install_R_dependencies.R
|
# Install the R package dependencies.
# CRAN packages are checked with require() (which returns FALSE instead of
# erroring when missing) and installed on demand; Bioconductor packages
# (tximport, edgeR) go through biocLite (R < 3.5) or BiocManager (R >= 3.5).
repos="http://cran.r-project.org"

# CRAN dependencies: the original file repeated the same check/install block
# five times; a loop keeps it DRY with identical behaviour (each package is
# loaded as a side effect of require(), as before).
cran_pkgs <- c("dplyr", "data.table", "readr", "jsonlite", "statmod")
for (pkg in cran_pkgs) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, repos = repos)
  }
}

# Bioconductor dependencies. Single `|` (not `||`) is deliberate: both
# require() calls run, so both packages get loaded when present.
if (!require("tximport") | !require("edgeR")) {
  # major.minor version string, e.g. "3.4" or "4.2"
  r_version = paste(R.Version()$major, strsplit(R.Version()$minor, '\\.')[[1]][1], sep='.')
  if (as.numeric(r_version) < 3.5) {
    # Legacy Bioconductor installer for R < 3.5.
    source("https://bioconductor.org/biocLite.R")
    biocLite("tximport")
    biocLite("edgeR")
  } else {
    if (!requireNamespace("BiocManager", quietly = TRUE)) {
      install.packages("BiocManager", repos = repos)
    }
    BiocManager::install("tximport")
    BiocManager::install("edgeR")
  }
}
|
0f74e18644ca15dc77ac9d0ef04f09368efa32aa
|
51f54ad3c888a3d89e482a3ad2f0b9ee48caabac
|
/pratica/17-shinydashboard-elementos.R
|
dd9a10284bd6ca157bef7470cbc00b6acc3af05b
|
[] |
no_license
|
gabrielDjusto/202109-dashboards
|
2af3e64786640f26a0cd5c33fc80b334fa06c8ac
|
836e38aac2204c225cdbc5a08f8dabf2dae5e63b
|
refs/heads/master
| 2023-08-23T21:03:50.051632
| 2021-10-26T01:18:23
| 2021-10-26T01:18:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,818
|
r
|
17-shinydashboard-elementos.R
|
library(shiny)
library(shinydashboard)
library(dplyr)
library(ggplot2)
# remotes::install_github("curso-r/basesCursoR")
# imdb <- basesCursoR::pegar_base("imdb")
separar <- function(tab, coluna) {
  # Distinct values of a comma-separated column (e.g. "Action, Drama").
  # `coluna` is an unquoted column name, forwarded via tidy evaluation.
  valores <- pull(tab, {{coluna}})
  partes <- stringr::str_split(valores, ", ")
  unique(purrr::flatten_chr(partes))
}
separar_e_contar_distintos <- function(tab, coluna) {
  # Count of distinct values found by separar() in the given column.
  length(separar(tab, {{coluna}}))
}
# Dashboard layout: a sidebar with three tabs (general info, financials,
# cast) and a body whose outputs are filled in by the server function below.
ui <- dashboardPage(
  dashboardHeader(),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Informações gerais", tabName = "info"),
      menuItem("Financeiro", tabName = "financeiro"),
      menuItem("Elenco", tabName = "elenco")
    )
  ),
  dashboardBody(
    tabItems(
      # Tab 1: headline counts (info boxes) plus a films-per-year chart.
      tabItem(
        tabName = "info",
        fluidRow(
          column(
            width = 12,
            h2("Informações gerais dos filmes")
          )
        ),
        br(),
        fluidRow(
          infoBoxOutput(outputId = "num_filmes", width = 4),
          infoBoxOutput(outputId = "num_dir", width = 4),
          infoBoxOutput(outputId = "num_atr", width = 4)
        ),
        fluidRow(
          column(
            width = 12,
            plotOutput("grafico_filmes_ano", height = "400px")
          )
        )
      ),
      # Tab 2: genre filter (rendered server-side) and budget-vs-revenue plot.
      tabItem(
        tabName = "financeiro",
        fluidRow(
          column(
            width = 12,
            h2("Financeiro")
          )
        ),
        fluidRow(
          box(
            width = 4,
            title = "Filtros",
            status = "info",
            solidHeader = TRUE,
            uiOutput(outputId = "ui_fin_genero")
          ),
          box(
            width = 8,
            plotOutput("grafico_orc_vs_receita")
          )
        )
      ),
      # Tab 3: intentionally empty.
      tabItem(
        tabName = "elenco",
        # Homework exercise: given an actor/actress (or director), show a
        # chart of that person's films and their ratings.
      )
    )
  )
)
# Server logic: loads the IMDB data once per session and fills the outputs
# declared in `ui`.
server <- function(input, output, session) {
  # imdb <- readr::read_rds("dados/imdb.rds")
  imdb <- basesCursoR::pegar_base("imdb")

  # Info box: total number of films, formatted with "." as thousands mark.
  output$num_filmes <- renderInfoBox({
    numero_de_filmes <- nrow(imdb) |>
      scales::number(big.mark = ".", decimal.mark = ",")
    infoBox(
      title = "Número de filmes",
      value = numero_de_filmes,
      subtitle = "teste",
      color = "orange",
      icon = icon("film"),
      fill = TRUE
    )
  })

  # Info box: number of distinct directors (comma-separated column).
  output$num_dir <- renderInfoBox({
    numero_dir <- separar_e_contar_distintos(imdb, direcao)
    infoBox(
      title = "Número de diretoras(res)",
      value = numero_dir,
      color = "fuchsia",
      icon = icon("film"),
      fill = TRUE
    )
  })

  # Info box: number of distinct cast members.
  # (A dead local `numero_de_filmes <- nrow(imdb)` was removed here; it was
  # never used in this observer.)
  output$num_atr <- renderInfoBox({
    numero_atr <- separar_e_contar_distintos(imdb, elenco)
    infoBox(
      title = "Número de atores/atrizes",
      value = numero_atr,
      color = "navy",
      icon = icon("film"),
      fill = TRUE
    )
  })

  # Bar chart: number of films per release year.
  output$grafico_filmes_ano <- renderPlot({
    imdb |>
      count(ano, sort = TRUE) |>
      ggplot(aes(x = ano, y = n)) +
      geom_col(color = "black", fill = "pink") +
      ggtitle("Número de filmes por ano")
  })

  # Genre picker, built server-side from the distinct genres in the data.
  output$ui_fin_genero <- renderUI({
    generos <- separar(imdb, genero) |> sort()
    selectInput(
      inputId = "fin_genero",
      label = "Selecione um ou mais gêneros",
      multiple = TRUE,
      choices = generos,
      selected = "Action"
    )
  })

  # Scatter plot: budget vs revenue for films in the selected genres
  # (one point per title, after exploding the multi-genre column).
  output$grafico_orc_vs_receita <- renderPlot({
    imdb |>
      mutate(
        genero = stringr::str_split(genero, ", ")
      ) |>
      tidyr::unnest(genero) |>
      filter(genero %in% input$fin_genero) |>
      distinct(titulo, .keep_all = TRUE) |>
      ggplot(aes(x = orcamento, y = receita)) +
      geom_point()
  })
}

shinyApp(ui, server)
|
e3e00fc71f57f1b74447922202be944ea7b0ea6d
|
9abed51f8f76a64b961dd8f4d9be0f0e42c50453
|
/exploratory_data_analysis/wk3/proj2/src/plot5.R
|
4d038fa1704c14a279c995dab9bdaa7b76f7581b
|
[] |
no_license
|
ddexter/datasciencecoursera
|
1546e6a087bf2e351ae4210779f3879eb23d0fd5
|
a23811f23a787fbe441ba9f4e06942b34a72fe3e
|
refs/heads/master
| 2021-01-02T09:26:37.010181
| 2014-10-26T14:29:00
| 2014-10-26T14:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 738
|
r
|
plot5.R
|
library('ggplot2')
nei <- readRDS('../data/summarySCC_PM25.rds')
scc <- readRDS('../data/Source_Classification_Code.rds')
# Select all sectors pertaining to 'mobile' since "motor vehicle" is an ambiguous term
scc_mobile <- subset(scc, grepl('mobile', EI.Sector, ignore.case=TRUE))
nei_mobile <- subset(nei, SCC %in% unique(scc_mobile$SCC))
# Total emissions for Baltimore City, MD
df <- subset(nei_mobile, fips == '24510')
df <- aggregate(Emissions~year, df, sum)
png('../out/plot5.png')
# Total emissions by mobile
ggplot(data=df, aes(x=sapply(year,as.character), y=Emissions)) +
geom_bar(stat='identity') +
xlab('year') +
ylab('PM 2.5 Emissions (tons)') +
ggtitle('Baltimore, MD Motor Vehicle Emissions in U.S.')
dev.off()
|
6da80b59d312a163b7107bc06274cc457c4cf16c
|
b9486008ccf0d1bce671131c2111b3928334b83b
|
/tests/regtest-inference.R
|
d871b6b08dbff7372ca05990b0e13043a266e62b
|
[] |
no_license
|
strategist922/mboost
|
11405a7e168130619cd6134576ca528d564ca53f
|
5d06f360b66620ffc9fe5b800e7c97247f27e0b8
|
refs/heads/master
| 2021-01-17T21:14:42.747037
| 2017-03-03T07:20:06
| 2017-03-03T07:20:06
| 84,169,384
| 1
| 0
| null | 2017-03-07T07:37:34
| 2017-03-07T07:37:33
| null |
UTF-8
|
R
| false
| false
| 1,139
|
r
|
regtest-inference.R
|
require("mboost")
set.seed(1907)
### check confidence intervals
data("bodyfat", package = "TH.data")
bodyfat$ID <- factor(sample(1:5, size = nrow(bodyfat), replace = TRUE))
glm <- glmboost(DEXfat ~ ., data = bodyfat)
gam <- gamboost(DEXfat ~ ., data = bodyfat)
refit <- glm$update(weights = model.weights(glm), risk = "inbag")
stopifnot(all.equal(coef(refit), coef(glm)))
glm[200]
confint.glm <- confint(glm, B = 100, B.mstop = 2)
confint.glm
confint.gam <- confint(gam, B = 100, B.mstop = 1)
plot(confint.gam, which = 1)
plot(confint.gam, which = 2)
plot(confint.gam, which = 3)
### check cvrisk (it should run even if a fold leads to an error)
folds <- cv(model.weights(glm), type = "kfold")
folds[1, 1] <- NA
cvrisk(glm, folds = folds, papply = lapply)
cvrisk(glm, folds = folds, papply = mclapply)
## test if cvrisk starts at 0 and provides a sensible model
data <- data.frame(y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), x3 = rnorm(100))
glm <- glmboost(y ~ ., data = data)
gam <- gamboost(y ~ ., data = data)
cvr.glm <- cvrisk(glm)
cvr.gam <- cvrisk(gam)
stopifnot(mstop(cvr.glm) == 0)
stopifnot(mstop(cvr.gam) == 0)
|
b710f97f1006a5bf0efe2d62d45e62870d79946e
|
680f44adcd020315efe719852ab4b112ad89bc83
|
/apps/124-async-download/tests/shinytest.R
|
351e5f9078ebbf2d9d7221badfaecd5d811a6021
|
[
"MIT"
] |
permissive
|
rstudio/shinycoreci-apps
|
73e23b74412437982275d60ded5ac848fc900cbe
|
add53dde46fc9c31f2063f362ea30ca4da3b2426
|
refs/heads/main
| 2023-04-15T09:10:38.668013
| 2022-06-15T17:53:03
| 2022-06-15T17:57:59
| 228,669,687
| 39
| 5
|
NOASSERTION
| 2023-03-30T06:07:28
| 2019-12-17T17:38:59
|
JavaScript
|
UTF-8
|
R
| false
| false
| 132
|
r
|
shinytest.R
|
# This application has a random issue about a url not being valid. Better to manually test
if (FALSE) {
shinytest2::test_app()
}
|
fdce52fc42ccdd906d0a21f9d10a3c2f06238216
|
fb560d821418a1cecb0c9d5705514b54c9f2d244
|
/bootcamp-aditi.R
|
66ac012a126e3f0f49a1e5360e5eae117aa18355
|
[] |
no_license
|
SurajMalpani/BootcampR2018
|
49619906f4e393d360f418012b3db590b862d43a
|
fd4d0e688978da08f4a2c45bd4ab026b08ddf9b2
|
refs/heads/master
| 2021-04-26T23:41:46.908640
| 2018-03-04T20:20:43
| 2018-03-04T20:20:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,484
|
r
|
bootcamp-aditi.R
|
#Assignment operator
x <- 1
#Explicit Print
print(x)
#Auto Print
x
#What is the [1] for?
# ->Depicts a vector but not part of it
#Sequence
x<- 1:20
x
#Infinite
1/Inf
#NAN
0/0
#Vector
x <- c(1.5,2.3) #numeric
x <- c(TRUE,FALSE)
x <- c("a","b","c")
x <- vector()
class(x)
length(x)
#Implicit Coercion
y <- c(1.7, "a")
y <- c(TRUE, 2)
y <- c("a", TRUE)
class(y)
#Explicit Coercion
x <- 0:6
class(x)
as.numeric(x)
as.logical(x)
x <- c("a","b","c")
as.numeric(x)
#Matrices
m <- matrix(1:6, nrow = 2, ncol = 3)
m
dim(m)
attributes(m)
#Creating a matrix from a vector
m <- 1:10
m
dim(m) <- c(2,5)
m
x <- 1:3
y <- 10:12
cbind(x,y)
rbind(x,y)
#List
x<- list(1, "A", TRUE, 1+4i)
x
#Factors
x <- factor(c("yes","yes","no","no","yes"))
x
table(x)
unclass(x)
x <- factor(c("yes","yes","no","no","yes"), levels = c("yes", "no"))
x
unclass(x)
#Missing values
x <- c(1, 2, NA, 10, 3)
is.na(x)
is.nan(x)
#Data Frames
x <- data.frame(foo = 1:4, bar= c(T,T,F,F))
x
attributes(x)
#Names
names(x)
names(x) <- c("A","B")
x
x <- list(Boston = 1, London = 2)
x
x$Boston
x[1]
dimnames(m) <- list(c("a","b"), c("c","d","e","f","g"))
m
rownames(m) <- c("row1", "row2")
m
#Read and Write data
getwd()
setwd("C:/Users/aditi/Desktop/Adv BA with R")
dept <- read.table("departments.txt", header = FALSE, sep=" ")
write.csv(dept, file = "deptdata.csv")
#Subsets
#1. Vectors
x <- c("a","b","c","d")
x[1]
x[1:3]
x[c(3,4)]
u <- x > "a"
x[u]
#2. Matrix
x<- matrix(1:6, 2, 3)
x
x[1,]
x[,2]
#3. List
x <- list(foo=1:4, bar=0.6, baz="hello")
x
x[[1]]
x[["bar"]]
x[1]
x$bar
x[c(1,3)]
#4. Partial Matching
x <- list(aardvark = 1:5)
x$a
x[["a"]]
x[["aar", exact = FALSE]]
#5. Removing NA
x <- c(1,2,NA,3,NA,4)
bad <- is.na(x)
bad
x[!bad]
#6. Complete cases - Incase you have more than one R object
x <- c(1, 2, NA, 4, NA, 5)
y <- c("a", "b", NA, "d", NA, "f")
good <- complete.cases(x, y)
good
x[good]
y[good]
#7. Subset function
subset(x, x>3)
#Date and Time
x <- as.Date("1970-01-01")
x
unclass(as.Date("1970-01-02"))
x <- Sys.time()
x
class(x)
unclass(x) #in POSIXct format
p <- as.POSIXlt(x)
names(unclass(p))
p$wday
datestring <- c("January 10, 2012 10:40", "December 9, 2011 9:10")
x <- strptime(datestring, "%B %d, %Y %H:%M")
?strptime
x
#Clearing out Environment
rm(list=ls())
|
53146c8a527e667d0ba6a2df539c8d07eed45a65
|
0977efb8fd05e0a7a6802fc7465389dc5613e437
|
/#2_code/packages.R
|
655c5ef9706fe493fe4b6d617482a8dbcdfb2024
|
[] |
no_license
|
dataiscoming/titanic_R
|
1d83a95db33cff34498395e182763ceb8e15ffd9
|
86e5ff5d7c2e4922ee88ea29027f319da6100086
|
refs/heads/master
| 2023-03-21T21:45:44.589180
| 2021-03-17T12:19:16
| 2021-03-17T12:19:16
| 347,649,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 669
|
r
|
packages.R
|
# Packages
# Do not forget to install Rtools before !
# list of the R package to install
packages_cran <- c("rlang","dplyr", "stringr", "naniar", "VIM", "ggplot2", "reticulate", "mice", "corrplot", "Hmisc", "aod",
"lmtest","pscl","ResourceSelection","tibble","broom","ggimage","rsvg","ggnewscale", "pROC", "remotes",
"cvms", "caret")
packages_python <- c("kaggle")
install_load_packages(list_packages_cran = packages_cran,
list_packages_python = packages_python)
rm(packages_cran, packages_python, files.sources)
# Change the environnement variable for the kaggle file
Sys.setenv(KAGGLE_CONFIG_DIR = "#1_input/")
|
1ebec85b96ce1c24c8a96ef1d1374d736211b591
|
038ab87e58aaa8fe73cdfabb36fd6189955b324b
|
/R/authorMatrix/make_data_for_model.R
|
7303265a499c76a99efac58760e938a08281a614
|
[] |
no_license
|
Oleg-295/InfluencingCogSci
|
b83ed09b2db58b152249f213bc32b45c6c5fd075
|
f955090f4863584dc3ec5750af5fbc1bebcc1edf
|
refs/heads/master
| 2022-09-22T01:39:58.795033
| 2020-05-30T21:43:20
| 2020-05-30T21:43:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,026
|
r
|
make_data_for_model.R
|
library(tidyverse)
library(lme4)
library(futile.matrix)
i=1
coauthor_mats = list()
topicSims = list()
for(year in 1981:2019){
load(paste0("authorMatrix/fullcogsci_binary/author_mat_year_fullcogsci_", year,".RData"))
coauthor_mats[[i]] <- author_mat_year
names(coauthor_mats[[i]]) <-NULL
topicSims[[i]] <- read_csv(paste0("topicSimYear/cogsci_topicSim_", year,".csv"))
i = i+1
}
length(topicSims) == length(coauthor_mats)
isSymmetric(coauthor_mats[[39]])
# this makes is the coauthorship network is symmetric
makeSym = function(mat){
mat[upper.tri(mat, diag = TRUE)] <- t(mat)[upper.tri(mat, diag = TRUE)]
return(mat)
}
# Lauren's attempt to combine topic similarity and publication.
# after running this, still need to full_bind() with next year's coauthorship network
topic.coauthor.matrices <- list()
years = 1981:2019
for(i in 1:length(topicSims)){
author_key = fullcogsci_byAuthor %>% filter(year == years[i]) %>% pull(authorAbbr) %>% unique()
print(i)
tempTop <- topicSims[[i]]
tempTop <- tempTop %>%
spread(authorB, authorsSim) %>%
dplyr::select(-authorA) %>%
as.matrix()
tempTop[upper.tri(tempTop, diag = TRUE)] <- NA #ignore upper triangle
tempAuth <- coauthor_mats[[i]]
colnames(tempAuth) <- author_key
#peek(tempAuth, 15)
tempTop.df <- tempTop %>%
as.data.frame() %>%
gather("authorA","topicSim",1:ncol(.)) %>%
mutate(authorB=rep(colnames(tempTop),length(colnames(tempTop)))) %>%
filter(!is.na(topicSim))
tempAuth.df <- tempAuth %>%
as.data.frame() %>%
gather("authorA","prior_publication",1:ncol(.)) %>%
mutate(authorB=rep(colnames(tempAuth),length(colnames(tempAuth)))) %>%
filter(!is.na(prior_publication))
tempBoth <- tempTop.df %>%
left_join(tempAuth.df, by=c("authorA","authorB")) #this takes the longest
topic.coauthor.matrices[[i]] <- tempBoth
}
write.csv( topic.coauthor.matrices[[i]], file = "topic.coauthor.matrices.fullcogsci.csv")
|
8dafae24671e922dfe04a1ae8346c1bf2ddef19d
|
960835d9f4da6a28fcef02d3a290d2cec893472e
|
/server.R
|
774ccaef00725ff58495c42b570d15664345ec56
|
[] |
no_license
|
DataDan01/DDP-Course-Project
|
088e4fffb644a1aee0777a06617d236d0b561eeb
|
6d90dc39b7c7da6c1b7559f34806adf811dbcd96
|
refs/heads/master
| 2021-01-10T18:01:53.098340
| 2015-12-22T17:21:47
| 2015-12-22T17:21:47
| 48,408,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,898
|
r
|
server.R
|
##Large list of English words to be used in the function below.
dictionary<-readLines("./dictionary.txt")
##Top "filler" words used in the English language.
filler.words<-read.table("./fillerwords.txt")
##Function to get a dataframe of word counts.
clean.word.count<-function(url){
##Changing https to http
if(substr(url,5,5)=="s")
{url<-sub("^....(.)","http",url)}
##Read in the text from the source.
text <- readLines(url)
##Collpase the text into one long character vector.
text<-paste(text,collapse=" ")
##Get rid of numbers and punctuation. Make everything lower case.
text<-gsub("[[:punct:]]", "", text)
text<-gsub("[[:digit:]]", "", text)
text<-tolower(text)
##Split large character vector into substrings.
text<-strsplit(text,split=" ")
##Create a frequency table and make it into a dataframe.
##Get rid of the empty space count.
text.df<-as.data.frame(table(text))
text.df<-text.df[-(which(text.df[,1]==c(""))),]
##Checking that the words are in English.
text.df<-text.df[(text.df$text %in% dictionary),]
##Sort by count and turning text into a factor.
text.df<-text.df[order(text.df$Freq,decreasing=TRUE),]
text.df$text<-factor(text.df$text, levels = text.df$text)
return(text.df)
}
##Downloading some examples.
#Atlas.Shrugged.url<-"https://raw.githubusercontent.com/blueconcept/Data-Compression-using-Huffman/master/Rand%2C%20Ayn%20-%20Atlas%20Shrugged.txt"
#Atlas.Shrugged<-clean.word.count(Atlas.Shrugged.url)
##
#R.wiki.page.url<-"https://en.wikipedia.org/wiki/R_(programming_language)"
#R.wiki<-clean.word.count(R.wiki.page.url)
##
#MLK.speech.url<-"http://www.let.rug.nl/usa/documents/1951-/martin-luther-kings-i-have-a-dream-speech-august-28-1963.php"
#MLK.speech<-clean.word.count(MLK.speech.url)
library(ggplot2)
##Plotting function.
word.freq.plot<-function(text.df,no.words=5,filler.rm=F){
##Remove filler words if the user wants to.
if(filler.rm==TRUE)
{text.df<-text.df[!(text.df$text %in% filler.words$V2),]}
freq.plot<-ggplot(text.df[1:no.words,],aes(x=text,y=Freq)) +
geom_bar(aes(fill=text),stat="identity") +
ggtitle("Word Frequency Count") +
theme(plot.title=element_text(face="bold",size=20)) +
ylab("Frequency") +
xlab("Word") +
theme(axis.text.x=element_text(angle=90,hjust=0,vjust=0.25)) +
theme(axis.text.y=element_text(vjust=-0.25)) +
theme(legend.position="none") +
theme(axis.text.x=element_text(size=(18-.2/3*no.words)),
axis.text.y=element_text(size=16),
axis.title=element_text(size=14,face="bold"))
return(freq.plot)
}
library(wordcloud)
##Word cloud plotting function.
word.cloud.funct<-function(text.df,no.words=5,filler.rm=F){
##Remove filler words if the user wants to.
if(filler.rm==TRUE)
{text.df<-text.df[!(text.df$text %in% filler.words$V2),]}
word.cloud<-wordcloud(words=text.df$text,freq=text.df$Freq,
max.words=no.words,colors=brewer.pal(8,"Dark2"),
rot.per=0.35,random.order=FALSE,
scale=c((7-no.words/50),(2-no.words/50)))
title("Word Cloud")
return(word.cloud)
}
library(shiny)
shinyServer(function(input, output){
#appInput <- reactive({
#switch(input$text.select,
#"My Custom Input (above)" = clean.word.count(input$manual.input),
#"Atlas Shrugged by Ayn Rand" = Atlas.Shrugged,
#"R.wiki" = R.wiki,
#"MLK I Have A Dream Speech" = MLK.speech
#)
#})
output$wordplot <- renderPlot({
word.freq.plot(clean.word.count(input$manual.input),input$no.words,filler.rm=input$filler.check)
})
output$wordcloud <- renderPlot({
word.cloud.funct(clean.word.count(input$manual.input),input$no.words,filler.rm=input$filler.check)
})
})
#runApp(display.mode="showcase")
|
d88b576a49cce8a96162a5a41487cd361aea8aeb
|
cb39b112d37440dadfad339ad7a9f8a769352a35
|
/Survival_analysis/rcode/gbmci.R
|
773a403852a42ced99f4e8301a98b377d58c14d6
|
[] |
no_license
|
Humboldt-WI/InformationSystemsWS1718
|
c6974ac87cc3cdac69fe2d042f2651c0a477cdbf
|
7eda9af502a9ba19c9cf6f885dfb7bfa7158aa31
|
refs/heads/master
| 2023-01-01T18:03:06.957460
| 2018-03-16T09:28:47
| 2018-03-16T09:28:47
| 108,554,909
| 0
| 1
| null | 2020-10-20T15:21:24
| 2017-10-27T14:16:48
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,705
|
r
|
gbmci.R
|
memory.limit(30000)
if("package:gbm" %in% search()) detach("package:gbm", unload=TRUE)
install.packages("C:/Users/B_JD/Desktop/GBMCI-master", repos = NULL, type = "source")
library("gbm")
if(!require("KMsurv")) install.packages("KMsurv"); library("KMsurv")
if(!require("survAUC")) install.packages("survAUC"); library("survAUC")
#if(!require("survival")) install.packages("survival"); library("survival")
#for pbc dataset
#cox.R must have run to create train test, and coxph object!
gbmcipbc = gbm(formula =Surv(trainpbc$time,trainpbc$status ==2 )~ coxpredicted_trainpbc,
distribution = "sci",
n.trees = 2500,
shrinkage = 1,
n.minobsinnode = 4)
summary(gbmcipbc)
gbmcitrainpbc = predict(object = gbmcipbc,
newdata = trainpbc,
n.trees = 1500,
type = "response")
#gives error: Warning message:
#'newdata' hat 104 Zeilen , aber die gefundenen Variablen haben 312 Zeilen
#'#seems to be a bug as it doesn't happen in any other case
#'r believes the predictor in the gbm model, here ~ coxpredicted_trainpbbc comes from the dataset
#'that the response variables are part of. in fact its the cumulated relative hazards from the pbc.cox model
gbmcitestpbc = predict(object = gbmcipbc,
newdata = testpbc,
n.trees = 1500,
type = "response")
Survresptrainpbcci <- Surv(trainpbc$time,trainpbc$status==2)
Survresptestpbcci <- Surv(testpbc$time,testpbc$status == 2)
CI_gbmcipbc <- BeggC(Survresptrainpbcci, Survresptestpbcci, gbmcitrainpbc, gbmcitestpbc)
if(CI_gbmcipbc<=0.5){
CI_gbmcipbc =1-CI_gbmcipbc
}
CI_gbmcipbc
|
cf3724eb63b52d537c6bdf4993b7363bd67b5bdb
|
1426ecc326a9ffb44582def40f464a6f911d917f
|
/programs/adam/advs.R
|
ce6810c954792044914c1a57b7c64737b090d86c
|
[] |
no_license
|
openpharma/rinpharma_workshop_2021
|
2c902ac7a24453cbb069c3aa8cd3ee9e18ccfb0f
|
b4d22d56e69cfb2e332d0255cc9c788a5183b42b
|
refs/heads/main
| 2023-08-19T20:02:19.930151
| 2021-10-25T16:01:44
| 2021-10-25T16:01:44
| 418,849,315
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,227
|
r
|
advs.R
|
library(admiral)
library(dplyr)
library(lubridate)
vs <- readRDS("data_demo/VS.rds")
adsl <- readRDS("data_demo/ADSL.rds")
param_lookup <- tibble::tribble(
~VSTESTCD, ~PARAMCD, ~PARAM,
"SYSBP", "SYSBP", "Systolic Blood Pressure (mmHg)",
"DIABP", "DIABP", "Diastolic Blood Pressure (mmHg)",
"PUL", "PULSE", "Pulse Rate (beats/min)",
"WGHT", "WEIGHT", "Weight (kg)",
"HGHT", "HEIGHT", "Height (cm)",
"EMP", "TEMP", "Temperature (C)"
)
range_lookup <- tibble::tribble(
~PARAMCD, ~ANRLO, ~ANRHI, ~A1LO, ~A1HI,
"SYSBP", 90, 130, 70, 140,
"DIABP", 60, 80, 40, 90,
"PUL", 60, 100, 40, 110,
"TMP", 36.5, 37.5, 35, 38
)
advs <- vs %>%
# Join ADSl variables
left_join(adsl) %>%
# ADTM
derive_vars_dtm(
new_vars_prefix = "A",
dtc = VSDTC
#time_imputation =,
#flag_imputation =
) %>%
# ADTM -> ADT
mutate(ADT = ADTM) %>%
# # ADT
# derive_vars_dt (
# new_vars_prefix = "AST",
# dtc = ADT
#)
derive_var_ady(reference_date = TRTSDT, date = ADT) %>%
# PARAMCD, PARAM etc.
left_join(param_lookup, by = "VSTESTCD") %>%
# AVAL, AVALC
mutate(
AVAL = VSSTRESN,
AVALC = VSSTRESC,
) %>%
# ONTRTFL
derive_var_ontrtfl(
start_date = ADT,
ref_start_date = TRTSDT,
ref_end_date = TRTEDT
) %>%
derive_extreme_flag(
by_vars = vars(STUDYID, USUBJID, PARAMCD),
order = vars(ADT, VSSEQ),
new_var = ABLFL,
mode = "last",
filter = (!is.na(AVAL) & ADT <= TRTSDT)
) %>%
derive_var_base(
by_vars = vars(STUDYID, USUBJID, PARAMCD)
) %>%
derive_var_basec(
by_vars = vars(STUDYID, USUBJID, PARAMCD)
) %>%
derive_var_chg() %>%
derive_var_pchg() %>%
left_join(range_lookup, by = "PARAMCD") %>%
derive_var_anrind() %>%
derive_baseline(
by_vars = vars(STUDYID, USUBJID, PARAMCD),
source_var = ANRIND,
new_var = BNRIND
)
# # ABLFL
# derive_extreme_flag(
#
# ) %>%
#
# # BASE
# derive_var_base() %>%
#
# # BASEC
# derive_var_basec() %>%
#
# # CHG
# derive_var_chg() %>%
#
# # PCHG
# derive_var_pchg() %>%
#
# # Reference range
#
# # ANRIND
#
# # BNRIND
# derive_baseline(
#
# )
|
63ed1507196371b37f8f809805a6e4cc490493a7
|
cf3efcb13e447aca3e70d6b84af637d995956f70
|
/R/add_year_column.R
|
bf07da9c56e2ec240898b22d68e5cbbb36b19b8e
|
[] |
no_license
|
davan690/brotools
|
02fef9b9fde7acbd1c4d9a919efbb942a2936801
|
7e6f0b1a3f03bf50dc6a9583ef67590f488af8bc
|
refs/heads/master
| 2022-04-17T22:30:08.934999
| 2020-04-12T20:31:34
| 2020-04-12T20:31:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,353
|
r
|
add_year_column.R
|
#' Adds the year column
#' @param list_of_datasets A list containing named datasets
#' @return A list of datasets with the year column
#' @description This function works by extracting the year string contained in
#' the data set name and appending a new column to the data set with the numeric
#' value of the year. This means that the data sets have to have a name of the
#' form data_set_2001 or data_2001_europe, etc
#' @export
#' @examples
#' \dontrun{
#' #`list_of_data_sets` is a list containing named data sets
#' # For example, to access the first data set, called dataset_1 you would
#' # write
#' list_of_data_sets$dataset_1
#' add_year_column(list_of_data_sets)
#' }
add_year_column <- function(list_of_datasets){
for_one_dataset <- function(dataset, dataset_name){
if ("ANNEE" %in% colnames(dataset) | "Annee" %in% colnames(dataset)){
return(dataset)
} else {
# Split the name of the data set and extract the number index
index <- grep("\\d+", stringr::str_split(dataset_name, "[_.]", simplify = TRUE))
# Get the year
year <- as.numeric(stringr::str_split(dataset_name, "[_.]", simplify = TRUE)[index])
# Add it to the data set
dataset$ANNEE <- year
return(dataset)
}
}
output <- purrr::map2(list_of_datasets, names(list_of_datasets), for_one_dataset)
return(output)
}
|
ae71a09b7ad2e2cd0c0081a7ab946b29471e8af7
|
8705b9a9733add3083b646a1a5bb7fd784c40a10
|
/tests/testthat/test-BibEntry.R
|
09dec92dc4180ed6577889fa07160c6f5cb9ac40
|
[] |
no_license
|
zeehio/RefManageR
|
5e8dd44652d4660d1921cf7eddbfd957f14fbcd3
|
5498e23421cac14526f24dc2976f7ff2719c717b
|
refs/heads/master
| 2021-01-25T09:32:11.080002
| 2017-06-02T15:22:39
| 2017-06-02T15:22:39
| 93,852,266
| 1
| 0
| null | 2017-06-09T11:28:43
| 2017-06-09T11:28:43
| null |
UTF-8
|
R
| false
| false
| 1,117
|
r
|
test-BibEntry.R
|
context("BibEntry")
## unloadNamespace("RefManageR")
## library(RefManageR)
test_that("BibEntry function parses LaTeX-style name fields", {
bib <- BibEntry(bibtype = "Article", key = "mclean2014",
title = "An Article Title",
editora = "Smith, Bob",
author = "McLean, Mathew W. and Ruppert, David and Wand, Matt P.",
journaltitle = "The Journal Title",date = "2014-02-06",
pubstate = "forthcoming")
expect_equal(length(bib$author$family), 3L)
expect_equal(length(bib$author[[3]]$given), 2L)
expect_equal(bib$editora$given, "Bob")
})
test_that("BibEntry still works if latexToUtf8 hangs", {
bib <- BibEntry(bibtype = "Article", key = "mclean2014",
title = "An Article Title",
editora = "Smith, Bob",
author = "Abad, Mar{\\'\\i}a J. F. and Balboa, Mar{\\a'\\i}a A",
journaltitle = "The Journal Title",date = "2014-02-06",
pubstate = "forthcoming")
expect_output(print(bib))
expect_equal(length(bib$author$family), 2L)
})
|
ac1537cbec502be77c779b67af6d87de6ad293e6
|
a78ce9fb08d3fead97f82219056b571eb40b7d6b
|
/R/sample_count_2022.R
|
53b4490930bced1a058bec3f9e75bac233a2596c
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
davemcg/eyeIntegration_app
|
c8f03a105c7c5e731169b2dfcabf6cef3a53425b
|
4f23191cb0cfa07902cab80cdff7483d6fed67ad
|
refs/heads/master
| 2023-09-03T05:02:08.757529
| 2023-08-31T19:07:47
| 2023-08-31T19:07:47
| 164,459,029
| 5
| 3
|
NOASSERTION
| 2023-09-04T01:28:31
| 2019-01-07T16:20:43
|
HTML
|
UTF-8
|
R
| false
| false
| 7,725
|
r
|
sample_count_2022.R
|
#' Build bar plot graphic
#'
#' Builds bar plot on main page
#'
#' @examples
#' \dontrun{source('R/sample_count_2017_2019_2022.R')}
#'
library(tidyverse)
library(pool)
library(RSQLite)
library(colorspace)
library(svglite)
sample_count_2022 <- function(){
app_location <- '/Users/mcgaugheyd//git/eyeIntegration_app/inst/app'
#gene_pool_2017 <- dbPool(drv = SQLite(), dbname = paste0(app_location, "/www/2017/eyeIntegration_human_2017_01.sqlite"), idleTimeout = 3600000)
#gene_pool_2019 <- dbPool(drv = SQLite(), dbname = paste0(app_location, "/www/2019/EiaD_human_expression_2019_04.sqlite"), idleTimeout = 3600000)
gene_pool_2022 <- dbPool(drv = SQLite(), dbname = paste0(app_location, "/www/2022/eyeIntegration_2022_human.sqlite"))
core_tight_2017 <- gene_pool_2017 %>% tbl('metadata') %>% as_tibble()
core_tight_2019 <- gene_pool_2019 %>% tbl('metadata') %>% as_tibble()
core_tight_2022 <- gene_pool_2022 %>% tbl('metadata') %>% as_tibble()
core_tight_2022 <- core_tight_2022 %>% mutate(Sub_Tissue = case_when(is.na(Sub_Tissue) ~ '', TRUE ~ Sub_Tissue),
Source = case_when(is.na(Source) ~ '', TRUE ~ Source),
Age = case_when(is.na(Age) ~ '', TRUE ~ Age),
Perturbation = case_when(is.na(Perturbation) ~ '', TRUE ~ Perturbation),
Tissue = case_when(is.na(Tissue) ~ '', TRUE ~ Tissue))
# # fix tissue <-> color
# meta <- 'core_tight_2022'
# tissue_col <- scale_fill_manual(values = setNames(c(pals::glasbey(n = 32),
# pals::kelly(n = get(meta) %>% pull(Tissue) %>% unique() %>% length() - 32 + 1)[-1]) %>%
# colorspace::lighten(0.3), get(meta) %>% pull(Tissue) %>% unique() %>% sort()))
# Use global tissue values to match remainder of eyeIntegration app
tissues <- c(core_tight_2017$Tissue, core_tight_2019$Tissue, core_tight_2022$Tissue)%>% unique() %>% sort()
tissue_fill <- scale_fill_manual(values = setNames(c(pals::polychrome()[3:36], pals::kelly()[c(3:7,10:21)])[1:length(tissues)], tissues %>% sort()))
a <- core_tight_2022 %>%
arrange(Tissue) %>%
mutate(GTEx = case_when(study_accession == 'SRP012682' ~ 'GTEx', TRUE ~ 'Eye')) %>%
select(run_accession, Tissue, Sub_Tissue, Age, Source, Perturbation, GTEx) %>% unique() %>%
mutate(Sub_Tissue = case_when(is.na(Sub_Tissue) ~ '', TRUE ~ Sub_Tissue),
Source = case_when(is.na(Source) ~ '', TRUE ~ Source),
Age = case_when(is.na(Age) ~ '', TRUE ~ Age),
Perturbation = case_when(is.na(Perturbation) ~ '', TRUE ~ Perturbation)) %>%
mutate(Sub_Tissue = glue::glue("<span style='color:#000000FF'>{Sub_Tissue}</span>"),
Source = glue::glue("<span style='color:#1E46A2FF'>{Source}</span>"),
Age = glue::glue("<span style='color:#FB323BFF'>{Age}</span>"),
Perturbation = glue::glue("<span style='color:#85660D'>{Perturbation}</span>")
) %>%
mutate(expanded_name = paste0(Tissue, sep = " | ", Sub_Tissue, sep = " | ",
Age, sep = " | ", Perturbation, sep = " | ",
Source)) %>%
group_by(GTEx, Tissue, Sub_Tissue, Age, Source, Perturbation, expanded_name) %>%
filter(GTEx == 'Eye') %>%
count(name="Count") %>%
ungroup() %>%
#mutate(Perturbation = case_when(grepl('MGS', Source_details) ~ Source_details)) %>%
ggplot(data=.,aes(x=interaction(Source, Sub_Tissue, Age, Perturbation, sep = ' | '),y=Count,
fill = Tissue)) +
#geom_violin(alpha=0.5, scale = 'width') +
geom_bar(stat = 'identity', position = 'dodge') +
cowplot::theme_cowplot(font_size = 15) + theme(axis.text.x = element_text(angle = 90, hjust=1, vjust = 0.2)) +
ylab("Count") +
theme(strip.background = element_rect(fill = 'black'),
strip.text = element_text(color = 'white'),
panel.background = element_rect(fill = 'gray90'),
plot.margin=grid::unit(c(0,0,0,0.1), "cm"),
legend.position = "bottom",
legend.direction = "horizontal",
legend.key.size= unit(0.2, "cm"),
legend.spacing = unit(0.2, "cm")) +
tissue_fill +
coord_flip() +
facet_grid(rows = vars(Tissue), scales = 'free_y', space = 'free') +
theme(strip.text.y.right = element_text(angle = 0)) +
theme(
axis.text.y = element_markdown(),
axis.title.y = element_markdown()) +
labs(x = "<span style='color:#1E46A2FF'>Source</span> |
<span style='color:#000000FF'>Sub Tissue</span> |
<span style='color:#FB323BFF'>Age</span> |
<span style='color:#85660D'>Perturbation</span>") +
theme(legend.position = "none")
b <- core_tight_2022 %>%
arrange(Tissue) %>%
mutate(GTEx = case_when(study_accession == 'SRP012682' ~ 'GTEx', TRUE ~ 'Eye')) %>%
select(run_accession, Tissue, Sub_Tissue, Age, Source, Perturbation, GTEx) %>% unique() %>%
mutate(Sub_Tissue = case_when(is.na(Sub_Tissue) ~ '', TRUE ~ Sub_Tissue),
Source = case_when(is.na(Source) ~ '', TRUE ~ Source),
Age = case_when(is.na(Age) ~ '', TRUE ~ Age),
Perturbation = case_when(is.na(Perturbation) ~ '', TRUE ~ Perturbation)) %>%
mutate(Sub_Tissue = glue::glue("<span style='color:#000000FF'>{Sub_Tissue}</span>"),
Source = glue::glue("<span style='color:#1E46A2FF'>{Source}</span>"),
Age = glue::glue("<span style='color:#FB323BFF'>{Age}</span>"),
Perturbation = glue::glue("<span style='color:#85660D'>{Perturbation}</span>")
) %>%
mutate(expanded_name = paste0(Tissue, sep = " | ", Sub_Tissue, sep = " | ",
Age, sep = " | ", Perturbation, sep = " | ",
Source)) %>%
group_by(GTEx, Tissue, Sub_Tissue, Age, Source, Perturbation, expanded_name) %>%
filter(GTEx == 'GTEx') %>%
count(name="Count") %>%
ungroup() %>%
#mutate(Perturbation = case_when(grepl('MGS', Source_details) ~ Source_details)) %>%
ggplot(data=.,aes(x=interaction(Source, Sub_Tissue, Age, Perturbation, sep = ' | '),y=Count,
fill = Tissue)) +
#geom_violin(alpha=0.5, scale = 'width') +
geom_bar(stat = 'identity', position = 'dodge') +
cowplot::theme_cowplot(font_size = 15) + theme(axis.text.x = element_text(angle = 90, hjust=1, vjust = 0.2)) +
ylab("Count") +
theme(strip.background = element_rect(fill = 'black'),
strip.text = element_text(color = 'white'),
panel.background = element_rect(fill = 'gray90'),
plot.margin=grid::unit(c(0,0,0,0.1), "cm"),
legend.position = "bottom",
legend.direction = "horizontal",
legend.key.size= unit(0.2, "cm"),
legend.spacing = unit(0.2, "cm")) +
tissue_fill +
coord_flip() +
facet_grid(rows = vars(Tissue), scales = 'free_y', space = 'free') +
theme(strip.text.y.right = element_text(angle = 0)) +
theme(
axis.text.y = element_markdown(),
axis.title.y = element_markdown()) +
labs(x = "<span style='color:#1E46A2FF'>Source</span> |
<span style='color:#000000FF'>Sub Tissue</span> |
<span style='color:#FB323BFF'>Age</span> |
<span style='color:#85660D'>Perturbation</span>") +
theme(legend.position = "none") + xlab('')
cowplot::plot_grid(plotlist = list(a,b),ncol =2)
ggsave(filename = paste0(app_location, '/www/sample_count_2022.svg'),dpi = 'retina', height = 15, width=15)
}
|
19bbd8eddec9a84d74b983b5e9a182aa7aad7300
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/lowpassFilter/inst/testfiles/convolve/AFL_convolve/convolve_valgrind_files/1616007454-test.R
|
2041f8021e8251bc89276e6cf744d5a250a69f3c
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
1616007454-test.R
|
testlist <- list(kern = numeric(0), val = c(1.02088994287928e-202, -4.72266776296739e-300, 562949953435769, -1.30618378624483e+45, 1.46283277563346e-316, -3.96901482569735e-46, 8.90721517183782e+217, -5.21188868203025e+304, 7.90783548032518e-55, 2.61835429782163e+122, -2.28998945498263e+226, -2.2899894549927e+226, -2.28998945499136e+226, 1.63589317076709e+125, 2.61830011167902e+122, 2.61426534624451e+122, 2.61830011167902e+122, 5.43230956041879e-309, 1.87334101608436e-300, 2.76252633707883e-306, 1.35129174321256e-98, 1.01796081104413e+281, -8.59574860630882e+208, 1.01628588815092e+34, 8.23331198188802e+80, 4.04309013568144e-258, 9.40775852177127e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(lowpassFilter:::convolve,testlist)
str(result)
|
f60ae628443f3b5a4cc763846c002a2e336af843
|
93731d4f81021e230fcac34c91c470768f7a6af4
|
/plot1.R
|
8920ef33ce2026da309a62e236e65085773adf17
|
[] |
no_license
|
isaac-yauri/Exploratory_Data_Analysis
|
34380ce3e537144a0704714c19e7f16653f1eb84
|
23762aee5d4aa188e436417bac2f8fa16d5e5e9a
|
refs/heads/master
| 2021-01-10T10:21:50.929287
| 2015-06-07T10:53:15
| 2015-06-07T10:53:15
| 37,006,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 784
|
r
|
plot1.R
|
# Download ZIP File from UC Irvine Machine Learning Repository
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "household.zip")
unzip("household.zip")
house <- read.csv("household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
# Changing Date format
house$Date <- strptime(house$Date, "%d/%m/%Y")
housepower <- subset(house, Date == "2007-02-01" | Date == "2007-02-02")
housepower$Global_active_power <- as.numeric(housepower$Global_active_power)
# Open a PNG file
png(filename="plot1.png", width=480, height=480, units="px", pointsize=12, bg="white")
# Picture 1
hist(housepower$Global_active_power, col="red", main = "Global Active Power", xlab="Global Active Power (kilowatts)")
# Closing a PNG file
dev.off ()
|
33a35214d5b1e2c82cc5f42ecc5848ea7a75cc20
|
86772a78af6ca3567ed333c9a4cd68c5af73848d
|
/examples/Exaplanets (mass and Kepler's laws recovery)/tests/postproc.R
|
ad9bbcea49f97b5b908caeec5982a5a229bcf97d
|
[] |
no_license
|
aliaksah/EMJMCMC2016
|
077170db8ca4a21fbf158d182f551b3814c6c702
|
3954d55fc45296297ee561e0f97f85eb5048c39e
|
refs/heads/master
| 2023-07-19T16:52:43.772170
| 2023-07-15T16:05:37
| 2023-07-15T16:05:37
| 53,848,643
| 17
| 5
| null | 2021-11-25T14:53:35
| 2016-03-14T10:51:06
|
R
|
UTF-8
|
R
| false
| false
| 5,654
|
r
|
postproc.R
|
library(hash)
library(stringi)
setwd("/mn/sarpanitu/ansatte-u2/aliaksah/abeldata/simulations/simulations/simulations")
cosi<-function(x)cos(x/180*pi)
sini<-function(x)sin(x/180*pi)
m<-function(x,y)(x*y)
expi<-function(x)
{
r<-exp(x)
if(r==Inf)
return(10000000)
else
return(r)
}
InvX<-function(x)
{
if(x==0)
return(10000000)
else
return(1/x)
}
troot<-function(x)abs(x)^(1/3)
sigmoid<-function(x)exp(-x)
#"to23","expi","logi","to35","sini","troot"
sini<-function(x)sin(x/180*pi)
expi<-function(x)exp(-abs(x))
logi <-function(x)log(abs(x)+1)
troot<-function(x)abs(x)^(1/3)
to23<-function(x)abs(x)^(2.3)
to35<-function(x)abs(x)^(3.5)
#experiment i
# Post-process GMJMCMC simulation output: collect per-run posterior files,
# re-evaluate each reported tree expression as a model-matrix column, compare
# it against the three "true" expressions (TPS), and write detection stats.
#temp = list.files(pattern="posteriorsJA3_*")
#myfiles = lapply(FUN = read.csv,X = temp,stringsAsFactors=F)
# List result files matching "postJA1_*", ordered newest-first by mtime.
details = file.info(list.files(pattern="postJA1_*"))
details = details[with(details, order(as.POSIXct(mtime),decreasing = T)), ]
files = rownames(details)
# Keep only runs whose numeric id (the "<id>" in "postJA1_<id>.csv") is
# <= 150 and whose name contains none of the markers "new", "REV" or "S".
ids<-NULL
nms<-NULL
i<-0
for(file in files)
{
i<-i+1
tmp<-strsplit(x = file,fixed = T,split = c("_","."))[[1]][2]
tmp<-strsplit(x = tmp,fixed = T,split = ".")[[1]][1]
if(as.integer(tmp)<=150&&stri_count_fixed(str = file,pattern = "new")[[1]]==0&&stri_count_fixed(str = file,pattern = "REV")[[1]]==0&&stri_count_fixed(str = file,pattern = "S")[[1]]==0)
{
ids<-c(ids,i)
nms<-c(nms,tmp)
}
}
# Read the first 100 selected posterior tables.
temp<-files[ids]
myfiles = lapply(FUN = read.csv,X = temp,stringsAsFactors=F)[1:100]
#
# X4<- as.data.frame(array(data = rbinom(n = 50*1000,size = 1,prob = runif(n = 50*1000,0,1)),dim = c(1000,50)))
# Y4<-rnorm(n = 1000,mean = 1+7*(X4$V4*X4$V17*X4$V30*X4$V10)+7*(((X4$V50*X4$V19*X4$V13*X4$V11)>0)) + 9*(X4$V37*X4$V20*X4$V12)+ 7*(X4$V1*X4$V27*X4$V3)
# +3.5*(X4$V9*X4$V2) + 6.6*(X4$V21*X4$V18) + 1.5*X4$V7 + 1.5*X4$V8,sd = 1)
# X4$Y4<-Y4
length(myfiles)
# Exoplanet covariate data used to evaluate the tree expressions.
X<-read.csv("exa1.csv")
# Stack all per-run posterior tables into one data frame (kept for reference).
aggreg<-NULL
for(i in 1:length(myfiles))
{
print(i)
aggreg <- rbind(aggreg,myfiles[i][[1]])
#write.csv(x = simplifyposteriors(X=X,posteriors=as.matrix(myfiles[i][[1]]),th=0.0001,thf=0.1),file = paste0("postJA32_",nms[i],".csv"),row.names = F)
}
#xxx<-simplifyposteriors(X=X,posteriors=as.matrix(myfiles[i][[1]]),th=0.0001,thf=0.3)
rhash<-hash()
N<-length(myfiles)
alpha<-0.25
clear(rhash)
#
#
# for(i in 1:min(100,N))
# {
# for(j in 1:length(myfiles[[i]]$posterior))
# {
# if(myfiles[[i]]$posterior[j]>=alpha)
# {
# expr<-as.character(myfiles[[i]]$tree[j])
# print(expr)
# res<-model.matrix(data=X,object = as.formula(paste0("PeriodDays~",expr)))
# ress<-c(stri_flatten(round(res[,2],digits = 4),collapse = ""),stri_flatten(res[,1],collapse = ""),1,expr)
# if(!(ress[1] %in% values(rhash)))
# rhash[[ress[1]]]<-ress
# else
# {
# if(ress[1] %in% keys(rhash))
# {
# rhash[[ress[1]]][3]<- (as.numeric(rhash[[ress[1]]][3])) + as.numeric(1)
# if(stri_length(rhash[[ress[1]]][4])>stri_length(expr))
# rhash[[ress[1]]][4]<-expr
# }
# else
# {
# rhash[[ress[2]]][3]<- (as.numeric(rhash[[ress[2]]][3])) + as.numeric(1)
# if(stri_length(rhash[[ress[2]]][4])>stri_length(expr))
# rhash[[ress[2]]][4]<-expr
# }
# }
# }
#
# }
#
# }
#
#
# write.csv(x = t(values(rhash)[c(3,4),]),file = "exppaap.csv",row.names = F,col.names = F)
#
# Fresh accumulators for the RadiusJpt analysis below.
rhash<-hash()
N<-length(myfiles)
alpha<-0.25
clear(rhash)
# TPS: fingerprints (rounded, flattened model-matrix columns) of the three
# "true" physical-law expressions; a discovered expression counts as a hit
# when its fingerprint matches one of these.
TPS<-c(stri_flatten(round(model.matrix(data=X,object = as.formula(paste0("RadiusJpt~","I(troot(I(I(I(PeriodDays)*I(PeriodDays))*I(HostStarMassSlrMass))))")))[,2],digits = 4),collapse = ""),stri_flatten(round(model.matrix(data=X,object = as.formula(paste0("RadiusJpt~","I(troot(I(I(I(HostStarRadiusSlrRad)*I(PeriodDays))*I(PeriodDays))))")))[,2],digits = 4),collapse = ""),stri_flatten(round(model.matrix(data=X,object = as.formula(paste0("RadiusJpt~","I(troot(I(I(I(PeriodDays)*I(PeriodDays))*I(HostStarTempK))))")))[,2],digits = 4),collapse = ""))
# Per-run statistics: [found any true expression, # false positives,
# false-positive rate].
stats = array(0,dim = c(min(100,N),3))
for(i in 1:min(100,N))
{
j=1
curFound = 0
notFound = 0
while(j <= length(myfiles[[i]]$posterior))
{
if(myfiles[[i]]$posterior[j]>=alpha)
{
expr<-as.character(myfiles[[i]]$tree[j])
print(expr)
res<-model.matrix(data=X,object = as.formula(paste0("RadiusJpt~",expr)))
ress<-c(stri_flatten(round(res[,2],digits = 4),collapse = ""),stri_flatten(res[,1],collapse = ""),1,expr)
if(ress[1] %in% TPS)
{
#j = length(myfiles[[i]]$posterior)+1
#print(ress[1])
curFound = curFound+1
}else{
notFound = notFound+1
}
if(curFound)
{
# NOTE(review): membership is tested against values(rhash) here but keys
# via keys(rhash) below; verify this asymmetry is intended.
if(!(ress[1] %in% values(rhash)))
rhash[[ress[1]]]<-ress
else
{
if(ress[1] %in% keys(rhash))
{
rhash[[ress[1]]][3]<- (as.numeric(rhash[[ress[1]]][3])) + as.numeric(1)
if(stri_length(rhash[[ress[1]]][4])>stri_length(expr))
rhash[[ress[1]]][4]<-expr
}
else
{
rhash[[ress[2]]][3]<- (as.numeric(rhash[[ress[2]]][3])) + as.numeric(1)
if(stri_length(rhash[[ress[2]]][4])>stri_length(expr))
rhash[[ress[2]]][4]<-expr
}
}
}
}
j = j + 1
}
stats[i,1]=as.integer(curFound>0)
stats[i,2]=notFound
stats[i,3]=notFound/(curFound+notFound)
}
# Persist the per-expression counts and the averaged detection statistics.
write.csv(x = t(values(rhash)[c(3,4),]),file = "findings.csv",row.names = F,col.names = F)
write.csv(x = t(c(mean(stats[,1]),mean(stats[,2]),mean(stats[,3]))),file = "stats.csv",row.names = F,col.names = F)
print(t(c(mean(stats[,1]),mean(stats[,2]),mean(stats[,3]))))
|
185471f295bfd90eea54261fe855ef7d072cfba3
|
043757e6d32a45e147a574b81e77a375e3f67ea8
|
/dashboard/server.r
|
c843523d46039b71dac1ad17e9b25ed8b126d762
|
[] |
no_license
|
MISMCapstone2015PBS/PBSProject
|
d0c9c789d3641b26b76a97e4acab0d566b39b802
|
6818fd4556d9b9fe396365c1a4ff5314c55f2ce9
|
refs/heads/master
| 2021-01-10T16:58:16.076061
| 2015-12-18T21:07:15
| 2015-12-18T21:07:15
| 44,457,220
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,627
|
r
|
server.r
|
##########################################################################
#
# File: server.R
# Author: Carnegie Mellon Heinz Capstone Team for PBS
# Purpose: This file is responsible for the backend server of the
# Shiny Dashboard. It receives each and every action performed
# in the ui and takes necessary actions accordingly
#
##########################################################################
##### S E R V E R #####
# Load the required packages, installing any that are missing first.
# NOTE: the former `if (!require(pkg)) install.packages("pkg")` pattern
# installed a missing package but never attached it in the same session
# (require() had already returned FALSE), so a fresh machine failed on the
# first run. Install first, then load unconditionally.
required_pkgs <- c("shiny", "shinydashboard", "leaflet", "ggmap",
                   "ggplot2", "plyr", "data.table")
for (pkg in required_pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
  library(pkg, character.only = TRUE)
}
# Install this package if any error is thrown
#if(!require(rgdal)) install.packages("rgdal")
# Allow uploads of up to 10000 MiB through fileInput.
options(shiny.maxRequestSize=10000*1024^2)
# Server code for all the tabs.
# NOTE(review): this server relies heavily on `<<-` assignments
# (density.map.data, cust.density, cust.map, cust.count, shows, show.*,
# state.*, states) that write into the enclosing environment so later
# reactives can reuse them; the evaluation order therefore matters.
server <- function(input, output, session) {
## SECOND TAB CONTENT
## Show the output of the booker data which was just loaded
output$contents1 <- renderDataTable({
#input$submit
cat("FUNC 1")
loadData()
}, options = list(pageLength = 10))
# Read the uploaded CSV (input$file1) and derive the customer-density table;
# returns NULL until a file has been chosen.
loadData <- reactive({
inFile1 <- input$file1
if(is.null(inFile1)) {
return(NULL)
} else {
cat("DATA LOAD RUNNING")
#density.map.data <<- read.csv(inFile1$datapath, header=T, sep=',', quote='"')
density.map.data <<- data.frame(fread(inFile1$datapath, header=T, sep=','))
density.map.data$Artist...Show <<- as.factor(density.map.data$Artist...Show)
cat("DATA LOADED")
cust.density <<- subset(density.map.data, select= c(Customer.Identifier,Postal.Code,Artist...Show,Units,Customer.Price,Download.Date..PST.))
# Normalize postal codes to a zero-padded 5-digit ZIP prefix.
cust.density$Postal.Code <<- substr(cust.density$Postal.Code,1,5)
cust.density$Postal.Code <<- as.numeric(cust.density$Postal.Code, na.rm = TRUE)
cust.density$Postal.Code <<- sprintf("%05d",cust.density$Postal.Code)
cust.density$Toal.Price <<- as.numeric(cust.density$Units*cust.density$Customer.Price, na.rm=TRUE)
temp_data <<- data.frame(density.map.data[1:10,])
}
})
## THIRD TAB CONTENT
# Demand map: customer counts per ZIP code drawn as proportional circles.
output$myDemandMap <- renderLeaflet({
data(zipcode)
cat("Finished Global\n")
cat("STARTED LEAFLET NO 1")
#density.map.data <<- bookerMerchant_orig
cust.map <<- cust.density #merge(cust.density,zipcode,by.x="Postal.Code",by.y="zip")
cust.zipcode <<- ddply(cust.map,"Postal.Code",summarise,cnt=length(Customer.Identifier))
colnames(cust.zipcode) <<- c("ZIPCODE","Count")
cat("\n Zip merge")
cust.count <<- merge(zipcode,cust.zipcode,by.x="zip",by.y="ZIPCODE")
shows <<- data.frame(Shows = unique(cust.map$Artist...Show))
generateLeaflet(cust.count)
})
## FOURTH TAB CONTENT
# Checkbox UI listing every show discovered in the uploaded data.
output$choose_columns1 <- renderUI({
checkboxGroupInput('show', 'SHOW\'s :', as.character(shows$Shows), selected = c("Downton Abbey"))
})
output$showsop <- renderText({
show_range <- input$show
})
# Switch the tabs between fourth and fifth Automatically
observeEvent(input$stateButton, {
newtab1 <- switch(input$tabs,
"contentpopularity" = "popularitymap",
"popularitymap" = "contentpopularity")
updateTabItems(session,"tabs", newtab1)
})
## FIFTH TAB CONTENT
output$mypopularitymap <- renderLeaflet({
runPop()
})
# Filter the customer map down to the shows selected in the checkbox UI.
showPop <- reactive({
show_range <- input$show
## Update all df's with states only for those states
show.popularity <<- subset(cust.map, Artist...Show %in% c(show_range))
show_range
})
# Recompute per-ZIP counts for the selected shows and redraw the map.
runPop <- reactive({
showPop()
show.zipcode <<- ddply(show.popularity,"Postal.Code",summarise,cnt=length(Customer.Identifier))
colnames(show.zipcode) <<- c("ZIPCODE","Count")
cat("\n Zip merge")
show.count <<- merge(zipcode,show.zipcode,by.x="zip",by.y="ZIPCODE")
cat("\nGenerate Populatiry")
generateLeaflet(show.count)
})
# Shared helper: proportional-circle leaflet for a data frame carrying
# longitude/latitude/Count/city columns; NA rows are dropped first.
generateLeaflet <- function(spatialDF) {
cat("STARTED LEAFLET")
spatialDF <- na.omit(spatialDF)
leaflet(spatialDF) %>% addTiles(urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>') %>%
addCircles(~longitude,~latitude, weight = 1, radius=~Count,stroke=FALSE, fillOpacity=0.4,
popup = ~paste(city," - Count: ",Count,sep="")) %>%
setView(lng = mean(as.numeric(spatialDF$longitude),na.rm=TRUE), lat = mean(as.numeric(spatialDF$latitude),na.rm=TRUE), zoom = 4)
}
## SIXTH TAB CONTENT
output$mypopularitystate <- renderLeaflet({
runState()
})
# Choropleth of percent-of-sales by US state for the selected shows.
runState <- reactive ({
show_range <- input$show
states <<- readShapeSpatial("ShapeFiles/cb_2014_us_state_500k.shp")
proj4string(states) <<- CRS("+proj=longlat +datum=WGS84")
state.count <<- ddply(show.count,"state",summarise,Count=sum(Count))
state_total <<- ddply(cust.count,"state",summarise,Total_Cnt=sum(Count))
state.count <<- merge(state.count,state_total,by="state")
state.count$percent.sales <<- (state.count$Count/state.count$Total_Cnt)*100
states <<- merge(states,state.count,by.x="STUSPS",by.y="state",all.y=TRUE)
states <<- states[which(states$Count > 0),]
pal <- colorNumeric(
palette = "YlOrRd",
domain = states$percent.sales
)
leaflet(states) %>% addTiles(urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>') %>%
addPolygons(
stroke = FALSE, fillOpacity = 0.5, smoothFactor = 0.5,
color = ~pal(percent.sales),
popup = ~paste(paste(STUSPS," - Count: ",Count,sep=""), paste("Percent Sales: ", round(percent.sales,0), "%",sep=""),sep = "\n")
) %>%
addLegend("bottomright", pal = pal, values = ~percent.sales,
title = "Popularity",
labFormat = labelFormat(),
opacity = 1
) %>%
setView(lng= -98.35,lat=39.5,zoom=4)
})
}
|
82d3ebc80ca370098e35064e32a4a8af7fe3946a
|
6841e590501a49bea4a099de1391cc8b66e7d5a8
|
/man/taxon_distribution.Rd
|
1235e83c87681784b17c1d49c46d7e355cdf6249
|
[] |
no_license
|
JonasGeschke/rcites
|
ec874dec5942141de41dd55ef3df567f28047515
|
f8e2bc3f6173a75553ef0c917a5cf7e72fa6039b
|
refs/heads/master
| 2020-03-27T01:30:58.700685
| 2018-08-22T09:26:53
| 2018-08-22T09:26:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,556
|
rd
|
taxon_distribution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxon_distribution.R
\name{taxon_distribution}
\alias{taxon_distribution}
\title{Get distributions data available for a given taxon concept.}
\usage{
taxon_distribution(tax_id, collapse_tags = NULL, simplify = FALSE,
token = NULL)
}
\arguments{
\item{tax_id}{character string containing a species' taxon concept identifier
(see \code{\link[rcites]{sppplus_taxonconcept}}).}
\item{collapse_tags}{a string used to collapse tags. Default is set to \code{NULL} meaning that tags column's elements remains lists.}
\item{simplify}{a logical. Should the output be simplified? In other words,
should columns of data.table objects returned be unlisted when they are
lists made of single elements?}
\item{token}{a character string containing the authentication token, see
\url{https://api.speciesplus.net/documentation}. Default is set to
\code{NULL} and requires the environment variable \code{SPPPLUS_TOKEN} to be
set directly in \code{Renviron}. Alternatively \code{sppplus_login()} can
be used to set \code{SPPPLUS_TOKEN} for the current session.}
}
\value{
A data table with all distribution information.
}
\description{
Retrieve the distribution data available for a given taxon concept for which
the taxon identifier is known.
}
\examples{
\donttest{
res1 <- taxon_distribution(tax_id = '4521')
res2 <- taxon_distribution(tax_id = '4521', collapse_tags = ' + ', simplify = T)
}
}
\references{
\url{https://api.speciesplus.net/documentation/v1/distributions/index.html}
}
|
a908647dfb870d448db7db93d6eaf3bdee8e0ca0
|
b1204862b0b25eb22e912f9cc681ff8525e836e8
|
/code/functions_MAR.R
|
3999ca7366ddd0c6e082c7278429993a4e61ef8d
|
[] |
no_license
|
stjordanis/marginal-two-part
|
f568da986d4620e1ec8874fafe15b2fc62d03eca
|
ebf2fdd00038d27c3a425fd43c3129f2b9e22962
|
refs/heads/master
| 2022-04-24T11:56:01.033219
| 2020-04-20T06:20:16
| 2020-04-20T06:20:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,243
|
r
|
functions_MAR.R
|
######################################################
# Functions to create the MAR missing data mechanism #
######################################################
#' Calculate the probability of dropping out
#'
#' Computes a per-time-point MAR dropout probability from the lagged,
#' log10-transformed outcome. Assumes `y` is ordered by time within a single
#' subject -- the caller groups by subject before invoking this (see
#' \code{add_MAR_missing}); confirm for any other call site.
#'
#' @param y numeric; the outcome variable
#' @param treatment numeric; 0 (control) or 1 (treatment)
#'
#' @return a numeric vector of dropout probabilities of the same length as
#'   `y`; the first element is always 0 (the first time point is complete)
p_MAR <- function(y, treatment) {
ylag <- dplyr::lag(y)
# Shift by +1 so log10 is defined when the lagged outcome is 0.
ylag <- ylag + 1
ylag_log <- log(ylag, base = 10)
# Center the treated arm's predictor at log10(500 + 1).
ylag_log_c <- ylag_log - log(500 + 1, base = 10)
# MAR function: logistic in the (centered) lagged log-outcome, baseline 0.15;
# slopes have opposite sign in the two arms.
treatment <- unique(treatment)
if(treatment == 1) {
p_miss <- plogis(qlogis(0.15) + ylag_log_c * 2)
} else {
p_miss <- plogis(qlogis(0.15) + ylag_log * -2)
}
# T = 1 is complete
p_miss[1] <- 0
p_miss
}
#' Create missingness indicator
#'
#' Turns intermittent missingness into monotone dropout: every element from
#' the first 1 onwards becomes 1. For instance,
#' 0 0 0 1 0 1 0 0 0 0 0 is converted to
#' 0 0 0 1 1 1 1 1 1 1 1
#'
#' @param miss a numeric indicator, 0 = not missing, 1 = missing
#'
#' @return a numeric vector of the same length where 1 indicates values in Y
#'   that should be NA
add_dropout_ind <- function(miss) {
  first_miss <- match(1, miss)
  if (!is.na(first_miss)) {
    miss[seq(first_miss, length(miss))] <- 1
  }
  miss
}
#' Add missing data
#'
#' Simulates MAR dropout per subject: dropout probabilities come from
#' \code{p_MAR} on the lagged outcome, intermittent missingness is drawn with
#' \code{rbinom}, then \code{add_dropout_ind} makes it monotone. Stochastic:
#' set the RNG seed for reproducibility.
#'
#' @param data a data.frame with the complete data
#'
#' @return a data.frame where values in Y is replaced with NA
#' based on MAR dropout
add_MAR_missing <- function(data) {
d <- data
# Per-subject dropout probability from the lagged outcome.
d <- d %>%
group_by(subject) %>%
mutate(p_miss = p_MAR(y, treatment))
d$miss <- rbinom(nrow(d), 1, d$p_miss)
# Force monotone dropout within subject, then blank out the dropped y's.
d <- d %>%
group_by(subject) %>%
mutate(miss = add_dropout_ind(miss)) %>%
ungroup() %>%
mutate(y = ifelse(miss == 1, NA, y))
d
}
#' Add missing data and convert into pre-post data for ANCOVA
#'
#' Applies MAR dropout via \code{add_MAR_missing}, then reshapes the long
#' data into one row per subject at the posttest (time == 10) with the
#' pretest (time == 0) score attached.
#'
#' @param data The complete data in long format
#'
#' @return a data.frame where `y` is the posttest variable with missing
#'   observations and `pre` holds the pretest scores.
add_MAR_missing_post <- function(data) {
  with_dropout <- add_MAR_missing(data)
  baseline <- dplyr::filter(with_dropout, time == 0)
  followup <- dplyr::filter(with_dropout, time == 10)
  followup$pre <- baseline$y
  followup
}
|
a0e3e348a2e66107238923b9d92369ae9e3933fe
|
c1a46872dd1b8b0ddc80f02efef9e3f963de082f
|
/download.R
|
4a1d1f5699924fa3b7708530e191ab1813369020
|
[] |
no_license
|
JohnAmadeo/browndatathon2019
|
560ac38be89130f93d6ba716a38be34d235153bf
|
3d4881f0eca7365c82f9cb2d4fcb238769d1f542
|
refs/heads/master
| 2020-04-24T20:39:40.817699
| 2019-02-24T15:11:29
| 2019-02-24T15:11:29
| 172,250,972
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,834
|
r
|
download.R
|
#check for libraries that are needed, install those that aren't present, and load all libraries
libraries <- c('foreign', 'tidyverse')
presentLibs <- libraries %in% rownames(installed.packages())
invisible(if(any(!presentLibs)) install.packages(libraries[!presentLibs]))
invisible(lapply(libraries, require, character.only=TRUE))
#remember the working directory; downloads and extraction happen here
wd <- getwd()
#Download BRFSS data from 2017, 2015, 2013 and 2011 (order matches `filename` below)
link <- c("http://www.cdc.gov/brfss/annual_data/2017/files/LLCP2017XPT.zip",
"http://www.cdc.gov/brfss/annual_data/2015/files/LLCP2015XPT.zip",
"http://www.cdc.gov/brfss/annual_data/2013/files/LLCP2013XPT.ZIP",
"http://www.cdc.gov/brfss/annual_data/2011/files/LLCP2011XPT.ZIP")
# Download one zip to `filename` in the working directory, extract it there,
# and return the name of the first file inside the archive.
download_file <- function (link, filename){
path <- paste(wd, filename, sep ="/")
download.file(url = link, destfile = path)
unzip(path, exdir = wd)
extract_name <- unzip(path,list =TRUE)[1,1]
#remove_code <- paste ("rm", "-rf", path, sep = " ")
#system(remove_code)
#get the file name
return (extract_name)
}
filename <- c("BRFSS_data_2017.zip", "BRFSS_data_2015.zip", "BRFSS_data_2013.zip","BRFSS_data_2011.zip")
# Only download archives that are not already present locally.
for(i in seq(1,length(filename),by=1)){
if(!file.exists(filename[i])){
download_file(link[i], filename[i])
}
}
# Read the extracted SAS transport (.XPT) files; note some extracted file
# names carry a trailing space, which must be preserved.
brfss2017 <- read.xport('LLCP2017.XPT ') #after the file extension there seems to be a space... not sure why, but oh well
dim(brfss2017)
# read in the downloaded data files
brfss2015 <- read.xport('LLCP2015.XPT ') #after the file extension there seems to be a space... not sure why, but oh well
dim(brfss2015)
#just print if any exercise variables are missing
#441456 entries, 330 variables
brfss2013 <- read.xport("LLCP2013.XPT")
#491773 entries, 336 variables
#again print to check if any variables are missing
brfss2011 <- read.xport("LLCP2011.XPT")
dim(brfss2011)
#506467 entries, 454 variables
# Physical-activity / mental-health variables whose presence we check in
# each survey year below.
exercise_vars <- c("EXERANY2", "EXRACT11", "EXEROFT1", "EXERHMM1",
"EXRACT21", "EXEROFT2", "EXERHMM2", "STRENGTH",
"METVL11_", "METVL21_", "FC60_",
"ACTIN11_", "ACTIN21_", "PADUR1_", "PADUR2_",
"PAFREQ1_", "PAFREQ2_", "X_MINAC11", "X_MINAC21",
"STRFREQ_", "PAMIN11_", "PAMIN21_", "PA1MIN_",
"PAVIG11_", "PAVIG21_", "PA1VIGM_", "X_PACAT1",
"X_PASTRNG", "X_PAREC1", "X_PASTAE1", "MENTHLTH","X_RFBING5")
# Return the exercise variables absent from `data`'s columns.
missing_var_exercise <- function (data){
all_var <- colnames(data)
missing <- exercise_vars[which(!exercise_vars %in% all_var)]
return (missing)
}
missing_var_exercise(brfss2017)
missing_var_exercise(brfss2015)
missing_var_exercise(brfss2013)
missing_var_exercise(brfss2011)
|
408a644ada826b792adf4dcc7d8123c9a7ccaf2a
|
9f008867907d33f886270a31ca05bf1b3468603a
|
/man/cpg.Rd
|
3c4fcd8dc7279c96e9a0376a7960154f2770168a
|
[] |
no_license
|
cran/CpGassoc
|
7fe11f56897f5e6eb1edadae34c1380dd22ec967
|
a00f76379370cd5e15ed45ad5d930baab9d0231e
|
refs/heads/master
| 2021-01-22T08:48:12.022364
| 2017-05-30T21:23:09
| 2017-05-30T21:23:09
| 17,678,558
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,961
|
rd
|
cpg.Rd
|
\name{cpg}
\alias{plot.cpg}
\alias{summary.cpg}
\alias{print.cpg}
\alias{sort.cpg}
\title{Methods for object of class \code{"cpg"}}
\usage{
\method{plot}{cpg}(x, save.plot = NULL, file.type = "pdf", popup.pdf = FALSE,
tplot = FALSE, classic = TRUE,main.title = NULL, eps.size = c(5, 5),
gc.p.val = FALSE, gcdisplay = FALSE, \dots)
\method{summary}{cpg}(object,\dots)
\method{print}{cpg}(x,\dots)
\method{sort}{cpg}(x,decreasing,\dots)
}
\arguments{
\item{x}{
Output of class \code{"cpg"} from cpg.assoc or cpg.work.
}
\item{save.plot}{
Name of the file for the plot to be saved to. If not specified, plot will not be saved.
}
\item{file.type}{
Type of file to be saved. Can either be \code{"pdf"} or \code{"eps"}. Selecting \code{file.type="eps"} will
result in publication quality editable postscript files that can be opened by Adobe Illustrator or Photoshop.
}
\item{popup.pdf}{
\code{TRUE} or \code{FALSE}. If creating a pdf file, this indicates if the plot should appear in a popup window as well. If running in a
cluster-like environment, best to leave \code{FALSE}.
}
\item{tplot}{
Logical. If \code{TRUE}, t-statistics will be plotted vs. their expected quantiles. If \code{FALSE} (default), -log(p) will be
plotted. (Note: if \code{class(x$indep)=='factor'} this option will be ignored.)
}
\item{classic}{
Logical. If \code{TRUE}, a classic qq-plot will be generated, with all p-values plotted against predicted values (including significant).
If \code{FALSE} Holm-significant CpG sites will not be used to compute expected quantiles and will be plotted separately.
}
\item{main.title}{
Main title to be put on the graph. If \code{NULL} one based on the analysis will be used.
}
\item{eps.size}{
Vector indicating the size of .eps file (if creating one). Correponds to the options horizontal and height in the
\code{postscript} function. Corresponds to the options horizontal and height in the
}
\item{gc.p.val}{
Logical. If true, plot will use the genomic control adjusted p-values.
}
\item{gcdisplay}{
Logical.If true, plot will display the genomic control value in the legend.
}
\item{object}{
Output of class \code{"cpg"} from \code{cpg.assoc} or \code{cpg.work}.
}
\item{decreasing}{
logical. Should the sort be increasing or decreasing? Not available for partial sorting.
}
\item{\dots}{
Arguments to be passed to methods, such as graphical parameters.
}
}
\description{
Methods and extra functions for class \code{"cpg"}.
\code{plot.cpg} creates a QQ plot based on the association p-values or t-statistics from the function \code{cpg.assoc}.
}
\value{
\code{sort.cpg} returns an item of class \code{"cpg"} that is sorted by p-value.
\code{summary.cpg} creates a qq-plot based on the data, and scatterplots or boxplots for the top sites.
}
\author{
Barfield, R.; Kilaru,V.; Conneely, K.\cr
Maintainer: R. Barfield: <rbarfield01@fas.harvard.edu>
}
\note{
Plots with empirical confidence intervals based on permutation tests can be obtained from \code{cpg.perm}.
See \code{\link{plot.cpg.perm}} for more info.
}
\seealso{
\code{\link{cpg.perm}}
\code{\link{cpg.assoc}}
\code{\link{scatterplot}}
\code{\link{manhattan}}
\code{\link{plot.cpg.perm}}
}
\examples{
##Using the results from the example given in cpg.assoc.
###NOTE: If you are dealing with large data, do not specify large.data=FALSE.
###The default option is true.
##This will involve partitioning up the data and performing more gc() to clear up space
##QQ Plot:
data(samplecpg,samplepheno,package="CpGassoc")
test<-cpg.assoc(samplecpg,samplepheno$weight,large.data=FALSE)
plot(test)
##t-statistic plot:
plot(test,tplot=TRUE)
#Getting our plot:
plot(test,classic=FALSE)
##Now an example of sort
head(sort(test)$results)
##Summary
summary(test)
}
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
22b059e00061fc34fc3e90e92441c9861a49bf43
|
a76aa3dbd5df3e357c3a6b88d4c25ad4f8992d1c
|
/src/plot/plotCasesPreSeason.R
|
2ea2fe8939d5680510b2ab252a094acf1aa8bbbd
|
[] |
no_license
|
hy39/dengue-forecast-hk
|
dcf81ae11d43afb7711c063a448e728b4e5762f1
|
62c70b19ddff0b2819d3cceb58110a652644ee6a
|
refs/heads/master
| 2022-07-25T10:20:08.110328
| 2019-09-11T03:02:15
| 2019-09-11T03:02:15
| 207,704,792
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 1,673
|
r
|
plotCasesPreSeason.R
|
# Plot average pre-season (Jan-Jun) temperature against rainfall, one point
# per year, with point size showing the annual dengue relative risk.
rm(list=ls(all=TRUE))
# Install ggplot2 if it is not found, then attach it.
# NOTE: the package name must be a quoted string for install.packages();
# the former unquoted call errored with "object 'ggplot2' not found"
# whenever the install branch was actually taken, and require() alone never
# attached a freshly installed package.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)
source("../lib/retrieveData.R")
# plot using MIN temperature or AVERAGE temperature
temperatureType <- "mean"
temperatureLabel <- "Average Temperature (°C)"
# 1. Retrieve data (helpers come from retrieveData.R)
temperature <- getMonthlyTemperatureOnType(temperatureType)  # monthly temperature
rainfall <- getMonthlyRainFall()                             # monthly rainfall
allCases <- getAnnualRelativeRisk()                          # annual relative risk
# 2. Build plot_df: year, relative risk, pre-season rainfall/temperature.
# Rows are collected with lapply and bound once rather than growing a
# data.frame inside the loop.
minYear <- min(allCases$year, na.rm=TRUE)
maxYear <- max(allCases$year, na.rm=TRUE)
year_rows <- lapply(minYear:maxYear, function(year) {
  preseason_rainfall <- mean(rainfall[rainfall$year == year & rainfall$month <= 6,
                                      "totalrain"], na.rm=TRUE)
  preseason_temperature <- mean(temperature[temperature$year == year & temperature$month <= 6,
                                            "temperature"], na.rm=TRUE)
  relativeRisk <- allCases[allCases$year == year & allCases$month == 1, "relativeRisk"]
  data.frame(YEAR = year, CASES = relativeRisk,
             RAINFALL = preseason_rainfall, TEMPERATURE = preseason_temperature)
})
plot_df <- do.call(rbind, year_rows)
# 3. Plot data
ggplot(data=plot_df, aes(x=RAINFALL, y=TEMPERATURE)) +
  geom_point(aes(x=RAINFALL, y=TEMPERATURE, size=CASES), color="red", alpha=0.5) +
  labs(size="Relative Risk") +
  geom_text(aes(label=ifelse(CASES>0, round(CASES, digits=2), '')), size=3, hjust=0, vjust=1) +
  labs(x="Rainfall (mm)") +
  labs(y=temperatureLabel)
|
7e03baf07c8ec1fab2a1d3a0a01fd96134ec698b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/NHEMOtree/R/NoDupKey.R
|
5e238568b0add59ebb516d2249ea08cf92557c95
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 67
|
r
|
NoDupKey.R
|
# Sort a key vector ascending and drop duplicate entries.
# Returns the unique values of `Vektor` in sorted order.
NoDupKey <- function(Vektor) {
  sorted_keys <- sort(Vektor)
  unique(sorted_keys)
}
|
880e64032f250850ca431b0a22a2a385b13e2d1a
|
ff16e169ef05776d80b329d9c3db48312fd23e6d
|
/LinearRegression.R
|
4819fcad2cdaa3c1667126b372fab272e0e95807
|
[] |
no_license
|
panupind/AdvStats
|
70f953ee66415fa161fe4a7db4243213414282ed
|
7b985a6b1b39704fcd83d09042ea7d64a5124935
|
refs/heads/master
| 2020-07-02T17:34:12.494400
| 2020-04-04T18:29:15
| 2020-04-04T18:29:15
| 201,607,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
LinearRegression.R
|
# Fit a linear regression of amount charged on income and household size.
# NOTE(review): the `Consumer` data frame is expected to already exist in
# the workspace (this script never reads it from disk) -- confirm where it
# is loaded.
Consumer
attach(Consumer)
# NOTE(review): attach() is redundant here since lm() already receives
# `data = Consumer` explicitly; consider removing it to avoid masking.
linearModel <- lm( AmountCharged ~ (Income + HouseholdSize), data = Consumer)
linearModel
|
db0dc4e0d68cb0bdc036d699fbac5f06fbdd697e
|
40ef9a8d519d7f50595f1abc64a87ba4c9abc41f
|
/tests/selenium.R
|
e62360a96660931a6a92c34f0b0b948982cca2e6
|
[] |
no_license
|
skranz/SeminarMatching
|
38ad2ebc1ade1750ba92ac94948fc5e67e830332
|
26f84c9e1619695f5fee8372cf68c18e737e3a5f
|
refs/heads/master
| 2021-01-19T01:47:31.971185
| 2020-06-09T05:26:08
| 2020-06-09T05:26:08
| 48,612,952
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,230
|
r
|
selenium.R
|
library(RSelenium)
library(testthat)
# Poll for a DOM element matching `css` until it appears or `max` seconds
# elapse; returns the first matching element or stops with a timeout error.
# NOTE(review): relies on a global `remDr` RSelenium driver, which is set
# by login.email.check() via `<<-` before the helpers are used.
patientFindElement = function(css, wait=0.1, max=4) {
restore.point("patientFindElement")
cat("\nTry to find ", css)
start = as.numeric(Sys.time())
end = start + max
while(TRUE) {
els = remDr$findElements(using="css",css)
if (length(els)>0) return(els[[1]])
if (end < as.numeric(Sys.time())) {
stop(paste0("Timeout: could not find ", css))
}
Sys.sleep(wait)
}
}
# some helper functions
# Read an attribute (default "value") of the element matching `css`.
getField = function(css, field="value", unlist=TRUE,...) {
webElem = patientFindElement(css,...)
res = webElem$getElementAttribute(field)
if (unlist) return(unlist(res))
res
}
# Type `text` into the element matching `css` (wrapped in a list, as the
# RSelenium API expects).
sendKeys = function(css, text,...) {
webElem = patientFindElement(css,...)
if (!is.list(text)) text = list(text)
webElem$sendKeysToElement(text)
}
# Click the element matching `css`.
clickElement=function(css,...) {
webElem = patientFindElement(css,...)
webElem$clickElement()
}
pJS <- phantom()
# note we are running here without a selenium server phantomjs is
# listening on port 4444
# in webdriver mode
# Open a pool of phantomjs drivers to spread the checks across sessions.
numDr = 4
remDrs = vector("list", numDr)
for (i in 1:numDr) {
remDrs[[i]] <- remoteDriver(browserName = "phantomjs")
remDrs[[i]]$open()
}
appURL <- "http://127.0.0.1:4646"
ind = 1; indDr = 1
# Log in as test user "_test_<ind>" on driver `indDr`, open the student tab
# and verify the displayed email matches the login; stops on mismatch.
login.email.check = function(ind=1, indDr=(ind %% numDr)+1) {
restore.point("login.email.check")
cat("\nNew check ind=",ind, " remDrvInd=",indDr)
remDr<<-remDrs[[indDr]]
remDr$navigate(appURL)
userid=paste0("_test_",ind)
email = userid
password = "test"
# enter login data
sendKeys("#loginPart__loginUser",userid)
getField("#loginPart__loginUser","value")
sendKeys("#loginPart__loginPassword",password)
getField("#loginPart__loginPassword","value")
clickElement("#loginPart__loginBtn")
#webElems = remDr$findElements(using="css","h2")
#unlist(lapply(webElems, function(x){x$getElementText()}))
cat("\nTry to click link...")
clickElement("#studTabsetPanel [data-value='studPanel']")
cat("\nTry to get email...")
shown.email = getField("#studform_email","value")
print(c(email=email, shown=shown.email))
if (!identical(email,shown.email)) stop("Emails differ!")
}
# Run the check for ten test users, then shut the drivers down.
for (ind in 1:10) login.email.check(ind)
for (i in 1:numDr) {
remDrs[[i]]$close()
}
|
1117bef3a4d4a4c65fc1b4e7380e7105458820bc
|
e7e0ccce84c80113d7aba41458007dd42127a94c
|
/R/read_predict.R
|
ff7025a4923d14e76a1f77ef8ccf6880febd2ecb
|
[] |
no_license
|
halasadi/ancient-damage
|
ea10ea94325b66b129c1d4e9e5bf4827e5377ad2
|
51387d59d3436d796a2621d3dd72afbec48f981a
|
refs/heads/master
| 2020-04-12T06:42:24.364980
| 2018-07-28T22:45:30
| 2018-07-28T22:45:30
| 62,754,123
| 2
| 0
| null | 2016-09-22T01:13:50
| 2016-07-06T21:20:43
|
HTML
|
UTF-8
|
R
| false
| false
| 666
|
r
|
read_predict.R
|
# Load a fitted topic model and exercise read_memberships() on a small
# synthetic count matrix with each of its `method` options.
# NOTE(review): read_memberships() is defined elsewhere in this package.
topic_clus <- get(load("../utilities/moderns_Pinhasi/clus_2/model.rda"))
omega <- topic_clus$omega
theta <- topic_clus$theta
# 5 synthetic "reads" over the model's features; presumably columns index
# the rows of theta (TODO confirm). Row 5 is intentionally left all-zero.
new_count <- matrix(0, ncol = dim(theta)[1], nrow = 5)
new_count[1, c(1, 100)] <- 1
new_count[2, c(1001)] <- 1
new_count[3, c(5000, 9000)] <- 1
new_count[4, c(1933)] <- 1
rownames(new_count) <- paste0("reads-", 1:5)
# Try every membership method; only the last result is kept in `out`.
out <- read_memberships(topic_clus, new_count, method = "lik")
out <- read_memberships(topic_clus, new_count, method = "map")
out <- read_memberships(topic_clus, new_count, method = "independent")
out <- read_memberships(topic_clus, new_count, method = "independent-nostrand")
fit <- topic_clus
reads_data <- new_count
|
b23b93ae8e0b4d3ab251aeb545f1054a81178b32
|
16e417ac3011ae8b9f7e2ee01c02fad806e320ac
|
/Plot6.R
|
7ba697a0aa3fff2058c0454087ae6f19a01fc0b9
|
[] |
no_license
|
arlopezs2015/Peer-Exploratory-Data-Analysis-2
|
11057b54f74aa8e5e43b9c8d1c226787301f310e
|
72e673abc6200ed255103c28f9465399d09b4734
|
refs/heads/master
| 2016-09-05T09:50:45.303060
| 2015-04-24T19:06:41
| 2015-04-24T19:06:41
| 34,530,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,204
|
r
|
Plot6.R
|
##PLOT_6
##Required data
# NOTE(review): assumes the .rds files live in ./data relative to the current
# working directory; setwd() changes global state for the whole session.
setwd("./data")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
##To call libraries
library(plyr)
library(ggplot2)
library(data.table)
##To filter the data
##emision<-aggregate(NEI_Veh$Emissions, list(NEI_Veh$year,NEI_Veh$fips), sum,na.rm=TRUE)
# Motor-vehicle (ON-ROAD) emissions for Baltimore City (fips 24510)...
NEI_Veh1 <- NEI[(NEI$type=="ON-ROAD") & (NEI$fips %in% c("24510")),]
NEI_Veh1$fips<-"Baltimore"
# ...and Los Angeles County (fips 06037).
NEI_Veh2 <- NEI[(NEI$type=="ON-ROAD") & (NEI$fips %in% c("06037")),]
NEI_Veh2$fips<-"LA"
# Total emissions per year for each city, then stack the two.
emision1<-aggregate(NEI_Veh1$Emissions, list(NEI_Veh1$year,NEI_Veh1$fips), sum,na.rm=TRUE)
emision2<-aggregate(NEI_Veh2$Emissions, list(NEI_Veh2$year,NEI_Veh2$fips), sum,na.rm=TRUE)
emision_1y2<-rbind(emision1,emision2)
## Assign name to the series
names(emision_1y2)<-c('year','city','emission')
##To create the graph
png("plot6.png", width=480, height=480, units="px")
# NOTE(review): relies on top-level auto-printing of the ggplot object;
# wrap in print() if this file is ever source()d.
ggplot(emision_1y2,aes(x=year,y=emission,fill=city))+
geom_bar(aes(fill=year),stat="identity") +
facet_grid(.~city) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
##close the png device
dev.off()
|
a5dc3f81265c432711d039e7769a0d12be5522c1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/tiger/R/k_hyd.R
|
69e2cf243956d3e5d5e7966f47204c6280776e6e
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
r
|
k_hyd.R
|
# Hydrograph recession constant per time step: for each position i,
# -(x[i+1] - x[i]) / x[i]. Positions where the series does not recede
# (non-negative step) or where x is zero are set to NA; the final element
# is always NA because it has no successor.
k_hyd <- function(x) {
  step <- diff(x)
  rate <- -c(step, NA) / x
  rate[step >= 0] <- NA
  rate[x == 0] <- NA
  rate
}
|
0c83556be35b8225683f39edef0d85ff4e824efb
|
2d32305806855dc8793ab0348acef458b139f1be
|
/tests/testthat/test-extendOM.R
|
bc22a6518ccce0c6bb8fd76be9940225728dbd38
|
[
"MIT"
] |
permissive
|
nmfs-fish-tools/SSMSE
|
07a9ed4defb370833864183b7f4a775425c53b3c
|
47445d973a537eaf9a7361f842d3f7a404bca247
|
refs/heads/main
| 2023-08-16T21:18:17.253400
| 2023-08-09T21:40:26
| 2023-08-10T12:20:30
| 197,069,801
| 16
| 5
|
MIT
| 2023-09-08T16:03:34
| 2019-07-15T20:44:06
|
R
|
UTF-8
|
R
| false
| false
| 8,409
|
r
|
test-extendOM.R
|
context("Test functions in extendOM.R script")
# Shared fixtures for the extendOM tests: run everything inside a temp
# directory so no files are added to the repo, and tear it down on exit.
temp_path <- file.path(tempdir(), "test-extendOM")
dir.create(temp_path, showWarnings = FALSE)
wd <- getwd()
setwd(temp_path)
on.exit(setwd(wd), add = TRUE)
on.exit(unlink(temp_path, recursive = TRUE), add = TRUE)
# Locate the packaged cod operating model used as the test fixture.
extdat_path <- system.file("extdata", package = "SSMSE")
cod_mod <- file.path(extdat_path, "test_mod", "cod_initOM_for_tests")
# Copy the cod model into temp_path and read its SS data file.
file.copy(cod_mod, temp_path, recursive = TRUE)
dat <- r4ss::SS_readdat(file.path(temp_path, "cod_initOM_for_tests", "data.ss"),
  verbose = FALSE
)
file.rename(
  file.path(temp_path, "cod_initOM_for_tests"),
  file.path(temp_path, "cod_initOM1")
)
# Create a catch dataframe to add to the model:
# just repeat the catch and se from the last year for 3 new years.
new_catch <- data.frame(
  year = (dat[["endyr"]] + 1):(dat[["endyr"]] + 3),
  seas = unique(dat[["catch"]][["seas"]])[1],
  fleet = unique(dat[["catch"]][["fleet"]])[1],
  catch = dat[["catch"]][["catch"]][nrow(dat[["catch"]])],
  catch_se = dat[["catch"]][["catch_se"]][nrow(dat[["catch"]])]
)
new_yrs <- new_catch[["year"]]
# Sampling structure used by the add_sample_struct tests: CPUE, length-
# composition and age-composition rows keyed by year/season/fleet.
extend_vals <- list(
  CPUE = data.frame(
    year = c(101, 103, 105), seas = 7, index = 2,
    se_log = c(0.1, 0.2, 0.3)
  ),
  lencomp = data.frame(
    Yr = 101:103, Seas = 1, FltSvy = 1,
    Gender = 0, Part = 0,
    Nsamp = c(25, 50, 100)
  ),
  agecomp = data.frame(
    Yr = 101:104, Seas = 1, FltSvy = 2,
    Gender = 0, Part = 0, Ageerr = 1,
    Lbin_lo = -1, Lbin_hi = -1,
    Nsamp = c(25, 50, 100, 150)
  )
)
# TODO: implement future_om_list use in these tests
# also need to modify to update OM and have a basic OM file that
# already has years extended to full MSE timeseries length
# In the meantime, comment out
# test_that("extend_OM works with simple case", {
# skip_on_cran()
# # simple case: 1 fleet and season needs CPUE, lencomp, agecomp added
# return_dat <- update_OM(
# catch = new_catch,
# OM_dir = file.path(temp_path, "cod_initOM1"),
# write_dat = FALSE,
# verbose = FALSE
# )
# # check catch
# new_rows <- return_dat[["catch"]][
# (nrow(return_dat[["catch"]]) - nrow(new_catch) + 1):nrow(return_dat[["catch"]]),
# ]
# lapply(colnames(new_catch), function(x) {
# expect_equal(new_rows[, x], new_catch[, x])
# })
# # check CPUE # wrap first exp. in abs() b/c fleet negative in OM as a switch.
# expect_equivalent(
# abs(return_dat[["CPUE"]][101:102, c("year", "seas", "index", "se_log")]),
# extend_vals[["CPUE"]][extend_vals[["CPUE"]][["year"]] <= return_dat[["endyr"]], ]
# )
# # check lencomp
# expect_equivalent(
# abs(return_dat[["lencomp"]][101:103, colnames(extend_vals[["lencomp"]])]),
# extend_vals[["lencomp"]][extend_vals[["lencomp"]][["Yr"]] <= return_dat[["endyr"]], ]
# )
# # check agecomp
# expect_equivalent( # wrap both exp. in abs b/c of neg in fleet and in lbin/lbinhi
# abs(return_dat[["agecomp"]][101:103, colnames(extend_vals[["agecomp"]])]),
# abs(extend_vals[["agecomp"]][extend_vals[["agecomp"]][["Yr"]] <= return_dat[["endyr"]], ])
# )
# })
# Fresh copy of the cod model for the error-handling tests below,
# renamed to cod_initOM2 so it is independent of cod_initOM1.
file.copy(cod_mod, temp_path, recursive = TRUE)
dat <- r4ss::SS_readdat(file.path(temp_path, "cod_initOM_for_tests", "data.ss"),
  verbose = FALSE
)
file.rename(
  file.path(temp_path, "cod_initOM_for_tests"),
  file.path(temp_path, "cod_initOM2")
)
test_that("extend_OM exits on error when it should", {
  skip_on_cran()
  # nyrs is too small (needs to be at least 3)
  # Removed as nyrs is no longer an input
  # expect_error(
  #   update_OM(
  #     catch = new_catch,
  #     OM_dir = file.path(temp_path, "cod_initOM2"),
  #     nyrs = 1,
  #     verbose = FALSE
  #   ),
  #   "The maximum year input for catch"
  # )
  # Missing a column in the catch dataframe should error.
  # NOTE(review): after unlink(), file.copy() recreates
  # "cod_initOM_for_tests" but it is never renamed back to "cod_initOM2",
  # so OM_dir below does not exist — confirm update_OM validates the catch
  # frame before the directory, otherwise the regexp match is accidental.
  unlink(file.path(temp_path, "cod_initOM2"), recursive = TRUE)
  file.copy(cod_mod, temp_path, recursive = TRUE)
  expect_error(
    update_OM(new_catch[, -1],
      OM_dir = file.path(temp_path, "cod_initOM2")
    ),
    "The catch data frame does not have the correct"
  )
  # A wrongly named column in the catch dataframe should also error.
  alt_new_catch <- new_catch
  colnames(alt_new_catch)[1] <- "wrongname"
  unlink(file.path(temp_path, "cod_initOM2"), recursive = TRUE)
  file.copy(cod_mod, temp_path, recursive = TRUE)
  expect_error(
    update_OM(alt_new_catch,
      OM_dir = file.path(temp_path, "cod_initOM2")
    ),
    "The catch data frame does not have the correct"
  )
  # A path that does not lead to an SS model should error.
  unlink(file.path(temp_path, "cod_initOM2"), recursive = TRUE)
  file.copy(cod_mod, temp_path, recursive = TRUE)
  expect_error(
    update_OM(new_catch, OM_dir = temp_path),
    "Please change to a directory containing a valid SS model"
  )
})
# Fresh copy of the cod model for the check_future_catch tests,
# renamed to cod_initOM3 so earlier tests cannot interfere with it.
file.copy(cod_mod, temp_path, recursive = TRUE)
dat <- r4ss::SS_readdat(file.path(temp_path, "cod_initOM_for_tests", "data.ss"),
  verbose = FALSE
)
file.rename(
  file.path(temp_path, "cod_initOM_for_tests"),
  file.path(temp_path, "cod_initOM3")
)
test_that("check_future_catch works", {
  # Valid future catch passes through unchanged (doesn't flag anything).
  return_catch <- check_future_catch(
    catch = new_catch,
    OM_dir = file.path(temp_path, "cod_initOM3")
  )
  expect_equal(return_catch, new_catch)
  # Read total biomass from the SS summary to build an impossible catch.
  # NOTE(review): `summary` shadows base::summary inside this test.
  summary <- r4ss::SS_read_summary(file.path(temp_path, "cod_initOM3", "ss_summary.sso"))
  summary <- summary[["biomass"]]
  large_catch <- new_catch
  large_catch_val <- summary["TotBio_100", "Value"] + 1000
  large_catch[2, "catch"] <- large_catch_val
  # Catch above total biomass must be rejected.
  expect_error(
    check_future_catch(
      catch = large_catch,
      OM_dir = file.path(temp_path, "cod_initOM3")
    ),
    "Some input values for future catch are higher"
  )
  # Catch with a year at or before the last biomass year must be rejected.
  wrong_yr_catch <- new_catch
  wrong_yr_catch[2, "year"] <- 5
  expect_error(
    check_future_catch(
      catch = wrong_yr_catch,
      OM_dir = file.path(temp_path, "cod_initOM3")
    ),
    "The highest year for which TotBio"
  )
  # Function cannot be used for catch_units other than "bio",
  # including unrecognized values.
  expect_error(
    check_future_catch(
      catch = new_catch,
      OM_dir = file.path(temp_path, "cod_initOM3"),
      catch_units = "num"
    ),
    "Function not yet implemented when catch is not in biomass"
  )
  expect_error(
    check_future_catch(
      catch = new_catch,
      OM_dir = file.path(temp_path, "cod_initOM3"),
      catch_units = "wrong_value"
    ),
    "Function not yet implemented when catch is not in biomass"
  )
})
test_that(" add_sample_struct works for adding data during model years and for future years", {
  init_dat <- r4ss::SS_readdat(file.path(temp_path, "cod_initOM1", "data.ss"),
    verbose = FALSE
  )
  # NULL sample_struct: data file must come back unchanged.
  init_endyr <- init_dat[["endyr"]]
  init_dat[["endyr"]] <- init_dat[["endyr"]] + 4 # assume extend forward by 4 yrs.
  no_change_dat <- add_sample_struct(sample_struct = NULL, dat = init_dat)
  expect_equal(init_dat, no_change_dat)
  # Future years: rows past the original endyr should be appended
  # (2 CPUE, 3 lencomp, 4 agecomp rows from extend_vals).
  future_dat <- add_sample_struct(
    sample_struct = extend_vals,
    dat = init_dat
  )
  expect_true(length(future_dat[["CPUE"]][future_dat[["CPUE"]][["year"]] >
    init_endyr, "year"]) == 2)
  expect_true(length(future_dat[["lencomp"]][future_dat[["lencomp"]][["Yr"]] >
    init_endyr, "Yr"]) == 3)
  expect_true(length(future_dat[["agecomp"]][future_dat[["agecomp"]][["Yr"]] >
    init_endyr, "Yr"]) == 4)
  # Strip any rows already marked excluded (negative fleet/index) so the
  # historical-year behavior can be observed cleanly.
  no_neg_dat <- init_dat
  no_neg_dat[["CPUE"]] <- no_neg_dat[["CPUE"]][no_neg_dat[["CPUE"]][["index"]] > 0, ]
  no_neg_dat[["lencomp"]] <- no_neg_dat[["lencomp"]][no_neg_dat[["lencomp"]][["FltSvy"]] > 0, ]
  no_neg_dat[["agecomp"]] <- no_neg_dat[["agecomp"]][no_neg_dat[["agecomp"]][["FltSvy"]] > 0, ]
  hist_samples <- extend_vals
  hist_samples[["CPUE"]][["year"]] <- 31:33
  hist_samples[["lencomp"]][["Yr"]] <- 27:29
  hist_samples[["agecomp"]][["Yr"]] <- 31:34
  # Historical years: added rows should appear with negative fleet/index
  # (SS convention for data excluded from the likelihood).
  hist_dat <- add_sample_struct(
    sample_struct = hist_samples,
    dat = no_neg_dat
  )
  expect_true(
    length(hist_dat[["CPUE"]][hist_dat[["CPUE"]][["index"]] < 0, "index"]) ==
      length(31:33)
  )
  expect_true(
    length(hist_dat[["lencomp"]][
      hist_dat[["lencomp"]][["FltSvy"]] < 0,
      "FltSvy"
    ]) ==
      length(27:29)
  )
  expect_true(
    length(hist_dat[["agecomp"]][
      hist_dat[["agecomp"]][["FltSvy"]] < 0,
      "FltSvy"
    ]) ==
      length(31:34)
  )
})
|
0a8287de3c747b908d8aa90a683d92a6c2f7cd66
|
fda540791ba58168598b8320571356a565f9faf1
|
/explore/prob_laudo.R
|
f74327df55da6bafa736c2df0a14e24813e6d29a
|
[] |
no_license
|
monzalo14/conciliacion
|
5c3e1272090d3575552ab9b58b5b514ab9cfe58f
|
5e4670ec32026a85f5bedd0f01decee1cec01394
|
refs/heads/master
| 2021-01-12T08:58:27.217523
| 2017-05-04T07:16:47
| 2017-05-04T07:16:47
| 76,738,998
| 1
| 3
| null | 2017-02-15T18:40:55
| 2016-12-17T18:05:28
|
R
|
UTF-8
|
R
| false
| false
| 3,078
|
r
|
prob_laudo.R
|
# Predict, for each row of `df`, the probability of class '0'
# (case resolved without a "laudo"/court ruling).
#
# df: data frame already containing the predictor columns the cached
#     model was trained on — TODO confirm schema against the training code.
# Returns: numeric vector of class-'0' probabilities, one per row of df.
prob_laudo <- function(df){
# Loads RF_best (fitted random-forest model) into the function
# environment; assumes probabilidad_laudo.RData is in the working directory.
load('probabilidad_laudo.RData')
# 'prob' requests the class-probability matrix rather than class labels.
prediccion_RF <- predict(RF_best, df, 'prob')
prediccion_RF[, '0']
}
# Predict the probability of class '0' (no "laudo"/court ruling) from a
# raw, unnamed feature vector, engineering the model's derived features
# (sector dummies, "top defendant" flag) on the fly.
#
# array: unnamed vector/list of 19 raw case features, in the fixed order
#        assigned below — TODO(review) confirm element order with callers.
# Returns: numeric vector of class-'0' probabilities from the cached
#          random-forest model.
prob_laudo_vars <- function(array){
  # Loads RF_best (tuned random forest) into the function environment.
  load('probabilidad_laudo.RData')
  # BUG FIX: the 15th feature was named 'giro', but every reference below
  # uses 'giro_empresa'; name it consistently so df$giro_empresa resolves.
  names(array) <- c('reclutamiento', 'gen',
                    'antig', 'reinst', 'hextra',
                    'sarimssinf', 'horas_sem', 'hextra_sem',
                    'rec20', 'prima_dom', 'desc_sem',
                    'desc_ob', 'c_indem', 'min_ley', 'giro_empresa',
                    'nombre_d1', 'nombre_d2',
                    'nombre_d3', 'nombre_d4')
  # BUG FIX: as.data.frame(vector) builds a single COLUMN; we need one row
  # with one column per named feature.
  df <- as.data.frame(as.list(array), stringsAsFactors = FALSE)
  giros <- c('31', '43', '46', '52', '54', '56', '61', '62', '72', '81')
  giros_agregado <- c('3', '4', '5', '6', '7', '8')
  # Aggregated sector dummies, consumed by the linear regression.
  # BUG FIX: ifelse() was missing its yes/no arguments (runtime error).
  # NOTE(review): substring(x, 2) keeps everything AFTER the first char;
  # if the aggregate code is the first digit, substring(x, 1, 1) is intended.
  for (num in giros_agregado){
    df[paste0('giro_', num)] <- ifelse(substring(df$giro_empresa, 2) == num, 1, 0)
  }
  # One 0/1 dummy per detailed sector code, consumed by the random forest.
  for (giro in giros){
    df[paste0('giro_empresa', giro)] <- ifelse(df$giro_empresa == giro, 1, 0)
  }
  # Collapse the detailed dummies into giro_empresa00 (1 when any listed
  # sector matched) and drop the raw sector column. Summing the explicit
  # dummy columns avoids pulling in the character giro_empresa column.
  df <- df %>%
    mutate(giro_empresa00 = rowSums(df[paste0('giro_empresa', giros)])) %>%
    select(-giro_empresa)
  df$giro_empresa00 <- as.numeric(df$giro_empresa00 > 0)
  ########################## "Top defendant" variable ########################
  # Canonical name -> search patterns for defendant trade names.
  # 'QUITAR' ("remove") marks government bodies and natural persons that
  # must be dropped (set to NA) rather than counted.
  razones <- list(
    'WALMART' = c('WALMART', 'WAL MART'),
    'COMERCIAL MEXICANA' = c('COMER', 'COMERCIAL MEXICANA',
                             'SUMESA', 'FRESKO'),
    'ELEKTRA' = c('ELMEX', 'ELEKTRA', 'TEZONTLE'),
    'SANBORNS' = c('SANBORN', 'SANBORNS'),
    'MANPOWER' = c('MAN POWER', 'MANPOWER'),
    'WINGS' = 'WINGS',
    'VIPS'= 'VIPS',
    'SUBURBIA' ='SUBURBIA',
    'PALACIO DE HIERRO' = 'PALACIO DE HIERRO',
    'CHEDRAUI' = 'CHEDRAUI',
    'ATENTO' = 'ATENTO',
    'OXXO' = 'OXXO',
    # Government institutions
    'QUITAR' = c('IMSS',
                 'INFONAVIT',
                 'INSTITUTO MEXICANO DEL SEGURO SOCIAL',
                 'INSTITUTO NACIONAL DEL FONDO PARA LA VIVIENDA',
                 'SHCP', 'SECRETARIA DE HACIENDA',
                 'GORDILLO',
                 'SNTE', 'SINDICATO NACIONAL DE TRABAJADORES DE LA EDUCACION',
                 'GOBIERNO DEL', 'DELEGACION POLITICA',
                 'CONSAR',
                 # Natural persons
                 'QUIEN', 'RESULTE',
                 'AGUIRRE', 'KUTZ', 'ISAIAS', 'ESCARPITA')
  )
  # Helper: replace every value matching `expresion` with canonical `nombre`.
  single_match <- function(variable, expresion, nombre){
    variable[grepl(expresion, variable)] <- nombre
    variable
  }
  # Helper: apply every pattern of one list entry to the column.
  element_match <- function(elemento_lista, nombre, variable){
    for (expresion in elemento_lista){
      variable <- single_match(variable, expresion, nombre)
    }
    variable
  }
  # Canonicalize defendant names; entries mapped to 'QUITAR' become NA.
  limpia_razones <- function(lista, variable){
    for (i in seq_along(lista)){
      variable <- element_match(lista[[i]], names(lista)[i], variable)
    }
    variable[variable == 'QUITAR'] <- NA
    as.character(variable)
  }
  # BUG FIX: mutate_at passes the column as the FIRST argument, but
  # limpia_razones expects the pattern list first; wrap so the column
  # binds to `variable`.
  df <- df %>%
    mutate_at(vars(starts_with('nombre_d')), function(v) limpia_razones(razones, v))
  # Loads top_dems (vector of most-frequent defendants).
  load('../data/top_demandados.RData')
  top_dem <- function(x){
    x %in% top_dems
  }
  # top_demandado = 1 when any of the four defendant slots is a top defendant.
  df <- df %>%
    mutate_at(vars(starts_with('nombre_d')), top_dem) %>%
    mutate(top_demandado = select(., starts_with('nombre_d')) %>% rowSums())
  df$top_demandado <- as.numeric(df$top_demandado > 0)
  prediccion_RF <- predict(RF_best, df, 'prob')
  prediccion_RF[, '0']
}
|
ddca2f5795bc2a3ed72bdcfa6f3aae3e33e7a79f
|
97e7aa649b53bb279c452e3e7a7a875e68883627
|
/Time Series Analysis, Text Analysis/Lab7-Visualization.R
|
ca4ab9c232323a77e1906f9ad6a6aedcf392065c
|
[] |
no_license
|
nancy9taya/DataDataAnlytics
|
bd6acee9bd3ea49c7f540c0dcebc010777e388ec
|
8a4304fcdad5e8fb919322e810fdca14cdde4a34
|
refs/heads/main
| 2023-05-06T02:41:48.015714
| 2021-06-02T10:36:15
| 2021-06-02T10:36:15
| 372,842,076
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,476
|
r
|
Lab7-Visualization.R
|
# Lab 7: scatter-plot exercises on the Economist CPI/HDI dataset with ggplot2.
# NOTE(review): rm(list=ls()), a hard-coded setwd() and an unconditional
# install.packages() are anti-patterns in shared scripts — kept as-is for
# the lab, but consider removing before reuse.
rm(list=ls())
setwd("D:/Rehab/Studies/Senior 2/Spring 2021/Big Data/Labs/Lab 7 - Time Series Analysis, Text Analysis and Visualization/Visualization in R") #Replace this working directory with the directory containing the R file.
install.packages("ggplot2")
library("ggplot2")
#Question 1: load the data and summarize it
data <- read.csv("datasets/EconomistData.csv", header = TRUE, sep = ',')
summary(data)
#Question 2: basic CPI vs HDI scatter plot
ggplot(data, aes(x=CPI,y=HDI))+ geom_point()
#Question 3: all points in a single fixed colour
ggplot(data, aes(x=CPI,y=HDI))+ geom_point(color = "blue")
#Question 4: colour points by region
ggplot(data, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region))
#Question 5: add a smoothed trend line
ggplot(data, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region))+geom_smooth()
#Question 6: label points with country names instead of dots
ggplot(data, aes(x=CPI,y=HDI))+ geom_text(aes(label = Country))
#Question 7: fixed point size
ggplot(data, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region),size = 2)
#Question 8: size points by HDI rank
ggplot(data, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region ,size = HDI.Rank))
#Question 9: repeat plots 2/4/5/6/8 for the Middle East & North Africa subset
middleEast <- subset(data, Region == "MENA")
ggplot(middleEast, aes(x=CPI,y=HDI))+ geom_point() #2
ggplot(middleEast, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region)) #4
ggplot(middleEast, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region))+geom_smooth() #5
ggplot(middleEast, aes(x=CPI,y=HDI))+ geom_text(aes(label = Country)) #6
ggplot(middleEast, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region ,size = HDI.Rank)) #8
#Egypt is located at (CPI,HDI) = (2.9,0.64)
#Question 10: same plots for Western Europe
european <- subset(data, Region == "EU W. Europe")
ggplot(european, aes(x=CPI,y=HDI))+ geom_point() #2
ggplot(european, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region)) #4
ggplot(european, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region))+geom_smooth() #5
ggplot(european, aes(x=CPI,y=HDI))+ geom_text(aes(label = Country)) #6
ggplot(european, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region ,size = HDI.Rank)) #8
#Question 11: Western Europe plus Egypt for comparison
european_egy <- subset(data, Region == "EU W. Europe" | Country == "Egypt")
ggplot(european_egy, aes(x=CPI,y=HDI))+ geom_point() #2
ggplot(european_egy, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region)) #4
ggplot(european_egy, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region))+geom_smooth() #5
ggplot(european_egy, aes(x=CPI,y=HDI))+ geom_text(aes(label = Country)) #6
ggplot(european_egy, aes(x=CPI,y=HDI))+ geom_point(aes(color = Region ,size = HDI.Rank)) #8
#Egypt is located at (CPI,HDI) = (2.9,0.64) N.B: Not exactly whether it is 2.9 or 2.8
|
fe81ead3e7a196c3ab21a5d4ad062788aa73c87d
|
8587cbe2e309f29a1e539e31509ccf75daacc764
|
/3/bernoulli distribution.R
|
1bad72408cd0df371f4831945c9a3509f55e9df4
|
[] |
no_license
|
alidarvishi14/applied-stochastic-process
|
6d39ff6f7db0b7f60527b9c36cde61c3d2ab9941
|
b528f5aec7dc6ac64001ef5e16a38fb731ad6807
|
refs/heads/main
| 2023-02-22T11:49:19.103362
| 2021-01-25T12:33:27
| 2021-01-25T12:33:27
| 332,736,616
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 150
|
r
|
bernoulli distribution.R
|
# Part 1: draw 100 Bernoulli(0.4) trials directly from uniforms.
p <- 0.4
n <- 100
bernoulli <- runif(n) < p

# Part 2: generate n Bernoulli(0.5) variates from a SINGLE uniform draw,
# recycling the unused "randomness" of u at every step (bit-extraction
# trick: conditioning on u < p leaves u/p uniform on [0,1), and on
# u >= p leaves (u-p)/(1-p) uniform on [0,1)).
n <- 10^1
p <- 0.5
u <- runif(1)
x <- rep(0, n)
for (i in seq_len(n)) {
  x[i] <- as.numeric(u < p)
  if (x[i] == 1) {
    u <- u / p
  } else {
    u <- (u - p) / (1 - p)
  }
}
|
47355fb02ce00eb25e306e70ac2276bdda9d1790
|
c4f1eebce3840414014000d5c926b7db6d7b0650
|
/Janousek_et_al_2015_modelling_Springer/Part_2/Code/Exercises/exe_8.2_sazava_fc_mjr.r
|
a263f7cbfdb27831f48c0022aaeed61c5aea44ce
|
[] |
no_license
|
nghia1991ad/GCDkit_book_R
|
0148bf06dda0837a32f2e8b2110e1a148bc0c7a1
|
f215dfeabd19d2521e10ad076bbdfb4c55f77c5b
|
refs/heads/master
| 2022-10-09T00:57:55.521638
| 2020-06-08T09:50:51
| 2020-06-08T09:50:51
| 269,319,776
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
exe_8.2_sazava_fc_mjr.r
|
# Major-element fractional-crystallization model for the Sazava tonalite.
# Reads the parental magma plus fractionating mineral compositions
# (tab-separated), builds the cumulate as a weighted mix of the minerals,
# and back-calculates the residual liquid after 20 % crystallization.
comp <- as.matrix(read.table("sazava_fc.data", sep = "\t"))  # matrix form needed for %*%
parent <- comp[, 1]          # composition of the parental magma
phases <- comp[, -1]         # compositions of the fractionating phases
props <- c(0.5, 0.3, 0.2)    # mineral proportions in the cumulate
frac <- 0.2                  # degree of fractionation
cumulate <- phases %*% props                         # cumulate composition [Eq. (6.14)]
residual <- (parent - cumulate * frac) / (1 - frac)  # residual liquid [Eq. (8.2)]
comp <- cbind(comp, cumulate, residual)
colnames(comp) <- c("tonalite", colnames(phases), "cumulate", "dif.magma")
print(round(comp, 2))
|
2ed7e9a6db6e804b2a1f97ef8fc164de3b22e05c
|
433a06997823e22efd9d351698a108fdbbc68ea9
|
/cachematrix.R
|
ef5aeef23631fe9ab1bebf377d9ba4f1c8d80da4
|
[] |
no_license
|
scify65/ProgrammingAssignment2
|
a4e0e61094b5496caabaa3cdfb98690441352219
|
97b3c180e9d25a31f40962faeba88f9ef7733848
|
refs/heads/master
| 2021-01-17T13:21:57.543723
| 2015-06-07T02:54:08
| 2015-06-07T02:54:08
| 31,139,832
| 0
| 0
| null | 2015-02-21T20:30:28
| 2015-02-21T20:30:28
| null |
UTF-8
|
R
| false
| false
| 730
|
r
|
cachematrix.R
|
## Build a special "matrix" object: a list of closures that share a
## matrix `x` and a cached value for its inverse.
##
## x: the matrix to wrap (defaults to a 1x1 NA matrix).
## Returns a list with accessors:
##   set(y)        - replace the matrix and clear the cached inverse
##   get()         - return the stored matrix
##   setinverse(v) - store a computed inverse
##   getinverse()  - return the cached inverse, or NULL if not set
makeCacheMatrix <- function(x = matrix(), ...) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix invalidates the old inverse
    },
    get = function() x,
    setinverse = function(solve) cached_inverse <<- solve,
    getinverse = function() cached_inverse
  )
}
## Return the inverse of the special "matrix" built by makeCacheMatrix.
## The inverse is computed with solve() only on the first call; later
## calls serve the cached copy (announced via a message).
##
## x:   object created by makeCacheMatrix()
## ...: extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
c1756a8c60f3b1be6f3578cc5d69b8ffe61ecd6e
|
9b41f42dea23be6cd0ade93716efa051858a6f0b
|
/Scripts/FFPE_trio_summary_plots.R
|
3a7777f943f8cc3c499bfb6cc4c2fec5426420b4
|
[] |
no_license
|
martimij/FFPE_trio_analysis_mirrored
|
b31be16f3eed5c9c84fc2d5509f92ab0445c8eae
|
dc6c7f70877450def4275150caac544fef52c2f1
|
refs/heads/master
| 2020-03-14T16:45:59.300916
| 2018-05-01T11:35:30
| 2018-05-01T11:35:30
| 131,704,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,087
|
r
|
FFPE_trio_summary_plots.R
|
# FFPE trio analysis
# Martina Mijuskovic
# May 2017
######### Load libraries #########
# NOTE: Installing R packages on HPC: use lib = "~/R/x86_64-pc-linux-gnu-library/3.X"
library(dplyr)
library(reshape)
library(ggplot2)
library(scales)
library(R.utils)
library(jsonlite)
######### Helper objects for plotting #########
# Minimal ggplot theme: no grid lines, no panel background, black axis
# lines, and no legend title.
blank <- theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"), legend.title=element_blank())
# Dashed black linear-model regression line fitted across all points
# (aes(group = 1) ignores any grouping aesthetic of the parent plot).
# Fixed: se = F -> se = FALSE (T/F are reassignable variables, not keywords).
regr_line <- geom_smooth(method = "lm", se = FALSE, aes(group = 1), linetype = 2, col = "black", size = 0.5)
######### QC summary plots #########
######### SNV plots #########
################ SNV summary plots (original 26 trios) ################
# Load the data (>5% variant freq only) - local
SNV_summary <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_5pctFreq2017-04-19.csv")
# Add QC details to the SNV summary table
SNV_summary$PATIENT_ID <- as.character(SNV_summary$PATIENT_ID)
QC_table <- QC_portal_trios %>% filter(SAMPLE_TYPE == "FFPE") %>% dplyr::select(PATIENT_ID, CENTER_CODE, TumorType, SAMPLE_WELL_ID, TUMOUR_PURITY, GC_DROP, AT_DROP, COVERAGE_HOMOGENEITY, CHIMERIC_PER, AV_FRAGMENT_SIZE_BP, MAPPING_RATE_PER, DEAMINATION_MISMATCHES_PER)
names(QC_table)[4:12] <- paste0("FFPE_", names(QC_table)[4:12])
QC_table <- left_join(QC_table, (QC_portal_trios %>% filter(SAMPLE_TYPE == "FF") %>% dplyr::select(PATIENT_ID, SAMPLE_WELL_ID, LIBRARY_TYPE, TUMOUR_PURITY, GC_DROP, AT_DROP, COVERAGE_HOMOGENEITY, CHIMERIC_PER, AV_FRAGMENT_SIZE_BP, MAPPING_RATE_PER, DEAMINATION_MISMATCHES_PER)), by = "PATIENT_ID")
names(QC_table)[13:22] <- paste0("FF_", names(QC_table)[13:22])
QC_table$PATIENT_ID <- as.character(QC_table$PATIENT_ID)
SNV_summary <- left_join(SNV_summary, QC_table, by = "PATIENT_ID")
# Load the Domain1, Domain2 gene lists
domain1 <- read.table("GENOMONCOLOGY_SOLID_TUMOUR.v1.4.tsv", sep = "\t", header = T)
domain1 <- domain1[,1:4]
domain1 <- unique(domain1)
length(unique(domain1$gene_name)) # 72 genes
domain2 <- read.table("CANCER_CENSUS_GENES.v1.4.tsv", sep = "\t", header = T)
length(unique(domain2$gene_name)) # 544 genes
### Summary plots
# Distribution of RECALL and PRECISION
hist(SNV_summary$RECALL)
hist(SNV_summary$PRECISION)
# Overlap of Domain 1-3 SNVs >5% frequency per patient
# First recast the data (each of 3 bp values in separate row, with PATIENT_ID, with indexes 1-2-3), needs package "reshape"
SNV_summary_m <- as.data.frame(t(SNV_summary %>% arrange(PRECISION) %>% dplyr::select(PATIENT_ID, OVERLAP, FF_UNIQ, FFPE_UNIQ)))
names(SNV_summary_m) <- as.matrix(SNV_summary_m[1,])
SNV_summary_m <- SNV_summary_m[2:4,]
SNV_summary_m <- melt(cbind(SNV_summary_m, ind = rownames(SNV_summary_m)), id.vars = c('ind'))
SNV_summary_m$value <- as.numeric(SNV_summary_m$value)
# Overap plot (needs package "scales")
ggplot(SNV_summary_m, aes(x = variable, y = value, fill = ind)) + geom_bar(position = "fill",stat = "identity") + scale_y_continuous(labels = percent_format()) + theme(axis.text.x=element_text(angle=45,hjust=1,vjust=1), axis.title = element_blank()) + theme(legend.title=element_blank()) + labs(x = "Patient ID", y = element_blank()) + blank
# RECALL of FF by AT dropout/coverage homogeneity, chim reads, mapping rate of FFPE, etc
ggplot(SNV_summary, aes(x = RECALL, y = FFPE_AT_DROP, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Percent Recall of FF", y = "FFPE AT Dropout") + theme(legend.title=element_blank())
cor(SNV_summary$RECALL, SNV_summary$FFPE_AT_DROP, method = "spearman") # 0.01094204
ggplot(SNV_summary, aes(x = RECALL, y = FFPE_COVERAGE_HOMOGENEITY, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Percent Recall of FF", y = "FFPE Unevenness of Coverage") + theme(legend.title=element_blank())
cor(SNV_summary$RECALL, SNV_summary$FFPE_COVERAGE_HOMOGENEITY, method = "spearman") # 0.06393162
ggplot(SNV_summary, aes(x = RECALL, y = FFPE_CHIMERIC_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Percent Recall of FF", y = "FFPE % Chimeric") + theme(legend.title=element_blank())
cor(SNV_summary$RECALL, SNV_summary$FFPE_CHIMERIC_PER, method = "spearman") # -0.08038311
ggplot(SNV_summary, aes(x = RECALL, y = FFPE_MAPPING_RATE_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Percent Recall of FF", y = "FFPE Mapping Rate") + theme(legend.title=element_blank())
cor(SNV_summary$RECALL, SNV_summary$FFPE_MAPPING_RATE_PER, method = "spearman") # 0.07213675
ggplot(SNV_summary, aes(x = RECALL, y = FFPE_DEAMINATION_MISMATCHES_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Percent Recall of FF", y = "FFPE Deamination Mismatches") + theme(legend.title=element_blank())
cor(SNV_summary$RECALL, SNV_summary$FFPE_DEAMINATION_MISMATCHES_PER, method = "spearman") # -0.1070085
ggplot(SNV_summary, aes(x = RECALL, y = FFPE_TUMOUR_PURITY, col = factor(TumorType))) + geom_jitter() + labs(x = "Percent Recall of FF", y = "FFPE Tumour Purity") + theme(legend.title=element_blank()) + regr_line
cor(SNV_summary$RECALL, SNV_summary$FFPE_TUMOUR_PURITY, method = "spearman") # -0.3930165
# PRECISION of FF by AT dropout/coverage homogeneity, chim reads, mapping rate of FFPE, etc
ggplot(SNV_summary, aes(x = PRECISION, y = FFPE_AT_DROP, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE AT Dropout") + theme(legend.title=element_blank())
cor(SNV_summary$PRECISION, SNV_summary$FFPE_AT_DROP, method = "spearman") # -0.3135579
ggplot(SNV_summary, aes(x = PRECISION, y = FFPE_COVERAGE_HOMOGENEITY, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE Unevenness of Coverage") + theme(legend.title=element_blank())
cor(SNV_summary$PRECISION, SNV_summary$FFPE_COVERAGE_HOMOGENEITY, method = "spearman") # -0.3996581
ggplot(SNV_summary, aes(x = PRECISION, y = FFPE_CHIMERIC_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE % Chimeric") + theme(legend.title=element_blank())
cor(SNV_summary$PRECISION, SNV_summary$FFPE_CHIMERIC_PER, method = "spearman") # -0.04241492
ggplot(SNV_summary, aes(x = PRECISION, y = FFPE_MAPPING_RATE_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE Mapping Rate") + theme(legend.title=element_blank())
cor(SNV_summary$PRECISION, SNV_summary$FFPE_MAPPING_RATE_PER, method = "spearman") # 0.2205128
ggplot(SNV_summary, aes(x = PRECISION, y = FFPE_DEAMINATION_MISMATCHES_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE Deamination Mismatches") + theme(legend.title=element_blank())
cor(SNV_summary$PRECISION, SNV_summary$FFPE_DEAMINATION_MISMATCHES_PER, method = "spearman") # 0.04820513
ggplot(SNV_summary, aes(x = PRECISION, y = FFPE_TUMOUR_PURITY, col = factor(TumorType))) + geom_jitter() + labs(x = "Precision", y = "FFPE Tumour Purity") + theme(legend.title=element_blank()) + regr_line
cor(SNV_summary$PRECISION, SNV_summary$FFPE_TUMOUR_PURITY, method = "spearman") # -0.4334136
######## Cluster samples
summary(SNV_summary$PRECISION)
ggplot(SNV_summary, aes(x = CENTER_CODE, y = PRECISION, col = factor(CENTER_CODE))) + geom_jitter() + geom_boxplot(alpha = 0) + theme(legend.title=element_blank())
summary(SNV_summary$RECALL)
ggplot(SNV_summary, aes(x = CENTER_CODE, y = RECALL, col = factor(CENTER_CODE))) + geom_jitter() + geom_boxplot(alpha = 0) + theme(legend.title=element_blank())
# Recall of FF vs recall of FFPE (recall vs precision)
#ggplot(SNV_summary, aes(x = RECALL, y = PRECISION, col = factor(CENTER_CODE))) + geom_jitter() + geom_smooth(method = "lm", se = F) + labs(x = "Percent Recall of FF", y = "Percent Recall of FFPE")
ggplot(SNV_summary, aes(x = RECALL, y = PRECISION, col = factor(CENTER_CODE))) + geom_jitter() + labs(x = "Recall", y = "Precision") + theme(legend.title=element_blank())
cor(SNV_summary$RECALL, SNV_summary$PRECISION, method = "spearman") # 0.6690598
summary(SNV_summary$FFPE_TUMOUR_PURITY)
summary(SNV_summary$FF_TUMOUR_PURITY)
################ Overlap plots with different VAFs ################
# Load data, add to QC data
### 20% VAF
SNV_summary <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_20pctFreq2017-04-20.csv")
# Add QC details to the SNV summary table
SNV_summary$PATIENT_ID <- as.character(SNV_summary$PATIENT_ID)
QC_table <- QC_portal_trios %>% filter(SAMPLE_TYPE == "FFPE") %>% dplyr::select(PATIENT_ID, CENTER_CODE, TumorType, SAMPLE_WELL_ID, TUMOUR_PURITY, GC_DROP, AT_DROP, COVERAGE_HOMOGENEITY, CHIMERIC_PER, AV_FRAGMENT_SIZE_BP, MAPPING_RATE_PER, DEAMINATION_MISMATCHES_PER)
names(QC_table)[4:12] <- paste0("FFPE_", names(QC_table)[4:12])
QC_table <- left_join(QC_table, (QC_portal_trios %>% filter(SAMPLE_TYPE == "FF") %>% dplyr::select(PATIENT_ID, SAMPLE_WELL_ID, LIBRARY_TYPE, TUMOUR_PURITY, GC_DROP, AT_DROP, COVERAGE_HOMOGENEITY, CHIMERIC_PER, AV_FRAGMENT_SIZE_BP, MAPPING_RATE_PER, DEAMINATION_MISMATCHES_PER)), by = "PATIENT_ID")
names(QC_table)[13:22] <- paste0("FF_", names(QC_table)[13:22])
QC_table$PATIENT_ID <- as.character(QC_table$PATIENT_ID)
SNV_summary <- left_join(SNV_summary, QC_table, by = "PATIENT_ID")
# First recast the data (each of 3 bp values in separate row, with PATIENT_ID, with indexes 1-2-3), needs package "reshape"
SNV_summary_m <- as.data.frame(t(SNV_summary %>% arrange(PRECISION) %>% dplyr::select(PATIENT_ID, OVERLAP, FF_UNIQ, FFPE_UNIQ)))
names(SNV_summary_m) <- as.matrix(SNV_summary_m[1,])
SNV_summary_m <- SNV_summary_m[2:4,]
SNV_summary_m <- melt(cbind(SNV_summary_m, ind = rownames(SNV_summary_m)), id.vars = c('ind'))
SNV_summary_m$value <- as.numeric(SNV_summary_m$value)
# Overap plot (needs package "scales")
ggplot(SNV_summary_m, aes(x = variable, y = value, fill = ind)) + geom_bar(position = "fill",stat = "identity") + scale_y_continuous(labels = percent_format()) + theme(axis.text.x=element_text(angle=45,hjust=1,vjust=1), axis.title = element_blank()) + theme(legend.title=element_blank()) + labs(x = "Patient ID", y = element_blank()) + blank
######### Correlation of overlap with tumor purity, >20% VAF #########
# Outliers in the overlap plot
SNV_summary %>% filter(PATIENT_ID %in% c("217000052", "217000011")) %>% dplyr::select(PATIENT_ID, TumorType, RECALL, PRECISION, FFPE_TUMOUR_PURITY, FF_TUMOUR_PURITY, FFPE_GC_DROP, FFPE_COVERAGE_HOMOGENEITY, FFPE_DEAMINATION_MISMATCHES_PER, FF_LIBRARY_TYPE)
# Samples with FF and FFPE tumor purity > 50%
dim(SNV_summary %>% filter(FFPE_TUMOUR_PURITY > 50, FF_TUMOUR_PURITY > 50)) # 8 (FF and FFPE)
dim(SNV_summary %>% filter(FFPE_TUMOUR_PURITY > 50)) # 10 (FFPE only)
# Distribution of tumor purity
ggplot(QC_portal_trios, aes(x = SAMPLE_TYPE, y = TUMOUR_PURITY, col = factor(SAMPLE_TYPE))) + geom_jitter() + geom_boxplot(alpha = 0) + theme(legend.title=element_blank())
######### Only samples with FFPE tumor purity >50% #########
# RECALL by AT dropout/coverage homogeneity, chim reads, mapping rate of FFPE, etc
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = RECALL, y = FFPE_AT_DROP, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Recall", y = "FFPE AT Dropout") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$RECALL, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_AT_DROP, method = "spearman") # 0.05454545
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = RECALL, y = FFPE_COVERAGE_HOMOGENEITY, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Recall", y = "FFPE Unevenness of Coverage") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$RECALL, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_COVERAGE_HOMOGENEITY, method = "spearman") # 0.1757576
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = RECALL, y = FFPE_CHIMERIC_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Recall", y = "FFPE % Chimeric") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$RECALL, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_CHIMERIC_PER, method = "spearman") # -0.1515152
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = RECALL, y = FFPE_MAPPING_RATE_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Recall", y = "FFPE Mapping Rate") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$RECALL, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_MAPPING_RATE_PER, method = "spearman") # 0.4424242
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = RECALL, y = FFPE_DEAMINATION_MISMATCHES_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Recall", y = "FFPE Deamination Mismatches") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$RECALL, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_DEAMINATION_MISMATCHES_PER, method = "spearman") # 0.4181818
# PRECISION by AT dropout/coverage homogeneity, chim reads, mapping rate of FFPE, etc
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = PRECISION, y = FFPE_AT_DROP, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE AT Dropout") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$PRECISION, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_AT_DROP, method = "spearman") # -0.5757576
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = PRECISION, y = FFPE_COVERAGE_HOMOGENEITY, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE Unevenness of Coverage") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$PRECISION, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_COVERAGE_HOMOGENEITY, method = "spearman") # -0.5636364
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = PRECISION, y = FFPE_CHIMERIC_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE % Chimeric") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$PRECISION, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_CHIMERIC_PER, method = "spearman") # -0.4424242
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = PRECISION, y = FFPE_MAPPING_RATE_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE Mapping Rate") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$PRECISION, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_MAPPING_RATE_PER, method = "spearman") # 0.1636364
ggplot(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,], aes(x = PRECISION, y = FFPE_DEAMINATION_MISMATCHES_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Precision", y = "FFPE Deamination Mismatches") + theme(legend.title=element_blank())
cor(SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$PRECISION, SNV_summary[SNV_summary$FFPE_TUMOUR_PURITY > 50,]$FFPE_DEAMINATION_MISMATCHES_PER, method = "spearman") # -0.3333333
########## Overlap plots by allele frequency ##########
# Read SNV overlap data for different allele frequency (VAF) thresholds.
# NOTE(review): absolute local paths — these only resolve on the original
# author's machine; consider a data-directory variable or relative paths.
SNV_summary_0 <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_allFreq2017-04-19.csv")
SNV_summary_5 <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_5pctFreq2017-04-19.csv")
SNV_summary_10 <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_10pctFreq2017-04-24.csv")
SNV_summary_15 <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_15pctFreq2017-04-24.csv")
SNV_summary_20 <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_20pctFreq2017-04-20.csv")
SNV_summary_25 <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_25pctFreq2017-04-24.csv")
SNV_summary_30 <- read.csv("/Users/MartinaMijuskovic/FFPE/SNV_summary_26trios_30pctFreq2017-04-24.csv")
# Create a table with overlap % for each freq, each patient.
# Overlap % = shared calls / (FF-unique + FFPE-unique + shared).
SNV_summary_0$OVERLAP_PCT <- SNV_summary_0$OVERLAP/(SNV_summary_0$FF_UNIQ + SNV_summary_0$FFPE_UNIQ + SNV_summary_0$OVERLAP)
SNV_summary_5$OVERLAP_PCT <- SNV_summary_5$OVERLAP/(SNV_summary_5$FF_UNIQ + SNV_summary_5$FFPE_UNIQ + SNV_summary_5$OVERLAP)
SNV_summary_10$OVERLAP_PCT <- SNV_summary_10$OVERLAP/(SNV_summary_10$FF_UNIQ + SNV_summary_10$FFPE_UNIQ + SNV_summary_10$OVERLAP)
SNV_summary_15$OVERLAP_PCT <- SNV_summary_15$OVERLAP/(SNV_summary_15$FF_UNIQ + SNV_summary_15$FFPE_UNIQ + SNV_summary_15$OVERLAP)
SNV_summary_20$OVERLAP_PCT <- SNV_summary_20$OVERLAP/(SNV_summary_20$FF_UNIQ + SNV_summary_20$FFPE_UNIQ + SNV_summary_20$OVERLAP)
SNV_summary_25$OVERLAP_PCT <- SNV_summary_25$OVERLAP/(SNV_summary_25$FF_UNIQ + SNV_summary_25$FFPE_UNIQ + SNV_summary_25$OVERLAP)
SNV_summary_30$OVERLAP_PCT <- SNV_summary_30$OVERLAP/(SNV_summary_30$FF_UNIQ + SNV_summary_30$FFPE_UNIQ + SNV_summary_30$OVERLAP)
# Tag each per-threshold table with its VAF cutoff as a character label
# (character is deliberate here: it becomes the categorical x-axis below,
# and "0" < "0.05" < ... sorts correctly lexicographically).
SNV_summary_0$VAF <- "0"
SNV_summary_5$VAF <- "0.05"
SNV_summary_10$VAF <- "0.10"
SNV_summary_15$VAF <- "0.15"
SNV_summary_20$VAF <- "0.20"
SNV_summary_25$VAF <- "0.25"
SNV_summary_30$VAF <- "0.30"
# Stack all thresholds into one long table, one row per patient per cutoff.
SNV_summary_all <- rbind(SNV_summary_0, SNV_summary_5, SNV_summary_10, SNV_summary_15, SNV_summary_20, SNV_summary_25, SNV_summary_30)
# Plot all data
ggplot(SNV_summary_all, aes(x = VAF, y = OVERLAP_PCT)) + geom_boxplot() + labs(x = "VAF", y = "Overlap %") + theme(legend.title=element_blank())
# Plot stratified by tumor type and purity.
# QC_table (built earlier in this script) is joined by PATIENT_ID via match().
SNV_summary_all$TumorType <- QC_table[match(SNV_summary_all$PATIENT_ID, QC_table$PATIENT_ID),]$TumorType
SNV_summary_all$FF_TUMOUR_PURITY <- QC_table[match(SNV_summary_all$PATIENT_ID, QC_table$PATIENT_ID),]$FF_TUMOUR_PURITY
SNV_summary_all$FFPE_TUMOUR_PURITY <- QC_table[match(SNV_summary_all$PATIENT_ID, QC_table$PATIENT_ID),]$FFPE_TUMOUR_PURITY
# Boxplots by tumor type
ggplot(SNV_summary_all, aes(x = VAF, y = OVERLAP_PCT, col = factor(TumorType))) + geom_boxplot(aes(col = factor(TumorType))) + labs(x = "VAF", y = "Overlap %") + theme(legend.title=element_blank())
# Boxplots by tumor purity (dichotomized at 50%)
ggplot(SNV_summary_all, aes(x = VAF, y = OVERLAP_PCT, col = factor(FFPE_TUMOUR_PURITY > 50))) + geom_boxplot(aes(col = factor(FFPE_TUMOUR_PURITY > 50))) + labs(x = "VAF", y = "Overlap %") + theme(legend.title=element_blank())
ggplot(SNV_summary_all, aes(x = VAF, y = OVERLAP_PCT, col = factor(FF_TUMOUR_PURITY > 50))) + geom_boxplot(aes(col = factor(FF_TUMOUR_PURITY > 50))) + labs(x = "VAF", y = "Overlap %") + theme(legend.title=element_blank())
# Plots by sample (one line per patient across VAF cutoffs)
ggplot(SNV_summary_all, aes(x = VAF, y = OVERLAP_PCT, group = PATIENT_ID)) + geom_line(aes(col = FF_TUMOUR_PURITY), size = 1) + labs(x = "VAF", y = "Overlap %")
# List of samples by purity and tumor type
# NOTE(review): bare select() — relies on dplyr being the last-attached
# package that defines select; consider dplyr::select() for safety.
SNV_summary_all %>% filter(VAF == "0") %>% select(PATIENT_ID, TumorType, FF_TUMOUR_PURITY, FFPE_TUMOUR_PURITY)
# Summary of overlap by different VAF cutoffs
SNV_summary_all %>% group_by(VAF) %>% summarise(MEAN_OVERLAP_PCT = mean(OVERLAP_PCT), MIN_OVERLAP_PCT = min(OVERLAP_PCT), MAX_OVERLAP_PCT = max(OVERLAP_PCT))
# Plot FF unique # by VAF
ggplot(SNV_summary_all, aes(x = VAF, y = FF_UNIQ, group = PATIENT_ID)) + geom_line(aes(col = FF_TUMOUR_PURITY), size = 1) + labs(x = "VAF", y = "FF Unique Variants")
# Plot FFPE unique # by VAF
ggplot(SNV_summary_all, aes(x = VAF, y = FFPE_UNIQ, group = PATIENT_ID)) + geom_line(aes(col = FF_TUMOUR_PURITY), size = 1) + labs(x = "VAF", y = "FFPE Unique Variants")
######### Correlation of max overlap with QC metrics #########
# I will use the maximum overlap % (calculated on per-sample basis by using different VAF cutoffs) to calculate correlations with QC metrics
# The idea is that, since overlap % also depends on tumor purity, clonality and VAF, maximum overlap will normalize some of these effects
SNV_summary_all$PATIENT_ID <- as.character(SNV_summary_all$PATIENT_ID)
SNV_summary_maxO <- as.data.frame(SNV_summary_all %>% group_by(PATIENT_ID) %>% summarise(MAX_OVERLAP = max(OVERLAP_PCT)))
# Add maximum overlap % to QC data
SNV_summary <- full_join(SNV_summary, SNV_summary_maxO, by = "PATIENT_ID")
# Plot maximum overlap % correlation with QC metrics
ggplot(SNV_summary, aes(x = MAX_OVERLAP, y = FFPE_AT_DROP, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum FF-FFPE overlap", y = "FFPE AT Dropout") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_OVERLAP, SNV_summary$FFPE_AT_DROP, method = "spearman") # 0.128569
ggplot(SNV_summary, aes(x = MAX_OVERLAP, y = FFPE_COVERAGE_HOMOGENEITY, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum FF-FFPE overlap", y = "FFPE Unevenness of Coverage") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_OVERLAP, SNV_summary$FFPE_COVERAGE_HOMOGENEITY, method = "spearman") # 0.1117949
ggplot(SNV_summary, aes(x = MAX_OVERLAP, y = FFPE_CHIMERIC_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum FF-FFPE overlap", y = "FFPE % Chimeric") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_OVERLAP, SNV_summary$FFPE_CHIMERIC_PER, method = "spearman") # -0.1641868
ggplot(SNV_summary, aes(x = MAX_OVERLAP, y = FFPE_MAPPING_RATE_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum FF-FFPE overlap", y = "FFPE Mapping Rate") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_OVERLAP, SNV_summary$FFPE_MAPPING_RATE_PER, method = "spearman") # -0.1405128
ggplot(SNV_summary, aes(x = MAX_OVERLAP, y = FFPE_DEAMINATION_MISMATCHES_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum FF-FFPE overlap", y = "FFPE Deamination Mismatches") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_OVERLAP, SNV_summary$FFPE_DEAMINATION_MISMATCHES_PER, method = "spearman") # -0.117265
# Plot maximum precision correlation with QC metrics
SNV_summary_maxP <- as.data.frame(SNV_summary_all %>% group_by(PATIENT_ID) %>% summarise(MAX_PRECISION = max(PRECISION)))
SNV_summary <- full_join(SNV_summary, SNV_summary_maxP, by = "PATIENT_ID")
ggplot(SNV_summary, aes(x = MAX_PRECISION, y = FFPE_AT_DROP, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum precision", y = "FFPE AT Dropout") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_PRECISION, SNV_summary$FFPE_AT_DROP, method = "spearman") # 0.01060192
ggplot(SNV_summary, aes(x = MAX_PRECISION, y = FFPE_COVERAGE_HOMOGENEITY, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum precision", y = "FFPE Unevenness of Coverage") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_PRECISION, SNV_summary$FFPE_COVERAGE_HOMOGENEITY, method = "spearman") # -0.006496837
ggplot(SNV_summary, aes(x = MAX_PRECISION, y = FFPE_CHIMERIC_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum precision", y = "FFPE % Chimeric") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_PRECISION, SNV_summary$FFPE_CHIMERIC_PER, method = "spearman") #-0.08997606
ggplot(SNV_summary, aes(x = MAX_PRECISION, y = FFPE_MAPPING_RATE_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum precision", y = "FFPE Mapping Rate") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_PRECISION, SNV_summary$FFPE_MAPPING_RATE_PER, method = "spearman") # -0.09813644
ggplot(SNV_summary, aes(x = MAX_PRECISION, y = FFPE_DEAMINATION_MISMATCHES_PER, col = factor(CENTER_CODE))) + geom_jitter() + regr_line + labs(x = "Maximum precision", y = "FFPE Deamination Mismatches") + theme(legend.title=element_blank())
cor(SNV_summary$MAX_PRECISION, SNV_summary$FFPE_DEAMINATION_MISMATCHES_PER, method = "spearman") # -0.1258335
######### SV plots #########
######### CNV plots #########
|
a17848d18836cd829e023ebc43e9c436985c8b25
|
cfaf2e9ea3ca0b733117cb9e745c0444cb8ae20f
|
/man/remove.spaces.Rd
|
e88cc82ad7e411657e04b492e2c24213a70dfd7b
|
[] |
no_license
|
cran/fsthet
|
b538842911099c942740fec913b7bb1cd71f0f5f
|
de47813bfe7b7f218e56fd7e8ff798f266414959
|
refs/heads/master
| 2021-05-07T07:46:25.074811
| 2018-03-20T17:27:53
| 2018-03-20T17:27:53
| 109,258,201
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 590
|
rd
|
remove.spaces.Rd
|
\name{remove.spaces}
\alias{remove.spaces}
\title{This removes spaces from a character vector}
\description{
This removes spaces from before and after words in a character vector.
It was adapted from a similar function in adegenet.
}
\usage{
remove.spaces(charvec)
}
\arguments{
\item{charvec}{is a vector of characters containing spaces to be removed.}
}
\value{
\item{charvec}{A vector of characters without spaces}
}
\references{
\url{ http://adegenet.r-forge.r-project.org/ }
}
\examples{
charvec<-c("this ", " is"," a"," test")
remove.spaces(charvec)
}
|
9b37733ec14691082413beb52d00bc7f0f24d84c
|
7c53acd05c2a5fde82fce04b4214b077df773fdc
|
/R/cfb_conferences.R
|
0e9004a4e08f7960ebd74b57d1e5e7e257885315
|
[
"MIT"
] |
permissive
|
sabinanalytics/cfbscrapR
|
953a10bf17bba5ae58e112f4980ebd831f399a10
|
4fdf232ab410d39d67cf2297d2ead77c12ef2241
|
refs/heads/master
| 2022-12-02T06:34:54.975718
| 2020-08-11T13:58:34
| 2020-08-11T13:58:34
| 286,620,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
cfb_conferences.R
|
#' CFB Conference Information
#'
#' Pulls all college football conferences and returns as
#' data.frame the following fields:
#' @format A data frame with 11 rows and 4 variables:
#' \describe{
#'   \item{id}{Referencing conference id}
#'   \item{name}{Conference name}
#'   \item{long_name}{Long name for Conference}
#'   \item{abbreviation}{Conference abbreviation}
#'   ...
#' }
#' @source \url{https://api.collegefootballdata.com/conferences}
#'
#' @keywords Conferences
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "GET" "content"
#' @import dplyr
#' @import tidyr
#' @export
#' @examples
#'
#' cfb_conferences()
#'
cfb_conferences <- function(){
  base_url = "https://api.collegefootballdata.com/conferences"

  # Check for internet
  check_internet()

  # Create the GET request and set response as res
  res <- GET(base_url)

  # Check the result (stops with an informative error on a bad status code)
  check_status(res)

  # Parse the body of the response we already fetched and status-checked.
  # Previously this called fromJSON(base_url), which issued a SECOND,
  # unchecked HTTP request to the same endpoint.
  df = fromJSON(content(res, as = "text", encoding = "UTF-8"))

  # Rename id as conference_id, short_name as long_name
  df <- df %>%
    rename(conference_id = .data$id,
           long_name = .data$short_name)
  return(df)
}
|
b8c3384758aea83b676749980bd36c40b6c62b83
|
7c5caeca7735d7909c29ee3ed6074ad008320cf0
|
/misc/example-data/soiltexture/first-draft-LUT.R
|
fe199a2f544dd92748949d3b950c4901936b9237
|
[] |
no_license
|
ncss-tech/aqp
|
8063e800ed55458cfa7e74bc7e2ef60ac3b1e6f5
|
c80591ee6fe6f4f08b9ea1a5cd011fc6d02b5c4a
|
refs/heads/master
| 2023-09-02T07:45:34.769566
| 2023-08-31T00:14:22
| 2023-08-31T00:27:14
| 54,595,349
| 47
| 12
| null | 2023-08-17T15:33:59
| 2016-03-23T21:48:50
|
R
|
UTF-8
|
R
| false
| false
| 1,312
|
r
|
first-draft-LUT.R
|
# # this creates a lookup from ssc to texcl using the soiltexture package function TT.points.in.classes, which is not really inituitive
# soiltexture <- expand.grid(clay = 0:100, sand = 0:100, silt = 0:100)
# soiltexture <- subset(soiltexture, (clay + silt + sand) == 100)
# soiltexture$texcl <- apply(soiltexture, 1, FUN = function(x) {
#
# y <- soiltexture::TT.points.in.classes(data.frame(CLAY = x[1], SILT = x[3], SAND = x[2]), class.sys = "USDA-NCSS.TT")
# texcl <- names(y[, y > 0])
#
# return(texcl[1])
# })
#
# soiltexture$texcl <- tolower(soiltexture$texcl)
#
# idx <- with(soiltexture, clay == 40 & sand == 45)
# soiltexture$texcl[idx] <- "sc"
# row.names(soiltexture) <- NULL
#
# soiltexture <- list(values = soiltexture)
#
#
# library(compositions)
#
# st <- soiltexture
# split(st, st$texcl) ->.;
# lapply(., function(x) {
#
# co <- clo(x, parts = c("sand", "silt", "clay"), total = 100)
# rco <- rplus(co, total = 100)
# ds <- mean(rco)
# df <- data.frame(
# texcl = x$texcl[1],
# avg_clay = round(mean(x$clay)),
# ravg_clay = round(ds[3]),
#     avg_sand = round(mean(x$sand)),
# ravg_sand = round(ds[1])
# )
# df$silt <- 100 - df$clay - df$sand
# }) ->.;
# do.call("rbind", .) ->.;
# st2 <- .
# # I see no difference in the compositional statistics
|
739246cafda18d25ee411df0dd955936e687c6fa
|
76dbce75d2127a9304e2bce5f929898e11bedf54
|
/code/plan/98_palettes.R
|
7b1b7acdaf134e53fad5c083b8e436f71f495ec9
|
[] |
no_license
|
milescsmith/rnaseq_drake
|
dcabc8f702e9d9faf1095412c49f22178a253e71
|
982591fa14979dee72b22aa24304a30c8b25f8df
|
refs/heads/main
| 2023-04-02T10:08:49.527920
| 2021-03-26T15:00:16
| 2021-03-26T15:00:16
| 344,952,270
| 0
| 0
| null | 2021-03-26T14:50:56
| 2021-03-05T22:40:53
|
R
|
UTF-8
|
R
| false
| false
| 2,099
|
r
|
98_palettes.R
|
# We manually set up the palettes for pheatmap because letting it
# automatically pick the colors results in terrible choices.
# Globals used below (annotated_modules, clusters, annotation_info,
# deg_class) are defined elsewhere in the plan/pipeline.

# One color per module type, plus black for "Undetermined".
type_pal =
  paletteer_d(
    "ggsci::category20_d3",
    n = length(levels(annotated_modules$type))
  ) %>%
  as.character() %>%
  set_names(levels(annotated_modules$type)) %>%
  inset2("Undetermined", "#000000")

# Fixed colors for sex chromosomes and reported sex.
chr_pal = c("Y" = "#E41A1C",
            "X" = "#377EB8")
sex_pal = c("Male" = "coral3",
            "Female" = "azure2",
            "unk" = "#333333")

# Cluster palette: the "ggthemes::calc" palette has at most 12 colors, so
# interpolate with colorRampPalette when there are more than 12 clusters.
# NOTE(review): ifelse() on a scalar condition with list()-wrapped branches
# evaluates BOTH branches; a plain if/else would be the idiomatic form here.
cluster_pal =
  ifelse(
    test = length(levels(clusters$cluster)) > 12,
    yes = list(
      colorRampPalette(
        paletteer_d(
          palette = "ggthemes::calc",
          n = 12
        )
      )(
        length(
          levels(
            clusters$cluster
          )
        )
      )
    ),
    no = list(
      paletteer_d(
        palette = "ggthemes::calc",
        n = length(
          levels(
            clusters$cluster
          )
        )
      )
    )
  ) %>%
  unlist() %>%
  as.character() %>%
  set_names(levels(clusters$cluster))

# Project palette: interpolate Set1 out to however many projects exist.
project_pal =
  colorRampPalette(
    brewer.pal(9, "Set1"))(length(levels(annotation_info$project))) %>%
  set_names(levels(annotation_info$project))

number_disease_classes =
  length(
    unique(
      annotation_info$disease_class
    )
  )

# Two classes get black/grey; more than two get Set1.
# NOTE(review): if_else() evaluates both branches, so brewer.pal() is called
# even in the two-class case (it warns for n < 3) — confirm this is intended.
disease_class_pal =
  if_else(
    number_disease_classes > 2,
    list(brewer.pal(number_disease_classes, "Set1")),
    list(c("black", "grey75"))
  ) %>%
  unlist() %>%
  set_names(
    unique(
      annotation_info$disease_class
    )
  )
# cell_type_pal =
#   c("#ffa600", "#0062cc", "#008a71") %>%
#   set_names(levels(annotation_info$cell_type)),

# One color per DE comparison.
comparison_pal =
  oaPalette(
    length(
      unique(deg_class$comparison)
    )
  ) %>%
  set_names(unique(deg_class$comparison))

# Named list of all palettes, as consumed by pheatmap's annotation_colors.
group_pal =
  list(
    type_pal,
    chr_pal,
    sex_pal,
    cluster_pal,
    project_pal,
    disease_class_pal,
    comparison_pal ) %>% #,
  # cell_type_pal) %>%
  set_names(c(
    "type",
    "chr",
    "sex",
    "cluster",
    "project",
    "disease_class",
    "comparison")) #,
    #"cell_type")),
|
7897db414f18779ca71ef2e670824bc15516c563
|
c5faa9a2e350978662624f73725eb7ee02c55cb0
|
/R/SMA.R
|
2819f1636dbdcca54cb05eb81d4e1ac1a46ccf3e
|
[] |
no_license
|
HenrikBengtsson/aroma
|
341cc51ddd8f9c111347207535bfe2a85ea7622a
|
c0314ea003fb1d99d0db7f314e86059502d175c6
|
refs/heads/master
| 2016-09-05T18:24:56.275671
| 2014-06-19T04:13:11
| 2014-06-19T04:13:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,172
|
r
|
SMA.R
|
#########################################################################/**
# @RdocClass SMA
#
# @title "The SMA class"
#
# \description{
# @classhierarchy
#
# The SMA class is a static class that provided methods to convert
# data object in aroma into sma structures. This could be
# useful if you would like to use methods in sma that are not (yet)
# implemented in the aroma package.
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author
#*/#########################################################################
# Static constructor: an SMA object carries no state of its own; the class
# exists only as a namespace for static conversion helpers (as.RG, as.layout,
# loadData, unloadData) between aroma objects and sma's plain-list structures.
setConstructorS3("SMA", function() {
  extend(Object(), "SMA")
}, static=TRUE);
#########################################################################/**
# @RdocMethod as.RG
#
# @title "Converts a aroma object into an object of the RG structure"
#
# @synopsis
#
# \description{
# @get "title", which is used by most of the function in the sma package.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#*/#########################################################################
setMethodS3("as.RG", "SMA", function(this, obj) {
  # Converts an aroma data object (RawData, RGData or MAData) into the
  # plain-list RG structure used by the sma package.
  if (is.null(obj))
    throw("Argument 'obj' is NULL.");

  # MAData is first mapped back to (R,G) intensities.
  if (inherits(obj, "MAData"))
    obj <- as.RGData(obj);

  # RawData carries explicit background estimates; pass them through.
  if (inherits(obj, "RawData"))
    return(list(R=obj$R, G=obj$G, Rb=obj$Rb, Gb=obj$Gb));

  if (!inherits(obj, "RGData"))
    throw("Can not convert to the RG data structure. Unknown data type: ", data.class(obj));

  # RGData has no background estimates; report an all-zero background.
  noBg <- matrix(0, nrow=nrow(obj$R), ncol=ncol(obj$R));
  list(R=obj$R, G=obj$G, Rb=noBg, Gb=noBg);
}, static=TRUE);
#########################################################################/**
# @RdocMethod as.layout
#
# @title "Converts a aroma object into an object of the layout structure"
#
# @synopsis
#
# \description{
# @get "title", which is used by most of the functions in the sma package.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#*/#########################################################################
# Converts a Layout object (or a MicroarrayData object that carries one)
# into the plain-list layout structure used by the sma package.
setMethodS3("as.layout", "SMA", function(this, obj) {
  if (is.null(obj))
    throw("Argument 'obj' is NULL.");
  if (inherits(obj, "MicroarrayData")) {
    # A MicroarrayData object holds its Layout; extract it first.
    obj <- getLayout(obj);
    if (is.null(obj))
      throw("The given object does not specify an Layout object.");
  }
  if (!inherits(obj, "Layout"))
    throw("Can not convert to layout. Argument is of unknown type: ", data.class(obj));
  # sma represents a layout as a list of spots-per-grid and grids-per-slide
  # counts (rows/columns of spots, rows/columns of grids).
  list(nspot.r=obj$nspot.r, nspot.c=obj$nspot.c, ngrid.r=obj$ngrid.r, ngrid.c=obj$ngrid.c);
}, static=TRUE);
# Loads the sma MouseArray example data set (~10Mb) into the workspace.
# If 'names' is given, only those variables are kept and the remaining
# MouseArray variables are unloaded again via SMA$unloadData().
# Returns, invisibly, the total size in bytes of the variables in 'names'
# that exist after loading.
setMethodS3("loadData", "SMA", function(this, names=NULL) {
  totalSize <- 0;
  require(sma);
  data(MouseArray); # Approximately 10Mb
  if (!is.null(names)) {
    # Keep only the variables specified by 'names'
    loadedNames <- c("mouse.data", "mouse.gnames", "mouse.lratio", "mouse.setup", "mouse.t2", "mouse1", "mouse2", "mouse3", "mouse4", "mouse5", "mouse6");
    residues <- setdiff(loadedNames, names);
    SMA$unloadData(residues);
  }

  # Calculate the total memory loaded.
  # NOTE(review): when 'names' is NULL this loop iterates over nothing, so
  # the returned size is 0 even though the whole data set was loaded.
  for (name in names) {
    if (exists(name))
      totalSize <- totalSize + object.size(get(name));
  }

  # Check if gco() should be ran. The rational for doing this here is that
  # loadData() is mostly used in Rd examples and when doing R CMD check on
  # the package the gco() is never called and R CMD check will quickly run
  # into memory problems. /HB 2002-06-24
  # memory.limit()/memory.size() exist on Windows only (see HISTORY below),
  # so on other platforms the free-memory check is effectively disabled.
  OST <- .Platform$OS.type;
  if (OST == "windows") {
    memory.free <- memory.limit() - memory.size();
  } else {
    memory.free <- Inf;
  }
  if (memory.free < 5e6) {
    warning("Running low of memory. Calling garbage collector gc().");
    gc();
  }

  invisible(totalSize);
}, static=TRUE)
setMethodS3("unloadData", "SMA", function(this, names=NULL, envir=.GlobalEnv) {
  # Removes MouseArray example variables from 'envir' and returns, invisibly,
  # the total size in bytes of what was removed. Unloading everything frees
  # roughly 10Mb.
  if (is.null(names))
    names <- c("mouse.data", "mouse.gnames", "mouse.lratio", "mouse.setup", "mouse.t2", "mouse1", "mouse2", "mouse3", "mouse4", "mouse5", "mouse6");

  # Only variables that actually exist are measured and removed.
  found <- names[vapply(names, exists, logical(1), envir=envir)];

  totalSize <- 0;
  for (name in found) {
    totalSize <- totalSize + object.size(get(name, envir=envir));
    rm(list=name, envir=envir);
  }

  gc();
  invisible(totalSize);
}, static=TRUE)
############################################################################
# HISTORY:
# 2002-09-13
# o BUG FIX: memory.limit() and memory.size() do only exist on Windows so
# for now the automatic calling of gco() only works on Windows.
# 2002-06-24
# * SMA$loadData() now calls gco() if [R] is running low of memory. The
# rational for doing this here is that loadData() is mostly used in Rd
# examples and when doing R CMD check on the package the gco() is never
# called and R CMD check will quickly run into memory problems.
# 2002-05-06
# * Added SMA$loadData() and SMA$unloadata(). The SMA data set takes about
# 10Mb of memory, so it is is wise to unload it when not used. Note that
# it is not possible to call it SMA$data() because then the generic
# function will make data(...) stop working.
# 2002-02-27
# * Rewritten to make use of setMethodS3's.
# 2001-08-08
# * Created!
############################################################################
|
272b27a57fe9f9a99c3cdc686953d87f801adf8a
|
b67bef2e6295b68a6ba404e78505258a1ac2f95f
|
/R/accessors.MGLMsparsereg.R
|
f89d2c423769195eaac9217544b378269e4d56f8
|
[] |
no_license
|
cran/MGLM
|
beda91fe76a43884434647620d2bf4aebedc1a59
|
e0b8d5d6dec9b3b0dcc74514b0b68438276513d4
|
refs/heads/master
| 2022-05-01T07:22:15.450258
| 2022-04-13T22:32:32
| 2022-04-13T22:32:32
| 17,680,602
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,267
|
r
|
accessors.MGLMsparsereg.R
|
# extract maxlambda from MGLMsparsereg class
##============================================================##
#' @name maxlambda
#' @title Extract maximum lambda
#' @description \code{maxlambda} extracts the maximum tuning parameter that ensures
#' the estimated regression coefficients are not all zero for the object of class \code{MGLMsparsereg}.
#' @param object an object of class \code{MGLMsparsereg} from which
#' maximum lambda value can be extracted.
#' @return Returns a maximum lambda value of \code{object}.
#' @examples
#' library("MGLM")
#' dist <- "DM"
#' n <- 100
#' p <- 10
#' d <- 5
#' set.seed(118)
#' m <- rbinom(n, 200, 0.8)
#' X <- matrix(rnorm(n * p), n, p)
#' alpha <- matrix(0, p, d)
#' alpha[c(1, 3, 5), ] <- 1
#' Alpha <- exp(X %*% alpha)
#' Y <- rdirmn(size = m, alpha = Alpha)
#' pen <- "group"
#' ngridpt <- 30
#' spmodelfit <- MGLMsparsereg(formula = Y ~ 0 + X, dist = dist,
#' lambda = Inf, penalty = pen)
#' maxlambda <- maxlambda(spmodelfit)
NULL
maxlambdaMGLM <- function(object) {
  # Accessor: return the value stored in the object's 'maxlambda' slot,
  # read via the methods-package slot() accessor.
  slot(object, "maxlambda")
}
#' @rdname maxlambda
#' @exportMethod maxlambda
setMethod("maxlambda", "MGLMsparsereg", function(object) maxlambdaMGLM(object))
# extract degrees of freedom from MGLMsparsereg class
##============================================================##
#' @name dof
#' @title Extract degrees of freedom
#' @description \code{dof} extracts the degrees of freedom of the estimated parameter
#' from the object of class \code{MGLMsparsereg}.
#' @param object an object of class \code{MGLMsparsereg}
#' @return Returns degrees of freedom of \code{object}.
#' @examples
#' library("MGLM")
#' dist <- "DM"
#' n <- 100
#' p <- 10
#' d <- 5
#' set.seed(118)
#' m <- rbinom(n, 200, 0.8)
#' X <- matrix(rnorm(n * p), n, p)
#' alpha <- matrix(0, p, d)
#' alpha[c(1, 3, 5), ] <- 1
#' Alpha <- exp(X %*% alpha)
#' Y <- rdirmn(size = m, alpha = Alpha)
#' pen <- "group"
#' ngridpt <- 30
#' spmodelfit <- MGLMsparsereg(formula = Y ~ 0 + X, dist = dist,
#' lambda = Inf, penalty = pen)
#' df <- dof(spmodelfit)
NULL
dofMGLM <- function(object) {
  # Accessor: return the degrees of freedom stored in the object's 'Dof'
  # slot, read via the methods-package slot() accessor.
  slot(object, "Dof")
}
#' @rdname dof
#' @exportMethod dof
setMethod("dof", "MGLMsparsereg", function(object) dofMGLM(object))
|
bd2da96eab33f8c21dbbc6b7fd354da39e8678e9
|
8ad53152d40a63bf0716a9a2f8efae7d9941b42f
|
/src/05-endemicity.R
|
97ce08f9818055fcd1b5d6456c1239ffbeacdf35
|
[] |
no_license
|
csmiguel/smallmammals_Kinabalu
|
9039dbd119affa54b4ae0383b22fb60a44df43f7
|
5d92e6ae91f5acb1f2e4c6a474abba4ae45a44c9
|
refs/heads/master
| 2021-06-20T11:54:36.327229
| 2021-02-19T18:38:56
| 2021-02-19T18:38:56
| 185,110,009
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,338
|
r
|
05-endemicity.R
|
###.............................................................................
# (c) Miguel Camacho Sánchez
# miguelcamachosanchez@gmail.com // miguelcamachosanchez.weebly.com
# https://scholar.google.co.uk/citations?user=1M02-S4AAAAJ&hl=en
# May 2019
###.............................................................................
#DESCRIPTION: endemicity
#PROJECT: https://github.com/csmiguel/smallmammals_Kinabalu
###.............................................................................
library(dplyr)
# Site-by-species matrix; rownames encode "<location>_<elevation>"
# (see parsing below) — TODO confirm against upstream script.
input <- "data/intermediate/species_matrix.rds"
ecol <- readRDS(input)
# Character vector of endemic species names.
input2 <- "data/intermediate/endemics.rds"
endemics <- readRDS(input2)
# Project parameters; presumably defines `mt` (the mountain/location codes
# iterated over below) — verify against src/parameters/params.r.
source("src/parameters/params.r")
# Collapse positive counts to presence (1); non-positive entries are
# returned unchanged. Works on vectors, matrices and data frames.
rep0 <- function(x) {
  replace(x, x > 0, 1)
}
# Number of bootstrap replicates used in every resampling step below.
perm <- 1000
##
# Tidy the site-by-species table: recover elevation and location from the
# rownames ("<location>_<elevation>") and drop taxa identified only to genus.
ecol1 <- ecol %>%
  tibble::rownames_to_column() %>%
  dplyr::mutate(elev = as.numeric(gsub("^.*_", "", rowname))) %>%
  dplyr::mutate(location = gsub("_.*$", "", rowname)) %>%
  dplyr::arrange(location, elev) %>%
  dplyr::select(-"Suncus sp.", -"Crocidura sp")
#1. Bootstrap endemic species within site
# 1.1. Proportion of endemic species
# For each mountain in `mt`: per-elevation proportion of species that are
# endemic (presence/absence based, hence rep0()).
n_end <- seq_along(mt) %>%
  lapply(function(x){
    ecol1 %>%
      dplyr::filter(location == mt[x]) %>%
      dplyr::select(-location, -rowname) %>%
      tibble::column_to_rownames("elev") %>%
      rep0 %>% #otherwise I would be calculating endemics over total catches
      # NOTE(review): `sp` is written into the enclosing environment via `<<-`
      # so it can be reused as the denominator on the next line.
      {sp <<- rowSums(.); .} %>%
      dplyr::select(endemics) %>% rowSums() / sp
  })
names(n_end) <- mt
# 1.2. Bootstrap within sites
# For each mountain, resample the (non-endemic-flagged) presence vector at
# each elevation `perm` times and take the 2.5/50/97.5% quantiles of the
# bootstrapped proportion of endemics.
site_perm <- seq_along(mt) %>%
  lapply(function(x){
    z <- ecol1 %>%
      dplyr::mutate(elev = as.numeric(gsub("^.*_", "", rowname))) %>%
      dplyr::filter(location == mt[x]) %>%
      dplyr::select(-location, -rowname) %>%
      tibble::column_to_rownames("elev") %>% .[, colSums(.) > 0]
    1:perm %>%
      sapply(function(y){
        z %>%
          apply(1, function(w){
            h <- w[w > 0]       # species present at this elevation
            h[h > 0] <- 1       # presence/absence
            h[names(h) %in% endemics] <- 0  # endemics coded 0, so 1 - mean = prop. endemic
            #core bootstrapping function
            sample(h, length(h), replace = T) %>% as.numeric %>% {1- mean(.)}
          })
      }) %>% apply(1, quantile, probs = c(0.025, 0.5, 0.975))
  })
names(site_perm) <- mt
#2. Bootstrap catches per trapping location
# 2.1. Proportion of endemic catches
# Same as 1.1 but WITHOUT rep0(): proportions are computed over raw catch
# counts rather than species presence.
end_catches <-
  seq_along(mt) %>%
  lapply(function(x){
    ecol1 %>%
      dplyr::filter(location == mt[x]) %>%
      dplyr::select(-location, -rowname) %>%
      tibble::column_to_rownames("elev") %>%
      {sp <<- rowSums(.); .} %>%   # NOTE(review): `<<-` side effect, as above
      dplyr::select(endemics) %>% rowSums() / sp
  })
names(end_catches) <- mt
# 2.2. Bootstrap catches within sites
# Expand counts into one 0/1 entry per individual catch (1 = endemic), then
# resample catches with replacement and take quantiles of the endemic share.
perm_catches <- seq_along(mt) %>%
  lapply(function(x){
    #1.select matrix for a given mountain
    z <- ecol1 %>%
      dplyr::filter(location == mt[x]) %>%
      dplyr::select(-location, -rowname) %>%
      tibble::column_to_rownames("elev") %>%
      .[, colSums(.) > 0]
    #2.run perm permutations
    1:perm %>%
      sapply(function(y){
        #3.for each trapping location
        z %>%
          apply(1, function(h){
            end01 <- names(h) %in% endemics %>% as.numeric
            seq_along(end01) %>%
              sapply(function(x) rep(end01[x], h[x])) %>%
              unlist %>%
              #core command
              {sample(., size = sum(h), replace = T)} %>%
              mean
          })
      }) %>% apply(1, quantile, probs = c(0.025, 0.5, 0.975)) #get quantiles
  })
names(perm_catches) <- mt
#3. fit model
#3.1 for prop of endemics
#create input for model: long format with one row per (location, elevation),
#response = proportion of endemic species
input_model <-
  reshape::melt(n_end) %>%
  dplyr::mutate(elev = seq_along(n_end) %>% sapply(function(x)
    c(names(n_end[[x]]))) %>% as.vector() %>% as.numeric) %>%
  dplyr::rename(prop_end = value) %>%
  dplyr::rename(location = L1) %>%
  dplyr::mutate(location = as.factor(location))
#fit models: binomial GLMM with random intercept per location;
#m1 includes elevation, m2 is the null model
m1 <- lme4::glmer(prop_end ~ elev + (1 | location),
  data = input_model, family = binomial)
m2 <- lme4::glmer(prop_end ~ (1 | location),
  data = input_model, family = binomial)
#output results from models to file
# NOTE(review): this sink() block is overwritten by the second sink() to the
# same path further below (sink truncates by default), so this first write is
# effectively discarded. Also, when this script is run via source(), top-level
# summary()/anova() results are not auto-printed — wrap them in print() if the
# output file comes out empty.
sink("output/endemism_models.txt")
cat("\nFull model fitted:\n")
summary(m1)
cat("\nNull model fitted:\n")
summary(m2)
cat("\nComparison between m1 and m2:\n")
anova(m1, m2)
sink()
#predict values on the response scale
output_model <- input_model %>% mutate(fitted = boot::inv.logit(predict(m1)))
#3.2 for proportion of catches
input_model_catches <-
  reshape::melt(end_catches) %>%
  dplyr::mutate(elev = seq_along(n_end) %>% sapply(function(x)
    c(names(n_end[[x]]))) %>% as.vector() %>% as.numeric) %>%
  dplyr::rename(prop_end = value) %>%
  dplyr::rename(location = L1) %>%
  dplyr::mutate(location = as.factor(location))
#fit models: same structure, response = proportion of catches that are endemic
m21 <- lme4::glmer(prop_end ~ elev + (1 | location),
  data = input_model_catches, family = binomial)
m22 <- lme4::glmer(prop_end ~ (1 | location),
  data = input_model_catches, family = binomial)
#output results from models to file (final version: both analyses)
sink("output/endemism_models.txt")
cat("\n1. Proportion of endemic species:\n")
cat("\nFull model fitted:\n")
summary(m1)
cat("\nNull model fitted:\n")
summary(m2)
cat("\nComparison between m1 and m2:\n")
anova(m1, m2)
cat("\n2. Proportion of catches from endemic species:\n")
cat("\nFull model fitted:\n")
summary(m21)
cat("\nNull model fitted:\n")
summary(m22)
cat("\nComparison between m1 and m2:\n")
anova(m21, m22)
sink()
#save fitted models and the plotting inputs for downstream scripts
saveRDS(list(m1 = m1, m2 = m2, m21 = m21, m22 = m22),
  "data/intermediate/models_endemicity.rds")
save(n_end, site_perm, end_catches, perm_catches,
  file = "data/intermediate/plot_endemics.Rdata")
|
a30976d035d4f383739e2b4eba4ca7b5ad0ef0f0
|
8d949d4b6131ad47e60c152594bdd487c7cb405a
|
/logistical modelling.R
|
495c9acdeccda97a970ca870614fd3033a7a3da7
|
[] |
no_license
|
krishhhhh007/iit_guwahati
|
5da9ab6632fcd87bb3a5cd74f8a7cb4b372ed50e
|
ac55a7c3cae08542a5badf0f054f47c055af0329
|
refs/heads/master
| 2020-03-09T12:01:11.656278
| 2018-04-13T13:35:34
| 2018-04-13T13:35:34
| 128,775,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,264
|
r
|
logistical modelling.R
|
# Fit a logistic regression ("purchased ~ predictors") on logr2.csv:
# split into train/test sets, compare a full and a reduced model, predict
# on the hold-out set, and evaluate with a confusion matrix.

# import the data
dataset <- read.csv("./data/logr2.csv")
str(dataset)
summary(dataset)
# install caTools only when missing (avoids re-installing on every run)
if (!requireNamespace("caTools", quietly = TRUE)) install.packages("caTools")
library(caTools)
set.seed(2000) # fix the RNG so the train/test split is reproducible
# stratified 75/25 split on the outcome variable
split <- sample.split(dataset$purchased, SplitRatio = 0.75)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
dim(dataset)
nrow(training_set)
length(dataset)        # number of columns in a data frame
length(dataset$gender) # number of elements in the vector
class(dataset$gender)
names(training_set)
# Fitting logistic regression to the training set (full model)
logicmodel1 <- glm(purchased ~ gender + age + salary, family = binomial, data = training_set)
summary(logicmodel1)
# reduced model without gender
logicmodel2 <- glm(purchased ~ age + salary, family = binomial, data = training_set)
summary(logicmodel2)
# predict purchase probability for two hand-made observations
test_set2 <- data.frame(age = c(40, 65), salary = c(4000, 5000))
prob_pred2 <- predict(logicmodel2, type = "response", newdata = test_set2)
prob_pred2
cbind(test_set2, prob_pred2)
test_set
# predicted probabilities on the hold-out set
prob_pred <- predict(logicmodel2, type = "response", newdata = test_set)
df_prob_pred <- as.data.frame(prob_pred)
summary(df_prob_pred)
head(df_prob_pred)
x <- 0.10
x
quantile(x)
# classify with a 0.5 probability threshold
y_pred <- ifelse(prob_pred > 0.5, 1, 0)
y_pred
cbind(test_set$purchased, y_pred)
# assumes column 5 of test_set is `purchased` — TODO confirm against the CSV
cn <- table(test_set[, 5], y_pred)
cn
# BUG FIX: caret's function is confusionMatrix (camelCase);
# `caret:: confusionmatrix(cn)` does not exist and errored at runtime.
caret::confusionMatrix(cn)
|
c15a7e60174bbc23387e9870b610da66392b487d
|
4ee1fdae7773dfe77d94030f30acaf240a3ba828
|
/tests/testthat/test-summarise_count_if.R
|
ebbcedb5f8bf2bf1303df872f56e5bd70bc6d2e3
|
[
"MIT"
] |
permissive
|
ryentes/dplyrr
|
4fb193f4dd8ab6e2fa90d894d098590577c2680a
|
316de0ea01a677b11bc55154ae34f906f22a6844
|
refs/heads/master
| 2021-01-12T13:12:09.128158
| 2016-10-03T06:29:29
| 2016-10-03T06:29:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
test-summarise_count_if.R
|
context("summarise_count_if")
# Build lightweight test databases (SQLite always; PostgreSQL only when a
# server is available). NOTE(review): `temp_srcs` and `dplyr:::temp_load`
# are dplyr-internal test helpers and may break across dplyr versions.
srcs <- temp_srcs("sqlite", "postgres")
# Two groups of three values each; odd/even x values drive count_if/n_if.
df <- data.frame(class = c("A", "A", "A", "B", "B", "B"), x = 1:6)
tbls <- dplyr:::temp_load(srcs, list(df=df))
temp_tbl <- tbls$sqlite$df
test_that("using count_if in summarise for SQLite", {
  q <- temp_tbl %>%
    summarise(y = count_if(x %% 2 == 0), z = n_if(x %% 2 == 1))
  result <- q %>% collect
  # 1:6 contains three even and three odd values
  expect_equal(result$y, c(3))
  expect_equal(result$z, c(3))
})
# PostgreSQL tests run only if that source was created successfully
if(!is.null(tbls$postgres)) {
  temp_tbl <- tbls$postgres$df
  test_that("using count_if in summarise for PostgreSQL", {
    q <- temp_tbl %>%
      summarise(y = count_if(x %% 2 == 0), z = n_if(x %% 2 == 1))
    result <- q %>% collect
    expect_equal(result$y, c(3))
    expect_equal(result$z, c(3))
  })
  test_that("using count_if in summarise with group_by for PostgreSQL", {
    q <- temp_tbl %>%
      group_by(class) %>%
      summarise(y = count_if(x %% 2 == 0), z = n_if(x %% 2 == 1)) %>%
      arrange(class)
    result <- q %>% collect
    # class A = {1,2,3}: one even, two odd; class B = {4,5,6}: two even, one odd
    expect_equal(result$y, c(1, 2))
    expect_equal(result$z, c(2, 1))
  })
}
|
6307c2216ecfd80da407d4151c9a375991d75274
|
d78eae7ac9af144c10941285e59804b7cb979a9d
|
/man/lincs.Rd
|
1ff3f471692fe2cac2e9e78c9a353ed3d5f8d2b9
|
[] |
no_license
|
tgirke/signatureSearchData
|
0d29646ac0d79e4b241f006da7127b0bc12b1f89
|
dea5184e84ec36ef91fc3e00b9a09f111f3db30a
|
refs/heads/master
| 2020-05-15T19:55:02.973706
| 2019-04-17T00:46:59
| 2019-04-17T00:46:59
| 182,468,863
| 0
| 0
| null | 2019-04-21T00:51:58
| 2019-04-21T00:51:57
| null |
UTF-8
|
R
| false
| false
| 749
|
rd
|
lincs.Rd
|
\name{lincs}
\alias{lincs}
\title{lincs signature database}
\description{
The \code{lincs} database represents moderated z-scores from DE analysis of
12,328 genes from 8,140 compound treatments in 30 cells
(45,956 signatures in total).
}
\details{
The original LINCS Level 5 moderated z-score database was downloaded at GEO
\url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE92742}.
The downloaded database in `gctx` format was processed and saved as an HDF5 file.
}
\references{
\url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE92742}
}
\examples{
library(AnnotationHub)
ah <- AnnotationHub()
qr <- query(ah, c("signatureSearchData", "lincs"))
qr
# lincs <- load_sigdb("AH69079","AH69080","lincs")
}
\keyword{datasets}
|
4468153764f588977730572ffc17400166ce869c
|
7a3fab09998dfeb3c726a6cb8e98e50b780d3bff
|
/TDI.ResultsAnalysis/R/identify.tarDEGlist.givendriverNtumorset.R
|
92048ef2cbca634913a2b41d4195bd25e048c54d
|
[] |
no_license
|
minnownino/TDIResultAnalysis
|
b18aa13953dba0e7d39f74864b16928fd3bc2aa4
|
cc21dca0165ffc1de0f4e30ad26db7aa2f0d3a33
|
refs/heads/master
| 2020-12-02T17:57:30.561680
| 2018-02-12T22:49:47
| 2018-02-12T22:49:47
| 96,453,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,440
|
r
|
identify.tarDEGlist.givendriverNtumorset.R
|
#' @title Identify Target DEGs Associated with Each Significant Driver
#'
#' @description If a DEG is a target of a driver, the causal links between the driver and the DEG has to present in at least 20% (by default) of the tumors where the driver is present.
#'
#' @param driver identify the target DEGs for the given list of drivers
#' @param tumorset character vector of tumor (patient) names in which to evaluate the driver
#' @param TDIresults TDI Tumor-SGA-DEG triplets; a data frame with columns
#'   \code{patient_name}, \code{cause_gene_name}, \code{result_gene_name} and \code{posterior}
#' @param postprobnoiselv posterior probability cutoff per SGA
#' @param numoftargenes.cutoff minimal number of target DEGs for an SGA to be called a driver in the tumor
#' @param DEGcallrate.thd minimal call rate for a DEG to be associated with a driver
#'
#' @export
#'
#' @return Named numeric vector of target-DEG call rates, sorted decreasing;
#'   \code{c()} when no DEG passes the call-rate threshold.
#'
#' @examples \dontrun{
#'
#' }
#'
identify.tarDEGlist.givendriverNtumorset <- function(driver, tumorset, TDIresults, postprobnoiselv, numoftargenes.cutoff = 5, DEGcallrate.thd = 0.2) {
  # Keep only triplets that (i) belong to the given tumor set, (ii) are caused
  # by the given driver and (iii) pass the posterior-probability cutoff.
  # (%in% replaces the original `is.element(...) > 0`, which is equivalent.)
  idx2keep <- intersect(
    intersect(which(TDIresults[, "patient_name"] %in% tumorset),
              which(TDIresults[, "cause_gene_name"] == driver)),
    which(TDIresults[, "posterior"] >= postprobnoiselv))
  TDIresults <- TDIresults[idx2keep, ]
  numoftumors <- length(tumorset)
  # All DEGs ever linked to this driver after filtering
  DEGs <- unique(TDIresults[, "result_gene_name"])
  numofDEGs <- length(DEGs)
  # 1 x nDEG matrix counting, per DEG, the number of driver-positive tumors
  # in which the driver -> DEG link is called
  DriverDEGpair.countmatrix <- matrix(0, 1, numofDEGs)
  rownames(DriverDEGpair.countmatrix) <- driver
  colnames(DriverDEGpair.countmatrix) <- DEGs
  # Number of tumors where the driver qualifies (>= numoftargenes.cutoff
  # target DEGs); the denominator of the call rate
  drivertumors <- 0
  # seq_len() is safe when numoftumors == 0 (the original 1:numoftumors
  # would iterate over c(1, 0))
  for (i in seq_len(numoftumors)) {
    tumor.i <- tumorset[i]
    idx.i <- which(TDIresults[, "patient_name"] == tumor.i)
    TDIresults.i <- TDIresults[idx.i, ]
    if (nrow(TDIresults.i) < numoftargenes.cutoff) {
      next
    } else {
      DEGs.i <- TDIresults.i[, "result_gene_name"]
      # assumes at most one row per (tumor, DEG) pair — duplicated DEG names
      # in DEGs.i would only be counted once by this indexed assignment
      DriverDEGpair.countmatrix[driver, DEGs.i] <- DriverDEGpair.countmatrix[driver, DEGs.i] + 1
      drivertumors <- drivertumors + 1
    }
  }
  # Call rate per DEG; keep DEGs called in at least DEGcallrate.thd of the
  # driver-positive tumors. If drivertumors == 0 the rates are NaN, which()
  # drops them, and an empty vector is returned.
  DriverDEGcallrate <- DriverDEGpair.countmatrix / drivertumors
  idx.tardegs <- which(DriverDEGcallrate >= DEGcallrate.thd)
  if (length(idx.tardegs) > 0) {
    driver.tarDEGs <- DriverDEGcallrate[driver, idx.tardegs]
    driver.tarDEGs <- sort(driver.tarDEGs, decreasing = TRUE)
  } else {
    driver.tarDEGs <- c()
  }
  # Return the named, sorted call-rate vector
  return(driver.tarDEGs)
}
|
4abf04ccfda27e720b9211b147053860952e104b
|
17ea2c72b3a83c16387df89bba76455b59150cee
|
/02_01.R
|
9c9a87a5ab804e1272b911b8e963141c06b63a11
|
[] |
no_license
|
JakubKraus/statistika
|
4472a4572957b289122e023ca3c2ff29258a9ce2
|
1c081ce58d7e789981a344927652c0c0a9c75023
|
refs/heads/master
| 2021-01-14T19:23:32.601046
| 2020-03-09T12:12:38
| 2020-03-09T12:12:38
| 242,728,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,955
|
r
|
02_01.R
|
# load the "prob" library
library (prob)
# Example 1
# with numeric values
n <- 4
mince <- tosscoin (n)
# this created a so-called data.frame = a data table (matrix) with named columns
mince
# dimensions
dim (mince)
nrow (mince)
ncol (mince)
# column (variable) names
names (mince)
# names can be changed, e.g.
names (mince) <- c ("prvni", "druha", "treti", "stvrta")
mince
# besides classic indexing with square brackets, columns can also be referenced by name: variable$name
mince$prvni
mince$treti
# structure of the variable
str (mince)
# we can see that the outcomes are H = head and T = tail
# these are so-called factors
# internally a numeric value is stored, but the number is only a code; it has no meaning as a numeric value
as.numeric (mince$treti)
# create the probability space
S <- probspace (mince)
# inspect the result
S
str (S)
names (S)
# again a data table: each row holds one elementary outcome; a probability column was added
# size of the sample space Omega
nrow (S)
# event A = all heads (H)
A <- subset (S, isin (S, rep ("H", n)))
A
nrow (A)
nrow (A) / nrow (S)
Prob (A)
# event Bk = exactly k heads, i.e. k times H and (n-k) times T
B0 <- subset (S, isin (S, rep ("T", 4)))
B1 <- subset (S, isin (S, c ("H", "T", "T", "T")))
B2 <- subset (S, isin (S, c ("H", "H", "T", "T")))
B3 <- subset (S, isin (S, c ("H", "H", "H", "T")))
B4 <- A
B0
B1
B2
B3
B4
Prob (B0)
Prob (B1)
Prob (B2)
Prob (B3)
Prob (B4)
psti <- c (Prob (B0), Prob (B1), Prob (B2), Prob (B3), Prob (B4))
# check that the probabilities sum to 1
sum (psti)
# draw a bar plot
names (psti) <- seq (0, 4, by = 1)
barplot (psti, xlab = "pocet licu", ylab = "pravdepodobnost")
# Further exercises to solve on your own (in class or as homework):
# Repeat the task for a larger number of tosses n
# Determine the sets of elementary outcomes favorable to the following events, and their probabilities:
# event Ck = at least k heads
# event Dk = at most k heads
|
b7ffa559ca7a79b604e88d2c33df9f769494dd2e
|
78e5fc01cafb39bf2e93023ef76664f42192a468
|
/man/loadSampleRegions.Rd
|
df1214d7ae311ebf5fbc2588e1cb5bd65965e92e
|
[] |
no_license
|
cran/VSE
|
a9d772fe2e305057abff7d06fc5211e5d885ce02
|
b47d0be054ecc7589a57a31399a876b50f645e73
|
refs/heads/master
| 2020-12-22T23:07:31.043759
| 2016-03-21T23:10:53
| 2016-03-21T23:10:53
| 236,958,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 800
|
rd
|
loadSampleRegions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadSampleRegions.R
\name{loadSampleRegions}
\alias{loadSampleRegions}
\title{loadSampleRegions}
\usage{
loadSampleRegions()
}
\value{
A directory named VSE_samples that will contain 6 bed files and 1 sampleSheet.csv
}
\description{
The sample bed files are DNAse-seq, ChIP-seq for H3K4me1, H3K4me3, H3K27ac, H3K27me3 and H3K36me3 for MCF7 cells. The data are obtained from ENCODE project. There is also one sampleSheet.csv which is the sample sheet for the bed regions in the format similar to ChIPQC or DiffBind requirement.
}
\details{
This function will download sample bed files from www.hansenhelab.org/VSE/sample_regions in /VSE_samples
}
\examples{
\dontrun{
loadSampleRegions()
}
}
\keyword{sample,histone,MCF7}
|
232311e2b4bc0cc0ef9563a9847b48f93eb0364b
|
7382ae78fe19bfe9ac782b271e7e79568ed4763a
|
/scripts/final/maps/timespent_hexbinning&map_hexgrid.R
|
3f3bc47d3738fd8fb7483e86e38bb3c73e76d896
|
[] |
no_license
|
MartinBeal/political_connectivity
|
38ed005c64a49adfbe1a360961b5762ff416a65b
|
55e74ec1fa63f827e0d541f8fef05c02f8d2c68a
|
refs/heads/master
| 2023-03-16T01:19:36.424153
| 2021-02-02T16:45:55
| 2021-02-02T16:45:55
| 205,902,098
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,152
|
r
|
timespent_hexbinning&map_hexgrid.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create maps of tracking data binned into hex.grid, and re-centering geodata to pacific perspective    #
# Time spent                                                                                            #
#*** projection transformations in this script only work with R<4.0 and lwgeom versions <2.0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#-------------------------------------------------------------------------------------------------------
pacman::p_load(sf, sp, dggridR, tidyverse, lubridate, lwgeom, viridis)
## Choose whether to use high threshold or low threshold data (i.e. >1 bird per month
# thresh <- "high"
thresh <- "low"
# `master`/`master_figs` are the data and figure root folders for the chosen threshold
if(thresh == "high"){
  master <- "data/analysis/bird_thresh/"
  master_figs <- "figures/bird_thresh/"
} else {
  master <- "data/analysis/"
  master_figs <- "figures/"
}
## Choose which country to assign Falklands/S.Georgia breeders too ~~~~~~~~~~
# NOTE(review): `assign` shadows base::assign() — harmless here, but a rename
# (e.g. `assign_scheme`) would be clearer.
assign <- "A"
# assign <- "B"
popData <- read.csv('data/population_estimates.csv', stringsAsFactors = F)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(assign == "A"){
  # inFolder <- paste0(master, 'noeqx_dnsmpl/')
  inFolder <- paste0(master, 'month_filtered/')
} else if(assign == "B"){
  inFolder <- paste0(master, 'sovereign_B_assign/month_filtered/')
  # re-assign birds on disputed islands to Argentina
  popData$jurisdiction <- ifelse(popData$site_name == "Falkland Islands (Islas Malvinas)" | popData$site_name == "South Georgia (Islas Georgias del Sur)", "Argentina",
    ifelse(popData$site_name == "Chafarinas", "Morocco", popData$jurisdiction))
  popData$origin <- ifelse(popData$site_name == "Falkland Islands (Islas Malvinas)" | popData$site_name == "South Georgia (Islas Georgias del Sur)", "Argentina", ifelse(popData$site_name == "Chafarinas", "Morocco", popData$jurisdiction))
}
# One RDS file per species/site/dataset in the chosen input folder
files <- list.files(inFolder)
files
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Global ISEA hexagonal grid (~360 km spacing) used to bin tracking fixes.
spatial <- dgconstruct(spacing=360, metric=TRUE, resround='nearest')
wgs84 <- sp::CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs') # WGS for spatializing (NOTE(review): not referenced below)
# load data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Species names are the first "_"-separated token of each file name
spp <- unique(do.call("rbind", str_split(files, n=4, pattern="_"))[, 1])
cntsum.list <- vector(mode="list", length(spp))
# FIX: seq_along(spp) instead of 1:length(spp) — the latter iterates over
# c(1, 0) when `spp` is empty.
for (i in seq_along(spp)) {
  print(i)
  sp <- spp[i] # species (NOTE(review): shadows the `sp` package name; package calls below use sp:: explicitly)
  sp_files <- str_subset(files, pattern = fixed(sp))
  # load all data_sets for a species/site, and combine into one data.frame
  TD <- do.call( "rbind", lapply( sp_files, function(x) as.data.frame(readRDS(paste(inFolder, x, sep = ""))) ))
  # hexgrid: get cell name for each fix ~~~~~~~~~~~~~~~~~
  TD$cell <- dgGEO_to_SEQNUM(spatial, TD$longitude, TD$latitude)$seqnum
  # Summarize monthly time spent per cell ~~~~~~~~~~~~~~~~
  sname <- unique(TD$site_name) # NOTE(review): assigned but unused
  TD$month <- lubridate::month(TD$date_time)
  TD$yday <- lubridate::yday(TD$date_time)
  TD_ndays <- TD %>% dplyr::group_by(month, site_name, track_id) %>% summarise(
    scientific_name = first(scientific_name),
    dmi = n_distinct(yday) # number of tracking days per month for each track/individual
  )
  # calculate simple weights from the proportion of the day spent in each cell, based on number of cells per day
  weights <- TD %>% group_by(month, track_id, yday) %>% summarise(
    n_cell = n_distinct(cell), # number of cells visited by each individual on each day tracked
    pp_cell = 1/n_cell # basic proportion of time spent by a bird in each cell
  )
  TD_weights <- merge(TD, weights) # merge weights to Tracking Data
  # calculate daily weights (prop. of day spent) for each cell-yday combo
  weighted <- TD_weights %>% group_by(month, site_name, track_id, yday, cell) %>% summarise(
    day_wei = first(pp_cell))
  weighted <- merge(weighted, TD_ndays) # combine cell, daily weights, and dmi (monthly) weight
  # Make bird-days dataset
  brdy <- weighted %>% group_by(month, site_name, cell, track_id) %>% summarise(
    scientific_name = first(scientific_name),
    dmi = first(dmi),
    dmei = sum(day_wei), # dmei = day-month-cell-individual
    ppt = dmei/dmi # proportion of time (days.in.cell[x]/tot.days.in.month[y])
  )
  ## n tracks per month
  nTracks <- brdy %>% group_by(month, cell) %>% summarise(n_tracks = n_distinct(track_id))
  brdy <- merge(brdy, nTracks)
  # calculate the contribution of each individual to the sample/population's 'story'
  brdy <- brdy %>% mutate(
    ppts = ppt * (1 / n_tracks)
  )
  # popTime ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  brdysum <- brdy %>% group_by(site_name, cell, month) %>% summarise(
    scientific_name = first(scientific_name),
    n_tracks = first(n_tracks),
    pop_time = sum(ppts), # sum of time spent across individuals (within month, cell, population)
    samp_est = pop_time*first(n_tracks) # est. of number of sample birds in cell at any given time (in each month)
  )
  PDsp <- popData[popData$scientific_name %in% unique(brdysum$scientific_name), ]
  PDsp.site <- PDsp[PDsp$standard_site_name %in% unique(brdysum$site_name), ] # HERE, if adj_site_names > 1, problem
  # combine population data and time spent/visitation summary
  cntsum <- merge(
    brdysum, PDsp.site[c("standard_site_name", "pop_estimate_IND", "global_pop_estimate_IND", "origin")],
    by.x = c("site_name"), by.y = c("standard_site_name"),
    all = TRUE
  )
  # normal prop. based on prop.time.spent * population_size
  cntsum$tot_atatime <- cntsum$pop_time * cntsum$pop_estimate_IND
  # globcnt ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  cntsum$globprop <- cntsum$tot_atatime/cntsum$global_pop_estimate_IND # proportion of global population in cell in time period (month of season)
  cntsum.list[[i]] <- cntsum
}
cntsum_allTD <- do.call("rbind", cntsum.list)
# summarize (bin) by cell
cellcnt_sum <- cntsum_allTD %>% group_by(cell) %>% summarise(
  timespent = sum(na.omit(tot_atatime)) / 12 # either x/12 for BIRDYEAR or just sum for BIRDMONTH
)
grid <- dgcellstogrid(spatial, cells = as.numeric(cellcnt_sum$cell), frame=TRUE, wrapcells=TRUE) # get only cells which contained fixes
grid <- merge(grid, cellcnt_sum, by.x="cell")
# saveRDS(grid, paste0(master, "glob_hexgrid/global_hexgrid_452km_timespent.rds"))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Make map ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
source("C:/Users/Martim Bill/Documents/R/source_scripts/recenter_map_fxn.r") # mapdata re-centering function
if(!exists("grid")){ grid <- readRDS(paste0(master, "glob_hexgrid/global_hexgrid_452km_timespent.rds")) }
# recentering and projection objects: Kavrayskiy VII centered on 153W (Pacific view)
shift <- -153
central_meridian <- 360 + shift
proj <- sprintf("+proj=kav7 +lon_0=%i", central_meridian)
# proj <- sprintf("+proj=kav7 +lon_0=%i", shift)
# Convert dggrid object (from 'hexbinning.r') to SF object (several steps) ~~~~~~~~~~~~
# hexgrid to list of Polygons (one Polygons object per hex cell)
polylist <- lapply(split(grid, grid$cell), function(x) {
  apoly <- Polygon(cbind(x$long, x$lat))
  apoly <- Polygons(list(apoly), ID = as.numeric(first(x$cell)))
  }
)
# get attribute data for each cell
polydata <- grid %>% group_by(cell) %>% summarise(
  timespent = first(timespent)
) %>% arrange( cell ) %>% as.data.frame()
rownames(polydata) <- polydata$cell
# convert to SPolyDF (attaching data)
grid.sp <- SpatialPolygonsDataFrame( SpatialPolygons(polylist, proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")), data = polydata, match.ID = T)
## sp to sf (and make valid)
grid.sf <- st_as_sf(grid.sp)
# all(st_is_valid(grid.sf)) # check validity
grid.sf <- st_make_valid(grid.sf)
# all(st_is_valid(grid.sf)) # check validity
## Save or Read in grid of data (in WGS84) ~~~~~~~~~~
# st_write(grid.sf, paste0(master, "global_grids/timespent_grid.shp"), delete_layer =T)
# IF GRID ALREADY EXISTS: Read in ~~~~~~~~~~
if(!exists("grid.sf")){ grid.sf <- st_read(paste0(master, "global_grids/timespent_grid.shp")) }
# decide recentering and projection objects ~~~~~~~~~~~~~~~~~~~~~~
shift <- -153
central_meridian <- 360 + shift
proj <- sprintf("+proj=kav7 +lon_0=%i", central_meridian)
# proj <- sprintf("+proj=kav7 +lon_0=%i", shift)
# Countries ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf") # load country dataset
world <- rmapshaper::ms_simplify(x, keep = 0.99)
# EEZs ~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE(review): the first `x` below is immediately overwritten by the second —
# presumably a manual toggle between the EEZ-boundaries and EEZ-land-union layers.
x <- st_as_sf( raster::shapefile("C:/Users/Martim Bill/Documents/geodata/world_EEZ_v11/eez_boundaries_v11_0_360.shp") ) # just EEZ data (latest version (2019))
x <- st_as_sf( raster::shapefile("spatial_data/shapefiles_EEZ_countries/union_countries_EEZs/EEZ_Land_v3_202030.shp") ) # EEZ-land union data (latest version (2019))
eez_cnt <- rmapshaper::ms_simplify(x, keep = .01) # simplify dataset to make smaller
# all(st_is_valid(eez_cnt))
# create background polygon for globe ~~~~~~~~~
lats <- c(90:-90, -90:90, 90)
maxlong <- 360 + (shift + 180)
minlong <- shift + 180
longs <- c(rep(c(maxlong, minlong ), each = 181), maxlong )
# turn into correctly projected sf collection
outline <-
  list(cbind(longs, lats)) %>%
  st_polygon() %>%
  st_sfc( # create sf geometry list column
    crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
  ) %>% st_sf()
# re-center data sets ~~~~~~~~~~~~
# re_eez <- recentre(eez_cnt, shift) %>% group_by(Line_ID) %>% summarize() # for just EEZ lines data set (seam removal works)
re_eez <- recentre(eez_cnt, shift) %>% group_by(MRGID_EEZ) %>% summarize() # seam removeal doesn't work for eez-land union
re_grid <- recentre(grid.sf, shift) %>% group_by(cell) %>% summarise( # recenter grid!
  timespent = first(timespent)
)
re_world <- recentre(world, clon = shift) %>% group_by(sovereignt) %>% summarize() # recenter and remove old seam
### project datasets
# re_world_wt <- st_transform_proj(re_world, crs = proj1, use_gdal = FALSE)
re_world_prj <- lwgeom::st_transform_proj(st_segmentize(st_buffer(re_world, -0.01), 15000), proj)
re_eez_prj <- st_transform_proj(re_eez, crs = proj, use_gdal = FALSE)
re_grid_prj <- st_transform_proj(re_grid, crs = proj, use_gdal = FALSE)
outline_prj <- lwgeom::st_transform_proj(outline, crs = proj, use_gdal = FALSE)
## Map: bird-years per hex, sqrt-transformed inferno fill, EEZ borders on top
m3 <- ggplot() +
  cowplot::theme_nothing() +
  geom_sf(data = outline_prj, color = NA, fill = "black") +
  # geom_sf(data = outline_prj, color = NA, fill = "white") +
  geom_sf(data = re_grid_prj, aes(fill=timespent/1000000), color=NA) +
  scale_fill_viridis(
    option ="inferno",
    trans ="sqrt",
    # trans ="log2",
    breaks = scales::trans_breaks("sqrt", function(x) x ^ 2),
    # breaks = scales::trans_breaks("log2", function(x) 2 ^ x),
    labels = function(x) round(x, 1)
  ) +
  # scale_fill_continuous_sequential(
  #   palette = "Reds 3",
  #   trans ="sqrt",
  #   breaks = scales::trans_breaks("sqrt", function(x) x ^ 2),
  #   labels = function(x) round(x, 1)
  # ) + # single hue color palette
  geom_sf(data = re_eez_prj, fill=NA, color="grey50", size=.9) + # EEZ borders
  geom_sf(data = re_world_prj, fill="grey40", color="grey40") +
  # geom_sf(data = re_world_wt, fill="grey55", color="grey25") + # country polygons
  # geom_sf(data = re_world_wt, fill="grey85", color="grey85") + # country polygons
  guides(
    fill = guide_colorbar(
      title="Bird years (millions)",
      title.position="top",
      barwidth = 8,
      barheight = 1.5,
      ticks = T,
      ticks.linewidth = 2)
  ) +
  theme(
    plot.margin=unit(c(0,0,0,0),"cm"),
    # legend.position="bottom",
    legend.direction = "horizontal",
    # legend.position=c(0.01, 0),
    legend.position=c(0.80, 0), # legend bottom right
    legend.justification = "left",
    legend.title=element_text(size=17),
    legend.text = element_text(size = 16)
  ) +
  coord_sf(datum = NA)
# dev.new()
# m3
# NOTE(review): hard-coded absolute Windows path — not portable
ggsave( "C:/Users/Martim Bill/Desktop/test/plotB11.png", plot = m3, width=30, height=20, units="cm", dpi=250)
## SAVE ##
if(assign == "A"){
  ggsave(paste0(master_figs, "maps/birdYEAR_global_kav7_pacific_infernoX.png"),
    width=30, height=20, units="cm", dpi=250)
} else if(assign=="B"){
  ggsave(paste0(master_figs, "figures/sovereign_B_assign/maps/birdYEAR_global_kav7_pacific_infernoX.png"),
    width=30, height=20, units="cm", dpi=250)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Animate monthly distribution #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~    ####
# One map per calendar month (bird-months instead of bird-years), assembled
# into a list of plots for later animation.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
source("C:/Users/Martim Bill/Documents/R/source_scripts/recenter_map_fxn.r") # mapdata re-centering function
# recentering and projection objects
shift <- -153
central_meridian <- 360 + shift
proj <- sprintf("+proj=kav7 +lon_0=%i", central_meridian)
# proj <- sprintf("+proj=kav7 +lon_0=%i", shift)
# Countries ~~~~~~~~~~~~
x <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf") # load country dataset
world <- rmapshaper::ms_simplify(x, keep = 0.99)
# EEZs ~~~~~~~~~~~~
x <- st_as_sf( raster::shapefile("data_test/geodata/World_EEZ_v10_20180221_HR_0_360/World_EEZ_boundaries_v10_2018_0_360.shp") ) # just EEZ data (latest version (2018))
eez_cnt <- rmapshaper::ms_simplify(x, keep = .01) # simplify dataset to make smaller
# create background polygon for globe ~~~~~~~~~
lats <- c(90:-90, -90:90, 90)
maxlong <- 360 + (shift + 180)
minlong <- shift + 180
longs <- c(rep(c(maxlong, minlong ), each = 181), maxlong )
# turn into correctly projected sf collection
outline <-
  list(cbind(longs, lats)) %>%
  st_polygon() %>%
  st_sfc( # create sf geometry list column
    crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
  ) %>% st_sf()
# re-center data sets ~~~~~~~~~~~~
re_eez <- recentre(eez_cnt, shift) %>% group_by(Line_ID) %>% summarize()
re_world <- recentre(world, clon = shift) %>% group_by(sovereignt) %>% summarize() # recenter and remove old seam
### project datasets
# re_world_wt <- st_transform_proj(re_world, crs = proj1, use_gdal = FALSE)
# re_world_wt <- lwgeom::st_transform_proj(st_segmentize(st_buffer(re_world, -0.01), 15000), proj)
# re_eez_wt <- st_transform_proj(re_eez, crs = proj, use_gdal = FALSE)
# re_world_wt <- lwgeom::st_transform_proj(st_segmentize(st_buffer(re_eez, -0.01), 15000), "+proj=kav7 +lon_0=-153")
# outline_prj <- lwgeom::st_transform_proj(outline, crs = proj, use_gdal = FALSE)
re_world_prj <- lwgeom::st_transform_proj(st_segmentize(st_buffer(re_world, -0.01), 15000), proj)
re_eez_prj <- st_transform_proj(re_eez, crs = proj, use_gdal = FALSE)
outline_prj <- lwgeom::st_transform_proj(outline, crs = proj, use_gdal = FALSE)
# Monthly (not annual) time spent per cell — requires cntsum_allTD from the
# species loop above.
cellcnt_sum <- cntsum_allTD %>% group_by(month, cell) %>% summarise(
  timespent = sum(na.omit(tot_atatime)) #/ 12 # either x/12 for BIRDYEAR or just sum for BIRDMONTH
)
# thescale <- scales::pretty_breaks(n=3)(min(cellcnt_sum$richness):max(cellcnt_sum$richness))
thescale <- scales::trans_breaks("sqrt", function(x) x ^ 2)(min(cellcnt_sum$timespent):max(cellcnt_sum$timespent)) / 1000000
# NOTE(review): `limits` is computed but unused below — the plot hard-codes
# limits = c(0, 11) in scale_fill_viridis().
limits <- c(min(cellcnt_sum$timespent), 5*ceiling(max(cellcnt_sum$timespent)/5)) # plot limits, rounding up to nearest 5
plist <- list()
for(i in 1:12){
  print(i)
  onemonth <- dplyr::filter(cellcnt_sum, month == i)
  grid <- dgcellstogrid(spatial, cells = as.numeric(onemonth$cell), frame=TRUE, wrapcells=TRUE) # get only cells which contained fixes
  grid <- merge(grid, onemonth, by.x="cell")
  # Convert dggrid object (from 'hexbinning.r') to SF object (several steps) ~~~~~~~~~~~~
  # hexgrid to list of Polygons
  polylist <- lapply(split(grid, grid$cell), function(x) {
    apoly <- Polygon(cbind(x$long, x$lat))
    apoly <- Polygons(list(apoly), ID = as.numeric(first(x$cell)))
    }
  )
  # get attribute data for each cell
  polydata <- grid %>% group_by(cell) %>% summarise(
    month = first(month),
    timespent = first(timespent)
  ) %>% arrange( cell ) %>% as.data.frame()
  rownames(polydata) <- polydata$cell
  # convert to SPolyDF (attaching data)
  grid.sp <- SpatialPolygonsDataFrame( SpatialPolygons(polylist, proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")), data = polydata, match.ID = T)
  ## sp to sf (and make valid)
  grid.sf <- st_as_sf(grid.sp)
  # all(st_is_valid(grid.sf)) # check validity
  grid.sf <- st_make_valid(grid.sf)
  # all(st_is_valid(grid.sf)) # check validity
  re_grid <- recentre(grid.sf, shift) %>% group_by(cell) %>% summarise( # recenter grid!
    timespent = first(timespent)
  )
  # re_grid_wt <- st_transform_proj(re_grid, crs = proj, use_gdal = FALSE)
  re_grid_prj <- st_transform_proj(re_grid, crs = proj, use_gdal = FALSE)
  p <- ggplot() +
    cowplot::theme_nothing() +
    geom_sf(data = outline_prj, color = NA, fill = "black") +
    geom_sf(data = re_grid_prj, aes(fill=timespent/1000000), color=NA) +
    scale_fill_viridis(
      option ="inferno",
      limits = c(0, 11),
      trans ="sqrt",
      breaks = scales::trans_breaks("sqrt", function(x) x ^ 2, n=4),
      # breaks = thescale
      labels = function(x) round(x, 1)
    ) +
    geom_sf(data = re_eez_prj, fill="grey", color="grey50") + # EEZ borders
    geom_sf(data = re_world_prj, fill="grey30", color="grey30") +
    guides(
      fill = guide_colorbar(
        title="Bird months (millions)",
        title.position="top",
        barwidth = 8,
        barheight = 1.5)
    ) +
    theme(
      plot.margin=unit(c(0,0,0,0),"cm"),
      # legend.position="bottom",
      legend.direction = "horizontal",
      legend.position=c(0.1, 0)
    ) +
    coord_sf(datum = NA)
  p <- cowplot::plot_grid(p, label_size = 30, labels = month.abb[i])
  p
  plist[[i]] <- p
  # ggsave(sprintf(paste0(master_figs, "test/global_maps/animate/", i,"_timespent_%s.png"), month.abb[i]),
  #   plot = p, width=30, height=20, units="cm", dpi=250)
}
|
058fe0c78c64619b1ad6d28af63e70b6b30f2a12
|
410c9a8e0131ffff6938492d577d1e0bed999f80
|
/repository/Cohort_GetQC.R
|
6f9a20d4bf805183d9ed8f1430ddea778b0a038e
|
[
"MIT"
] |
permissive
|
roland-rad-lab/MoCaSeq
|
02bd677f938a4d3561a9ae64087014a06074fcdc
|
a2a2c2a53ff8a53fd69b2b782577ec126bc7e53b
|
refs/heads/master
| 2023-08-31T23:50:56.868148
| 2023-06-03T17:21:27
| 2023-06-03T17:21:27
| 162,331,694
| 15
| 14
|
NOASSERTION
| 2023-08-18T11:49:19
| 2018-12-18T18:45:48
|
R
|
UTF-8
|
R
| false
| false
| 5,826
|
r
|
Cohort_GetQC.R
|
# CALL THIS ONCE
library(data.table)
library(tidyr)
library(matrixStats)
library(doParallel)
# Register a 5-worker parallel backend for foreach/%dopar% loops used later.
registerDoParallel(cores=5)
# Collect per-sample QC metrics for one mouse into a single wide data.table.
#
# Reads a fixed set of MultiQC/Picard/FastQC text outputs from
# <mousefolder>/results/QC/<mouseid>_data/, reshapes each into a
# Sample-keyed table, full-joins them, and appends file-size based sanity
# metrics for the Copywriter, Mutect2 and LOH result folders.
#
# Arguments:
#   mousefolder - relative folder name for one mouse, e.g. "DS01_.../"
#                 (trailing slash expected; it is stripped to form mouseid).
# Returns: one data.table with one row per Sample and one column per metric.
# NOTE(review): all paths are built relative to the current working
# directory; callers are expected to setwd() first (see the driver script).
getQC <- function(mousefolder){
mouseid <- gsub("/", "", mousefolder)
QCfolder <- paste0(mousefolder, "results/QC/", mouseid, "_data/")
# Input files produced by the MultiQC run for this mouse.
file1 <- paste0(QCfolder, "multiqc_general_stats.txt")
file2 <- paste0(QCfolder, "mqc_picard_aligned_reads_1.txt")
file3 <- paste0(QCfolder, "mqc_picard_percentage_target_bases_1.txt")
file4 <- paste0(QCfolder, "mqc_picard_deduplication_1.txt") #mqc_picard_deduplication_1.txt (or multiqc_picard_dups.txt)
# Older/newer MultiQC versions name the duplication table differently.
if(!file.exists(file4)){
file4 <- paste0(QCfolder, "multiqc_picard_dups.txt")
}
file5 <- paste0(QCfolder, "mqc_samtools-idxstats-xy-plot_1.txt")
file6 <- paste0(QCfolder, "mqc_fastqc_per_sequence_quality_scores_plot_1.txt")
file7 <- paste0(QCfolder, "mqc_fastqc_per_base_sequence_quality_plot_1.txt")
file8 <- paste0(QCfolder, "multiqc_fastqc.txt")
# d1: percent duplication from the Picard general-stats column.
d1 <- fread(file1)
d1 <- d1[, c("Sample","Picard_mqc-generalstats-picard-PERCENT_DUPLICATION")]
names(d1) <- c("Sample", "Duplicates[%]")
# d2: aligned / unaligned read counts.
d2 <- fread(file2)
names(d2) <- c("Sample", "AlignedReads", "UnalignedReads")
# d3: the "50" column of the target-bases table (median coverage percentage).
d3 <- fread(file3, header=T)
d3 <- d3[, c("Sample", "50")]
names(d3) <- c("Sample", "MedianPercentageOfTargetBases(Coverage)")
# d4: deduplication categories, converted from counts to row-percentages.
d4 <- fread(file4)
d4[, rs := rowSums(d4[, -"Sample"])]
d4data <- d4[, lapply(.SD, function(x){round(x / rs * 100, digits=2)}), .SDcols = !c("Sample", "rs")]
d4 <- cbind(d4[, "Sample"], d4data)
names(d4) <- gsub(" ", "",names(d4))
names(d4) <- c("Sample", paste0(names(d4[, -c("Sample")]), "[%]"))
# d5: samtools idxstats X/Y counts (sex check).
d5 <- fread(file5)
names(d5) <- c("Sample", "ChrY", "ChrX")
# d6: FastQC per-sequence quality table. The raw file alternates header
# rows (empty V1) and data rows; tag each header row with the following
# sample's name plus ".header" so it can be looked up later.
d6 <- fread(file6, header=F, fill=T)
for(row in 1:nrow(d6)){
if(d6[row, V1] == ""){
newname <- paste0(d6[row+1, V1], ".header")
d6[row, V1 := newname]
}
}
samples <- d6[!grepl(".header", V1), V1]
buildD6 <- data.table()
# For each sample, report the two Phred scores (taken from the matching
# header row) whose columns hold the two largest sequence counts.
for(sample in samples){
#sample <- "DS01_2259_LNMet-1.Normal.R1"
valvec <- as.numeric(d6[V1 == sample, -c("V1")]) # get vector of count values
maxvals <- sort(valvec, decreasing = T)[1:2] # find the 2 max values
lastcol <- names(d6[, ncol(d6), with=F]) # find the name of the last column
d6long <- data.table(gather(d6[V1 == sample], "column", "value", V2:lastcol)) # wide to long
maxcolumns <- d6long[value %in% maxvals, column] # find colname with the max values
phreadWithMaxScores <- d6[V1 == paste0(sample, ".header"), maxcolumns, with=F]
names(phreadWithMaxScores) <- c("FastQCSequenceQuality[1]", "FastQCSequenceQuality[2]")
out <- data.table(Sample=sample, phreadWithMaxScores)
buildD6 <- rbind(buildD6, out)
}
d6 <- copy(buildD6)
# d7: per-base quality curve, summarised to mean and median per sample.
d7 <- fread(file7, header=T, fill=T)
# if colname Sample not found, rename V1
if(!any(grepl("Sample",names(d7)))){
setnames(d7, "V1", "Sample")
}
# also fix some broken files
d7 <- d7[Sample != ""]
rmean <- rowMeans(d7[, -c("Sample")])
d7temp <- d7[, -c("Sample")]
d7temp[, med := rowMedians(as.matrix(.SD))][]
rmedian <- d7temp$med
d7[, FastQCBaseSequenceQuality_Mean := rmean]
d7[, FastQCBaseSequenceQuality_Median := rmedian]
d7 <- d7[, .(Sample, FastQCBaseSequenceQuality_Mean, FastQCBaseSequenceQuality_Median)]
# d8: FastQC pass/warn/fail flags for overrepresented sequences / adapters.
d8 <- fread(file8)
d8 <- d8[, .(Sample, overrepresented_sequences, adapter_content)]
names(d8) <- c("Sample", "Overrepresented_sequences", "Adapter_content")
# Full outer join of all per-sample tables on Sample.
mymerge <- function(x,y) merge(x,y,all=TRUE)
out <- Reduce(mymerge,list(d1,d2,d3,d4,d5,d6,d7,d8))
# File-size sanity checks (MB): a suspiciously small PDF/result file
# usually indicates a failed step. Minimum PDF size per result folder.
path9 <- paste0(mousefolder, "results/Copywriter/")
files9 <- list.files(path9, full.names = T)
files9 <- files9[grepl("pdf", files9)]
files9 <- data.table(file.info(files9), keep.rownames = T) #bytes
files9[, mb := size / 1000000]
d9.minMB <- round(files9[, min(mb)], digits=2)
file10 <- paste0(mousefolder, "results/Mutect2/",mouseid,".Mutect2.txt")
d10.MB <- round(file.info(file10)$size / 1000000, digits=2)
file11 <- paste0(mousefolder, "results/Mutect2/",mouseid,".m2.bam")
d11.MB <- round(file.info(file11)$size / 1000000, digits=2)
path12 <- paste0(mousefolder, "results/LOH/")
files12 <- list.files(path12, full.names = T)
files12 <- files12[grepl("pdf", files12)]
files12 <- data.table(file.info(files12), keep.rownames = T) #bytes
files12[, mb := size / 1000000]
d12.minMB <- round(files12[, min(mb)], digits=2)
file13 <- paste0(mousefolder, "results/LOH/",mouseid,".VariantsForLOH.txt")
d13.MB <- round(file.info(file13)$size / 1000000, digits=2)
# These size metrics are mouse-level, so the same value is repeated on
# every Sample row of the joined table.
out[, "CopywriteR[minMB]" := d9.minMB]
out[, "Mutect2Calls[MB]" := d10.MB]
out[, "Mutect2Bam[MB]" := d11.MB]
out[, "LOHChrPDF[minMB]" := d12.minMB]
out[, "LOHVariantsForLOH[MB]" := d13.MB]
return(out)
}
#wdpath <- "/run/user/1000/gvfs/smb-share:server=imostorage.med.tum.de,share=fastq/Studies/AGRad_mPDAC/" #kata
wdpath <- "Y:/Studies/AGRad_mPDAC"
setwd(wdpath)
# --- QC summary for 1 mouse -------------------------------------------------
mousefolder <- "DS01_2259_LNMet-1/"
qc <- getQC(mousefolder)
qc # look at data in R
# BUG FIX: the single-mouse result is `qc`; the original wrote `output`,
# which does not exist yet at this point in a fresh session.
fwrite(qc, "irgendwohin.txt", sep="\t") # write to file
# --- QC summary for all mice in folder (~1 second per directory) ------------
#dirs <- system("ls -d -- */", intern = T) # this works on linux
dirs <- list.dirs(recursive = FALSE)
dirs <- dirs[!grepl("plot|annotated|misc|MultiSampleCalling|CohortDatabase", dirs)] # remove some other folders
# Strip only the leading "./" prefix. The original gsub("./", "", dirs)
# treats '.' as a regex wildcard, so it would remove ANY character that
# precedes a slash, not just the "./" prefix.
dirs <- sub("^\\./", "", dirs)
dirs <- paste0(dirs, "/")
# Parallel collection; each worker must re-attach its packages and reset
# the working directory because %dopar% workers are fresh sessions.
output <- foreach(i=seq_along(dirs), .packages=c("data.table"), .combine=rbind) %dopar% {
setwd(wdpath)
library(tidyr)
library(data.table)
library(matrixStats)
getQC(dirs[i])
}
output # look at data
fwrite(output, "irgendwohin.txt", sep="\t") # write it to file
|
ac6ce3c46f1e54c4beac16b604a886f863008cdb
|
63e843f5e9d83949e7110efb028304c610224695
|
/man/rankReads.Rd
|
c060dd592f3684dd5a9d39e5efc7322ebe98ac70
|
[] |
no_license
|
cran/fcros
|
6b7f6549de38ea8b89918fb98d9aa8cc83ed577e
|
80a62f508adde2f33168ba4bb9d86c1f7e168dcb
|
refs/heads/master
| 2020-04-15T22:48:09.671668
| 2019-05-31T11:40:07
| 2019-05-31T11:40:07
| 17,695,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,282
|
rd
|
rankReads.Rd
|
\name{rankReads}
\alias{rankReads}
\title{This function computes a score to assess the significance of sequencing values}
\description{Implementation of two methods based (1) on the coefficient of variation
or (2) on the fold change rank ordering statistics for detecting genes
with significant sequencing values (gwssv). A score is obtained for each
gene and a threshold allows to select the number of gwssv.}
\usage{rankReads(xdata, cont, test, meth=0, Ttimes=10, err=0.1, trim.opt=0,
rseed=60)}
\arguments{
\item{xdata}{ A matrix or a table containing sequencing dataset.
The rownames of xdata is used for the output idnames.}
\item{cont}{ A vector containing the label names of the control samples:
\code{cont} = c("cont01", "cont02", ...).}
\item{test}{ A vector containing the label names of the test samples:
\code{test} = c("test01", "test02", "test03", ...).}
\item{meth}{ This parameter allows to specify the approach to use. The value
0 (default) means that the coefficient of variation is used. When a
non-zero value is given, the fcros method is used: \code{meth} = 0}
\item{Ttimes}{ The number of perturbed data to use.
The value 10 (default) means that the dataset is used 20 times
and small uniform values are added at each time:
\code{Ttimes} = 10}
\item{err}{ This is the amount of the uniform values to add to count values.
The value 0.1 (default) is used:
\code{err} = 0.1}
\item{trim.opt}{ A scalar between 0 and 0.5. A value of 0.25, for example, means
that 25\% of the lower and the upper rank values of each gene are not
used for computing its statistics "ri", i.e. the inter-quartile range
rank values are averaged: \code{trim.opt} = 0.25}
\item{rseed}{ This value allows to set the computer random generation seed
value in order to be able to have the same results for runs
performed at different times: \code{rseed} = 60}
}
\details{Label names appearing in the parameters "cont" and "test" should match
with some label names in the columns of the data matrix "xdata". It is not
necessary to use all label names appearing in the columns of the dataset matrix.
For a general purpose dataset, one of these parameters can be empty.}
\value{ This function returns a data frame containing 10 components when meth=1
and 3 components when meth=0
\item{idnames}{ A vector containing the list of IDs or symbols associated with genes}
\item{score }{coefficient of variation (meth=0) or Fisher-Snedecor test p-value
(meth=1). Smaller (higher) values are associated with genes with significant
(non significant) sequencing values.}
\item{moy }{trimmed means associated with genes (when meth=0).}
\item{ri }{The average of rank values associated with genes when meth=1.
These values are rank values statistics leading to f-values
and p-values (when meth=1).}
\item{FC }{The fold changes for genes in the dataset. These fold changes are
calculated as a ratio of averages from the test and the control
samples. Non log scale values are used in the calculation (when meth=1).}
\item{FC2 }{The robust fold changes for genes. These fold changes are
calculated as a trimmed mean of the fold changes or ratios
obtained from the dataset samples. Non log scale values are used
in the calculation (when meth=1).}
\item{f.value }{The f-values are probabilities associated with genes using
the "mean" and the "standard deviation" ("sd") of the statistics "ri".
The "mean" and "sd" are used as a normal distribution parameters
(when meth=1).}
\item{p.value }{The p-values associated with genes. These values are obtained
from the fold change rank values and one sample t-test (when meth=1).}
}
\author{Doulaye Dembele doulaye@igbmc.fr}
\references{Dembele D, manuscript under preparation}
\examples{
data(bott);
cont <- c("SRX033480", "SRX033488", "SRX033481");
test <- c("SRX033493", "SRX033486", "SRX033494");
n <- nrow(bott);
x2 <- tcnReads(bott[,c(cont,test)])
idx.ok <- (apply(x2, 1, sum) != 0)
xdata <- x2[,c(cont,test)]
rownames(xdata) <- bott[,1]
idx.ok <- (apply(x2, 1, sum) != 0)
tt2 <- sum(idx.ok)
raf10.cv <- rankReads(xdata, cont, test, meth=0)
raf10.pv <- rankReads(xdata, cont, test, meth=1)
score.cv <- -log10(sort(raf10.cv$score))
score.pv <- -log10(sort(raf10.pv$score))
tmp <- scoreThr(score.cv, 2500, 3500)
tmp
tmp <- scoreThr(score.pv, 2500, 3500)
tmp
op <- par(mfrow = c(1,2))
plot(score.cv, xlab = "index of genes",
ylab = "-log10(sorted(score)", main = "rs.cv", type = "l",
col = "blue", panel.first = grid())
plot(score.pv, xlab = "index of genes",
ylab = "-log10(sorted(score)", main = "rs.pv", type = "l",
col = "blue", panel.first = grid())
par(op)
}
|
1e40e8574b6f1c7720fcf6b551409b1f29f24783
|
ec68ef4a9502f4556f710fcfc5666ab3e2a0076e
|
/Classification-Algorithms_R-master/Classification-Algorithms_R-master/TAN/Calculate_mi.R
|
64fd91f3577a405eca4c51edee51d6b342083724
|
[] |
no_license
|
taigi0315/ML_Implementation
|
d9221bf35e33f2a52053df2cfe76897561d7b6fb
|
f2f190c674cfb3d58dfd24277943beb4d7df40b4
|
refs/heads/master
| 2020-03-25T00:09:03.146332
| 2018-08-01T15:02:52
| 2018-08-01T15:02:52
| 143,171,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
Calculate_mi.R
|
# Pairwise mutual information (in bits) between the feature columns of `dat`.
#
# The last column of `dat` is treated as the class label and excluded, so
# only the first ncol(dat) - 1 columns are compared. Empty cells of each
# joint frequency table are smoothed to 1/nrow(dat) (keeping log2 finite),
# exactly as in the original implementation.
#
# Returns an upper-triangular (n_feat x n_feat) matrix whose [i, j] entry,
# for i < j, is the mutual information between columns i and j.
Calculate_mi <- function(dat) {
  n_feat <- ncol(dat) - 1
  n_obs <- nrow(dat)
  mi <- matrix(0, nrow = n_feat, ncol = n_feat)
  for (a in 1:(n_feat - 1)) {
    for (b in (a + 1):n_feat) {
      # number of distinct values per column drives the summation bounds
      levels_a <- length(unique(dat[, a]))
      levels_b <- length(unique(dat[, b]))
      # marginal and joint probability tables
      p_a <- prop.table(table(dat[, a]))
      p_b <- prop.table(table(dat[, b]))
      p_ab <- prop.table(table(dat[, a], dat[, b]))
      # smooth empty joint cells so the log term stays finite
      p_ab[p_ab == 0] <- 1 / n_obs
      total <- 0
      for (u in 1:levels_a) {
        for (v in 1:levels_b) {
          total <- total + p_ab[u, v] * log2(p_ab[u, v] / (p_a[u] * p_b[v]))
        }
      }
      mi[a, b] <- total
    }
  }
  mi
}
#M.I_matrix = Calculate_mi(dat)
|
1054649906436a87f025ad2509bba17a661309a4
|
eb5dc9801ce9e56fac76b5bcb97eb518db24846d
|
/sp/R/merge.R
|
ee7e15c0a9b72fe649044f3dd19e13955af2cdad
|
[] |
no_license
|
radfordneal/R-package-mods
|
3948895d007a61a817ea382486c492543bd1978b
|
57ab5792b8e86581ba72ea3f093700fcd13d336d
|
refs/heads/master
| 2020-03-27T19:55:43.393031
| 2018-11-11T20:19:53
| 2018-11-11T20:19:53
| 147,021,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,809
|
r
|
merge.R
|
# Author: Robert J. Hijmans
# Date : November 2011
# Version 1.0
# Licence GPL v3
# Define a "merge" generic if none has been registered yet, so the
# Spatial/data.frame method below can be attached to it.
if (!isGeneric("merge")) {
	setGeneric("merge", function(x, y, ...)
		standardGeneric("merge"))
}
# merge() for Spatial* objects: attach columns of a data.frame `y` to the
# attribute table of `x`, keeping geometries aligned with the merged rows.
# Unlike plain merge(), rows of y that match nothing in x are dropped
# (Spatial objects cannot carry NULL geometries), and duplicate y keys are
# rejected unless duplicateGeoms=TRUE explicitly allows geometry copies.
setMethod('merge', signature(x='Spatial', y='data.frame'),
	function(x, y, by=intersect(names(x), names(y)), by.x=by,
	         by.y=by, all.x=TRUE, suffixes = c(".x",".y"),
			 incomparables = NULL, duplicateGeoms=FALSE, ...) {
		if (!('data' %in% slotNames(x)))
			stop('x has no attributes')
# email, RJH, 12/24/13, replace:
#		i <- apply(y[, by.y, drop=FALSE], 1, paste) %in%
#                   apply(x@data[, by.x, drop=FALSE], 1, paste)
# by the following block:
## Spatial* objects cannot have NULL geometries
## so we first get the records in y that match to a record in x.
# Composite keys are compared as underscore-joined strings.
		i <- apply(y[, by.y, drop=FALSE], 1,
                   function(x) paste(x, collapse='_')) %in%
                   apply(x@data[, by.x, drop=FALSE], 1,
                   function(x) paste(x, collapse='_'))
		if (all(!i)) {
			warning("none of the records in y can be matched to x")
			return(x)
		} else if (sum(!i) > 0) {
			warning(paste(sum(!i), "records in y cannot be matched to x"))
		}
		y <- y[i, ,drop=FALSE]
        ## check for duplicates in by.y
        # A duplicated key would fan out one geometry over several rows;
        # only allow that when the caller opted in via duplicateGeoms.
        if (isTRUE(any(table(y[, by.y]) > 1))) {
            if (!duplicateGeoms) {
                dy <- nrow(y)
                y <- unique(y)
                if (isTRUE(any(table(y[, by.y]) > 1))) {
                    stop("'y' has multiple records for one or more 'by.y' key(s)")
                } else {
                    warning(paste(dy - nrow(y), 'duplicate records in y were removed'))
                }
            }
        }
		# Temporary sequential key: after merge() may reorder/duplicate
		# rows, it is used to re-sort the attributes and subset the
		# geometries so rows and geometries stay in lock-step.
		x$DoNotUse_temp_sequential_ID_963 <- 1:nrow(x)
		d <- merge(x@data, y, by=by, by.x=by.x, by.y=by.y, suffixes=suffixes,
		           incomparables=incomparables, all.x=all.x, all.y=FALSE)
		d <- d[order(d$DoNotUse_temp_sequential_ID_963), ]
		x <- x[d$DoNotUse_temp_sequential_ID_963, ]
		d$DoNotUse_temp_sequential_ID_963 <- NULL
		x@data <- d
		x
	}
)
|
0d5706d60f89a6d6bc023e592371a09bdaea89dd
|
f2d28a0f3a0ff55496f8f65826779ec79a24040e
|
/man/CompareLocusNames.Rd
|
7a167aeb8b97eb19bf9f5368434c89ff04693dde
|
[] |
no_license
|
eriqande/swfsc.mega.tools
|
5c0a32b251187a682fa8a29cfae2ef54736b3cc1
|
cb26c58855aa341a52f92f66003919e7b20d66fd
|
refs/heads/master
| 2020-05-17T17:57:39.720027
| 2014-03-07T23:41:30
| 2014-03-07T23:41:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,123
|
rd
|
CompareLocusNames.Rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{CompareLocusNames}
\alias{CompareLocusNames}
\title{find closely matching locus names between two files}
\usage{
CompareLocusNames(First, Second, maxD = 7)
}
\arguments{
\item{First}{Either a vector of locus names, or a data
frame in which the loci start in column 1 and the column
for the second allele at a locus named "LocusX" is
"LocusX.1". Thus, it is the sort of data frame that
would result from reading in a two-column (ToolKit)
format file while setting the first column to be the row
names, and using R's default method for making column
names unique}
\item{Second}{a vector of locus names or a data frame
formatted like \code{First}.}
\item{maxD}{The maximum string distance between two locus
names that are still considered an approximate match}
}
\value{
A list with three components:
\itemize{
\item{\code{InFirstButNotSecond}: }{Loci appearing in First but with no \emph{exact} matches in Second}
\item{\code{RemainingInSecondButNotInFirst}: }{Any loci in second that had neither approximate nor exact matches
to any loci in First.}
\item{\code{InBoth}: }{Loci that have an exact matching name in both First and Second.}
}
Each component of the returned list has a matrix with 5 columns with headers as follows:
\itemize{
\item{\code{NameInFirst}: }{ Locus name as it appears in First }
\item{\code{NameInSecond}: }{ Locus name as it appears in Second }
\item{\code{StringSimilarity}: }{ The distance between the two approximately matching names. Lower means more similar. }
\item{\code{ColumnsInFirst}: }{ If First is a data.frame, these correspond to the columns in an excel file that hold the original data table. }
\item{\code{ColumnsInSecond}: }{ See above, but for second. }
}
}
\description{
find closely matching locus names between two files
}
\examples{
# look for closely matching locus names between the two steelhead example data sets
CompareLocusNames(sthd.geno.A, sthd.geno.B)
# look for the standard Omykiss locus names in sthd.geno.B
CompareLocusNames(sthd.geno.B, Omykiss.Standard.Loci)
}
|
2a8d0dfe564d7fdce0575f801c83749112ecaf39
|
3f1aa8be40d971b34f417f5b8c420f9725a8772f
|
/man/param1.Rd
|
6657ca2dfce825f0ac6e27a1320ca481c37fdab5
|
[] |
no_license
|
cran/MetaLandSim
|
16f3a78b7478ef2eedf4fca307caa8d0858ba384
|
7903126e4e905cff603ecc97695a927bc8483064
|
refs/heads/master
| 2023-04-04T13:29:32.818818
| 2023-01-12T21:30:02
| 2023-01-12T21:30:02
| 26,391,540
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 924
|
rd
|
param1.Rd
|
\name{param1}
\alias{param1}
\docType{data}
\title{
Sample parameter data frame number 1
}
\description{
Sample data frame, as produced by \code{\link{parameter.estimate}}. These parameters are to be passed to \code{\link{spom}}. These are made up parameters, not related to any species.
}
\usage{data(param1)}
\format{
A data frame with 4 rows displaying the four parameters (alpha, x, y, e) to be passed to \code{\link{spom}}:
\itemize{
\item alpha - Parameter relating extinction with distance.
\item x - Parameter scaling extinction risk with patch area.
\item y - Parameter y in the colonization probability.
\item e - Parameter defining the extinction probability in a patch of unit area.
 }
}
\details{
The four parameters are to be passed to \code{\link{spom}}.
}
\examples{
data(param1)
param1
# par_output
#alpha 0.00100000
#x 0.50000000
#y 2.00000000
#e 0.04662827
}
\keyword{datasets}
|
3fe6541a106ed79edb7de569226b4e7f42dae05b
|
759adc33cff9b8ae3ae8d9e88ba817fc564a6614
|
/final_validation/analyze_model_by_province.R
|
3e952054a06f81b29bab51bf6330b5649ef8bf92
|
[] |
no_license
|
cllorca1/mtoEstimation
|
cba89fc63364501400a8ec1bf51629dad4e06246
|
edfb77f0255620766beed91ef25ba19e014cb383
|
refs/heads/master
| 2021-01-25T08:12:47.534334
| 2020-02-24T12:09:46
| 2020-02-24T12:09:46
| 93,727,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,296
|
r
|
analyze_model_by_province.R
|
pacman::p_load(data.table, dplyr, ggplot2)

# --- model data -------------------------------------------------------------
# Read modelled long-distance trips, keeping domestic trips only.
folder_model <- "C:/models/treso-ldpm/output/"
trips <- fread(paste0(folder_model, "ldpm_trips.csv")) %>% filter(!international)
# Person-trip weight by trip state: away = 0, daytrip = 1, everything else 0.5.
trips <- trips %>%
  mutate(weight = if_else(tripState == "away", 0,
                          if_else(tripState == "daytrip", 1, 0.5)))

# --- level-2 zone information (zone id -> name / province) ------------------
folder_zones <- "C:/projects/MTO Long distance travel/Database information/Zones/03 Aggregation/Level2Zones/"
zones <- fread(paste0(folder_zones, "level2withNameAndProvince.csv"))

# Attach origin and destination provinces to the model trips.
trips <- merge(x = trips, y = zones, by.x = "tripOriginCombinedZone", by.y = "id")
trips <- merge(x = trips, y = zones, by.x = "tripDestCombinedZone", by.y = "id",
               suffixes = c("", "_dest"))

# --- survey data ------------------------------------------------------------
folder_surveys <- "C:/projects/MTO Long distance travel/Choice models/01 tripGeneration/domesticUpdate2019/"
tsrc_trips <- fread(paste0(folder_surveys, "tsrcTrips2019.csv"))

# --- summaries --------------------------------------------------------------
# Use distinct names instead of `summary`, which shadows base::summary().
model_summary <- trips %>%
  group_by(province, province_dest, tripState, tripMode) %>%
  summarize(trips = n(), person_trips = sum(hhAdultsTravelParty)) %>%
  ungroup()
# NOTE: the "clipboard" connection only works on Windows.
write.table(model_summary, "clipboard", sep = "\t", row.names = TRUE)

# Survey totals by OD province and mode (weights divided by 4 --
# presumably the number of survey years; TODO confirm with survey docs).
survey_summary <- tsrc_trips %>%
  group_by(origProvince, destProvince, mainMode) %>%
  summarize(sum(weightWTTP) / 4, sum(weightWTEP) / 4) %>%
  ungroup()
|
7a7e425ac19429010bc770b078a93ee86f642526
|
7d1218aa7a1bfaa51d940838ceb7cc6bc639b400
|
/UsingMpmTools.R
|
a5367e990143049cfd1b7ea655443e4b025f5c72
|
[] |
no_license
|
robjohnnoble/MatPopModels
|
aa3b297665567231c9bcd53e12b757194be77622
|
ad507103b7fd337267d7adb05a3a886d6db5a372
|
refs/heads/main
| 2023-03-05T15:17:35.787767
| 2021-02-17T10:02:41
| 2021-02-17T10:02:41
| 333,397,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 966
|
r
|
UsingMpmTools.R
|
# Tutorial script: install mpmtools from GitHub and use it for basic
# matrix population model (MPM) analyses.
install.packages("devtools") # install a package that enables you to install mpmtools from the github website
library(devtools) # load that package
devtools::install_github("BruceKendall/mpmtools") # install the mpmtools package
library(mpmtools) # load the mpmtools package
######## Example 1: analyse a published projection matrix ########
Caswell_Ex_2.1 # an example matrix from Caswell's 2001 book
lambda1(Caswell_Ex_2.1) # dominant eigenvalue of that example matrix
stable_stage(Caswell_Ex_2.1) # stable stage distribution: eigenvector associated with the dominant eigenvalue
######## Example 2: build a Leslie matrix from a demography schedule ########
# Create a demography schedule, with juvenile and senescent age classes:
# (x = age, sx = survival probability, mx = fecundity)
demog_sched <- data.frame(x = 0:7,
sx = c(0.05, 0.2, 0.35, 0.8, 0.9, 0.9, 0.75, 0.4),
mx = c(0, 0, 0, 0.5, 1, 3, 3, 1.5))
# Construct a Leslie matrix from this demography schedule:
A1 <- make_Leslie_matrix(demog_sched)
lambda1(A1) # dominant eigenvalue
stable_stage(A1) # stable stage distribution: eigenvector associated with the dominant eigenvalue
|
d0145d56fe21bfd5a856265d002e775bcca26327
|
f90ec5dd7c8e2abd38681a86bdbebccbf4111d2a
|
/man/stk1.Rd
|
0568fab19d012a38793f3a87c7747107edd20f7f
|
[] |
no_license
|
marchtaylor/FLIBM
|
b5f680db8fe863702cbe13394472bfd0712a1987
|
ca26982c48f52db0dcd227c5d4abbf73ea36c933
|
refs/heads/master
| 2023-04-03T16:13:38.723948
| 2023-03-29T13:31:50
| 2023-03-29T13:31:50
| 187,761,005
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,727
|
rd
|
stk1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stk1.R
\docType{data}
\name{stk1}
\alias{stk1}
\title{stk1 dataset}
\format{
FLIBM class object
}
\usage{
data(stk1)
}
\description{
The \code{stk1} data generated with the with FLIBM.
The stock has a seasonal recruitment pattern, with interannual
variation in recruitment strength.
See example for script used to generate data.
}
\examples{
library(FLIBM)
library(FLCore)
library(ggplotFL)
library(data.table)
# to load and plot
data(stk1)
plot(stk1$stock.a@stock.n)
\donttest{
# data generation
set.seed(42)
stk1 <- create.FLIBM(
length = 0:85, age = 0:6,
year = ac(1960:2000), season = ac(1:12),
n.units = "1e3", wt.units = "kg"
)
# pulsed recruitment (March-May)
stk1$rec$params$season_wt[] <- 0
stk1$rec$params$season_wt[3:5] <- c(0.25, 1, 0.25)
# SRR params
stk1$rec$params['rmax'] <- 1e4
# add log-normal noise to rec covar (one value per year)
stk1$rec$covar[] <- rlnorm(n = dim(stk1$rec$covar)[2],
meanlog = 0, sdlog = 0.5)
# Fbar ages
range(stk1$stock.a)[c("minfbar", "maxfbar")] <- c(1,3)
# historical F
yrs <- 1960:2000
steepness <- 0.75
FMmax <- 1
FMs <- FMmax / (1 + exp(-steepness * (yrs - 1990) ))
plot(yrs, FMs)
# Advance
for(yr in seq(yrs)){
stk1$harvest$params$FM <- FMs[yr]
stk1 <- adv.FLIBM(obj = stk1, year = ac(yrs[yr]))
}
# plot stock numbers
plot(stk1$stock.a@stock.n)
# trim
summary(stk1)
ymin <- 1980
ymax <- 1999
stk1$stock.a <- trim(stk1$stock.a, year = 1980:1999)
stk1$stock.l <- trim(stk1$stock.l, year = 1980:1999)
stk1$length.a <- trim(stk1$length.a, year = 1980:1999)
stk1$age.l <- trim(stk1$age.l, year = 1980:1999)
# plot stock numbers
plot(stk1$stock.a@stock.n)
}
}
\keyword{datasets}
|
d5187175daa37bdefd30a8f2c77ea99afd49c015
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/DPWeibull/R/plot.R
|
4f2f791876bba6d3f7e6640bc824883468d6f5fd
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,023
|
r
|
plot.R
|
# S3 plot method for "dpm" fits: draws three panels in sequence --
# survival, density and hazard -- each with the point estimate (solid),
# pointwise credible limits (dashed, lty=2) and, when simultaneous bands
# are requested, simultaneous bands (dotted, lty=3) from confband()
# (whose rows 1/2 are used as lower/upper band -- based on usage here).
# If x was fit without simultaneous bands (x$simultaneous == FALSE) and
# simultaneous=TRUE is passed, the bands are computed on the fly from the
# stored posterior draws (x$S, x$d, x$h).
plot.dpm<-function(x,simultaneous=FALSE,...){
# --- survival panel ---
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(1,x$Spred),type="l",lwd=3,main="Survival",ylab="",xlab="Time",ylim=c(min(x$Spredl,na.rm=TRUE),1))
}else{
x$Sbandl<-confband(x$alpha,x$S)[1,]
x$Sbandu<-confband(x$alpha,x$S)[2,]
plot(c(0,x$predtime),c(1,x$Spred),type="l",lwd=3,main="Survival",ylab="",xlab="Time",ylim=c(min(x$Sbandl,na.rm=TRUE),1))
lines(c(0,x$predtime),c(1,x$Sbandu),lty=3,lwd=3)
lines(c(0,x$predtime),c(1,x$Sbandl),lty=3,lwd=3)
}
# pointwise limits are drawn in both cases; curves are anchored at (0, 1)
lines(c(0,x$predtime),c(1,x$Spredu),lty=2,lwd=3)
lines(c(0,x$predtime),c(1,x$Spredl),lty=2,lwd=3)
# --- density panel (anchored at (0, 0)) ---
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$dpred),type="l",lwd=3,main="Density",ylab="",xlab="Time",ylim=c(0,max(x$dpredu,na.rm=TRUE)))
}else{
x$dbandl<-confband(x$alpha,x$d)[1,]
x$dbandu<-confband(x$alpha,x$d)[2,]
plot(c(0,x$predtime),c(0,x$dpred),type="l",lwd=3,main="Density",ylab="",xlab="Time",ylim=c(0,max(x$dbandu,na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$dbandu),lty=3,lwd=3)
lines(c(0,x$predtime),c(0,x$dbandl),lty=3,lwd=3)
}
lines(c(0,x$predtime),c(0,x$dpredu),lty=2,lwd=3)
lines(c(0,x$predtime),c(0,x$dpredl),lty=2,lwd=3)
# --- hazard panel (not anchored at t = 0) ---
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(x$predtime,x$hpred,type="l",lwd=3,main="Hazard",ylab="",xlab="Time",ylim=c(min(x$hpredl,na.rm=TRUE),max(x$hpredu,na.rm=TRUE)))
# NOTE(review): these two pointwise lines are drawn again below after the
# if/else, so in this branch they are over-plotted twice (harmless).
lines(x$predtime,x$hpredu,lty=2,lwd=3)
lines(x$predtime,x$hpredl,lty=2,lwd=3)
}else{
x$hbandl<-confband(x$alpha,x$h)[1,]
x$hbandu<-confband(x$alpha,x$h)[2,]
plot(x$predtime,x$hpred,type="l",lwd=3,main="Hazard",ylab="",xlab="Time",ylim=c(min(x$hbandl,na.rm=TRUE),max(x$hbandu,na.rm=TRUE)))
lines(x$predtime,x$hbandu,lty=3,lwd=3)
lines(x$predtime,x$hbandl,lty=3,lwd=3)
}
lines(x$predtime,x$hpredu,lty=2,lwd=3)
lines(x$predtime,x$hpredl,lty=2,lwd=3)
}
# S3 plot method for "dpmcomp" (competing-risks) fits: three panels --
# cumulative incidence, subdistribution density, subdistribution hazard --
# each overlaying event 1 (red) and event 2 (blue). Line styles: solid =
# point estimate, dashed (lty=2) = pointwise limits, dotted (lty=3) =
# simultaneous bands computed via confband() when requested.
plot.dpmcomp<-function(x,simultaneous=FALSE,...){
# --- cumulative incidence functions (anchored at (0, 0)) ---
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$CIF1.est),type="l",col="red",lwd=3,main="Cumulative Incidence Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$CIF1u,x$CIF2u),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$CIF2.est),lwd=3,col="blue")
legend("topleft",c("Event 1", "Event 2"), lwd=c(3,3), lty=c(1,1), col=c("red", "blue"))
}else{
x$CIF1bandl<-confband(x$alpha,x$CIF1)[1,]
x$CIF1bandu<-confband(x$alpha,x$CIF1)[2,]
x$CIF2bandl<-confband(x$alpha,x$CIF2)[1,]
x$CIF2bandu<-confband(x$alpha,x$CIF2)[2,]
plot(c(0,x$predtime),c(0,x$CIF1.est),type="l",col="red",lwd=3,main="Cumulative Incidence Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$CIF1bandu,x$CIF2bandu),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$CIF1bandu),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF1bandl),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF2bandu),lty=3,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF2bandl),lty=3,lwd=3,col="blue")
legend("bottomright",c("Event 1", "Event 2"), lwd=c(3,3), lty=c(1,1), col=c("red", "blue"))
}
# estimates/pointwise limits drawn in both cases
lines(c(0,x$predtime),c(0,x$CIF2.est),lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF1u),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF1l),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF2u),lty=2,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF2l),lty=2,lwd=3,col="blue")
# --- subdistribution densities (anchored at (0, 0)) ---
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$d1.est),type="l",col="red",lwd=3,main="Subdistribution Density Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$d1u,x$d2u),na.rm=TRUE)))
}else{
x$d1bandl<-confband(x$alpha,x$d1)[1,]
x$d1bandu<-confband(x$alpha,x$d1)[2,]
x$d2bandl<-confband(x$alpha,x$d2)[1,]
x$d2bandu<-confband(x$alpha,x$d2)[2,]
plot(c(0,x$predtime),c(0,x$d1.est),type="l",col="red",lwd=3,main="Subdistribution Density Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$d1bandu,x$d2bandu),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$d1bandu),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d1bandl),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d2bandu),lty=3,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d2bandl),lty=3,lwd=3,col="blue")
}
lines(c(0,x$predtime),c(0,x$d2.est),lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d1u),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d1l),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d2u),lty=2,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d2l),lty=2,lwd=3,col="blue")
# --- subdistribution hazards (not anchored at t = 0) ---
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(x$predtime,x$h1.est,type="l",col="red",lwd=3,main="Subdistribution Hazard Functions",ylab="",xlab="Time",
ylim=c(min(c(x$h1l,x$h2l),na.rm=TRUE),max(c(x$h1u,x$h2u),na.rm=TRUE)))
}else{
x$h1bandl<-confband(x$alpha,x$h1)[1,]
x$h1bandu<-confband(x$alpha,x$h1)[2,]
x$h2bandl<-confband(x$alpha,x$h2)[1,]
x$h2bandu<-confband(x$alpha,x$h2)[2,]
plot(x$predtime,x$h1.est,type="l",col="red",lwd=3,main="Subdistribution Hazard Functions",ylab="",xlab="Time",
ylim=c(min(c(x$h1bandl,x$h2bandl),na.rm=TRUE),max(c(x$h1bandu,x$h2bandu),na.rm=TRUE)))
lines(x$predtime,x$h1bandu,lty=3,lwd=3,col="red")
lines(x$predtime,x$h1bandl,lty=3,lwd=3,col="red")
lines(x$predtime,x$h2bandu,lty=3,lwd=3,col="blue")
lines(x$predtime,x$h2bandl,lty=3,lwd=3,col="blue")
}
lines(x$predtime,x$h1u,lty=2,lwd=3,col="red")
lines(x$predtime,x$h1l,lty=2,lwd=3,col="red")
lines(x$predtime,x$h2.est,lwd=3,col="blue")
lines(x$predtime,x$h2u,lty=2,lwd=3,col="blue")
lines(x$predtime,x$h2l,lty=2,lwd=3,col="blue")
}
# S3 plot method for "ddp" fits: one panel per covariate showing its
# (log) hazard ratio over time with pointwise limits (dashed, lty=2) and,
# when simultaneous=TRUE or the fit already carries bands, simultaneous
# bands (dotted, lty=3). exp=TRUE plots hazard ratios instead of log HRs.
# Covariate titles of the form "factor(Var)Level" are prettified to
# "Var=Level vs Var=<reference level>" using x$xlevels.
plot.ddp<-function(x,simultaneous=FALSE,exp=FALSE,...){
# Compute simultaneous bands on the fly when the fit does not carry them;
# draws are reshaped to one row per covariate and rescaled by x$xscale
# (the same scaling the point estimates appear to use -- TODO confirm).
if((x$simultaneous==FALSE)&(simultaneous==TRUE)){
x$loghrbandl<-matrix(confband(x$alpha,x$loghr)[1,],byrow=TRUE,nrow=ncol(x$x))/x$xscale
x$loghrbandu<-matrix(confband(x$alpha,x$loghr)[2,],byrow=TRUE,nrow=ncol(x$x))/x$xscale
if(exp==TRUE){
x$hrbandl<-exp(x$loghrbandl)
x$hrbandu<-exp(x$loghrbandu)
}
}
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
# --- pointwise-only plots ---
if(exp==FALSE){
for(i in 1:nrow(x$loghr.est)){
# common y-range across all covariates, from the pointwise limits
ytop<-max(x$loghru,na.rm=TRUE)
ybot<-min(x$loghrl,na.rm=TRUE)
padName<-x$covnames[i]
if(grepl("factor",padName,fixed=TRUE)){
factorName<-sub(").*", ")", padName)
# now called factor(Stage)
reference<-unlist(x$xlevels[factorName])[1]
padName<-gsub("factor[(]", "", padName)
padName<-gsub("[)]", "=", padName)
factorName<-gsub("factor[(]", "", factorName)
factorName<-gsub("[)]", "=", factorName)
reference<-paste(factorName,reference)
padName<-paste(padName,"vs",reference)
}
plot(x$predtime,x$loghr.est[i,],type="l",lwd=3,main=paste("Log Hazard Ratio over Time for Covariate ",padName,sep="")
,ylab="",xlab="Time",ylim=c(ybot,ytop))
lines(x$predtime,x$loghrl[i,],lty=2,lwd=3)
lines(x$predtime,x$loghru[i,],lty=2,lwd=3)
}
}else{
# exp=TRUE: exponentiate estimates and limits to the HR scale
x$hr.est<-exp(x$loghr.est)
x$hrl<-exp(x$loghrl)
x$hru<-exp(x$loghru)
for(i in 1:nrow(x$loghr.est)){
ytop<-max(x$hru,na.rm=TRUE)
ybot<-min(x$hrl,na.rm=TRUE)
padName<-x$covnames[i]
if(grepl("factor",padName,fixed=TRUE)){
factorName<-sub(").*", ")", padName)
# now called factor(Stage)
reference<-unlist(x$xlevels[factorName])[1]
padName<-gsub("factor[(]", "", padName)
padName<-gsub("[)]", "=", padName)
factorName<-gsub("factor[(]", "", factorName)
factorName<-gsub("[)]", "=", factorName)
reference<-paste(factorName,reference)
padName<-paste(padName,"vs",reference)
}
plot(x$predtime,x$hr.est[i,],type="l",lwd=3,
main=paste("Hazard Ratio over Time for Covariate ",padName,sep="")
,ylab="",xlab="Time",ylim=c(ybot,ytop))
lines(x$predtime,x$hrl[i,],lty=2,lwd=3)
lines(x$predtime,x$hru[i,],lty=2,lwd=3)
}
}
}else{
# --- plots including simultaneous bands; y-range is taken from the raw
# posterior draws (x$loghr / exp of it) rather than the pointwise limits ---
if(exp==FALSE){
for(i in 1:nrow(x$loghr.est)){
ytop<-max(x$loghr,na.rm=TRUE)
ybot<-min(x$loghr,na.rm=TRUE)
padName<-x$covnames[i]
if(grepl("factor",padName,fixed=TRUE)){
factorName<-sub(").*", ")", padName)
# now called factor(Stage)
reference<-unlist(x$xlevels[factorName])[1]
padName<-gsub("factor[(]", "", padName)
padName<-gsub("[)]", "=", padName)
factorName<-gsub("factor[(]", "", factorName)
factorName<-gsub("[)]", "=", factorName)
reference<-paste(factorName,reference)
padName<-paste(padName,"vs",reference)
}
plot(x$predtime,x$loghr.est[i,],type="l",lwd=3,main=paste("Log Hazard Ratio over Time for Covariate ",padName,sep="")
,ylab="",xlab="Time",ylim=c(ybot,ytop))
lines(x$predtime,x$loghrl[i,],lty=2,lwd=3)
lines(x$predtime,x$loghru[i,],lty=2,lwd=3)
lines(x$predtime,x$loghrbandl[i,],lty=3,lwd=3)
lines(x$predtime,x$loghrbandu[i,],lty=3,lwd=3)
}
}else{
x$hr<-exp(x$loghr)
x$hr.est<-exp(x$loghr.est)
x$hrl<-exp(x$loghrl)
x$hru<-exp(x$loghru)
for(i in 1:nrow(x$loghr.est)){
ytop<-max(x$hr,na.rm=TRUE)
ybot<-min(x$hr,na.rm=TRUE)
padName<-x$covnames[i]
if(grepl("factor",padName,fixed=TRUE)){
factorName<-sub(").*", ")", padName)
# now called factor(Stage)
reference<-unlist(x$xlevels[factorName])[1]
padName<-gsub("factor[(]", "", padName)
padName<-gsub("[)]", "=", padName)
factorName<-gsub("factor[(]", "", factorName)
factorName<-gsub("[)]", "=", factorName)
reference<-paste(factorName,reference)
padName<-paste(padName,"vs",reference)
}
plot(x$predtime,x$hr.est[i,],type="l",lwd=3,
main=paste("Hazard Ratio over Time for Covariate ",padName,sep="")
,ylab="",xlab="Time",ylim=c(ybot,ytop))
lines(x$predtime,x$hrl[i,],lty=2,lwd=3)
lines(x$predtime,x$hru[i,],lty=2,lwd=3)
lines(x$predtime,x$hrbandl[i,],lty=3,lwd=3)
lines(x$predtime,x$hrbandu[i,],lty=3,lwd=3)
}
}
}
}
plot.ddpcomp <- function(x, simultaneous = FALSE, exp = FALSE, ...) {
  # Plot the (log) subdistribution hazard ratio over time for every
  # covariate of a fitted ddpcomp model (event 1).
  #
  # Args:
  #   x: fitted ddpcomp object; components used here include predtime,
  #      covnames, xlevels, loghr (draws), loghr.est / loghrl / loghru
  #      (pointwise summaries) and, when available, the simultaneous band
  #      matrices loghrbandl / loghrbandu.
  #   simultaneous: draw simultaneous confidence bands (dotted) in addition
  #      to the pointwise limits (dashed); if the bands were not computed at
  #      fit time they are derived here from the stored draws.
  #   exp: if TRUE plot on the hazard-ratio scale, otherwise the log scale.
  #   ...: unused; kept for compatibility with the plot() generic.

  # Human-readable panel label for a covariate: terms such as
  # "factor(Stage)2" become "Stage=2 vs Stage=<reference level>".
  prettyName <- function(nm) {
    if (grepl("factor", nm, fixed = TRUE)) {
      factorName <- sub(").*", ")", nm)   # e.g. "factor(Stage)"
      reference <- unlist(x$xlevels[factorName])[1]
      nm <- gsub("factor[(]", "", nm)
      nm <- gsub("[)]", "=", nm)
      factorName <- gsub("factor[(]", "", factorName)
      factorName <- gsub("[)]", "=", factorName)
      nm <- paste(nm, "vs", paste(factorName, reference))
    }
    nm
  }

  # One panel: estimate (solid), pointwise limits (dashed) and, when band
  # matrices are supplied, simultaneous band limits (dotted).
  drawPanel <- function(est, lo, hi, bandlo, bandhi, ybot, ytop, title) {
    plot(x$predtime, est, type = "l", lwd = 3, main = title,
         ylab = "", xlab = "Time", ylim = c(ybot, ytop))
    lines(x$predtime, lo, lty = 2, lwd = 3)
    lines(x$predtime, hi, lty = 2, lwd = 3)
    if (!is.null(bandlo)) {
      lines(x$predtime, bandlo, lty = 3, lwd = 3)
      lines(x$predtime, bandhi, lty = 3, lwd = 3)
    }
  }

  if (!x$simultaneous && simultaneous) {
    # Bands were not pre-computed at fit time: derive them from the draws,
    # rescaled back to the original covariate scale via xscale.
    x$loghrbandl <- matrix(confband(x$alpha, x$loghr)[1, ], byrow = TRUE,
                           nrow = ncol(x$x)) / x$xscale
    x$loghrbandu <- matrix(confband(x$alpha, x$loghr)[2, ], byrow = TRUE,
                           nrow = ncol(x$x)) / x$xscale
    if (exp) {
      x$hrbandl <- exp(x$loghrbandl)
      x$hrbandu <- exp(x$loghrbandu)
    }
  }

  if (!x$simultaneous && !simultaneous) {
    # Pointwise intervals only; y limits span the pointwise limit matrices.
    if (!exp) {
      for (i in seq_len(nrow(x$loghr.est))) {
        drawPanel(x$loghr.est[i, ], x$loghrl[i, ], x$loghru[i, ], NULL, NULL,
                  min(x$loghrl, na.rm = TRUE), max(x$loghru, na.rm = TRUE),
                  paste("Log Subdistribution Hazard Ratio of \n Covariate ",
                        prettyName(x$covnames[i]), " for Event 1", sep = ""))
      }
    } else {
      x$hr.est <- exp(x$loghr.est)
      x$hrl <- exp(x$loghrl)
      x$hru <- exp(x$loghru)
      for (i in seq_len(nrow(x$loghr.est))) {
        drawPanel(x$hr.est[i, ], x$hrl[i, ], x$hru[i, ], NULL, NULL,
                  min(x$hrl, na.rm = TRUE), max(x$hru, na.rm = TRUE),
                  paste("Subdistribution Hazard Ratio of \n Covariate ",
                        prettyName(x$covnames[i]), " for Event 1", sep = ""))
      }
    }
  } else {
    # Simultaneous bands (either stored with the fit or computed above);
    # y limits follow the range of the stored draws, as in the companion
    # plot methods.
    if (!exp) {
      for (i in seq_len(nrow(x$loghr.est))) {
        drawPanel(x$loghr.est[i, ], x$loghrl[i, ], x$loghru[i, ],
                  x$loghrbandl[i, ], x$loghrbandu[i, ],
                  min(x$loghr, na.rm = TRUE), max(x$loghr, na.rm = TRUE),
                  paste("Log Subdistribution Hazard Ratio of \n Covariate ",
                        prettyName(x$covnames[i]), " for Event 1", sep = ""))
      }
    } else {
      x$hr <- exp(x$loghr)
      x$hr.est <- exp(x$loghr.est)
      x$hrl <- exp(x$loghrl)
      x$hru <- exp(x$loghru)
      for (i in seq_len(nrow(x$loghr.est))) {
        # BUG FIX: the title previously said "Log Subdistribution Hazard
        # Ratio" although this branch plots the exponentiated (ratio) scale.
        drawPanel(x$hr.est[i, ], x$hrl[i, ], x$hru[i, ],
                  x$hrbandl[i, ], x$hrbandu[i, ],
                  min(x$hr, na.rm = TRUE), max(x$hr, na.rm = TRUE),
                  paste("Subdistribution Hazard Ratio of \n Covariate ",
                        prettyName(x$covnames[i]), " for Event 1", sep = ""))
      }
    }
  }
}
plot.predddpcomp <- function(x, ...) {
  # For each row of new covariate data, draw three diagnostic panels for
  # event 1: cumulative incidence (anchored at F(0) = 0), cause-specific
  # density (anchored at 0), and subdistribution hazard. Each panel shows
  # the estimate (solid) with dashed pointwise interval limits; panels of
  # a given type share common y limits taken over every prediction row.
  drawCurve <- function(times, est, lo, hi, yrange, title) {
    plot(times, est, main = title, type = "l", lwd = 3,
         xlab = "Time", ylab = "", ylim = yrange)
    lines(times, lo, lwd = 3, lty = 2)
    lines(times, hi, lwd = 3, lty = 2)
  }
  yCif <- c(min(x$Fpredl, na.rm = TRUE), max(x$Fpredu, na.rm = TRUE))
  yDens <- c(min(x$dpredl, na.rm = TRUE), max(x$dpredu, na.rm = TRUE))
  yHaz <- c(min(x$hpredl, na.rm = TRUE), max(x$hpredu, na.rm = TRUE))
  for (j in seq_len(nrow(x$Fpred))) {
    drawCurve(c(0, x$tpred), c(0, x$Fpred[j, ]), c(0, x$Fpredl[j, ]),
              c(0, x$Fpredu[j, ]), yCif,
              paste0("Cumulative Incidence Function Estimate\n with New Data ",
                     j, " for Event 1"))
    drawCurve(c(0, x$tpred), c(0, x$dpred[j, ]), c(0, x$dpredl[j, ]),
              c(0, x$dpredu[j, ]), yDens,
              paste0("Cause-specific Density Estimate\n with New Data ",
                     j, " for Event 1"))
    drawCurve(x$tpred, x$hpred[j, ], x$hpredl[j, ], x$hpredu[j, ], yHaz,
              paste0("Subdistribution Hazard Estimate\n with New Data ",
                     j, " for Event 1"))
  }
}
plot.predddp <- function(x, ...) {
  # For each row of new covariate data, draw survival (anchored at
  # S(0) = 1), density (anchored at 0), and hazard estimates, each with
  # dashed pointwise interval limits. Panels of a given type share common
  # y limits taken over every prediction row.
  drawCurve <- function(times, est, lo, hi, yrange, title) {
    plot(times, est, main = title, type = "l", lwd = 3,
         xlab = "Time", ylab = "", ylim = yrange)
    lines(times, lo, lwd = 3, lty = 2)
    lines(times, hi, lwd = 3, lty = 2)
  }
  ySurv <- c(min(x$Spredl, na.rm = TRUE), max(x$Spredu, na.rm = TRUE))
  yDens <- c(min(x$dpredl, na.rm = TRUE), max(x$dpredu, na.rm = TRUE))
  yHaz <- c(min(x$hpredl, na.rm = TRUE), max(x$hpredu, na.rm = TRUE))
  for (j in seq_len(nrow(x$Spred))) {
    drawCurve(c(0, x$tpred), c(1, x$Spred[j, ]), c(1, x$Spredl[j, ]),
              c(1, x$Spredu[j, ]), ySurv,
              paste0("Survival Estimate with New Data ", j))
    drawCurve(c(0, x$tpred), c(0, x$dpred[j, ]), c(0, x$dpredl[j, ]),
              c(0, x$dpredu[j, ]), yDens,
              paste0("Density Estimate with New Data ", j))
    drawCurve(x$tpred, x$hpred[j, ], x$hpredl[j, ], x$hpredu[j, ], yHaz,
              paste0("Hazard Estimate with New Data ", j))
  }
}
|
9bf949e738683ea0609cd13f8ad8a8ae5d4afc5b
|
4eb5cda5f02f054d64745ce91923dd1fa4ea9095
|
/Vuln_Index/eck1.vulnDataImport.R
|
d9bbbc12e41f1af4d58a6be143f4c64379d3f902
|
[] |
no_license
|
mczapanskiy-usgs/WERC-SC
|
e2e7cb68616ef0492144c0ef97629abb280103ae
|
caec92b844c9af737dcfc8d6dbb736de79c3e71c
|
refs/heads/master
| 2021-12-02T16:42:16.529960
| 2021-12-01T16:58:20
| 2021-12-01T16:58:20
| 36,815,737
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,614
|
r
|
eck1.vulnDataImport.R
|
## This script loads the metric and final sensitivity score data for analysis
## and performs cluster and correlation analyses on the metrics to determine
## which ones are most like each other.

# Column types for the metric-score file: identifier columns first, then
# numeric metric/uncertainty columns (counts must match the CSV layout).
classes <- c("character", "character", "character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
metrics <- read.csv("VulnIndexMetricScores.csv", colClasses = classes) ## matrix of all metrics, and uncertainty ranges
sClasses <- c("character", "character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
scores <- read.csv("VulnIndexFinalSensitivityScores.csv", colClasses = sClasses) ## matrix of final PS, CS, and DS

## Hierarchical clustering ----
# Pull out just the 'best score' column for each metric.
var <- subset(metrics, select = c(PopSize, CCSpop, TR, SR, AO, NFR, DFR, MA, BR, HF))
# Transpose so the metrics are rows: the values then cluster by metric.
var2 <- t(var)
distVar <- dist(as.matrix(var2)) ## default Euclidean; method = "manhattan" is an alternative
clustVar <- hclust(distVar)
plot(clustVar)

## Correlation analysis ----
library(corrgram)
# Visual depiction of relationships: blue = positive correlation,
# red = negative correlation, white = no correlation.
corrgram(var)
cor(var)

## Run statistics on the correlations
library(Hmisc)
var <- as.matrix(var)
# BUG FIX: the result was previously assigned to `cor`, masking base::cor()
# (which is called just above); use a distinct name instead.
metricCor <- rcorr(var, type = "pearson") ## assuming parametric assumptions are met...
|
eb0fd28ef459158140dd73bd766813d012029569
|
ee7524bebc54a414d5b64addd77a78600b4b38ba
|
/cachematrix.R
|
7943b0758a719f6676a13da0aa588b660b150651
|
[] |
no_license
|
ek106/ProgrammingAssignment2
|
37aa62935cae5c7b10f15f9ea7b94d0a26f4e55e
|
dc32293d2e5a1ceb8aecc69859270a5a716ece4f
|
refs/heads/master
| 2021-01-13T04:55:12.093016
| 2017-02-07T18:08:00
| 2017-02-07T18:08:00
| 81,139,017
| 0
| 0
| null | 2017-02-06T22:11:39
| 2017-02-06T22:11:39
| null |
UTF-8
|
R
| false
| false
| 2,026
|
r
|
cachematrix.R
|
## Use lexical scoping to cache the inverse of a matrix alongside the
## matrix itself.
## makeCacheMatrix builds a "cache-aware matrix": a list of four closures
## that share one enclosing environment holding the matrix and its
## (lazily supplied) inverse:
##   set        : store a new matrix and invalidate any cached inverse
##   get        : return the stored matrix
##   setinverse : store the inverse in the cache
##   getinverse : return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL                 # inverse not yet computed
  set <- function(y) {
    x <<- y                      # replace the stored matrix...
    cached <<- NULL              # ...and drop the now-stale inverse
  }
  get <- function() x
  setinverse <- function(i) cached <<- i
  getinverse <- function() cached
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored via setinverse() for future calls, and returned. The matrix is
## assumed to be invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")   # cache hit: skip the computation
    return(cached)
  }
  inv <- solve(x$get())              # cache miss: invert and remember
  x$setinverse(inv)
  inv
}
|
559408896ba248d1546f8e93d36c8dd2f90bc85f
|
b98454c4905d0d9f600baa4db64e3ff6480823c2
|
/2-install-site-packages.R
|
4490390ab4bcd1daaca7529d1ef968924204416c
|
[] |
no_license
|
southwick-associates/R-setup
|
48f2f45ab63ac45e27188539f3bfcd9e0c54c3b6
|
1c8284c9638c3d45225d11b5fbf4d9d8247d4b64
|
refs/heads/master
| 2020-09-24T02:49:47.251989
| 2020-02-17T20:25:50
| 2020-02-17T20:25:50
| 225,644,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
2-install-site-packages.R
|
# Install the Southwick "site library" of R packages.
# A number of packages will be downloaded and installed;
# select OK (or yes) if prompted to install additional packages.
source("code/02-install-site-library.R") # southwick R settings
|
2552505672491271a611ddf2df6580f2d24cdeeb
|
fd570307c637f9101ab25a223356ec32dacbff0a
|
/src-local/specpr/src.specpr/init/wdgsb5.r
|
d401343ccdbcdf8785a4c5da7ee6b67491471d5e
|
[] |
no_license
|
ns-bak/tetracorder-tutorial
|
3ab4dd14950eff0d63429291c648820fb14bb4cb
|
fd07c008100f6021c293ce3c1f69584cc35de98a
|
refs/heads/master
| 2022-07-30T06:04:07.138507
| 2021-01-03T22:19:09
| 2021-01-03T22:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
wdgsb5.r
|
subroutine wdgsb5
implicit integer*4 (i-n)
#ccc version date: 06/01/83
#ccc author(s): Roger Clark & Jeff Hoover
#ccc language: Fortran
#ccc
#ccc short description:
#ccc This subroutine displays various commands
#ccc algorithm description: none
#ccc system requirements: none
#ccc subroutines called:
#ccc none
#ccc argument list description:
#ccc argument: none
#ccc parameter description:
#ccc common description:
#ccc message files referenced:
#ccc internal variables:
#ccc file description:
#ccc user command lines:
#ccc update information:
#ccc NOTES:
#ccc Writes the "SETUP parameters" help menu (format labels 330 and
#ccc 331) to the terminal output unit ttyout and returns; the caller
#ccc is responsible for reading the user's menu choice.
#ccc
# lundefs supplies the logical unit numbers, including ttyout.
include "../common/lundefs"
# Menu header followed by the list of single-letter SETUP commands.
write (ttyout,330)
write (ttyout, 331)
return
330 format (20x, '*** SETUP parameters ***', //)
331 format (5x,
'type o to change the OBSERVATORY or observatory site',/,
5x, 'type r to REASSIGN FILES and devices', /,5x,
'type f to EVALUATE PROTECTION vs file sizes', /,5x,
' (no response indicates all is consistent)',/,5x,
'type g and number to set GRAPHICS type (see manual)',/,5x,
'type b to toggle BELL',/,5x,
'type v to change the NAME of device v',/,5x,
'type d to change the NAME of device d',/,5x,
'type u to change the NAME of device u',/,5x,
'type y to change the NAME of device y',/,5x,
'type w to change the NAME of device w',/,5x,
'type cp to change the FILE PROTECTION', //, 5x,
'press return to go back to the MAIN routines.', /)
end
|
6612ac8e90b3c1ad25a2956c1ae21f322269f4ec
|
93c1bea1358fc663906a33b3260b59a3225f169f
|
/semTools/R/miPowerFit.R
|
146eb78a8840ab291be17c7501505cc8ef7f0b3b
|
[] |
no_license
|
simsem/semTools
|
8130c9dd00a545536635c72f428804757cc82e09
|
36b8a032cabe77e0df7da32f403168fccc74699a
|
refs/heads/master
| 2023-08-18T01:04:37.833956
| 2023-08-10T14:54:11
| 2023-08-10T14:54:11
| 4,353,483
| 60
| 43
| null | 2023-08-18T15:12:21
| 2012-05-17T00:47:07
|
R
|
UTF-8
|
R
| false
| false
| 15,963
|
r
|
miPowerFit.R
|
### Sunthud Pornprasertmanit and Terrence D. Jorgensen (added ... argument)
### Last updated: 2 September 2021
##' Modification indices and their power approach for model fit evaluation
##'
##' The model fit evaluation approach using modification indices and expected
##' parameter changes.
##'
##' To decide whether a parameter should be freed, one can inspect its
##' modification index (MI) and expected parameter change (EPC).
##' Those values can be used to evaluate model fit by 2 methods.
##'
##' Method 1: Saris, Satorra, and van der Veld (2009, pp. 570--573) used
##' power (probability of detecting a significant MI) and EPC to decide whether
##' to free a parametr. First, one should evaluate whether a parameter's MI
##' is significant. Second, one should evaluate whether the power to detect a
##' target EPC is high enough. The combination of criteria leads to the
##' so-called "JRule" first implemented with LISREL (van der Veld et al., 2008):
##'
##' \itemize{
##' \item If the MI is not significant and the power is low,
##' the test is inconclusive.
##' \item If the MI is not significant and the power is high,
##' there is no misspecification.
##' \item If the MI is significant and the power is low,
##' the fixed parameter is misspecified.
##' \item If the MI is significant and the power is high,
##' the EPC is investigated. If the EPC is large (greater than the
##' the target EPC), the parameter is misspecified. If the EPC is low
##' (lower than the target EPC), the parameter is not misspecificied.
##' }
##'
##' Method 2: The confidence interval (CI) of an EPC is calculated.
##' These CIs are compared with the range of trivial
##' misspecification, which could be (-\code{delta}, \code{delta}) or (0,
##' \code{delta}) for nonnegative parameters.
##'
##' \itemize{
##' \item If a CI overlaps with the range of trivial misspecification,
##' the test is inconclusive.
##' \item If a CI completely exceeds the range of trivial misspecification,
##' the fixed parameters are severely misspecified.
##' \item If a CI is completely within the range of trivial misspecification,
##' the fixed parameters are trivially misspecified.
##' }
##'
##'
##' @aliases miPowerFit miPowerFit
##' @importFrom lavaan lavInspect
##' @importFrom stats qnorm qchisq pchisq
##'
##' @param lavaanObj The lavaan model object used to evaluate model fit
##' @param stdLoad The amount of standardized factor loading that one would like
##' to be detected (rejected). The default value is 0.4, which is suggested by
##' Saris and colleagues (2009, p. 571).
##' @param cor The amount of factor or error correlations that one would like to
##' be detected (rejected). The default value is 0.1, which is suggested by
##' Saris and colleagues (2009, p. 571).
##' @param stdBeta The amount of standardized regression coefficients that one
##' would like to be detected (rejected). The default value is 0.1, which is
##' suggested by Saris and colleagues (2009, p. 571).
##' @param intcept The amount of standardized intercept (similar to Cohen's
##' \emph{d} that one would like to be detected (rejected). The default value
##' is 0.2, which is equivalent to a low effect size proposed by Cohen (1988,
##' 1992).
##' @param stdDelta The vector of the standardized parameters that one would
##' like to be detected (rejected). If this argument is specified, the value
##' here will overwrite the other arguments above. The order of the vector
##' must be the same as the row order from modification indices from the
##' \code{lavaan} object. If a single value is specified, the value will be
##' applied to all parameters.
##' @param delta The vector of the unstandardized parameters that one would like
##' to be detected (rejected). If this argument is specified, the value here
##' will overwrite the other arguments above. The order of the vector must be
##' the same as the row order from modification indices from the \code{lavaan}
##' object. If a single value is specified, the value will be applied to all
##' parameters.
##' @param cilevel The confidence level of the confidence interval of expected
##' parameter changes. The confidence intervals are used in the equivalence
##' testing.
##' @param \dots arguments passed to \code{\link[lavaan]{modificationIndices}},
##' except for \code{delta}, which is already an argument (which can be
##' substituted for \code{stdDelta} or specific sets of parameters using
##' \code{stdLoad}, \code{cor}, \code{stdBeta}, and \code{intcept}).
##'
##' @return A data frame with these variables:
##' \enumerate{
##' \item \code{lhs}: The left-hand side variable, with respect to the operator in
##' in the lavaan \code{\link[lavaan]{model.syntax}}
##' \item \code{op}: The lavaan syntax operator: "~~" represents covariance,
##' "=~" represents factor loading, "~" represents regression, and
##' "~1" represents intercept.
##' \item \code{rhs}: The right-hand side variable
##' \item \code{group}: The level of the group variable for the parameter in question
##' \item \code{mi}: The modification index of the fixed parameter
##' \item \code{epc}: The EPC if the parameter is freely estimated
##' \item \code{target.epc}: The target EPC that represents the minimum size
##' of misspecification that one would like to be detected
##' by the test with a high power
##' \item \code{std.epc}: The standardized EPC if the parameter is freely estimated
##' \item \code{std.target.epc}: The standardized target expected parameter change
##' \item \code{significant.mi}: Represents whether the modification index value is
##' significant
##' \item \code{high.power}: Represents whether the power is enough to detect the
##' target expected parameter change
##' \item \code{decision.pow}: The decision whether the parameter is misspecified
##' or not based on Saris et al's method: \code{"M"} represents the parameter
##' is misspecified, \code{"NM"} represents the parameter is not misspecified,
##' \code{"EPC:M"} represents the parameter is misspecified decided by
##' checking the expected parameter change value, \code{"EPC:NM"} represents
##' the parameter is not misspecified decided by checking the expected
##' parameter change value, and \code{"I"} represents the decision is
##' inconclusive.
##' \item \code{se.epc}: The standard errors of the expected parameter changes.
##' \item \code{lower.epc}: The lower bound of the confidence interval of expected
##' parameter changes.
##' \item \code{upper.epc}: The upper bound of the confidence interval of expected
##' parameter changes.
##' \item \code{lower.std.epc}: Lower confidence limit of standardized EPCs
##' \item \code{upper.std.epc}: Upper confidence limit of standardized EPCs
##' \item \code{decision.ci}: Decision whether the parameter is misspecified
##' based on the CI method: \code{"M"} represents the
##' parameter is misspecified, \code{"NM"} represents the parameter is not
##' misspecified, and \code{"I"} represents the decision is inconclusive.
##' }
##'
##' The row numbers matches with the results obtained from the
##' \code{inspect(object, "mi")} function.
##'
##' @author Sunthud Pornprasertmanit (\email{psunthud@@gmail.com})
##'
##' @seealso \code{\link{moreFitIndices}} For the additional fit indices
##' information
##'
##' @references
##' Cohen, J. (1988). \emph{Statistical power analysis for the
##' behavioral sciences} (2nd ed.). Hillsdale, NJ: Erlbaum.
##'
##' Cohen, J. (1992). A power primer. \emph{Psychological Bulletin, 112}(1),
##' 155--159. \doi{10.1037/0033-2909.112.1.155}
##'
##' Saris, W. E., Satorra, A., & van der Veld, W. M. (2009). Testing structural
##' equation models or detection of misspecifications? \emph{Structural Equation
##' Modeling, 16}(4), 561--582. \doi{10.1080/10705510903203433}
##'
##' van der Veld, W. M., Saris, W. E., & Satorra, A. (2008).
##' \emph{JRule 3.0 Users Guide}. \doi{10.13140/RG.2.2.13609.90729}
##'
##' @examples
##'
##' library(lavaan)
##'
##' HS.model <- ' visual =~ x1 + x2 + x3 '
##' fit <- cfa(HS.model, data = HolzingerSwineford1939,
##' group = "sex", group.equal = c("loadings","intercepts"))
##' miPowerFit(fit, free.remove = FALSE, op = "=~") # loadings
##' miPowerFit(fit, free.remove = FALSE, op = "~1") # intercepts
##'
##' model <- '
##' # latent variable definitions
##' ind60 =~ x1 + x2 + x3
##' dem60 =~ y1 + a*y2 + b*y3 + c*y4
##' dem65 =~ y5 + a*y6 + b*y7 + c*y8
##'
##' # regressions
##' dem60 ~ ind60
##' dem65 ~ ind60 + dem60
##'
##' # residual correlations
##' y1 ~~ y5
##' y2 ~~ y4 + y6
##' y3 ~~ y7
##' y4 ~~ y8
##' y6 ~~ y8
##' '
##' fit2 <- sem(model, data = PoliticalDemocracy, meanstructure = TRUE)
##' miPowerFit(fit2, stdLoad = 0.3, cor = 0.2, stdBeta = 0.2, intcept = 0.5)
##'
##' @export
miPowerFit <- function(lavaanObj, stdLoad = 0.4, cor = 0.1, stdBeta = 0.1,
                       intcept = 0.2, stdDelta = NULL, delta = NULL,
                       cilevel = 0.90, ...) {
  # Evaluate every fixed parameter's modification index (MI) and expected
  # parameter change (EPC) using both the Saris et al. (2009) power method
  # and the confidence-interval (equivalence-testing) method. See the
  # roxygen block above for the full contract and column descriptions.
  mi <- lavaan::modificationIndices(lavaanObj, ...)
  mi <- mi[mi$op != "==",]                 # drop equality-constraint rows
  # Approximate SE of each EPC: |epc| = sigma * sqrt(MI)  =>  sigma = epc/sqrt(MI)
  sigma <- mi[,"epc"] / sqrt(mi[,"mi"])
  if (is.null(delta)) {
    # No unstandardized target supplied: build one from the standardized
    # targets (either user-given stdDelta or per-operator defaults).
    if (is.null(stdDelta))
      stdDelta <- getTrivialEpc(mi, stdLoad = stdLoad, cor = cor,
                                stdBeta = stdBeta, intcept = intcept)
    if (length(stdDelta) == 1) stdDelta <- rep(stdDelta, nrow(mi))
    delta <- unstandardizeEpc(mi, stdDelta, findTotalVar(lavaanObj))
  }
  if (length(delta) == 1) delta <- rep(delta, nrow(mi))
  # Power of the 1-df chi-square MI test to detect the target EPC, at
  # alpha = .05 with desired power .80 (fixed, per Saris et al.).
  ncp <- (delta / sigma)^2
  alpha <- 0.05
  desiredPow <- 0.80
  cutoff <- qchisq(1 - alpha, df = 1)
  pow <- 1 - pchisq(cutoff, df = 1, ncp = ncp)
  sigMI <- mi[,"mi"] > cutoff
  highPow <- pow > desiredPow
  group <- rep(1, nrow(mi))
  if ("group" %in% colnames(mi)) group <- mi[ , "group"]
  # Saris et al. decision per row: "M", "NM", "EPC:M", "EPC:NM", or "I".
  decision <- mapply(decisionMIPow, sigMI = sigMI, highPow = highPow,
                     epc = mi[ , "epc"], trivialEpc = delta)
  # If delta was user-supplied, derive the matching standardized targets.
  if (is.null(stdDelta)) stdDelta <- standardizeEpc(mi, findTotalVar(lavaanObj),
                                                    delta = delta)
  # NOTE: later code indexes `result` by position -- column 5 is the MI and
  # column 6 is the EPC; keep this cbind order in sync with those uses.
  result <- cbind(mi[ , 1:3], group, as.numeric(mi[ , "mi"]), mi[ , "epc"],
                  delta, standardizeEpc(mi, findTotalVar(lavaanObj)),
                  stdDelta, sigMI, highPow, decision)
  # New method: CI of each EPC compared against the trivial region.
  crit <- abs(qnorm((1 - cilevel)/2))
  seepc <- abs(result[,6]) / sqrt(abs(result[,5]))   # se = |epc| / sqrt(MI)
  lowerepc <- result[,6] - crit * seepc
  upperepc <- result[,6] + crit * seepc
  stdlowerepc <- standardizeEpc(mi, findTotalVar(lavaanObj), delta = lowerepc)
  stdupperepc <- standardizeEpc(mi, findTotalVar(lavaanObj), delta = upperepc)
  # Variances ("lhs ~~ lhs") are nonnegative, so their trivial region is
  # one-sided (0, delta) rather than (-delta, delta).
  isVar <- mi[,"op"] == "~~" & mi[,"lhs"] == mi[,"rhs"]
  decisionci <- mapply(decisionCIEpc, targetval = as.numeric(stdDelta),
                       lower = stdlowerepc, upper = stdupperepc,
                       positiveonly = isVar)
  result <- cbind(result, seepc, lowerepc, upperepc, stdlowerepc,
                  stdupperepc, decisionci)
  # Drop rows where the power method was undecidable (NA decision).
  result <- result[!is.na(decision), ]
  colnames(result) <- c("lhs","op","rhs","group","mi","epc","target.epc",
                        "std.epc","std.target.epc","significant.mi",
                        "high.power","decision.pow","se.epc","lower.epc",
                        "upper.epc","lower.std.epc","upper.std.epc","decision.ci")
  class(result) <- c("lavaan.data.frame","data.frame")
  return(result)
}
## ----------------
## Hidden Functions
## ----------------
## totalFacVar: total (model-implied) factor variances given the latent
## regression coefficient matrix `beta` and the factor residual covariance
## matrix `psi`, via the reduced form (I - B)^{-1} Psi (I - B)^{-T}.
## Returns the diagonal of the implied covariance matrix.
totalFacVar <- function(beta, psi) {
  # FIX: the inverse (I - B)^{-1} was previously computed twice; compute it
  # once and reuse it.
  reduced <- solve(diag(nrow(psi)) - beta)
  diag(reduced %*% psi %*% t(reduced))
}
## findTotalVar: model-implied total variances of all indicators and
## factors, returned as a list with one named numeric vector per group.
##' @importFrom lavaan lavInspect
findTotalVar <- function(lavaanObj) {
  nGroups <- lavInspect(lavaanObj, "ngroups")
  cov.all <- lavInspect(lavaanObj, "cov.all")
  # lavInspect returns a bare matrix for single-group models; wrap it so
  # the loop below can treat every model uniformly.
  if (nGroups == 1) cov.all <- list(cov.all)
  result <- vector("list", nGroups)
  for (g in seq_len(nGroups)) {
    vars <- diag(cov.all[[g]])
    names(vars) <- rownames(cov.all[[g]])
    result[[g]] <- vars
  }
  result
}
## getTrivialEpc: map each modification-index row to the "trivially
## misspecified" standardized EPC for its parameter type, keyed on the
## lavaan operator: "=~" loading, "~~" covariance, "~1" intercept,
## "~" regression. Returns a character vector (gsub coerces the numeric
## targets to strings), one entry per row of `mi`.
getTrivialEpc <- function(mi, stdLoad = 0.4, cor = 0.1, stdBeta = 0.1,
                          intcept = 0.2) {
  # Substitute operators from most to least specific; the bare "~" must be
  # handled last so it does not clobber "=~", "~~", or "~1".
  out <- mi[, "op"]
  out <- gsub("=~", stdLoad, out)
  out <- gsub("~~", cor, out)
  out <- gsub("~1", intcept, out)
  gsub("~", stdBeta, out)
}
## unstandardizeEpc: convert standardized (target) EPCs back to the raw
## scale, using the total variances of the variables on each side of the
## parameter. The transformation depends on the lavaan operator; rows with
## an unrecognized operator (including thresholds "|") yield NA.
unstandardizeEpc <- function(mi, delta, totalVar) {
  varNames <- names(totalVar[[1]])
  group <- if ("group" %in% colnames(mi)) mi[, "group"] else rep(1, nrow(mi))
  pickVar <- function(pos, group) totalVar[[group]][pos]
  lhsVar <- mapply(pickVar, pos = match(mi[, "lhs"], varNames), group = group)
  rhsVar <- mapply(pickVar, pos = match(mi[, "rhs"], varNames), group = group)
  convertOne <- function(op, lhsVar, rhsVar, delta) {
    if (op == "|") return(NA)            # thresholds are not rescaled
    lhsSD <- sqrt(lhsVar)
    rhsSD <- sqrt(rhsVar)
    # delta may arrive as character (e.g. from getTrivialEpc)
    if (!is.numeric(delta)) delta <- as.numeric(delta)
    switch(op,
           "=~" = (rhsSD * delta) / lhsSD,  # loading: scale by sd ratio
           "~~" = lhsSD * delta * rhsSD,    # covariance: r * sd1 * sd2
           "~1" = lhsSD * delta,            # intercept: d * sd
           "~"  = (lhsSD * delta) / rhsSD,  # regression: b = beta * sdY / sdX
           NA)
  }
  mapply(convertOne, op = mi[, "op"], lhsVar = lhsVar, rhsVar = rhsVar,
         delta = delta)
}
## standardizeEpc: convert unstandardized EPCs to the standardized scale
## using the total variances of the variables on each side of the
## parameter. When `delta` is NULL, the "epc" column of the
## modification-index table is standardized. Rows with an unrecognized
## operator yield NA. (Header previously mislabeled this function as
## unstandardizeEpc.)
standardizeEpc <- function(mi, totalVar, delta = NULL) {
  if (is.null(delta)) delta <- mi[, "epc"]
  varNames <- names(totalVar[[1]])
  group <- if ("group" %in% colnames(mi)) mi[, "group"] else rep(1, nrow(mi))
  pickVar <- function(pos, group) totalVar[[group]][pos]
  lhsVar <- mapply(pickVar, pos = match(mi[, "lhs"], varNames), group = group)
  rhsVar <- mapply(pickVar, pos = match(mi[, "rhs"], varNames), group = group)
  convertOne <- function(op, lhsVar, rhsVar, delta) {
    lhsSD <- sqrt(lhsVar)
    rhsSD <- sqrt(rhsVar)
    if (!is.numeric(delta)) delta <- as.numeric(delta)
    switch(op,
           "=~" = (delta / rhsSD) * lhsSD,  # stdload = b * sd(latent) / sd(indicator)
           "~~" = delta / (lhsSD * rhsSD),  # r = cov / (sd1 * sd2)
           "~1" = delta / lhsSD,            # d = mean diff / sd
           "~"  = (delta / lhsSD) * rhsSD,  # beta = b * sdX / sdY
           NA)
  }
  mapply(convertOne, op = mi[, "op"], lhsVar = lhsVar, rhsVar = rhsVar,
         delta = delta)
}
## decisionMIPow: classify one fixed parameter following Saris, Satorra &
## van der Veld (2009), combining MI significance with the power to detect
## the trivial EPC:
##   significant + high power -> inspect |epc| vs |trivialEpc| ("EPC:M"/"EPC:NM")
##   significant + low power  -> misspecified ("M")
##   not sig.    + high power -> not misspecified ("NM")
##   not sig.    + low power  -> inconclusive ("I")
## Returns NA when either input flag is NA.
decisionMIPow <- function(sigMI, highPow, epc, trivialEpc) {
  if (is.na(sigMI) || is.na(highPow)) return(NA)
  if (sigMI) {
    if (highPow) {
      if (abs(epc) > abs(trivialEpc)) "EPC:M" else "EPC:NM"
    } else {
      "M"
    }
  } else {
    if (highPow) "NM" else "I"
  }
}
## decisionCIEpc: equivalence-testing decision for one parameter. The CI
## [lower, upper] of the standardized EPC is compared with the region of
## trivial misspecification: (0, targetval) when positiveonly (variances),
## (-targetval, targetval) otherwise.
##   "M"  = CI lies beyond the trivial region (misspecified)
##   "NM" = CI lies entirely within it (not misspecified)
##   "I"  = CI overlaps the boundary (inconclusive)
## Returns NA when either limit is NA.
decisionCIEpc <- function(targetval, lower, upper, positiveonly = FALSE) {
  if (is.na(lower) || is.na(upper)) return(NA)
  if (positiveonly) {
    if (lower > targetval) return("M")
    if (upper < targetval) return("NM")
    return("I")
  }
  if (lower > targetval || upper < -targetval) {
    "M"
  } else if (upper < targetval && -targetval < lower) {
    "NM"
  } else {
    "I"
  }
}
|
d8bc43e09d94f00a3a7104c26dc942446df72593
|
9818edb917a1244192e03076b920114518715e8a
|
/systematic-investor/Introduction-to-Backtesting-library.R
|
25a5ba96cddad839d867bc65d4e433a7928bc3e3
|
[
"MIT"
] |
permissive
|
rivu-basu/R_for_Quantitative_Finance
|
927d06495a1668e32ec0671b7b88ba72e6bdf183
|
f4c78c547c28408cc0f859630ebe57f2fb61b6c8
|
refs/heads/master
| 2020-05-21T17:07:27.580327
| 2014-08-29T03:25:52
| 2014-08-29T03:25:52
| 186,116,962
| 1
| 0
|
MIT
| 2019-05-11T10:07:26
| 2019-05-11T10:07:25
| null |
UTF-8
|
R
| false
| false
| 2,917
|
r
|
Introduction-to-Backtesting-library.R
|
# Evaluate and analyze trading strategies with the Systematic Investor
# Toolbox (SIT), which ships as a gzipped R source file.
# Open a gz-compressed connection to 'sit.gz' (expected in the working
# directory) and source the toolbox code from it.
con = gzcon(file('sit.gz', 'rb')) #from same folder
source(con)
close(con)
# SIT helper: installs (if necessary) and loads the quantmod package.
load.packages('quantmod')
# bt.simple: minimal vectorized backtest.
#   data   - a time series of prices (as returned by getSymbols; must work
#            with Cl() and ROC())
#   signal - an indicator series aligned with data: 1 = long, 0 = flat
# Returns a list with per-bar strategy returns ($ret) and the compounded
# equity curve ($equity).
bt.simple <- function(data, signal)
{
  # Shift the signal by one bar so a signal observed at time t is only
  # acted on from the next bar, avoiding look-ahead bias.
  # NOTE(review): lag() direction differs between zoo and xts; this code
  # appears to assume xts semantics (lag(x, 1) = previous value) -- confirm
  # if plain zoo inputs are possible.
  signal <- lag(signal,1)
  # Carry the last signal forward over gaps; any leading NAs become 0 (flat).
  signal <- na.locf(signal, na.rm = FALSE)
  signal[is.na(signal)] = 0
  # Close-to-close simple ("discrete") returns.
  # ROC() : Calculate the (rate of) change of a series over n periods.
  ret <- ROC(Cl(data), type="discrete")
  ret[1] = 0  # first return is NA by construction; treat as zero
  # Strategy returns and compounded equity curve.
  bt <- list()
  bt$ret <- ret * signal
  bt$equity <- cumprod(1 + bt$ret)
  return(bt)
}
# ---- Demonstration of bt.simple ----
# Load historical prices from Yahoo Finance.
data <- getSymbols('SPY', src = 'yahoo', from = '1980-01-01', auto.assign = F)
# Buy & hold benchmark: always fully invested.
signal <- rep(1, nrow(data))
buy.hold <- bt.simple(data, signal)
# Moving-average cross strategy: long when the close is above its 200-day SMA.
# Cl: get closing price
sma <- SMA(Cl(data), 200)
signal <- ifelse(Cl(data) > sma, 1, 0) # if price is above the moving average, buy
sma.cross <- bt.simple(data, signal)
# Chart the strategies' performance over 2000:2009, with both equity curves
# normalized to 1 at the start of the window.
dates <- '2000::2009'
buy.hold.equity <- buy.hold$equity[dates] / as.double(buy.hold$equity[dates][1])
sma.cross.equity <- sma.cross$equity[dates] / as.double(sma.cross$equity[dates][1])
# chartSeries() : Charting tool to create standard financial charts given a time series like object
chartSeries(buy.hold.equity, TA = c(addTA(sma.cross.equity, on=1, col='red')),
theme ='white', yrange = range(buy.hold.equity, sma.cross.equity) )
# Same strategies implemented with the backtesting library in the
# Systematic Investor Toolbox:
#*****************************************************************
# Load historical data
#******************************************************************
load.packages('quantmod')
tickers <- spl('SPY')
data <- new.env() # data is an environment of symbols
# bt.prep function merges and aligns all symbols in the data environment
getSymbols(tickers, src = 'yahoo', from = '1970-01-01', env = data, auto.assign = T)
bt.prep(data, align='keep.all', dates='1970::2011')
#*****************************************************************
# Code Strategies
#******************************************************************
prices <- data$prices # price for SPY
# bt.run computes the equity curve of strategy specified by data$weight matrix.
# The data$weight matrix holds weights (signals) to open/close positions
# Buy & Hold
data$weight[] <- 1
buy.hold <- bt.run(data)
# MA Cross
# bt.apply function applies user given function to each symbol in the data environment
sma <- bt.apply(data, function(x) { SMA(Cl(x), 200) } )
data$weight[] <- NA # reset the weights matrix before assigning new signals
data$weight[] <- iif(prices >= sma, 1, 0)
sma.cross <- bt.run(data, trade.summary=T)
# Side-by-side performance report of the two strategies.
plotbt.custom.report(sma.cross, buy.hold)
|
2f8662c4b5a367f8a07cffc2b0f964d34ea26ee8
|
b9df7a5a1751767c00b6b73e1734d428411be3de
|
/man/acs.fetch.Rd
|
d6192a236a9f42c882cd35499920864af5256f56
|
[] |
no_license
|
pssguy/acs-1
|
bedfde0752f59975c6b4610e59a7dfd96fad9376
|
d3fcfdff719d328356ab1f2a3b4af1133385cea7
|
refs/heads/master
| 2021-01-15T08:04:08.213900
| 2014-01-20T00:00:00
| 2014-01-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,837
|
rd
|
acs.fetch.Rd
|
\name{acs.fetch}
\alias{acs.fetch}
\alias{api.url.maker}
\title{ Downloads ACS data via the Census API and converts to a proper
acs object with estimates, standard errors, and associated metadata.}
\description{ When passed a valid geo.set object and either lookup terms
(table.number, table.name, keyword) or a valid acs.lookup object,
queries the Census ACS API and downloads data to create a new acs-class
object. Geographical information is preserved, metadata in bundled
together with the acs object, and margins of errors are converted to
standard errors to accompany estimates (see \code{help(acs)}).}
\usage{
acs.fetch(endyear = 2011, span = 5, geography, table.name,
table.number, variable, keyword, key, col.names = "auto", ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{endyear}{an integer (defaults to 2011) indicating the latest year of the
data in the survey (e.g., for data from the 2007-2011 5-year ACS data,
endyear would be 2011; limited by acceptable values currently provided
by the Census API)}
\item{span}{an integer indicating the span (in years) of the desired
ACS data (should be 1, 3, or 5); defaults to 5}
\item{geography}{a valid geo.set object specifying the census
geography or geographies to be fetched; can be created "on the fly"
with a call to \code{geo.make()} }
\item{table.name}{a string giving the search term(s) to find in the
name of the ACS census table (for example, "Sex" or "Age"); accepts
multiple words, which must all be found in the returned table names;
always case-sensitive. (Note: when set, this variable is passed to
an internal call to acs.lookup---see
\code{\link{acs.lookup}}).}
\item{table.number}{a string (not a number) indicating the table from
the Census to fetch; examples: "B01003" or "B23013"; always
case-sensitive. Used to fetch all variables for a given table
number; if "table.number" is provided, other lookup variables
("table.name" or "keyword") will be ignored.}
\item{variable}{an object of acs.lookup class, or a string (not a
number) or vector of strings indicating the exact variable
number(s) the Census to fetch. See details for more.
Non-acs.lookup examples include "B01003_001" or "B23013_003" or
c("B01003_001", "B23013_003"). Used to fetch specific variables,
as opposed to all variables for a given table number; if
"variable" is provided, all other lookup variables ("table.name",
"table.number", and "keyword") will be ignored. }
\item{keyword}{a string or vector of strings giving the search
term(s) to find in the name of the ACS census variable (for
example, "Male" or "Haiti"); accepts multiple words, which must
all be found in the returned variable names; always
case-sensitive. (Note: when set, this variable is passed to an
internal call to acs.lookup---see \code{\link{acs.lookup}}).}
\item{key}{a string providing the Census API key to use for when
fetching data. Typically saved once via \code{api.key.install}
and passed automatically with each call; see
\code{\link{api.key.install}}}
\item{col.names}{either "auto","pretty", or a vector of strings of
the same length as the number of variables to be fetched. When
"auto" (the default), census variable codes will be used as column
names for the fetched data; when "pretty", descriptive variables
names will be used; otherwise col.names will be used.}
\item{\dots}{ Not used interactively (reserved for recursive calls).
}}
\details{
Assuming you have created some geography with geo.make and you have
already installed an API key, the call is quite simple: for example,
acs.fetch(geography=my.geo, table.number="B01003". (For more on API
keys, see \code{\link{api.key.install}}; if you haven't installed one,
you can always add a "key=YOUR.KEY.HERE" argument to \code{acs.fetch}
each time.)
By default, acs.fetch will download the ACS data from 2007--2011 (the
Five-Year ACS), but the functions includes options for "endyear" and
"span". At present, the API only provides the five-year data for
2006--2010, 2007--2011, and 2008--2012, but as more data is added the
function will be able to download other years and products.
Downloading based on a table number is probably the most fool-proof way
to get the data you want, but acs.fetch will also accept a number of
other arguments instead of "table.number". Users can provide strings to
search for in table names (e.g., table.name="Age by Sex" or
table.name="First Ancestry Reported") or keywords to find in the names
of variables (e.g., keyword="Male" or keyword="Haiti")---but be warned:
given how many tables there are, you may get more matches than you
expected and suffer from the "download overload" of fast-scrolling
screens full of data. (But don't lose hope: see the acs.lookup tool,
which can help with this problem.)
On the other hand, if you know you want a specific variable or two (not
a whole table, just a few columns of it, such as variable="B05001_006"
or variable=c("B16001_058", "B16001_059")), you can ask for that with
\code{acs.fetch(variable="these.variable.codes", ...)}.
Note: when "combine=T" for the fetched geo.set, data will be aggregated
in the resulting acs abject. Some users may therefore wish to specify
"one.zero=T" as an additional argument to \code{acs.fetch}; see
\code{\link{sum-methods}}. }
\value{Returns a new acs-class object with estimates, standard errors
(derived from the census 90\% margins of error), and metadata of the
fetched data from the Census API. }
\references{
http://www.census.gov/developers/
}
\author{
Ezra Haber Glenn \email{eglenn@mit.edu}
}
\seealso{
\code{\link{acs.lookup}}.
}
|
7c06bd1f0030b7f23eab84fd531e34fca5d24dce
|
c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab
|
/man/gtkTextMarkGetName.Rd
|
50fac92cdd5d6112bbede194b4ebd26ad22cb837
|
[] |
no_license
|
cran/RGtk2.10
|
3eb71086e637163c34e372c7c742922b079209e3
|
75aacd92d4b2db7d0942a3a6bc62105163b35c5e
|
refs/heads/master
| 2021-01-22T23:26:26.975959
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 379
|
rd
|
gtkTextMarkGetName.Rd
|
\alias{gtkTextMarkGetName}
\name{gtkTextMarkGetName}
\title{gtkTextMarkGetName}
\description{Returns the mark name; returns NULL for anonymous marks.}
\usage{gtkTextMarkGetName(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkTextMark}}] a \code{\link{GtkTextMark}}}}
\value{[character] mark name}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
714a66d282a571db27ced623cd20b7efa70bd0fd
|
f24287d774fa1ed5911c99d2d8402a913dbadae8
|
/Model_v03.R
|
fe5bb24052f1ac7c6ac9582084199a83b8be15fb
|
[] |
no_license
|
jsamaitis/Price-and-Volatility-Forecast-with-SLR
|
9e5e2ae98318311aa31d71acbbdd8064fe719a20
|
92d633af2043d96b3801cf3df060c4d842b1c974
|
refs/heads/master
| 2021-09-18T12:56:02.970339
| 2018-07-14T07:42:16
| 2018-07-14T07:42:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,606
|
r
|
Model_v03.R
|
library(tidyverse)
library(lubridate)
library(forecast)
library(gridExtra)
library(quantmod)
#Load and filter data
load_ticker <- function(ticker, start, end) {
#Conver start and end dates to date format
start <- ymd(start)
end <- ymd(end)
#Load data
ticker_data <- as.tibble(getSymbols(ticker, src = "yahoo", from = start, to = end, env = NULL))
#Filter data
ticker_data <- ticker_data %>%
mutate(adj_close = .[[5]], volume = .[[4]]) %>%
select(adj_close, volume) %>%
as.ts()
return(ticker_data)
}
#Makes linear models of specified time window (default 5) and returns coefficients:
decision <- function(ticker = "AAPL", start = "2017-10-01", end = "2017-10-30", time_window = 5, explanatory_plot = TRUE) {
ticker_data = load_ticker(ticker, start, end)
#Filters based on window
model_data <- tail(ticker_data, time_window)
#Models linear relationship on the prices
fit_price <- tslm(adj_close ~ trend, data = model_data)
#Models linear relationship on the volume
fit_volume <- tslm(volume ~ trend, data = model_data)
#Stores the coefficients
model_coefs <- tibble("Price Model" = fit_price$coefficients, "Volume Model" = fit_volume$coefficients)
#Returns the decision based on calculations
if (model_coefs$`Price Model`[2] > 0 && model_coefs$`Volume Model`[2] < 0){
print("The hype is over. SELL.")
} else if (model_coefs$`Price Model`[2] > 0 && model_coefs$`Volume Model`[2] > 0){
print("The hype is still existing. BUY/HOLD")
} else if (model_coefs$`Price Model`[2] < 0 && model_coefs$`Volume Model`[2] < 0){
print("The panic is over. BUY")
} else if (model_coefs$`Price Model`[2] < 0 && model_coefs$`Volume Model`[2] > 0){
print("Panic mode. Everyone is selling - SHORT SOON OR KYS (Or wait, idc)")
} else {
warning("Wtf, both slopes are either 0 or something strange is happening")
}
#Plot graph explaining the decisions if TRUE
if (explanatory_plot){
#Create dates and remove weekends
data_date <- tibble(date = seq(ymd(start), ymd(end), by = "day")) %>%
mutate(wday = weekdays(date)) %>%
filter(!(wday == "Saturday" | wday == "Sunday")) %>%
select(date)
#Adjustments as one stocks exclude "last date" from search for some reason
data_date <- data_date[1:(nrow(data_date)-1),]
#Add stock data to data_plot
ticker_data = as.tibble(ticker_data) %>%
mutate(date = data_date$date) %>%
select(date, adj_close, volume)
#Create fitted values data
data_fitted <- tibble(date = tail(data_date, time_window)$date, fit_price = fit_price$fitted, fit_vol = fit_volume$fitted)
#Create price plot
plot_price <- ggplot(data = ticker_data) +
geom_line(mapping = aes(x = date, y = adj_close), col = "darkblue") +
geom_line(data = data_fitted, mapping = aes(date, y = fit_price), col = "red") +
labs(x = "", y = "Adjusted Close", title = "Price Model") +
theme_minimal()
#Create volume plot
plot_vol <- ggplot(data = ticker_data) +
geom_bar(mapping = aes(x = date, y = volume), fill = "darkgrey", stat = "identity") +
geom_line(data = data_fitted, mapping = aes(date, y = fit_vol), col = "red", lwd = 1.2) +
labs(x = "", y = "Volume", title = "Volume Model") +
theme_minimal()
#Arrange and plot
grid.arrange(plot_price, plot_vol, heights=c(2.5/4, 1.5/4))
}
}
#-------------------Testing-------------------------
# Apple Stock
ticker_data = decision(ticker = "AAPL",time_window = 5)
#Day 1.
decision(ticker = "VRX", start = "2017-10-07", end = "2017-11-07", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: 6.49 %"
decision(ticker = "VIRT", start = "2017-10-01", end = "2017-10-30", time_window = 5, explanatory_plot = T)
#"The hype is over. SELL.", "FACT: 4.82 %"
#S&P 500: 0.12%
#Day 2.
decision(ticker = "SYNA", start = "2017-10-09", end = "2017-11-09", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -2.23 %"
decision(ticker = "PCRX", start = "2017-10-09", end = "2017-11-09", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -2.43 %"
decision(ticker = "VRTU", start = "2017-10-09", end = "2017-11-09", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -0.31 %"
decision(ticker = "INGN", start = "2017-10-09", end = "2017-11-09", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: 2.46 %"
decision(ticker = "HALO", start = "2017-10-09", end = "2017-11-09", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -3.07 %"
#S&P 500: -0.38%
#Day 3.
decision(ticker = "SYNA", start = "2017-10-09", end = "2017-11-10", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -6.84 %"
decision(ticker = "PCRX", start = "2017-10-09", end = "2017-11-10", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: 3.89 %"
decision(ticker = "VRTU", start = "2017-10-09", end = "2017-11-10", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -0.22 %"
decision(ticker = "INGN", start = "2017-10-09", end = "2017-11-10", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -0.47%"
decision(ticker = "HALO", start = "2017-10-09", end = "2017-11-10", time_window = 5, explanatory_plot = T)
#"The hype is still existing. BUY/HOLD", "FACT: -5%"
#Total 2.67%
|
558bc07d9fb7e27bba353dbc526e4056c2a6c4a1
|
af628cd55f7b80b9cc6ec6f98b9f225300f04d3c
|
/R_files/epiallele.R
|
8068eb6f91aefb8cc7eac1d2dbab7db089db2ef5
|
[
"MIT"
] |
permissive
|
mferrodo/bed-beta
|
1aa648f29505388fb40a3fe02dc9c2bcb53d5414
|
41fac1c9fb0aa6b80f8f5bdf4839d8b600864010
|
refs/heads/main
| 2023-04-28T03:05:43.995176
| 2021-03-12T10:55:22
| 2021-03-12T10:55:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,282
|
r
|
epiallele.R
|
Bayesian epiallele detection
Copyright (C) 2019 James E. Barrett (regmjeb@ucl.ac.uk)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
epiallele <- function(input){
for (src in dir('R_files')){
source(paste('R_files/',src,sep=''))
}
Z <- input$Z
e.depth <- input$e.depth
# ------------------------------------------------------------------ #
# Fit model to regions with sufficient depth
# ------------------------------------------------------------------ #
regions <- which(apply(e.depth$N, 1, FUN='median', na.rm=T)>=100)
Ed <- vector('list',length(regions))
pB <- txtProgressBar(min=1,max=length(regions), width =50L, style = 3)
counter <- 1
for (n in regions){
#setTxtProgressBar(pB, counter)
cat(n,'\n')
set.seed(32*n)
# tryCatch({
# result <- epiallele_distance(Z, e.depth$ind[n,], n)
# Ed[[counter]]$dist <- result$epi.profile.margin
# Ed[[counter]]$epi <- result$X
# Ed[[counter]]$ind <- e.depth$ind[n,]
# Ed[[counter]]$N <- e.depth$N[n,]
# }, error=function(e) NULL)
result <- epiallele_distance(Z, e.depth$ind[n,], n)
if(all(!is.na(result))){
Ed[[counter]]$dist <- result$epi.profile.margin
Ed[[counter]]$epi <- result$X
Ed[[counter]]$ind <- e.depth$ind[n,]
Ed[[counter]]$N <- e.depth$N[n,]
} else {
Ed[[counter]]$dist <- NA
Ed[[counter]]$epi <- NA
Ed[[counter]]$ind <- e.depth$ind[n,]
Ed[[counter]]$N <- e.depth$N[n,]
}
counter <- counter + 1
}
close(pB)
return(Ed)
}
|
c3a209d88deab03e5241f72ab301969315597383
|
f4e6801e5959d14512e0597d346afb857a58ac76
|
/readCufflinksRes.R
|
a091aba575898f874f8ca387b42f694cbe04d3c3
|
[] |
no_license
|
npinel/metaomics
|
9f5739a6b42c68b18c0e38c5df870db2fe9bbff9
|
b1bdfd273372c74895a862ebff9ff1bfdcd57b34
|
refs/heads/master
| 2016-09-06T03:01:47.133811
| 2014-12-26T02:15:20
| 2014-12-26T02:15:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,848
|
r
|
readCufflinksRes.R
|
#!/bin/R
##########################################################################################
# Author(s): #
# 1. Shaman Narayanasamy (shaman.narayanasamy@uni.lu) #
# Affiliation(s): #
# 1. Luxembourg Center for Systems Biomedicine #
# Project(s): #
# Time-resolved multi-omic analysis of lipid-accumulating communities #
# Date: 03 November 2013 #
# Script: 04b.analysis.idba.sh #
# Purpose: #
# R script for reading in cufflinks data sets #
# NOTES: #
# TO DO: 1. Make script more dynamic. Provide arguements for the variables. #
# 2. Rearrange the time points accordingly and match RNA and DNA measurements #
# 3. Use R packages for analysis? Or implement custom analysis (discuss with #
# Nic) #
# 4. Make a wrapper shell script #
##########################################################################################
##########################################################################################
# Function: Read in all the cufflinks fpkm values based on a list of files and merge
# the results into a single time series table.
# NOTE: Same function can be used for MG or MT data sets provided the mappings and
# corresponding annotation file is used
##########################################################################################
ts.read <- function(file.list){
M <- read.delim(file.list[1])[,c("locus","FPKM")]
for (i in 2:length(file.list)){
M1 <- read.delim(file.list[i])[,c("locus","FPKM")]
M <- merge(M,M1,by="locus")
}
colnames(M)[2:ncol(M)] <- file.list[1:length(file.list)]
return(M)
}
##########################################################################################
# Function: Integrate MT and MG data sets. Performs RNA/DNA normalization for whatever
# sets of samples provided resulting in a single normalized matrix.
# NOTE: Draft version.
# TODO: 1. Data transformation (log)
# 2. Handling 0 expression (MT) values (division of 0)
# 3. Handling 0 MG values (division by 0)
##########################################################################################
MTMG.normalize <- function(MT.mat,MG.mat){
# Simplify sample names MG
colnames(MG.mat)[2:ncol(MG.mat)] <- gsub(pattern="_MG.genes.fpkm_tracking",replacement="",x=colnames(MG.mat)[2:ncol(MG.mat)])
# Simplify sample names MG
colnames(MT.mat)[2:ncol(MT.mat)] <- gsub(pattern="_MT.genes.fpkm_tracking",replacement="",x=colnames(MT.mat)[2:ncol(MT.mat)])
# Find intersection between columns of MT and MG data to make sure the samples are paired
MGMT.paired <- intersect(colnames(MT.mat)[2:ncol(MT.mat)],colnames(MG.mat)[2:ncol(MG.mat)])
# Normalize the matrix by RNA/DNA measure
MTMG.norm.mat<- cbind(MT.mat[,1],MT.mat[,MGMT.paired]/MG.mat[,MGMT.paired])
colnames(MTMG.norm.mat)[1] <- "locus"
return(MTMG.norm.mat)
}
##########################################################################################
# Function: Integrate MT and MG data sets. Performs RNA/DNA normalization for whatever
# sets of samples provided resulting in a single normalized matrix.
# NOTE: Draft version.
# TODO: 1. Data transformation (log)
# 2. Handling 0 expression (MT) values (division of 0)
# 3. Handling 0 MG values (division by 0)
##########################################################################################
require(stringr)
gff.unlist=function(gff.file){
x <- read.delim(gff.file,header=F)
y <- cbind(str_split_fixed(x[,dim(x)[2]],";",3))
y[,1] <- gsub("ID=","",y[,1])
y[,2] <- gsub("Name=","",y[,2])
y[,11] <- gsub("Ontology_term=","",y[,11])
y[which(annot[,2]==""),2]=NA
y[which(annot[,3]==""),3]=NA
y[which(annot[,10]==""),10]=NA
y[which(annot[,11]==""),11]=NA
x <- cbind(x[,-dim(x)[2]],y)
x<-cbind(x,str_split_fixed(x[,1],":",2)[,1])
colnames(x) <- c("ID","source","feature","start","stop","score","strand","frame","feature_ID","function(EC)","Ontology_term","contig")
x[,1] <- paste(x[,1],":",x[,4]-1,"-",x[,5],sep="")
return(x)
}
##########################################################################################
# Function: Handling exceptions in the annotation of transcripts
#
# NOTE: This came about when I realized that cufflinks seems to call transcripts that
# were not in the annotation file (gff). In most these cases, cufflinks concatenates
# two transcripts that are close by (loci), while those loci, do not exist within
# the gff file. Cufflinks also calls transcripts that do not even exist at all in
# the annotation file. For now, we can just omit those files.
# TODO: 1. Clean up
# 2. Create a table with all the "bad ids"
# 3. Convert to function
# 4. Figure out what to do with the missing values
##########################################################################################
# create a test matrix
test <- matrix(nrow=nrow(bad.ids.3),ncol=3)
# start loop
for (i in 1:nrow(bad.ids.3)){
# extract the contig containing a given transcript
sub <- annot.2[which(annot.2$contig==as.character(bad.ids.3$contig[i])),]
print(length(which(sub$start==bad.ids.3$start[i])))
start.loc <- length(which(sub$start==bad.ids.3$start[i]))
print(length(which(sub$stop==bad.ids.3$stop[i])))
stop.loc <- length(which(sub$stop==bad.ids.3$stop[i]))
if (start.loc > 0 & stop.loc > 0){
id.1 <- as.character(sub[which(sub$start==bad.ids.3$start[i]),"feature_ID"])
id.2 <- as.character(sub[which(sub$stop==bad.ids.3$stop[i]),"feature_ID"])
test[i,] <- c(as.character(bad.ids.3$cufflink_id[i]),id.1,id.2)
}
else{
test[i,] <- c(as.character(bad.ids.3$cufflink_id[i]),NA,NA)
}
print(test[i,])
}
bad.ids.4 <- cbind(bad.ids.3,test[,2:3])
colnames(bad.ids.4)[5:6] <- c("ID_1","ID_2")
bad.ids.5<-cbind(as.character(bad.ids.4$cufflink_id),NA,NA,as.character(bad.ids.4$start),as.character(bad.ids.4$stop),NA,NA,NA,paste(bad.ids.4$ID_1,bad.ids.4$ID_2,sep=";"),NA,NA,as.character(bad.ids.4$contig))
colnames(bad.ids.5) <- colnames(annot.2)
annot.3<-rbind(annot.2,bad.ids.5)
##########################################################################################
# Function: Main
# NOTE: At the moment, this is all the main script does. Possible to provide more options
# depending on the input.
# TODO: Provide more options for expression matrices.
# 1. Reading in a paired sample data set
# 2. Reading in a time series data set
##########################################################################################
args <- commandArgs(trailingOnly=TRUE)
print("Reading in list of fpkm_tracking files: args[1]")_
system("date")
file.list <- as.character(read.table(args[1],header=FALSE)[,1])
MT.mat <- ts.read(MT.file.list)
MG.mat <- ts.read(MG.file.list)
E.mat <- MTMG.normalize(MT.mat,MG.mat)
print("Writing out expression matrix")
out.file <- args[2]
write.table(E.mat,out.file,quote=F,sep="\t",row.names=F)
system("date")
MG.cols<-str_split_fixed(colnames(MG.mat),"/",11)[-1,11]
MG.cols<-gsub("_MG.genes.fpkm_tracking","",MG.cols)
MT.cols<-str_split_fixed(colnames(MT.mat),"/",11)[-1,11]
MT.cols<-gsub("_MT.genes.fpkm_tracking","",MT.cols)
|
b21f05d1534afaaadea9214a6a6dc1849fe2ca96
|
70e55527de16dfa752805ad30dea5f8783f431ca
|
/man/beach_get_data.Rd
|
2c31bb36f3fb1d1df9df305206579c7226702d3c
|
[] |
no_license
|
mikkelkrogsholm/badevand
|
040d91b3f14dbf31c17b6a704517227d52d6f5b0
|
d193d03abad0edf02ffdcdc57d9e0d534098a7cf
|
refs/heads/master
| 2020-03-20T18:06:50.766481
| 2018-06-16T10:42:06
| 2018-06-16T10:42:06
| 137,571,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 353
|
rd
|
beach_get_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/the_functions.R
\name{beach_get_data}
\alias{beach_get_data}
\title{Retrieve data for danish beaches}
\usage{
beach_get_data()
}
\value{
a list of tibbles with the data
}
\description{
Retrieve data for danish beaches
}
\examples{
beach_data <- badevand::beach_get_data()
}
|
cbae561a6890e308354bb0a12c88cd57670bce02
|
91b06d684a3c638d6053241f5599be3f0abcd1e4
|
/Part2_실전분석/Stage5_GoogleChart/s5_2_Script.R
|
4a3dbbbdcab5bdde3d18688243be147d97d92b16
|
[] |
no_license
|
limwonki0619/R_Data_Analysis
|
2680b07e68e5f44afd5c167a7338487c4205da62
|
8efd42061be81996b0f3eb4ee6e2e1bd3ff3d8b0
|
refs/heads/master
| 2020-05-29T13:19:54.578617
| 2019-08-26T11:33:42
| 2019-08-26T11:33:42
| 189,156,993
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,654
|
r
|
s5_2_Script.R
|
# 예제 5-21. 타임라인 그래프 그리기
library(googleVis)
dat <- data.frame(Term=c("1","2","3"),
President=c("Whasington", "Adams", "Jefferson"),
start=as.Date(x=c("1789-03-29", "1797-02-03", "1801-02-03")),
end=as.Date(x=c("1797-02-03", "1801-02-03", "1809-02-03")))
tl <- gvisTimeline(data=dat[,-1], rowlabel="President",
start="start", end="end")
tl <- gvisTimeline(data=dat, barlabel="President",
start="start", end="end")
plot(tl)
dat <- data.frame(Room=c("Class 1","Class 2","Class 3"),
Language=c("English Team", "German Team", "French Team"),
start=as.POSIXct(c("2015-09-14 14:00", "2015-09-14 15:00",
"2015-09-14 14:30")),
end=as.POSIXct(c("2015-09-14 15:00", "2015-09-14 16:00",
"2015-09-14 15:30")))
tl <- gvisTimeline(data=dat, rowlabel="Rev_Team",
start="start", end="end")
plot(tl)
# 예제 5-22. gvisScatterChart 사용하기
txt1 <- gvisScatterChart(women)
plot(txt1)
# 여러 옵션들 사용하기
txt2 <- gvisScatterChart(women, options=list(legend="none",
lineWidth="3", pointSize="3",
title="Women", vAxis="{title:'weight (lbs)'}",
crosshair="{ trigger: 'both' }",
hAxis="{title:'height (in)'}", width=800, height=600))
plot(txt2)
# 포인터 모양 변경하기
mtx <- matrix(nrow=6,ncol=6)
mtx[col(mtx)==row(mtx)] <- 1:6
dat <- data.frame(X=1:6, mtx)
SC <- gvisScatterChart(dat,
options=list(title="Customizing points",
legend="right",
pointSize=30,
series="{
0: { pointShape: 'circle' },
1: { pointShape: 'triangle' },
2: { pointShape: 'square' },
3: { pointShape: 'diamond' },
4: { pointShape: 'star' },
5: { pointShape: 'polygon' }
}",width=800, height=400))
plot(SC)
# 예제 5-23. Treemap 사용하기 – gvisTreemap( )
tm <- gvisTreeMap(Regions, idvar="Region", parentvar="Parent",
sizevar="Val", colorvar="Fac")
plot(tm)
# 색상을 다르게 설정하기
tm2 <- gvisTreeMap(Regions, "Region", "Parent", "Val", "Fac",
options=list(width=600, height=500,
fontSize=13,
minColor='#EDF8FB',
midColor='#66C2A4',
maxColor='#006D2C',
headerHeight=20,
fontColor='black',
showScale=TRUE))
plot(tm2)
# 미국 전체 주를 Treemap 으로 표시하기
require(datasets)
states <- data.frame(state.name, state.area)
total=data.frame(state.area=sum(states$state.area), state.name="USA")
my.states <- rbind(total, states)
my.states$parent="USA"
my.states$parent[my.states$state.name=="USA"] <- NA
my.states$state.area.log=log(my.states$state.area)
statesTree <- gvisTreeMap(my.states, "state.name", "parent",
"state.area", "state.area.log")
plot(statesTree)
# 예제 5-24. 여러 개의 Google Chart 합치기 – gvisMerge( )
Pie1 <- gvisPieChart(CityPopularity)
Pie2 <- gvisPieChart(CityPopularity, options=list(
slices="{4: {offset: '0.2'}, 0: {offset: '0.3'}}",
title="City popularity",
legend="none",
pieSliceText="label",
pieHole="0.5",width=600))
plot(gvisMerge(Pie2, Pie1, tableOptions = "cellspacing=\"20\" bgcolor=\"#AABBCC\"",
horizontal=TRUE))
Geo <- gvisGeoChart(Exports, "Country", "Profit",
options=list(width=250, height=100))
Tbl <- gvisTable(Exports,
options=list(width=250, height=300))
GT <- gvisMerge(Geo,Tbl, horizontal=FALSE)
plot(GT)
M <- gvisMotionChart(Fruits, "Fruit", "Year",
options=list(width=400, height=410)) # API Key 필요
GTM <- gvisMerge(GT, M, horizontal=TRUE,
tableOptions="cellspacing=10")
plot(GTM)
line <- gvisLineChart(OpenClose, "Weekday", c("Open", "Close"),
options=list(legend="none", width=300, height=150))
column <- gvisColumnChart(OpenClose, "Weekday", c("Open", "Close"),
options=list(legend="none", width=300, height=150))
area <- gvisAreaChart(OpenClose, "Weekday", c("Open", "Close"),
options=list(legend="none", width=300, height=150))
bar <- gvisBarChart(OpenClose, "Weekday", c("Open", "Close"),
options=list(legend="none", width=300, height=150))
lcab <- gvisMerge(gvisMerge(line, column), gvisMerge(area, bar),
horizontal=TRUE, tableOptions="bgcolor=\"#AABBCC\"")
plot(lcab)
# 예제 5-25. Sankey Chart 로 표현하기
install.packages(c("devtools","RJSONIO", "knitr", "shiny", "httpuv"))
library(devtools)
install_github("mages/googleVis")
data1 <- read.csv("data/rchart연습.csv")
data1
require(googleVis)
san <- gvisSankey(data1, from="항목", to="목적지", weight="크기",
options=list(height=550, width=800,
sankey="{link:{color:{fill:'lightblue'}}}"
))
header <- san$html$header
header <- gsub('charset=utf-8', 'charset=euc-kr', header)
san$html$header <- header
plot(san)
# 경상남도 2015년 예산 표현하기
data1 <- read.csv("data/경상남도_2015_예산.csv")
data1
san <- gvisSankey(data1, from="총예산", to="세부지출", weight="예산",
options=list(height=550, width=800,
sankey="{link:{color:{fill:'lightblue'}}}" ))
header <- san$html$header
header <- gsub('charset=utf-8', 'charset=euc-kr', header)
san$html$header <- header
plot(san)
# 영화 배우와 출연 작품 표현
data1 <- read.csv("data/영화배우와_출연작품.csv")
data1
san <- gvisSankey(data1, from="배우명", to="영화명", weight="관객수",
options=list(height=550, width=800,
sankey="{link:{color:{fill:'lightblue'}}}" ))
header <- san$html$header
header <- gsub('charset=utf-8', 'charset=euc-kr', header)
san$html$header <- header
plot(san)
|
8a8edf2b3e0fc2638630de85c86b7fd3962a4cd7
|
6a0e07739a890d35f66077c57be86d8603e47b6b
|
/tests/testthat/test-getmatchinfo.R
|
736e2b84c88d91d27f4c3a89385d7fc0ec78dc35
|
[] |
no_license
|
jcrodriguez1989/Rtinder
|
57c651cfa7cfc1458e90a8a01d2cd03cde0fc534
|
6369a23dab8981a7ed5173e49e1a798733656cd7
|
refs/heads/master
| 2021-06-28T20:28:14.103631
| 2019-06-03T23:18:45
| 2019-06-03T23:18:45
| 126,035,088
| 18
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
test-getmatchinfo.R
|
context("test-getmatchinfo")
test_that("can get match info", {
skip_if_offline()
skip_if(!login())
updates <- getUpdates()
skip_if(length(updates$matches) == 0)
one_match <- updates$matches[[1]]
match_info <- getMatchInfo(one_match$id)
expect_true(length(match_info) > 0)
expect_true("id" %in% names(match_info))
expect_equal(one_match$id, match_info$id)
expect_equal(one_match$person$name, match_info$person$name)
})
|
6b103e1b979cc5624a1dfcd6827038822de29d27
|
40eaa9583862c6c73388b9f34628bf85429208e4
|
/scripts/aula_02.R
|
2e3b06c1fadc7b742d4ea686c6f0a74ecce8f1a7
|
[] |
no_license
|
maykongpedro/2021-02-25_R-Ciencia-de-Dados-1-CursoR
|
dcd46e80852ff9241b6ff30b60358b0a616921e2
|
d3f9a9494913fe3d17e208cdcd1dc1ad3fd66906
|
refs/heads/master
| 2023-03-25T18:16:59.637616
| 2021-03-16T01:28:38
| 2021-03-16T01:28:38
| 342,425,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
aula_02.R
|
# CTRL + SHIFT + R = Criar divisórias
# Importação --------------------------------------------------------------
# Carregar pacotes
library(readr)
# Carregando um ano
imbd <- readr::read_rds("dados/por_ano/imdb_2016.rds")
# Listando arquivos na pasta
arquivos <-
list.files("dados/por_ano/",
full.names = TRUE,
pattern = ".rds$")
# Testando purr
#purrr::map(1:10, sqrt)
# Abrindo todos os arquivos e unindo eles
imdb <- purrr::map_dfr(arquivos,
read_rds)
# -------------------------------------------------------------------------
# Carregando nome das colunas que consta em uma planilha separada
nome_colunas <-
readxl::read_excel(
"dados/imdb_nao_estruturada.xlsx",
sheet = 2
)
# Carregando base oficial pulando as 4 primeiras linhas
imdb <-
readxl::read_excel(
"dados/imdb_nao_estruturada.xlsx",
skip = 4,
col_names = FALSE,
n_max = 3713
)
|
23ebd37b66f8d412658f6518e8630df8f02b2da3
|
c7fe6a88582766a710325d3a9fc6b7328c1cfd7a
|
/tests/testthat.R
|
3d3da41c8d7bf4f8416f822368fc76ea6f1e8118
|
[] |
no_license
|
fergustaylor/openprescribingR
|
023c1b9cb30d269e3da1fc8329ec4f9b0e8d004b
|
124f6d0ed4ce41b2fc8f2ccca5b1ac98f19964ac
|
refs/heads/master
| 2021-01-16T00:10:06.271612
| 2018-04-24T18:08:39
| 2018-04-24T18:08:39
| 99,961,777
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
testthat.R
|
library(testthat)
library(openprescribingR)
test_check("openprescribingR")
|
b39e6da133628a553dc8518140ed05a91c3a2477
|
265850f90cab17e9ec70eac49ba1103d0c103cca
|
/ch04/src/verification/temp/04_mortality_smoking.R
|
626c3c942bcc4691aa07d91efa246382314533c2
|
[
"AFL-3.0"
] |
permissive
|
sdaza/dissertation
|
9a7bc287c9d963a6d833f841508c01382af7a38b
|
deae3b114ef2aeb03995f99825ad81457285322a
|
refs/heads/master
| 2023-02-27T21:32:14.283448
| 2021-02-08T15:51:58
| 2021-02-08T15:51:58
| 111,732,738
| 1
| 0
|
AFL-3.0
| 2020-07-22T14:28:11
| 2017-11-22T21:11:15
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 3,575
|
r
|
04_mortality_smoking.R
|
##############################
# generative model income mobility and mortality
# mortality and smoking differentials by income
# author: sebastian daza
##############################
library(data.table)
library(ggplot2)
source("src/utils.R")
# adjustment of baseline income smoking coefficients
# when rank rank slope effect is in action
v = exp(0.12/0.086)
mean(c(0.10, 0.05, 0.0, -0.05, -0.10))
mean(c(0.12, 0.05, 0.0, -0.05, -0.12))
c(-1.502,-1.748,0 ,-2.526,-3.29)
mean(c(0.35, 0.30, 0.20, -0.50, -0.70))
coeff = c(-0.9103386 ,-1.2483597, -1.6892769, -2.1046334, -2.8605010)
prop = NULL
eprop = NULL
nprop = NULL
for (i in seq_along(coeff)) {
prop[i] = exp(coeff[i]) / (1 + exp(coeff[i]))
}
prop
mean(prop)
for (i in seq_along(coeff)) {
eprop[i] = exp(coeff[i] + 0.12/0.086 * 0.28) / (1 + exp(coeff[i] + 0.12/0.086 * 0.28))
}
eprop
adj = c(1.40, 1.35, 1.10, 1.15, 1.15)
wadj = adj/max(adj)
wadj
wadj * 1.40
ncoeff = coeff * c(1.65, 1.35, 1.15, 1.10, 1.15)
ncoeff
for (i in seq_along(ncoeff)) {
nprop[i] = exp(ncoeff[i] + 0.12/0.086 * 0.28) / (1 + exp(ncoeff[i] + 0.12/0.086 * 0.28))
}
nprop
cat(paste0("{", paste0(round(ncoeff, 3), collapse = ","), "}"))
# fertility adjustment
f = c(1.10, 1.67, 1.69, 1.72, 1.90)
mean(f)
# read data
#path = "models/MobHealthRecycling/output/verification/smoking/"
path = "models/MobHealthRecycling/output/verification/microsimulation/"
ind = readMultipleFiles("individuals", path)
m = readMultipleFiles("mortality", path)
e = readMultipleFiles("environment", path)
p = readMultipleFiles("parameters", path)
dim(p)
setorder(e, iteration)
e[, .(iteration, nsi, population, le)]
e[iteration == 1]
p[iteration == 1, .(iteration, smoking_rank_slope_exp_coeff, move_decision_rate, prob_move_random)]
t = merge(p, e, all.y = TRUE, by = "iteration")
t[, .(iteration, population, nsi, smokers, le, smoking_rank_slope_exp_coeff, prob_move_random )]
prop.table(table(ind[age >= 30, income_type]))
prop.table(table(ind[, income_type]))
prop.table(table(m[age >= 30, income_type]))
prop.table(table(ind[age >= 30, smoker]))
s = ind[iteration == 1 & age >= 30, mean(smoker), income_type]
setorder(s, income_type)
s
s = ind[iteration == 2 & age >= 30 & age <=50, mean(smoker), income_type]
prop.table(table(ind[age >= 30 & iteration == 2, smoker]))
setorder(s, income_type)
s
s = ind[iteration == 3 & age >= 30 & age <=50, mean(smoker), income_type]
setorder(s, income_type)
s
# smoking values
table(m[rank_slope_exposure18 == 0, age])
table(m[total_rank_slope_exposure == 0, age])
m[, .(s = mean(rank_slope_exposure18)), income_type]
summary(m$rank_slope_exposure18)
summary(m$total_rank_slope_exposure)
cor(m[, .(total_rank_slope_exposure, rank_slope_exposure18)])
table(m$replicate)
# duplicates
anyDuplicated(m$id)
prop.table(table(m[age >= 30, smoker]))
table(m$generation)
# smoking status by income
tab = m[age >= 30, mean(smoker), income_type]
setorder(tab, income_type)
tab
mean(m[age >= 30, smoker])
t = m[age >= 30, .(smoking = mean(age)), .(smoker)]
diff(t$smoking)
test = m[age>=30, .(age = mean(age)), .(income_type, smoker)]
setorder(test, smoker, income_type)
test[, diff(age), income_type]
# logistic model on smoking
t = m[, mean(age), income_type]
setorder(t, income_type)
t
names(m)
summary(m$z_income_exposure30)
summary(m$total_z_income_exposure)
cor(m[, .(z_income_exposure30, total_z_income_exposure)])
model = glm(smoker ~ as.factor(income_type) + parent_smoker + z_income_exposure30 +
rank_slope_exposure18, data = m[age>=30], family = "binomial")
screenreg(model)
|
1fa27eea9700857244895c60b1f6bffcf969f9f9
|
142b34cde65a0cd726f63abec2aa93fc0bd8c057
|
/2_lab_class.R
|
8bfbf42baa64f08f61620e4f11969133f4e89a38
|
[] |
no_license
|
papenfuss/BINF90004
|
a92fc8bb4fbb42f13db1cbb09b830fbf30119a5d
|
a7bdd032de9e9a2afe83b522708d18cd110687e3
|
refs/heads/master
| 2021-09-08T23:18:48.230036
| 2021-08-30T00:55:27
| 2021-08-30T00:55:27
| 146,819,832
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,717
|
r
|
2_lab_class.R
|
#' Cancer Evolution and heterogeneity lab class
#' ============================================
#'
#' Author: Tony Papenfuss
#' ----------------------
#' Date: Monday 30th August 2021
#' -----------------------------
#'
#' The data you will be looking at was collected from a patient who died
#' from advanced melanoma. The primary tumour was obtained from an archival
#' Formalin Fixed Paraffin Embedded block. Multiple cores from the primary tumour
#' were sequenced. Multiple samples from distant metastases were collected
#' during autopsy and underwent whole exome sequencing. Two cores from the
#' primary and two from liver metastases have been processed
#' together with the germline sequencing data obtained from blood.
#'
#' The idea for this workshop is to perform copy number analysis on a subset of data
#' from a single case and manually reconstruct the evolutionary relationship of these
#' samples as a phylogenetic tree.
#'
#' #' The code for this workshop is available here:
#' http://github.com/papenfuss/BINF90004/
#'
#' The html version of this document is available here:
#' http://github.com/papenfuss/BINF90004/lab_class.html
#'
#' Before starting you need to run http://github.com/papenfuss/BINF90004/1_setup.R
#'
#' To pre-process these samples:
#' 1. The raw sequencing data was aligned to the human reference genome (hg19) using bwa mem.
#'
#' 2. Pileup files were generated using samtools, e.g. for the normal
#' samtools mpileup −f hg19.fasta −Q 20 normal.bam | gzip > normal.pileup.gz
#'
#' 3. The GC profile was summarised from the reference genome, e.g.
#' sequenza−utils.py GC−windows −w 50 hg19.fa | gzip > hg19.gc50Base.txt.gz
#'
#' 4. Finally, sequenza seqz files were generated, e.g. for one tumour sample
#' sequenza−utils.py pileup2seqz −gc hg19.gc50Base.txt.gz −n normal.pileup.gz −t tumor.pileup.gz | gzip > out.seqz.gz
#'
#' Load the required R libraries. setup.R installs these if necessary, so make sure you ran that.
library(copynumber)
library(sequenza)
library(stringr)
#' A bit more setup...
#' NB: Edit this line to get into the right directory
setwd("~/BINF90004")
input.files <- list.files(path="./data", full.names=TRUE)
input.files
#' Before undertaking copy number analysis using sequenza a few preprocessing steps
#' are needed. We have done these for you, but it is good to understand what was done.
#'
#'
#' Next, load one of the raw files and perform some exploratory plots
dat.1 <- read.delim(gzfile(input.files[1]), sep="\t")
head(dat.1)
#' The tumour sequencing depth looks like this:
dat.1$x = dat.1$position/1e6
plot(depth.tumor~x, dat.1, pch=20, cex=0.3, xlab="Position (Mb)", ylab="Counts", main="Tumor sequencing depth")
#' The germline sequencing depth looks like this:
plot(depth.normal~x, dat.1, pch=20, cex=0.3, xlab="Position (Mb)", ylab="Counts", main="Germline sequencing depth")
#' The depth ratio of these looks like this:
plot(log2(depth.ratio)~x, dat.1, pch=20, cex=0.3, xlab="Position (Mb)", ylab="log2(R)", main="Tumour/normal depth ratio")
#' If we compare the sequencing depths with the genomic GC, we see there is a strong bias.
plot(depth.normal~GC.percent, dat.1, pch=20, cex=0.3, xlab="%GC", ylab="Counts", main="GC bias")
plot(log2(depth.normal)~GC.percent, dat.1, pch=20, cex=0.3, xlab="%GC", ylab="log2(Counts)", main="GC bias")
plot(log2(depth.tumor)~GC.percent, dat.1, pch=20, cex=0.3, xlab="%GC", ylab="log2(Counts)", main="GC bias")
#' This is still present after normalising using the germline sample.
plot(log2(depth.ratio)~GC.percent, dat.1, pch=20, cex=0.3, xlab="%GC", ylab="log2(R)", main="GC bias")
#' Finally, run the code in run_sequenza.R and explore the sequenza output
|
a7fc0bcf5aff622a7d2c38e8d04e2db84af9dad0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/emulator/examples/betahat.fun.Rd.R
|
12140f19969cda82a42646f1a541b2f0e2a52944
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,922
|
r
|
betahat.fun.Rd.R
|
library(emulator)
### Name: betahat.fun
### Title: Calculates MLE coefficients of linear fit
### Aliases: betahat.fun betahat.fun.A
### Keywords: models
### ** Examples
data(toy)
val <- toy
H <- regressor.multi(val)
d <- apply(H,1,function(x){sum((0:6)*x)})
fish <- rep(2,6)
A <- corr.matrix(val,scales=fish)
Ainv <- solve(A)
# now add suitably correlated Gaussian noise:
d <- as.vector(rmvnorm(n=1,mean=d, 0.1*A))
betahat.fun(val , Ainv , d) # should be close to c(0,1:6)
# Now look at the variances:
betahat.fun(val,Ainv,give.variance=TRUE, d)
# now find the value of the prior expectation (ie the regression
# plane) at an unknown point:
x.unknown <- rep(0.5 , 6)
regressor.basis(x.unknown) %*% betahat.fun(val, Ainv, d)
# compare the prior with the posterior
interpolant(x.unknown, d, val, Ainv,scales=fish)
# Heh, it's the same! (of course it is, there is no error here!)
# OK, put some error on the old observations:
d.noisy <- as.vector(rmvnorm(n=1,mean=d,0.1*A))
# now compute the regression point:
regressor.basis(x.unknown) %*% betahat.fun(val, Ainv, d.noisy)
# and compare with the output of interpolant():
interpolant(x.unknown, d.noisy, val, Ainv, scales=fish)
# there is a difference!
# now try a basis function that has superfluous degrees of freedom.
# we need a bigger dataset. Try 100:
val <- latin.hypercube(100,6)
colnames(val) <- letters[1:6]
d <- apply(val,1,function(x){sum((1:6)*x)})
A <- corr.matrix(val,scales=rep(1,6))
Ainv <- solve(A)
betahat.fun(val, Ainv, d, func=function(x){c(1,x,x^2)})
# should be c(0:6 ,rep(0,6). The zeroes should be zero exactly
# because the original function didn't include any squares.
## And finally a sanity check:
f <- function(x){c(1,x,x^2)}
jj1 <- betahat.fun(val, Ainv, d, func=f)
jj2 <- betahat.fun.A(val, A, d, func=f)
abs(jj1-jj2) # should be small
|
3f4cb70ff24142d77382eddf4bb0d1f5ec53ddbf
|
e40d274ff6b9bd7e7f20998379f483543582c81f
|
/leaflet-ex/apps/snapmapapp/global.R
|
c1a26a51d8bf665b085563f82f491dbba28852e7
|
[] |
no_license
|
ua-snap/snap-r-tools
|
5be2dcc5171cf7289504f20e98ad3ec603e4ed57
|
c3f573c2abf11633b5262c4d98cfbde39854dbf4
|
refs/heads/master
| 2020-03-22T05:57:29.239067
| 2019-01-08T03:11:17
| 2019-01-08T03:11:17
| 139,602,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
global.R
|
library(shiny)
library(leaflet)
library(leaflet.extras)
|
e4d09d8bb670917c485efbafe89389320250bdb4
|
113ac2556bd26bd0906f964cd775d91efc609784
|
/devtools_history.R
|
fed2cfa962716e409b54d3a5e2903dd3da038081
|
[
"MIT"
] |
permissive
|
ColinFay/sweary
|
088804140c4fe0a2d832ac5cf4e257542b720223
|
acceec190aeb9f92a1ad2d48681e440aefca146d
|
refs/heads/master
| 2020-03-30T08:47:01.763455
| 2018-10-02T19:35:48
| 2018-10-02T19:35:48
| 151,039,141
| 0
| 0
| null | 2018-10-01T05:08:11
| 2018-10-01T05:08:11
| null |
UTF-8
|
R
| false
| false
| 309
|
r
|
devtools_history.R
|
usethis::use_build_ignore("devtools_history.R")
# Add package
usethis::use_package("attempt")
usethis::use_package("glue")
usethis::use_package("tokenizers")
# Clean DESC
usethis::use_tidy_description()
# Test
usethis::use_test("utils")
usethis::use_test("detect")
usethis::use_test("tokenize_words")
|
71aeee7023a3b9735c0156f32e578545783fcccc
|
aa815d377234c0ff51f6d24c551ec3fbc9fd2dfb
|
/R/Assignment3/rankhospital.R
|
3de59aefe2656d0bf4ebd06420c714749aae3f69
|
[] |
no_license
|
nbanguiano/stat101
|
c936ad91812e6d7b21b28ab04b41febd3f9a9f21
|
43ef824c7c822d11466217404d01e1a7ca3b7d8c
|
refs/heads/master
| 2021-01-10T19:33:39.012554
| 2014-05-15T09:32:39
| 2014-05-15T09:32:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,496
|
r
|
rankhospital.R
|
rankhospital <- function(s, o, n = "best") {
## Define the dataset
dset <- read.csv("outcome-of-care-measures.csv",
colClasses = "character")
## Check if the state passed exists in the datase
## if not, throw an error and stop
st <- dset$State
if (sum(grepl(s, st)) == 0) { stop("invalid state") }
## Assign the desease to its respective column number
## if it doesn't exist, stop
if (o == "heart attack") {o <- 11;}
else if (o == "heart failure") {o <- 17;}
else if (o == "pneumonia") {o <- 23;}
else { stop("invalid outcome") }
## Split the data by states, and create lists
## then define hotels and outcome by state
## by subseting the data frame for that state "[[s]]"
HBS <- split(dset$Hospital.Name, st)[[s]]
OBS <- as.numeric(split(dset[,o], st)[[s]])
## Only get complete cases
u <- complete.cases(HBS, OBS)
## Create a data frame with the data
## and order it
HBS <- HBS[u]; OBS <- OBS[u]
HBS <- HBS[order(OBS)]; OBS <- sort(OBS)
## Logic to understand "best" and "worst"
if (n == "best"){n = 1}
else if (n == "worst"){n = length(OBS)}
else {n = n}
## Check for ties, and sort the tied
## hospitals alphabetically
uu <- OBS == OBS[n]
HBS <- sort(HBS[uu])
## Return the requested position
## or the first tied in alphabetical order
return(HBS[1])
}
|
0ae50ce6937142ee8610d3318635ee882a16029d
|
ddf87d7410f5f63747758b8beaf0a4fe7c297796
|
/man/run_ed.Rd
|
bd76e258e03600638b1acaef0c7e77d77d8cbb08
|
[
"MIT"
] |
permissive
|
ashiklom/fortebaseline
|
3142ff38f305906489cf6e012a8f55fa3efaa51e
|
513ea9353c133b47f67b3023a78e56abb6384847
|
refs/heads/master
| 2021-07-23T16:30:12.074804
| 2020-04-30T17:15:43
| 2020-04-30T17:15:43
| 157,924,482
| 3
| 3
|
NOASSERTION
| 2023-01-22T10:39:45
| 2018-11-16T21:42:24
|
R
|
UTF-8
|
R
| false
| true
| 1,516
|
rd
|
run_ed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run-ed.R
\name{run_ed}
\alias{run_ed}
\title{Start an ED2 model run using `processx`}
\usage{
run_ed(
casename,
trait_values = list(),
start_date = "1902-06-01",
end_date = "1920-01-01",
trait_plasticity = FALSE,
multiple_scatter = FALSE,
crown_model = FALSE,
water_lim = TRUE,
out_root = getOption("fortebaseline.ed_root"),
ed_exe = fs::path(getOption("fortebaseline.ed_src_dir"), "ED", "build", "ed_2.1-opt"),
...
)
}
\arguments{
\item{casename}{Name}
\item{trait_values}{Named list of custom parameters}
\item{start_date, end_date}{Run start and end dates. Default is 1902-06-01 to
1920-01-01.}
\item{trait_plasticity}{Whether or not to enable the trait
plasticity scheme (default = `FALSE`)}
\item{multiple_scatter}{Whether or not to use the
multiple-scattering canopy RTM. If `FALSE` (default), use the
two-stream RTM.}
\item{crown_model}{Whether or not to use the finite canopy radius
model (default = `FALSE`)}
\item{water_lim}{Whether or not to run with water limitation to
photosynthesis (default = `TRUE`).}
\item{out_root}{Path to ED2 output root directory. Default is the option
`fortebaseline.ed_root`, and if unset, "ed2-output" in the project root
directory.}
\item{...}{Additional ED2IN namelist settings. Must be in all caps, and must
match settings in ED2IN.}
}
\value{
`processx` process for ED2 run.
}
\description{
Start an ED2 model run using `processx`
}
\author{
Alexey Shiklomanov
}
|
fdf0129d6f5e88348401db78287044d5467f8045
|
941a94c0941e284f9ef3e7f01d08a8f00ccc4e78
|
/R/compress.R
|
cd34de16dbf7854835a5d3a9b197eb8ea41f6c54
|
[] |
no_license
|
cran/GHap
|
d871c803ab5034b195147cf7be5bd7ecca1bdd44
|
05d12b36bc8c6d858a2229120edebd74b71cdb18
|
refs/heads/master
| 2022-07-05T10:30:08.113872
| 2022-07-01T20:50:05
| 2022-07-01T20:50:05
| 57,885,792
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,434
|
r
|
compress.R
|
#Function: ghap.compress
#License: GPLv3 or later
#Modification date: 13 May 2021
#Written by: Yuri Tani Utsunomiya & Marco Milanesi
#Contact: ytutsunomiya@gmail.com, marco.milanesi.mm@gmail.com
#Description: Compress phased data into GHap binary
ghap.compress <- function(
input.file=NULL,
out.file,
samples.file=NULL,
markers.file=NULL,
phase.file=NULL,
batchsize=NULL,
ncores=1,
verbose=TRUE
){
# Check input file prefix ----------------------------------------------------
if(is.null(input.file) == FALSE){
samples.file <- paste(input.file, "samples", sep=".")
markers.file <- paste(input.file, "markers", sep=".")
phase.file <- paste(input.file, "phase", sep=".")
}else if(is.null(phase.file)){
stop("Please provide a phase file!")
}else if(is.null(samples.file)){
stop("Please provide a samples file!")
}else if(is.null(markers.file)){
stop("Please provide a markers file!")
}
# Check if phase file exist
if(file.exists(phase.file) == FALSE){
stop("Could not find the phase file!")
}
# Check if samples file exist
if(file.exists(samples.file) == FALSE){
stop("Could not find the samples file!")
}
# Check if markers file exist
if(file.exists(markers.file) == FALSE){
stop("Could not find the markers file!")
}
# Check if out file exist
if(file.exists(paste(out.file,".phaseb",sep="")) == TRUE){
stop("Output file already exists!")
}else{
rnumb <- runif(n = 1, min = 1, max = 1e+6)
rnumb <- ceiling(rnumb)
tmp.file <- paste(tempdir(),"/tmp",rnumb,sep="")
}
# Load marker map file -------------------------------------------------------
ncores <- min(c(detectCores(), ncores))
if(verbose == TRUE){
cat("\nReading in marker map information... ")
}
marker <- fread(markers.file, header=FALSE,
colClasses = "character", nThread = ncores)
# Check if the map file contains correct dimension ---------------------------
if(ncol(marker) %in% c(5,6) == FALSE){
stop("\n\nMarker map contains wrong number of columns (expected 5 or 6)")
}
marker$V3 <- as.numeric(marker$V3)
if(ncol(marker) == 5){
tmp <- as.data.frame(matrix(data = NA, nrow = nrow(marker),
ncol = 6))
colnames(tmp) <- paste0("V",1:6)
tmp[,1:3] <- marker[,1:3]
idx <- which(is.na(tmp$V4))
tmp$V4[idx] <- as.numeric(tmp$V3[idx])/1e+6
tmp[,5:6] <- marker[,4:5]
marker <- tmp
}else{
marker$V4 <- as.numeric(marker$V4)
}
# Check if alleles are different ---------------------------------------------
equalalleles <- length(which(marker$V5 == marker$V6))
if(equalalleles > 0){
stop("\n\nThe map contains markers with A0 = A1!")
}
# Check for duplicated marker ids --------------------------------------------
dup <- which(duplicated(marker$V2))
ndup <- length(dup)
if(ndup > 0){
emsg <- paste("\n\nYour marker map file contains", ndup, "duplicated ids")
stop(emsg)
}
# Check if markers are sorted by bp ------------------------------------------
chr <- unique(marker$V1)
nchr <- length(chr)
chrorder <- chr[order(nchar(chr),chr)]
negpos <- diff(marker$V3)
negpos <- length(which(negpos < 0)) + 1
if(identical(chr,chrorder) == FALSE | negpos != nchr){
stop("\n\nMarkers are not sorted by chromosome and base pair position")
}
# Check for duplicated bp ----------------------------------------------------
dup <- paste(marker$V1,marker$V3)
dup <- which(duplicated(dup))
ndup <- length(dup)
note <- NULL
if(ndup > 0){
note <- paste(note, "\n[NOTE] Found", ndup,
"duplicated physical positions!")
}
# Map passed checks ----------------------------------------------------------
nmarkers <- nrow(marker)
if(verbose == TRUE){
cat("Done.\n")
cat(paste("A total of ", nmarkers,
" markers were found in ", nchr," chromosomes.\n",sep=""))
}
# Load sample file -----------------------------------------------------------
if(verbose == TRUE){
cat("Reading in sample information... ")
}
sample <- fread(samples.file, header=FALSE,
colClasses = "character", nThread = ncores)
sample <- as.data.frame(sample)
# Check if the sample file contains correct dimension ------------------------
if(ncol(sample) %in% 2:5 == FALSE){
stop("\n\nSample file contains wrong number of columns (expected 2 to 5)")
}
if(ncol(sample) < 5){
tmp <- as.data.frame(matrix(data = NA, nrow = nrow(sample), ncol = 5))
for(i in 1:ncol(sample)){
tmp[,i] <- sample[,i]
}
colnames(tmp) <- paste0("V",1:5)
sample <- tmp
}
sample$V3[which(sample$V3 == "0")] <- NA
sample$V4[which(sample$V4 == "0")] <- NA
sample$V5[which(is.na(sample$V5))] <- "0"
# Check for duplicated ids ---------------------------------------------------
dup <- which(duplicated(sample$V2))
ndup <- length(dup)
if(ndup > 0){
emsg <- paste("\n\nSample file contains", ndup, "duplicated ids!")
stop(emsg)
}
# Samples passed check -------------------------------------------------------
nsamples <- nrow(sample)
pop <- rep(sample$V1,each=2)
ids <- rep(sample$V2,each=2)
if(verbose == TRUE){
cat("Done.\n")
cat(paste("A total of ", nsamples, " individuals were found in ",
length(unique(pop)), " populations.\n",sep=""))
}
# Compute bit loss -----------------------------------------------------------
bitloss <- 8 - ((2*nsamples) %% 8)
if(bitloss == 8){
bitloss <- 0
}
linelen <- 2*nsamples
# Generate batch index -------------------------------------------------------
if(is.null(batchsize) == TRUE){
batchsize <- ceiling(nmarkers/10)
}
if(batchsize > nmarkers){
batchsize <- nmarkers
}
id1<-seq(1,nmarkers,by=batchsize)
id2<-(id1+batchsize)-1
id1<-id1[id2<=nmarkers]
id2<-id2[id2<=nmarkers]
id1 <- c(id1,id2[length(id2)]+1)
id2 <- c(id2,nmarkers)
if(id1[length(id1)] > nmarkers){
id1 <- id1[-length(id1)]; id2 <- id2[-length(id2)]
}
if(verbose == TRUE){
cat("Processing ", nmarkers, " markers in:\n", sep="")
batch <- table((id2-id1)+1)
for(i in 1:length(batch)){
cat(batch[i]," batches of ",names(batch[i]),"\n",sep="")
}
}
# Process line function ------------------------------------------------------
lineprocess <- function(i){
line <- scan(text=batchline[i], what = "character", sep=" ", quiet = TRUE)
if(length(line) != linelen){
emsg <- paste("\n\nExpected", linelen, "columns in line",i+nlines.skip[b],"of",
phase.file,"but found",length(line),"\n\n")
stop(emsg)
}
line <- c(line,rep("0",times=bitloss))
strings <- unique(line)
if(length(which(strings %in% c("0","1") == FALSE)) > 0){
stop("Phased genotypes should be coded as 0 and 1")
}else{
line <- paste(line, collapse = "")
nc <- nchar(line)
n <- seq(1, nc, by = 8)
line <- substring(line, n, c(n[-1]-1, nc))
line <- strtoi(line, base=2)
}
return(line)
}
# Iterate batches ------------------------------------------------------------
phase.con <- file(phase.file,"r")
nmarkers.done <- 0
nlines.read <- id2-id1+1
nlines.skip <- c(0,cumsum(nlines.read)[-length(nlines.read)])
for(b in 1:length(id1)){
# Load batch
batchline <- readLines(con = phase.con, n = nlines.read[b])
# Check if batch is ok
if(length(batchline) != nlines.read[b]){
emsg <- paste("\n\nExpected", nmarkers, "lines in",
phase.file,"but found", length(batchline)+nlines.skip[b],"\n\n")
stop(emsg)
}
# Transform lines
ncores <- min(c(detectCores(), ncores))
if(Sys.info()["sysname"] == "Windows"){
cl <- makeCluster(ncores)
results <- unlist(parLapply(cl = cl, fun = lineprocess, X = 1:length(batchline)))
stopCluster(cl)
}else{
results <- unlist(mclapply(FUN = lineprocess, X = 1:length(batchline), mc.cores = ncores))
}
# Write to output file
out.con <- file(tmp.file, "ab")
writeBin(object = results, con = out.con, size = 1)
close.connection(out.con)
# Log message
if(verbose == TRUE){
nmarkers.done <- nmarkers.done + (id2[i]-id1[i]) + 1
cat(nmarkers.done, "markers written to file\r")
}
}
# Last integrity check -------------------------------------------------------
batchline <- readLines(con = phase.con, n = 1)
if(length(batchline) != 0){
emsg <- paste("\n\nExpected", nmarkers, "lines in",
phase.file,"but found more\n\n")
stop(emsg)
}
# Close connection with phase file -------------------------------------------
close.connection(phase.con)
if(verbose == TRUE){
cat(nmarkers, "markers written to file\n\n")
}
# Output results -------------------------------------------------------------
sup <- file.copy(from = tmp.file, to = paste(out.file,".phaseb",sep=""))
sup <- file.remove(tmp.file)
if(verbose == TRUE){
cat("Phase file succesfully compressed.\n\n")
}
}
|
bb2a54a44bbd156ab3085fcfbe9127382238e423
|
9643f8349f55e9a05e9d2f7193b660e48b3fb6a8
|
/data-raw/make_data.R
|
0bd0614ca77973e7725bae7bdf7e782d53225414
|
[
"MIT"
] |
permissive
|
neuroconductor-releases/ggseg
|
01bf15d71c0e46343a22b2c19873e2d5dc171bb1
|
9babfd67acff27434c8ff7b4e70d3be0cd4368f2
|
refs/heads/master
| 2021-07-17T11:11:17.150228
| 2020-10-29T15:34:51
| 2020-10-29T15:34:54
| 224,288,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,600
|
r
|
make_data.R
|
# dk ----
devtools::load_all("../ggsegExtra/")
someData <- data.frame(
region = c("transverse temporal", "insula",
"precentral","superior parietal",
"transverse temporal", "insula",
"precentral","superior parietal"),
p = sample(seq(0,.5,.001), 8),
Group = c(rep("G1",4), rep("G2",4)),
stringsAsFactors = FALSE)
dk <- ggsegExtra::make_ggseg3d_2_ggseg(ggseg3d::dk_3d,
steps = 7,
tolerance = .5,
smoothness = 5,
keep = 0.05,
output_dir = "~/Desktop/test/")
ggseg(atlas=dk, show.legend = FALSE,
colour = "black", position="stacked",
mapping = aes(fill=region)) +
scale_fill_brain()
plot(dk)
ggplot() +
geom_brain(data = dk, aes(fill = region),
position = position_brain("vertical"),
show.legend = FALSE) +
scale_fill_brain()
ggplot(someData) +
geom_brain(data = someData, atlas = dk, aes(fill = region), show.legend = FALSE) +
scale_fill_brain()
usethis::use_data(dk,
internal = FALSE,
overwrite = TRUE,
compress="xz")
# aseg ----
# aseg_n <- mutate(aseg,
# kp = case_when(
# grepl("Left", label) & hemi != "left" ~ FALSE,
# grepl("Right", label) & hemi != "right" ~ FALSE,
# TRUE ~ TRUE)) %>%
# filter(kp) %>%
# select(-kp)
# aseg_n <- unnest(aseg_n, ggseg)
# aseg_n <- group_by(aseg_n, label, hemi, side, region, .id)
# aseg_n <- nest(aseg_n)
# aseg_n <- group_by(aseg_n, hemi, side, region)
# aseg_n <- mutate(aseg_n, .subid = row_number())
# aseg_n <- unnest(aseg_n, data)
# aseg_n <- ungroup(aseg_n)
# aseg_n <- as_ggseg_atlas(aseg_n)
aseg2 <- sf::st_as_sf(unnest(aseg, ggseg), coords = c(".long", ".lat")) %>%
group_by( label, .id, .subid) %>%
summarize(do_union=FALSE) %>%
sf::st_cast("POLYGON") %>%
ungroup() %>%
select(-.id, -.subid) %>%
group_by(label) %>%
summarise(geometry = sf::st_combine(geometry)) %>%
ungroup()
aseg_n <- aseg2 %>%
left_join(aseg) %>%
select(atlas, hemi, side, region, label, ggseg, geometry)
aseg_n %>%
ggseg(atlas = ., show.legend = TRUE,
colour = "black",
mapping = aes(fill=region)) +
scale_fill_brain("aseg")
ggplot() +
geom_brain(data = aseg_n) +
scale_fill_brain("aseg")
aseg <- aseg_n
usethis::use_data(aseg,
internal = FALSE,
overwrite = TRUE,
compress="xz")
|
75f0c6da6a45f01c8b6a4677ad2f87845bcadaef
|
662451aa61f4d9d5e0c4e5734f3b309fa166ecc9
|
/tests/testthat.R
|
5f9800862850dd9bc4d930542a21189f9472f610
|
[
"MIT"
] |
permissive
|
r-spatialecology/belg
|
1d251c97bc70537f6204d0e42e7ea460b84020c2
|
cf1bb63cf07e575aa78489991cee1e9e0c6470af
|
refs/heads/master
| 2022-12-21T14:07:03.572496
| 2022-12-15T15:09:31
| 2022-12-15T15:09:31
| 122,247,898
| 10
| 3
|
NOASSERTION
| 2020-04-29T11:50:27
| 2018-02-20T19:50:56
|
R
|
UTF-8
|
R
| false
| false
| 52
|
r
|
testthat.R
|
library(testthat)
library(belg)
test_check("belg")
|
843be6e4a6f6dc16ebc2e1e1fb96d8fc7847a116
|
576b07d2c2370aae0fd6abd083265a5ac4c55bfc
|
/plot1.R
|
6601fe4da2e3a0545a9ca58e33abe63c5cb8d94e
|
[] |
no_license
|
cdiako16/ExData_Plotting1
|
d7aa7c6dae91c52b19ce1152454e4c3b7e139cdd
|
96002c4b324016322b0869804016c9acbb41aed0
|
refs/heads/master
| 2021-01-22T21:27:48.536143
| 2016-08-29T06:48:46
| 2016-08-29T06:48:46
| 66,723,051
| 0
| 0
| null | 2016-08-27T16:03:53
| 2016-08-27T16:03:53
| null |
UTF-8
|
R
| false
| false
| 1,223
|
r
|
plot1.R
|
#EXPLORATORY DATA ANALYSIS PROJECT 1
#Code for Plot 1
if(!file.exists("data")) {
dir.create("data")
}
fileUrl<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "./data/powerconsumption.zip")
unzip(zipfile="./data/powerconsumption.zip",exdir="./data")
library(dplyr)
# reading in the data
powerConsumption <- read.table("./data/household_power_consumption.txt", header= TRUE, sep =";", na.strings = "?")
head(powerConsumption)
str(powerConsumption)
powerConsumption <- tbl_df(powerConsumption)
#Subsetting the data for the required dates
consumptionDate <- filter(powerConsumption, grepl("^[1,2]/2/2007", Date))
#Could also use ConsumptionDate <- powerConsumption[powerConsumption$Date %in% c("1/2/2007","2/2/2007") ,]
# Checking the dimension, head and tail
dim(consumptionDate)
head(consumptionDate)
tail(consumptionDate)
#Converting gloabal active power to numeric
globalActivePower <- as.numeric(consumptionDate$Global_active_power, na.rm = TRUE)
#Plotting the graph as png
png("plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
0a9feb703d494eae7192dc9095f8e7dc1420875d
|
246d965e4e8fb5e2a6c49598d1a1c6e99b3a24a1
|
/temp/app.R
|
e4049850165a6b6d101c842345e097e51d156b4e
|
[] |
no_license
|
NIHRDECncl/R-tools
|
75ebab87653dcdf1d3d27e8e7a8a1549d4811e21
|
03cb62221925f1387c66c131fa511c85a943900b
|
refs/heads/master
| 2021-01-12T15:49:57.109329
| 2018-01-08T11:11:44
| 2018-01-08T11:11:44
| 71,878,066
| 0
| 0
| null | 2018-01-08T11:12:21
| 2016-10-25T08:53:59
|
HTML
|
UTF-8
|
R
| false
| false
| 761
|
r
|
app.R
|
#ui.R
library(shiny)
tagList(
navbarPage( "Title",
tabPanel("Navbar 1",
sidebarPanel(
),
mainPanel(
tabsetPanel(
tabPanel("Table",
h4("Iris"),
tableOutput("table")),
tabPanel("Text",
h4("VText"),
verbatimTextOutput("vtxt")),
tabPanel("Header")
)
)),
tabPanel("Navbar 2")))
#server.R
function(input, output) {
output$table <- renderTable({
iris
})
}
|
73d24e2586a2053132e6dc4468e9d39679d6ca53
|
93204f1ff02f6e20f2a44d077ea8712ca914cecd
|
/R/05_countCumulativeEventsByDate.R
|
ea14145fb3459c80e656cf35685f6b88d8373904
|
[] |
no_license
|
cjcampbell/iNaturalist-Identifiers
|
b5ae5db1929a36fdeb8146e92e5ca3b7a21d3735
|
13adc0a9c48817453aeb9df5a4dc3a3771c40322
|
refs/heads/main
| 2023-04-15T14:46:08.151368
| 2023-02-27T13:54:39
| 2023-02-27T13:54:39
| 606,906,415
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,340
|
r
|
05_countCumulativeEventsByDate.R
|
source('/srv/gspeedq32/mbelitz/iNatIDers/R/00_setup.R')
library(purrr)
print(paste0( "Beginning process query for script 5 at ", Sys.time()) )
iNat <- DBI::dbConnect(RSQLite::SQLite(), dbname = "iNat.db")
# Obs by date -------------------------------------------------------------
print("Counting observations by date")
obsByDate <- dbGetQuery(
iNat,
paste0(
"
SELECT
`n_obs`, `yr_precise`, SUM(`n_obs`)
OVER (
ORDER BY `yr_precise` ROWS UNBOUNDED PRECEDING
) AS `cum_obs`
FROM
(
SELECT
CAST(`obs_created_at_year` AS NUMERIC) + CAST(`obs_created_at_month` AS FLOAT) / 12 AS `yr_precise`,
COUNT(*) AS `n_obs`
FROM
(SELECT DISTINCT * FROM obs)
GROUP BY `obs_created_at_year`, `obs_created_at_month`
)
"
)
)
saveRDS(obsByDate, file = file.path(wd$bin, "obsByDate.rds"))
# IDs made for others by date ---------------------------------------------
print("Counting identifications made for others by date")
idsByDate <- dbGetQuery(
iNat,
paste0(
"
SELECT
`n_ids`, `yr_precise`, SUM(`n_ids`)
OVER (
ORDER BY `yr_precise` ROWS UNBOUNDED PRECEDING
) AS `cum_IDs`
FROM
(
SELECT
CAST(`id_created_at_year` AS NUMERIC) + CAST(`id_created_at_month` AS FLOAT) / 12 AS `yr_precise`,
COUNT(*) AS `n_ids`
FROM
(SELECT DISTINCT * FROM ids)
WHERE `is_observer` = 0
GROUP BY `id_created_at_year`, `id_created_at_month`
)
"
)
)
saveRDS(idsByDate, file = file.path(wd$bin, "idsByDate.rds"))
# n Identifiers IDing for others by date ----------------------------------
IDersByDate <- dbGetQuery(
iNat,
paste0(
"
SELECT
`n_IDers`, `yr_precise`, SUM(`n_IDers`)
OVER (
ORDER BY `yr_precise` ROWS UNBOUNDED PRECEDING
) AS `cum_IDers`
FROM
(
SELECT
CAST(`id_created_at_year` AS NUMERIC) + CAST(`id_created_at_month` AS FLOAT) / 12 AS `yr_precise`,
COUNT(DISTINCT(`id_user.id`)) AS `n_IDers`
FROM
(SELECT DISTINCT * FROM ids)
WHERE `is_observer` = 1
GROUP BY `id_created_at_year`, `id_created_at_month`
)
"
)
)
saveRDS(IDersByDate, file = file.path(wd$bin, "IDersByDate.rds"))
# Proportion with vision - 1st ID -----------------------------------------
prop_vision <- dbGetQuery( iNat,
"SELECT `yr_precise`, COUNT(*) AS `n_ids`, is_observer, SUM(vision) AS `n_vision` FROM
(SELECT vision, is_observer, CAST(`id_created_at_year` AS NUMERIC) + CAST(`id_created_at_month` AS FLOAT) / 12 AS `yr_precise` FROM ids)
GROUP BY `yr_precise`, `is_observer`;")
prop_vision %>%
dplyr::mutate(n_novision = n_ids-n_vision) %>%
replace(is.na(.), 0) %>%
pivot_longer(cols = c(n_vision, n_novision)) %>%
dplyr::select(yr_precise, is_observer, name, value) %>%
saveRDS(., file = file.path(wd$bin, "prop_vision.rds"))
# Combine and conclude ----------------------------------------------------------------
dbDisconnect(iNat)
# Combine outputs.
df_l <- lapply(c("obsByDate.rds", "idsByDate.rds", "IDersByDate.rds"), function(x) {
readRDS(file.path(wd$bin, x))
})
df <- purrr::reduce(df_l, dplyr::full_join, by = "yr_precise")
write.csv(df, file = file.path(wd$out, "cumulativeEventsByDate.csv"), row.names = F)
print(paste0( "Concluding process at ", Sys.time()) )
|
0b1795f18047f154463b85ed86875b6462901b23
|
93f8e647685105c9c07b78b3c881638a6101b0d8
|
/man/rpart.rules.Rd
|
c0c101a9505748067b9605d18036d6cde30c97e2
|
[] |
no_license
|
timelyportfolio/rpart.utils
|
5ed2a35f80025d0296fd80217b44caf8e2f76b8f
|
5caae906e019b30f0f90afcb9f946726aa9633aa
|
refs/heads/master
| 2021-01-22T12:45:33.923072
| 2014-12-17T22:48:01
| 2014-12-17T22:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 419
|
rd
|
rpart.rules.Rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{rpart.rules}
\alias{rpart.rules}
\title{Returns a list of strings summarizing the branch path to each node.}
\usage{
rpart.rules(object)
}
\arguments{
\item{object}{an rpart object}
}
\description{
Returns a list of strings summarizing the branch path to each node.
}
\examples{
library(rpart)
fit<-rpart(Reliability~.,data=car.test.frame)
rpart.rules(fit)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.