blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3b559f97c5b1021fc613f7745cbbc8e0d9ff2cd6
|
f08cd32750b98a195a67fdff699f0a835a2e615f
|
/Image_Processing/knit/knit.R
|
2000039c86b4bc5df8e7681528be27e0fb7d5504
|
[
"MIT"
] |
permissive
|
katharynduffy/LandCarbonAdvances
|
cb4f498561a8893625db5104df1042f7972ed93a
|
591fc6480db4be32f89a487429e354819bac11a1
|
refs/heads/master
| 2020-05-21T02:31:54.089833
| 2019-05-21T16:10:11
| 2019-05-21T16:10:11
| 185,880,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 383
|
r
|
knit.R
|
## This file is run by the instructor to prepare the teaching materials:
## it knits every .Rmd under Image_Processing/, extracts (purls) the R code
## chunks, and moves the extracted .R scripts into the codes/ subdirectory.
library(knitr)

# Remember the current directory and restore it at the end.
wd <- getwd()
setwd('Image_Processing/')

# `pattern` is a regular expression, not a shell glob, so anchor the extension.
rmd_files <- dir(pattern = '\\.Rmd$', full.names = TRUE)
# Iterate directly over the files (also safe when no .Rmd file is found,
# unlike 1:length(rmd_files), which would yield c(1, 0)).
for (rmd in rmd_files) {
  knit(rmd)  # render the teaching document
  purl(rmd)  # extract the R code chunks into a .R file
}

# Move the purled .R scripts into the codes/ directory.
code_files <- dir(pattern = '\\.R$')
file.rename(from = code_files, to = paste0('codes/', code_files))

setwd(wd)
|
7efd5614d25464b0a5329c5d6b6acff1db8ce870
|
874878b7cb5361362ce01d6c98f86723304bb605
|
/body_cam.R
|
91253d0ae5caa79744bdfa104a298deb5c3e5bbf
|
[] |
no_license
|
ConnerArdman/Police-Shootings
|
f648f4dd0b6a4a64a5065b6834875ab6d0680726
|
90b3fa8dfc60d91e58cc78cec663a7b1afcf9660
|
refs/heads/master
| 2022-01-25T19:46:25.916229
| 2019-07-04T00:23:12
| 2019-07-04T00:23:12
| 109,912,977
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,576
|
r
|
body_cam.R
|
# Analysis of police-shooting data: compares shootings by officers with and
# without body cameras, by whether the suspect was armed and whether they fled.
# Percentages are precomputed at the top level; the show*() functions build
# styled plotly pie charts from them.
library(dplyr)
library(ggplot2)
library(plotly)
# NOTE(review): assumes data.csv sits in the working directory with columns
# body_camera ("True"/"False" strings), armed, and flee -- confirm with caller.
shooting.data <- read.csv("data.csv", stringsAsFactors = FALSE)
# Count how many shootings are in the dataset.
total.shootings <- nrow(shooting.data)
# Split the data based on whether the officer wore a body camera.
bodycam.false <- shooting.data %>% filter(body_camera == "False")
total.non.bc.shootings <- nrow(bodycam.false)
bodycam.true <- shooting.data %>% filter(body_camera == "True")
total.bc.shootings <- nrow(bodycam.true)
# Percentage of unarmed/armed suspects killed of officers WITH body cameras.
unarmed.bc.true <- bodycam.true %>% filter(armed == "unarmed")
unarmed.bc.true.perc <- round(nrow(unarmed.bc.true) / nrow(bodycam.true) * 100)
armed.bc.true.perc <- 100 - unarmed.bc.true.perc
# Pie chart: armed vs. unarmed, officers WITH body cameras.
# The assignment's value is the plotly object, returned invisibly.
showArmedPlotBC <- function() {
unarmed.bc.true.graph <- plot_ly(bodycam.true,
labels = ~c("Unarmed", "Armed"),
values = ~c(unarmed.bc.true.perc, armed.bc.true.perc),
type = 'pie',
width = 400, height = 400) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), paper_bgcolor="#4e5d6c",
plot_bgcolor="#4e5d6c")
}
# Percentage of unarmed/armed suspects killed of officers WITHOUT body cameras.
unarmed.bc.false <- bodycam.false %>% filter(armed == "unarmed")
unarmed.bc.false.perc <- round(nrow(unarmed.bc.false) / nrow(bodycam.false) * 100)
armed.bc.false.perc <- round((nrow(bodycam.false) - nrow(unarmed.bc.false)) / nrow(bodycam.false) * 100)
# NOTE(review): this top-level chart duplicates the body of showArmedPlotNoBC
# below; presumably built eagerly for interactive use -- confirm before removing.
unarmed.bc.false.graph <- plot_ly(bodycam.false,
labels = ~c("Unarmed", "Armed"),
values = ~c(unarmed.bc.false.perc, armed.bc.false.perc),
type = 'pie',
width = 400, height = 400) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), paper_bgcolor="#4e5d6c",
plot_bgcolor="#4e5d6c")
# Pie chart: armed vs. unarmed, officers WITHOUT body cameras.
showArmedPlotNoBC <- function() {
unarmed.bc.false.graph <- plot_ly(bodycam.false,
labels = ~c("Unarmed", "Armed"),
values = ~c(unarmed.bc.false.perc, armed.bc.false.perc),
type = 'pie',
width = 400, height = 400) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), paper_bgcolor="#4e5d6c",
plot_bgcolor="#4e5d6c")
}
# Percentage of fleeing/non fleeing suspects killed of officers WITH body cameras.
not.flee.bc.true <- bodycam.true %>% filter(flee == "Not fleeing")
not.flee.bc.true.perc <- round(nrow(not.flee.bc.true) / nrow(bodycam.true) * 100)
flee.bc.true.perc <- (100 - not.flee.bc.true.perc)
# Pie chart: fled vs. didn't flee, officers WITH body cameras.
showFleePlotBC <- function() {
flee.bc.true.plot <- plot_ly(bodycam.true,
labels = ~c("Didn't Flee", "Fled"),
values = ~c(not.flee.bc.true.perc, flee.bc.true.perc),
type = 'pie',
width = 400, height = 400) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), paper_bgcolor="#4e5d6c",
plot_bgcolor="#4e5d6c")
}
# Percentage of flee/not flee suspects killed of officers WITHOUT body cameras.
not.flee.bc.false <- bodycam.false %>% filter(flee == "Not fleeing")
not.flee.bc.false.perc <- round(nrow(not.flee.bc.false) / nrow(bodycam.false) * 100)
flee.bc.false.perc <- (100 - not.flee.bc.false.perc)
# Pie chart: fled vs. didn't flee, officers WITHOUT body cameras.
showFleePlotNoBC <- function() {
flee.bc.false.plot <- plot_ly(bodycam.false,
labels = ~c("Didn't Flee", "Fled"),
values = ~c(not.flee.bc.false.perc, flee.bc.false.perc),
type = 'pie',
width = 400, height = 400) %>%
layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), paper_bgcolor="#4e5d6c",
plot_bgcolor="#4e5d6c")
}
|
2204a5ec16ea534f398c55b7fc608ced09918406
|
e9291a6e2fffd64065f61c7e2a9c8a1a6878779b
|
/tests/testthat.R
|
641bd270454ea1f31276d0f9910c4b21dcc3ec3a
|
[] |
no_license
|
dmenne/dknitprintr
|
bd92bc9ea38c734831fa7f33a30b1e058b3d513c
|
3150de1817ac5db04f68e2d5473bbaa7e36d4d0d
|
refs/heads/master
| 2020-06-14T10:39:17.869977
| 2019-08-02T07:32:44
| 2019-08-02T07:32:44
| 75,197,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65
|
r
|
testthat.R
|
# Test runner: executes the testthat suite for the Dknitprintr package
# (invoked by R CMD check via tests/testthat.R).
library(testthat)
library(Dknitprintr)
test_check("Dknitprintr")
|
c69d1681287abd51b442dfabd36a9ef6381bec47
|
714e7c6736a2e3d8fd07634427c4a8bb3cef2d61
|
/man/save_plot.Rd
|
bb4250aedc1e1bd5c556fc26e39b64aace4aaaba
|
[
"MIT"
] |
permissive
|
flaneuse/llamar
|
da7cb58a03b2adbffb6b2fe2e57f3ffeede98afb
|
ea46e2a9fcb72be872518a51a4550390b952772b
|
refs/heads/master
| 2021-01-18T00:10:00.797724
| 2017-10-24T13:41:21
| 2017-10-24T13:41:21
| 48,335,371
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,124
|
rd
|
save_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_plot.R
\name{save_plot}
\alias{save_plot}
\title{Simple wrapper to ggsave}
\usage{
save_plot(filename, plot = last_plot(), saveBoth = FALSE, width = NA,
height = NA, units = "in", scale = 1)
}
\arguments{
\item{filename}{string containing file name}
\item{plot}{which plot to save; by default, the last one created}
\item{saveBoth}{If TRUE, save both a .pdf and .png}
\item{width}{width of rendered plot}
\item{height}{height of the rendered plot}
\item{units}{units of the width/height of rendered plot (typically inches -- 'in')}
\item{scale}{scalar factor to enlarge/shrink the rendered plot}
}
\description{
Wrapper to ggsave to auto-save the annoying fiddly arguments I always forget. Works only w/ ggplot2 objects.
}
\examples{
# create a figure
p = ggplot(mtcars, aes(x = cyl, y = mpg)) + geom_point()
p + stat_summary(geom = 'point', fun.y = 'mean', colour = 'red', size = 5, shape = 21, fill = NA)
# save figures
save_plot('last_plot.pdf', width = 5, height = 5)
save_plot('plot_p.pdf', plot = p, width = 5, height = 5)
}
|
a4b8a3f45d23d97c1ff5e6708d4126a2f9ca8444
|
379a039dabc2404fec00d6bebffe0bf30ed473c0
|
/man/cmmb.Rd
|
9474d09f5b78b2719384f8c2f814875b5ea6a0ac
|
[] |
no_license
|
mbsohn/cmmb
|
7708ca784af3ad787331e70e2635d0550847ae2e
|
b92a3b32e4e39b8ac82220e14ce8ea63bc0bccee
|
refs/heads/master
| 2023-06-16T08:43:30.055622
| 2021-07-05T20:14:33
| 2021-07-05T20:14:33
| 382,963,057
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,062
|
rd
|
cmmb.Rd
|
\name{cmmb}
\alias{cmmb}
\title{
Compositional Mediation Model for Binary Outcomes
}
\description{
Estimate direct and indirect effects of treatment on binary outcomes transmitted through compositional mediators
}
\usage{
cmmb(Y, M, tr, X, n.cores=NULL, n.boot=2000, ci.method="empirical",
p.value=FALSE, ForSA=FALSE, max.rho=0.5, sig.level=0.05, FWER=FALSE,
w=rep(1,length(Y)), prec=1e-4, max.iter=1000)
}
\arguments{
\item{Y}{a vector of binary outcomes}
\item{M}{a matrix of compositional data}
\item{tr}{a vector of continuous or binary treatments}
\item{X}{a matrix of covariates}
\item{n.cores}{a number of CPU cores for parallel processing}
\item{n.boot}{a number of bootstrap samples}
\item{ci.method}{options for bootstrap confidence interval. It can be either "empirical" (default) or "percentile".}
\item{p.value}{a logical value for calculating the p value. It is inactive when \emph{ci.method="percentile"}.}
\item{ForSA}{a logical value for sensitivity analysis}
\item{max.rho}{a maximum correlation allowed between mediators and an outcome}
\item{sig.level}{a significance level to estimate bootstrap confidence intervals for direct and indirect effects of treatment}
\item{FWER}{a logical value for family-wise error rate for direct and total indirect effects. If \emph{FWER=TRUE}, the Bonferroni correction will be applied.}
\item{w}{a vector of weights on samples. If measurements in a sample is more reliable than others, this argument can be used to take that information into the model.}
\item{prec}{an error tolerance or a stopping criterion for the debiased procedure}
\item{max.iter}{a maximum number of iteration in the debias procedure}
Note: the range of rho is not from -1 to 1 when the number of components is more than two because the correlation between them is not zero, and the range gets smaller as the number of components increases.
}
\value{
If \emph{ForSA=FALSE},
\item{total}{contains estimated direct and total indirect effects with their confidence limits}
\item{cwprod}{contains component-wise products of path coefficients with their confidence limits}
If \emph{ForSA=TRUE},
\item{total}{contains estimated direct and total indirect effects with their confidence limits}
\item{cwprod}{contains component-wise products of path coefficients with their confidence limits}
\item{cide.rho}{contains estimated indirect effects and corresponding pointwise 95\% confidence intervals, given correlations between mediators and an outcome}
}
\references{
Sohn, M.B., Lu, J. and Li, H. (2021). \emph{A Compositional Mediation Model for Binary Outcome: Application to Microbiome Studies} (Submitted)
}
\author{
Michael B. Sohn
Maintainer: Michael B. Sohn <michael_sohn@urmc.rochester.edu>
}
\examples{
\dontrun{
# Load a simulated dataset
data(cmmb_demo_data)
# Run CMM for binary outcomes
rslt <- cmmb(Y=cmmb_demo_data$Y, M=cmmb_demo_data$M,
tr=cmmb_demo_data$tr, X=cmmb_demo_data$X)
rslt
# Plot products of component-wise path coefficients
plot_cw_ide(rslt)
}
}
|
c8227f18b97073286ab6221a14be13793cebe830
|
02db52e1ab4453e85f03c4d7dd19274626033dbd
|
/man/presentation.Rd
|
2502a05ee08ee41fdb7502b1cd996487461b2ad7
|
[] |
no_license
|
riverlee/reports
|
def177c335b880bc0345de4a6d4a2984ea07f3fa
|
26ea8b52f2a5d4f92c70fa9101eac648aa432038
|
refs/heads/master
| 2021-01-15T21:03:15.405664
| 2013-08-30T18:07:03
| 2013-08-30T18:07:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,618
|
rd
|
presentation.Rd
|
\name{presentation}
\alias{presentation}
\title{Presentation Template}
\usage{
presentation(presentation = "presentation",
type = c("rnw", "rmd"), theme = "Madrid",
bib.loc = getOption("bib.loc"),
name = getOption("name.reports"),
github.user = getOption("github.user"),
sources = getOption("sources.reports"), path = getwd(),
slidify = getOption("slidify.template"), open = FALSE,
...)
}
\arguments{
\item{presentation}{A character vector of length two or
one: (1) the main directory name and (2) sub directory
names (i.e., all the file contents will be imprinted with
this name). If the length of \code{report} is one this
name will be used as the main directory name and all sub
directories and files.}
\item{type}{A vector of the file format types. Any
combination of the following: \code{rnw}, \code{rmd} or
\code{pptx}. \code{rnw} corresponds to a beamer slides
(.Rnw file), \code{rmd} corresponds to a html5
(compliments of slidify) slides (.Rnwd file) and
\code{docx} corresponds to PowerPoint slides (.pptx
file).}
\item{theme}{\href{http://deic.uab.es/~iblanes/beamer_gallery/index_by_theme.html}{Beamer
theme} to use. If \code{NULL} \code{presentation} will
allow the user to choose interactively.}
\item{bib.loc}{Optional path to a .bib resource.}
\item{name}{A character vector of the user's name to be
used on the report.}
\item{github.user}{GitHub user name (character string).}
\item{sources}{A vector of path(s) to other scripts to be
sourced in the report project upon startup (adds this
location to the report project's \code{.Rprofile}).}
\item{path}{The path to where the project should be
created. Default is the current working directory.}
\item{slidify}{The template to be used in the
PRESENTATION .Rmd. This can be one of the types from
\code{slidify_templates} or a path to an .Rmd file. This
argument will be overrode if a custom reports template is
supplied with an .Rmd file in the inst directory named
slidify.Rmd (\code{/inst/slidify.Rmd}).}
\item{open}{logical. If \code{TRUE} the project will be
opened in RStudio.}
\item{\ldots}{Other arguments passed to
\code{\link[slidify]{author}}.}
}
\value{
Creates a presentation template.
}
\description{
Generate a presentation template to increase efficiency.
This is a lighter weight version of
\code{\link[reports]{new_report}} that focuses on the
presentation.
}
\section{Suggestion}{
The user may want to set \code{\link[base]{options}} for
\code{bib.loc}, \code{github.user}, \code{name.reports}
\code{sources.reports},\code{slidify.template} and
\code{reveraljs.loc} in the user's primary
\code{.Rprofile}: \enumerate{ \item{\bold{bib.loc} - The
path to the user's primary bibliography}
\item{\bold{github.user} - GitHub user name}
\item{\bold{name.reports} - The name to use on reports}
\item{\bold{sources.reports} - Path(s) to additional
files/scripts that should be included to be sourced in
the project startup} \item{\bold{slidify.template} - Path
to, or default, .Rmd file template for use as the .Rmd
used in the slidify presentations (see
\code{slidify_templates} for possible non-path
arguments)} }
}
\examples{
## presentation("New")
}
\references{
\href{https://github.com/ramnathv/slidifyExamples/tree/gh-pages/examples}{slidify
examples}
}
\seealso{
\code{\link[reports]{new_report}},
\code{\link[reports]{slidify_templates}},
\code{\link[slidify]{author}}
\href{https://github.com/hakimel/reveal.js/}{Installation
section of reveal.js GitHub}
}
|
aae37572fd59020f63f239318827743d2552be15
|
bf8c5c249994cc9df1684cbd402cb33a5439e681
|
/man/randomize_trt2.Rd
|
29c13157a9f3158b4669e044d95c36e32654ce8a
|
[] |
no_license
|
cran/TwoArmSurvSim
|
607eb6819ca04c9fb8d24d26c3cc5c6e31a00682
|
51d1274128f054bb6ded6d588679dcdbf9671dc9
|
refs/heads/master
| 2023-03-08T04:10:35.616291
| 2021-02-26T07:50:06
| 2021-02-26T07:50:06
| 323,359,862
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 527
|
rd
|
randomize_trt2.Rd
|
\name{randomize_trt2}
\alias{randomize_trt2}
\title{Generate Block Randomized Treatment Label Based on Covariates Matrix for Two Arm Trial}
\usage{
randomize_trt2(cov_mat=cov_mat,blocksize=blocksize,rand_ratio=c(1,1))
}
\arguments{
\item{cov_mat}{Covariates matrix. }
\item{blocksize}{Randomization block size}
\item{rand_ratio}{Randomization ratio between control and treatment}
}
\description{
Generate block randomized treatment label based on covariates matrix for two arm trial.
}
|
efd5edea12e8d0a536b9ad64d32696b085ba2f0d
|
2de9deb099bc4379d78df2efef50ded7916d4558
|
/doc/mindev.R
|
ec3fb867be75adb9c7dff56cc9ebcad010b49d59
|
[] |
no_license
|
ablejec/StatPred
|
a2d4992cf097ff02514828a8ada55a082c57e7db
|
f46754f21e74109a1733654304f07a407b4fe4b4
|
refs/heads/master
| 2021-01-19T10:11:14.403418
| 2013-01-18T21:54:32
| 2013-01-18T21:54:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,885
|
r
|
mindev.R
|
###################################################
### chunk number 1:
###################################################
options(width=70)
set.seed(1234)
library(Hmisc)
#library(xlsReadWrite)
###################################################
### chunk number 2: gmean
###################################################
gmean <- function(x, na.rm = TRUE) {
  #
  # Geometric mean of a numeric vector: prod(x)^(1/n).
  #
  # x     - numeric vector (values should be positive for a meaningful result)
  # na.rm - if TRUE (default), drop NA values before computing; if FALSE, any
  #         NA propagates to the result. (The previous version ignored this
  #         argument and always dropped NAs; the default behaviour is unchanged.)
  #
  # Returns a single numeric value.
  #
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  prod(x)^(1/length(x))
}
gmean(c(1,10,100,NA))
###################################################
### chunk number 3: showMinDeviation
###################################################
showMinDeviation <-
function(x=rnorm(10, 5, 2),a=seq(0, 20, .2),Xs=seq(0, max(a), .1),xlim=range(a),ylim=c(0,5),what=c("median", "mean", "gmean"),cols=c("black","blue","red")){
#
# Dynamic plot of sum-of-deviation minima: for each value X in Xs the last
# element of x is replaced by X and, over the grid a, up to three deviation
# criteria are drawn -- mean absolute deviation (minimised at the median),
# mean squared deviation (minimised at the arithmetic mean), and a
# multiplicative log-deviation criterion (minimised at the geometric mean).
#
# x          - sample values; the last element is varied over Xs
# a          - grid of candidate centre values where the criteria are evaluated
# Xs         - values substituted into x[n], one plot frame per value
# xlim, ylim - plot limits
# what       - subset of c("median", "mean", "gmean") selecting the layers
# cols       - colours for the median/mean/gmean layers, in that order
#
n <- length(x)
maxa <- max(a)
# Vertical offset used for labels placed below the x axis.
basey <- max(ylim)/15
# Criteria vectorized over the grid argument "a".
vabs <- Vectorize (FUN = function(x, a) sum(abs(x - a))/length (x), "a")
vkv <- Vectorize (FUN = function(x, a) sum((x - a)^2)/length (x), "a")
pkvoc <- Vectorize (FUN = function(x, a) prod(exp(log(x/a)^2))^(1/length(x)), "a")
#
par (mar = c(5, 4, 1, 4))
for (X in Xs) {
x[n] <- X
# Empty frame; the selected criteria are layered onto it below.
plot(a, a, ylab = "", xlim = xlim, ylim = ylim, type = "n")
rug(x)
# Highlight the observation being varied.
rug(x[n], col = "red", lwd = 3)
if (!is.na(pmatch ("median", what))) {
## median: minimises the mean absolute deviation
col <- cols[1]
axis(1)
mtext(expression(sum(abs(x - a)/n)), 2, 2)
points(a, vabs(x, a), col = col)
abline (v = median(x),col=col)
MINa <- vabs(x, a = median(x))
abline (h = MINa,col=col)
text(0, MINa + .2, round(MINa, 2), xpd = TRUE, adj = 0,col=col)
text(median(x), min(ylim)-basey, "Me", xpd = TRUE,col=col)
}
if (!is.na(pmatch ("mean", what))) {
## arithmetic mean: minimises the mean squared deviation
## (values are drawn divided by 10 to share the plot; the right-hand
## axis shows the unscaled values)
col <- cols[2]
axis(4, col = col, at = axTicks(2), label = axTicks(2) * 10, col.axis = col)
mtext(expression(sum((x - a)^2)/n), 4, 2, col = col)
points(a, vkv(x, a)/10, col = col)
abline (v = mean(x), col = col)
MINk <- vkv(x, a = mean(x))
abline (h = MINk/10, col = col)
text(maxa, MINk/10 + .2, round(MINk, 2), xpd = TRUE, adj = 1, col = col)
text(mean(x), min(ylim)- basey*1.5, expression(bar (x)), xpd = TRUE, col = col)
}
if (!is.na(pmatch ("gmean", what))) {
## geometric mean: minimises the multiplicative criterion
## (relies on the gmean() helper defined earlier in this file)
col <- cols[3]
points(a, pkvoc(x, a), col = col)
abline (v = gmean(x), col = col)
MINp <- pkvoc(x, a = gmean(x))
abline (h = MINp, col = col)
text(gmean(x), min(ylim)- basey*2, "G", xpd = TRUE, col = col)
text(0, MINp + .2, round(MINp, 2), xpd = TRUE, adj = 0, col = col)
mtext(expression((prod(e^{log(x/a)^2}))^{1/n}), 2, 2, col = col,adj=.8)
}
}
}
showMinDeviation()
###################################################
### chunk number 4:
###################################################
showMinDeviation(what=c("median"),Xs=20)
###################################################
### chunk number 5:
###################################################
showMinDeviation(what=c("median","mean"),Xs=20)
###################################################
### chunk number 6:
###################################################
showMinDeviation(Xs=20)
###################################################
### chunk number 7: eval=FALSE
###################################################
## showMinDeviation()
###################################################
### chunk number 8:
###################################################
gmean <- function(x, na.rm = TRUE) {
  #
  # Geometric mean of a numeric vector: prod(x)^(1/n).
  #
  # x     - numeric vector (values should be positive for a meaningful result)
  # na.rm - if TRUE (default), drop NA values before computing; if FALSE, any
  #         NA propagates to the result. (The previous version ignored this
  #         argument and always dropped NAs; the default behaviour is unchanged.)
  #
  # Returns a single numeric value.
  #
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  prod(x)^(1/length(x))
}
gmean(c(1,10,100,NA))
###################################################
### chunk number 9: eval=FALSE
###################################################
## showMinDeviation <-
## function(x=rnorm(10, 5, 2),a=seq(0, 20, .2),Xs=seq(0, max(a), .1),xlim=range(a),ylim=c(0,5),what=c("median", "mean", "gmean"),cols=c("black","blue","red")){
## #
## # dynamic plot of sum of deviations minima
## #
## #
## n <- length(x)
## maxa <- max(a)
## basey <- max(ylim)/15
## vabs <- Vectorize (FUN = function(x, a) sum(abs(x - a))/length (x), "a")
## vkv <- Vectorize (FUN = function(x, a) sum((x - a)^2)/length (x), "a")
## pkvoc <- Vectorize (FUN = function(x, a) prod(exp(log(x/a)^2))^(1/length(x)), "a")
## #
## par (mar = c(5, 4, 1, 4))
## for (X in Xs) {
## x[n] <- X
## plot(a, a, ylab = "", xlim = xlim, ylim = ylim, type = "n")
## rug(x)
## rug(x[n], col = "red", lwd = 3)
## if (!is.na(pmatch ("median", what))) {
## ## median
## col <- cols[1]
## axis(1)
## mtext(expression(sum(abs(x - a)/n)), 2, 2)
## points(a, vabs(x, a), col = col)
## abline (v = median(x),col=col)
## MINa <- vabs(x, a = median(x))
## abline (h = MINa,col=col)
## text(0, MINa + .2, round(MINa, 2), xpd = TRUE, adj = 0,col=col)
## text(median(x), min(ylim)-basey, "Me", xpd = TRUE,col=col)
## }
## if (!is.na(pmatch ("mean", what))) {
## ## arithmetic mean
## col <- cols[2]
## axis(4, col = col, at = axTicks(2), label = axTicks(2) * 10, col.axis = col)
## mtext(expression(sum((x - a)^2)/n), 4, 2, col = col)
## points(a, vkv(x, a)/10, col = col)
## abline (v = mean(x), col = col)
## MINk <- vkv(x, a = mean(x))
## abline (h = MINk/10, col = col)
## text(maxa, MINk/10 + .2, round(MINk, 2), xpd = TRUE, adj = 1, col = col)
## text(mean(x), min(ylim)- basey*1.5, expression(bar (x)), xpd = TRUE, col = col)
## }
## if (!is.na(pmatch ("gmean", what))) {
## ## geometric mean
## col <- cols[3]
## points(a, pkvoc(x, a), col = col)
## abline (v = gmean(x), col = col)
## MINp <- pkvoc(x, a = gmean(x))
## abline (h = MINp, col = col)
## text(gmean(x), min(ylim)- basey*2, "G", xpd = TRUE, col = col)
## text(0, MINp + .2, round(MINp, 2), xpd = TRUE, adj = 0, col = col)
## mtext(expression((prod(e^{log(x/a)^2}))^{1/n}), 2, 2, col = col,adj=.8)
## }
## }
## }
## showMinDeviation()
###################################################
### chunk number 10: minme
###################################################
# Demonstration that the median is where n(x <= a) - n(x > a) crosses zero,
# i.e. where the counts of observations on either side balance, and that the
# sum of absolute deviations (overlaid curve) is minimal at the same point.
set.seed(1221)
maxx <- 20
n <- 10
# Grid of candidate centre values and a uniform random sample on (0, maxx).
a <- seq(0,maxx,0.01)
x <- runif(n)*maxx
par(mar=c(5,4,1,3))
ylab <- expression(n(x <= a) - n(x > a))
# Empty frame spanning the sample range; layers are added below.
plot(range(x),c(-n,n),type="n",xlab="",ylab=ylab,axes=FALSE)
axis(2)
# Short vertical ticks marking the observations.
segments(x,-.5,x,.5,lwd=2)
# Count balance and mean absolute deviation, vectorized over the grid "a".
nvec <- Vectorize(FUN=function(x,a) {sum(x<=a)-sum(x>a)},"a")
vabs <- Vectorize (FUN = function(x, a) sum(abs(x - a))/length (x), "a")
points(a,nvec(x,a))
abline(h=0)
abline(v=median(x))
text(median(x),-n-1.5,"Me",xpd=TRUE)
lines(a,vabs(x,a))
# labcurve() comes from the Hmisc package loaded at the top of this file.
labcurve(list(list(x=a,y=vabs(x,a))),labels=expression(sum(abs(x-a))),type="p")
|
6f87f3ddaf547eb1445c4b886247ee9aa4c49149
|
d4599d2a5faeaa5e40994b8486e6becc59141fe1
|
/man/auto.var.Rd
|
b37b2364cbb38b9aef873598042b2eca0a30fb03
|
[] |
no_license
|
Allisterh/forecast-pimfc
|
6a61c568768a792babc49ba1f27cc89997c63cfa
|
928986ec4932e7247fff857da58e53733ee00cd4
|
refs/heads/master
| 2023-03-16T23:02:45.366133
| 2017-03-13T23:35:45
| 2017-03-13T23:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,123
|
rd
|
auto.var.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autovar.R
\name{auto.var}
\alias{auto.var}
\title{Fit best VAR model to multivariate time series}
\usage{
auto.var(y, max.p = 6, ic = c("SC", "HQ", "AIC", "FPE"), seasonal = TRUE)
}
\arguments{
\item{y}{A multivariate time series}
\item{max.p}{Determines the highest lag order for lag length selection
according to the choosen ic.}
\item{ic}{Information criterion to be used in model selection.}
\item{seasonal}{If FALSE, restricts search to models without
seasonal dummies.}
}
\value{
A list with class attribute 'autovar' holding the following elements:
\item{\code{fit}}{the estimated model, of class \code{varest}.}
\item{\code{d}}{order of differencing applied to the series.}
\item{\code{y}}{series of the original variables.}
\item{\code{x}}{series of the variables differenced \code{d} times.}
\item{\code{ic}}{information criteria of the selected model.}
}
\description{
Returns best VAR model according to either SC, HQ, AIC or FPE value.
The function conducts a search over possible model within the order
constraints provided.
}
|
518439e21e04cba18927f51642c2a70a7fba0b9e
|
2d16a85f93eec6d13ddd32f3f12036058f440014
|
/man/PS06.Rd
|
ddfee36a47030ac253e3cbd6a8e3238966fbaa1b
|
[
"MIT"
] |
permissive
|
A-moosh/foofactors
|
d7ffc434d6450f072368c788558f601b05a640cf
|
41046d93adf048ba004352a367097d681d85089e
|
refs/heads/master
| 2023-04-12T09:27:13.683460
| 2021-05-13T20:09:57
| 2021-05-13T20:09:57
| 367,161,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 428
|
rd
|
PS06.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Untitled.R
\docType{data}
\name{PS06}
\alias{PS06}
\title{Medicaid Expenditure 2019.}
\format{
A dataframe with 51 rows and 4 variables:
}
\source{
\url{https://github.com/GeoDaCenter/opioid-policy-scan/blob/master/data_final/PS06_2019_S.csv}
}
\usage{
PS06
}
\description{
A dataset containing medicaid expenditure data in 2019
}
\keyword{datasets}
|
cb820262dca290a8af9d06376b4379fa046231d3
|
9fa290918b0cc0b319d02f421763bbefa398e60d
|
/R/na.as.R
|
c5bc367f3973c7c3eb6d3e76f1f5deeba534bd87
|
[] |
no_license
|
cran/misty
|
634e5bd6bf5e317fa1f4ee1f586d5572a4e47875
|
1a42b63704bf9daf2d920312bc1f04204bac85b4
|
refs/heads/master
| 2023-08-31T19:04:33.782877
| 2023-08-24T07:30:05
| 2023-08-24T09:31:21
| 239,395,613
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,822
|
r
|
na.as.R
|
#' Replace Missing Values With User-Specified Values
#'
#' This function replaces \code{NA} in a vector, factor, matrix or data frame with
#' user-specified values in the argument \code{value}.
#'
#' @param x a vector, factor, matrix or data frame.
#' @param value a numeric value or character string with which \code{NA} is
#' replaced.
#' @param as.na a numeric vector indicating user-defined missing values,
#' i.e. these values are converted to \code{NA} before conducting
#' the analysis.
#' @param check logical: if \code{TRUE}, argument specification is checked.
#'
#' @author
#' Takuya Yanagida \email{takuya.yanagida@@univie.ac.at}
#'
#' \code{\link{as.na}}, \code{\link{na.auxiliary}}, \code{\link{na.coverage}},
#' \code{\link{na.descript}}, \code{\link{na.indicator}}, \code{\link{na.pattern}},
#' \code{\link{na.prop}}, \code{\link{na.test}}
#'
#' @references
#' Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988) \emph{The New S Language}.
#' Wadsworth & Brooks/Cole.
#'
#' @return
#' Returns \code{x} with \code{NA} replaced with the numeric value or character
#' string specified in \code{value}.
#'
#' @export
#'
#' @examples
#' #--------------------------------------
#' # Numeric vector
#' x.num <- c(1, 3, NA, 4, 5)
#'
#' # Replace NA with 2
#' na.as(x.num, value = 2)
#'
#' #--------------------------------------
#' # Character vector
#' x.chr <- c("a", NA, "c", "d", "e")
#'
#' # Replace NA with "b"
#' na.as(x.chr, value = "b")
#'
#' #--------------------------------------
#' # Factor
#' x.factor <- factor(c("a", "a", NA, NA, "c", "c"))
#'
#' # Replace NA with "b"
#' na.as(x.factor, value = "b")
#'
#' #--------------------------------------
#' # Matrix
#' x.mat <- matrix(c(1, NA, 3, 4, 5, 6), ncol = 2)
#'
#' # Replace NA with 2
#' na.as(x.mat, value = 2)
#'
#' #--------------------------------------
#' # Data frame
#' x.df1 <- data.frame(x1 = c(NA, 2, 3),
#' x2 = c(2, NA, 3),
#' x3 = c(3, NA, 2), stringsAsFactors = FALSE)
#'
#' # Replace NA with -99
#' na.as(x.df1, value = -99)
#'
#' #--------------------------------------
#' # Recode value in data frame
#' x.df2 <- data.frame(x1 = c(1, 2, 30),
#' x2 = c(2, 1, 30),
#' x3 = c(30, 1, 2))
#'
#' # Replace 30 with NA and then replace NA with 3
#' na.as(x.df2, value = 3, as.na = 30)
na.as <- function(x, value, as.na = NULL, check = TRUE) {

  # Replaces NA in a vector, factor, matrix or data frame with `value`;
  # see the roxygen block above for the full parameter documentation.
  # Fixes: the two "specifiy" typos in user-facing error messages.

  #_____________________________________________________________________________
  #
  # Initial Check --------------------------------------------------------------

  # Check if input 'x' is missing
  if (isTRUE(missing(x))) { stop("Please specify a vector, factor, matrix or data frame for the argument 'x'.", call. = FALSE) }

  # Check if input 'x' is NULL
  if (isTRUE(is.null(x))) { stop("Input specified for the argument 'x' is NULL.", call. = FALSE) }

  # Check if input 'value' is missing
  if (isTRUE(missing(value))) { stop("Please specify a numeric value or character string for the argument 'value'.", call. = FALSE) }

  # Convert user-defined missing values into NA first, so they are also replaced
  if (isTRUE(!is.null(as.na))) { x <- misty::as.na(x, na = as.na, check = check) }

  #_____________________________________________________________________________
  #
  # Input Check ----------------------------------------------------------------

  # Check input 'check'
  if (isTRUE(!is.logical(check))) { stop("Please specify TRUE or FALSE for the argument 'check'.", call. = FALSE) }

  if (isTRUE(check)) {

    # Vector, factor, matrix or data frame for the argument 'x'?
    if (isTRUE(!is.atomic(x) && !is.factor(x) && !is.matrix(x) && !is.data.frame(x))) { stop("Please specify a vector, factor, matrix or data frame for the argument 'x'.", call. = FALSE) }

    # Factor or vector: warn when there is nothing to replace
    if (isTRUE(is.null(dim(x)))) {

      if (isTRUE(all(!is.na(x)))) { warning("There are no missing values (NA) in the vector or factor specified in 'x'.", call. = FALSE) }

    # Matrix or data frame: warn when no column contains NA
    } else {

      if (isTRUE(all(apply(x, 2, function(y) all(!is.na(y)))))) { warning("There are no missing values (NA) in the matrix or data frame specified in 'x'.", call. = FALSE) }

    }

    # Check input 'value': exactly one replacement value is required
    if (isTRUE(length(value) != 1L)) { stop("Please specify a single value or character string for the argument 'value'.", call. = FALSE) }

  }

  #_____________________________________________________________________________
  #
  # Main Function --------------------------------------------------------------

  # Factor or vector
  if (isTRUE(is.null(dim(x)))) {

    # Factor: append 'value' as an additional level and map NA onto it
    if (isTRUE(is.factor(x))) {

      # Factor levels (integer codes of the existing levels)
      f.levels <- sort(unique(as.numeric(x)))

      f.value <- length(f.levels) + 1L
      f.levels <- c(f.levels, f.value)

      # Factor labels: existing labels plus the replacement value
      f.labels <- c(levels(x), value)

      object <- factor(ifelse(is.na(x), f.value, x), levels = f.levels, labels = f.labels)

    # Vector
    } else {

      object <- ifelse(is.na(x), value, x)

    }

  # Matrix or data frame: recurse column-wise (check = FALSE avoids
  # duplicate warnings for NA-free columns)
  } else {

    # Matrix
    if (isTRUE(is.matrix(x))) {

      object <- apply(x, 2, na.as, value = value, check = FALSE)

    }

    # Data frame
    if (isTRUE(is.data.frame(x))) {

      object <- data.frame(lapply(x, na.as, value = value, check = FALSE),
                           check.names = FALSE, fix.empty.names = FALSE)

    }

  }

  #_____________________________________________________________________________
  #
  # Output ---------------------------------------------------------------------

  return(object)

}
|
66a968f6e3dbd1cd9e10368a0453bbd90ee16a7e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mudfold/examples/Plato7.Rd.R
|
0bfc1af8c3223e6844fb73b67150838c0654da3a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 194
|
r
|
Plato7.Rd.R
|
library(mudfold)
### Name: Plato7
### Title: Plato's Seven Works
### Aliases: Plato7
### Keywords: datasets
### ** Examples
## Not run:
##D data(Plato7)
##D str(Plato7)
## End(Not run)
|
d1db29ba987e7a256c0f4862295795c8dced4a2b
|
f7408683a4b9f3ea36e6c56588f257eba9761e12
|
/man/pffr.Rd
|
3783caa92f38cba0d75e78fa6f11c750e678d6fc
|
[] |
no_license
|
refunders/refund
|
a12ad139bc56f4c637ec142f07a78657727cc367
|
93cb2e44106f794491c7008970760efbfc8a744f
|
refs/heads/master
| 2023-07-21T21:00:06.028918
| 2023-07-17T20:52:08
| 2023-07-17T20:52:08
| 30,697,953
| 42
| 22
| null | 2023-06-27T15:17:47
| 2015-02-12T10:41:27
|
R
|
UTF-8
|
R
| false
| true
| 13,351
|
rd
|
pffr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pffr.R
\name{pffr}
\alias{pffr}
\title{Penalized flexible functional regression}
\usage{
pffr(
formula,
yind,
data = NULL,
ydata = NULL,
algorithm = NA,
method = "REML",
tensortype = c("ti", "t2"),
bs.yindex = list(bs = "ps", k = 5, m = c(2, 1)),
bs.int = list(bs = "ps", k = 20, m = c(2, 1)),
...
)
}
\arguments{
\item{formula}{a formula with special terms as for \code{\link[mgcv]{gam}},
with additional special terms \code{\link{ff}(), \link{sff}(),
\link{ffpc}(), \link{pcre}()} and \code{c()}.}
\item{yind}{a vector with length equal to the number of columns of the matrix
of functional responses giving the vector of evaluation points \eqn{(t_1,
\dots ,t_{G})}. If not supplied, \code{yind} is set to
\code{1:ncol(<response>)}.}
\item{data}{an (optional) \code{data.frame} containing the data. Can also be
a named list for regular data. Functional covariates have to be supplied as
<no. of observations> by <no. of evaluations> matrices, i.e. each row is
one functional observation.}
\item{ydata}{an (optional) \code{data.frame} supplying functional responses
that are not observed on a regular grid. See Details.}
\item{algorithm}{the name of the function used to estimate the model.
Defaults to \code{\link[mgcv]{gam}} if the matrix of functional responses
has less than \code{2e5} data points and to \code{\link[mgcv]{bam}} if not.
\code{'\link[mgcv]{gamm}'}, \code{'\link[gamm4]{gamm4}'} and
\code{'\link[mgcv]{jagam}'} are valid options as well. See Details for
\code{'\link[gamm4]{gamm4}'} and \code{'\link[mgcv]{jagam}'}.}
\item{method}{Defaults to \code{"REML"}-estimation, including of unknown
scale. If \code{algorithm="bam"}, the default is switched to
\code{"fREML"}. See \code{\link[mgcv]{gam}} and \code{\link[mgcv]{bam}} for
details.}
\item{tensortype}{which type of tensor product splines to use. One of
"\code{\link[mgcv]{ti}}" or "\code{\link[mgcv]{t2}}", defaults to
\code{ti}. \code{t2}-type terms do not enforce the more suitable special
constraints for functional regression, see Details.}
\item{bs.yindex}{a named (!) list giving the parameters for spline bases on
the index of the functional response. Defaults to \code{list(bs="ps", k=5,
m=c(2, 1))}, i.e. 5 cubic B-splines bases with first order difference
penalty.}
\item{bs.int}{a named (!) list giving the parameters for the spline basis for
the global functional intercept. Defaults to \code{list(bs="ps", k=20,
m=c(2, 1))}, i.e. 20 cubic B-splines bases with first order difference
penalty.}
\item{...}{additional arguments that are valid for \code{\link[mgcv]{gam}},
\code{\link[mgcv]{bam}}, \code{'\link[gamm4]{gamm4}'} or
\code{'\link[mgcv]{jagam}'}. \code{subset} is not implemented.}
}
\value{
A fitted \code{pffr}-object, which is a
\code{\link[mgcv]{gam}}-object with some additional information in an
\code{pffr}-entry. If \code{algorithm} is \code{"gamm"} or \code{"gamm4"},
only the \code{$gam} part of the returned list is modified in this way.\cr
Available methods/functions to postprocess fitted models:
\code{\link{summary.pffr}}, \code{\link{plot.pffr}},
\code{\link{coef.pffr}}, \code{\link{fitted.pffr}},
\code{\link{residuals.pffr}}, \code{\link{predict.pffr}},
\code{\link{model.matrix.pffr}}, \code{\link{qq.pffr}},
\code{\link{pffr.check}}.\cr If \code{algorithm} is \code{"jagam"}, only
the location of the model file and the usual
\code{\link[mgcv]{jagam}}-object are returned, you have to run the sampler
yourself.\cr
}
\description{
Implements additive regression for functional and scalar covariates and
functional responses. This function is a wrapper for \code{mgcv}'s
\code{\link[mgcv]{gam}} and its siblings to fit models of the general form
\cr \eqn{E(Y_i(t)) = g(\mu(t) + \int X_i(s)\beta(s,t)ds + f(z_{1i}, t) +
f(z_{2i}) + z_{3i} \beta_3(t) + \dots )}\cr with a functional (but not
necessarily continuous) response \eqn{Y(t)}, response function \eqn{g},
(optional) smooth intercept \eqn{\mu(t)}, (multiple) functional covariates
\eqn{X(t)} and scalar covariates \eqn{z_1}, \eqn{z_2}, etc.
}
\section{Details}{
The routine can estimate \enumerate{ \item linear
functional effects of scalar (numeric or factor) covariates that vary
smoothly over \eqn{t} (e.g. \eqn{z_{1i} \beta_1(t)}, specified as
\code{~z1}), \item nonlinear, and possibly multivariate functional effects
of (one or multiple) scalar covariates \eqn{z} that vary smoothly over the
index \eqn{t} of \eqn{Y(t)} (e.g. \eqn{f(z_{2i}, t)}, specified in the
\code{formula} simply as \code{~s(z2)}) \item (nonlinear) effects of scalar
covariates that are constant over \eqn{t} (e.g. \eqn{f(z_{3i})}, specified
as \code{~c(s(z3))}, or \eqn{\beta_3 z_{3i}}, specified as \code{~c(z3)}),
\item function-on-function regression terms (e.g. \eqn{\int
X_i(s)\beta(s,t)ds}, specified as \code{~ff(X, yindex=t, xindex=s)}, see
\code{\link{ff}}). Terms given by \code{\link{sff}} and \code{\link{ffpc}}
provide nonlinear and FPC-based effects of functional covariates,
respectively. \item concurrent effects of functional covariates \code{X}
measured on the same grid as the response are specified as follows:
\code{~s(x)} for a smooth, index-varying effect \eqn{f(X(t),t)}, \code{~x}
for a linear index-varying effect \eqn{X(t)\beta(t)}, \code{~c(s(x))} for a
constant nonlinear effect \eqn{f(X(t))}, \code{~c(x)} for a constant linear
effect \eqn{X(t)\beta}. \item Smooth functional random intercepts
\eqn{b_{0g(i)}(t)} for a grouping variable \code{g} with levels \eqn{g(i)}
can be specified via \code{~s(g, bs="re")}), functional random slopes
\eqn{u_i b_{1g(i)}(t)} in a numeric variable \code{u} via \code{~s(g, u,
bs="re")}). Scheipl, Staicu, Greven (2013) contains code examples for
modeling correlated functional random intercepts using
\code{\link[mgcv]{mrf}}-terms. } Use the \code{c()}-notation to denote
model terms that are constant over the index of the functional response.\cr
Internally, univariate smooth terms without a \code{c()}-wrapper are
expanded into bivariate smooth terms in the original covariate and the
index of the functional response. Bivariate smooth terms (\code{s(), te()}
or \code{t2()}) without a \code{c()}-wrapper are expanded into trivariate
smooth terms in the original covariates and the index of the functional
response. Linear terms for scalar covariates or categorical covariates are
expanded into varying coefficient terms, varying smoothly over the index of
the functional response. For factor variables, a separate smooth function
with its own smoothing parameter is estimated for each level of the
factor.\cr \cr The marginal spline basis used for the index of the the
functional response is specified via the \emph{global} argument
\code{bs.yindex}. If necessary, this can be overridden for any specific term
by supplying a \code{bs.yindex}-argument to that term in the formula, e.g.
\code{~s(x, bs.yindex=list(bs="tp", k=7))} would yield a tensor product
spline over \code{x} and the index of the response in which the marginal
basis for the index of the response are 7 cubic thin-plate spline functions
(overriding the global default for the basis and penalty on the index of
the response given by the \emph{global} \code{bs.yindex}-argument).\cr Use
\code{~-1 + c(1) + ...} to specify a model with only a constant and no
functional intercept. \cr
The functional covariates have to be supplied as a \eqn{n} by <no. of
evaluations> matrices, i.e. each row is one functional observation. For
data on a regular grid, the functional response is supplied in the same
format, i.e. as a matrix-valued entry in \code{data}, which can contain
missing values.\cr
If the functional responses are \emph{sparse or irregular} (i.e., not
evaluated on the same evaluation points across all observations), the
\code{ydata}-argument can be used to specify the responses: \code{ydata}
must be a \code{data.frame} with 3 columns called \code{'.obs', '.index',
'.value'} which specify which curve the point belongs to
(\code{'.obs'}=\eqn{i}), at which \eqn{t} it was observed
(\code{'.index'}=\eqn{t}), and the observed value
(\code{'.value'}=\eqn{Y_i(t)}). Note that the vector of unique sorted
entries in \code{ydata$.obs} must be equal to \code{rownames(data)} to
ensure the correct association of entries in \code{ydata} to the
corresponding rows of \code{data}. For both regular and irregular
functional responses, the model is then fitted with the data in long
format, i.e., for data on a grid the rows of the matrix of the functional
response evaluations \eqn{Y_i(t)} are stacked into one long vector and the
covariates are expanded/repeated correspondingly. This means the models get
quite big fairly fast, since the effective number of rows in the design
matrix is number of observations times number of evaluations of \eqn{Y(t)}
per observation.\cr
Note that \code{pffr} does not use \code{mgcv}'s default identifiability
constraints (i.e., \eqn{\sum_{i,t} \hat f(z_i, x_i, t) = 0} or
\eqn{\sum_{i,t} \hat f(x_i, t) = 0}) for tensor product terms whose
marginals include the index \eqn{t} of the functional response. Instead,
\eqn{\sum_i \hat f(z_i, x_i, t) = 0} for all \eqn{t} is enforced, so that
effects varying over \eqn{t} can be interpreted as local deviations from
the global functional intercept. This is achieved by using
\code{\link[mgcv]{ti}}-terms with a suitably modified \code{mc}-argument.
Note that this is not possible if \code{algorithm='gamm4'} since only
\code{t2}-type terms can then be used and these modified constraints are
not available for \code{t2}. We recommend using centered scalar covariates
for terms like \eqn{z \beta(t)} (\code{~z}) and centered functional
covariates with \eqn{\sum_i X_i(t) = 0} for all \eqn{t} in \code{ff}-terms
so that the global functional intercept can be interpreted as the global
mean function.
The \code{family}-argument can be used to specify all of the response
distributions and link functions described in
\code{\link[mgcv]{family.mgcv}}. Note that \code{family = "gaulss"} is
treated in a special way: Users can supply the formula for the variance by
supplying a special argument \code{varformula}, but this is not modified in
the way that the \code{formula}-argument is but handed over to the fitter
directly, so this is for expert use only. If \code{varformula} is not
given, \code{pffr} will use the parameters from argument \code{bs.int} to
define a spline basis along the index of the response, i.e., a smooth
variance function over $t$ for responses $Y(t)$.
}
\examples{
###############################################################################
# univariate model:
# Y(t) = f(t) + \int X1(s)\beta(s,t)ds + eps
set.seed(2121)
data1 <- pffrSim(scenario="ff", n=40)
t <- attr(data1, "yindex")
s <- attr(data1, "xindex")
m1 <- pffr(Y ~ ff(X1, xind=s), yind=t, data=data1)
summary(m1)
plot(m1, pages=1)
\dontrun{
###############################################################################
# multivariate model:
# E(Y(t)) = \beta_0(t) + \int X1(s)\beta_1(s,t)ds + xlin \beta_3(t) +
# f_1(xte1, xte2) + f_2(xsmoo, t) + \beta_4 xconst
data2 <- pffrSim(scenario="all", n=200)
t <- attr(data2, "yindex")
s <- attr(data2, "xindex")
m2 <- pffr(Y ~ ff(X1, xind=s) + #linear function-on-function
xlin + #varying coefficient term
c(te(xte1, xte2)) + #bivariate smooth term in xte1 & xte2, const. over Y-index
s(xsmoo) + #smooth effect of xsmoo varying over Y-index
c(xconst), # linear effect of xconst constant over Y-index
yind=t,
data=data2)
summary(m2)
plot(m2)
str(coef(m2))
# convenience functions:
preddata <- pffrSim(scenario="all", n=20)
str(predict(m2, newdata=preddata))
str(predict(m2, type="terms"))
cm2 <- coef(m2)
cm2$pterms
str(cm2$smterms, 2)
str(cm2$smterms[["s(xsmoo)"]]$coef)
#############################################################################
# sparse data (80\% missing on a regular grid):
set.seed(88182004)
data3 <- pffrSim(scenario=c("int", "smoo"), n=100, propmissing=0.8)
t <- attr(data3, "yindex")
m3.sparse <- pffr(Y ~ s(xsmoo), data=data3$data, ydata=data3$ydata, yind=t)
summary(m3.sparse)
plot(m3.sparse,pages=1)
}
}
\references{
Ivanescu, A., Staicu, A.-M., Scheipl, F. and Greven, S. (2015).
Penalized function-on-function regression. Computational Statistics,
30(2):539--568. \url{https://biostats.bepress.com/jhubiostat/paper254/}
Scheipl, F., Staicu, A.-M. and Greven, S. (2015). Functional Additive Mixed
Models. Journal of Computational & Graphical Statistics, 24(2): 477--501.
\url{https://arxiv.org/abs/1207.5947}
F. Scheipl, J. Gertheiss, S. Greven (2016): Generalized Functional Additive Mixed Models,
Electronic Journal of Statistics, 10(1), 1455--1492.
\url{https://projecteuclid.org/journals/electronic-journal-of-statistics/volume-10/issue-1/Generalized-functional-additive-mixed-models/10.1214/16-EJS1145.full}
}
\seealso{
\code{\link[mgcv]{smooth.terms}} for details of \code{mgcv} syntax
and available spline bases and penalties.
}
\author{
Fabian Scheipl, Sonja Greven
}
|
52e8f1e2b0bf49b0568c35e94244c8cc0a3d63c7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PabonLasso/examples/BOR1.Rd.R
|
7d7dd23d64e4895984f1b0a1f69fb8c2884be33d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
BOR1.Rd.R
|
library(PabonLasso)
### Name: BOR1
### Title: Is a vector of Bed Occupation Rates at the beginning of study
### Aliases: BOR1
### Keywords: datasets
### ** Examples
# Bed Occupation Rates (percent) at the beginning of the study; 16 observations.
# Uses `<-` for assignment (R convention) rather than `=`.
BOR1 <- c(72.54, 48.86, 42.77, 40.81, 60, 28.61, 20.29, 12.84, 100, 47.07,
          78.51, 45, 49, 20, 88, 90)
|
e20c7adff4e1572d62e1e476b08c5c09e0bde7e7
|
91dca679488c1a409cdbad5f7df34e3430f47ab2
|
/lib/visual.lib.R
|
71ba261a64dd8a3220adbf09869106dbaaa7051e
|
[] |
no_license
|
joelescobar01/investmentStrategy
|
3eb81247690c2c25316cc1b7583a88329acfe738
|
e698282c4f17ea7dee9a9979cf800af7bdd78220
|
refs/heads/master
| 2023-01-07T17:10:53.886611
| 2020-11-11T15:02:02
| 2020-11-11T15:02:02
| 256,921,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,680
|
r
|
visual.lib.R
|
library(TTR)
library(tidyverse)
library(tidyquant)
library(ggplot2)
# Build a coord_cartesian() layer that zooms the x axis to the last `n` days
# of `stockTbbl` (data outside the window are kept, just not shown).
# Assumes `stockTbbl$date` is sorted so nth(..., n = -1) is the latest date.
# days() presumably comes from lubridate via tidyquant — confirm.
zoom.last_n <- function(stockTbbl, n = 14) {
  end_date <- nth(stockTbbl$date, n = -1)
  coord_cartesian(xlim = c(end_date - days(n), end_date))
}
# Theme fragment that removes panel borders, spacing and plot margins so the
# plotting area fills all available space.
max.plot.space <- function() {
  theme(
    panel.border = element_blank(),
    panel.spacing = unit(0, "cm"),
    plot.margin = margin(t = 0, r = 0, b = 0, l = 0, unit = "pt")
  )
}
# Date x-axis with monthly breaks and compact labels (short time spans).
scale.date.axis.small <- function() {
  scale_x_date(
    breaks = scales::breaks_width("1 months"),
    labels = scales::label_date_short()
  )
}
# Date x-axis with six-month breaks and compact labels (longer time spans).
scale.date.axis.large <- function() {
  scale_x_date(
    breaks = scales::breaks_width("6 months"),
    labels = scales::label_date_short()
  )
}
# Date x-axis with yearly breaks, labelled as two-digit years ('20, '21, ...).
scale.date.axis.yearly <- function() {
  scale_x_date(
    breaks = scales::breaks_width("1 years"),
    labels = scales::label_date("'%y")
  )
}
# Continuous y-axis formatted as dollar amounts with ~8 breaks.
scale.price.axis <- function() {
  scale_y_continuous(
    breaks = scales::breaks_extended(8),
    labels = scales::label_dollar()
  )
}
# Continuous x-axis formatted as dollar amounts with ~8 breaks.
scale.price.xaxis <- function() {
  scale_x_continuous(
    breaks = scales::breaks_extended(8),
    labels = scales::label_dollar()
  )
}
# Continuous y-axis formatted as percentages with ~16 breaks.
scale.percent.axis <- function() {
  scale_y_continuous(
    breaks = scales::breaks_extended(16),
    labels = scales::label_percent()
  )
}
# Continuous x-axis formatted as percentages with ~8 breaks.
scale.percent.xaxis <- function() {
  scale_x_continuous(
    breaks = scales::breaks_extended(8),
    labels = scales::label_percent()
  )
}
|
aef6ee37c61a54cf6ca09f0ea4232f174292fdd9
|
c45fefaa8779071f3875cc9f43ea2e0b9dfedcd4
|
/tests/testthat/test-formats.R
|
463cb9b7db56f83272439be0bd2232c67e1cac44
|
[] |
no_license
|
noamross/texttable
|
e937a2a9b703b6505158c71b7a08f10703b8d4df
|
d1b17d3dcda4c47047863096154cb8fc54643a3b
|
refs/heads/master
| 2016-09-14T01:32:45.610086
| 2016-04-19T15:34:40
| 2016-04-19T15:34:40
| 56,542,149
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,061
|
r
|
test-formats.R
|
# Test all the file types that pandoc lists as inputs.
test_files <- list.files(pattern = "table.*[^R]$")
# Use !grepl() rather than negative indexing with grep(): when grep() finds
# no matches it returns integer(0), and x[-integer(0)] selects *nothing*,
# silently emptying the test-file list.
test_files <- test_files[!grepl("(odt|latex|haddock)", test_files)]
cat("\nPandoc version:", as.character(rmarkdown::pandoc_version()), "\n\n")
context("Format checking")
# For every sample table file, check that texttable() yields a non-empty
# list whose components are all data frames.
for (path in test_files) {
  ext <- tools::file_ext(path)
  parsed <- texttable(path)
  test_that(paste("Importing works from", ext, "format"), expect_true(is.list(parsed)))
  test_that(paste(ext, "format gives list of length >= 1"), expect_true(length(parsed) >= 1))
  test_that(paste("All components of list are data frames for ", ext), expect_true(all(sapply(parsed, is.data.frame))))
}
context("Import type checking")

# texttable() should also accept a URL pointing at a markdown document.
test_that("Importing works from a URL", {
  tables_url <- "https://raw.githubusercontent.com/jgm/pandoc/master/tests/tables.markdown"
  result <- texttable(tables_url)
  expect_true(is.list(result))
  expect_true(length(result) >= 1)
  expect_true(all(sapply(result, is.data.frame)))
})
# texttable() should parse pipe tables passed directly as a character string
# (two tables inside one string).
test_that("Importing works from character", {
  imported = texttable("
| Right | Left | Center |
|------:|:-----|:------:|
|12|12|12|
|123|123|123|
|1|1|1|
| | | |
|------:|:-----|:------:|
|12|12|12|
|123|123|123|
|1|1|1|
")
  expect_true(is.list(imported))
  expect_true(length(imported) >= 1)
  expect_true(all(sapply(imported, is.data.frame)))
})
# Same as the plain-character test above, but the input string is expected to
# tolerate leading whitespace before the table lines.
test_that("Importing works from character with leading whitespace", {
  sample_text = "
| Right | Left | Center |
|------:|:-----|:------:|
|12|12|12|
|123|123|123|
|1|1|1|
| | | |
|------:|:-----|:------:|
|12|12|12|
|123|123|123|
|1|1|1|
"
  imported = texttable(sample_text)
  expect_true(is.list(imported))
  expect_true(length(imported) >= 1)
  expect_true(all(sapply(imported, is.data.frame)))
})
|
1cfff138c29c2591aeea1fcccb10016430c46615
|
9f1a721907110eae0ea02c0ff9f91e9c28ccdade
|
/panels/weather/weather.r
|
41b06f8b44d984efb1b0e9a6cb8835ada6530a7b
|
[] |
no_license
|
DavidRickmann/ShipInterface
|
3ee88e5065cb62846da2e6937d722e2abb9b6316
|
a24ca461acceab0ea8a9fc7391e6d6421eb84310
|
refs/heads/master
| 2022-11-11T17:22:51.181628
| 2020-06-30T22:27:00
| 2020-06-30T22:27:00
| 273,696,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,329
|
r
|
weather.r
|
library(lcars)
library(ggplot2)
#This wrapper is required until shiny v1.5 is released.
# Back-port of shiny::moduleServer() (shipped with shiny >= 1.5): forwards to
# the older callModule(), which takes the module function first and the id
# second — hence the swapped argument order.
moduleServer <- function(id, module) {
  callModule(module, id)
}
#write a standard nav button function
#get the colours from the theme?
#allow assignation of standard colours in the config
# UI half of the weather shiny module (pair with server function
# `weatherpanel`, called with the same `id`). Lays out an LCARS-styled page:
# a header box and a body box containing the temperature plot (full width)
# and the precipitation plot (right half), plus a "Home" button in the left
# rail of the lower box.
#
# @param id module id; all output ids are namespaced via NS(id) so the server
#   half can address them as output$text, output$plot1, etc.
weatherpanelUI <- function(id) {
  ns <- NS(id)
  # "Home" navigation button shown in the lower box's left rail.
  # NOTE(review): the click behavior is not wired up in this file — confirm
  # where input "Home" is handled.
  home <- lcarsButton(
    "Home",
    "Home",
    icon = NULL,
    color = "neon-carrot",
    hover_color = "mariner",
    width = 150
  )
  lcarsPage(
    # Header box: server renders the headline text into output "text".
    fluidRow(
      lcarsBox(
        title = NULL,
        subtitle = "Atmospheric Conditions",
        corners = 4,
        sides = c(3,4),
        left_inputs = NULL,
        right_inputs = NULL,
        color = "neon-carrot",
        side_color = "neon-carrot",
        title_color = "mariner",
        subtitle_color = "mariner",
        title_right = TRUE,
        subtitle_right = TRUE,
        clip = FALSE,
        width_left = 150,
        width_right = 60,
        fluidRow(
          htmlOutput(ns("text"))
        )
      )
    ),
    # Body box: temperature title/plot on top, precipitation title/plot in
    # the right column below.
    fluidRow(
      lcarsBox(
        title = NULL,
        subtitle = NULL,
        corners = c(1),
        sides = c(1,4),
        left_inputs = home,
        right_inputs = NULL,
        color = "neon-carrot",
        side_color = "neon-carrot",
        title_color = "mariner",
        subtitle_color = "mariner",
        title_right = TRUE,
        subtitle_right = TRUE,
        clip = FALSE,
        width_left = 150,
        width_right = 60,
        fluidRow(
          htmlOutput(ns("tempTitle")),
          plotOutput(ns("plot1"))
        ),
        fluidRow(
          column(6),
          column(6,
            htmlOutput(ns("rainTitle")),
            plotOutput(ns("plot2"))
          )
        )
      )
    )
  )
}
# Server half of the weather shiny module: fetches a location forecast from
# the Norwegian Met Institute API (api.met.no) and renders the header text,
# an air-temperature line plot (plot1) and a 24-hour precipitation line plot
# (plot2) into the outputs declared by weatherpanelUI.
weatherpanel <- function(id) {
  moduleServer(id, function(input, output, session) {
    library(waiter)
    library(weatherr)
    library(tidyverse)
    library(jsonlite)
    library(dplyr)
    # Header box content.
    output$text <- renderUI({HTML("<br><center><h1> ACCESSING REMOTE SENSOR FEEDS </h1></center>") })
    # Get weather data.
    # Yeah, this should be in a function and location should be a variable, but
    # this'll do to get the system back up and running.
    # Also needs error handling on the API call (a failed fetch currently
    # aborts the whole module).
    # NOTE(review): location (lat/lon/altitude) is hard-coded in the URL.
    weather_URL <- 'http://api.met.no/weatherapi/locationforecast/2.0/complete?lat=51.484940&lon=-0.301890&altitude=28'
    weather <- jsonlite::fromJSON(weather_URL)
    # We might want to separate out the instant/12hour/6hour/1hour data, but
    # for now just flatten it all and sort it out later.
    weather <- jsonlite::flatten(weather$properties$timeseries)
    # Clean it up a bit: positional rename — assumes the API returns exactly
    # these 20 columns in this order. NOTE(review): brittle; confirm against
    # the current API response schema.
    colnames(weather) <- c("time","pressure","temp_air",
                           "cloudcover","cloudcover_high","cloudcover_low","cloudcover_medium",
                           "dewpoint","fog","humidity","uv","winddirection","windspeed",
                           "symbol_12hr","symbol_1hr","precip_1hr",
                           "symbol_6hr","temp_air_max_6hr","temp_air_min_6hr","precip_6hr")
    weather$time <- parse_date_time(weather$time,"Ymd HMS")
    # Body box content: temperature forecast plot.
    output$tempTitle <- renderUI({HTML("AIR TEMPERATURE FORECAST") })
    output$plot1 <- renderPlot(ggplot(weather, aes(x=time, y=temp_air)) +
                                 geom_line(aes(color = "#FFCC99")) +
                                 theme_lcars_dark() +
                                 theme(legend.position = "none",
                                       axis.title.x=element_blank(),
                                       axis.title.y=element_blank()
                                 )
    )
    # First 24 rows = presumably the next 24 hourly precipitation values —
    # confirm the API always returns hourly data at the head of the series.
    rain <- weather %>% select(time, precip_1hr) %>% head(24)
    output$rainTitle <- renderUI({HTML("PRECIPITATION FORECAST") })
    output$plot2 <- renderPlot(ggplot(rain, aes(x=time, y=precip_1hr)) +
                                 geom_line(aes(color = "#FFCC99")) +
                                 theme_lcars_dark() +
                                 theme(legend.position = "none",
                                       axis.title.x=element_blank(),
                                       axis.title.y=element_blank()
                                 ) )
  })
}
# Convenience launcher: wraps the weather module (UI + server) in a
# standalone shiny app.
panelApp <- function() {
  page <- lcarsPage(
    weatherpanelUI("wp1")
  )
  srv <- function(input, output, session) {
    weatherpanel("wp1")
  }
  shinyApp(page, srv)
}
panelApp()
|
358a2f16dc3f169d3ff19cf1fedb87a18e542603
|
0fd8fdaad36db1be77563597f1efef5044aabb92
|
/ui.R
|
9a45fd2df9a61245cfd948e103fdc4ec83d806b7
|
[] |
no_license
|
nkdhruw/ShinyXploreR
|
1ae05eba67e76ef01be0bf25ff457f8467989926
|
dd4c6994fbebe7f3e8303ece6683191d57225033
|
refs/heads/master
| 2020-06-12T01:11:21.846824
| 2019-06-30T17:50:43
| 2019-06-30T17:50:43
| 194,147,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,503
|
r
|
ui.R
|
library(shiny)
library(shinymaterial)
library(DT)
# Top-level UI for the XploreR data-exploration app, built with
# shinymaterial. Four tabs: raw data, descriptive summaries, single-feature
# histograms, and split (grouped) histograms. Dropdowns created with
# choices = c('a'='a') are placeholders — presumably repopulated by the
# server with real column names; confirm in server.R.
ui <- material_page(
  title = "XploreR",
  nav_bar_color = "teal lighten-1",
  # Side navigation: example-dataset picker.
  material_side_nav(
    fixed = TRUE,
    image_source = 'side_bar.jpg',
    material_row(
      material_column(
        width = 10,
        shiny::tags$h6('Load example dataset'),
        material_dropdown(
          input_id = "example_dataset",
          label = "",
          choices = c('Titanic'='titanic','Housing'='housing','Counties'='counties','Iris'='iris')
        )
      )
    )
  ),
  # Tab bar: ids must match the material_tab_content blocks below.
  material_tabs(
    tabs = c(
      'Input Data' = 'input_data_tab',
      'Data Overview' = 'data_overview_tab',
      'Feature distribution' = 'feature_distribution_tab',
      'Explore relationships' = 'explore_relationship_tab'
    )
  ),
  # Tab 1: raw input data table.
  material_tab_content(
    tab_id = 'input_data_tab',
    material_row(
      material_column(width = 12, DTOutput('inputDataTable'))
    )
  ),
  # Tab 2: descriptive statistics, numeric and categorical features
  # in separate cards.
  material_tab_content(
    tab_id = 'data_overview_tab',
    material_row(
      material_column(
        width = 12,
        material_card(
          title = 'Numeric features description',
          divider = TRUE,
          DTOutput('numDataDescriptionTable')
        )
      )
    ),
    material_row(
      material_column(
        width = 12,
        material_card(
          title = 'Categorical features description',
          divider = TRUE,
          DTOutput('charDataDescriptionTable')
        )
      )
    )
  ),
  # Tab 3: histogram of a single selected feature.
  material_tab_content(
    tab_id = 'feature_distribution_tab',
    material_card(
      title = 'Histogram',
      divider = TRUE,
      #uiOutput('data_exploration_UI')
      #tags$h5('Select Feature'),
      material_dropdown(
        input_id = 'select_feature_histogram',
        label = '',
        choices = c('a'='a')
      ),
      plotlyOutput('featureHistogram')
    )
  ),
  # Tab 4: distribution of one feature split by another.
  material_tab_content(
    tab_id = 'explore_relationship_tab',
    material_card(
      title = 'Split-Feature-Distribution',
      divider = TRUE,
      material_row(
        material_column(
          width = 6,
          material_dropdown(
            input_id = 'split_histogram_feature',
            label = '',
            choices = c('a'='a')
          )
        ),
        material_column(
          width = 6,
          material_dropdown(
            input_id = 'split_histogram_splitBy',
            label = '',
            choices = c('a'='a')
          )
        )
      ),
      material_row(
        plotlyOutput('splitFeatureHist')
      )
    )
  )
)
|
117f9b229d0c3b38ef5d072fc0a3786e33577ede
|
4be23c83eeedc01670ee6e79c295ff9b2ae187d3
|
/man/split_rows_by.Rd
|
6d9a6a9cb1417341578699dc190b6065278f3539
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
jcheng5/rtables
|
8b0f708d791118189cc76287945acfcacc3c0dd9
|
5b552c69488faa064e2209b4ff3291ba39feaee4
|
refs/heads/main
| 2023-08-12T22:53:04.445497
| 2021-09-23T00:11:23
| 2021-09-23T00:11:23
| 411,396,687
| 0
| 0
|
NOASSERTION
| 2021-09-30T19:13:22
| 2021-09-28T18:28:42
| null |
UTF-8
|
R
| false
| true
| 4,230
|
rd
|
split_rows_by.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colby_constructors.R
\name{split_rows_by}
\alias{split_rows_by}
\title{Add Rows according to levels of a variable}
\usage{
split_rows_by(
lyt,
var,
labels_var = var,
split_label = var,
split_fun = NULL,
format = NULL,
nested = TRUE,
child_labels = c("default", "visible", "hidden"),
label_pos = "hidden",
indent_mod = 0L
)
}
\arguments{
\item{lyt}{layout object pre-data used for tabulation}
\item{var}{string, variable name}
\item{labels_var}{string, name of variable containing labels to be displayed for the values of \code{var}}
\item{split_label}{string. Label string to be associated with the table generated by the split. Not to be confused with labels assigned to each child (which are based on the data and type of split during tabulation).}
\item{split_fun}{function/NULL. custom splitting function}
\item{format}{FormatSpec. Format associated with this split. Formats can be declared via strings (\code{"xx.x"}) or function. In cases such as \code{analyze} calls, they can character vectors or lists of functions.}
\item{nested}{boolean. Whether to nest this split within the previous one; if \code{FALSE}, add it as a new top-level split (defining a new subtable directly under root). Defaults to \code{TRUE}.}
\item{child_labels}{string. One of \code{"default"}, \code{"visible"}, \code{"hidden"}. What should the display behavior be for the labels (ie label rows) of the children of this split. Defaults to \code{"default"} which flags the label row as visible only if the child has 0 content rows.}
\item{label_pos}{character(1). Location the variable label should be displayed, Accepts hidden (default for non-analyze row splits), visible, topleft, and - for analyze splits only - default. For analyze calls, \code{default} indicates that the variable
should be visible if and only if multiple variables are analyzed at the same level of nesting.}
\item{indent_mod}{numeric. Modifier for the default indent position for the structure created by this function(subtable, content table, or row) \emph{and all of that structure's children}. Defaults to 0, which corresponds to the unmodified default behavior.}
}
\value{
A \code{PreDataTableLayouts} object suitable for passing to further layouting functions, and to \code{build_table}.
}
\description{
Add Rows according to levels of a variable
}
\note{
If \code{var} is a factor with empty unobserved levels and
\code{labels_var} is specified, it must also be a factor
with the same number of levels as \code{var}. Currently the
error that occurs when this is not the case is not very informative,
but that will change in the future.
}
\examples{
l <- basic_table() \%>\%
split_cols_by("ARM") \%>\%
split_rows_by("RACE", split_fun = drop_split_levels) \%>\%
analyze("AGE", mean, var_labels = "Age", format = "xx.xx")
build_table(l, DM)
basic_table() \%>\%
split_cols_by("ARM") \%>\%
split_rows_by("RACE") \%>\%
analyze("AGE", mean, var_labels = "Age", format = "xx.xx") \%>\%
build_table(DM)
l <- basic_table() \%>\%
split_cols_by("ARM") \%>\%
split_cols_by("SEX") \%>\%
summarize_row_groups(label_fstr = "Overall (N)") \%>\%
split_rows_by("RACE", split_label = "Ethnicity", labels_var = "ethn_lab",
split_fun = drop_split_levels) \%>\%
summarize_row_groups("RACE", label_fstr = "\%s (n)") \%>\%
analyze("AGE", var_labels = "Age", afun = mean, format = "xx.xx")
l
library(dplyr)
DM2 <- DM \%>\%
filter(SEX \%in\% c("M", "F")) \%>\%
mutate(
SEX = droplevels(SEX),
gender_lab = c("F" = "Female", "M" = "Male",
"U" = "Unknown", "UNDIFFERENTIATED" = "Undifferentiated")[SEX],
ethn_lab = c(
"ASIAN" = "Asian",
"BLACK OR AFRICAN AMERICAN" = "Black or African American",
"WHITE" = "White",
"AMERICAN INDIAN OR ALASKA NATIVE" = "American Indian or Alaska Native",
"MULTIPLE" = "Multiple",
"NATIVE HAWAIIAN OR OTHER PACIFIC ISLANDER" =
"Native Hawaiian or Other Pacific Islander",
"OTHER" = "Other", "UNKNOWN" = "Unknown"
)[RACE]
)
build_table(l, DM2)
}
\author{
Gabriel Becker
}
|
802c00a1fd990de92b0141643a0b96aa802c743d
|
b67ce7f77bca2b817a80c0888897655e426b88cc
|
/R/bblocks.R
|
ec8721a19df917041bb45dabfbb0cd217709ead8
|
[
"MIT"
] |
permissive
|
saragong/bblocks
|
babef5d3648ada67549f020cb624045fc8ea12e6
|
a604fa99c9f7520f0c02f688964217391565fff6
|
refs/heads/master
| 2020-07-13T17:37:16.170420
| 2019-08-29T21:11:18
| 2019-08-29T21:11:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,866
|
r
|
bblocks.R
|
# bblocks.R
#' Assign observations to covariate-defined blocks
#'
#' Builds a block id for every row of \code{data} by matching covariate
#' values against the specifications in \code{blockby}, then merges blocks
#' smaller than \code{min_block_size} by progressively stripping the most
#' recently added covariate tags. If no assignment reaches the requested
#' minimum size, the minimum is raised one unit at a time, up to
#' \code{nrow(data) / 2}.
#'
#' @param data data.frame containing the blocking covariates.
#' @param min_block_size smallest acceptable number of rows per block.
#' @param blockby named list: names are covariate column names in \code{data};
#'   each element is a vector of covariate values defining one specification.
#'   A covariate may appear several times with different value sets.
#' @return Character vector of block ids (one per row of \code{data}), or
#'   \code{NA} if no blocking of the requested minimum size can be found.
#' @export
bblocks <- function(data, min_block_size, blockby) {

  # build blocks --------------------
  data$block_id <- ""
  num_tags <- 0
  covariates <- unique(names(blockby))
  num_obs <- nrow(data)
  num_covar <- length(covariates)

  # iterate through covariates; seq_len() is safe when blockby is empty,
  # unlike 1:num_covar which would iterate over c(1, 0)
  for (i in seq_len(num_covar)) {
    num_tags <- num_tags + 1
    covar <- covariates[i]
    specif_list <- blockby[names(blockby) == covar]

    # iterate through the specifications for this covariate; rows matching a
    # specification get its tag appended to their block id
    for (j in seq_along(specif_list)) {
      specif_tag <- paste(num_tags,
                          covar,
                          paste(unlist(specif_list[[j]]), collapse = "_"),
                          sep = "_")
      data$block_id <- ifelse(data[[covar]] %in% specif_list[[j]],
                              paste(data$block_id, specif_tag, sep = "_"),
                              data$block_id)
    }

    # rows whose value is NA or matches no specification get an "other" tag
    data$block_id <- ifelse(is.na(data[[covar]])
                            | !(data[[covar]] %in% unlist(specif_list, use.names = FALSE)),
                            paste(data$block_id,
                                  paste(num_tags, covar, "other", sep = "_"),
                                  sep = "_"),
                            data$block_id)
  }

  # merge blocks --------------------
  # check whether any block is below the requested minimum size
  block_freq_table <- as.data.frame(table(data$block_id, dnn = c("block_id")))
  blocks_to_merge <- subset(block_freq_table, block_freq_table$Freq < min_block_size)
  num_blocks_to_merge <- nrow(blocks_to_merge)

  # keep trying block sizes until finding the smallest size that leaves no
  # leftovers; block_id_long preserves the fully-tagged ids so the search can
  # restart from scratch for each candidate size
  data$block_id_long <- data$block_id
  try_block_size <- min_block_size
  lowest_tag <- num_tags

  while (num_blocks_to_merge > 0 && try_block_size <= num_obs / 2) {

    while (num_blocks_to_merge > 0 && lowest_tag > 1) {
      # strip the highest remaining tag from undersized blocks, coarsening
      # them into their parent grouping
      pattern_to_strip <- paste0("_", lowest_tag, "_.*")
      data$block_id <- ifelse(data$block_id %in% blocks_to_merge$block_id,
                              gsub(pattern_to_strip, "", data$block_id),
                              data$block_id)
      lowest_tag <- lowest_tag - 1

      # recompute which blocks are still too small
      block_freq_table <- as.data.frame(table(data$block_id, dnn = c("block_id")))
      blocks_to_merge <- subset(block_freq_table, block_freq_table$Freq < try_block_size)
      num_blocks_to_merge <- nrow(blocks_to_merge)
    }

    # reset the search if the last minimum block size had leftovers
    if (num_blocks_to_merge > 0) {
      print(paste("Minimum block size ",
                  try_block_size,
                  " is too small. Trying size ",
                  try_block_size + 1,
                  "...", sep = ""))
      data$block_id <- data$block_id_long
      lowest_tag <- num_tags
      try_block_size <- try_block_size + 1
    }
  }

  if (try_block_size <= num_obs / 2) {
    num_final_blocks <- length(unique(data$block_id))
    print(paste("Successfully created ",
                num_final_blocks,
                " blocks of minimum size ",
                try_block_size,
                ".",
                sep = ""))
    return(data$block_id)
  }

  # NOTE: reached when try_block_size exceeded num_obs / 2 — also triggers
  # when min_block_size > num_obs / 2 even if no merging was needed
  # (preserves the original behavior).
  print(paste("Unable to sort data into blocks of minimum size ",
              min_block_size,
              ". Try reordering the specifications or using broader specifications.",
              sep = ""))
  NA
}
|
0206155be22942b5a3e7a04bc3dc40cc77d61723
|
4627cdc23e3f0d22867110ed1215ff85754de6c2
|
/ads_romania.R
|
2b004b1f8b49446d1324f214550e226ba2bf1cb0
|
[] |
no_license
|
ethanarsht/romania-dashboard
|
94443667b066b7dda61fbd5f2f436f1b36f9e78c
|
f27be0283bf0f84237440cfab18b0cc7a3ccac08
|
refs/heads/master
| 2023-02-18T04:58:16.834513
| 2021-01-15T17:37:47
| 2021-01-15T17:37:47
| 323,613,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,167
|
r
|
ads_romania.R
|
library(Radlibrary)
library(aws.s3)
library(config)
library(readr)
# Read AWS credentials from the config file. Named `aws_config` rather than
# `c` (as in the original) so the base concatenation function base::c() is
# not shadowed at the top level of the script.
aws_config <- config::get('aws')

# Export the credentials so aws.s3 can authenticate.
Sys.setenv(
  "AWS_ACCESS_KEY_ID" = aws_config$id,
  "AWS_SECRET_ACCESS_KEY" = aws_config$password,
  "AWS_DEFAULT_REGION" = aws_config$region
)

# Facebook Ad Library query: all political/issue ads reaching Romania from a
# fixed set of page ids, delivered since 2020-12-01.
query <- adlib_build_query(
  ad_active_status = 'ALL',
  ad_reached_countries = 'RO',
  ad_type = c("POLITICAL_AND_ISSUE_ADS"),
  search_page_ids = c("181375448581167",
                      '291177524360702',
                      '102601499822802',
                      '1058116737612843',
                      '1903580739951981',
                      '581911828514783',
                      '621574984950400',
                      '2212856535602171'
  ),
  ad_delivery_date_min = "2020-12-01"
)
response <- adlib_get(params = query)

# NOTE(review): %>%, as_tibble(), mutate(), bind_rows() and distinct() come
# from dplyr/tibble, which are never attached in this script — presumably
# made available via Radlibrary's imports; confirm or add library(dplyr).
df_response <- as_tibble(response) %>%
  mutate(
    page_id = as.numeric(page_id),
    adlib_id = as.numeric(adlib_id)
  )

# Append the new ads to the S3-hosted archive, de-duplicating exact repeats.
old_ads <- s3read_using(read_csv, object = 'romania_ads_archive.csv', bucket = 'iri-romania')
new_ads <- old_ads %>%
  bind_rows(df_response) %>%
  distinct()
s3write_using(new_ads, write_csv, object = 'romania_ads_archive.csv', bucket = 'iri-romania')
|
3e803f9483b1ded6b56f3d9c2375ac4412ff49f1
|
8c0087ce6ae51911ca2954bb316a060978a60cea
|
/utils/waic.r
|
5a9b82c42126597d1e7ac05acbc9bcd3d3a834e2
|
[] |
no_license
|
davide-burba/Bayesian_Project
|
3e904fe1a34493f96788fe231a13a23e1b431cee
|
b475d6e114565fd0261883ff1a4212f94ab3b659
|
refs/heads/master
| 2021-10-09T13:08:59.290225
| 2018-12-28T15:56:44
| 2018-12-28T15:56:44
| 111,917,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 978
|
r
|
waic.r
|
#
# Evaluates the WAIC (and a crude AIC-like score) from MCMC output; doesn't
# work well, use something else preferably.
#
# Expects in the global environment (created by the sampler script):
#   data.out     - matrix/data.frame of posterior draws (rows = iterations)
#   Patient      - patient id for every observation (assumed sorted so each
#                  patient's rows are contiguous)
#   X            - fixed-effects design matrix
#   RNFL_average - response vector
#
# EDITABLE VALUES:
n <- 5000                  # number of posterior draws to use
names(data.out)[1059]      # sanity check: prints the name of the sigma column
beta_pos_interval <- 1:12  # columns of data.out holding the beta draws
sigma_pos <- 1059          # column of data.out holding the sigma draws

p <- length(unique(Patient))
Mfinal <- matrix(0, n, p)  # per-draw, per-patient log-likelihoods
Y <- RNFL_average
# d <- dim(Z)[2]  # only needed for the commented-out random-effects part below

numerosity <- as.integer(table(Patient))

# kk[j] + 1 .. kk[j + 1] index the rows belonging to patient j.
# Fixed from the original rep(0, length(unique(Patient) + 1)), where the "+ 1"
# sat inside length() and kk came out one element short (it only worked
# because R silently extends vectors on out-of-bounds assignment); the
# original 2:p loop also failed for p == 1.
kk <- c(0, cumsum(numerosity))
kk[p + 1] <- length(Patient)  # as in the original: last offset = total rows

library(mnormt)
for (ii in seq_len(n)) {
  betas <- data.out[ii, beta_pos_interval]
  for (jj in seq_len(p)) {
    rows <- (kk[jj] + 1):kk[jj + 1]
    Xi <- X[rows, ]
    # Zi <- Z[rows, ]
    # bi <- data.out[ii, ((jj - 1) * d + 1):(jj * d)]
    Yi <- Y[rows]
    # iid Gaussian errors: covariance = sigma * identity
    Sig <- diag(rep(1, numerosity[jj])) * data.out[ii, sigma_pos]
    m <- as.vector(Xi %*% t(betas))  # + Zi %*% t(bi)
    Mfinal[ii, jj] <- dmnorm(Yi, m, Sig, log = TRUE)
  }
}

library(loo)
ww <- waic(Mfinal)
ww
loo(Mfinal)

# Crude AIC-like score: sum of per-patient mean log-likelihoods.
AIC <- sum(colMeans(Mfinal))
AIC
|
6f0c29646983572e41c2a19546e4b5cc0adfb177
|
f9712c631d00c7a6369c593b823002f3962ef2a3
|
/man/BuildRFClassifier.Rd
|
5a37a52a24f34389c2d3133f3486911ce7c06499
|
[] |
no_license
|
roryk/seurat
|
2c3ee6601aaede077d327bf852fd2fde4ca49c8e
|
e1eae2da82ada2211e84fbc471615e8d3921b539
|
refs/heads/master
| 2020-12-11T01:45:01.591510
| 2017-01-11T21:32:08
| 2017-01-11T21:32:08
| 54,148,919
| 0
| 0
| null | 2016-03-17T20:22:54
| 2016-03-17T20:22:54
| null |
UTF-8
|
R
| false
| true
| 703
|
rd
|
BuildRFClassifier.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seurat.R
\name{BuildRFClassifier}
\alias{BuildRFClassifier}
\title{Build Random Forest Classifier}
\usage{
BuildRFClassifier(object, training.genes = NULL, training.classes = NULL,
verbose = TRUE, ...)
}
\arguments{
\item{object}{Seurat object on which to train the classifier}
\item{training.genes}{Vector of genes to build the classifier on}
\item{training.classes}{Vector of classes to build the classifier on}
\item{verbose}{Additional progress print statements}
\item{...}{additional parameters passed to ranger}
}
\value{
Returns the random forest classifier
}
\description{
Train the random forest classifier
}
|
95477a2f8753e61c4a4750bf62af1ef866663976
|
62464fa0d0f7c4d07b61216c25da80d42612b49f
|
/R/PoisMixClusWrapper.R
|
ac391284bcd18ca3d4dd4f51a3ce6eab8c19ecf7
|
[] |
no_license
|
melinaGALL/melinaGALL.github.io
|
beed0cd99d903319ec1d010c29100cd3710352d9
|
734219e1263e9c1a7bdffc5cf0a17e21c9af5b2e
|
refs/heads/master
| 2020-05-15T09:38:34.780905
| 2013-09-24T16:07:06
| 2013-09-24T16:07:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,962
|
r
|
PoisMixClusWrapper.R
|
## PoisMixClusWrapper
## Fit PoisMixClus for every g in gmin, ..., gmax, collect all fits, and
## select the best model under each of BIC, ICL, SICL and MIL.
##
## Interface unchanged. Fixes relative to the previous version:
##   * init.runs is now forwarded to PoisMixClus (it was hard-coded to 5 in
##     every call, so the user's argument was silently ignored);
##   * with split.init = TRUE the previous fit is looked up by its position
##     (index - 1) in all.results, not by K - 1, which was only correct when
##     gmin == 1;
##   * ties in a selection criterion now resolve to the first maximum rather
##     than raising a subscript-out-of-bounds error from `[[`.
PoisMixClusWrapper <- function(y, u, gmin = 1, gmax, conds, lib.size = TRUE, lib.type = "TMM",
    gmin.init.type = "small-em", init.runs = 5, split.init = TRUE, alg.type = "EM", cutoff = 10e-6, iter = 1000,
    fixed.lambda = NA, equal.proportions = FALSE, verbose = FALSE)
{
    all.results <- vector("list", length = gmax - gmin + 1)
    names(all.results) <- paste("g=", seq(gmin, gmax, 1), sep = "")

    ## First model (g = gmin): plain small-EM initialization.
    cat("Running g =", gmin, "...\n")
    all.results[[1]] <- PoisMixClus(y = y, u = u, g = gmin, lib.size = lib.size,
        lib.type = lib.type, conds = conds,
        init.type = gmin.init.type,
        alg.type = alg.type, cutoff = cutoff, iter = iter,
        fixed.lambda = fixed.lambda, equal.proportions = equal.proportions,
        prev.labels = NA, prev.probaPost = NA, init.runs = init.runs,
        verbose = verbose)

    ## Remaining models: either seed each fit by splitting the previous
    ## solution (split.init = TRUE) or re-initialize from scratch each time.
    index <- 2
    if (gmax > gmin) {
        if (split.init == TRUE) {
            for (K in seq((gmin + 1), gmax, 1)) {
                cat("Running g =", K, "...\n")
                prev.labels <- all.results[[index - 1]]$labels
                prev.probaPost <- all.results[[index - 1]]$probaPost
                all.results[[index]] <- PoisMixClus(y = y,
                    u = u, g = K, lib.size = lib.size,
                    lib.type = lib.type, conds = conds,
                    init.type = "split.small-em",
                    alg.type = alg.type, cutoff = cutoff,
                    iter = iter,
                    fixed.lambda = fixed.lambda,
                    equal.proportions = equal.proportions,
                    prev.labels = prev.labels,
                    prev.probaPost = prev.probaPost,
                    init.runs = init.runs, verbose = verbose)
                index <- index + 1
            }
        }
        if (split.init == FALSE) {
            for (K in seq((gmin + 1), gmax, 1)) {
                cat("Running g =", K, "...\n")
                all.results[[index]] <- PoisMixClus(y = y, u = u, g = K, lib.size = lib.size,
                    lib.type = lib.type, conds = conds,
                    init.type = gmin.init.type,
                    alg.type = alg.type, cutoff = cutoff, iter = iter,
                    fixed.lambda = fixed.lambda,
                    equal.proportions = equal.proportions,
                    prev.labels = NA, prev.probaPost = NA, init.runs = init.runs,
                    verbose = verbose)
                index <- index + 1
            }
        }
    }

    ## Per-model summaries, in the order g = gmin, ..., gmax.
    logLike.all <- unlist(lapply(all.results, function(x) x$log.like))
    p.logLike.all <- unlist(lapply(all.results, function(x) x$p.log.like))
    entropy.all <- unlist(lapply(all.results, function(x) x$entropy))
    entropyM.all <- unlist(lapply(all.results, function(x) x$entropyM))
    entropyS.all <- unlist(lapply(all.results, function(x) x$entropyS))

    ## Model selection: first maximum of each criterion (NA-safe).
    BIC.all <- unlist(lapply(all.results, function(x) x$BIC))
    BIC.choose <- which(BIC.all == max(BIC.all, na.rm = TRUE))[1]
    BIC.select.results <- all.results[[BIC.choose]]
    ICL.all <- unlist(lapply(all.results, function(x) x$ICL))
    ICL.choose <- which(ICL.all == max(ICL.all, na.rm = TRUE))[1]
    ICL.select.results <- all.results[[ICL.choose]]
    SICL.all <- unlist(lapply(all.results, function(x) x$SICL))
    SICL.choose <- which(SICL.all == max(SICL.all, na.rm = TRUE))[1]
    SICL.select.results <- all.results[[SICL.choose]]
    MIL.all <- unlist(lapply(all.results, function(x) x$MIL))
    MIL.choose <- which(MIL.all == max(MIL.all, na.rm = TRUE))[1]
    MIL.select.results <- all.results[[MIL.choose]]

    RESULTS <- list(logLike.all = logLike.all,
        p.logLike.all = p.logLike.all,
        entropy.all = entropy.all, entropyM.all = entropyM.all,
        entropyS.all = entropyS.all, BIC.all = BIC.all,
        ICL.all = ICL.all, SICL.all = SICL.all, MIL.all = MIL.all,
        BIC.select.results = BIC.select.results,
        ICL.select.results = ICL.select.results,
        SICL.select.results = SICL.select.results,
        MIL.select.results = MIL.select.results,
        all.results = all.results)
    class(RESULTS) <- "HTSClusterWrapper"
    return(RESULTS)
}
|
66fa49df68cba9e2c64ad8bca968f38039ee799f
|
339532c1047f1c4654692339478ada6c90f0420e
|
/ui/ui-tab-inputdata.R
|
6e64bde81d958ef55d229fb0f646d3dddf8e9c4d
|
[] |
no_license
|
marcottelab/pepliner
|
1647a5541830b4f23f82295c5471b5e54c0ae4d1
|
2e21bf81d56bacdcc9c6ee75bb3cdce3a8213de4
|
refs/heads/master
| 2021-01-01T18:52:08.640836
| 2018-07-27T20:37:57
| 2018-07-27T20:37:57
| 98,455,138
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,121
|
r
|
ui-tab-inputdata.R
|
## ==================================================================================== ##
# Pepliner: App for visualizing protein elution data
#
# Modified 2018 from the original GNUpl3 by Claire D. McWhitei <claire.mcwhite@utexas.edu>
# Original Copyright (C) 2016 Jessica Minnier, START Shiny Transcriptome Analysis Tool
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
## ==================================================================================== ##
##
##
## # This tab is used to input the count or normalized data files
## Input Data tab: load example data, a previously saved Pepliner RData file,
## or upload a peptide/protein count table (tidy or wide) plus an optional
## FASTA of matching sequence IDs.
tabPanel("Input Data",
         fluidRow(column(4,wellPanel(
           downloadLink("instructionspdf",label="Download Instructions (pdf)"),
           radioButtons('data_file_type','Use example file or upload your own data',
                        c('Upload Data'="upload",
                          'Pepliner RData file'="previousrdata",
                          'Example Data'="examplecounts"
                        ), selected = "examplecounts"),
           conditionalPanel(condition="input.data_file_type=='previousrdata'",
                            fileInput('rdatafile','Upload Pepliner App Generated RData File'),
                            conditionalPanel("output.fileUploaded",h4(strong("Check data contents then click Submit")))
           ),
           conditionalPanel(condition="input.data_file_type=='upload'",
                            radioButtons("inputdat_type","Input data type:",
                                         c("Peptides"="peps",
                                           "Proteins"="prots")),
                            conditionalPanel(condition="input.data_file_type=='upload'",
                                             radioButtons("inputdat_format", "Input data format:",
                                                          c("Tidy"="tidy",
                                                            "Wide matrix"="wide")),
                                             ## Help panels: one per (type, format) combination.
                                             conditionalPanel(condition="input.inputdat_type=='peps' && input.inputdat_format=='wide'",
                                                              downloadLink("example_peptide_analysis_file",label="Download Example Peptide Count file"),
                                                              p(""),
                                                              img(src="exampleanalysisdata.png",width="100%"),
                                                              tags$ul(
                                                                tags$li("File must have a header row."),
                                                                tags$li("First/Left-hand column must be peptide sequence"),
                                                                tags$li("Second column must be protein ID"),
                                                                tags$li("Fraction names in right hand columns")
                                                              )
                                             ),
                                             conditionalPanel(condition="input.inputdat_type=='peps' && input.inputdat_format=='tidy'",
                                                              downloadLink("example_peptide_counts_matrix_file",label="Download Example Peptide Count file"),
                                                              p(""),
                                                              img(src="example_peptide_counts_tidy.png",width="100%"),
                                                              tags$ul(
                                                                tags$li("File must have a header row."),
                                                                tags$li("First/Left-hand column must be peptide sequences."),
                                                                tags$li("Second column must be protein IDs"),
                                                                tags$li("Third column must be fraction name"),
                                                                tags$li("Fourth column must be value (ex. spectral counts)")
                                                              )
                                             ),
                                             conditionalPanel(condition="input.inputdat_type=='prots' && input.inputdat_format=='wide'",
                                                              downloadLink("example_protein_analysis_file",label="Download Example Protein Count file"),
                                                              p(""),
                                                              img(src="exampleanalysisdata.png",width="100%"),
                                                              tags$ul(
                                                                tags$li("File must have a header row."),
                                                                tags$li("First/Left-hand column must be called 'ID' and contain row IDs"),
                                                                tags$li("Fraction names in right hand columns")
                                                              )
                                             ),
                                             ## BUG FIX: this panel tested inputdat_type=='prots_tidy', which is
                                             ## never a valid value of the "inputdat_type" radio (peps/prots), so
                                             ## the tidy-protein help was unreachable; it also paired it with
                                             ## format 'wide' and mislabelled the link as a peptide file.
                                             conditionalPanel(condition="input.inputdat_type=='prots' && input.inputdat_format=='tidy'",
                                                              downloadLink("example_protein_counts_matrix_file",label="Download Example Protein Count file"),
                                                              p(""),
                                                              img(src="example_protein_counts_tidy.png",width="100%"),
                                                              tags$ul(
                                                                tags$li("File must have a header row."),
                                                                tags$li("First/Left-hand column must be protein sequences."),
                                                                tags$li("Third column must be fractionID"),
                                                                tags$li("Fourth column must be value (ex. spectral counts)")
                                                              )
                                             ),
                                             fileInput('datafile', 'Choose File Containing Data (.CSV)',
                                                       accept=c('text/csv',
                                                                'text/comma-separated-values,text/plain',
                                                                '.csv')),
                                             ## Peptides additionally need a FASTA to map peptides to proteins.
                                             conditionalPanel(condition = "input.inputdat_type=='peps'",
                                                              fileInput('proteomefile', 'Choose Fasta file containing sequence IDs matching data file IDs)',
                                                                        accept=c('fa/fasta',
                                                                                 '.fa',
                                                                                 '.fasta'))
                                             )
                            ) #End condition=upload
           ),
           conditionalPanel("output.fileUploaded",
                            actionButton("upload_data","Submit Data",
                                         style="color: #fff; background-color: #BF5700; border-color: #9E0000"))
         )#,
         # add reference group selection
         # add instructions
         # missing value character?
         ),#column
         column(8,
                bsCollapse(id="input_collapse_panel",open="data_panel",multiple = FALSE,
                           bsCollapsePanel(title="Data Contents: Wait for upload and check Before `Submit`",value="data_panel",
                                           dataTableOutput('countdataDT')
                           ),
                           bsCollapsePanel(title="Proteome Contents: Wait for upload and check Before `Submit`",value="proteome_panel",
                                           dataTableOutput('proteomeDT')
                           ),
                           bsCollapsePanel(title="Analysis Results: Ready to View Other Tabs",value="analysis_panel",
                                           downloadButton('downloadResults_CSV','Save Results as CSV File'),
                                           downloadButton('downloadResults_RData',
                                                          'Save Results as START RData File for Future Upload',
                                                          class="mybuttonclass"),
                                           dataTableOutput('analysisoutput'),
                                           tags$head(tags$style(".mybuttonclass{background-color:#BF5700;} .mybuttonclass{color: #fff;} .mybuttonclass{border-color: #9E0000;}"))
                           )
                )#bscollapse
         )#column
         )#fluidrow
)#tabpanel
|
8f2496a25ef8a80e99a18900340958bb758717a7
|
152a54991ed04bfc5d647592a38b687eb6d02289
|
/man/plot_magic_carpet.Rd
|
9b54c36ec8d4008dbca520949b3db9474fc59a09
|
[] |
no_license
|
gloverd2/codeBase
|
dd8a64c711ad98b0902955e585015d03442b5bbf
|
897da7d36b7bbe964c7d5e870e8eda918b59eb08
|
refs/heads/master
| 2023-05-05T20:19:55.512739
| 2021-06-02T08:29:16
| 2021-06-02T08:29:16
| 277,748,509
| 0
| 0
| null | 2020-11-30T11:03:33
| 2020-07-07T07:33:27
|
HTML
|
UTF-8
|
R
| false
| true
| 2,381
|
rd
|
plot_magic_carpet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_magic_carpet.R
\name{plot_magic_carpet}
\alias{plot_magic_carpet}
\title{plot_magic_carpet}
\usage{
plot_magic_carpet(
feature,
feature_name,
incumbent_pred,
proposed_pred,
weight = rep(1, length(feature)),
n_bins = 10,
ratio_max = 1,
ratio_step = 0.05,
position = "fill"
)
}
\arguments{
\item{feature}{array[numeric/character/factor] - value of feature}
\item{feature_name}{character - name of feature}
\item{incumbent_pred}{array[numeric] - values of incumbent prediction}
\item{proposed_pred}{array[numeric] - values of proposed prediction}
\item{weight}{array[numeric] - weight of each row}
\item{n_bins}{intergerish - number of buckets to split each (numeric) feature into}
\item{ratio_max}{numeric - max ratio at which to cap the incumbent_pred/proposed_pred ratio}
\item{ratio_step}{numeric - step size to divide ratio bins.}
\item{position}{character - either \code{"fill"} or \code{"stack"}.
If \code{"fill"} all bars are the same hight and extra line is added to show relative population.
If \code{"stack"} bar hight gives population.}
}
\value{
}
\description{
This plot visually compares how predictions change over a factor.
The ratio of two predictions \code{proposed_pred/incumbent_pred} is calculated for each row. Two plots are produced which display
For each level of the feature we find what is the proportion of row with each ratio
For each value of the ratio what is the proportion of row with each value of the feature
The log2 is used to calculate the ratio as this is symmetrical
the values of \code{log2(proposed_pred/incumbent_pred)} which are plotted are -ratio_max, -(n * ratio_step), -((n-1) * ratio_step), ..., -ratio_step, 0, ratio_step, ..., (n-1) * ratio_step, n * ratio_step, ratio_max
}
\examples{
n=100
feature1 <- seq(-10, 10, length.out=n)
feature2 <- rep(c("a", "b", "c"), each=ceiling(n/3))[1:n]
incumbent_pred <- 100 + rnorm(n)
proposed_pred <- 100 + rnorm(n) + seq(-10, 10, length.out=n)
plot_magic_carpet(feature=feature1, feature_name="example feature",
incumbent_pred = incumbent_pred,
proposed_pred = proposed_pred)
plot_magic_carpet(feature=feature2, feature_name="example feature",
incumbent_pred = incumbent_pred,
proposed_pred = proposed_pred)
}
|
dcaed474d5d1b830f7993b6a24222036076aa930
|
0f423767bc2eed0feee6dddd1709603ea3ebde72
|
/table2_nnt.R
|
2c490f3e8e60f034f7ea86594bd56b3f7cc9eb14
|
[] |
no_license
|
lbhund/Git_LQAS_Design_Uncertainty
|
71c81c64da01bc047a461e79c6550e51ffa28e24
|
db76abddb94d0a8880c79f475c0b355eb08ca424
|
refs/heads/master
| 2016-09-06T05:31:37.296826
| 2014-11-04T23:07:43
| 2014-11-04T23:07:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,721
|
r
|
table2_nnt.R
|
library(xtable)
#! LOAD IN OUTPUT FROM EXAMPLE 2 - NNT !#
# NOTE(review): absolute Windows paths make this non-portable; these .Rda
# files are expected to define `designs`, `l`, `e` and `q` in the workspace.
load("C:/Users/lhund/Dropbox/PROJECTS/LQAS/SensSpec/output/designs2.Rda")
load("C:/Users/lhund/Dropbox/PROJECTS/LQAS/SensSpec/output/l2.Rda")
load("C:/Users/lhund/Dropbox/PROJECTS/LQAS/SensSpec/output/e2.Rda")
load("C:/Users/lhund/Dropbox/PROJECTS/LQAS/SensSpec/output/q2.Rda")
#! FILL IN BF DESIGNS IN SITUATION WHERE EQUIVALENT TO LQAS FOR EFFICIENCY !#
# For each design (except #4, which has no decision rule), compute the Bayes
# factor of the LQAS rule after adjusting the thresholds pl/pu for the
# design's sensitivity (se) and specificity (sp).
upper <- length(designs)
btemp <- list()
for(kk in 1:upper){
btemp[[kk]] <- list(n = NA, rule = NA)
if(kk!=4){
pl <- l[[kk]]$pl
pu <- l[[kk]]$pu
sptemp <- designs[[kk]]$sp
setemp <- designs[[kk]]$se
# Effective (misclassification-adjusted) prevalences
ple <- setemp*pl + (1-sptemp)*(1-pl)
pue <- setemp*pu + (1-sptemp)*(1-pu)
dtemp <- l[[kk]]$rule
ntemp <- l[[kk]]$n
# Bayes factor of observing d successes in n trials at pu vs pl
logbf <- dbinom(dtemp, ntemp, pue, log = T) - dbinom(dtemp, ntemp, ple, log = T)
bf <- exp(logbf)
btemp[[kk]] <- list(n = ntemp, rule = bf)
}
}
b <- btemp
rm(btemp)
# Plug in NAs for design 4 where rule does not exist
l[[4]] <- list(n = NA, rule=NA)
# Normalize q: some entries are lists carrying the value in $Q.
for(pp in 1:upper){
if(length(q[[pp]]) > 1)
q[[pp]] <- q[[pp]]$Q
}
# MAKE BF RULES #
# Second pass: Bayes factors using the se/sp stored in l[[kk]] itself.
logbf <- NULL
for(kk in 1:length(designs)){
if(kk!=4){
pl <- l[[kk]]$pl*l[[kk]]$se + (1-l[[kk]]$pl)*(1- l[[kk]]$sp)
pu <- l[[kk]]$pu*l[[kk]]$se + (1-l[[kk]]$pu)*(1- l[[kk]]$sp)
n <- l[[kk]]$n
d <- l[[kk]]$rule
logbf[kk] <- dbinom(d, n, pu, log=T) - dbinom(d, n, pl, log=T)
}
if(kk==4) logbf[kk] <- NA
}
bf <- exp(logbf)
# Make matrix of results
# Columns: Q, alpha, beta (from e), LQAS sample size n, decision rule d, BF k.
mat <- NULL
for(kk in 1:upper){
mat <- rbind(mat, c(q[[kk]], e[[kk]],
l[[kk]]$n, l[[kk]]$rule,bf[kk]))
}
mat <- as.data.frame(mat)
colnames(mat) <- c("Q", "alpha", "beta", "n_lqas", "d", "k")
# Emit the LaTeX table (Table 2 of the manuscript).
xtable(mat, digits=c(0, 2, 2, 2, 0, 0, 2))
|
04f3a9ce9c414c1073adcfef9280849ddd0208dd
|
22d114d86d77cbc042f1f651ee6b8feabb2f5149
|
/man/read_source.Rd
|
95bd1e57daeec4d863b4c24ea7aa6c4a32c9a078
|
[
"MIT"
] |
permissive
|
lidavidm/arrowbench
|
544b8030ae203c2274a6c4c8a6c5ea1d76fc246b
|
ca08ea96678962c462dd8049511e72fbb6f3e9da
|
refs/heads/main
| 2023-04-09T23:26:06.203856
| 2021-04-13T22:51:41
| 2021-04-13T22:51:41
| 358,266,146
| 0
| 0
|
NOASSERTION
| 2021-04-15T13:18:41
| 2021-04-15T13:18:40
| null |
UTF-8
|
R
| false
| true
| 328
|
rd
|
read_source.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ensure-source.R
\name{read_source}
\alias{read_source}
\title{Read a known source}
\usage{
read_source(file, ...)
}
\arguments{
\item{file}{file to read}
\item{...}{extra arguments to pass}
}
\value{
the source
}
\description{
Read a known source
}
|
659098e45f6e78a6fbe3dae75936f13ad55ddcc7
|
0bff7b092713ddf7a6ccdd22dd6c95eb4e9cec32
|
/Doing/A hands-on introduction to statistics with R/Introduction to R/Chapter01 - Intro to basics/ex4.R
|
6e2db5550bd5083cde8253697368162358052561
|
[] |
no_license
|
Selaginella99/coursera
|
2aca2dd2fa65d1217b31e0f9b228e865a42064eb
|
95a7da5814a8164dd932bc7963fffa1719f905d3
|
refs/heads/master
| 2021-01-18T02:29:57.199101
| 2015-05-14T14:47:46
| 2015-05-14T14:47:46
| 35,336,115
| 0
| 0
| null | 2015-05-09T16:36:00
| 2015-05-09T16:35:59
| null |
UTF-8
|
R
| false
| false
| 125
|
r
|
ex4.R
|
# Assign the value 5 to the variable called my_apples
my_apples <- 5
# Print out the value of the variable my_apples (auto-printed at top level)
my_apples
|
b2083bd13212a82c1a6acaf844ae50b01d34aa81
|
22f8b2957929cb4d2d69d8619d9c17ab62ea952e
|
/5147-R-assignment.R
|
b245af9fb99ed2a8d1d8de2e19bd50cb0d97a89e
|
[] |
no_license
|
hsinhuibiga/R---data-analysis
|
30bcea7b0171f4564541033028b060d3223192e6
|
6de4ab91ce01c7df5901b61749bfc76eebee7d7a
|
refs/heads/master
| 2021-05-26T02:31:35.865524
| 2020-04-08T09:44:20
| 2020-04-08T09:44:20
| 254,017,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,023
|
r
|
5147-R-assignment.R
|
# student:HsinHui Lin 28464176 Mon 18:00 tutorial
library(ggplot2)   # was require(); library() errors loudly if missing
library(ggmap)
library(datasets)
library(leaflet)
# BUG FIX: shinyUI/shinyServer/fluidPage/selectInput in Task 5 need shiny,
# which was never attached (none of the packages above attach it).
library(shiny)
#----Task 1----------------------------------------------------------------#
#read the csv file in to the data
data = read.csv("assignment-02-data-formated.csv")
#print the data
data
#---Task 2 ----------------------------------------------------------------#
#to create static visualisations in R using ggplot2
#put the value in the more readable type and use numeric
data$value <- as.numeric(sub("%", "", data$value))
#consider to run the library
#myGraph <- ggplot(data, aes(variable for x axis, variable for y axis))
# One subset per coral type
data.soft <- data[data$coralType == "soft corals", ]
data.hard <- data[data$coralType == "hard corals", ]
data.seaP <- data[data$coralType == "sea pens", ]
data.seaF <- data[data$coralType == "sea fans", ]
data.blue <- data[data$coralType == "blue corals", ]
# NOTE(review): each assignment below overwrites `graph`; only the final
# one (blue corals, quadratic smoother) is printed at the end.
graph <-ggplot(data = data.soft, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",se = FALSE)
graph <-ggplot(data = data.soft, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",formula = y~ poly(x, 2),se = FALSE)
graph <-ggplot(data = data.hard, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",se = FALSE)
graph <-ggplot(data = data.hard, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",formula = y~ poly(x, 2),se = FALSE)
graph <-ggplot(data = data.seaP, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",se = FALSE)
graph <-ggplot(data = data.seaP, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",formula = y~ poly(x, 2),se = FALSE)
graph <-ggplot(data = data.seaF, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",se = FALSE)
graph <-ggplot(data = data.seaF, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",formula = y~ poly(x, 2),se = FALSE)
graph <-ggplot(data = data.blue, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",se = FALSE)
graph <-ggplot(data = data.blue, aes(year, value)) +geom_point() + facet_wrap((latitude~location),nrow = 1) + geom_smooth(aes(group =1),method = "lm",formula = y~ poly(x, 2),se = FALSE)
graph
#---Task 3 ------------------------------------------------------------------------#
#to fit curves to the data to identify trends
#use scatter plot with matrix to represent the trends
ggplot(data = data,aes(x=year, y=value, color=coralType)) +geom_point() +
  labs(x= 'year' ,y = 'value', title = 'Trend for the Great Barrier Reef')+
  geom_smooth(aes(group =1),method = "lm",se = FALSE) + facet_wrap(~location,nrow = 1)
#----Task 4-----------------------------------------------------------------------#
#to create a data map in R with Leaflet
map <- leaflet(data=data) %>%
  addTiles() %>% #addCircles(lng = ~lon, lat = ~lat)
  addMarkers(lng=~longitude , lat=~latitude, popup=~location)
map # Prints the map
#----Task 5---------------------------------------------------------------------#
# to create an interactive visualisation in R with Shiny
#UI
ui <-shinyUI(fluidPage(
  # Apply title as header
  headerPanel("Bleaching of the Great Barrier Reef"),
  leafletOutput("worldmap"),
  # Sidebar with controls to select the variable to plot against
  # mpg and to specify whether outliers should be included
  sidebarLayout(
    sidebarPanel(
      selectInput("Coral_Type", "Choosing Type:",
                  c("blue corals", "hard corals","sea fans","sea pens","soft corals")
      ),
      selectInput("Smoother_Type", " Choosing:",
                  c(
                    "polynomial linear smooth"= "polyls",
                    "loess" ="loess"))
    ),
    mainPanel(
      h2(textOutput("Value")),
      plotOutput("bogbr")
    )
  )
))
#server
names(data) = c("location", "coralType","longitude","latitude","year","value")
server <-shinyServer(function(input, output) {
  output$worldmap = renderLeaflet({leaflet(data = data) %>% addTiles() %>%addMarkers(~longitude, ~latitude, popup = ~as.character(location))})
  output$bogbr = renderPlot({
    # Subset to the chosen coral type, then apply the chosen smoother
    a=data[data$coralType==input$Coral_Type,]
    b=ggplot(data = a, aes(x =year, y= value)) +geom_point()+facet_wrap((location~latitude),nrow = 1)
    b=b+scale_x_continuous("year")+scale_y_continuous("Value")
    if (input$Smoother_Type =="polyls"){
      picture=b+geom_smooth(aes(group =1),method = "lm",formula = y~ poly(x, 2),se = FALSE)
    }
    else{picture=b+geom_smooth(aes(group= 1),method = "lm",se =FALSE)}
    print(picture)
  })
})
shinyApp(ui = ui,server = server)
|
8223b72b287b697a977e81b3028109186ca9337d
|
cb2df211142e20ea1dd3b39eee92cfb03458db39
|
/ABLIRC/bin/plot/volcano.r
|
35ce8d13d390bc8bc7786b78782187eeeb6dd86d
|
[
"MIT"
] |
permissive
|
ablifedev/ABLIRC
|
4c9f1efa0f92a2a08ddcc142fad8eab670f83d33
|
875278b748a8e22ada2c76c3c76dbf970be4a6a4
|
refs/heads/master
| 2021-01-13T11:32:05.398582
| 2019-10-18T03:35:36
| 2019-10-18T03:35:36
| 81,188,219
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,563
|
r
|
volcano.r
|
#!/usr/bin/env Rscript
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#####################################################################################
#####################################################################################
#####################################################################################
# Load option parsing quietly so Rscript output stays clean.
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("stats"))
usage = "Rscript /users/ablife/ablife-R/Venn/latest/Venn.r -f  -d  -c  -t  -n
example: Rscript /users/ablife/ablife-R/Venn/latest/Venn.r
-f Ezh2_vs_ESC_Input_peaks_cluster.bed,Jarid2_vs_ESC_Input_peaks_cluster.bed
-c 6
-o ./"
option_list <- list(
make_option(c("-f", "--file"),action = "store",type = "character",
help = "The Result file"),
make_option(c("-A","--columnA"),action = "store",type = "integer",default = 5,
help = "the column number of sample A's Expression"),
make_option(c("-B","--columnB"),action = "store",type = "integer",default = 6,
help = "the column number of sample B's Expression"),
make_option(c("-s", "--suffix"),action = "store",type = "character",default = "_result_list_add_exp.txt",
help = "file suffix,which will be delete in result file name"),
make_option(c("-o", "--outdir"),action = "store",type = "character",default = "./",
help = "The outdir"),
make_option(c("-n", "--outfile"),action = "store",type = "character",default = "NULL",
help = "outfile name"),
make_option(c("-c", "--addcor"),action = "store",type = "integer",default = 1,
help = "add R value, default is 1, set 0 to disable")
)
opt <- parse_args(OptionParser(option_list = option_list))
setwd(opt$outdir)  # NOTE(review): changes the working directory for the session
# opt$file <- "S_24h_vs_C_24h_result_list.txt"
a <- as.numeric(opt$columnA)  # column index of sample A's expression values
b <- as.numeric(opt$columnB)  # column index of sample B's expression values
#################2016.05.27
#setwd("")
#################
library(grid)# for `unit`
library(gridExtra)# for `unit
library(ggplot2)
library(reshape2)
library(dplyr)
################################################################################
################################################################################
# Build the output file stem: strip directories and the configured suffix,
# then any leading underscore; -n overrides the derived name entirely.
filename <- strsplit(as.character(opt$file),"/")
filename <- sub(as.character(opt$suffix),"",filename[[1]][length(filename[[1]])])
filename <- sub("^_","",filename)
if (opt$outfile != "NULL"){
filename <- opt$outfile
}
print(filename)
# Differential-expression table; downstream code uses columns Gene, logFC, PValue.
results = read.table(as.character(opt$file),header = T,sep = "\t")
#names(results) = as.character(unlist(results[1,]))
#results = results[-1,]
# Flag each gene: differential iff PValue <= 0.01 and |logFC| >= 1.
results = mutate(results, sig=ifelse((results$PValue >0.01 | abs(results$logFC) < 1), "not differential expressed",
                       ifelse(results$logFC < 0, "down-regulated genes",
                              "up-regulated genes")))
#filenames <- list.files(path="./", pattern="*.txt")
#names <-substr(filenames,1,18)
#for(i in names){
# filepath <- file.path("../volcano_plot/",paste(i,"_result_list.txt",sep=""))
# assign(i, read.delim(filepath,
# colClasses=c("character",rep("numeric",5)), header = T,
# sep = "\t"))
#}
#out.file<-""
######################################
######################################
# Shared ggplot2 theme for all figures in this script: no grid lines, legend
# drawn inside the panel near the top-left, uniform 10-12pt black text.
# Commented-out settings are kept as a record of previously tried options.
theme_paper <- theme(
  # panel.border = element_rect(fill = NA,colour = "black"),
  # panel.grid.major = element_line(colour = "grey88",
  # size = 0.2),
  # panel.grid.minor = element_line(colour = "grey95",
  # size = 0.5),
  # axis.text.x= element_text(vjust = 1,hjust = 1, angle = 45),
  # legend.position = "top",
  # legend.direction = "horizontal",
  panel.grid.major = element_blank(),
  panel.grid.minor = element_blank(),
  legend.background=element_blank(),
  legend.key = element_rect(fill = NA,colour = NA),
  legend.position = c(0.28, 0.89),
  legend.text = element_text(size = 10,margin = margin(0,0,0,0)),
  legend.title=element_blank(),
  axis.title = element_text(size = 12),
  plot.title = element_text(size = 12),
  axis.text = element_text(size = 12, colour = "black"))
# Classify each gene into one of three significance categories and return a
# factor whose level labels embed the per-category gene counts, e.g.
# "up-regulated genes(123)". A gene is "not differential expressed" when
# pval > q_cut or |lfc| < lfc_cut; otherwise the sign of lfc picks up/down.
color_points <- function(pval, lfc, q_cut, lfc_cut, upnum, downnum, nonum) {
  category_labels <- paste0(
    c("not differential expressed", "down-regulated genes", "up-regulated genes"),
    "(", c(nonum, downnum, upnum), ")"
  )
  # 1 = not significant, 2 = significant & down, 3 = significant & up
  category_idx <- ifelse(pval > q_cut | abs(lfc) < lfc_cut, 1L,
                         ifelse(lfc < 0, 2L, 3L))
  factor(category_labels[category_idx], levels = category_labels)
}
# Volcano plot: -log10(p-value) against log2 fold change, points styled by
# the significance category from color_points(). Uses the script-level
# theme_paper object for the shared figure styling.
volcano_plot <- function(pval, lfc, q_cut, lfc_cut, upnum, downnum, nonum) {
  category <- color_points(pval, lfc, q_cut, lfc_cut, upnum, downnum, nonum)
  plot_df <- data.frame(p = -log10(pval), lfc = lfc, signif = category)
  base_layer <- ggplot(plot_df) +
    geom_point(aes(x = lfc, y = p, color = signif, shape = signif,
                   alpha = signif, size = signif))
  base_layer +
    scale_color_manual(values = c("black", "#456A9F", "#FB654A"), drop = FALSE) +
    scale_shape_manual(values = c(1, 16, 16), drop = FALSE) +
    scale_alpha_manual(values = c(0.3, 0.9, 0.9), drop = FALSE) +
    scale_size_manual(values = c(1, 1.5, 1.5), drop = FALSE) +
    labs(title = "Volcano plot",
         x = expression(paste(log[2], " fold change")),
         y = expression(paste("-", log[10], " p-value"))) +
    theme_bw() + theme_paper
}
# Expression scatter plot: per-gene RPKM in sample A (x) vs sample B (y),
# coloured with the same significance categories as the volcano plot.
# ra_name / rb_name are the sample names used for the axis labels.
exp_plot <- function(ra, rb, ra_name, rb_name, pval, lfc, q_cut, lfc_cut, upnum, downnum, nonum) {
  category <- color_points(pval, lfc, q_cut, lfc_cut, upnum, downnum, nonum)
  plot_df <- data.frame(ra = ra, rb = rb, signif = category)
  ggplot(plot_df) +
    geom_point(aes(x = ra, y = rb, color = signif, shape = signif,
                   alpha = signif, size = signif)) +
    scale_color_manual(values = c("black", "#456A9F", "#FB654A"), drop = FALSE) +
    scale_shape_manual(values = c(1, 16, 16), drop = FALSE) +
    scale_alpha_manual(values = c(0.3, 0.9, 0.9), drop = FALSE) +
    scale_size_manual(values = c(1, 1.5, 1.5), drop = FALSE) +
    labs(title = "Exp plot",
         x = paste("RPKM of ", ra_name),
         y = paste("RPKM of ", rb_name)) +
    theme_bw() + theme_paper
}
#+theme_Publication()+ theme(legend.position="none")
# head(volcano_1[[1]])
# sigif <- factor(results$sig,levels = c("not differential expressed","down-regulated genes","up-regulated genes"))
# p1 = ggplot(results) +
# labs(title = "")+
# geom_point(alpha=0.8,size=1.2,aes(x = F_C, y = M_C, color = signif,shape=sigif,alpha=signif,size=signif)) + coord_flip()+
# scale_color_manual(values=c("black", "#456A9F", "#FB654A"))+
# scale_shape_manual(values=c(1, 16, 16), drop = FALSE)+
# scale_alpha_manual(values = c(0.3, 1, 1), drop = FALSE) +
# scale_size_manual(values = c(1, 1.5, 1.5), drop = FALSE) +
# ylim(0,1000)+xlim(0,1000)
# p1
# Split genes into up / not-significant / down sets (thresholds |logFC| > 1,
# PValue <= 0.01) to obtain the per-category counts used in legend labels.
# NOTE(review): mutate(df, "Up") adds a constant column literally named "Up";
# only the row counts of these frames are used below.
results.Up <- subset(results, logFC > 1 & PValue <= 0.01) #define Green
results.Up <-mutate(results.Up,"Up")
upnum <- length(results.Up$Gene)
results.No <- subset(results, PValue > 0.01 | abs(logFC) < 1) #define Black
results.No <- mutate(results.No,"Not-sig")
nonum <- length(results.No$Gene)
results.Dn <- subset(results, logFC < -1 & PValue <= 0.01) #define Red
results.Dn <-mutate(results.Dn,"Down")
downnum <- length(results.Dn$Gene)
# colnames(results.Up)<-c("Gene","logFC","logCPM","PValue","sig")
# colnames(results.No)<-c("Gene","logFC","logCPM","PValue","sig")
# colnames(results.Dn)<-c("Gene","logFC","logCPM","PValue","sig")
# results <- rbind(results.Up, results.No, results.Dn)
# results$sig <- as.factor(results$sig)
#########################
# Pearson correlation of the two samples' log10(x + 1) expression values,
# formatted as a plotmath string ("italic(R)==0.123") for annotation.
colname <- colnames(results[,c(a,b)])
x <- log(results[,a]+1,10)
y <- log(results[,b]+1,10)
Max <- max(max(results[,a]),max(results[,b]))
CorResult <- cor(x,y,method = 'pearson')
CorResult <- round(CorResult,3)
CorResult <- paste("italic(R)==",CorResult,sep='')
CorResult
Max
# Expression scatter on log10 axes; with -c 1 (default) add the y = x
# reference line and the correlation annotation.
exp_1 <- exp_plot(results[,a],results[,b],colname[1],colname[2],results$PValue,results$logFC,
q_cut = .01, lfc_cut = 1, upnum=upnum, downnum=downnum,nonum=nonum) +
labs(title = "")+
# scale_x_continuous(breaks = seq(-8, 8, 2)) +
#  scale_y_continuous(trans = "log10",limits = c(1, Max))+scale_x_continuous(trans = "log10",limits = c(1, Max))
scale_y_continuous(trans = "log10")+scale_x_continuous(trans = "log10")
if (opt$addcor==1){
exp_1 <- exp_1 + geom_abline(aes(intercept=0,slope=1),linetype = "dashed",size = 0.8,colour = "gray50")+
annotate("text",x= 0.1*Max,y=0.6*Max,label = CorResult,size= 4,parse = TRUE)
}
# ylim(0,500)+xlim(0,500)
# Save the expression scatter as PDF and PNG (130 x 120 mm).
ggsave(paste(filename,"_exp.pdf",sep=""), width = 130, height = 120, units = "mm")
ggsave(paste(filename,"_exp.png",sep=""), width = 130, height = 120, units = "mm", dpi = 450)
# Volcano plot with the x axis clipped to [-8, 8] and y to [0, 30].
volcano_1 <- volcano_plot(results$PValue,results$logFC,
q_cut = .01, lfc_cut = 1, upnum=upnum, downnum=downnum,nonum=nonum) +
labs(title = "")+
scale_x_continuous(breaks = seq(-8, 8, 2),limits = c(-8, 8)) +
ylim(0,30)
######################
# multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
# library(grid)
#
# # Make a list from the ... arguments and plotlist
# plots <- c(list(...), plotlist)
#
# numPlots = length(plots)
#
# # If layout is NULL, then use 'cols' to determine layout
# if (is.null(layout)) {
# # Make the panel
# # ncol: Number of columns of plots
# # nrow: Number of rows needed, calculated from # of cols
# layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
# ncol = cols, nrow = ceiling(numPlots/cols))
# }
#
# if (numPlots==1) {
# print(plots[[1]])
#
# } else {
# # Set up the page
# grid.newpage()
# pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
#
# # Make each plot, in the correct location
# for (i in 1:numPlots) {
# # Get the i,j matrix positions of the regions that contain this subplot
# matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
#
# print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
# layout.pos.col = matchidx$col))
# }
# }
# }
##########################
########################## reformated by JieHuang
# multiplot()
#grid.arrange(b1, b2, b3,b4, nrow=2, ncol=2)
ggsave(paste(filename,"_DEG.pdf",sep=""), width = 130, height = 120, units = "mm")
ggsave(paste(filename,"_DEG.png",sep=""), width = 130, height = 120, units = "mm", dpi = 450)
|
72c3809b76369f4ae8bb4c26271f12c5cace30c3
|
1f29b675ec689b85e9b5362bb2c068eb549727f2
|
/plot1.R
|
3c7fd08c3b1795193f918a42f1b5ac11e63b2dff
|
[] |
no_license
|
rusteyz/ExData_Plotting1
|
1fe7fa676e4c366c8b1194d16c21743ddf00c78e
|
b6fcb2a9b8a7cb7a424c30256410e248696d1e4c
|
refs/heads/master
| 2021-01-16T22:16:17.209224
| 2014-07-14T00:39:46
| 2014-07-14T00:39:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 632
|
r
|
plot1.R
|
## plot1.R
## Reads the household power consumption data, keeps the two days
## 2007-02-01 and 2007-02-02, and draws a histogram of Global Active Power,
## copied to plot1.png (480x480).
setwd("/Users/amrastog/datasciencecoursera/ExploratoryDataAnalysis")
## Read data: fields are semicolon-separated and '?' marks missing values.
## (Fixed: use `<-` for assignment and the full argument name `na.strings`
## instead of the partially-matched `na.string`.)
data <- read.table("household_power_consumption.txt", na.strings = '?', sep = ';', header = TRUE)
## Combine the Date and Time columns into a single POSIXlt date-time.
data$Date <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
## Keep observations from 2007-02-01 up to (but excluding) 2007-02-03,
## i.e. the two target days inclusive.
data <- subset(data, (data$Date >= strptime("2007-02-01", "%Y-%m-%d") & (data$Date < strptime("2007-02-03", "%Y-%m-%d"))))
hist(data$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (Kilowatts)")
dev.copy(png, file = "plot1.png", height = 480, width = 480) ## Copy the screen plot to a PNG file
dev.off() ## Close the PNG device
|
c43b6c30ca845c704efd54544bebeb96c04e6d02
|
8dbe523b5cd123fb95bdcb97dac806d482af566f
|
/man/pass.message.Rd
|
0d3be202c044dfcc6f8b10b71288d79359f7c638
|
[] |
no_license
|
npetraco/CRFutil
|
b5ca67b73afdab9dc64712fc709fe08a8fbce849
|
50ef4ca06b7ab11ac1d54472a87e7854beb07cec
|
refs/heads/master
| 2023-01-22T10:53:55.149603
| 2023-01-06T02:03:47
| 2023-01-06T02:03:47
| 135,449,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 464
|
rd
|
pass.message.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sum.product_util.R
\name{pass.message}
\alias{pass.message}
\title{Pass a message over an edge on a factor graph}
\usage{
pass.message(
start.node,
end.node,
factor.graph,
pots.list,
mailboxes.list,
printQ = F
)
}
\arguments{
\item{start.node}{Node at the sending end of the edge.}
\item{end.node}{Node at the receiving end of the edge.}
\item{factor.graph}{The factor graph containing the edge.}
\item{pots.list}{List of potentials (factors) associated with the graph.}
\item{mailboxes.list}{List of mailboxes holding the messages received at each node.}
\item{printQ}{Logical; if \code{TRUE}, print diagnostic output (default \code{F}).}
}
\value{
The result of passing the message from \code{start.node} to \code{end.node} along the edge.
}
\description{
Pass a message over an edge on a factor graph
}
\details{
The function will XXXX
}
|
46a9e1e466aa5fb8f4ed49a5ea7f20cd878d75c1
|
2cf4177233c5ed00d23614db585196c2f3db1077
|
/plotting/plot3.R
|
33b927cf8ad3093b67e3a05afda2c9d3b1fb718a
|
[] |
no_license
|
lightkuriboh/ExData_Plotting1
|
3ef046571ccb91b4817a5554a92675d791b50202
|
37b4d3d80486b79c201ff99658d4c61f2ca96c4d
|
refs/heads/master
| 2022-11-10T23:41:53.861541
| 2020-07-13T09:40:12
| 2020-07-13T09:40:12
| 279,043,557
| 0
| 0
| null | 2020-07-12T10:44:05
| 2020-07-12T10:44:04
| null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
plot3.R
|
library(sqldf)
## plot3.R
## Plots the three sub-metering series for 2007-02-01/2007-02-02 to
## output/plot3.png.
data_path <- 'raw_data/household_power_consumption.txt'
start_date <- as.Date('2007-02-01')
end_date <- as.Date('2007-02-02')
my_data <- read.csv(data_path, sep = ';')
## Keep only the two days of interest (the Date column is d/m/Y text).
my_data <- subset(my_data, as.Date(my_data$Date, format = '%d/%m/%Y') >= start_date &
                    as.Date(my_data$Date, format = '%d/%m/%Y') <= end_date)
dim(my_data)
## Combine Date and Time into a single POSIXlt timestamp for the x axis.
my_date_time <- as.POSIXlt(paste(my_data$Date, my_data$Time), format = '%d/%m/%Y %H:%M:%S')
png('output/plot3.png')
## Draw the first series without axes; the other two are overlaid with
## lines() and the axes added manually below.
## (Fixed: spell out FALSE instead of the reassignable shorthand `F`.)
plot(my_date_time,
     my_data$Sub_metering_1,
     ylab = 'Energy sub metering',
     col = 'black',
     type = 'l',
     xlab = '',
     axes = FALSE
)
lines(my_date_time, my_data$Sub_metering_2, col = 'red')
lines(my_date_time, my_data$Sub_metering_3, col = 'blue')
## Custom axes: y ticks every 10 units, x labelled by weekday.
axis(side = 2, at = seq(0, 30, 10), labels = seq(0, 30, 10))
axis(side = 1, at = seq(min(my_date_time), max(my_date_time), length.out = 3), labels = c('Thu', 'Fri', 'Sat'))
box(which = "plot", lty = "solid")
legend('topright',
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       col = c('black', 'red', 'blue'),
       lty = 1
)
dev.off()
|
38c8aed6ba4067e8ff4317779a339c78ecca2b1a
|
56b1c1c707c1412c7cbf14e081980a37736fe83d
|
/NormalPlotting/man/normalplot.Rd
|
d1a898a9993db6132aebcb9ea579c999b5381a85
|
[] |
no_license
|
sbillystewartsw/Plotting
|
abe36271a181d0d2c24c1e76c17c2cb0494d769e
|
71e26e35f57dd201e8e5c63b0bd936c034340f0c
|
refs/heads/main
| 2023-01-04T22:37:29.524138
| 2020-11-06T21:09:11
| 2020-11-06T21:09:11
| 310,699,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 488
|
rd
|
normalplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalplot.R
\name{normalplot}
\alias{normalplot}
\title{Plot a normal distribution}
\usage{
normalplot(mu = 1, sd = 1)
}
\arguments{
\item{mu}{is the mean of the distribution (default = 1)}
\item{sd}{is the standard deviation of the distribution (default = 1)}
}
\value{
returns a plot of the normal distribution
}
\description{
Plot a normal distribution
}
\examples{
normalplot(10, 3)
normalplot(2, 0.5)
}
|
a2c1289208b4dfb75503dee44bf894a4727d1c9a
|
41cc7cc09f184dd49ec17669b96708a245599047
|
/R/general_functions.R
|
e7f4cf75eff23a116d384a060185f6cbab4f41bd
|
[] |
no_license
|
ddeweerd/ComhubbeR
|
1a475168a4f2da0ec091590e0cd2782e2d9cf2dc
|
ad49d100345ce472d91e7bad420bc74b24f3deda
|
refs/heads/master
| 2023-04-03T09:01:24.188664
| 2021-03-26T10:13:50
| 2021-03-26T10:13:50
| 344,760,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,292
|
r
|
general_functions.R
|
#' @import tidyverse
#' @import magrittr
#' @import tigress
#' @import minet
#' @import dplyr
#' @import reshape2
#' @import reticulate
transpose_expression_data <- function(comhub_object){
  # Flip the stored expression matrix (swap rows and columns) and hand it
  # back as a plain data frame.
  expr_mat <- comhub_object$python_object$expression_data
  as.data.frame(t(expr_mat))
}
melt_result <- function(net, comhub_object, network_cutoff){
  # Turn a (regulator x target) weight matrix into a ranked edge list.
  # Steps: melt the matrix to long form, drop non-positive weights, keep
  # only rows whose regulator appears in the stored transcription-factor
  # list, sort by weight (descending), rename the columns to
  # TF/target/confidence, coerce factor columns (and the first two columns)
  # to character, and keep the top `network_cutoff` edges.
  reshape2::melt(net) %>%
    dplyr::filter(., value > 0) %>%
    dplyr::filter(Var1 %in% comhub_object$python_object$transcription_factors[,1]) %>%
    dplyr::arrange(., desc(value)) %>%
    magrittr::set_colnames(., c('TF', 'target', 'confidence')) %>%
    rapply(., as.character, classes = "factor", how = "replace") %>%
    mutate_at(c(1,2), as.character) %>%
    dplyr::slice(., 1:network_cutoff)
}
build_genie_result <- function(genie_result, network_cutoff){
  # Convert raw GENIE-style output into a ranked edge list.
  #
  # `genie_result` is a two-element list: [[1]] a list of triples
  # (regulator index, target index, weight) with 0-based indices (see
  # unlist_genie, which adds 1), and [[2]] the vector of gene names.
  # Returns a data frame with columns TF / target / confidence, truncated
  # to the first `network_cutoff` rows.
  gene_names <- genie_result[[2]]
  results <- genie_result[[1]]
  col1 <- unlist_genie(1, results, gene_names)  # regulator (TF) names
  col2 <- unlist_genie(2, results, gene_names)  # target names
  # Fixed: use FALSE instead of the reassignable shorthand `F`.
  data.frame(col1, col2, unlist(sapply(results, function(x)x[3])), stringsAsFactors = FALSE) %>%
    set_colnames(c('TF', 'target', 'confidence')) %>%
    dplyr::slice(., 1:network_cutoff)
}
unlist_genie <- function(col, results, gene_names){
  # Pull element `col` out of every triple, shift the 0-based indices to
  # 1-based, and map them onto the gene-name vector.
  raw_idx <- unlist(sapply(results, function(triple) triple[col]))
  gene_names[raw_idx + 1]
}
#' @export
consensus_network <- function(comhub_object){
  # Build a consensus edge list: stack all per-method result tables, drop
  # the per-method confidence, count how many tables report each TF/target
  # pair (column `n`), and rank the pairs by that count (descending).
  comhub_object$consensus_network <- Reduce(rbind, comhub_object$results) %>%
    dplyr::select(-confidence) %>%
    dplyr::count(TF, target) %>%
    dplyr::arrange(., dplyr::desc(n))
  return(comhub_object)
}
#' Pool the per-method results of several comhub objects into one.
#'
#' All fields other than `results` are taken from the first object; the
#' `results` lists of every object are concatenated one level deep.
#'
#' @param comhub_objects A list of comhub objects.
#' @return The first object with the pooled `results` list.
#' @export
combine_comhub_objects <- function(comhub_objects){
  c1 <- comhub_objects[[1]]
  # Collect each object's results list, then flatten exactly one level so
  # the individual result tables sit side by side in a single list.
  # (Fixed: `recursive = F` -> the non-reassignable literal FALSE.)
  pooled <- lapply(comhub_objects, function(x) x$results)
  c1$results <- unlist(pooled, recursive = FALSE)
  return (c1)
}
#' @export
retrieve_community <- function(comhub_object){
  # Re-run the full community pipeline on a (possibly deserialised) comhub
  # object: rebuild the underlying python object, then recompute pairwise
  # correlations, TF out-degrees, the community, and the consensus network.
  # NOTE(review): pairwise_correlation/get_tf_outdegree/community are
  # defined elsewhere in the package -- behaviour inferred from their
  # names, confirm against their definitions.
  revive_python_object(comhub_object) %>%
    pairwise_correlation() %>%
    get_tf_outdegree() %>%
    community() %>%
    consensus_network()
}
#' @export
revive_python_object <- function(comhub_object){
  # Rebuild the `python_object` field from the stored network name,
  # expression data and transcription-factor list.
  # NOTE(review): `comhub_main` is defined elsewhere in the package --
  # presumably it constructs the reticulate-backed object; confirm.
  comhub_object$python_object <- comhub_main(comhub_object$network_name,
                                             comhub_object$expression_data,
                                             comhub_object$transcription_factors)
  return (comhub_object)
}
|
37ba9fa19865ab50c28b1df6763c3c84f0cce3eb
|
17ef71e1eabf7ab93f0fec321f427b576b90a264
|
/R/felm.old.R
|
94cc96f5dcc82b54024e6153a89e357e592bf765
|
[] |
no_license
|
sgaure/lfe
|
aa326228816f4a36828d8bfb44ec8523208c8643
|
79a10e4b68d79bce8c287d15e238505ddcc1a92e
|
refs/heads/master
| 2023-04-12T03:26:21.204881
| 2019-12-13T12:05:34
| 2019-12-13T12:05:34
| 138,496,051
| 55
| 12
| null | 2021-11-17T14:37:01
| 2018-06-24T16:05:08
|
R
|
UTF-8
|
R
| false
| false
| 5,371
|
r
|
felm.old.R
|
# $Id: felm.old.R 1655 2015-03-18 18:51:06Z sgaure $
# felm.old: fit a linear model with multiple group fixed effects.
# The fixed-effect factors (G(...) terms in `formula`, or the explicit
# `fl` list) are projected out of both response and covariates via
# demeanlist(); OLS is then run on the demeaned system with a Cholesky
# solve that drops multicollinear columns. Returns an object of class
# 'felm' with coefficients, (adjusted) standard errors, residuals against
# both the projected and the full system, and bookkeeping fields.
felm.old <- function(formula,fl,data) {
  mf <- match.call(expand.dots = FALSE)
  if(missing(fl)) {
    # we should rather parse the formula tree
    # find the terms involving G
    trm <- terms(formula,special='G')
    feidx <- attr(trm,'specials')$G-1
    festr <- paste(labels(trm)[feidx],collapse='+')
    if(festr == '') stop('No factors specified')
    # remove the G-terms from formula
    formula <- update(formula,paste('. ~ . -(',festr,')'))
    mf[['formula']] <- formula
    # then make a list of them, and find their names
    felist <- parse(text=paste('list(',gsub('+',',',festr,fixed=TRUE),')',sep=''))
    nm <- eval(felist,list(G=function(t) as.character(substitute(t))))
    # collapse them in case there's an interaction with a funny name
    nm <- lapply(nm,paste,collapse='.')
    # replace G with as.factor, eval with this, and the parent frame, or with data
    # allow interaction factors with '*'
    iact <- function(a,b) interaction(a,b,drop=TRUE)
    if(missing(data))
      fl <- eval(felist,list(G=as.factor,'*'=iact))
    else {
      G <- as.factor
      fl <- local({'*'<-iact;eval(felist,data,environment())})
    }
    gc()
    names(fl) <- nm
  } else {
    # warning('The fl-argument is obsolete')
  }
  if(!is.list(fl)) stop('need at least one factor')
  # Coerce every fixed-effect grouping to a factor and give unnamed ones
  # default names fe1, fe2, ...
  fl <- lapply(fl,as.factor)
  if(is.null(names(fl))) names(fl) <- paste('fe',1:length(fl),sep='')
  # mf <- match.call(expand.dots = FALSE)
  # Standard model.frame construction from the (G-stripped) formula.
  m <- match(c("formula", "data"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1L]] <- as.name("model.frame")
  mf <- eval(mf, parent.frame())
  mt <- attr(mf,'terms')
  y <- model.response(mf,'numeric')
  # try a sparse model matrix to save memory when removing intercept
  # though, demeanlist must be full.  Ah, no, not much to save because
  # it won't be sparse after centering
  # we should rather let demeanlist remove the intercept, this
  # will save memory by not copying.  But we need to remove it below in x %*% beta
  # (or should we extend beta with a zero at the right place, it's only
  # a vector, eh, is it, do we not allow matrix lhs?  No.)
  x <- model.matrix(mt,mf)
  rm(mf)
  # icpt: column index of the intercept in x, or 0 if there is none
  # (the first assignment is immediately overwritten; kept as-is).
  icpt <- 0
  icpt <- which(attr(x,'assign') == 0)
  if(length(icpt) == 0) icpt <- 0
  ncov <- ncol(x) - (icpt > 0)
  if(ncov == 0) {
    # No covariates: only the fixed effects; residuals are the demeaned y.
    fr <- demeanlist(y,fl)
    z <- list(r.residuals=y,fe=fl,p=0,cfactor=compfactor(fl),residuals=fr,call=match.call())
    class(z) <- 'felm'
    return(z)
  }
  # here we need to demean things
  dm <- demeanlist(list(y=y,x=x),fl,icpt)
  yz <- dm[[1]]
  xz <- dm[[2]]
  rm(dm)
  gc()
  # badconv counts centering iterations that failed to converge
  # (per demeanlist's 'badconv' attribute on each component).
  badconv <- attr(xz,'badconv') + attr(yz,'badconv')
  dim(xz) <- c(nrow(x),ncov)
  attributes(yz) <- attributes(y)
  # here we just do an lm.fit, however lm.fit is quite slow since
  # it doesn't use blas (in particular it can't use e.g. threaded blas in acml)
  # so we have rolled our own.
  # we really don't return an 'lm' object or other similar stuff, so
  # we should consider using more elementary operations which map to blas-3
  # eg. solve(crossprod(xz),t(xz) %*% yz)
  # Or, even invert by solve(crossprod(xz)) since we need
  # the diagonal for standard errors.  We could use the cholesky inversion
  # chol2inv(chol(crossprod(xz)))
  cp <- crossprod(xz)
  ch <- cholx(cp)
  # ch <- chol(cp)
  # beta <- drop(inv %*% (t(xz) %*% yz))
  # remove multicollinearities
  # cholx flags collinear columns via the 'badvars' attribute; those
  # coefficients are set to NaN and excluded from the solve/inverse.
  badvars <- attr(ch,'badvars')
  b <- crossprod(xz,yz)
  if(is.null(badvars)) {
    beta <- as.vector(backsolve(ch,backsolve(ch,b,transpose=TRUE)))
    inv <- chol2inv(ch)
  } else {
    beta <- rep(NaN,nrow(cp))
    beta[-badvars] <- backsolve(ch,backsolve(ch,b[-badvars],transpose=TRUE))
    inv <- matrix(NaN,nrow(cp),ncol(cp))
    inv[-badvars,-badvars] <- chol2inv(ch)
  }
  rm(b)
  if(icpt > 0) names(beta) <- colnames(x)[-icpt] else names(beta) <- colnames(x)
  # cat(date(),'projected system finished\n')
  z <- list(coefficients=beta,badconv=badconv)
  N <- nrow(xz)
  p <- ncol(xz) - length(badvars)
  # how well would we fit with all the dummies?
  # the residuals of the centered model equals the residuals
  # of the full model, thus we may compute the fitted values
  # resulting from the full model.
  zfit <- xz %*% ifelse(is.na(beta),0,beta)
  rm(xz)
  zresid <- yz - zfit
  rm(yz)
  z$fitted.values <- y - zresid
  z$residuals <- zresid
  # insert a zero at the intercept position
  if(length(fl) > 0) {
    if(icpt > 0) ibeta <- append(beta,0,after=icpt-1) else ibeta <- beta
    pred <- x %*% ifelse(is.na(ibeta),0,ibeta)
    z$r.residuals <- y - pred
  } else {
    z$r.residuals <- zresid
  }
  # z$xb <- pred
  rm(x)
  rm(y)
  gc()
  # Degrees-of-freedom bookkeeping: the number of estimable dummies is the
  # total number of factor levels minus one reference per connected
  # component (plus one per extra factor beyond two).
  z$cfactor <- compfactor(fl)
  numrefs <- nlevels(z$cfactor) + max(length(fl)-2,0)
  numdum <- sum(unlist(lapply(fl,nlevels))) - numrefs
  z$numrefs <- numrefs
  # if(length(fl) <= 2) {
  #   numdum <- sum(unlist(lapply(fl,nlevels))) - nlevels(z$cfactor)
  # } else {
  #   numdum <- sum(unlist(lapply(fl,nlevels))) - length(fl) + 1
  # }
  z$df <- N - p - numdum
  vcvfactor <- sum(z$residuals**2)/z$df
  z$vcv <- inv * vcvfactor
  z$se <- sqrt(diag(z$vcv))
  z$sefactor <- sqrt(vcvfactor)
  z$tval <- z$coefficients/z$se
  z$pval <- 2*pt(abs(z$tval),z$df,lower.tail=FALSE)
  z$terms <- mt
  z$fe <- fl
  z$N <- N
  z$p <- p + numdum
  z$xp <- p
  z$call <- match.call()
  class(z) <- 'felm'
  return(z)
}
|
6bc047b8c0bacfb424b631011298956357d32700
|
514130ee03008826e5df1902c8c4ce2b35344bfe
|
/man/format_table.Rd
|
61dc1ffd28e6a3f291bc15f0f27afef567020af5
|
[
"MIT"
] |
permissive
|
agstudy/formattable
|
cfbad16719ee5315fa6ebe219b2a1f10da685052
|
8fb808aaa33518247c85548ab6fe0015c0943da9
|
refs/heads/master
| 2021-01-13T06:48:47.471565
| 2015-10-30T06:06:39
| 2015-10-30T06:06:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,889
|
rd
|
format_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formattable.R
\name{format_table}
\alias{format_table}
\title{Format a data frame with formatter functions}
\usage{
format_table(x, formatters = list(), format = c("markdown", "pandoc"),
align = "r", ..., row.names = rownames(x), check.rows = FALSE,
check.names = FALSE)
}
\arguments{
\item{x}{a \code{data.frame}.}
\item{formatters}{a list of formatter functions or formulas.
The existing columns of \code{x} will be applied the formatter
function in \code{formatters} if it exists.
If a formatter is specified by formula, then the formula will be
interpreted as a lambda expression with its left-hand side being
a symbol and right-hand side being the expression using the symbol
to represent the column values. The formula expression will be evaluated
in \code{envir}, that, to maintain consistency, should be the calling
environment in which the formula is created and all symbols are defined
at runtime.}
\item{format}{The output format: markdown or pandoc?}
\item{align}{The alignment of columns: a character vector consisting
of \code{'l'} (left), \code{'c'} (center), and/or \code{'r'} (right).
By default, all columns are right-aligned.}
\item{...}{additional parameters to be passed to \code{knitr::kable}.}
\item{row.names}{row names to give to the data frame to knit}
\item{check.rows}{if TRUE then the rows are checked for consistency
of length and names.}
\item{check.names}{\code{TRUE} to check names of data frame to make
valid symbol names. This argument is \code{FALSE} by default.}
}
\value{
a \code{knitr_kable} object whose \code{print} method generates a
string-representation of \code{data} formatted by \code{formatter} in
specific \code{format}.
}
\description{
This is an table generator that specializes in creating
formatted table presented in a mix of markdown/reStructuredText and
HTML elements. To generate a formatted table, each column of data
frame can be transformed by formatter function.
}
\examples{
# mtcars (mpg in red)
format_table(mtcars,
list(mpg = formatter("span", style = "color:red")))
# mtcars (mpg in red if greater than median)
format_table(mtcars, list(mpg = formatter("span",
style = function(x) ifelse(x > median(x), "color:red", NA))))
# mtcars (mpg in red if greater than median, using formula)
format_table(mtcars, list(mpg = formatter("span",
style = x ~ ifelse(x > median(x), "color:red", NA))))
# mtcars (mpg in gradient: the higher, the redder)
format_table(mtcars, list(mpg = formatter("span",
style = x ~ style(color = rgb(x/max(x), 0, 0)))))
# mtcars (mpg background in gradient: the higher, the redder)
format_table(mtcars, list(mpg = formatter("span",
style = x ~ style(display = "block",
"border-radius" = "4px",
"padding-right" = "4px",
color = "white",
"background-color" = rgb(x/max(x), 0, 0)))))
}
|
2b64a6075b940e343807c435dfaa26b5b6c21dff
|
21acdc9cee7b9ff65a2412d133cf0d28e1800a22
|
/man/MenuSofi.Rd
|
8cabfe212576d14f8e20cb716bb951efab4cab13
|
[] |
no_license
|
cran/Sofi
|
9f2ae4d99bd3b29e440f5835623c69aa2b99c861
|
b8e7e69e906a40548a298bd28e940e1860f26e63
|
refs/heads/master
| 2021-01-17T14:47:56.905700
| 2016-04-10T00:49:47
| 2016-04-10T00:49:47
| 37,902,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
rd
|
MenuSofi.Rd
|
\name{SofiWebsite}
\alias{SofiWebsite}
\alias{Estadistica1}
\alias{Estadistica2}
\alias{Estadistica3}
\alias{Estadistica4}
\alias{Estadistica5}
\alias{Estadistica6}
\alias{Estadistica7}
\encoding{UTF-8}
\title{
Funciones de \pkg{Sofi} creadas para ser usadas en el lanzador. Para más información visitar http://www.sofi.uno/.
}
\description{
Para saber más sobre cada función lanzar dicha función para mas detalles.
}
\author{Jose D. Loera \email{loerasg@gmail.com}}
\seealso{
\code{\link{Sofi}}
}
\keyword{misc}
|
4ca6278c3dea09e9fd995c253e22687857b3f826
|
53a9ea36ab32e5768f0c6e1f4c4f0135c1b65e46
|
/de_countTables_comparison.R
|
9ae4547f9357af5be00b6ed644552f68f488ffde
|
[] |
no_license
|
barrantesisrael/Dual_RNAseq_review_analysis
|
220bea938ddc154f9ad8fa6243bacfc678f194d1
|
468fa5e4f1fecdd2f5bd8f084d59bba2a45ff798
|
refs/heads/main
| 2023-03-28T11:23:38.473519
| 2021-03-30T10:06:58
| 2021-03-30T10:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,673
|
r
|
de_countTables_comparison.R
|
#!/de_countTables_comparison.R
#The script takes a group of count tables and identifies their differentially expressed (DE) elements with DESeq2, edgeR, and NOISeq at different values of cut-off, replicates, and adjusted p-values.
#Then, it compares the identified DE elements to a list furnished by the user.
#Then, it plots different figures.
#The inputs are:
#1)count_table_dir -> Path to the directory where the count tables are saved. DO NOT add the last "/" on the path.
#2)patternFile -> Pattern used to select the right count tables.
#3)compare_table -> Path to the file the genes/isoforms' and their LFC are saved for the comparison. The column names are ID and LFC.
#4)sample_group -> Path to the file where the sample's group are saved as two columns called "Sample" and "Group".
#5)output_dir -> output directory name where the outputs will be save.
#6)groupComparison -> List of group comparison written as "Group1_Group2".
#7)selectReplicate -> List of values for the number of replicates to use per group during the differential expressed analysis.
#8)cutOff -> List of cut-off values for the filtering row (genes/isoforms) step which has a row-sum higher than the cut-off value.
#9)padj_FDR_value -> List of adjusted p-values used to filter for differential expressed genes/isoforms.
#10)log2FC_value -> Single value for the Log2 Fold Change used to select differential expressed genes/isoforms.
#The outputs are saved in a new created directory called from the value of "output_dir" saved in the "count_table_dir" directory.
#1)comparison_tpr_fp.txt -> table with the comparison results with the columns:
#Table (table's name) #Comparison (comparison name) #Tool (tool used to find DE) #Replicate (number of replicates) #CutOff (cut-off value) #AdjPvalue (adjusted p-value)
#TruthDE (number of DE from the user's input)
#TruePositive (number of DE elements in common between the user's input and the identified DE)
#FalsePositive (number of DE elements found in the identified DE but absent in the user's input)
#FalseNegative (number of DE elements found in the user's input but absent in the identified DE)
#TruePositiveRate (number of True Positive divided by the user's input DE)
#2)comparison_truthDE_lfc.txt -> table shows the Spearman correlation between the real LFC and the value from from the analysis with the columns:
#Table (table's name) #Comparison (comparison name) #Tool (tool used to find DE) #Replicate (number of replicates) #CutOff (cut-off value) #AdjPvalue (adjusted p-value)
#Correlation (Spearman's correlation between the truth LFC value and the LFC found from our analysis)
#3)plots.pdf -> one PDF plot file for all plots.
#4)plots.rds -> one RData where the plots are saved.
#Load Library
library(tidyverse) #Collection of R packages designed for data science: ggplot2, dplyr, tidyr, readr, purrr, tibble, stringr, forcats.
library(DESeq2) #Differential gene expression analysis based on the negative binomial distribution
library(edgeR) #Empirical Analysis of Digital Gene Expression Data in R
library(NOISeq) #Exploratory analysis and differential expression for RNA-seq data
library(gridExtra) #Provides functions to work with "grid" graphics, notably to arrange multiple grid-based plots on a page, and draw tables.
library(corrplot)
rm(list = ls()) #Remove all saved objects
####### USER's Input ######
#Files selection
count_table_dir <- "Hsapiens_Spneumoniae_SingleStrand_SE/6_count_table/hsapiens" #Path to the directory where the count tables are saved.
patternFile <- "isoform" #Pattern used to select the right count tables from the directory
compare_table <- "Hsapiens_Spneumoniae_SingleStrand_SE/metadata/truth_hsapiens_2_1_de_isoform.txt" #Path to the directory where the compare tables are present
sample_group <- "Hsapiens_Spneumoniae_SingleStrand_SE/metadata/sim_rep_info.txt" #Path to the file where the sample's group are saved as two columns called "Sample" and "Group".
output_dir <- "Comparison_hsapiens_isoform" #Create Directory in the "count_table_dir" directory where to save all results
#DE Information
groupComparison <- c("2_1") #List of group comparison written as "Group1_Group2".
selectReplicate <- c(2) #The highest number of replicates to use per group during the differential expressed analysis
cutOff <- c(0) #Filter for genes/isoforms which has a row-sum higher than the cut-off value.
padj_FDR_value <- c(0.05) #Adjusted p-values used to filter for differential expressed genes/isoforms.
log2FC_value <- 1 #Single value for the Log2 Fold Change used to select differential expressed genes/isoforms.
###########################
###Global Objects
list_count_table <- list() #Save all the count tables
de_list <- list() #Save the list of DEGs found for each count table at different settings of cutOff, replicates, etc..
de_allInfo_list <- list() #Save all the information at the end of the DEGs identification step.
ggplot_list <- list() #Save the plots
#Get the compare file with column names as "ID" and "LFC"
compare_table_df <- read.table(compare_table, header=TRUE, sep = "\t") %>%
drop_na() %>% #Get the compare DE list file
mutate(LFC = as.numeric(LFC)) %>% #Change the LFC column to numeric type
dplyr::group_by(ID) %>% #Group by the ID
dplyr::summarize(LFC = mean(LFC, na.rm=TRUE)) #In case of duplicated IDs get the mean LFC value.
sample_group_df <- read.table(sample_group, header=TRUE, sep = "\t") #Get the sample and its group membership file
comparison_de_df <- as.data.frame(matrix(data=NA, ncol=12, nrow=0)) #Comparison Table Result
#Get the count tables
files <- list.files(path=count_table_dir, pattern=patternFile)
for (i in files) {list_count_table[[i]] <- read.table(sprintf("%s/%s", count_table_dir, i), header=TRUE, sep = "\t") %>% drop_na()}
### Differential expressed analysis ###
iGC <- 1
iCT <- 1
iRep <- selectReplicate[1]
iCO <- cutOff[1]
pv <- padj_FDR_value[1]
for (iGC in 1:length(groupComparison)) #Loop through the group comparisons
{
#Get the list of DE to compare to
de_compare_list <- unique (compare_table_df$ID)
#Dataframe for LFC analysis
LFC_analysis_df <- compare_table_df %>%
dplyr::select(ID, LFC) %>%
dplyr::rename(Truth=LFC) %>%
dplyr::distinct(ID, .keep_all = TRUE) %>%
dplyr::arrange(ID)
for (iCT in 1:length(list_count_table)) #Loop through the count tables
{
#Tidy the Count Table
nowTable <- list_count_table[[iCT]]
rowID <- nowTable[,1]
nowTable <- nowTable %>% dplyr::select (-c(colnames(nowTable)[1])) %>% mutate_if (is.character,as.numeric)
rownames(nowTable) <- rowID
for (iRep in selectReplicate) #Loop through the number of replicates from 2 to the maximum number of replicate
{
for (iCO in cutOff) #Loop through the cut-off values
{
print(sprintf("Table: %s * Group Comparison: %s * Replicate: %s * CutOff: %s", files[iCT], iGC, iRep, iCO))
#Get the info for the two groups
groupInfo <- str_split(groupComparison, pattern="_")[[1]]
if(length(groupInfo)==2)
{
#Select the right samples for Group1 based on the number of replicates
group1Samples <- sample_group_df %>% filter(Group==groupInfo[1])
group1Samples <- sample (group1Samples$Sample, iRep) #Randomly select the samples in the group
#Select the right samples for Group2 based on the number of replicates
group2Samples <- sample_group_df %>% filter(Group==groupInfo[2])
group2Samples <- sample (group2Samples$Sample, iRep) #Randomly select the samples in the group
#Select the samples selecteed and tidy the table
compaDF <- nowTable %>%
dplyr::select(all_of(c(group1Samples, group2Samples))) %>%
rownames_to_column() %>%
mutate(Sum=rowSums(dplyr::select(., -starts_with("rowname")))) %>%
filter(Sum >= iCO) %>%
dplyr::select(-Sum) %>%
column_to_rownames()
# Group Factors
sampleGroups <- append (rep(1,iRep), rep(2,iRep))
myfactors_D_E <- factor(sampleGroups) #Factors (DESeq2 & edgeR)
myfactors_N <- data.frame(group = sampleGroups) #Factors for NOISeq
#------- DESeq2 ------#
matrixCountTable <- round (as.matrix(compaDF))
coldata <- data.frame(row.names=colnames(matrixCountTable), myfactors_D_E)
dds <- DESeqDataSetFromMatrix(countData=matrixCountTable, colData=coldata, design=~myfactors_D_E)
dds <- DESeq(dds)
res <- results(dds)
res <- as.data.frame(res)
#------- edgeR ------#
y <- DGEList(counts = compaDF, group = myfactors_D_E)
y <- calcNormFactors(y) #TMM normalisation
y <- estimateDisp(y)
et <- exactTest(y)
edgeR_res <- topTags(et, n = nrow(compaDF), sort.by = "none")
#------- NOISeq (biological replicates ------#
noiseq_data <- readData(data = compaDF, factors = myfactors_N) #Converting data into a NOISeq object
gc() #Running Garbage Collection
mynoiseqbio <- noiseqbio(noiseq_data, k = 0.5, norm = "tmm", factor = "group", r = 20, adj = 1.5, plot = FALSE, a0per = 0.9, random.seed = 12345, filter = 0)
#Adjusted P-values
for (pv in padj_FDR_value)
{
###DESeq2
deseq2_deg <- res %>% filter(padj < pv, abs(log2FoldChange) >= log2FC_value)
de_list[[sprintf("%s*%s*DESeq2*%s*%s*%s", files[iCT], groupComparison[iGC], iRep, iCO, pv)]] <- unique(rownames(deseq2_deg))
#Add DESeq2 results to the Comparison result dataframe
tp <- intersect(de_compare_list, rownames(deseq2_deg))
fp <- setdiff(rownames(deseq2_deg), de_compare_list)
fn <- setdiff(de_compare_list, rownames(deseq2_deg))
tpr <- length(tp)/length(de_compare_list)
comparison_de_df <- rbind(comparison_de_df, c(files[iCT], groupComparison[iGC], "DESeq2", iRep, iCO, pv, length(de_compare_list), length(tp), length(fp), length(fn), tpr, length(unique(rownames(deseq2_deg)))))
#Add the LFC
deseq2_lfc <- deseq2_deg %>% rownames_to_column(var="ID") %>% filter(ID %in% LFC_analysis_df$ID) %>% dplyr::select(ID, log2FoldChange) %>% dplyr::rename(LFC=log2FoldChange)
notFoundID <- data.frame(ID=setdiff(LFC_analysis_df$ID, rownames(deseq2_deg)))
notFoundID$LFC <- 0
deseq2_lfc_toAdd <- rbind(deseq2_lfc, notFoundID) %>% arrange(ID) %>% dplyr::select(LFC)
LFC_analysis_df$deseq2_lfc_toAdd <- deseq2_lfc_toAdd
colnames(LFC_analysis_df)[which(colnames(LFC_analysis_df)=="deseq2_lfc_toAdd")] <- sprintf("%s*%s*DESeq2*%s*%s*%s", files[iCT], groupComparison[iGC], iRep, iCO, pv)
###edgeR
edgeR_deg <- tibble::rownames_to_column(edgeR_res$table, "Gene") %>% filter (FDR < pv, abs(logFC) >= log2FC_value)
de_list[[sprintf("%s*%s*edgeR*%s*%s*%s", files[iCT], groupComparison[iGC], iRep, iCO, pv)]] <- unique(edgeR_deg$Gene)
#Add edgeR results to the Comparison result dataframe
tp <- intersect(de_compare_list, edgeR_deg$Gene)
fp <- setdiff( edgeR_deg$Gene, de_compare_list)
fn <- setdiff(de_compare_list, edgeR_deg$Gene)
tpr <- length(tp)/length(de_compare_list)
comparison_de_df <- rbind(comparison_de_df, c(files[iCT], groupComparison[iGC], "edgeR", iRep, iCO, pv, length(de_compare_list), length(tp), length(fp), length(fn), tpr, length(unique(edgeR_deg$Gene))))
#Add the LFC
edgeR_lfc <- edgeR_deg %>% filter(Gene %in% LFC_analysis_df$ID) %>% dplyr::select(Gene, logFC) %>% dplyr::rename(LFC=logFC)
notFoundID <- data.frame(Gene=setdiff(LFC_analysis_df$ID, edgeR_lfc$Gene))
notFoundID$LFC <- 0
edgeR_lfc_toAdd <- rbind(edgeR_lfc, notFoundID) %>% arrange(Gene) %>% dplyr::select(LFC)
LFC_analysis_df$edgeR_lfc_toAdd <- edgeR_lfc_toAdd
colnames(LFC_analysis_df)[which(colnames(LFC_analysis_df)=="edgeR_lfc_toAdd")] <- sprintf("%s*%s*edgeR*%s*%s*%s", files[iCT], groupComparison[iGC], iRep, iCO, pv)
#NOISeq
noiseq_deg <- degenes(mynoiseqbio, q = 1-pv, M = NULL)
noiseq_deg <- subset(noiseq_deg, abs(log2FC) >= 1)
de_list[[sprintf("%s*%s*NOISeq*%s*%s*%s", files[iCT], groupComparison[iGC], iRep, iCO, pv)]] <- unique(rownames(noiseq_deg))
#Add NOISeq results to the Comparison result dataframe
tp <- intersect(de_compare_list, rownames(noiseq_deg))
fp <- setdiff(rownames(noiseq_deg), de_compare_list)
fn <- setdiff(de_compare_list, rownames(noiseq_deg))
tpr <- length(tp)/length(de_compare_list)
comparison_de_df <- rbind(comparison_de_df, c(files[iCT], groupComparison[iGC], "NOISeq", iRep, iCO, pv, length(de_compare_list), length(tp), length(fp), length(fn), tpr, length(unique(rownames(noiseq_deg)))))
#Add the LFC
noiseq_lfc <- noiseq_deg %>% rownames_to_column(var="ID") %>% filter(ID %in% LFC_analysis_df$ID) %>% dplyr::select(ID, log2FC) %>% dplyr::rename(LFC=log2FC)
notFoundID <- data.frame(ID=setdiff(LFC_analysis_df$ID, rownames(noiseq_deg)))
notFoundID$LFC <- 0
noiseq_lfc_toAdd <- rbind(noiseq_lfc, notFoundID) %>% arrange(ID) %>% dplyr::select(LFC)
LFC_analysis_df$noiseq_lfc_toAdd <- noiseq_lfc_toAdd
colnames(LFC_analysis_df)[which(colnames(LFC_analysis_df)=="noiseq_lfc_toAdd")] <- sprintf("%s*%s*NOISeq*%s*%s*%s", files[iCT], groupComparison[iGC], iRep, iCO, pv)
}
}
}
}
}
}
colnames(comparison_de_df) <- c("Table", "Comparison", "Tool", "Replicate", "CutOff", "AdjPvalue", "TruthDE", "TruePositive", "FalsePositive", "FalseNegative", "TruePositiveRate", "TotalDEs")
#Convert to numeric: Replicate - FalsePositive - TruePositiveRate - TotalDEs
i <- c(4, 9, 11, 12)
comparison_de_df [ , i] <- apply(comparison_de_df [ , i], 2, function(x) as.numeric(as.character(x)))
comparison_de_df$CutOff <- factor(comparison_de_df$CutOff, levels = unique (as.numeric(as.character(comparison_de_df$CutOff)))) #Set the right order for the CutOff factors
comparison_de_df$AdjPvalue <- factor(comparison_de_df$AdjPvalue, levels = unique (as.numeric(as.character(comparison_de_df$AdjPvalue)))) #Set the right order for the AdjPvalue factors
##### ##### GGPLOT Series
#Colours
color_pvalue <- c("gray", "black")
color_table <- c("red", "blue", "green", "purple", "orange", "brown", "aquamarine", "yellow", "cyan")
color_total <- c(color_pvalue, color_table) #In case you want to descriminate the min and the max value of the AdjPvalue
#Pvalues
pv <- as.numeric(as.character(unique(comparison_de_df$AdjPvalue)))
min_pvalue <- min (pv)
max_pvalue <- max (pv)
###PLOT -> Number of Identified Differential Expressed Elements
for (iCom in unique(comparison_de_df$Comparison))
{
ggplot_list[[sprintf("%s - %s - Identified DE elements", patternFile, iCom)]] <- comparison_de_df %>%
filter(Comparison==iCom) %>% #Filter for Group Comparison and AdjPvalue
ggplot(aes(x=Replicate, y=TotalDEs, shape=CutOff, color=AdjPvalue, group = interaction(Table))) +
geom_point() +
facet_grid(cols = vars(Table), rows = vars(Tool)) +
scale_colour_manual(values=color_table) + #Colour for both tables and the Pvalues (min & max)
scale_shape_manual (values=c(0:19)) +
labs(title=sprintf("%s - Number of Identified DE elements", iCom)) +
geom_hline(yintercept=as.numeric(unique(comparison_de_df$TruthDE)[1]), linetype="dashed", color = "black", size=0.5)
}
#Plot -> True Positive Rate vs. Number of False Positive
# Two variants per comparison: linear x-axis and log10 x-axis. Only the
# smallest and largest adjusted p-value thresholds are shown, and a line
# connects the cutoffs within each (Table, AdjPvalue) pair.
for (iCom in unique(comparison_de_df$Comparison)) #Loop through the group comparisons
{
ggplot_list[[sprintf("%s*%s*Number of Identified DE elements",patternFile, iCom)]] <- comparison_de_df %>%
filter(Comparison==iCom & AdjPvalue %in% c(min_pvalue, max_pvalue)) %>% #Filter for Group Comparison and AdjPvalue
ggplot(aes(x=FalsePositive, y=TruePositiveRate, shape=CutOff, color=Table, group = interaction(Table, AdjPvalue))) +
geom_point() +
geom_line (aes(color=AdjPvalue)) +
facet_grid(cols = vars(Replicate), rows = vars(Tool)) +
scale_colour_manual(values=color_total) + #Colour for both tables and the Pvalues (min & max)
scale_shape_manual (values=c(0:19)) +
theme(panel.spacing = unit(1.2, "lines")) +
labs(x="N* False Positive", y="True Positive Rate", title=sprintf("%s - Number of Identified DE elements", iCom))
# Same plot with the false-positive axis on a log10 scale
ggplot_list[[sprintf("%s*%s*AxisX_log10",patternFile, iCom)]] <- comparison_de_df %>%
filter(Comparison==iCom & AdjPvalue %in% c(min_pvalue, max_pvalue)) %>% #Filter for Group Comparison and AdjPvalue
ggplot(aes(x=FalsePositive, y=TruePositiveRate, shape=CutOff, color=Table, group = interaction(Table, AdjPvalue))) +
geom_point() +
geom_line (aes(color=AdjPvalue)) +
facet_grid(cols = vars(Replicate), rows = vars(Tool)) +
scale_colour_manual(values=color_total) + #Colour for both tables and the Pvalues (min & max)
scale_shape_manual (values=c(0:19)) +
scale_x_continuous(trans = "log10") +
theme(panel.spacing = unit(1.2, "lines")) +
labs(x="N* False Positive", y="True Positive Rate")
}
###PLOT -> LFC Correlation Analysis
# Step 1: Spearman correlation on the absolute log fold changes. Each column
# of LFC_analysis_df is one table/tool/setting combination; the "Truth"
# column holds the simulated true LFCs.
LFC_analysis_df <- LFC_analysis_df %>% column_to_rownames("ID")
LFC_analysis_df_2 <- as.data.frame(LFC_analysis_df)
# BUG FIX: the original assigned an all-column apply() result into the four
# columns indexed by the stale `i` (c(4, 9, 11, 12)) left over from the TPR
# section, a dimension mismatch. Every column must be coerced to numeric.
LFC_analysis_df_2[] <- lapply(LFC_analysis_df_2, function(x) as.numeric(as.character(x)))
# Use absolute LFCs so the correlation ignores the direction of change
LFC_analysis_df_2[] <- lapply(LFC_analysis_df_2, abs)
cor_LFC_df <- cor(LFC_analysis_df_2, method = "spearman")
cor_truth_row <- subset(cor_LFC_df, rownames(cor_LFC_df) == "Truth")
# Correlation between the truth and every other setting (drop truth-vs-truth)
cor_LFC_truth_df <- data.frame(ID = colnames(cor_LFC_df), Correlation = unname(cor_truth_row[1, ]))
cor_LFC_truth_df <- cor_LFC_truth_df[!(cor_LFC_truth_df$ID == "Truth"), ]
# Column names are "Table*Comparison*Tool*Replicate*CutOff*AdjPvalue"
cor_LFC_truth_df <- cor_LFC_truth_df %>% separate(ID, c("Table", "Comparison", "Tool", "Replicate", "CutOff", "AdjPvalue"), "\\*")
#Convert to numeric: Replicate - Correlation
i <- c(4, 7)
cor_LFC_truth_df[, i] <- apply(cor_LFC_truth_df[, i], 2, function(x) as.numeric(as.character(x)))
cor_LFC_truth_df$CutOff <- factor(cor_LFC_truth_df$CutOff, levels = unique(as.numeric(as.character(cor_LFC_truth_df$CutOff)))) #Set the right order for the CutOff factors
cor_LFC_truth_df$AdjPvalue <- factor(cor_LFC_truth_df$AdjPvalue, levels = unique(as.numeric(as.character(cor_LFC_truth_df$AdjPvalue)))) #Set the right order for the AdjPvalue factors
# One correlation plot per group comparison: Spearman correlation with the
# true |LFC| per replicate, faceted by count table (columns) and tool (rows).
for (iCom in unique(cor_LFC_truth_df$Comparison))
{
ggplot_list[[sprintf("%s*%s*Truth DE Correlation ", patternFile, iCom)]] <- cor_LFC_truth_df %>%
filter(Comparison==iCom) %>% #Keep only the current group comparison
ggplot(aes(x=Replicate, y=Correlation, shape=CutOff, color=AdjPvalue, group = interaction(Table))) +
geom_point() +
facet_grid(cols = vars(Table), rows = vars(Tool)) +
scale_colour_manual(values=color_table) + #Colour for both tables and the Pvalues (min & max)
scale_shape_manual (values=c(0:19)) +
labs(title=sprintf("%s - %s - Correlation between truth DE", patternFile, iCom))
}
##### Save Data & Plots #####
# Write the comparison tables, persist the ggplot objects as .rda, then
# render every stored plot into a single multi-page PDF.
dir.create(file.path(count_table_dir, output_dir), showWarnings = FALSE)
write.table(comparison_de_df, file= sprintf("%s/%s/comparison_tpr_fp.txt", count_table_dir, output_dir), row.names=FALSE, sep="\t", quote = FALSE)
write.table(cor_LFC_truth_df, file= sprintf("%s/%s/comparison_truthDE_lfc.txt", count_table_dir, output_dir), row.names=FALSE, sep="\t", quote = FALSE)
save(ggplot_list,file=sprintf("%s/%s/plots.rda", count_table_dir, output_dir))
pdf(sprintf("%s/%s/plots.pdf", count_table_dir, output_dir), onefile=TRUE)
# seq_along() is safe when ggplot_list is empty, unlike seq(length(x));
# a fresh loop variable avoids clobbering the column-index `i` used above.
for (iPlot in seq_along(ggplot_list)) {grid.arrange(ggplot_list[[iPlot]])}
dev.off()
|
0c24daab11dc6185b9a16f7131a1313e431d8fc1
|
741794f5f03f3f7f832d85e7c10aa7f32948010c
|
/data-raw/scripts/crosswalk ISCO 88 ISCO 08.R
|
bb5f34011446fbe69653acf2befed0125729764f
|
[
"MIT"
] |
permissive
|
Guidowe/occupationcross
|
7e634a2ed08ed83e1b630184606c66b6240c2a52
|
aea616fb4a5a89867db87b0acd0adf3244bfa12d
|
refs/heads/master
| 2023-04-19T04:59:35.516557
| 2022-08-26T13:08:51
| 2022-08-26T13:08:51
| 287,028,618
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,167
|
r
|
crosswalk ISCO 88 ISCO 08.R
|
####funciones y librerias#####
library(tidyverse)
library(readxl)
library(stringr)
library(foreign)
#Funcion propia para aleatorizar. Específica para esto, la voy a aplicar al final
# Randomly draw one ISCO-88 code from a nested crosswalk data frame (one of
# the tibbles created by group_by(ISCO3D) %>% nest()).
#
# BUG FIX: the original used sample(df$`ISCO-88 code`, size = 1), which hits
# the base-R sample() footgun — when the candidate vector has length 1 and a
# positive numeric value x, sample(x, 1) draws from 1:x instead of returning
# x itself. Indexing with sample.int() over the row count gives a uniform
# draw for any number of candidates and returns the code verbatim when there
# is only one.
sample.isco <- function(df) {
  codes <- df$`ISCO-88 code`
  codes[sample.int(length(codes), size = 1)]
}
##### Load LFS base and crosswalk #####
# Correspondence table between ISCO-08 and ISCO-88 occupation codes
crosstable_isco08_isco88 <- read_excel("data-raw/cross/corrtab88-08.xls")
# Italian Labour Force Survey microdata, 2014
base_lfs <- readRDS("data-raw/bases/IT2014.RDS")
#saveRDS(base_lfs,"../crosswalk ocupaciones/Bases/IT2014.RDS")
save(crosstable_isco08_isco88, file = "data/crosstable_isco08_isco88.rda")

# Small toy sample of the LFS base, kept for examples/tests
toy_base_lfs <- base_lfs %>%
  select(YEAR, SEX, AGE, WSTATOR, ILOSTAT, COUNTRYB, ISCO3D, ISCO1D, COEFF, HAT11LEV, SIZEFIRM) %>%
  sample_n(size = 2000)
save(toy_base_lfs, file = "Bases/toybase_lfs_ita2014.rda")

### Truncate ISCO-08 to 3 digits in the crosswalk (that is how it appears in the LFS)
# BUG FIX: the original referenced an undefined object `cross_isco`; the
# crosswalk was actually loaded above as `crosstable_isco08_isco88`.
cross_isco_3dig <- crosstable_isco08_isco88 %>%
  mutate(ISCO3D = as.integer(str_sub(string = `ISCO 08 Code`, 1, 3))) %>%
  add_row(ISCO3D = 999) # Row for the 999 (missing) cases, so they still have something to join to

### One row per 3-digit ISCO-08 code, with a nested data frame holding all
### of its possible ISCO-88 matches
nested.data.isco.cross <- cross_isco_3dig %>%
  select(ISCO3D, `ISCO-88 code`) %>%
  group_by(ISCO3D) %>%
  nest()
# This produces a nested tibble (a list-column of per-code data frames)

### Join the LFS base to the nested crosswalk ###
base_lfs_con_join <- base_lfs %>%
  left_join(nested.data.isco.cross)

# Fix the seed so the randomisation below can be reproduced exactly
set.seed(999971)
base_lfs_sampleada <- base_lfs_con_join %>%
  mutate(ISCO.88.sorteado = map(data, sample.isco)) # Random draw of one ISCO-88 code per row

base_lfs_sampleada <- base_lfs_sampleada %>%
  select(-data) %>% # Drop the nested helper column used only for the draw
  mutate(ISCO.88.sorteado = as.numeric(ISCO.88.sorteado))

# Count how many ISCO3D cases ended up in each 4-digit ISCO-88 code
Conteo_de_cruces <- base_lfs_sampleada %>%
  group_by(ISCO3D, ISCO.88.sorteado) %>%
  summarise(Casos = n())

# Export to .dta (Stata) if needed
#write.dta(base_lfs_sampleada,"base_sampleada.dta")
|
cff7576912c2ec0aa93c581e6423c0e842826663
|
23529ed1c1c1181f63cb68ea1a4fd43b3204054d
|
/Scripts/ChromoPainter/Run_ChromoPainter_Barcode.R
|
c91f6df9b4fd17604b526720c7c9ae8e303ce1d3
|
[] |
no_license
|
aimeertaylor/QuantLocalPfConnIBD
|
3dd17631a7e3217a61d29ed92d2e55677aebdf61
|
56af8211c721f8ab8d1ccd09afcadf9ceec53f47
|
refs/heads/master
| 2022-01-19T23:44:03.968154
| 2019-05-22T16:57:38
| 2019-05-22T16:57:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,609
|
r
|
Run_ChromoPainter_Barcode.R
|
##################################################################################################
# Script to run ChromoPainter on Barcode data
# Builds the three fineSTRUCTURE/ChromoPainter input files (.ids, .phase,
# .recombfile) from the 93-SNP barcode data, then runs ChromoPainter via fs.
# Takes 31 secs
##################################################################################################
rm(list = ls())
# load data in IBD file format
BarcodeData <- read.delim('../../TxtData/Barcode93.txt', sep = '\t')
load('../../RData/Data_store_Barcode.RData')
MetaData <- Data_store$MetaData
# Remove 'chrom' and 'pos' columns from sequences and transpose s.t. Sample.ID per row
SNPData <- t(BarcodeData[,-(1:2)])
# -1 is the missing-data code in the input; recode as NA
SNPData[SNPData == -1] <- NA
# Name metadata by Sample.ID so can order by SNPdata
rownames(MetaData) <- as.character(MetaData$Sample.ID)
MetaData <- MetaData[rownames(SNPData), ]
# Summarise data dimensions
numSamples <- nrow(SNPData)
numSNPs <- ncol(SNPData) # biallelic snps only
# ====================================================================================
# Create files for fs
# For input help see https://people.maths.bris.ac.uk/~madjl/finestructure/manualse4.html#x7-90004.1
# ====================================================================================
# Restrict data for initial set up (checking program runs etc.)
# Currently no restriction: the "capped" counts equal the full counts.
numSNPs_capped <- numSNPs
numSamples_capped <- numSamples
# <idfile>
# Space separated id file: N lines, one per individual: <NAME> <POPULATION> <INCLUSION>, which are string, string, 0 or 1
idfile <- cbind(MetaData[, c('Sample.ID', 'Location.code')], 1)
write.table(idfile[1:numSamples_capped,], file = './Barcode.ids', sep = " ", quote = FALSE,
row.names = FALSE, col.names = FALSE)
# <phasefiles>
# First: imput missing SNPs, since not supported by fs (assume indpendence)
frequencies <- colMeans(SNPData, na.rm = TRUE)
set.seed(1) # set seed for reproducability
SNPDataImputed <- SNPData
for(i in 1:numSamples){ # for each missing per row, draw from a Bernoulli with prob = allele frequency
missing_ind <- is.na(SNPDataImputed[i,])
n_missing <- sum(missing_ind)
SNPDataImputed[i, missing_ind] <- rbinom(n = n_missing, size = 1, prob = frequencies[missing_ind])
}
# Second check order of haplotypes against idfile
# NOTE(review): the result of this comparison is printed but never checked;
# consider wrapping in stopifnot() so a sample-order mismatch halts the script.
rownames(SNPDataImputed) == as.character(idfile[,1])
# Third collapse each samples haplotype into a character with no spaces
haps <- matrix(apply(SNPDataImputed, 1, function(x){paste(x[1:numSNPs_capped], collapse = '')}), ncol = 1)
# Create phase file
phasefile <- rbind(numSamples_capped, # First row is the number of haplotypes
numSNPs_capped, # Second row is number of SNPs
paste('P', paste(BarcodeData$pos[1:numSNPs_capped], collapse = ' ')), # Third row contains "P" followd by bp pos of each SNP
haps[1:numSamples_capped,, drop = FALSE]) # Each additional line contains a haplotype. NO SPACES. NO MISSING VALUES.
write.table(phasefile, file = './Barcode.phase', sep = " ", quote = FALSE,
row.names = FALSE, col.names = FALSE)
# <recombfiles>
# Space separated recombfile with header file, pos, and distance in M/bp
default <- getOption("scipen") # Get default scipen so can temporarily surpress scientific notation
options(scipen = 999)
recomrateperbp <- rep(7.4e-7, numSNPs)
# Put '-9' at last bp position of preceding chrom
recomrateperbp[(chrom - chrom_plus1) == -1] <- -9
# Create and write file
recombfile <- cbind(start.pos = BarcodeData$pos,
recom.rate.perbp = recomrateperbp) # M/bp [Miles et al, Genome Res 26:1288-1299 (2016)]
write.table(recombfile[1:numSNPs_capped, ], file = './Barcode.recombfile', sep = " ", quote = FALSE,
row.names = FALSE, col.names = TRUE)
# Restore scipen
options(scipen = default)
# ====================================================================================
# Run fs with a recomb file
# WARNING: No regions found, insufficient data for calculating c.
# Try running chromopainter either with a smaller "-k" option, or run chromocombine with the "-C" option.
# See http://www.paintmychromosomes.com (faq page) for a discussion of this issue.
#
# Run chromopainter alone by specifying -dos2. Takes ~ 31 secs.
# ====================================================================================
system.time(
system('../../../../fs-2.1.1/fs ./Barcode_unlinked.cp -n -phasefiles ./Barcode.phase -idfile ./Barcode.ids -ploidy 1 -dos2')
)
# Can also run this way (need to specify the -o flag)
# system('../../../../fs-2.1.1/fs cp -t ./Barcode.ids -g ./Barcode.phase -u -j -a 0 0 -o ./Test')
|
10634d711fc8315657e81f3484328b4fbe6f6218
|
73c9b3c52db44bca119ecd3585ff38db1e9c05b1
|
/man/vectors3d.Rd
|
2fcbed1014b0a6b1fac04c3f4a1b24638ce8bb23
|
[] |
no_license
|
friendly/matlib
|
a360f4f975ae351ce1a5298c7697b460c4ba8dcd
|
13bb6ef45f9832d4bc96e70acc5e879e5f0c5c90
|
refs/heads/master
| 2023-08-30T13:23:26.679177
| 2023-08-25T17:29:23
| 2023-08-25T17:29:23
| 45,190,492
| 72
| 19
| null | 2023-03-16T19:30:18
| 2015-10-29T15:01:24
|
R
|
UTF-8
|
R
| false
| true
| 3,251
|
rd
|
vectors3d.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vectors3d.R
\name{vectors3d}
\alias{vectors3d}
\title{Draw 3D vectors}
\usage{
vectors3d(
X,
origin = c(0, 0, 0),
headlength = 0.035,
ref.length = NULL,
radius = 1/60,
labels = TRUE,
cex.lab = 1.2,
adj.lab = 0.5,
frac.lab = 1.1,
draw = TRUE,
...
)
}
\arguments{
\item{X}{a vector or three-column matrix representing a set of geometric vectors; if a matrix, one vector is drawn for each row}
\item{origin}{the origin from which they are drawn, a vector of length 3.}
\item{headlength}{the \code{headlength} argument passed to \code{\link{arrows3d}} determining the length of arrow heads}
\item{ref.length}{vector length to be used in scaling arrow heads so that they are all the same size; if \code{NULL}
the longest vector is used to scale the arrow heads}
\item{radius}{radius of the base of the arrow heads}
\item{labels}{a logical or a character vector of labels for the vectors. If \code{TRUE} and \code{X} is a matrix,
labels are taken from \code{rownames(X)}. If \code{FALSE} or \code{NULL}, no labels are drawn.}
\item{cex.lab}{character expansion applied to vector labels. May be a number or numeric vector corresponding to the the
rows of \code{X}, recycled as necessary.}
\item{adj.lab}{label position relative to the label point as in \code{\link[rgl]{text3d}}, recycled as necessary.}
\item{frac.lab}{location of label point, as a fraction of the distance between \code{origin} and \code{X}, recycled as necessary.
Values \code{frac.lab > 1} locate the label beyond the end of the vector.}
\item{draw}{if \code{TRUE} (the default), draw the vector(s).}
\item{...}{other arguments passed on to graphics functions.}
}
\value{
invisibly returns the vector \code{ref.length} used to scale arrow heads
}
\description{
This function draws vectors in a 3D plot, in a way that facilitates constructing vector diagrams. It allows vectors to be
specified as rows of a matrix, and can draw labels on the vectors.
}
\section{Bugs}{
At present, the color (\code{color=}) argument is not handled as expected when more than one vector is to be drawn.
}
\examples{
vec <- rbind(diag(3), c(1,1,1))
rownames(vec) <- c("X", "Y", "Z", "J")
library(rgl)
open3d()
vectors3d(vec, color=c(rep("black",3), "red"), lwd=2)
# draw the XZ plane, whose equation is Y=0
planes3d(0, 0, 1, 0, col="gray", alpha=0.2)
vectors3d(c(1,1,0), col="green", lwd=2)
# show projections of the unit vector J
segments3d(rbind(c(1,1,1), c(1, 1, 0)))
segments3d(rbind(c(0,0,0), c(1, 1, 0)))
segments3d(rbind(c(1,0,0), c(1, 1, 0)))
segments3d(rbind(c(0,1,0), c(1, 1, 0)))
# show some orthogonal vectors
p1 <- c(0,0,0)
p2 <- c(1,1,0)
p3 <- c(1,1,1)
p4 <- c(1,0,0)
corner(p1, p2, p3, col="red")
corner(p1, p4, p2, col="red")
corner(p1, p4, p3, col="blue")
rgl.bringtotop()
}
\seealso{
\code{\link{arrows3d}}, \code{\link[rgl]{texts3d}}, \code{\link[rgl]{rgl.material}}
Other vector diagrams:
\code{\link{Proj}()},
\code{\link{arc}()},
\code{\link{arrows3d}()},
\code{\link{circle3d}()},
\code{\link{corner}()},
\code{\link{plot.regvec3d}()},
\code{\link{pointOnLine}()},
\code{\link{regvec3d}()},
\code{\link{vectors}()}
}
\author{
Michael Friendly
}
\concept{vector diagrams}
|
a5b659560231902d1b1341c5be9d34e2ef0c0d71
|
5b7812ad30e9d0bf3627b120e05eabb9818590e7
|
/Matt multiple csvs.R
|
b08ebd54c0af6aaa87f018db5c1ea478860d32d5
|
[] |
no_license
|
annam21/School
|
53db458b16e351cb64e4f53c37a91903dedf5ac1
|
b49b5e6ba27c1ae9118b0ba9c51944c0f1c84857
|
refs/heads/master
| 2021-01-12T11:52:11.599333
| 2019-04-25T01:27:18
| 2019-04-25T01:27:18
| 69,594,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 623
|
r
|
Matt multiple csvs.R
|
# Multiple csv's to Matt
# Anna Moeller
# 10/25/2018
# Reads every csv under a directory into one tibble, tagging each row with
# information extracted from its file path.
# Load packages
library(tidyverse)
# All the file paths (full path).
# NOTE: the first argument (the directory to search) still needs to be
# filled in before this script will run.
filepaths <- list.files(, full.names = TRUE) # fill in here
# Pull in all the csvs into a tibble
dat <- tibble(File = filepaths) %>%
  # This is an example of how to extract certain things from the filepath into new columns
  extract(File, "Site", "/(.* County)/", remove = FALSE) %>%
  # Read in the files into a single list-column (one data frame per file)
  mutate(Data = lapply(File, read_csv)) %>%
  # Flatten the list-column into ordinary rows (no list column)
  unnest(Data)
|
208551c5a67b2d52fa25923cfefdf4491fee4cfd
|
da05dfd4e59fc3a9c25087f40b87142a7902564c
|
/Simulation1_v2June2017.R
|
8ad0bfafaecc4ed0db007ef37794a49081bc7406
|
[
"MIT"
] |
permissive
|
ehsanx/HierarchicalTMLE
|
165043828434ee8c2fc1a6803a4d5318ecc24e5f
|
1eae53cfa9d5a9738c748f1c3c67476827862dfd
|
refs/heads/master
| 2022-02-28T19:05:26.131712
| 2019-10-17T13:27:17
| 2019-10-17T13:27:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,199
|
r
|
Simulation1_v2June2017.R
|
#-----------------------------#-----------------------------#-----------------------------#-----------------------------
# R code to generate simulated data and implement the hierarchical TMLEs
# described in Balzer et al: "A New Approach to Hierarchical Data Analysis:
# Targeted Maximum Likelihood Estimation of Cluster-Based Effects Under Interference"
#
# Simulation 1 - Simple Observational Settings
#
# Programer: Laura Balzer (lbbalzer@gmail.com)
#
# Last update: June 2, 2017
#-----------------------------#-----------------------------#-----------------------------#-----------------------------
# Clear the workspace, load MASS, pull in the TMLE estimation functions,
# and fix the RNG seed so the whole simulation is reproducible.
rm(list=ls())
library('MASS')
source('ClusterTMLE_Functions_v2June2017.R')
set.seed(1)
#-----------------------------#-----------------------------#-----------------------------#-----------------------------
# getY: generate the binary outcome for one cluster.
# Thresholds a uniform error UY against a logistic-regression probability
# built from treatment A, individual covariates W and Z, and their cluster
# means. The beta* coefficients are global variables set later in the script.
#-----------------------------#-----------------------------
getY <- function(A, W, Z, UY) {
  # Linear predictor on the logit scale: cluster means of W and Z enter
  # alongside the individual-level values.
  lin.pred <- .25 + betaA * A + betaWc * mean(W) + betaZc * mean(Z) +
    betaW * W + betaZ * Z
  as.numeric(UY < plogis(lin.pred))
}
#-----------------------------#-----------------------------#-----------------------------#-----------------------------
# generate.cluster.data:
# input: indicator of effect (or under null), number of indv per cluster (n)
# treatment indicators (A; NA means draw A from the covariates, i.e. the
# observational setting), number of clusters (j; used as the cluster id)
# output: data.frame of observed data and the counterfactual outcomes for one cluster
# NOTE: the order of the random draws (UE, W, Z, possibly A, UY) determines
# the RNG stream, so reproducibility depends on this exact statement order.
#-----------------------------#-----------------------------
generate.cluster.data<- function(effect, n, A, j){
# Cluster-level environmental factor shared by both covariates
UE <- runif(1, -1, 1)
W<- rnorm(n, UE, .5)
Z <- rnorm(n, UE, .5)
if(is.na(A)){
# Observational setting: treatment depends on the cluster's covariates
A <- rbinom(1, 1, prob=plogis( 0.75*mean(W)) )
}
UY<- runif(n, 0, 1)
Y0 <- getY(0, W, Z, UY)
if(effect){
Y1 <- getY(1, W, Z, UY)
} else{
#if under the null, then set the counterfactual outcome Y1 to Y0
Y1 <- Y0
}
if(A){
# if exposed cluster
Y <- Y1
}else{
# if unexposed cluster
Y <- Y0
}
# calculate the weights as 1/n_j
alpha <- 1/n
# return data.frame with id as the cluster indicator and dummy variable U=1 (for unadjusted)
data.frame( cbind(id=j, n=n, U=1, W=W, Wc=mean(W), Z=Z, Zc=mean(Z), A=A, Y, alpha, Y1, Y0) )
}
#-----------------------------#-----------------------------#-----------------------------#-----------------------------
# get.full.data:
# input: number of clusters (J), number of indv per cluster (n),
# indicator of randomized trial (trial), indicator of effect vs. the null (effect)
# output: data.frame of observed data and the counterfactual outcomes,
# stacked over all J clusters
#-----------------------------#-----------------------------
get.full.data <- function(J, n, trial, effect){
if(trial){
# if randomized trial, then assign treatment with equal allocation (J/2 treated & J/2 control)
A.1 <- rbinom(J/2, 1, 0.5)
A.2 <- ifelse(A.1==1, 0, 1)
A <- sample( c(A.1,A.2))
}else{
# Observational setting: NA tells generate.cluster.data to draw A itself
A<- rep(NA, J)
}
# Cluster sizes vary around n (sd 10)
n.indv<- round(rnorm(J, n, 10))
# deal with negative values: set to n if less than 1.
n.indv[n.indv<1] <- n
# NOTE(review): rbind in a loop grows the data frame each iteration;
# acceptable at J=100, but preallocation would scale better.
full.data<- NULL
for(j in 1: J){
data.comm.j <- generate.cluster.data(effect=effect, n=n.indv[j], A=A[j], j)
full.data <- rbind(full.data, data.comm.j)
}
full.data
}
#--------------------------------------------------#--------------------------------------------------#-----------------------------#-----------------------------
#--------------------------------------------------#--------------------------------------------------#-----------------------------#-----------------------------
# SPECIFY THE PARAMETERS FOR THE SIMULATION
# Indicator if working model provides a reasonable approximation
working.model<- F
# Indicator if there is an effect (vs. the null)
effect<- F
# Indicator if randomized trial setting
trial <- F
# Number of clusters in the target population
nPop<- 10000
# Number of clusters in each study
J <- 100
# Average number of individuals per cluster
n <- 50
# Number of repetitions of the data generating experiment
nReps<- 5000
# Output file name encodes every simulation setting plus today's date
file.name<- paste('Sim1_', 'working.model', working.model, ifelse(trial, '_trial', '_obs'), '_effect', effect, '_J',J, '_n', n, '_nReps', nReps,'_v', format(Sys.time(), "%d%b%Y"), '.Rdata', sep='')
#--------------------------------------------------#--------------------------------------------------#-----------------------------#-----------------------------
#--------------------------------------------------#--------------------------------------------------#-----------------------------#-----------------------------
# Outcome-regression coefficients; globals read by getY()
if(working.model){
# If the working model provides a reasonable approximation
betaA <<- 0.2
betaWc <<- 0.15
betaZc<<- 0
betaW <<- 1.15
betaZ <<- 1
} else{
# If the working model provides a poor approximation
betaA <<- 0.2
betaWc <<- 0.15
betaZc <<- 1
betaW <<- 0.25
betaZ <<- 0
}
#--------------------------------------------------#--------------------------------------------------#-----------------------------#-----------------------------
# Get the true value of the average treatment effect.
# Under the null the truth is 0 by construction, so no population is simulated.
if(effect){
Pop<- get.full.data(J=nPop, n=n, trial=trial, effect=effect)
PopC<- aggregate(Pop, by=list(Pop$id), mean)
truth <- mean(PopC$Y1 - PopC$Y0)
} else{
truth<- 0
PopC<- nPop<- NA
}
save(nPop, PopC, truth, file=file.name)
#--------------------------------------------------#--------------------------------------------------#-----------------------------#-----------------------------
#--------------------------------------------------#--------------------------------------------------#-----------------------------#-----------------------------
# Repeat the data generating process nReps times and implement the relevant estimators
est0<- est1 <- est2 <- est3 <- NULL
verbose=F
for(r in 1:nReps){
# Generate the full hierarchical data
data<- get.full.data(J, n, trial=trial, effect=effect)
prob.txt <- mean( aggregate(data, by=list(data$id), mean)$A)
# unadjusted estimatior
unadj<- do.estimation.inference(psi=truth, data=data, Qinit.Indv=F, QAdj='U', work.model=F, gAdj=NULL, prob.txt=prob.txt, verbose=verbose)
# NOTE(review): est0 rows are prepended here, so est0 is in reverse
# iteration order relative to est1-est3 (which append).
est0<- rbind(unadj, est0)
QAdj <- c('W', 'Z')
gAdj<- c('W', 'Z')
# TMLE-Ia
clust1<- do.estimation.inference(psi=truth, data=data, Qinit.Indv=F, QAdj=QAdj, work.model=F, gAdj=gAdj, verbose=verbose)
est1 <- rbind(est1, clust1)
# note this is equivalent to using an individual-level regression that only adjusts for cluster=level summaries when estimating QbarC
# See Appendix C
# do.estimation.inference(psi=truth, data=data, Qinit.Indv=T, QAdj=c('Wc', 'Zc'), work.model=F, gAdj=c('Wc','Zc'), verbose=verbose)
# TMLE-Ib
ind1<- do.estimation.inference(psi=truth, data=data, Qinit.Indv=T, QAdj=QAdj, work.model=F, gAdj=gAdj, verbose=verbose)
est2 <- rbind(est2, ind1)
# TMLE-II
ind2<- do.estimation.inference(psi=truth, data=data, Qinit.Indv=T, QAdj=QAdj, work.model=T, gAdj=gAdj, verbose=verbose)
est3 <- rbind(est3, ind2)
# Checkpoint the results every iteration so a crash loses at most one rep
save(nPop, PopC, truth, est0, est1, est2, est3, file=file.name)
print(r)
}
save(nPop, PopC, truth, est0, est1, est2, est3, file=file.name)
# Mean of the estimator summaries across repetitions (first 7 columns)
colMeans(est0[, 1:7])
colMeans(est1[, 1:7])
colMeans(est2[, 1:7])
colMeans(est3[, 1:7])
|
d41a20c44804bf62736aa8641f4dd7f252bf3d1e
|
6bca977d67101a6274457ca850517ee41cf06c45
|
/plot_functions/plot.meth.distribution.R
|
99172e08b9a45e7efc35ac74c9f3a2762a07d9b4
|
[] |
no_license
|
AAlhendi1707/preinvasive
|
bedcf1f1eca93ab9ae4b44bf32e4d0f9947a1fad
|
e683fa79ad76d0784437eba4b267fb165b7c9ae4
|
refs/heads/master
| 2022-01-06T18:25:52.919615
| 2019-01-18T09:39:42
| 2019-01-18T09:39:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,082
|
r
|
plot.meth.distribution.R
|
########################################################################################################
# Methylation Distributions
########################################################################################################
# Writes a PDF (to `filename`) with density curves of per-probe mean beta
# values for four sample groups. Reads the globals tcga.mdata.all,
# tcga.mpheno.all and show.legends from the calling environment.
plot.meth.distribution <- function(filename){
  # Density of per-probe mean beta values for the samples in one group
  group_density <- function(group) {
    cols <- which(tcga.mpheno.all$Sample_Group == group)
    density(apply(tcga.mdata.all[, cols], 1, mean))
  }
  pdf(filename)
  # Base curve sets up the axes; remaining groups are overlaid with lines()
  plot(group_density("TCGA Control"), col = "darkgreen",
       main = "Distribution of beta values across all probes",
       xlab = "Beta value")
  lines(group_density("Regressive"), col = "green")
  lines(group_density("Progressive"), col = "red")
  lines(group_density("TCGA SqCC"), col = "orange")
  if (show.legends) {
    legend("topright",
           c("TCGA Control", "CIS Regressive", "CIS Progressive", "TCGA Cancer"),
           col = c("darkgreen", "green", "red", "orange"), lty = 1)
  }
  dev.off()
}
|
45df24834d167a9644d37471a40716e21bf5da66
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sensitivitymw/examples/multrnks.Rd.R
|
4cbcc6b299fc8b45f2598a4baa7ca5970a86c5d6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 177
|
r
|
multrnks.Rd.R
|
# Auto-extracted example for sensitivitymw::multrnks (approximate rank scores
# used in sensitivity analyses for matched observational studies).
library(sensitivitymw)
### Name: multrnks
### Title: Approximate scores for ranks.
### Aliases: multrnks
### ** Examples

# Default scores for ranks 1..10
multrnks(1:10)
# Same ranks with explicit m1/m2/m parameters — presumably the subsample
# sizes controlling the score shape; see ?multrnks for their meaning.
multrnks(1:10,m1=12,m2=20,m=20)
|
a68e7b8c4b42471073867d32191b0660bdf8b760
|
2aeb79f35732ccddfa1642f17122194b1caeac08
|
/LPT_model_output_analysis/bathyMaps.R
|
958ca099b5986ce0142e9cc799cb77804ca2001e
|
[] |
no_license
|
jwongala/Lagrangian-particle-tracking-model
|
b91bbec04919255101f76b8a160220f00ea14cf6
|
148ed040dc87a027c3ea8d47eb8b9e28518eebcf
|
refs/heads/master
| 2023-06-20T06:10:04.804586
| 2021-07-20T19:00:35
| 2021-07-20T19:00:35
| 298,403,858
| 1
| 1
| null | 2020-10-20T18:04:58
| 2020-09-24T21:57:07
|
R
|
UTF-8
|
R
| false
| false
| 3,255
|
r
|
bathyMaps.R
|
### The purpose of this script is to create maps of the LPT model output using bathymetry maps.
### Jennifer Wong-Ala
### 20201106
####################################################################
### clear the workspace
rm(list=ls())
####################################################################
### load libraries
library(marmap)
####################################################################
### load in data
####################################################################
### load files
### Bathymetry (this will be used if the NOAA bathy server is down)
setwd('/Users/jennifer/Desktop/')
str_name<-"large_domain.tiff"
OR_bathy<-raster(str_name)
library(graphics)
OR_bathy2<-as.bathy(OR_bathy)
####################################################################
### bathymetry plots
dev.new(width=5.26, height=10)
par(mai=c(0.8,0.9,0.2,0.3)) # , mfrow=c(1,2)
plot(OR_bathy2, image = T, land = T, axes = T, lwd=1, deep=-10,shallow=10, step=10, bpal = list(c(0, max(OR_bathy2), "grey"), c(min(OR_bathy2),0,blues)),xlab=expression(paste("Longitude ("^o,'W)')), ylab= expression(paste("Latitude ("^o,'N)')), cex.lab=1.3, cex.axis=1.2) #, xlim= c(-125.5,-123.5), ylim=c(40.2, 47)
rect(-126.0909, 40.65895, -123.9263, 46.99730, lwd=2) # 2km domain
rect(-125.1, 42.00001, -123.5, 45.24775, lty=2, lwd=2) # 250m domain
points(-124.52,42.84, pch=21, cex= 1.2, col='black', bg='aquamarine3') # cape blanco
points(-123.997,44.6368, pch=21, cex= 1.2, col='black', bg='aquamarine3') # newport
points(-124.095, 44.6598, pch=22, cex= 1.5, col='black', bg='orange')
points(-124.304, 44.6393, pch=22, cex= 1.5, col='black', bg='orange')
points(-124.067, 44.613, pch=23, col='black', bg='gold', cex=1.2)
text(-124.25, 42.85, "CB", cex=1.2)
text(-123.84, 44.6368, 'N', cex=1.2)
# text(-124.2, 45.75, "CF", col= 'black')
# # text(-124.2, 45.5, "CA", col='darkblue')
# text(-124.25, 45, "CH", col= 'black')
# # text(-124.2, 45.5, "CA", col='darkblue')
# text(-124.25, 44.75, "OR", col= 'black')
# text(-124.35, 44.2, "CP", col= 'black')
# text(-124.79, 42.8, 'RR', col= 'black')
# lines(MR_transformed, col= 'darkorchid4', lwd= 1.4, lty= 1) # plot MR polygons
contour(unique(bathy.dat$lon),sort(unique(bathy.dat$lat)),bathy.mat,levels=-c(50,1000,2000),labcex=0.7,add=T,col='black', lwd= 0.07)
points(init_par$lon, init_par$lat, col='coral3', cex=0.5, pch=20)
##########
# release locations
setwd('/Users/jennifer/Desktop/')
str_name<-"exportImage.tiff"
OR_bathy3<-raster(str_name)
library(graphics)
OR_bathy4<-as.bathy(OR_bathy3)
# init_pop<-pop.roms[[1]][[1]] # only lat and lon values
# dev.new(width=7, height=10)
# par(mai=c(0.8,0.9,0.2,0.3))
plot(OR_bathy4, image = T, land = T, axes = T, lwd=1, deep=-10,shallow=10, step=10, bpal = list(c(0, max(OR_bathy4), "grey"), c(min(OR_bathy4),0,blues)),xlab=expression(paste("Longitude ("^o,'W)')), ylab='', cex.lab=1.3, cex.axis=1.2) #, xlim= c(-125.5,-123.5), ylim=c(40.2, 47)
# rect(-126.0909, 40.65895, -123.9263, 46.99730, lwd=2) # 2km domain
# rect(-125.1, 42.00001, -123.5, 45.5, lty=2, lwd=2) # 250m domain
lines(MR_transformed, col= 'darkorchid4', lwd= 1.4, lty= 1) # plot MR polygons
points(init_pop$lon, init_pop$lat, col='brown4', cex=0.8, pch=20)
|
dfc9c6b435e418288d4c2d054b70b4e62d1210dc
|
7e239d4369ccf25368e06aff0e1b51272a8a1457
|
/.ipynb_checkpoints/my_first_script-checkpoint.R
|
0040a4ce21dfd18fd762a0da1fecf891512385e0
|
[] |
no_license
|
oscarm524/aws_ML_demos
|
16e029a884c2c9cd2d11ecdc2c10fb33bdf0aeba
|
82986fa30a5d25275b30f2b289f35d27ece46f77
|
refs/heads/main
| 2023-02-12T04:25:33.180170
| 2021-01-11T20:02:36
| 2021-01-11T20:02:36
| 306,625,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37
|
r
|
my_first_script-checkpoint.R
|
# Simulate a simple linear relationship: a standard-normal predictor x and a
# response y = 3x plus unit-variance noise. Two rnorm() calls in the same
# order as before, so the RNG stream (and thus the values) is unchanged.
n_obs <- 100
x <- rnorm(n_obs)
y <- 3 * x + rnorm(n_obs)
8e79eceb8c118c60536c125c5ed8bf3122f6ab93
|
ac79d0e10669802dc128caf6f1c935a543b71a72
|
/scripts/cleanup-2012.r
|
cddb186cd0c97d28e3fad6612a69a83f928b84dc
|
[] |
no_license
|
infotroph/efrhizo
|
2ab0f931a8fe9cbcbede794aca539fc9120526e9
|
8783e81d50aa2d7a09fff7cd3456c7728161653b
|
refs/heads/master
| 2020-12-26T04:15:27.176785
| 2017-06-27T03:21:58
| 2017-06-27T03:21:58
| 46,886,036
| 1
| 1
| null | 2016-08-05T06:56:26
| 2015-11-25T20:52:08
|
Max
|
UTF-8
|
R
| false
| false
| 2,969
|
r
|
cleanup-2012.r
|
# Cleanup pipeline for the 2012 minirhizotron frame totals: censor bad
# frames, deduplicate tracings, derive analysis columns, and collapse to
# block-level log-volume summaries.
# NOTE(review): require() returns FALSE rather than erroring if rhizoFuncs
# is missing; library() would fail fast.
require(rhizoFuncs)
raw.2012 = read.delim("../data/frametots2012.txt")
raw.2012 = make.datetimes(raw.2012)
# Delete all Loc 1 records (none show roots)
# and any misplaced frames (e.g. 9, 13)
raw.2012 = droplevels(raw.2012[raw.2012$Location %in% seq(5,120,5),])
# Censor all images that were too low-quality to trace
censor.2012 = read.csv("../data/2012/2012-censorframes.csv")
censor.2012$date = as.Date(censor.2012$date)
censor.2012 = censor.2012[order(censor.2012$date, censor.2012$tube, censor.2012$loc),]
# (sort is just for convenience when viewing manually.
# should be close to sorted already, and the script doesn't care.)
# Match frames on the (date, tube, location) triple pasted into a key string
raw.to.censor.2012 = (
paste(raw.2012$Date, raw.2012$Tube, raw.2012$Location)
%in% paste(censor.2012$date, censor.2012$tube, censor.2012$loc))
raw.2012 = droplevels(raw.2012[!raw.to.censor.2012,])
rm(raw.to.censor.2012)
# Sort by order of tracing (required by strip.tracing.dups)
raw.2012 = raw.2012[order(raw.2012$MeasDateTime),]
# Drop duplicates:
# silently if only one is reasonable,
# with warning if several candidates might be correct.
noeasy.2012.by = by(raw.2012, raw.2012$Img, strip.tracing.dups)
noeasy.2012 = do.call(rbind, noeasy.2012.by)
rm(noeasy.2012.by)
# many warnings about multiple calibrations.
# TODO: Revisit calibrations and fix upstream.
# Derived analysis columns (helpers from rhizoFuncs)
noeasy.2012$Month = months(noeasy.2012$Date)
noeasy.2012$Block = assign.block(noeasy.2012$Tube)
noeasy.2012$Species = assign.species(noeasy.2012$Tube)
noeasy.2012$Depth = loc.to.depth(noeasy.2012$Location)
noeasy.2012$rootvol.mm3.mm2 = with(noeasy.2012,
rootvol.perarea(TotVolume.mm3, PxSizeH, PxSizeV))
# normed.2012 = normalize.soilvol(noeasy.2012)
# Which images have duplicates? (is this useful at all?)
# dupimg.2012 = tapply(raw.2012$Img, raw.2012$Img, length)
# dupimg.2012 = dupimg.2012[dupimg.2012>1]
# dupimg.2012 = data.frame(img=names(dupimg.2012), n=dupimg.2012, stringsAsFactors=FALSE)
# TODO: Check here for any LOCATIONs with multiple images from the same session
# strip.tracing.dups only gets rid of multiple records for the same image name.
#centering predictors
noeasy.2012$Date.c = noeasy.2012$Date - mean(noeasy.2012$Date)
# or: noeasy.2012$Date.scale = scale(noeasy.2012$Date)
# Others too? Depth??
# Reduced dataset: Collapse to block means.
# The 1e-6 offset keeps log() finite when root volume is exactly zero.
logvol.2012 = with(noeasy.2012, aggregate(rootvol.mm3.mm2, by=list(Depth=Depth,Species=Species, Block=Block, Session=Session), function(x)mean(log(x+1e-6))))
names(logvol.2012)[5] = "log.mean"
logvol.2012$log.var = with(noeasy.2012, aggregate(rootvol.mm3.mm2, by=list(Depth=Depth, Species=Species, Block=Block, Session=Session), function(x)var(log(x+1e-6)))$x)
# Lognormal back-transform: E[X] = exp(mu + sigma^2/2)
logvol.2012$expected= exp(logvol.2012$log.mean + logvol.2012$log.var/2)
#logvol.2012.ci = mapply(
# function(y,s)lognormboot(nboot=10000,nsim=4, ybar=y,s2=s),
# logvol.2012$log.mean,
# logvol.2012$log.var)
#logvol.2012.ci = t(logvol.2012.ci)
#logvol.2012$cilo = logvol.2012.ci[,1]
#logvol.2012$cihi = logvol.2012.ci[,2]
|
6947e15d295e88f194f558b96749e72b0676e022
|
70098b915b6cf31d86de5f5ad4687ab4de341097
|
/tests/testthat/test-check_format.R
|
f7e375673ff58ff8d29a9a1fcf19e2160cb0846b
|
[
"MIT"
] |
permissive
|
mattoslmp/chemspiderapi
|
b6364443309db2f5950ed7d88fc1d050f4db3dac
|
0b371791243ff44c2ff8536c1e59f08ed615ac8e
|
refs/heads/master
| 2023-03-15T19:19:53.930291
| 2021-01-06T10:05:09
| 2021-01-06T10:05:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,026
|
r
|
test-check_format.R
|
# Unit tests for the internal .check_format() helper of chemspiderapi, which
# validates the (input, inputFormat, outputFormat) triple before a
# format-conversion API request is issued.
library(chemspiderapi)
context("check_format")
test_that("check_format() fails if no input is provided.", {
expect_error(
.check_format()
)
})
test_that("check_format() fails if NULL is provided as input.", {
expect_error(
.check_format(input = NULL)
)
})
# Only a single identifier may be converted per call.
test_that("check_format() fails if multiple inputs are provided.", {
expect_error(
.check_format(input = c("RYYVLZVUVIJVGH-UHFFFAOYSA-N", "CN1C=NC2=C1C(=O)N(C(=O)N2C)C"))
)
})
test_that("check_format() fails if a non-character input is provided.", {
expect_error(
.check_format(input = 123)
)
})
test_that("check_format() fails if no inputFormat is provided.", {
expect_error(
.check_format(input = "RYYVLZVUVIJVGH-UHFFFAOYSA-N", outputFormat = "SMILES")
)
})
test_that("check_format() fails if NULL is provided as inputFormat.", {
expect_error(
.check_format(input = "RYYVLZVUVIJVGH-UHFFFAOYSA-N", inputFormat = NULL, outputFormat = "SMILES")
)
})
test_that("check_format() fails if no outputFormat is provided.", {
expect_error(
.check_format(input = "RYYVLZVUVIJVGH-UHFFFAOYSA-N", inputFormat = "InChIKey")
)
})
test_that("check_format() fails if NULL is provided as outputFormat.", {
expect_error(
.check_format(input = "RYYVLZVUVIJVGH-UHFFFAOYSA-N", inputFormat = "InChIKey", outputFormat = NULL)
)
})
# Happy path: a complete, consistent triple must pass without any condition.
test_that("check_format() remains silent when correct inputs are provided.", {
expect_silent(
.check_format(input = "RYYVLZVUVIJVGH-UHFFFAOYSA-N", inputFormat = "InChIKey", outputFormat = "SMILES")
)
})
# The InChI string below lacks the required "InChI=" prefix.
test_that("check_format() fails if the inchi string is incomplete.", {
expect_error(
.check_format(input = "C8H10N4O2/c1-10-4-9-6-5(10)7(13)12(3)8(14)11(6)2/h4H,1-3H3", inputFormat = "InChI", outputFormat = "SMILES")
)
})
# A full InChI passed while claiming SMILES format must be rejected.
test_that("check_format() fails if smiles is not a character vector.", {
expect_error(
.check_format(input = "InChI=1S/C8H10N4O2/c1-10-4-9-6-5(10)7(13)12(3)8(14)11(6)2/h4H,1-3H3", inputFormat = "SMILES", outputFormat = "InChIKey")
)
})
|
786580cda12383e766711057e390ea453d0cbc94
|
389410f81d53d14646a3c95ce34f48c9163ce053
|
/9-developing-data-products/shiny/ui.R
|
d2e66b34f143e361d297de93aa0609b6e9d4a665
|
[] |
no_license
|
fattyfook2015/datasciencecoursera-1
|
a40d3abae9031552a1beeeec7f953a361699e8ba
|
11962a2796c862302820fc46dfa690d41373ea06
|
refs/heads/master
| 2021-05-30T23:40:35.770180
| 2015-08-03T15:01:12
| 2015-08-03T15:01:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,055
|
r
|
ui.R
|
# Shiny UI: interactive demo of the Nadaraya-Watson kernel regression
# estimator on the built-in cars dataset. Two competing smoothers are
# configured side by side so their tuning parameters can be compared.
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Tuning Parameters for the Nadaraya-Watson Kernel Regression Estimator"),
# Controls: bandwidth + kernel for each of the two smoothers, plus a car
# speed whose stopping distance is predicted by both (outputs rendered
# server-side as "speed", "dist1", "dist2").
sidebarPanel(
sliderInput("bw1", "Bandwidth of Smoother 1:",
min = 2, max = 10, value = 2, step = 0.5,
animate = animationOptions(interval = 1500, loop = T)),
selectInput('kern1', 'Kernel of Smoother 1:', choices = c("normal", "box")),
sliderInput("bw2", "Bandwidth of Smoother 2",
min = 2, max = 10, value = 8, step = 0.5,
animate = animationOptions(interval = 1500, loop = T)),
selectInput('kern2', 'Kernel of Smoother 2:', choices = c("normal", "box")),
numericInput("speed", "Speed of car", value = 18, min = 4, max = 25),
h4("For input speed:"),
verbatimTextOutput("speed"),
h4("Stopping distance from Smoother 1:"),
verbatimTextOutput("dist1"),
h4("Stopping distance from Smoother 2:"),
verbatimTextOutput("dist2")
),
# Main panel: usage instructions and the comparison plot ("plot").
mainPanel(
h4("Instructions"),
p("This app serves to demonstrate the Nadaraya-Watson (NW) kernel regression estimator
and its tuning parameters using the cars dataset."),
p("The NW estimators are plotted together with data points from the cars data.
There are 2 tuning parameters for the NW estimator: bandwidth and kernel.
The user can tweak them to observe how they affect the estimators.
The play button for the bandwidth setting allows the user to
observe how the estimator changes as bandwidth increases.
2 NW estimators are plotted to allow the user to compare
the estimators with different tuning parameters set."),
p("The final input is used to predict the stopping distance of the car
with the given speed. The predictions for both estimators are printed
on the side panel."),
plotOutput("plot")
)
))
|
1b9b6a4a20f7e1a5a35f5b5d501f4fad0aa0c946
|
97edcb6746069b6c6c7facbe82ed9bb4482d6e22
|
/eSVD/man/dot-percent_shrinkage.Rd
|
309570be090f8ae38baa6a18f2d0155bb22aed51
|
[
"MIT"
] |
permissive
|
linnykos/esvd
|
7e59c5fc50f6e0bd23fcb85f1fa6aa66ea782a60
|
0b9f4d38ed20d74f1288c97f96322347ba68d08d
|
refs/heads/master
| 2023-02-24T23:33:32.483635
| 2021-01-31T21:06:41
| 2021-01-31T21:06:41
| 129,167,224
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 691
|
rd
|
dot-percent_shrinkage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slingshot_curves.R
\name{.percent_shrinkage}
\alias{.percent_shrinkage}
\title{Determine the percentage shrinkage (a non-decreasing function)}
\usage{
.percent_shrinkage(pcurve, common_idx)
}
\arguments{
\item{pcurve}{output of \code{princurve::project_to_curve()}}
\item{common_idx}{indices to use}
}
\value{
vector
}
\description{
Determine the lambdas to use for interpolation based on the IQR for
lambda at \code{common_idx} indices. Then use a linear interpolation
via \code{approx} to assign all the lambdas (even those not in \code{common_idx})
a relevant value based on the CDF of the cosine kernel.
}
|
e1215e50b4106be896d4035e4b793611060de20c
|
01aeb568b73063290dcb7ebad6e238d6711fee35
|
/man/sce_full_Trapnell.Rd
|
bad3d15071c2fd40d139aaa9524bb69b04b53409
|
[] |
no_license
|
chanwkimlab/DuoClustering2018
|
06c86ac4b0538549335f5973089086f7e42719f0
|
6b68abdac141eb043e02850547212715ddfbcf4d
|
refs/heads/master
| 2023-02-28T02:00:15.409134
| 2021-01-30T08:49:38
| 2021-01-30T08:49:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,003
|
rd
|
sce_full_Trapnell.Rd
|
\name{sce_full_Trapnell}
\docType{data}
\alias{sce_full_Trapnell}
\alias{sce_filteredExpr10_Trapnell}
\alias{sce_filteredHVG10_Trapnell}
\alias{sce_filteredM3Drop10_Trapnell}
\alias{sce_full_TrapnellTCC}
\alias{sce_filteredExpr10_TrapnellTCC}
\alias{sce_filteredHVG10_TrapnellTCC}
\alias{sce_filteredM3Drop10_TrapnellTCC}
\title{
Trapnell data sets
}
\arguments{
\item{metadata}{Logical, whether only metadata should be returned}
}
\description{
Gene or TCC counts for scRNA-seq data set from Trapnell et al. (2014),
consisting of primary myoblasts over a time course of serum-induced
differentiation.
}
\details{
This is a scRNA-seq data set originally from Trapnell et al. (2014). The data
set consists of gene-level read counts or TCCs (transcript compatibility counts)
from human primary myoblasts over a time course of serum-induced
differentiation. It contains 3 subpopulations, defined by the cell phenotype
given by the authors' annotations. The data sets have been used to evaluate the
performance of clustering algorithms in Duò et al. (2018).
For the \code{sce_full_Trapnell} data set, all genes except those with zero
counts across all cells are retained. The gene counts are
gene-level length-scaled TPM values derived from Salmon (Patro et al. (2017))
quantifications (see
Soneson and Robinson (2018)). For the TCC data set we estimated transcripts
compatibility counts using \code{kallisto} as an alternative to the gene-level
count matrix (Bray et al. (2016), Ntranos et al. (2016)).
The \code{scater} package was used to perform quality control of the data sets
(McCarthy et al. (2017)).
Features with zero counts across all cells, as well as all cells with total
count or total number of detected features more than 3 median absolute
deviations (MADs) below the median across all cells (on the log scale),
were excluded. Additionally, cells that were classified as doublets or
debris were filtered out.
The \code{sce_full_Trapnell} data set consists of 222 cells and 41,111 features,
the \code{sce_full_TrapnellTCC} data set of 227 cells and 684,953 features,
respectively.
The \code{filteredExpr}, \code{filteredHVG} and \code{filteredM3Drop10} are
further reduced data sets.
For each of the filtering method, we retained 10 percent of the original
number of genes
(with a non-zero count in at least one cell) in the original data sets.
For the \code{filteredExpr} data sets, only the genes/TCCs with the highest
average expression (log-normalized count) value across all cells were retained.
Using the \code{Seurat} package, the \code{filteredHVG} data sets were filtered
on the variability of the features and only the most highly variable ones were
retained (Satija et al. (2015)). Finally, the \code{M3Drop} package was used
to model the dropout rate of the genes as a function of the mean expression
level using the Michaelis-Menten equation and select variables to retain for
the \code{filteredM3Drop10} data sets (Andrews and Hemberg (2018)).
The \code{scater} package was used to normalize the count values, based on
normalization factors calculated by the deconvolution method from the
\code{scran} package (Lun et al. (2016)).
This data set is provided as a \code{SingleCellExperiment} object
(Lun and Risso (2017)). For further information on the
\code{SingleCellExperiment} class, see the corresponding manual.
Raw data files for the original data set (GSE52529) are available from
https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE52529.}
\usage{
sce_full_Trapnell(metadata = FALSE)
sce_filteredExpr10_Trapnell(metadata = FALSE)
sce_filteredHVG10_Trapnell(metadata = FALSE)
sce_filteredM3Drop10_Trapnell(metadata = FALSE)
sce_full_TrapnellTCC(metadata = FALSE)
sce_filteredExpr10_TrapnellTCC(metadata = FALSE)
sce_filteredHVG10_TrapnellTCC(metadata = FALSE)
sce_filteredM3Drop10_TrapnellTCC(metadata = FALSE)
}
\examples{
sce_filteredExpr10_Trapnell()
}
\format{SingleCellExperiment}
\value{Returns a \code{SingleCellExperiment} object.}
\references{
Andrews, T.S., and Hemberg, M. (2018). \emph{Dropout-based feature selection
for scRNASeq}. bioRxiv doi:https://doi.org/10.1101/065094.
Bray, N.L., Pimentel, H., Melsted, P., and Pachter, L. (2016).
\emph{Near-optimal probabilistic RNA-seq quantification}.
Nat. Biotechnol. 34: 525–527.
Duò, A., Robinson, M.D., and Soneson, C. (2018).
\emph{A systematic performance evaluation of clustering methods for single-cell
RNA-seq data.}
F1000Res. 7:1141.
Lun, A.T.L., Bach, K., and Marioni, J.C. (2016) \emph{Pooling across cells to
normalize single-cell RNA sequencing data with many zero counts.}
Genome Biol. 17(1): 75.
Lun, A.T.L., and Risso, D. (2017). \emph{SingleCellExperiment: S4 Classes for
Single Cell Data}. R package version 1.0.0.
McCarthy, D.J., Campbell, K.R., Lun, A.T.L., and Wills, Q.F. (2017):
\emph{Scater: pre-processing, quality control, normalization and visualization
of single-cell RNA-seq data in R.} Bioinformatics 33(8): 1179-1186.
Ntranos, V., Kamath, G.M., Zhang, J.M., Pachter, L., and Tse, D.N. (2016):
\emph{Fast and accurate single-cell RNA-seq analysis by clustering of
transcript-compatibility counts.} Genome Biol. 17:112.
Patro, R., Duggal, G., Love, M.I., Irizarry, R.A., and Kingsford, C. (2017):
\emph{Salmon provides fast and bias-aware quantification of transcript
expression.} Nat. Methods 14:417-419.
Satija, R., Farrell, J.A., Gennert, D., Schier, A.F., and Regev, A. (2015).
\emph{Spatial reconstruction of single-cell gene expression data.}
Nat. Biotechnol. 33(5): 495–502.
Soneson, C., and Robinson, M.D. (2018). \emph{Bias, robustness and scalability
in single-cell differential expression analysis.} Nat. Methods, 15(4): 255-261.
Trapnell, C., Cacchiarelli, D., Grimsby, J., Pokharel, P., Li, S., Morse, M.,
Lennon, N.J., Livak, K.J., Mikkelsen, T.S., and Rinn, J.L. (2014).
\emph{The dynamics and regulators of cell fate decisions are revealed by
pseudotemporal ordering of single cells.} Nat. Biotechnol. 32(4): 381–386.
}
\keyword{datasets}
|
f6271ca183340f27b53d3eddd0b9c1092979ba5c
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RBesT/man/pos1S.Rd
|
add845d5a55143bc18ed44d701dbaf4b89180b33
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,301
|
rd
|
pos1S.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pos1S.R
\name{pos1S}
\alias{pos1S}
\alias{pos1S.betaMix}
\alias{pos1S.normMix}
\alias{pos1S.gammaMix}
\title{Probability of Success for a 1 Sample Design}
\usage{
pos1S(prior, n, decision, ...)
\method{pos1S}{betaMix}(prior, n, decision, ...)
\method{pos1S}{normMix}(prior, n, decision, sigma, eps = 1e-06, ...)
\method{pos1S}{gammaMix}(prior, n, decision, eps = 1e-06, ...)
}
\arguments{
\item{prior}{Prior for analysis.}
\item{n}{Sample size for the experiment.}
\item{decision}{One-sample decision function to use; see \code{\link{decision1S}}.}
\item{...}{Optional arguments.}
\item{sigma}{The fixed reference scale. If left unspecified, the
default reference scale of the prior is assumed.}
\item{eps}{Support of random variables are determined as the
interval covering \code{1-eps} probability mass. Defaults to
\eqn{10^{-6}}.}
}
\value{
Returns a function that takes as single argument
\code{mix}, which is the mixture distribution of the control
parameter. Calling this function with a mixture distribution then
calculates the PoS.
}
\description{
The \code{pos1S} function defines a 1 sample design (prior, sample
size, decision function) for the calculation of the frequency at
which the decision is evaluated to 1 when assuming a distribution
for the parameter. A function is returned which performs the
actual operating characteristics calculations.
}
\details{
The \code{pos1S} function defines a 1 sample design and
returns a function which calculates its probability of success.
The probability of success is the frequency with which the decision
function is evaluated to 1 under the assumption of a given true
distribution of the data implied by a distribution of the parameter
\eqn{\theta}.
Calling the \code{pos1S} function calculates the critical value
\eqn{y_c} and returns a function which can be used to evaluate the
PoS for different predictive distributions and is evaluated as
\deqn{ \int F(y_c|\theta) p(\theta) d\theta, }
where \eqn{F} is the distribution function of the sampling
distribution and \eqn{p(\theta)} specifies the assumed true
distribution of the parameter \eqn{\theta}. The distribution
\eqn{p(\theta)} is a mixture distribution and given as the
\code{mix} argument to the function.
}
\section{Methods (by class)}{
\itemize{
\item \code{betaMix}: Applies for binomial model with a mixture
beta prior. The calculations use exact expressions.
\item \code{normMix}: Applies for the normal model with known
standard deviation \eqn{\sigma} and a normal mixture prior for the
mean. As a consequence from the assumption of a known standard
deviation, the calculation discards sampling uncertainty of the
second moment. The function \code{pos1S} has an extra
argument \code{eps} (defaults to \eqn{10^{-6}}). The critical value
\eqn{y_c} is searched in the region of probability mass
\code{1-eps} for \eqn{y}.
\item \code{gammaMix}: Applies for the Poisson model with a gamma
mixture prior for the rate parameter. The function
\code{pos1S} takes an extra argument \code{eps} (defaults to \eqn{10^{-6}})
which determines the region of probability mass \code{1-eps} where
the boundary is searched for \eqn{y}.
}}
\examples{
# non-inferiority example using normal approximation of log-hazard
# ratio, see ?decision1S for all details
s <- 2
flat_prior <- mixnorm(c(1,0,100), sigma=s)
nL <- 233
theta_ni <- 0.4
theta_a <- 0
alpha <- 0.05
beta <- 0.2
za <- qnorm(1-alpha)
zb <- qnorm(1-beta)
n1 <- round( (s * (za + zb)/(theta_ni - theta_a))^2 )
theta_c <- theta_ni - za * s / sqrt(n1)
# assume we would like to conduct at an interim analysis
# of PoS after having observed 20 events with a HR of 0.8.
# We first need the posterior at the interim ...
post_ia <- postmix(flat_prior, m=log(0.8), n=20)
# dual criterion
decComb <- decision1S(c(1-alpha, 0.5), c(theta_ni, theta_c), lower.tail=TRUE)
# ... and we would like to know the PoS for a successful
# trial at the end when observing 10 more events
pos_ia <- pos1S(post_ia, 10, decComb)
# our knowledge at the interim is just the posterior at
# interim such that the PoS is
pos_ia(post_ia)
}
\seealso{
Other design1S: \code{\link{decision1S_boundary}},
\code{\link{decision1S}}, \code{\link{oc1S}}
}
\concept{design1S}
|
72d23ad4007dae22894b6bf1b09dfd9c5b125b2c
|
fb0fdffa0ea694ece6313359582310c5f25eeb12
|
/R/gevpdf.R
|
b6c3f097781373454a4ee0332cdf42b11470bf33
|
[
"MIT"
] |
permissive
|
rozsasarpi/Interactive-snow-map-R
|
f30fbba49698375dafbdbe08045af898ec3ff6f3
|
b62907a29ea2150f196a53d6a2bf6c06486e17d4
|
refs/heads/master
| 2021-01-10T04:22:50.994881
| 2016-08-18T14:16:36
| 2016-08-18T14:16:36
| 47,183,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,326
|
r
|
gevpdf.R
|
#' Probability density of the generalized extreme value (GEV) distribution.
#'
#' @param x Numeric vector of evaluation points.
#' @param shape Scalar shape parameter (shape == 0 gives the Gumbel case).
#' @param scale Scalar scale parameter; must be strictly positive.
#' @param location Scalar location parameter.
#' @return Numeric vector of densities, same length as x. All elements are
#'   NaN when any parameter is NA or scale is non-positive; points outside
#'   the support receive density 0; NA observations stay NA.
gevpdf = function(x, shape, scale, location) {
  # Parameters can be scalars only; x may be a vector.
  if (length(shape) * length(scale) * length(location) != 1) {
    stop("The parameters can be scalars only!")
  }
  nx = length(x)
  # Invalid parameters: NA inputs, or a non-positive scale (the original
  # check `scale < 0` let scale == 0 through and produced garbage).
  if (any(is.na(c(shape, scale, location))) || scale <= 0) {
    return(rep(NaN, nx))
  }
  f = numeric(nx)
  if (abs(shape) < .Machine$double.eps) {
    # Gumbel limit (shape == 0): f(x) = exp(-z - exp(-z)) / scale.
    # Bug fix: the previous version omitted the 1/scale Jacobian factor.
    z = (x - location) / scale
    f = exp(-exp(-z) - z) / scale
  } else {
    t = 1 + shape * (x - location) / scale
    # Inside the support iff t > 0; elsewhere the density is exactly 0.
    inside = !is.na(t) & t > 0
    f[inside] = (1 / scale) * t[inside]^(-1 / shape - 1) *
      exp(-t[inside]^(-1 / shape))
    # Propagate NA observations instead of erroring on NA subscripts.
    f[is.na(t)] = NA
  }
  return(f)
}
|
1e5e2aa95bec6ac0344e3337cec4d901b337b282
|
d33a5191d950d6044e2611890a69d99de6b9418c
|
/SharedForest/R/Hypers.R
|
a7d1bdebbb75e2a3b1049f76f8b3d063fb2c6520
|
[
"MIT"
] |
permissive
|
theodds/SharedForestPaper
|
84a7ab8bc669c76314aaac897bea22fc21d5a211
|
00d91f4251c73eef4dae10304bbc3196831357f7
|
refs/heads/master
| 2022-03-01T18:51:16.132755
| 2022-02-19T17:59:19
| 2022-02-19T17:59:19
| 188,320,078
| 3
| 0
| null | 2019-05-23T23:34:36
| 2019-05-23T23:26:32
|
C++
|
UTF-8
|
R
| false
| false
| 5,332
|
r
|
Hypers.R
|
#' Create hyperparameter object for SoftBart
#'
#' Creates a list which holds all the hyperparameters for use with the softbart
#' command.
#'
#' @param X NxP matrix of training data covariates.
#' @param Y Nx1 vector of training data response (scaled internally).
#' @param W NOTE(review): accepted but never used in this function -- confirm intent.
#' @param delta Binary indicator vector; mean(delta) sets the default theta_0 via qnorm.
#' @param group For each column of X, gives the associated group
#' @param alpha Positive constant controlling the sparsity level
#' @param beta Parameter penalizing tree depth in the branching process prior
#' @param gamma Parameter penalizing new nodes in the branching process prior
#' @param num_tree Number of trees in the ensemble
#' @param var_tau Target variance used when solving for the gamma prior (a_tau, b_tau) on tau.
#' @param k Related to the signal-to-noise ratio, sigma_mu = 0.5 / (sqrt(num_tree) * k). BART defaults to k = 2.
#' @param k_theta Analogue of k for the theta (probit) component; sets the output sigma_theta.
#' @param alpha_scale Scale of the prior for alpha; if not provided, defaults to P
#' @param alpha_shape_1 Shape parameter for prior on alpha; if not provided, defaults to 0.5
#' @param alpha_shape_2 Shape parameter for prior on alpha; if not provided, defaults to 1.0
#' @param sigma_hat A prior guess at the conditional variance of Y. If not provided, this is estimated empirically by linear regression.
#' @param sigma_theta NOTE(review): accepted but ignored; the output always uses 3 / (k_theta * sqrt(num_tree)) -- confirm intent.
#' @param theta_0 Probit intercept; if not provided, defaults to qnorm(mean(delta)).
#' @param shape Shape parameter for gating probabilities
#'
#' @return Returns a list containing the function arguments.
Hypers <- function(X,Y,W,delta, group = NULL,
alpha = 1,
beta = 2,
gamma = 0.95,
num_tree = 50,
var_tau = 0.5,
k = 2, ## Determines kappa
k_theta = 2,
alpha_scale = NULL,
alpha_shape_1 = 0.5,
alpha_shape_2 = 1,
sigma_hat = NULL, ## Determines tau_0 and its prior scale
sigma_theta = NULL,
theta_0 = NULL,
shape = 1) {
## Preprocess stuff (in order they appear in args)
Y <- scale(Y)
# Convert group labels to 0-based indexing (one group per column by default).
if(is.null(group)) {
group <- 1:ncol(X) - 1
} else {
group <- group - 1
}
# Objective for choosing the gamma prior (a_tau, b_tau) on tau: make
# log(tau) have mean ~0 and sd ~sqrt(var_tau / num_tree).
obj <- function(x) {
alpha <- x[1]
beta <- x[2]
mu <- digamma(alpha) - log(beta)
sigma <- sqrt(trigamma(alpha))
return(mu^2 + (sigma - sqrt(var_tau / num_tree))^2)
}
pars <- optim(c(1,1), obj)$par
a_tau <- pars[1]
b_tau <- pars[2]
# Fill in data-dependent defaults when the caller left them NULL.
# (ifelse works here because each condition is scalar.)
alpha_scale <- ifelse(is.null(alpha_scale), ncol(X), alpha_scale)
sigma_hat <- ifelse(is.null(sigma_hat), GetSigma(X,Y), sigma_hat)
theta_0 <- ifelse(is.null(theta_0), qnorm(mean(delta)), theta_0)
## MAKE OUTPUT AND RETURN IT
out <- list()
out$alpha <- alpha
out$beta <- beta
out$gamma <- gamma
out$num_tree <- num_tree
out$a_tau <- a_tau
out$b_tau <- b_tau
out$kappa <- 1.0 / (3.0 / (k * sqrt(num_tree)))^2
out$alpha_scale <- alpha_scale
out$alpha_shape_1 <- alpha_shape_1
out$alpha_shape_2 <- alpha_shape_2
out$sigma_hat <- sigma_hat
out$sigma_theta <- 3.0 / (k_theta * sqrt(num_tree))
out$theta_0 <- theta_0
out$group <- group
return(out)
}
#' MCMC options for SoftBart
#'
#' Creates a list which provides the parameters for running the Markov chain.
#'
#' @param num_burn Number of warmup iterations for the chain.
#' @param num_thin Thinning interval for the chain.
#' @param num_save The number of samples to collect; in total, num_burn + num_save * num_thin iterations are run
#' @param num_print Interval for how often to print the chain's progress
#' @param update_sigma_mu If true, sigma_mu/k are updated, with a half-Cauchy prior on sigma_mu centered at the initial guess
#' @param update_s If true, s is updated using the Dirichlet prior.
#' @param update_alpha If true, alpha is updated using a scaled beta prime prior
#'
#' @return Returns a list containing the function arguments
Opts <- function(num_burn = 2500,
                 num_thin = 1,
                 num_save = 2500,
                 num_print = 100,
                 update_sigma_mu = TRUE,
                 update_s = TRUE,
                 update_alpha = TRUE) {
  out <- list()
  out$num_burn <- num_burn
  out$num_thin <- num_thin
  out$num_save <- num_save
  out$num_print <- num_print
  # Fix: update_sigma_mu was previously accepted but silently dropped from
  # the returned options list.
  out$update_sigma_mu <- update_sigma_mu
  out$update_s <- update_s
  out$update_alpha <- update_alpha
  # Updating the number of trees is not currently supported.
  out$update_num_tree <- FALSE
  return(out)
}
# Empirical prior guess at the residual SD of Y given X, obtained from a
# cross-validated lasso fit (cv.glmnet). Used as the default sigma_hat.
GetSigma <- function(X, Y) {
  stopifnot(is.matrix(X) | is.data.frame(X))
  # Expand a data frame into a numeric design matrix without an intercept.
  if (is.data.frame(X)) {
    X <- model.matrix(~ . - 1, data = X)
  }
  lasso_cv <- cv.glmnet(x = X, y = Y)
  preds <- predict(lasso_cv, X)
  sqrt(mean((Y - preds)^2))
}
|
1a2d456c1c574653b515fe72d6f1d036b2a1466b
|
6eb0c9e95e7dc19d762fcf37da0b92e27eb212a5
|
/DTU_ML_kursus/02450Toolbox_R/Scripts/ex5_2_1.R
|
4eed08e51143c61beb7baf92d919d5e8d84161e1
|
[] |
no_license
|
AnnaLHansen/projects
|
81b125e8789c2555c8a2b05c469193094e25610f
|
fb6fe1d268c81146fb819cf42722fe93f9af31f6
|
refs/heads/master
| 2021-07-19T15:29:46.507559
| 2020-09-04T12:23:31
| 2020-09-04T12:23:31
| 211,500,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
ex5_2_1.R
|
# Exercise 5.2.1: simulate observations from a straight-line model with
# additive Gaussian noise and show them in a scatter plot.

# Number of data objects
N <- 100
# Attribute values 0, 1, ..., N - 1
X <- 0:(N - 1)
# Additive Gaussian noise
epsilon <- rnorm(n = N, mean = 0, sd = 0.1)
# True model parameters (intercept and slope)
w0 <- -0.5
w1 <- 0.01
# Noisy outputs from the linear model
y <- w0 + w1 * X + epsilon
# Make a scatter plot
plot(X, y, main="Linear regression", xlab="X", ylab="y")
|
b753a4ac0805356378bbe71dd5172461f34838ae
|
fd00a89804c49a9026229a778518e19b59c24f21
|
/server/rScripts/argsTrain.R
|
76ecfffc7648c2b6d08626c594af9513b70a3de4
|
[] |
no_license
|
yz8169/tmbq_scala_js
|
44453a63b184ef59ad20cba3edd0f32b4b93a387
|
f68bcac8becfe74bef12f2b120c0e6fff7cfed0c
|
refs/heads/master
| 2020-12-08T10:39:32.869984
| 2020-01-15T06:39:53
| 2020-01-15T06:39:53
| 232,960,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,242
|
r
|
argsTrain.R
|
# Title : argsTrain.R
# Objective : Savitzky-Golay smooth each .dta trace of one batch, subtract an
#   estimated baseline, and write the corrected traces to train/<name>/.
# Created by: yz
# Created on: 2018/9/26
library(pracma)
library(baseline)
name <- "TwMCA"
sampleConfig <- read.table(quote = "", "sample_config.txt", header = T, com = '', sep = "\t", check.names = F)
# Batch number to process (hard-coded).
bat=2
colnames(sampleConfig)[2] <- "fileName"
config = subset(sampleConfig, batch == bat)
fileNames = config$fileName
files <- paste("dta/",name,"/", fileNames,".dta", sep = "")
# Savitzky-Golay filter length; savgol requires an odd value.
fl=23
# Create directory f unless it already exists; short-circuits otherwise.
createWhenNoExist <- function(f){
! dir.exists(f) && dir.create(f)
}
for (file in files) {
fileName = basename(file)
data <- read.table(quote = "", file, header = T, com = '', sep = "\t", check.names = F)
colnames(data) = c("SEC", "MZ", "INT")
# Smooth the intensity channel with a Savitzky-Golay filter.
smoothData <- savgol(data$INT, as.numeric(fl))
data$INT <- smoothData
# baseline() expects one spectrum per matrix row, hence the transpose.
baseLineFrame <- data.frame(Date = data$SEC, Visits = smoothData)
baseLineFrame <- t(baseLineFrame$Visits)
baseLine <- baseline(baseLineFrame)
correctValue = c(getCorrected(baseLine))
data$INT = correctValue
dirName="train"
nameDir <- paste(dirName, name, sep = "/")
createWhenNoExist(nameDir)
colnames(data) = c("#SEC", "MZ", "INT")
write.table(data, paste("train", name, fileName, sep = "/") , quote = FALSE, sep = "\t", row.names = F)
}
|
2af9f476c0eec7840b178374979ec519c37db6eb
|
689635789d25e30767a562933f39fcba1cebecf1
|
/Alpha Modelling/QuantStrat/Packages/IKReporting/man/runSharpe.Rd
|
00a3fc1db398afabb2729373a05dd5ac70d5d6ea
|
[] |
no_license
|
Bakeforfun/Quant
|
3bd41e6080d6e2eb5e70654432c4f2d9ebb5596c
|
f2874c66bfe18d7ec2e6f2701796fb59ff1a0ac8
|
refs/heads/master
| 2021-01-10T18:23:23.304878
| 2015-08-05T12:26:30
| 2015-08-05T12:26:30
| 40,109,179
| 5
| 0
| null | 2015-08-05T12:12:09
| 2015-08-03T06:43:12
|
R
|
UTF-8
|
R
| false
| false
| 644
|
rd
|
runSharpe.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/runSharpe.R
\name{runSharpe}
\alias{runSharpe}
\title{Running Sharpe Ratio}
\usage{
runSharpe(R, n = 252, scale = NA, volFactor = 1)
}
\arguments{
\item{R}{a return series}
\item{n}{a lookback period}
\item{scale}{number of periods in a year
(daily scale = 252, monthly scale = 12, quarterly scale = 4)}
\item{volFactor}{a volatility factor -- can be raised to compute a value
biased further to volatility (volFactor > 1)
or away from volatility (volFactor < 1)
(default 1)}
}
\value{
an n-day rolling Sharpe ratio
}
\description{
Running Sharpe Ratio
}
|
c196bdebce36addcf75fd13ac1bcd6b723f683de
|
091211fc733515cbcd42ad63998fcf6184bf3e77
|
/man/regress.Rd
|
5297626901718b5743742c386f814a52ac0609c8
|
[] |
no_license
|
AndrewYRoyal/ebase
|
3560e2e4e717120357b066f27fbfa094d6bb34ec
|
7decc805dc80d26a77505c8c4fb87816c63a7a24
|
refs/heads/master
| 2022-12-22T17:23:30.440452
| 2020-09-30T12:31:43
| 2020-09-30T12:31:43
| 168,870,979
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 210
|
rd
|
regress.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression.R
\name{regress}
\alias{regress}
\title{Regression Method}
\usage{
regress(dat, ...)
}
\description{
Regression Method
}
|
1ab4631b0c04a4091e7390f3e6362b722652b6f8
|
1c44bffe9a0c2f9713f369f46878e04a48f48519
|
/man/attractorScanning.Rd
|
2f04623590cda1d8d7b0c1c6540928a1d3002b44
|
[] |
no_license
|
onlyevil/cafr
|
7ee12d698c3bef46acbbd98530283444ed2efef7
|
41a89fbee93e85a0abc2623d69ccdaa342d26fb5
|
refs/heads/master
| 2021-01-25T01:08:14.234616
| 2015-11-04T19:58:48
| 2015-11-04T19:58:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,694
|
rd
|
attractorScanning.Rd
|
\name{attractorScanning}
\alias{attractorScanning}
\title{Find all attractors in the dataset}
\description{Exhaustively search for all attractors in the dataset.}
\usage{
attractorScanning(data, a=5, maxIter=100, epsilon=1E-14, bin=6, so=3, rankBased=FALSE, negateMI=TRUE)
}
\arguments{
\item{data}{An expression matrix with genes in the rows, samples in the columns.}
\item{a}{Exponent of the mutual information, used to create weight vector for metagenes. }
\item{maxIter}{Max number of iterations.}
\item{epsilon}{Threshold of convergence.}
\item{bin}{Number of bins used when estimate mutual information (default=6).}
\item{so}{Spline order used when estimate mutual information (default=3).}
\item{rankBased}{When \code{TRUE}, convert the expression values into ranks.}
\item{negateMI}{When \code{TRUE}, negate the mutual information if the two vectors have negative
momentum.}
}
\details{
\code{attractorScanning} searches for all the attractors in the dataset by applying \code{CAFrun}
using every gene as a seed in the dataset. It will ignore the attractors which were dominated by
the seed gene. As the function finds a new attractor, it removes the genes from the search list
that are more significant (with higher MI) than the seed, due to that they will lead to the same
attractor. Therefore the size of the search list decreases exponentially during the process.
During the search process, the program shows its progress by the lines:
CENPA ( 10 / 300 ) ...
which shows the current seed (CENPA), the number of attractor it tries to finds (10th), the number
of seeds left to be searched (300).
}
\value{
Returns a matrix of size k by m, where m is the number of genes (rows) in the dataset, and k the
number of converged attractors. Each row of the matrix is the MI of all the genes with the converged
attractor.
}
\note{ Missing values are not allowed as the input to the function in the current version.}
\examples{
\dontrun{
# Load the toy dataset extracted from TCGA OV data
data(ov)
# find attractor using CENPA as a seed
as <- attractorScanning(ov)
# display the top 20 genes in first attractor
sort(as[1,], decreasing=TRUE)[1:20]
}
}
\seealso{
\code{\link{parAttractorScanning}} for executing \code{attractorScanning} on the SGE framework.
\code{\link{findAttractor}}, \code{\link{attractorScanningGL}}
}
\references{
Wei-Yi Cheng, Tai-Hsien Ou Yang and Dimitris Anastassiou, Biomolecular events in cancer revealed
by attractor metagenes, PLoS Computational Biology, Vol. 9, Issue 2, February 2013.
}
\author{Wei-Yi Cheng}
\keyword{Attractor Metagenes}
\concept{attractor, metagene, MI}
|
8973ba6b788512987cd30c8aa06537e77cdb65d5
|
e169cdc073b991918fe4f2fcab99ffa3a7e9b0c8
|
/MarcoPolo/app.R
|
93e98369e50958cc6f852b4b7d4759afa7ebacfb
|
[] |
no_license
|
hollipista/DataScienceCapstone
|
79a993d44791138f90e7c3718e6701f478630611
|
52d755f16cf662fc34016c0b80a35e6001a2dea2
|
refs/heads/main
| 2023-04-08T06:56:31.020292
| 2021-04-22T20:18:30
| 2021-04-22T20:18:30
| 349,172,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,425
|
r
|
app.R
|
library(shiny)
load(file='n_gram_prob.RData')
library(tibble)
library(dplyr)
library(tidytext)
library(tidyr)
library(shinythemes)
# Fall back to the unigram model: return the most probable word overall,
# breaking probability ties uniformly at random.
predict_uni <- function() {
  top <- UG[UG$prob == max(UG$prob), ]
  sample(top$word1, 1)
}
# Predict the next word from the bigram table `BG` given the previous
# word w1.  Ties for the highest probability are broken at random; if w1
# has no bigram continuation, back off to the unigram predictor.
predict_bi <- function(w1) {
  cand <- BG[BG$word1 == w1, c('word2', 'prob')]
  cand <- cand[order(-cand$prob), ]
  cand <- cand[!is.na(cand$prob), ]
  if (nrow(cand) < 1) {
    return(predict_uni())
  }
  best <- cand[cand$prob == max(cand$prob), ]
  sample(best$word2, 1)
}
# Predict the next word from the trigram table `TG` given the two
# preceding words (w1, w2).  Ties for the highest probability are broken
# at random; on a trigram miss, back off to the bigram predictor with w2.
predict_tri <- function(w1, w2) {
  cand <- TG[TG$word1 == w1 & TG$word2 == w2, c('word3', 'prob')]
  cand <- cand[order(-cand$prob), ]
  cand <- cand[!is.na(cand$prob), ]
  if (nrow(cand) < 1) {
    return(predict_bi(w2))
  }
  best <- cand[cand$prob == max(cand$prob), ]
  sample(best$word3, 1)
}
# Predict the next word for a raw user-input string.
#
# str: the text typed so far (any case; may contain digits/punctuation).
# Returns one predicted word (character scalar).
#
# The input is lower-cased, then cleaned with a regex that strips digit
# runs, punctuation other than apostrophes, parenthesised spans, and a
# trailing whitespace character.  If the original string contains at
# least two words (a whitespace followed by a letter), the final bigram
# of the cleaned text is fed to the trigram predictor; otherwise the
# single remaining token goes to the bigram predictor.
getWords <- function(str){
  inputText <- tibble(text = tolower(str))
  if (length(grep("\\s[a-zA-Z]", str))>0){
    # At least two words: clean, tokenise into bigrams, keep the last one
    # and split it into its two component words (columns w1, w2).
    LastBG <- mutate(inputText, text = gsub(x = text,
                                            pattern = "[0-9]+|(?!')[[:punct:]]|\\(.*\\)|\\s$",
                                            replacement = "", perl=TRUE)) %>%
      unnest_tokens(bigram, text, token = "ngrams", n = 2) %>%
      slice_tail(n = 1) %>%
      separate(bigram, c("w1", "w2"), sep = " ")
    # Back off from the trigram model using the last two words.
    predict_tri(as.character(LastBG[1]),as.character(LastBG[2]))
  }
  else{
    # Single word only: clean it and query the bigram model directly.
    LastBG <- mutate(inputText, text = gsub(x = text,
                                            pattern = "[0-9]+|(?!')[[:punct:]]|\\(.*\\)|\\s$",
                                            replacement = "", perl=TRUE))
    predict_bi(as.character(LastBG[1]))
  }
}
ui <- fluidPage(theme = shinytheme("slate"),
titlePanel(
h1("Data Science Capstone - Final Project", align="center")
),
titlePanel(
h2("Marco… Polo!", align="center")
),
tags$br(),
tags$h5("What is this about? Here you can try my text predictor Shiny App made for Data Science
Capstone Project on Coursera (JHU)."),
tags$h5("In the last couple of weeks I've built a predictive model and a web
application. This model tries to predict the next word of any given string -
that’s called text prediction. Let's try it! Enter any text in English
and see how the app tries to find out your next word!"),
tags$br(),
fluidRow(column(4, offset = 4, textInput("caption", "Let start to type your text here:", "TextInput")),
column(4,style = "margin-top: 25px;",actionButton("reset", "Reset"))),
tags$h5(strong("...and your extrapolated sentence is:"), align="center"),
fluidRow(column(4, offset = 4,verbatimTextOutput("value"))),
tags$h5("Let me say some words about my model and the progression I've done
from the first Milestone Report, that you can find"),
tags$a(href="https://rpubs.com/hollipista/DataScienceCapstoneMilestoneReport", "HERE (click!)"),
tags$h5("I was not satisfied with the accuracy of my first model. There were a lot of trigrams
that were not occur in my corpus - due to limited sample size. So I decided to extend the
corpus as large as I can. I reached the limits in terms of memory very soon but I thought
I have more time than memory: I tried to split the input texts to slices and process
them as a sequence and summarize after that. Whit this approach I was able to process
not just the full dataset of the course (thanks for that SwiftKey!) but also the
http://qwone.com/~jason/20Newsgroups/ data sets."),
tags$h5("I took a long time but I hope worth it. The result was a lot of ten millions of bigrams
and trigrams, so I needed to cut the data and get rid of the combinations with very
low frequency. I had to do it outside of R unfortunatelly but I can tell you that
I kept about one million trigrams and one million bigrams (of course the most frequented
ones.) This is the so called pruning in order to decrease the size of model - of course
sacrificed some accuracy.
The other direction would have been to build 4-grams that can leed to a more
'clever' model because my actual model predict stopwords (eg. articles) as well,
which means in many cases the trigrams convey just unigrams' information to put it
very simplified. For this project, my decision was to use just trigrams."),
tags$h5("I've applied Kneser-Ney smoothing on the data which is a bit problematic due to
I've pruned the n-gram datasets independently that caused a smaller inconsistency
but it seemed to me that it has small significance regarding the model accuracy. My model
is a back off model: it always try to predict based on all available input (in our
case it means the last two tokens/words) but if there are no enought evidence, it
cuts back a word (so predict based on the last word). "),
tags$br(),
tags$h5("Please do not forget that it was just a play around of text prediction ;-) As
you've seen my model is very simple and knows nothing about syntax or semantic,
it's a simple model based on the occurance and order of words."),
tags$br(),
tags$h5("I would like to say thanks for Thiloshon Nagarajah, John O. Bonsak, Qiong Wu and Igor Hut
for the their very informative and useful publications."),
tags$br(),
tags$a(href="https://github.com/hollipista/DataScienceCapstone", "Here you can find the repo of the project")
)
# Shiny server: recompute the predicted word whenever the text input
# changes, render "typed text + prediction", and clear the input box
# when the Reset button is pressed.
server <- function(input, output, session) {
  pred <- reactive({
    getWords(input$caption)
  })
  output$value <- renderText({
    paste(input$caption, pred())
  })
  observe({
    input$reset  # take a reactive dependency on the Reset button
    updateTextInput(session, "caption", value = "")
  })
}

# Launch the application.
shinyApp(ui, server)
|
f30503b4388433e30e2eba14e46008313abec9da
|
0426f02d23e02ec70640b88e4859abb6cc1ad019
|
/wycena_BS.R
|
d9ca67695f271db2e33883a079a52188f529271f
|
[] |
no_license
|
frdanconia/time_series_tests
|
8083afab5672e57a7f4158a6f664a9d931be187f
|
362a5ea1efd8835e2a55a124c42e46669af073fa
|
refs/heads/master
| 2022-04-02T12:44:50.672796
| 2020-01-27T20:36:25
| 2020-01-27T20:36:25
| 236,589,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,064
|
r
|
wycena_BS.R
|
# Black-Scholes price of a European call option.
#   K     - strike price
#   S0    - current price of the underlying (default 1)
#   sigma - volatility (default 1)
#   r     - risk-free interest rate (default 0)
#   T     - time to maturity (default 1)
# Returns the fair price of the call.
wycena_BS <- function(K, S0 = 1, sigma = 1, r = 0, T = 1) {
  # Standard d1/d2 terms of the Black-Scholes formula.
  d1 <- (log(S0 / K) + T * (r + sigma^2 / 2)) / (sigma * sqrt(T))
  d2 <- (log(S0 / K) + T * (r - sigma^2 / 2)) / (sigma * sqrt(T))
  S0 * pnorm(d1) - K * exp(-r * T) * pnorm(d2)
}
# Fair price as a function of r, sigma and K.
rate <- seq(-0.95, 1, by = 0.05)                            # interest rates r
sigma <- seq(-2, 2, by = 0.05); sigma <- sigma[sigma != 0]  # market volatilities
wykup <- seq(0, 2, by = 0.05)                               # strike prices
# Three-dimensional array of prices, one cell per (r, sigma, K) combination.
# BUG FIX: array()'s 'dimnames' argument must be a list, not a character
# vector -- the original c("r","sigma","K") raises "'dimnames' must be a
# list".  Label each dimension with its parameter values instead (the
# list names r/sigma/K become the dimension names).
cena <- array(
  0,
  dim = c(length(rate), length(sigma), length(wykup)),
  dimnames = list(r = rate, sigma = sigma, K = wykup)
)
for (i in seq_along(rate))
  for (j in seq_along(sigma))
    for (k in seq_along(wykup))
      cena[i, j, k] <- wycena_BS(wykup[k], sigma = sigma[j], r = rate[i])
# Wykres 3D
library(rgl)
# Draw a 3D surface with rgl, colouring the surface along a blue-to-red
# gradient according to the normalised height z.
#   x, y - grid coordinates
#   z    - surface heights
#   ...  - extra arguments forwarded to persp3d()
#   labs - axis labels for the x, y and z axes
rysuj <- function(x, y, z, ..., labs = c("sigma", "K", "Cena")) {
  # Normalise z to [0, 1]; high values shade red, low values shade blue.
  frac <- (z - min(z)) / (max(z) - min(z))
  color <- rgb(frac, 0, 1 - frac, maxColorValue = 1)
  persp3d(x, y, z, col = color, ...,
          xlab = labs[1], ylab = labs[2], zlab = labs[3])
}
|
f9b6d435ad360d2249904b02fffc5bce216c1b3e
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/oppr/R/print.R
|
2c7cf1c4cd26a4050ddd048428081f4fa471bf17
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,281
|
r
|
print.R
|
#' @include internal.R
NULL
#' Print
#'
#' Display information about an object.
#'
#' @param x Any object.
#'
#' @param ... not used.
#'
#' @return None.
#'
#' @seealso \code{\link[base]{print}}.
#'
#' @name print
#'
#' @aliases print,Id-method print,tbl_df-method
#'
#' @examples
#' a <- 1:4
#' print(a)
NULL
#' @rdname print
#'
#' @method print ProjectProblem
#'
#' @export
#'
print.ProjectProblem <- function(x, ...) x$print()
#' @rdname print
#'
#' @method print ProjectModifier
#'
#' @export
#'
print.ProjectModifier <- function(x, ...) x$print()
#' @rdname print
#'
#' @method print Id
#'
#' @export
#'
print.Id <- function(x, ...) message("id: ", x)
#' @name print
#'
#' @rdname print
#'
#' @usage \S4method{print}{Id}(x)
#'
methods::setMethod("print", "Id", function(x, ...) print.Id(x))
#' @rdname print
#'
#' @method print OptimizationProblem
#'
#' @export
#'
print.OptimizationProblem <- function(x, ...) x$print()
#' @rdname print
#'
#' @method print ScalarParameter
#'
#' @export
#'
print.ScalarParameter <- function(x, ...) x$print()
#' @rdname print
#'
#' @method print ArrayParameter
#'
#' @export
#'
print.ArrayParameter <- function(x, ...) x$print()
#' @rdname print
#'
#' @method print Solver
#'
#' @export
#'
print.Solver <- function(x, ...) x$print()
|
bfb65cb376c091a0a5be580401f358215ba01aa8
|
a932a56ebe8a8b224ce254cffd54317ea004060c
|
/code.R
|
41541468603d37d789f69a19ae30c7dc7737b151
|
[] |
no_license
|
nm1874/week12_workshop
|
5f43da1aed45ea254fc3419d3e3c693479303b00
|
ca19e59dee080267155cf2e6a343939df4d884ea
|
refs/heads/main
| 2023-01-29T12:48:55.030214
| 2020-12-08T15:57:57
| 2020-12-08T15:57:57
| 319,688,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 955
|
r
|
code.R
|
# Worksheet: differentials of maps and plotting a parametrised surface.
# NOTE(review): jacobian() and grad() below are presumably from the
# numDeriv package, which is not loaded in this file -- confirm that the
# caller attaches it before sourcing.

#1. F : R^3 -> R, F(v) = v1*v3 - v2^2, evaluated near v0.
F<-function(v) v[1]*v[3]-v[2]^2
v0 <- c(4,2,1)
# g : R^2 -> R^3, a parametrisation; note g(v1) = v0 for v1 = (2, 1).
g<-function(v) c(v[1]^2, v[1]*v[2]^2, v[2]^4)
v1<-c(2, 1)
#a) Jacobian of g at v1; its columns span the tangent space of the image.
Dg <- jacobian(g, v1); Dg
# The basis of the tangent space is the image of Dg:
# columns c(4,1,0) and c(0,4,4).
#b) Check that the image of Dg lies in the kernel of DF: both products
# below should be (numerically) zero.
DF <- grad(F, v0); DF
DF%*%Dg[,1]
DF%*%Dg[,2]
#c) Plot the parametrised surface (x, y, z) = (u^2, u*v^2, v^4).
library(lattice)
#install.packages("scatterplot3d") #comment this out after running it once
library(scatterplot3d)
n <- 18
u <- seq (from = 1.5, to = 2.5, length.out = n+1); u
v <- seq (from = .5, to = 1.5, length.out = 2*n+1); v
# NOTE(review): this overwrites the function g defined above with a grid
# data frame -- it works top-to-bottom, but is fragile on re-runs.
g <- expand.grid(u=u,v=v); head(g)
# Compute x, y and z as functions of the parameter pairs.
g$x = (g$u)^2
g$y = (g$u)*(g$v)^2
g$z = (g$v)^4
head(g)
# Reshape the coordinate columns into length(u)-row matrices, the layout
# wireframe() expects for a surface.
x <- matrix(g$x, length(u))
y <- matrix(g$y, length(u))
z <- matrix(g$z, length(u))
# Draw the parametrised surface.
wireframe(z ~ x * y, scales = list(arrows = FALSE))
|
f7060d1aa657b011df92ef416a8f363d31a326b2
|
0e2f1ad8be855562a48a7f4961a91dd4a4b1a9e7
|
/amplicon_metacoder_mal_DNA.R
|
1830d521171b7d64128a7d34751351dd92371e80
|
[] |
no_license
|
ngeraldi/Global_ocean_genome_analysis
|
7ba1b2c2250dc476e4a662c8b7ff6bcae9b07f88
|
08a64c282c9f638c184a9807629d47f2c5c9ef3d
|
refs/heads/master
| 2020-04-30T14:39:27.083026
| 2020-01-16T13:28:53
| 2020-01-16T13:28:53
| 176,897,506
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,674
|
r
|
amplicon_metacoder_mal_DNA.R
|
library(dplyr)
library(tidyr)
library(metacoder) #
################################################################################
# get otu table and standardise
#source("/Users/geraldn/Dropbox/Documents/KAUST/eDNA/DMAP/R/Scripts/amplicon_stats_tara_source.R")
source("/Users/geraldn/Dropbox/Documents/KAUST/eDNA/DMAP/R/Scripts/amplicon_stats_mal_source.R")
########################################################################################################
### get unique lineages !!!!!
lin_col<-c(42,43,26,28:38)
lin_otu<-dat %>%
select_at(lin_col) %>%
filter(!duplicated(otu_col)) %>%
mutate_all(as.character)
lin_sp <- lin_otu %>%
filter(!duplicated(sp_col))
# for MAL
########################################################################################################
##### use taxa to parse taxonomy head(lin)
### input two sheets,
### otu - samples in columns and otu in rows
### samples- samples in each row
##### spread based on samples ####### names(dat_otu) summary(dat_otu_sam) names(dat_otu_sam$read_low)
# use stat for mal.
# 1- "raw.otu.DNA_read_low" 2- "raw.species.DNA_read_low" 3- "rare.otu.DNA_read_low" 4- "rare.species.DNA_read_low"
sdat<-stat[["rare.species.DNA_read_low"]] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# names(sdat) names(dat$read_low)
tr<-c(17:204) ## rows of taxa
dr<-c(1:16,206:221) ## rows of data for sam
sdat<-sdat[!duplicated(sdat$Sample.ID),] ## check deep_65_DNA is duplicated
otu<-sdat[,tr]
otu1<-data.frame(t(otu)) ### transpose so samples are in columns, taxa in rows
names(otu1)<-sdat$Sample.ID
otu1 <- otu1 %>% # join to add lineage data names(otu1)
mutate(sp_col=as.character(row.names(otu1))) %>%
left_join(lin_sp)
#mutate(sp_num=as.character(sp_num))
#
###### further filter taxa, remove sk and k and if not metazoan names(otu1)
tr<-c(1:226) # names(otu1)
dr<-c(227:length(names(otu1)))
otu2<-otu1 %>%
filter(Kingdom == "Metazoa")
otu3<-otu2[which(rowSums(otu2[,tr])>0),] # remove taxa (rows) with no reads
mes<-otu3[,which(colSums(otu3[,tr])>0)] # remove samples (columns) with no reads
otu4<-cbind(mes,otu3[,dr]) # add back in lineage
## make column of ocean basin
### make ocean basins
geo<-read.table("/Users/geraldn/Dropbox/Documents/KAUST/eDNA/DMAP/R/CSV/Global_layers_oct18.csv", sep=",",header=T)
geo1<-geo[,c(1,23)]# get lohg and ocean names(geo)
### match samples from sam
dr<-c(1:16,206:221) ## rows of data for sam names(sdat)
nam_mes<-as.character(names(mes))
## for sampls names(sdat) sdat$Sample.ID
sam<- sdat %>%
select_at(dr) %>%
filter(Sample.ID %in% names(mes)) %>%
left_join(geo1) %>%
mutate(cruise_leg="profile") %>% # make column with different cruise legs
mutate(cruise_leg=replace(cruise_leg,grepl("deep",Sample.ID),"deep")) %>%
mutate(cruise_leg=replace(cruise_leg,grepl("M",Sample.ID),"surface")) %>%
mutate(pelagic_zone=replace(pelagic_zone,grepl("M",Sample.ID),"E")) %>% # fix pelagic zone catagories (E-<200, M-<1000 B)
mutate(pelagic_zone=replace(pelagic_zone,grepl("deep",Sample.ID),"B")) %>%
mutate(pp_mean_cat=cut(Present.Surface.Primary.productivity.Mean, breaks=c(-Inf,.0025,.005, Inf),
labels=c("<0.0025",">0.0025 and <0.005",">.005"))) %>% # pp catagory
mutate(land_dist_cat=cut(land_dist, breaks=c(-Inf,5,10, Inf),
labels=c("<500",">500 and <1000",">1000"))) %>% # land_dist cat
mutate(Depth_zone_cat="Epipelagic") %>%
mutate(Depth_zone_cat=replace(Depth_zone_cat,pelagic_zone=="B","Bathypelagic")) %>%
mutate(Depth_zone_cat=replace(Depth_zone_cat,pelagic_zone=="M","Mesopelagic"))
# 3 dpeth cat, furface ppmean dist_land hist(sdat$Depth)
#### a few checks
unique(otu2$Class)
mam<-otu1 %>%
filter(Class == "Aves")
otu5<-otu4
otu5$median <- apply(otu5[,1:221], 1, median)
mes<-otu5 %>% # names(otu4)
mutate(sumread=rowSums(.[1:221])) %>% # rowSums(.[1:5])
mutate(readmean=rowMeans(.[1:221])) %>%
select(sp_col:readmean) %>%
arrange(desc(sumread)) # median
########################################################################################################
# make tree data ----------------------------------------------------------
########################################################################################################
### all metazoa removed superkingdom
tax<-taxa::parse_tax_data(otu4, class_cols =c("Kingdom","Phylum","Class","Order",
"Family","Genus","Species"))
# tax2$data
# sum per taxon?????
tax$data$tax_abund_per_sam <- calc_taxon_abund(tax, "tax_data",
cols = sam$Sample.ID)
## ra
# number of samples tha have reads for each taxon: for depth catagories or DNA names(sam)
tax$data$tax_occ_depth <- calc_n_samples(tax, "tax_abund_per_sam", groups = sam$pelagic_zone)
#################################
names(otu4)
tr<-c(2:218) # rows of samples taxon col 1
# To plot read depth, you first need to add up the number of reads per taxon.
# The function `calc_taxon_abund` is good for this.
tax$data$taxon_counts <- calc_taxon_abund(tax, dataset = "tax_data")
tax$data$taxon_counts$total <- rowSums(tax$data$taxon_counts[,tr]) #
# print(tax)
# get_data(tax)
# tax$all_names() tax$taxon_indexes tax$classifications
# plot --------------------------------------------------------------------
heat_tree(tax,
node_label = taxon_names,
node_size = n_obs,
node_color = n_obs,
node_size_axis_label = "OTU count",
initial_layout = "large-graph", layout = "davidson-harel"
)
# n_obs- number of otus
# total-number of reads
# n_sample - number of samples edge_size = n_samples
# also use edge_size
#metazoa_plot <- tax %>%
#filter_taxa(name == "Chordata", subtaxa = TRUE) %>%
heat_tree(tax,
node_label = taxon_names,
node_size = n_obs, # number of samples - right
node_size_axis_label = "Number of samples",
#node_size = "log10",
node_size_range = c(0.01,0.05),
node_color = total, #total is number of reads-left of legend
node_color_axis_label = "Number of reads",
node_color_trans = "log10",
node_label_size_range = c(0.02, 0.026),
#node_label_size_trans = "area",
node_label_max = 65,
overlap_avoidance = 0.8,
initial_layout = "large-graph", layout = "davidson-harel")
## initial_layout = "fruchterman-reingold", layout = "davidson-harel",
## initial_layout = "davidson-harel", layout = "reingold-tilford" # ok better labels
## large-graph , gem-BAD longtime, mds,reingold-tilford
# map comparisons ---------------------------------------------------------
########################### names(sam)
sam2<-sam[complete.cases(sam$ocean),]
### get differences cruise_leg Depth_zone_cat ocean
tax$data$diff_table <- compare_groups(tax, dataset = "tax_abund_per_sam",
cols = sam2$Sample.ID,
groups = sam2$ocean)
# print(tax$data$diff_table_depth) # keep only sig p_value (wilcox_p_value)
## change p_vlaue to adjusted
tax$data$diff_table$wilcox_p_value <- p.adjust(tax$data$diff_table$wilcox_p_value,
method = "fdr")
## set anything > 0.05 to 0 in log2_median_ratio
tax$data$diff_table$log2_median_ratio[tax$data$diff_table$wilcox_p_value >0.05]<-0
### plot
heat_tree_matrix(tax,
dataset = "diff_table",
key_size=0.6 , #0.5 means half the width/height of the graph
node_size = n_obs,
node_label = taxon_names,
node_color = log2_median_ratio,
node_color_range = diverging_palette(),
node_color_trans = "linear",
node_color_interval = c(-3, 3),
edge_color_interval = c(-3, 3),
node_size_axis_label = "Number of OTUs",
node_color_axis_label = "Log2 ratio median proportions",
node_label_size_range = c(0.025, 0.035),
node_label_max = 30,
initial_layout = "large-graph", layout = "davidson-harel")
## grean or pos, more in treat 1 top ??
## only significant ?
# hist(tax$data$diff_table$wilcox_p_value)
# save.image("~/Dropbox/Documents/KAUST/eDNA/DMAP/R/Enviros/metacoder_tara.RData")
########################################################################################################
|
9a37111354950bf67cb3b2bedee1e2ecf8e1b665
|
79a79ac668a49b0902488839a3d9d8d32a988847
|
/man/average_clust.Rd
|
5166ec8327a466cb9b2abcdd9107d03db84d83d6
|
[] |
no_license
|
cran/convergEU
|
86b7ad6d02dd825e88f88b950a1c4acb844c6b1a
|
5767b1d9e19178488b8095a44e0da8bde885780c
|
refs/heads/master
| 2023-03-04T02:55:52.942150
| 2023-02-18T07:50:02
| 2023-02-18T07:50:02
| 247,519,134
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,029
|
rd
|
average_clust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/average_clust.R
\name{average_clust}
\alias{average_clust}
\title{Unweighted average of countries}
\usage{
average_clust(myTB, timeName = "time", cluster = "EU27")
}
\arguments{
\item{myTB}{time by member states dataset.}
\item{timeName}{name of the variable that contains time.}
\item{cluster}{the label defining a cluster; one string selected within
the following: "EU12" , "EU15" ,"EU19","EU25" ,"EU27_2007", "EU28", "EU27_2020",
"Eurozone","EA", "all" (for all countries in the dataset).}
}
\value{
The dataset with the average of clustered countries.
}
\description{
The computation is based on clusters defined in an object
created by invoking *convergEU_glb()*.
At now only cluster labels contained into *convergEU_glb()* are possible.
}
\details{
The cluster specification is based on labels: "EU27_2020", "EU27_2007", "EU25", "EU19",
"EU15", "EU12","EA", "Eurozone", "all".
The option cluster = "all" indicates that all countries in the dataset
have to be considered.
}
\examples{
# Example 1
# Unweighted average of Member States for cluster "EU12":
myAC1<-average_clust(emp_20_64_MS,timeName = "time",cluster = "EU12")
# Visualize results for Italy:
myAC1$res[,c(1,17)]
# Visualize results for the first five member states:
myAC1$res[,c(1:6)]
# Example 2
# Unweighted average of Member States for cluster "EU25":
myAC2<-average_clust(emp_20_64_MS,timeName = "time",cluster = "EU25")
# Visualize results for France:
myAC2$res[,c(1,13)]
# Visualize results for the first six member states:
myAC2$res[,c(1:7)]
# Example 3
# Unweighted average of countries for cluster "EU27":
myAC<-average_clust(emp_20_64_MS,timeName = "time",cluster = "EU27")
# Visualize results for Germany:
myAC$res[,c(1,7)]
# Visualize results for the first five member states:
myAC$res[,c(1:6)]
}
\references{
{\url{https://unimi2013-my.sharepoint.com/:u:/g/personal/federico_stefanini_unimi_it/EW0cVSIgbtZAvLPNbqcxdX8Bfn5VGSRHfAH88hQwc_RIEQ?e=MgtSZu}}
}
|
bc0249dd3442388a7d1bf78cf2b8a7ae53b6869e
|
42affdb459e72da8ee1249514b8690a139a718fc
|
/cMeansMembershipSummary.R
|
ccf2161d2afb128cf0f53e6fd3c1422a5e62b0f3
|
[] |
no_license
|
lucarri/Fuzzy-c-means-SNAC-K
|
8f04cec93a0fbf90a9d504afe9fefa1a2cdb0c98
|
e0451ff3e6111a7b38388f4187817eb67a9befef
|
refs/heads/main
| 2023-05-05T20:46:07.481473
| 2021-06-03T16:38:04
| 2021-06-03T16:38:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,042
|
r
|
cMeansMembershipSummary.R
|
# Run fuzzy c-means clustering repeatedly (in parallel via foreach/doSNOW)
# and summarise the membership matrices across repetitions.
#
#   mca          - numeric data matrix (rows = individuals) to cluster
#   num.clusters - number of fuzzy clusters to fit
#   repetitions  - number of independent c-means runs to average over
#   m            - fuzziness exponent passed to e1071::cmeans()
#
# Returns list(membership = ...), where the inner list carries the raw
# per-repetition matrices plus:
#   $membership - individuals x clusters matrix of mean memberships
#   $cluster    - per-individual index of the cluster with highest mean
#                 membership
#
# Within each run the clusters are relabelled in order of increasing size
# so that label k means "k-th smallest cluster" in every repetition,
# making the membership matrices comparable before averaging.
cMeansMembershipSummary <- function(mca,num.clusters,repetitions,m)
{
  library(snow)
  library(foreach)
  library(doSNOW)
  library(e1071)
  # Pre-sized matrix reused inside each repetition to hold the relabelled
  # membership values (individuals x clusters).
  #membership.current <- data.frame(array(0,dim=c(nrow(mca$ind$coord),num.clusters,repetitions))
  membership.current <- matrix(0,nrow=nrow(mca),ncol=num.clusters)
  # NOTE(review): this seeds only the master process; %dopar% workers do
  # not inherit it, so parallel runs are not reproducible -- confirm intent.
  set.seed(1234)
  # Progress bar driven from inside the parallel loop via doSNOW options.
  pb <- txtProgressBar(max = repetitions, style = 3)
  progress <- function(n) setTxtProgressBar(pb, n)
  opts <- list(progress = progress)
  membership <- list()
  # One c-means fit per repetition, run in parallel; the foreach result is
  # a list of membership matrices.
  # NOTE(review): `combine=rbind` is presumably a typo for `.combine=rbind`;
  # as written it is not the combine function, and the list result is what
  # the averaging code below relies on -- confirm before "fixing".
  membership <- foreach(j=1:repetitions,combine=rbind,.options.snow=opts) %dopar%{
    # e1071 must be attached on each worker.
    library(e1071)
    # Fit fuzzy c-means; each iteration uses its own random initialisation.
    cmean <- cmeans(mca,centers=num.clusters,iter.max=1000,m=m)
    # Relabel clusters by increasing size so that labels are consistent
    # across all repetitions.
    ord <- order(cmean$size)
    for(i in 1:length(cmean$size)){
      membership.current[,i] <- cmean$membership[,ord[i]]
    }
    # Report progress back to the master's progress bar.
    setTxtProgressBar(pb, j)
    return(membership.current=membership.current)
  }
  # Stack the per-repetition membership matrices into a 3-d array ...
  membership.current.ave <- array(0,dim=c(nrow(mca),num.clusters,repetitions))
  for(i in 1:repetitions){
    membership.current.ave[,,i] <- membership[[i]]
  }
  # ... and average element-wise across repetitions.
  membership$membership <- apply(membership.current.ave,1:2,mean)
  # Hard assignment: cluster with the greatest mean membership per row.
  membership$cluster <- apply(membership$membership,1,which.max)
  #membership$id <- id[as.numeric(row.names.data.frame(mca)),]
  return(list(membership=membership))
}
|
6ac097e4170ddc8230b867c4a068a0375b7d2c60
|
3e39e913510eb38fd1fd29fdad918d9c583f993c
|
/double_robust2/DR_500_2ver.R
|
5bdf599f337c3b4be052cdcd57f63ef51457b420
|
[] |
no_license
|
Yu-Zhou/Double
|
80e3a98f755571d8da007862a091e56f460e328a
|
0eb14fb6ee786045db31a126528449b76c7af258
|
refs/heads/master
| 2021-01-19T06:36:16.158284
| 2015-03-02T19:38:32
| 2015-03-02T19:38:32
| 31,485,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,704
|
r
|
DR_500_2ver.R
|
## double robust estimator two covariates
#
source("basic2.R")
sigma = 0.05 #global parameter
n = 500 #global parameter
sum_names <- c("intercept","X1","X2","hatQ")
p_level <- 1
eta_range <- 1:3
sq<-rep(0,3)
pop.size<-800
it.num<-7
nvars<-length(sq)
Domains<-cbind(rep(-1,nvars),rep(1,nvars))
sim_function <- function(sim_num=2,p_level=0,hard_limit=FALSE){
sim_data_IPWE_PST<- sim_data_IPWE_PSM <- sim_data_DR_PST <- sim_data_DR_PSM <- NULL
####################### data generation #######################
for(i in 1:sim_num){
ptm0 <- proc.time()
x1 <- runif(n,min=-1.5,max=1.5)
x2 <- runif(n,min=-1.5,max=1.5)
x<-cbind(1,x1,x2)
tp <- ilogit( -1 + 0.8*x1^2+ 0.8* x2^2 )
# incorrect model: tp2 <- ilogit(-1 + x1+ x2 )
a<-rbinom(n,1,tp)
y <- Mean_Function(x1,x2,a)
# plot(a,y)
# quantile(y[a==0])
# quantile(y[a==1])
# ############################## Propensity score model ########################
logit.true <- glm(a~I(x1^2)+I(x2^2),family=binomial, epsilon=1e-14)
ph.true<- as.vector(logit.true$fit)
logit.m <- glm(a~ x1+x2 ,family=binomial, epsilon=1e-14)
ph.m<- as.vector(logit.m$fit)
###################################### 1. estimate eta for IPWE , true PS ########
mean_summary_IPWE_PST <- Mestimate(x,y,a,ph.true,p_level)
summary_20_IPWE_PST <- Qestimate(x,y,a,ph.true,.2,p_level)
summary_50_IPWE_PST <- Qestimate(x,y,a,ph.true,.5,p_level)
###################################### 2. estimate eta for IPWE , misspecified PS ########
mean_summary_IPWE_PSM <- Mestimate(x,y,a,ph.m,p_level)
summary_20_IPWE_PSM <- Qestimate(x,y,a,ph.m,.20,p_level)
summary_50_IPWE_PSM <- Qestimate(x,y,a,ph.m,.5,p_level)
##################################### 3. DR estimate optimal eta for quantile criterion, true PS ################
summary_20_DR_PST <- R_Qestimate(x,y,a,ph.true,.2,p_level)
summary_50_DR_PST <- R_Qestimate(x,y,a,ph.true, .5,p_level)
##################################### 4. DR estimate optimal eta for quantile criterion, misspecified PS ################
summary_20_DR_PSM <- R_Qestimate(x,y,a,ph.m,.2,p_level)
summary_50_DR_PSM <- R_Qestimate(x,y,a,ph.m,.50,p_level)
sim_data_IPWE_PST <- rbind(sim_data_IPWE_PST,
c(mean_summary_IPWE_PST , 1),
c(summary_20_IPWE_PST , 20),
c(summary_50_IPWE_PST , 50)
)
sim_data_IPWE_PSM <- rbind(sim_data_IPWE_PSM,
c(mean_summary_IPWE_PSM ,1),
c(summary_20_IPWE_PSM ,20),
c(summary_50_IPWE_PSM ,50)
)
sim_data_DR_PST <- rbind(sim_data_DR_PST,
c(summary_20_DR_PST ,20),
c(summary_50_DR_PST ,50)
)
sim_data_DR_PSM <- rbind(sim_data_DR_PSM,
c(summary_20_DR_PSM ,20),
c(summary_50_DR_PSM ,50)
)
ptm1=proc.time() - ptm0
jnk=as.numeric(ptm1[3])
cat('\n','It took ', jnk, "seconds, Iteration:", i,'\n')
print(paste0("Current working dir: ", i))
}
#comments start here
list(sim_data_IPWE_PST = sim_data_IPWE_PST ,
sim_data_IPWE_PSM = sim_data_IPWE_PSM ,
sim_data_DR_PST = sim_data_DR_PST ,
sim_data_DR_PSM = sim_data_DR_PSM)
}
# sim_function(1)
cores <- 6
p_level <- 0
results <- mclapply(X=rep(10,cores), FUN=sim_function, p_level,hard_limit=FALSE,mc.cores=cores )
save.image("Sigma_0.3_500_ver7.RData")
|
0f9136e40581b7d14c9da43f187e07f49009247e
|
9b9e21fea61870f3458bec92ee25a5a9f10345c3
|
/man/makeGstatCmd.Rd
|
eb400b7b0bec83466a718aff66736ea642693be9
|
[] |
no_license
|
brendo1001/GSIF
|
dd46bc744309a970ef5622f1af423e179bf1d3d7
|
12ed85244a1ca46212033f0ecc16f8cd0303ea64
|
refs/heads/master
| 2021-01-14T09:18:33.661687
| 2014-01-15T00:00:00
| 2014-01-15T00:00:00
| 18,876,903
| 2
| 2
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 3,891
|
rd
|
makeGstatCmd.Rd
|
\name{makeGstatCmd}
\alias{makeGstatCmd}
\encoding{latin1}
\title{Make a gstat command script}
\description{Generates a command script based on the regression model and variogram. This can then be used to run predictions/simulations by using the pre-compiled binary \code{gstat.exe}.}
\usage{
makeGstatCmd(formString, vgmModel, outfile, easfile,
nsim = 0, nmin = 20, nmax = 40, radius, zmap = 0,
predictions = "var1.pred.hdr", variances = "var1.svar.hdr",
xcol = 1, ycol = 2, zcol = 3, vcol = 4, Xcols)
}
\arguments{
\item{formString}{object of class \code{"formula"} --- regression model}
\item{vgmModel}{object of class \code{"vgmmodel"} or \code{"data.frame"}}
\item{outfile}{character; output file for the command script}
\item{easfile}{character; file name for the GeoEAS file with observed values}
\item{nsim}{integer; number of simulations}
\item{nmin}{integer; smallest number of points in the search radius (see gstat user's manual)}
\item{nmax}{integer; largest number of points in the search radius (see gstat user's manual)}
\item{radius}{numeric; search radius (see gstat user's manual)}
\item{zmap}{numeric; fixed value for the 3D dimension in the case of 3D kriging}
\item{predictions}{character; output file name for predictions}
\item{variances}{character; output file name for kriging variances}
\item{xcol}{integer; position of the x column in the GeoEAS file}
\item{ycol}{integer; position of the y column in the GeoEAS file}
\item{zcol}{integer; position of the z column in the GeoEAS file}
\item{vcol}{integer; position of the target variable column in the GeoEAS file}
\item{Xcols}{integer; column numbers for the list of covariates}
}
\details{To run the script under Windows OS you need to obtain the pre-compiled \code{gstat.exe} program from the www.gstat.org website, and put it in some directory e.g. \code{c:/gstat/}. Then add the program to your path (see environmental variable under Windows > Control panel > System > Advanced > Environmental variables), or copy the exe program directly to some windows system directory.}
\note{The advantage of using \code{gstat.exe} is that it loads large grids into memory much faster than if you use gstat in R, hence it is potentially more suited for computing with large grids. The drawback is that you can only pass simple linear regression models to \code{gstat.exe}. The stand-alone gstat is not maintained by the author of gstat any more.}
\author{ Tomislav Hengl }
\references{
\itemize{
\item Bivand, R.S., Pebesma, E.J., and \enc{Gómez}{Gomez}-Rubio, V., (2008) \href{http://www.asdar-book.org/}{Applied Spatial Data Analysis with R}. Springer, 378 p.
\item Pebesma, E., (2003) \href{http://www.gstat.org/gstat.pdf}{Gstat user's manual}. Dept. of Physical Geography, Utrecht University, p. 100, www.gstat.org
}
}
\seealso{ \code{\link{write.data}}, \code{\link{fit.gstatModel}}, \code{gstat::krige} }
\examples{
\dontrun{
library(sp)
library(gstat)
# Meuse data:
demo(meuse, echo=FALSE)
# fit a model:
omm <- fit.gstatModel(observations = meuse, formulaString = om~dist,
family = gaussian(log), covariates = meuse.grid)
str(omm@vgmModel)
# write the regression matrix to GeoEAS:
meuse$log_om <- log1p(meuse$om)
write.data(obj=meuse, covariates=meuse.grid["dist"],
outfile="meuse.eas", methodid="log_om")
writeGDAL(meuse.grid["dist"], "dist.rst", drivername="RST", mvFlag="-99999")
makeGstatCmd(log_om~dist, vgmModel=omm@vgmModel,
outfile="meuse_om_sims.cmd", easfile="meuse.eas",
nsim=50, nmin=20, nmax=40, radius=1500)
# compare the processing times:
system.time(system("gstat meuse_om_sims.cmd"))
vgmModel = omm@vgmModel
class(vgmModel) <- c("variogramModel", "data.frame")
system.time(om.rk <- krige(log_om~dist, meuse[!is.na(meuse$log_om),],
meuse.grid, nmin=20, nmax=40, model=vgmModel, nsim=50))
}
}
\keyword{methods}
|
62176bb3e3537b67123abe5870fab57fd6ba979f
|
ef1188cbd0cfdbe4a1620b9292c72b2437fdeb4c
|
/cachematrix.R
|
11369c791da66dc3600ccf3499bcac28eda60f36
|
[] |
no_license
|
mbbayside/ProgrammingAssignment2
|
9236dd26049260a48ae82156b7ad01e1a2e10367
|
9161ca6bb551fac62a52d2cb507db37cb5bf68b3
|
refs/heads/master
| 2021-01-18T17:45:55.016200
| 2015-03-14T18:28:59
| 2015-03-14T18:28:59
| 32,122,315
| 0
| 0
| null | 2015-03-13T02:44:35
| 2015-03-13T02:44:35
| null |
UTF-8
|
R
| false
| false
| 3,703
|
r
|
cachematrix.R
|
## This set of functions supports calculating and caching the inverse of a
## matrix. This is useful for pre-calculating the matrix inverse (a potentially
## costly computation) when this inverse is to be used repetitively.
##
## -- Function makeCacheMatrix:
## Create an object that manages a specified matrix and its
## inverse calculated using the cacheSolve function.
##
## -- Function cacheSolve:
## Calculate the inverse of the matrix stored in object created with
## makeCacheMatrix. If the inverse does not already exist, it is
## calculated and stored back in the object. Otherwise, the value of the
## cached inverse is returned
##
##
## -----------------------------------------------------------------------------
## Function makeCacheMatrix
## Create an object that manages a specified matrix and its
## inverse calculated using the cacheSolve function.
## Inputs:
## An invertible matrix
## Outputs:
## A object that consists of a list of functions for managing the matrix
## and its inverse
## set: store the matrix and clear the inverse placeholder
## get: retrieve the matrix
## setminv: store the matrix inverse
## getminv: retrieve the inverse
##
## Construct a cache-aware wrapper around an invertible matrix.
## The matrix and its (lazily computed) inverse live in this closure's
## environment; cacheSolve() fills the inverse slot on first use.
## Returns a list of accessor functions:
##   set(m)       -- replace the stored matrix and invalidate the cache
##   get()        -- return the stored matrix
##   setminv(inv) -- store a computed inverse in the cache
##   getminv()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(mat = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  minv <- NULL

  list(
    set = function(m = matrix()) {
      # Replace the matrix in the enclosing environment and drop any
      # previously cached inverse so it is recomputed on demand.
      mat <<- m
      minv <<- NULL
    },
    get = function() mat,
    setminv = function(invtostore = matrix()) {
      minv <<- invtostore
    },
    getminv = function() minv
  )
}
## -----------------------------------------------------------------------------
## Function cacheSolve:
## Calculate the inverse of the matrix stored in object created with
## makeCacheMatrix. If the inverse does not already exist, it is calculated
## and stored back in the object. Otherwise, the value of the cached inverse
## is returned
## Inputs:
## An object created by the MakeCacheMatrix function
## Outputs:
## The inverse of the matrix in the input object (calculated or cached value)
##
## Return the inverse of the matrix wrapped by a makeCacheMatrix object.
## On the first call the inverse is computed with solve() and stored back
## into the object; subsequent calls return the cached value (announced
## with a message) without recomputing.
##   mat - object produced by makeCacheMatrix()
##   ... - unused; kept for signature compatibility
cacheSolve <- function(mat, ...) {
  cached <- mat$getminv()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(mat$get())
    mat$setminv(inverse)
    return(inverse)
  }
  # Cache hit: announce and return the stored inverse.
  message("getting cached inverse")
  cached
}
|
a2db300af16f170db1d7f928c5072f30bfab609c
|
1aed97642180d78ed826ac55ecdf2ed5a0b7e300
|
/Nurse_Project/Hormone.R
|
fcd5a1781b949a0fe2cd0712a492ea1daee9fd60
|
[] |
no_license
|
xutaosjtu/Nurse-project
|
e5ccf903237aa41ed6cf6bc72d244f31790e1cfc
|
3c51c7c1e3a98711a8b5ef09b5c4796e2b7e4d80
|
refs/heads/master
| 2021-01-25T05:34:51.672983
| 2014-07-14T08:00:25
| 2014-07-14T08:00:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,522
|
r
|
Hormone.R
|
# Load the hormone measurements and order them by subject (SW_Nr),
# sampling date and sample number.
hormone = read.csv("data/Hormone data_sul.csv" )
# Columns 10-12 hold the three hormone concentrations.
# NOTE(review): assumes a fixed column layout in the CSV -- confirm.
colnames(hormone)[10:12] = c("Melatonin", "Cortisol", "Estradiol")
hormone = hormone[order(hormone$SW_Nr, hormone$Probennahme_Dat, hormone$Proben_Nr),]
#hormone$Probennahme_Uhr = hormone$Probennahme_Uhr*24
hormone$Probennahme_Dat=as.character(hormone$Probennahme_Dat)
# Log-transform and z-standardise each hormone so they share one scale
# with the metabolite values plotted below.
hormone$Melatonin = scale(log(hormone$Melatonin))
hormone$Cortisol = scale(log(hormone$Cortisol))
hormone$Estradiol = scale(log(hormone$Estradiol))
# Combine sampling date and clock time into a single timestamp.
hormone$time = paste(hormone$Probennahme_Dat, hormone$Probennahme_Uhr)
hormone$time = strptime(hormone$time, "%Y.%m.%d %H:%M")
# data.merged and valid_measures are defined elsewhere (sourced script);
# log-standardise the metabolite columns the same way as the hormones.
metabo = data.merged
metabo[, valid_measures] = scale(log(metabo[, valid_measures]))
# --- Per-subject time-series plots of metabolite C0 vs. hormone levels ---
# One panel per subject for the day shift (red) and one for the night shift
# (blue). pch codes: 1 = C0, 2 = cortisol, 3 = melatonin, 4 = estradiol.
for(p in levels(hormone$SW_Nr)){
  metabo_person = subset(metabo, SW_Nr==p&Schichtdienst=='Tagschicht')
  hormone_person = subset(hormone, SW_Nr==p&Schichtdienst=='Tagschicht')
  # fixed y range: both metabolites and hormones were z-scored above
  yrange = c(-3,3)
  plot(metabo_person$time, metabo_person$C0, type = 'b', col="red", ylim = yrange, pch = 1)
  points(hormone_person$time, hormone_person$Cortisol, type = 'b', col = "red", pch = 2)
  points(hormone_person$time, hormone_person$Melatonin, type = 'b', col = "red", pch = 3)
  points(hormone_person$time, hormone_person$Estradiol, type = 'b', col = "red", pch = 4)
  # NOTE(review): the night-shift panel uses the unscaled data.merged (not
  # metabo) and a data-driven y range starting at 0 -- confirm intended
  metabo_person = subset(data.merged, SW_Nr==p&Schichtdienst=='Nachtschicht')
  hormone_person = subset(hormone, SW_Nr==p&Schichtdienst=='Nachtschicht')
  yrange = c(0, max(max(metabo_person$C0, na.rm=T), max(hormone_person$Melatonin, na.rm=T)))
  plot(metabo_person$time, metabo_person$C0, type = 'b', col="blue", ylim = yrange, pch = 1)
  points(hormone_person$time, hormone_person$Cortisol, type = 'b', col = "blue", pch = 2, lty=2)
  points(hormone_person$time, hormone_person$Melatonin, type = 'b', col = "blue", pch = 3, lty=3)
  points(hormone_person$time, hormone_person$Estradiol, type = 'b', col = "blue", pch = 4, lty=4)
}
##ToDo: plot function for the correlation between metabolite and hormone levels
# --- Per-subject, per-shift plots of time-binned mean C0 vs. mean cortisol ---
# NOTE(review): relies on hour_cut columns and on aggregate.2(), which are
# defined further down in this file; those must be evaluated first when
# sourcing top-to-bottom.
for(p in unique(data.merged$SW_Nr)){
  for(shift in c('Tagschicht', 'Nachtschicht')){
    metabo_person = subset(metabo, SW_Nr==p&Schichtdienst==shift)
    hormone_person = subset(hormone, SW_Nr==p&Schichtdienst==shift)
    # mean C0 / mean cortisol per (time-of-day bin, sampling date)
    y1 = aggregate.2(metabo_person$C0, by=list(metabo_person$hour_cut,metabo_person$Probennahme_Dat))
    y2 = aggregate.2(hormone_person$Cortisol, by=list(hormone_person$hour_cut, hormone_person$Probennahme_Dat))
    # day shift in red, night shift in blue; pch 2 = C0, pch 3 = cortisol
    if(shift == "Tagschicht"){
      col = "red"
    }
    else {col="blue"}
    plot(y1$time, y1$x, type="b", ylim = range(y1$x, y2$V1), col=col, pch = 2)
    points(y2$time, y2$V1, type="b", col=col, pch = 3)
  }
}
# Mean-aggregate `data` over the grouping factors in `by` (NA removed), then
# rebuild a POSIXlt `time` column from the date (Group.2) and the clock-time
# part of the first grouping label (Group.1, e.g. an hour_cut interval label).
aggregate.2 <- function(data, by) {
  agg <- as.data.frame(aggregate(data, by, mean, na.rm = TRUE))
  # replace the first grouping column with the clock-time token of its label
  labels <- as.character(agg[[1]])
  agg[[1]] <- sapply(labels, function(lbl) {
    unlist(strsplit(lbl, split = " ", fixed = TRUE))[2]
  })
  # "YYYY.MM.DD HH:MM[:SS]" -> POSIXlt (trailing seconds are ignored)
  agg$time <- strptime(paste(agg$Group.2, agg$Group.1), "%Y.%m.%d %H:%M")
  agg
}
#############################################################################
### Analysis:
### 1. Categorize the sampling time, calculate the aggregated hormone levels and metabolite concentrations at different time intervals
### 2. Correlation analysis between metabolite concentrations and hormone levels.
### 3. Analyze the metabolite secretion rate in categorized time intervals.
###
#############################################################################
### categorized metabolite concentration and hormone levels
### time intervals were set at 0:00:00~8:00:00, 8:00:00~16:00:00, 16:00:00~ 24:00:00
# bin each sample's clock time into one of three 8-hour intervals
hormone$hour = strptime(hormone$Probennahme_Uhr, "%H:%M")
hormone$hour_cut = cut(hormone$hour, breaks = strptime(c("0:00:00","8:00:00", "16:00:00", "23:59:59"), "%H:%M:%S"))
data.merged$hour = strptime(data.merged$Probennahme_Uhr, "%H:%M")
data.merged$hour_cut = cut(data.merged$hour, breaks = strptime(c("0:00:00","8:00:00", "16:00:00", "23:59:59"), "%H:%M:%S"))
# re-derive the standardized metabolite table so it carries hour_cut
metabo = data.merged
metabo[, valid_measures] = scale(log(metabo[, valid_measures]))
## aggregate the metabolite concentration according to the time intervales
metabo_aggre = aggregate(metabo[,valid_measures], by=list(metabo$SW_Nr, metabo$hour_cut,metabo$Probennahme_Dat, metabo$Schichtdienst), mean, na.rm=T)
colnames(metabo_aggre)[1:4]=c("SW_Nr","hour", "date", "shift")
hormone_aggre = aggregate(hormone[, c("Melatonin","Cortisol","Estradiol")], by=list(hormone$SW_Nr, hormone$hour_cut, hormone$Probennahme_Dat, hormone$Schichtdienst), mean, na.rm=T)
colnames(hormone_aggre)[1:4]=c("SW_Nr","hour", "date", "shift")
## matched metabolite measurements and hormone levels
# merge on the shared key columns (SW_Nr, hour, date, shift)
match_metabo_hormone = merge(metabo_aggre, hormone_aggre)
match_metabo_hormone = match_metabo_hormone[order(match_metabo_hormone$SW_Nr, match_metabo_hormone$date, match_metabo_hormone$hour),]
## heatmap of correlation between metabolites and hormorns
require(corrplot)
pdf("correlation between metabolite with hormones.pdf", width = 20, height=20)
# NOTE(review): col1 is defined but never passed to corrplot() -- confirm
col1 = colorRampPalette(c("#053061","#2166AC","#4393C3","#92C5DE", "#D1E5F0", "#FFFFFF", "#FDDBC7","#F4A582","#D6604D", "#B2182B", "#67001F"))
corrplot(cor(match_metabo_hormone[,-c(1:4)], use="pair"), tl.col = "black")
dev.off()
## using linear mixed effect model to estimate the association between hormone and metabolites
require(nlme)
rst = NULL
# one model per metabolite: metabolite ~ hormones, random intercept per subject
for(i in valid_measures){
  match_metabo_hormone$m = match_metabo_hormone[,i]
  model = lme(m~ Melatonin + Cortisol + Estradiol, data = match_metabo_hormone, random = ~1|SW_Nr, na.action = na.omit)
  tmp = summary(model)$tTable
  # NOTE(review): rows 1-3 of tTable are (Intercept), Melatonin and Cortisol;
  # the Estradiol row (4) is not captured here -- confirm intended
  rst = rbind(rst, c(tmp[1,], tmp[2, ], tmp[3,]))
}
rownames(rst) = valid_measures
write.csv(rst, "association between metabolites and hormones_lme.csv")
## 3. Analyze the metabolite secretion rate in categorized time intervals.
# add subject-level covariates (age, BMI, smoking, disease, case/control)
metabo_aggre = merge(metabo_aggre, samples.addition, by.x = "SW_Nr", by.y = "P_ID")
metabo_aggre = metabo_aggre[order(metabo_aggre$SW_Nr, metabo_aggre$date, metabo_aggre$hour),]
levels(metabo_aggre$Kontrolle)=c("case", "control")
##GEE
# Chronic night-shift effect: case vs. control among day-shift samples only,
# exchangeable working correlation within subject; SW1041 excluded.
require(gee)
rst = NULL
for(i in valid_measures){
  metabo_aggre$m = metabo_aggre[,i]
  model = gee(m ~ as.factor(Kontrolle)+ Alter + BMI + as.factor(AR_Rauch_zurzt)+as.factor(SD), #
              id = SW_Nr,
              data = metabo_aggre,
              subset = shift=="Tagschicht"& SW_Nr!="SW1041", #&data.merged$Alter>=45
              na.action=na.omit,
              corstr = "exchangeable"
  )
  # keep the case/control coefficient row
  rst = rbind(rst, summary(model)$coef[2,])
}
rownames(rst) = valid_measures
# two-sided p-value from the robust z statistic (column 5 of the coef row)
rst = data.frame(rst, p.value = 2*pnorm(-abs(rst[,5])))
rst = data.frame(rst, fdr = p.adjust(rst$p.value, method = "BH"), bonf = p.adjust(rst$p.value, method = "bonf"))
# flip the sign so the estimate reads as case minus control -- TODO confirm
rst$Estimate = -rst$Estimate
write.csv(rst, "categorized time_Chronic effect of night shift work_GEE_daywork_age_BMI_smoking_disease_exclude diab.csv")
# Short-term night-shift effect: within cases only, night vs. day shift,
# linear mixed model with a per-subject random intercept; SW1041 excluded.
require(nlme)
rst=NULL
for(i in valid_measures){
  metabo_aggre$m = metabo_aggre[,i]
  model = lme(m ~ shift + Alter + BMI + as.factor(AR_Rauch_zurzt) + as.factor(SD),
              metabo_aggre,
              subset = Kontrolle=="case" & SW_Nr!="SW1041",
              random = ~ 1|SW_Nr,
              na.action=na.omit
  )
  # keep the shift coefficient row of the t-table
  rst = rbind(rst, summary(model)$tTable[2,])
}
rownames(rst) = valid_measures
rst = data.frame(rst)
# flip the sign of the effect estimate -- presumably so it reads as night
# minus day; TODO confirm against the factor level order of `shift`
rst$Value = -rst$Value
rst = data.frame(rst, fdr = p.adjust(rst$p.value, method = "BH"), bonf = p.adjust(rst$p.value, method = "bonf"))
write.csv(rst, file = "categorized time_Short term effect of night shift_mixed model_age_BMI_smoking_disease.csv")
# --- Ad-hoc exploratory plots for subject SW1030 and for C0 by hour ---
plot(metabo_aggre$C0[which(metabo_aggre$SW_Nr=="SW1030")], type = "b", col=c("blue", "red")[metabo_aggre$shift[which(metabo_aggre$SW_Nr=="SW1030")]])
plot(match_metabo_hormone$C0[which(match_metabo_hormone$SW_Nr=="SW1030")], type = "b", col=c("blue", "red")[match_metabo_hormone$shift[which(match_metabo_hormone$SW_Nr=="SW1030")]])
# NOTE(review): `add = T` is not an argument of plot.formula; each of these
# calls opens a NEW plot rather than overlaying -- use points() to overlay
plot(C0~hour, match_metabo_hormone, subset=(shift=="Tagschicht"&Kontrolle=="case"))
plot(C0~hour, match_metabo_hormone, subset=shift=="Nachtschicht"&Kontrolle=="case", add = T, col = "grey")
plot(C0~hour, match_metabo_hormone, subset=(shift=="Tagschicht"&Kontrolle=="case"))
plot(C0~hour, match_metabo_hormone, subset=shift=="Tagschicht"&Kontrolle=="control", add = T, col = "grey")
# --- 3x3 grid of per-metabolite boxplots (by time bin, case/control x shift),
# written to a multi-page PDF. k/nrow/ncol track the current grid cell; a new
# page is started after each full 3x3 layout.
# NOTE(review): the loop variables nrow/ncol shadow base::nrow/ncol here.
require(ggplot2)
require(grid)
pdf("metabolite concentration at different time period of the day.pdf", width = 30, height = 20)
k=1;nrow=1;ncol=1;newp=F
pushViewport(viewport(layout = grid.layout(3, 3)))
for(i in valid_measures){
  match_metabo_hormone$m = match_metabo_hormone[,i]
  p = ggplot(match_metabo_hormone, aes(hour, m))
  p = p + geom_boxplot(aes(fill=interaction(factor(Kontrolle), factor(shift))))+ggtitle(i)
  print(p, vp = viewport(layout.pos.row = nrow, layout.pos.col = ncol))
  # page full: start a fresh page and layout
  if(ncol==3 & nrow==3) {
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(3, 3)))
  }
  # advance to the next grid cell (row-major), wrapping after 9 panels
  if(k<9) {
    if(k%%3==0) nrow=nrow+1
    k = k+1
    ncol = k-3*(nrow-1)
  }
  else {
    k=1
    nrow = 1
    ncol = 1
  }
}
dev.off()
# --- Metabolite vs. cortisol scatterplots ---
# single overview plot: C0 vs. cortisol with a smoother per shift
p = ggplot(match_metabo_hormone, aes(x = Cortisol, y = C0, group = shift, col =factor(shift)))
p + geom_point() + geom_smooth()# + geom_line(aes(y = Cortisol), col="red")
# 3x3 grid of per-metabolite scatterplots with linear fits (same grid
# bookkeeping as the boxplot loop above)
pdf("metabolite correlation with Cortisol.pdf", width = 20, height = 20)
k=1;nrow=1;ncol=1;newp=F
pushViewport(viewport(layout = grid.layout(3, 3)))
for(i in valid_measures){
  match_metabo_hormone$m = match_metabo_hormone[,i]
  ##change the code here if plot some thing else
  p = ggplot(match_metabo_hormone, aes(x = Cortisol, y = m))
  p = p + geom_point() + geom_smooth(method = "lm")+ggtitle(i)
  ##
  print(p, vp = viewport(layout.pos.row = nrow, layout.pos.col = ncol))
  # page full: start a fresh page and layout
  if(ncol==3 & nrow==3) {
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(3, 3)))
  }
  # advance to the next grid cell (row-major), wrapping after 9 panels
  if(k<9) {
    if(k%%3==0) nrow=nrow+1
    k = k+1
    ncol = k-3*(nrow-1)
  }
  else {
    k=1
    nrow = 1
    ncol = 1
  }
}
dev.off()
# --- Align each subject/shift series to its own first sampling date ---
# hour2 shifts every timestamp so that each subject x shift series starts at
# day offset 0 (a - a[1] is the day offset of column 3, the date column).
metabo_aggre$hour2 = metabo_aggre$hour
for(p in levels(metabo_aggre$SW_Nr)){
  for(sh in levels(metabo_aggre$shift)){
    subset = which(metabo_aggre$SW_Nr==p& metabo_aggre$shift==sh)
    a = metabo_aggre[subset, 3]
    metabo_aggre$hour2[subset] = metabo_aggre$hour2[subset]+as.numeric(a-a[1])
  }
}
metabo_aggre$hour2 = as.character(metabo_aggre$hour2)
# date part of the aligned timestamp (relative day label)
metabo_aggre$date2 = sapply(metabo_aggre$hour2, function(x) strsplit(x, split = " ")[[1]][1])
# NOTE(review): `m` and `i` below are leftovers from the loops above -- this
# plots only the LAST metabolite assigned to metabo_aggre$m; confirm intended
p = ggplot(metabo_aggre, aes(hour2, m))
p = p + geom_boxplot( aes(fill=interaction(factor(date2), factor(Kontrolle))) ) + ggtitle(i)
p = p + facet_grid(shift~.)
|
8c8998b1bcd74be2edbdffc4f654d38dd7a066d7
|
0889a42eb6c854a6a69606759431b994d03e3667
|
/shiny_asteroids/ui.r
|
a75982d8309aa2a7c37b602a646441466dd52877
|
[] |
no_license
|
nachocab/stats_seminar_talk_2014
|
c7b130721fac07f7164588de21462d0485f25330
|
d35d5ab026cdb26934161893cb9f71b975941d6a
|
refs/heads/master
| 2021-01-19T03:11:27.391371
| 2014-04-07T14:47:25
| 2014-04-07T14:47:25
| 18,366,315
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 302
|
r
|
ui.r
|
# Shiny UI (classic pageWithSidebar layout): two range sliders filter the
# asteroid flybys; the server side renders output "asteroids_plot".
shinyUI(pageWithSidebar(
  headerPanel("Asteroid flybys"),
  sidebarPanel(
    # flyby distance range in lunar distances (LD)
    sliderInput("distance", "Distance (LD)", min = 0, max = 5, step = .1, value = c(0,5)),
    # flyby year range
    sliderInput("date", "Year", min = 2000, max = 2100, value = c(2000,2100))
  ),
  mainPanel(
    plotOutput("asteroids_plot")
  )
))
|
562778dd95cc55610eddd91d8dcc6fd8df33576b
|
68eb16486b156005533d1de33ab26e54f75d5d66
|
/Course 7- Linear Regression/Week2/RegPred.R
|
2e2c4ac3e7ea4986730146a1a7b2607b10e8f876
|
[] |
no_license
|
shovitraj/DataScienceSpecialization-JHU
|
b69559b08d2b85ca5e2d2a355a76bf3b588f736b
|
538b4d90cdced90d271fc2e9b2e8b0efc376705c
|
refs/heads/main
| 2023-08-22T16:38:03.361482
| 2021-09-20T00:01:49
| 2021-09-20T00:01:49
| 408,240,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 641
|
r
|
RegPred.R
|
# Regression prediction demo on the diamond dataset (UsingR):
# price (SIN $) modeled as a linear function of mass (carats).
library(UsingR)
data(diamond)
library(ggplot2)
diamond
table(diamond$carat)
# scatterplot with fitted least-squares line
g=ggplot(diamond, aes(x=carat, y=price))
g= g + xlab("Mass(carats)")
g= g + ylab("Price(SIN $)")
g = g + geom_point(size = 7, colour = "black", alpha=0.5)
g = g + geom_point(size = 5, colour = "blue", alpha=0.2)
g = g + geom_smooth(method = "lm", colour = "black")
g
# baseline fit: price ~ carat
fit <- lm(price~carat, data=diamond)
summary(fit)
coef(fit)
# mean-centered predictor: intercept = expected price at the average carat
fit2 <- lm(price~I(carat-mean(carat)), data=diamond)
coef(fit2)
# rescaled predictor: slope = price change per 1/10 carat
fit3 <- lm(price~I(carat*10), data=diamond)
coef(fit3)
# predict prices for three new diamonds, by hand and via predict()
newx <- c(0.16, 0.27, 0.34)
coef(fit)[1] + coef(fit)[2] * newx
predict(fit, newdata = data.frame(carat = newx))
|
06d000e0f7f58eb77d3e3b77b8fc783d6ab23135
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/fenamidone.R
|
5df0c2ae40d60472752098269ef27788fd36ded6
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
fenamidone.R
|
# Render fenamidone.Rmd to HTML with knitr; rgl is loaded for any 3D plots
# the Rmd produces. The commented lines show an alternative knit -> pandoc
# pipeline that was replaced by knit2html().
library("knitr")
library("rgl")
#knit("fenamidone.Rmd")
#markdownToHTML('fenamidone.md', 'fenamidone.html', options=c("use_xhml"))
#system("pandoc -s fenamidone.html -o fenamidone.pdf")
knit2html('fenamidone.Rmd')
|
601aaf6267a2a1c283f5b5eaf3b4939f41602d44
|
73df6be0fec5bac3ec5ca552438568a663e769e3
|
/R/FunctionsForGenes.R
|
65a3ae14de1284c3209c6da20c02efd766864f43
|
[] |
no_license
|
kasaha1/kasaBasicFunctions
|
1fecefd905520ce5e0ad7de07d8f8c66fda9e6d3
|
d572d2953842cbc82d98ad2730a18dd90c7a1dd7
|
refs/heads/master
| 2022-07-13T09:37:40.737163
| 2022-06-27T05:18:33
| 2022-06-27T05:18:33
| 90,565,815
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,947
|
r
|
FunctionsForGenes.R
|
#' Quick data-quality overview of a data frame
#'
#' @param x input dataframe
#' @return a list with per-column classes, NA counts, unique-value counts,
#'   duplicate counts, and an Amelia missingness map
#' @export
kasa.dataCleaning <- function(x){
  # element names (including the "dulplicated" typo) kept for compatibility
  list(
    classes = sapply(x, function(column) class(column)),
    na = sapply(x, function(column) sum(is.na(column))),
    unique = sapply(x, function(column) length(unique(column))),
    dulplicated = sapply(x, function(column) sum(duplicated(column))),
    map = Amelia::missmap(x, main = "Missing values vs observed")
  )
}
#' Duplicated value removal by SD
#'
#' Collapses rows sharing the same key (first column) down to the single row
#' with the highest standard deviation across the remaining columns, while
#' preserving the original row order of the survivors.
#'
#' @param x input dataframe of gene matrix; first column is the key (gene id),
#'   remaining columns are assumed numeric -- TODO confirm
#' @return removed dataframe
#' @import dplyr
#' @export
kasa.duplicationRemovalBySD <- function(x){
  matrix_data <- as.matrix(x[,-c(1)])
  # per-row SD used to rank duplicate keys
  sd <- apply(matrix_data,1,sd)
  # original row order, restored after the grouped filtering below
  order_num <- seq(1:nrow(x))
  transformed <- cbind(order_num,sd,x)
  # temporarily replace all column names with safe placeholders so the dplyr
  # verbs below can reference helper columns without name collisions
  name_list <- colnames(transformed)
  colnames(transformed) <- paste0("var_",seq(1:ncol(transformed)))
  colnames(transformed)[1:3] <- c("order_num","sd","grouped")
  # keep the highest-SD row per key, then restore the original ordering
  res <- transformed %>% arrange(desc(sd)) %>% group_by(grouped) %>% filter(row_number()==1) %>% ungroup() %>% arrange(order_num)
  # restore the real column names and drop the two helper columns
  colnames(res) <- name_list
  return(res[c(-1,-2)])
}
#' Transpose a keyed matrix stored as a data frame
#'
#' @param x input dataframe of gene matrix; the first column supplies the
#'   column names of the transposed result
#' @param firstColumnName name given to the first column of the output, which
#'   holds the former column names (default "sample")
#' @return transposed matrix as a dataframe
#' @export
kasa.transposeMatrix <- function(x, firstColumnName = "sample") {
  # first column becomes the new column names; the rest is flipped
  new_colnames <- t(x[1])
  flipped_mat <- t(x[-1])
  colnames(flipped_mat) <- new_colnames
  flipped <- as.data.frame(flipped_mat)
  # former column names become the first column of the result
  out <- cbind(row.names(flipped), flipped)
  row.names(out) <- NULL
  colnames(out)[1] <- firstColumnName
  out[, 1] <- as.character(out[, 1])
  out
}
#' Median-center each gene (row)
#'
#' @param x input dataframe of gene matrix (first column = gene id)
#' @return the input with each numeric cell replaced by its value minus the
#'   row median (NA values ignored when computing the median)
#' @export
kasa.geneMedianCentering <- function(x) {
  expr <- as.matrix(x[-1])
  row_medians <- apply(expr, 1, median, na.rm = TRUE)
  # matrix minus vector recycles row-wise, i.e. subtracts each row's median
  cbind(x[1], expr - row_medians)
}
#' Replace NA values with the row median
#'
#' @param x input dataframe of gene matrix (first column = gene id)
#' @return the same data frame with each NA replaced by the median of the
#'   non-missing values in its row (all-NA rows remain NA)
#' @export
kasa.transform_na_to_median <- function(x) {
  mat <- as.matrix(x[-1])
  # Fix: seq_len() instead of 1:nrow(x) -- the original iterated c(1, 0) and
  # errored on a 0-row input; seq_len(0) keeps the loop empty.
  for (i in seq_len(nrow(x))) {
    row_median <- median(mat[i, ], na.rm = TRUE)
    mat[i, is.na(mat[i, ])] <- row_median
  }
  cbind(x[c(1)], mat)
}
#' is.nan() S3 method for data frames
#'
#' @param x dataframe
#' @return logical matrix the same shape as `x`, TRUE where a cell is NaN
#' @export
is.nan.data.frame <- function(x) {
  # apply is.nan() column-wise, then bind the columns back into a matrix
  nan_by_column <- lapply(x, is.nan)
  do.call(cbind, nan_by_column)
}
#' Scale each gene (row) to unit standard deviation
#'
#' @param x input dataframe of gene matrix (first column = gene id)
#' @return data frame with each row divided by its SD; zero-variance rows
#'   (which yield NaN) are reset to 0
#' @export
kasa.geneStandardization <- function(x) {
  expr <- as.matrix(x[-1])
  row_sds <- apply(expr, 1, sd, na.rm = TRUE)
  # matrix / vector recycles row-wise, dividing each row by its own SD
  res <- cbind(x[1], expr / row_sds)
  # relies on the is.nan.data.frame() method defined in this file
  res[is.nan(res)] <- 0
  res
}
#' Z-score each gene (row)
#'
#' @param x input dataframe of gene matrix (first column = gene id)
#' @return z-scored genes matrix as a data frame
#' @export
kasa.geneZscoring <- function(x) {
  expr <- as.matrix(x[-1])
  # scale() standardizes columns, so transpose to put genes in columns,
  # z-score, then transpose back to the original orientation
  zscored <- t(scale(t(expr)))
  cbind(x[1], zscored)
}
#' gene Robust_modified z-scoring
#'
#' Robust z-score per gene (row): (x - median) / MAD. Rows whose MAD is 0
#' fall back to the mean absolute deviation about the median, rescaled by
#' 1.253314 (the MeanAD-to-SD consistency factor under normality).
#'
#' @param x input dataframe of gene matrix (first column = gene id)
#' @return z-scored genes matrix
#' @export
kasa.geneRobustZscoring <- function(x){
  raw.data <- as.matrix(x[-1])
  median.table <- apply(raw.data ,c(1),median,na.rm = T)
  median_centered_abs <- abs(raw.data-median.table)
  # mean absolute deviation about the median, rescaled to be comparable to SD
  MeanAD_tmp <- apply(median_centered_abs ,c(1),mean,na.rm = T)*1.253314
  # MADtmp <- apply(median_centered_abs ,c(1),median,na.rm = T)
  # res.tmp.1 <- (0.6744908*(raw.data-median.table))/MADtmp
  # stats::mad() already includes the 1.4826 consistency constant
  MADtable <- apply(raw.data ,c(1),mad,na.rm = T)
  MADtable[which(MADtable == 0)] <- MeanAD_tmp[which(MADtable == 0)] # replacing MAD==0 with meanAD
  res.tmp <- (raw.data-median.table)/MADtable
  res <- cbind(x[1],res.tmp)
  return(res)
}
|
1c493127e86b9c3c9628885c6b6fb9810a22dfad
|
958b135d6d988a1d3977a45a4ec71a4ea6c4d607
|
/R/code/other_code_files/find_kdp_optimal_params.R
|
efa2a8ba1bc9a809b32163e69c8791939b4c45d5
|
[] |
no_license
|
VaibhavBehl/Kaggle_how_much_did_it_rain_ii
|
8967bd2f112562abf2a25d9e0d6a13b527defe05
|
2a2cfe542f1c608c955716bf524a676b062c9e08
|
refs/heads/master
| 2021-01-10T15:25:03.555803
| 2015-12-31T22:12:02
| 2015-12-31T22:12:02
| 48,843,149
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 995
|
r
|
find_kdp_optimal_params.R
|
#to find optimal params for when using both Z and Zdr
source("utility_functions.R")
#original values
# Ground truth: mean Expected rainfall per gauge Id. trData is a data.table
# presumably loaded by utility_functions.R or the caller -- TODO confirm.
trgData <- trgData <- trData[, .(
  target = mean(Expected)
), Id]
# Grid-search the (ai, bi) parameters of a KDP rain-rate model, scoring each
# pair by mean absolute error against trgData$target and appending the sorted
# results to kdp_mod.txt.
# NOTE(review): `ci` (third model parameter) and output_kdp_valid_time() are
# free variables -- they must exist in the enclosing environment or this
# function errors at runtime.
tempfun <- function() {
  c<-0  # iteration counter (shadows base::c as a value, not as a function)
  # candidate parameter values
  aiseq <- seq(2,9)*10
  aiseq <- c(aiseq,1,100)
  #aiseq <- c(40.6)
  biseq <- seq(0,1)/10
  biseq <- c(biseq, 100)
  #biseq <- c(0.945)
  #aiseq <- 0.005
  #biseq <- 0.866
  # NOTE(review): the first row of resultMat stays all-NA and is also written
  # to the output file
  resultMat <- matrix(NA, 1, 3)
  iter <- length(aiseq)*length(biseq)
  rtm <- 7.17*iter/60  # rough runtime estimate (presumably 7.17 s/iteration)
  print(paste('loop will run for iterations=',iter,', and time(min) = ',rtm))
  for (ai in aiseq) {
    for (bi in biseq) {
      c<-c+1
      predictionsMP <-output_kdp_valid_time(trData,ai,bi,ci)
      # mean absolute error vs. the per-Id ground truth
      minusV <- trgData$target - predictionsMP$Expected
      p <- sum(abs(minusV))/length(minusV)
      pVal <- c(ai,bi,p)
      resultMat <- rbind(resultMat, pVal)
      print(paste('count=',c, ', ai=',ai,', bi=',bi,', p=',p));
    }
  }
  summary(resultMat)
  # sort by error (V3) and append to the running results file
  resultMatDT <- data.table(resultMat, key="V3")
  print(resultMatDT)
  write.table(resultMatDT, append = T, "kdp_mod.txt")
}
|
a62b62bc136fe37b4e482e5bc3aadce689b8fa11
|
8d26d0d664bd1b19970f7793bfec7d57f1728338
|
/run_monocle.R
|
ea3e7223b07d3c2e320d34aa7717a50680f838b8
|
[] |
no_license
|
fanli-gcb/Core.RNAseq
|
497ebc7eca807050e57cf0dd62cbdff46e769fe0
|
bbce62cb3b1b0ffffbc7df93324318d766c686aa
|
refs/heads/master
| 2021-01-10T12:10:18.781827
| 2018-10-09T22:16:24
| 2018-10-09T22:16:24
| 53,674,054
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,542
|
r
|
run_monocle.R
|
#!/usr/bin/Rscript
# Monocle pseudotime pipeline: builds a CellDataSet from an FPKM table,
# runs differential expression, orders cells along a trajectory, and writes
# all diagnostic plots to a single PDF.
# load required packages
library(monocle)
library(reshape2)
library(ggplot2)
args <- commandArgs(T)
if (length(args) < 4) {
  cat("USAGE: ./run_monocle.R fpkm_table_file sample_sheet_file gene_attr_file out_pdf\n")
  q()
}
fpkm_table_file <- args[1]
sample_sheet_file <- args[2]
gene_attr_file <- args[3]
out_pdf <- args[4]
# read in data and set up objects
fpkm_matrix <- read.table(fpkm_table_file, header=T, sep="\t", row.names=1)
sample_sheet <- read.table(sample_sheet_file, header=T, sep="\t", row.names=1)
gene_ann <- read.table(gene_attr_file, header=T, row.names=1, sep="\t")
pd <- new("AnnotatedDataFrame", data=sample_sheet)
fd <- new("AnnotatedDataFrame", data=gene_ann)
cds <- newCellDataSet(as.matrix(fpkm_matrix), phenoData = pd, featureData = fd)
pdf(out_pdf, title="Monocle analysis")
# standard differential analysis
min_expression <- 0.1
cds <- detectGenes(cds, min_expr=min_expression)
# genes detected in at least 2 cells
expressed_genes <- row.names(subset(fData(cds), num_cells_expressed >= 2))
# keep only genes expressed (>= min_expression in some cell) in EVERY group
samples_by_group <- aggregate(rownames(pData(cds)), by=list(as.character(pData(cds)$group)), FUN=paste)$x
tmp <- lapply(samples_by_group, function(x) apply(exprs(cds[, unlist(x)]), 1, function(r) any(r >= min_expression)))
expressed_in_all_groups <- unlist(apply(data.frame(matrix(unlist(tmp), nrow=length(unlist(tmp[1])), byrow=F)), 1, function(x) all(x)))
expressed_in_all_groups.genes <- rownames(exprs(cds))[expressed_in_all_groups]
# sanity plot: distribution of standardized log FPKM vs. standard normal
L <- log(exprs(cds))
melted_dens_df <- melt(t(scale(t(L))))
qplot(value, geom = "density", data = melted_dens_df) + stat_function(fun = dnorm, size = 0.5, color = "red") + xlab("Standardized log(FPKM)") + ylab("Density")
# differential expression between groups; keep genes at FDR (qval) < 0.1
diff_test_res <- differentialGeneTest(cds[expressed_in_all_groups.genes,], fullModelFormulaStr = "expression~group")
sig_genes <- subset(diff_test_res, qval < 0.1)
sig_genes <- merge(fData(cds), sig_genes, by="row.names")
rownames(sig_genes) <- sig_genes$Row.names
sig_genes <- sig_genes[,-1]
# ordering analysis
# use the significant genes to order cells along a single-path trajectory
ordering_genes <- rownames(sig_genes)
cds <- setOrderingFilter(cds, ordering_genes)
cds <- reduceDimension(cds, use_irlba=F)
cds <- orderCells(cds, num_paths=1, reverse=F)
plot_spanning_tree(cds)
# genes that distinguish cell state
diff_test_res.state <- differentialGeneTest(cds[expressed_in_all_groups.genes,], fullModelFormulaStr = "expression~State")
diff_test_res.state <- merge(fData(cds), diff_test_res.state, by="row.names")
# genes that change as a function of pseudotime
diff_test_res.pt <- differentialGeneTest(cds[expressed_in_all_groups.genes,], fullModelFormulaStr = "expression~bs(Pseudotime)")
diff_test_res.pt <- merge(fData(cds), diff_test_res.pt, by="row.names")
# (optional) pseudotime plots of significant genes
my_genes <- as.character(subset(diff_test_res.pt, use_for_ordering==TRUE)$gene_id)
cds.subset <- cds[my_genes,]
for (gene in my_genes) {
  plot_genes_in_pseudotime(cds.subset[gene,], color_by="group")
}
# (optional) multi-factorial differential analysis
# clustering by pseudotime
full_model_fits <- fitModel(cds[expressed_in_all_groups.genes,], modelFormulaStr = "expression~bs(Pseudotime)")
expression_curve_matrix <- responseMatrix(full_model_fits)
clusters <- clusterGenes(expression_curve_matrix, k=4) # cluster::pam (partitioning around medoids) clustering with k=4
plot_clusters(cds[ordering_genes,], clusters)
# (optional) violin plots of genes
for (gene in my_genes) {
  df <- melt(exprs(cds)[gene,])
  p <- ggplot(df, aes(x=1, y=value, fill="red")) + geom_violin() + ggtitle(sprintf("%s",gene))
  print(p)
}
dev.off()
|
c2b4e5cad31e967441b8ee0a6c55a0bc57d70b2f
|
71f8f811fbfd86a99f3025994e7d969c3342b484
|
/Plot4.R
|
def8f7a512271722f0f2b6b8124e3464e29438b1
|
[] |
no_license
|
danishtamboli123/EPA-National-Emissions
|
97f3c7e4aa726b2e67c3f595c17ecfe726cf1ee3
|
55974ceccd40bb218443ac2783fc5df7974a0ae9
|
refs/heads/master
| 2022-11-26T01:40:00.305034
| 2020-07-15T22:25:07
| 2020-07-15T22:25:07
| 279,719,128
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,490
|
r
|
Plot4.R
|
# Plot 4: PM2.5 emissions from coal-combustion-related sources across the
# United States (EPA NEI data), saved as a bar plot and a box plot PNG.
# Check for if Zip has been Downloaded,else to download.
if(!file.exists("EPA_national_emissions.zip")){
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip","EPA_national_emissions.zip")
}
# Check for if Zip has been Unzipped,else to unzip.
if(!file.exists("EPA_national_emissions")){
  unzip("EPA_national_emissions.zip")
}
# Reading and storing the given datasets into R.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(ggplot2)
library(dplyr)
# Regex filtering the SCC dataset to find Sources that contain Coal keyword.
Contains_keyword_coal <- grepl("Fuel Comb.*Coal",SCC$EI.Sector)
# Subsetting the SCC dataset to contain only Values which are related to Coal Combustion.
required_subset_SCC <- SCC[Contains_keyword_coal,]
# Subsetting the NEI dataset to have only Emission reading from Sources related to Coal Combustion.
required_subset_Containing_Coal <- subset(NEI,NEI$SCC %in% required_subset_SCC$SCC)
# Summarizing the Subsetted Data by Year and finding the Total Emission in particular year.
# NOTE(review): summarized_data is never used below -- the bar chart relies
# on stat="identity" summing raw Emissions per year instead; confirm intended
summarized_data <- summarise(group_by(required_subset_Containing_Coal,year),sum(Emissions))
# Bar Plot for Total Emissions (per 1000 tons) in the United States related to Coal Combustion.
# NOTE(review): this first ggplot call renders to the default device; only
# the duplicate call between png()/dev.off() below is saved to disk
ggplot(data = required_subset_Containing_Coal,aes(year,Emissions/1000,fill=year)) + geom_bar(stat = "identity") + labs(title = "Total Emissions (per 1000 tons) in the United States related to Coal Combustion")
# Saving Bar plot to a PNG format
png(filename = "Plot4.png",height = 720,width = 1280)
ggplot(data = required_subset_Containing_Coal,aes(year,Emissions/1000,fill=year)) + geom_bar(stat = "identity") + labs(title = "Total Emissions (per 1000 tons) in the United States related to Coal Combustion")
dev.off()
# Box plot of Emission Levels in the United States caused by Coal Combustion.
ggplot(data = required_subset_Containing_Coal,aes(year,log10(Emissions))) + geom_boxplot(aes(as.character(year),log10(Emissions),color=type)) + labs(title = "Emission Levels in the United States caused by Coal Combustion.",x="Year",y="log10(Emissions")
# Saving Bar plot to a PNG format
png(filename = "Plot4_boxplot.png",height = 720,width = 1280)
ggplot(data = required_subset_Containing_Coal,aes(year,log10(Emissions))) + geom_boxplot(aes(as.character(year),log10(Emissions),color=type)) + labs(title = "Emission Levels in the United States caused by Coal Combustion.",x="Year",y="log10(Emissions")
dev.off()
|
9081f04e8380595c6af898d2a04fff065196066c
|
d8978ecd115f95d9e4f6d987d54c2cb6541a6bf4
|
/code/4_analyzeData/wrds/sizeAvailabilityAllMods.R
|
42b7d5cfee16955bb53983d892ea2bf3c3e22afc
|
[] |
no_license
|
emallickhossain/WarehouseClubs
|
f0eaab1b645e13654de655c2f13e47aa72b02a42
|
7867171cdb3ca3fe32ec778dd8043d538ab1f6ef
|
refs/heads/master
| 2021-06-28T05:37:21.813087
| 2020-09-16T21:49:48
| 2020-09-16T21:49:48
| 149,994,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,691
|
r
|
sizeAvailabilityAllMods.R
|
# Computes size availability of all non-food products in 2016
# Pipeline: Nielsen RMS movement files -> per-store product assortment ->
# average package-size quintile per ZIP / store channel -> plot vs. ZIP income.
library(data.table)
library(ggplot2)
library(ggthemes)
yrs <- 2016
threads <- 8
path <- "/scratch/upenn/hossaine/nielsen_extracts/RMS/2016/"
# imputed ZIP code for each store
zipImpute <- fread("/scratch/upenn/hossaine/zipImpute.csv",
                   select = c("zipImpute", "store_code_uc"))
# Getting products and upc versions
prod <- fread("/scratch/upenn/hossaine/fullProd.csv", nThread = threads,
              select = c("upc", "upc_ver_uc", "food", "product_module_code",
                         "quintile", "totalAmount"))[food == 0]
prod[, "food" := NULL]
rms <- fread(paste0(path, "Annual_Files/rms_versions_2016.tsv"), drop = "panel_year")
# Getting store and retail types
retailers <- fread("/scratch/upenn/hossaine/fullRetailers.csv")
stores <- fread(paste0(path, "Annual_Files/stores_2016.tsv"),
                select = c("store_code_uc", "retailer_code"))
retailers <- merge(retailers, stores, by = "retailer_code")
fileNames <- list.files("/scratch/upenn/hossaine/nielsen_extracts/RMS/2016/Movement_Files",
                        recursive = TRUE, full.names = TRUE)
# Getting annual selection of products for each store
# (accumulates unique upc x store pairs across all movement files)
zipSelection <- NULL
for (i in fileNames) {
  assort <- unique(fread(i, select = c("upc", "store_code_uc"), nThread = threads))
  assort <- merge(assort, rms, by = "upc")
  zipSelection <- rbindlist(list(zipSelection, assort), use.names = TRUE)
}
# attach product attributes, retailer channel, and imputed ZIP
fullData <- merge(zipSelection, prod, by = c("upc", "upc_ver_uc"))
fullData <- merge(fullData, retailers, by = "store_code_uc")
fullData <- merge(fullData, zipImpute, by = "store_code_uc")
# average package-size quintile by ZIP x channel, ZIP x channel x module,
# and ZIP overall
avgQuintileChannel <- fullData[, .(avgQtile = mean(quintile)),
                               by = .(zip_code = zipImpute, channel_type)]
avgQuintileMod <- fullData[, .(avgSize = mean(totalAmount)),
                           by = .(zip_code = zipImpute, channel_type,
                                  product_module_code)]
avgQuintileAll <- fullData[, .(avgQtile = mean(quintile)),
                           by = .(zip_code = zipImpute)]
# Getting ACS ZIP income (downloaded from AFF)
zipIncome <- fread("/scratch/upenn/hossaine/zipIncPop.csv")
# Merging with avg package quintile selection
# bin ZIPs by median income in $10k steps (top bin is open-ended)
avgQuintileChannel <- merge(avgQuintileChannel, zipIncome, by = "zip_code")
avgQuintileChannel[, "incBin" := cut(medInc, breaks = c(seq(0, 90000, 10000), Inf),
                                     labels = seq(10, 100, 10))]
avgQuintileChannel[, "incBin" := as.integer(as.character(incBin))]
avgQuintileMod <- merge(avgQuintileMod, zipIncome, by = "zip_code")
avgQuintileMod[, "incBin" := cut(medInc, breaks = c(seq(0, 90000, 10000), Inf),
                                 labels = c(as.character(seq(10, 90, 10)), "100"))]
# Graphing size availability by channel
graphData <- avgQuintileChannel[, .(avgAvail = mean(avgQtile)),
                                keyby = .(incBin, channel_type)]
ggplot(data = na.omit(graphData[channel_type != "Dollar Store"]),
       aes(x = incBin, y = avgAvail, color = channel_type)) +
  geom_point(aes(shape = channel_type), size = 3) +
  geom_hline(yintercept = 2.6) +
  geom_vline(xintercept = 15) +
  scale_x_continuous(breaks = seq(10, 100, 10)) +
  labs(x = "Median Income ($000)",
       y = "Package Size Quintile",
       shape = "Store Type",
       color = "Store Type") +
  theme_tufte() +
  theme(axis.title = element_text(),
        plot.caption = element_text(hjust = 0),
        legend.position = "bottom") +
  scale_color_grey()
# scale_color_colorblind()
ggsave("./figures/sizeAvailabilityAllModsChannelType.pdf", height = 4, width = 6)
# ggsave("./figures/sizeAvailabilityAllModsChannelTypeColor.pdf", height = 4, width = 6)
|
06bba3b55636d5c0327d0d0646fc9ca3eeacf6b0
|
a7c8cd3a56abe2ee6113e97028964804b28a6119
|
/hw2/Presentation/m_estimation.R
|
caf09b511e019f4c2d27a940ab861740eee21b89
|
[] |
no_license
|
UmbertoJr/Stochastics_Process
|
d0c709f1a48f6c076f1cb91046327cdf7acec4b8
|
a828917027a4d681867e7ecca46ce15ab2683dce
|
refs/heads/master
| 2021-05-04T01:38:55.592174
| 2018-07-18T21:51:03
| 2018-07-18T21:51:03
| 120,361,580
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,183
|
r
|
m_estimation.R
|
# Draw `number.obs` observations from a (truncated) Dirichlet-process mixture
# via Sethuraman's stick-breaking construction with concentration M:
# atoms y_i ~ N(0, 1), stick proportions thet_i ~ Beta(1, M).
sethuraman.cost <- function(number.obs, M){
  n <- 5000  # truncation level of the stick-breaking representation
  y <- rnorm(n)
  thet <- rbeta(n, shape1 = 1, shape2 = M)
  prob <- rep(0, n)
  prob[1] <- thet[1]
  for (i in 2:n) {
    # weight_i = thet_i * prod_{j<i} (1 - thet_j).
    # Fix: the original wrote thet[1:i-1], which parses as thet[(1:i)-1] =
    # thet[0:(i-1)]; it only worked because R silently drops the 0 index.
    prob[i] <- thet[i] * prod(1 - thet[seq_len(i - 1)])
  }
  # sample() renormalizes prob, so the truncated weights need not sum to 1
  dat <- sample(y, size = number.obs, prob = prob, replace = TRUE)
  return(dat)
}
# Log-likelihood (up to a constant) of the concentration parameter M given
# the observed sample: Z * log(M) - log(M (M+1) ... (M + n - 1)), where Z is
# the number of distinct values and n the sample size. Vectorized over M.
function.M <- function(obs, M) {
  n <- length(obs)
  Z <- length(unique(obs))
  num <- Z * log(M)
  # log of the rising factorial M (M+1) ... (M + n - 1).
  # Fix: seq_len(n - 1) instead of 1:(n - 1) -- the original iterated
  # c(1, 0) when n == 1 and added two spurious terms.
  den <- log(M)
  for (i in seq_len(n - 1)) {
    den <- den + log(M + i)
  }
  num - den
}
# --- Single-run demo: simulate data with known M, recover it by maximizing
# the likelihood over a grid of candidate M values ---
par(mfrow=c(1,1))
M = 10
number.obs=100
observed <- sethuraman.cost(number.obs,M=M)
uniq.obs <- unique(observed)
plot(density(uniq.obs))
xx <- seq(from = 0 , to = 150, by = 0.1)  # grid of candidate M values
value <- function.M(observed, xx)
plot(xx, value,type = 'l', col= rgb(1,114,116,250,maxColorValue = 255), main = 'Likelihood estimation of M')
m.bar <- xx[which.max(value)]  # grid MLE of M
# vertical lines: estimated M (teal) vs. true M (red)
segments(m.bar,-1e10,m.bar,1e10,col= rgb(1,114,116,250,maxColorValue = 255))
segments(M,-1e10,M,1e10,col = 'red')
legend('topright',
       legend = c(paste('Estimated M = ',round(m.bar,1)),
                  paste('True M = ',M)),
       cex = .7)
# --- Monte Carlo: repeat the estimation `sim` times for each true M ---
M <- c(5, 20 , 50, 100)
number.obs=100
sim= 100
m.estimated <- list()
for(i in 1:4){
  m.estimated[[i]] <- rep(0, sim)
  for(z in 1:sim){
    observed <- sethuraman.cost(number.obs,M=M[i])
    value <- function.M(observed, xx)
    m.estimated[[i]][z] <- xx[which.max(value)]
  }
}
#save(m.estimated, file = 'Stima_m.RData')
# NOTE(review): this load() overwrites the simulation results computed just
# above with previously saved ones -- confirm intended
load(file = 'Stima_m.RData')
# histogram + density of the M estimates for each true M, with the mean shown
par(mfrow = c(2,2), mar =c(2,2,2,1))
for(i in 1:4){
  hist(m.estimated[[i]], main = 'distribution of M', probability = T, col = 'blue')
  lines(density(m.estimated[[i]]))
  m <- mean(m.estimated[[i]])
  legend('topright',
         legend = substitute(paste(bar(M),' = ',m), list(m = round(m,1))),
         cex = 0.5)
}
#' Log model probability of the observed clustering pattern given M.
#'
#' Adds a multiplicity correction term to the log-likelihood from
#' function.M(): log(n!) - sum_k [ m.vec[k]*log(k) + log(m.vec[k]!) ],
#' where m.vec[k] is the number of distinct values occurring exactly k
#' times in `obs`.
#'
#' @param obs Observed sample (vector).
#' @param M Concentration parameter value(s).
#' @return Numeric log-probability, the same length as `M`.
compute.prob <- function(obs, M) {
  # NOTE(review): depends on the sibling function.M() defined above.
  lo <- function.M(obs, M)
  n <- length(obs)
  # m.vec[k] = number of distinct values that occur exactly k times.
  # table() gives per-value counts; tabulate() turns them into the
  # count-of-counts vector (replaces the original double-indexing loop).
  m.vec <- tabulate(as.integer(table(obs)), nbins = n)
  # lfactorial() avoids the overflow of log(factorial(x)) for large n/counts.
  num <- lfactorial(n)
  # log(1) == 0, so the k = 1 term of the first sum vanishes as intended.
  den <- sum(m.vec * log(seq_len(n))) + sum(lfactorial(m.vec))
  lo + num - den
}
# Compare the (exponentiated) model probability at the likelihood-based
# estimate M = 11.1 against the true value M = 10.
# NOTE(review): relies on `observed` and compute.prob() from the code above.
exp(compute.prob(observed, 11.1))
exp(compute.prob(observed, 10))
|
7986d458877e714e12ac62ee98e722b425387676
|
6f07dccb7e29b191dde05af2c0d7eca21a521c60
|
/within_ancestry/calc_frac_snps.R
|
ac64f8a8f2944a0fbea2a891c67669ffbe4ce774
|
[] |
no_license
|
squisquater/bees
|
8f0619235cc0c9b126e97d71fa88675ae6384ddb
|
0a93371f52528e2f2c2a4c572a3e9e5a5262ee6e
|
refs/heads/master
| 2022-11-29T07:02:59.160548
| 2020-08-11T05:48:35
| 2020-08-11T05:48:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
calc_frac_snps.R
|
# what is the snp density genomewide?
# NOTE(review): assumes dplyr/magrittr is already attached (uses %>%,
# mutate, summarise) and that paths are relative to the repo layout.
# Total chromosome length of the honeybee genome assembly.
chr_length_tot <- sum(read.table("../data/honeybee_genome/chr.lengths")$V2)
# Total length of assembly gaps (BED intervals: end V3 minus start V2).
# Computed for reference only; superseded by the all-sites denominator below.
gaps_tot <- read.table("../data/honeybee_genome/gaps.bed") %>%
  mutate(length = V3 - V2) %>%
  summarise(tot = sum(length)) %>%
  unlist(.)
n_snps <- 3510834 # from wc -l chr.var.sites
# genome_size <- chr_length_tot - gaps_tot #236*10^6 # genome size
# actually, use all sites passing quality filters for the denominator of pi:
# Per-bin counts of all quality-passing sites (1 cM bins).
all_sites <- data.frame(
  bin_name = read.table("../geno_lik_and_SNPs/results/1cM_bins.names", header = F, stringsAsFactors = F)$V1,
  n_sites = read.table("../geno_lik_and_SNPs/results/1cM_bins_total_counts.txt",
                       header = F, sep = "\t")$V2)
genome_size <- sum(all_sites$n_sites)
frac_snps <- n_snps/genome_size # multiplier for any snp-based heterozygosity estimate
# fraction of the genome that is variable, i.e. snps
|
71d6b9bd9b9d7f7a4e236b5cfe718ee0aeee5ac4
|
ce597bcf02a6b10f739f093b4a786ed9e64276af
|
/tests/testthat/test-factor_creation.R
|
fe9c965485440613c79432896889b8634905ff32
|
[
"MIT"
] |
permissive
|
jonmcalder/refactor
|
55a8a96b3430dcab4217fffb8c843b4010a274a8
|
ecf6e4b35f2b60b9f4084fd62d460fb472d64e26
|
refs/heads/master
| 2021-06-18T18:42:56.870677
| 2020-11-13T15:11:24
| 2020-11-13T15:11:24
| 66,887,343
| 3
| 2
| null | 2017-08-12T15:05:10
| 2016-08-29T23:08:07
|
R
|
UTF-8
|
R
| false
| false
| 3,446
|
r
|
test-factor_creation.R
|
# Unit tests for cfactor(), a stricter variant of base factor().
# NOTE(review): cfactor() comes from the package under test, not this file.
context("cfactor")

# Shared fixtures reused across several expectation blocks below.
case1 <- cfactor(rep("x", 5))
case2 <- cfactor(letters, labels = "letter")
case3 <- cfactor(sample(letters, size = 400, replace = TRUE), levels = letters)
# regex ordering used
hard_to_dectect <- c("EUR 21 - EUR 22", "EUR 100 - 101",
                     "EUR 1 - EUR 10", "EUR 11 - EUR 20")
case4 <- cfactor(hard_to_dectect, ordered = TRUE)

test_that("cfactor returns a factor", {
  expect_output(str(case1), "Factor")
  expect_output(str(case2), "Factor")
  expect_output(str(case3), "Factor")
})

test_that("cfactor returns expected levels", {
  expect_equal(levels(case1), "x")
  # is this really desired?
  expect_equal(levels(case2), paste("letter", 1:26, sep = ""))
  expect_equal(levels(case3), letters)
  # Numeric-aware level ordering: "EUR 100" sorts after "EUR 21", unlike
  # plain lexicographic ordering.
  expect_equal(levels(case4), c("EUR 1 - EUR 10", "EUR 11 - EUR 20",
                                "EUR 21 - EUR 22", "EUR 100 - 101")
  )
})

# Each expect_warning() pins both the condition and (part of) its message.
test_that("warnings", {
  # empty levels
  expect_warning(cfactor(x = c("a", "b", "c"), levels = c("a", "b", "c", "d")),
                 "the following levels were empty")
  # removed levels
  expect_warning(cfactor(x = c("a", "b", "c"), levels = c("b", "c")),
                 "the following levels were removed")
  # intersecting x and levels
  ## case 1: only is represented
  expect_warning(cfactor(x = c("a", "b", "c"), levels = c("a", "b", "c"),
                         labels = c("b", "a", "laste")),
                 "Some values now used .* is now represented")
  ## case 2: only still message
  expect_warning(cfactor(x = c("a", "b", "c"), levels = c("a", "b", "c"),
                         labels = c("a", "g", "laste")),
                 "Some values now used .* still represents")
  ## case 3: 1 and 2
  expect_warning(cfactor(x = c("a", "b", "c"), levels = c("a", "b", "c"),
                         labels = c("a", "now", "b")),
                 "Some values now used .* is now represented .* still represents")
  # duplicated factor inputs
  expect_warning(cfactor(c("a", "b"), levels = c("a", "a", "b")),
                 "the following duplicated levels were removed: \n a")
})

test_that("errors because of wrong input types", {
  # exclude
  expect_error(cfactor(letters, exclude = TRUE),
               "Must have class 'character'")
  expect_error(cfactor(1:26, exclude = "a"),
               "Must have class 'integer'")
  # ordered
  expect_error(cfactor(1:26, ordered = 3),
               "Must have class 'logical'")
  # exceed nmax
  expect_error(cfactor(sample(letters), nmax = 4),
               "hash table is full")
})

test_that("coersion of input type of x", {
  # input type numeric
  # expect_error(expr, NA) asserts that NO error is raised.
  expect_error(cfactor(1:14, ordered = TRUE), NA)
  expect_equal(cfactor(1:14, ordered = TRUE), factor(1:14, ordered = TRUE))
  # input type factor
  expect_equal(cfactor(cfactor(letters), ordered = TRUE),
               factor(factor(letters), ordered = TRUE))
  # input type ordered
  expect_equal(cfactor(cfactor(letters, ordered = TRUE), ordered = TRUE),
               factor(factor(letters, ordered = TRUE), ordered = TRUE))
})

test_that("x is missing or NULL", {
  ## no other arguments
  expect_equal(cfactor(), factor())
  expect_equal(cfactor(NULL), factor(NULL))
  # other arguemnts
  expect_equal(cfactor(labels = 3), factor(labels = 3))
  # non-missing x but NULL
  expect_equal(cfactor(NULL, ordered = TRUE), factor(NULL, ordered = TRUE))
})

# TODO notes left by the original author:
# width-1-categories with no separator
# labels as character of length 1
|
c57beb5f54c754aa99a34ddff515202230e129ed
|
6ceab1bf9c435b523d2f8e7e9440da39770d741b
|
/R/f7-download.R
|
e6c793022bcc95634c4ed3319714ac73e34a99df
|
[] |
no_license
|
RinteRface/shinyMobile
|
a8109cd39c85e171db893d1b3f72d5f1a04f2c62
|
86d36f43acf701b6aac42d716adc1fae4f8370c6
|
refs/heads/master
| 2023-07-25T16:28:41.026349
| 2022-11-25T17:04:29
| 2022-11-25T17:04:29
| 139,186,586
| 328
| 92
| null | 2023-03-26T05:58:53
| 2018-06-29T19:13:06
|
R
|
UTF-8
|
R
| false
| false
| 1,448
|
r
|
f7-download.R
|
#' Create a download button
#'
#' Builds an anchor tag styled as a Framework7 button that starts a
#' browser download when clicked. The file name and contents are supplied
#' by a matching shiny downloadHandler() registered in the server function
#' under the same output slot.
#'
#' @param outputId The name of the output slot that the downloadHandler is assigned to.
#' @param label The label that should appear on the button.
#' @param class Additional CSS classes to apply to the tag, if any.
#' @param ... Other arguments to pass to the container tag function.
#' @export
#'
#' @examples
#' if (interactive()) {
#'  library(shiny)
#'  library(shinyMobile)
#'  ui = f7Page(
#'    f7SingleLayout(
#'      navbar = f7Navbar(title = "File handling"),
#'      f7DownloadButton("download","Download!")
#'    )
#'  )
#'
#'  server = function(input, output, session) {
#'    # Our dataset
#'    data <- mtcars
#'
#'    output$download = downloadHandler(
#'      filename = function() {
#'        paste("data-", Sys.Date(), ".csv", sep="")
#'      },
#'      content = function(file) {
#'        write.csv(data, file)
#'      }
#'    )
#'  }
#'
#'  shinyApp(ui, server)
#' }
f7DownloadButton <- function (outputId, label = "Download", class = NULL, ...) {
  # Combine the mandatory Framework7/shiny classes with any user-supplied
  # ones; paste() drops `class` entirely when it is NULL.
  btn_classes <- paste("button button-fill external shiny-download-link", class)
  # The "shiny-download-link" class is what shiny's client-side code binds
  # to; `download = NA` renders as a bare `download` attribute.
  shiny::tags$a(
    id = outputId,
    class = btn_classes,
    href = "",
    target = "_blank",
    download = NA,
    shiny::icon("download"),
    label,
    ...
  )
}
|
aea2f76bd2a22c6866d771693f1d6ee923d1dc1e
|
6ad68090db6626c3e1c648047d57437337fb75ae
|
/src/an1/04.r
|
eaf51aea7b3b329d2efd56decf4b879cff9cc3c9
|
[] |
no_license
|
int28h/RTasks
|
8764ba7fb8f06eb1b7e09d1dc4dd3a26458d12d6
|
88c39bb8e6b34c8743e16182e33ec5935ef3598f
|
refs/heads/master
| 2022-06-17T18:06:26.464545
| 2022-06-03T22:40:50
| 2022-06-03T22:40:50
| 116,028,292
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,394
|
r
|
04.r
|
# Use the built-in airquality data set. Save into a new variable a subset of
# the original data, keeping only the observations for months 7, 8 and 9.
#
# Using the aggregate function, compute the number of non-missing
# observations of the Ozone variable in months 7, 8 and 9. Use the
# length() function to count the observations.
#
# Store the result of the aggregate call in the variable `result`.
#
# Hints:
#
# 1. Don't forget to take a subset keeping only the required months; the
#    following construct may come in handy:
#
#    > x <- 5
#    > x %in% c(3, 4, 5)
#    [1] TRUE
#
# 2. To count only the non-missing observations, use the formula interface,
#    which drops missing values automatically:
#
#    aggregate(y ~ x + z , data, FUN)
# Count of non-NA Ozone values per month, restricted to months 7-9.
result <- aggregate(Ozone ~ Month, subset(airquality, Month %in% c(7,8,9)), length)
|
3d5d5d5de50ca6100558d92388ee025b2d5aa805
|
f317887c7d83e62235ba2cf19065dcef9244f645
|
/man/prTable.Rd
|
8507d68451552d9554df78ceffad9ec04ea62b72
|
[] |
no_license
|
rrprf/tablesgg
|
3fec64842266f8a7f28e29899d31c673b5dad09c
|
1a60f894869326b34eff1804c9378a1c05e78a79
|
refs/heads/master
| 2023-05-07T14:12:05.102317
| 2021-06-03T14:45:34
| 2021-06-03T14:45:34
| 318,291,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 680
|
rd
|
prTable.Rd
|
% Auto-generated documentation for function prTable
% 2021-06-02 11:12:19
\name{prTable}
\alias{prTable}
\title{Create or Update a Fully Styled Table Ready for Plotting }
\description{
Create or update a \code{prTable} object, a fully styled (plot-ready)
table. This is an S3 generic. It and its methods are internal functions,
not intended to be called by package users.
}
\usage{
prTable(x, ...)
}
\arguments{
\item{x}{An object to be converted to a plot-ready table.
}
\item{...}{Additional arguments passed to specific methods.
}
}
\value{
An object of S3 class \code{prTable}. See \code{prTable.prEntries} for
the structure of this object.
}
\keyword{internal}
|
61f158693cf1ea19f2e10d4a96b2a5d7b1cd0ad9
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612726553-test.R
|
299a08c2aa005a44831391573500e7148a82bc21
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 200
|
r
|
1612726553-test.R
|
testlist <- list(latLongs = structure(c(NaN, 4.48309463911336e-120, 1.64928503582928e-260, Inf), .Dim = c(2L, 2L)), r = 8.29692097078716e-317)
result <- do.call(MGDrivE::calcCos,testlist)
str(result)
|
ee0490c60f6711c89d7ccbc67457f99de993a8f4
|
cd21058c61cba55e8135def0dec1980d77f00ec0
|
/Zmisc/man/n_percent_format.Rd
|
6cabe0704dbe75c40b87e7f4faa682c4423c2792
|
[] |
no_license
|
Zus/zmisc
|
f37488c732a45bb12331fd8c832bb1497aa4e3cd
|
4ec3303676606d968db478f6ef9ee0b880597a2f
|
refs/heads/master
| 2021-01-01T05:14:41.954778
| 2018-09-21T07:07:24
| 2018-09-21T07:07:24
| 56,401,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 412
|
rd
|
n_percent_format.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{n_percent_format}
\alias{n_percent_format}
\title{format text to be used in Rmd's mostly}
\usage{
n_percent_format(tabl, event = "1")
}
\arguments{
\item{event}{the way events are coded}
\item{tabl}{a matrix with observations and events}
}
\value{
converted names
}
\description{
format text to be used in Rmd's mostly
}
|
bcc2ca6b8464245b1c7cdfd32fdba3a430aef234
|
00e7438f79f95ffab664390a0cbacaf407f4433b
|
/Merging Disease System Files/for_John_merge_all_files.R
|
f75559c5364f13d37986a57127b280ff6e3e3aa3
|
[] |
no_license
|
Key2-Success/HeartBD2K
|
95b410f2b7233419650e6972058112532a7223d8
|
21ad025c40a396707e97dede993ac8c8b393bf13
|
refs/heads/master
| 2018-12-14T19:51:21.941506
| 2018-09-13T22:38:29
| 2018-09-13T22:38:29
| 108,905,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,028
|
r
|
for_John_merge_all_files.R
|
library(dplyr)
library(stringi)

# ---- Load every per-disease-system .Rdata file ------------------------------
# Each file defines one data frame named "<system>_<horizon>" (e.g. cancer_0,
# cancer_10). seq_along() replaces the original hard-coded 1:31 so the loop
# follows however many files are actually found.
filenames <- list.files(path = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/John's Data/Merge All/",
                        pattern = "*.Rdata", full.names = TRUE)
for (i in seq_along(filenames)) {
  load(filenames[i])
}

# Harmonise object names that do not follow the *_0 / *_10 naming convention.
hematological_0 <- hematology_0
hematological_10 <- hematology_10
endocrinological_0 <- endocrinology_0
endocrinological_10 <- endocrinology_10

# Stack all disease-system data frames into one table.
all <- do.call("rbind", list(cancer_0, cancer_10, cardiovascular_0, cardiovascular_10, digestive_0, digestive_10,
                             endocrinological_0, endocrinological_10, hematological_0, hematological_10, infectious_0, infectious_10,
                             musculoskeletal_and_rheumatic_0, musculoskeletal_and_rheumatic_10, nephrological_and_urological_0,
                             nephrological_and_urological_10, neurological_0, neurological_10, obstetrical_and_gynecological_0,
                             obstetrical_and_gynecological_10, ophthalmological_0, ophthalmological_10, oral_and_maxillofacial_0,
                             oral_and_maxillofacial_10, otorhinolaryngological_0, otorhinolaryngological_10, respiratory_0,
                             respiratory_10, trauma_0, trauma_10))

# Checkpoint the stacked table.
save(all, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/John's Data/Merge All/all.Rdata")
load(file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/John's Data/Merge All/all.Rdata")

# Collapse to one row per PMID, concatenating the distinct subject labels.
final <- all %>%
  group_by(PMID) %>%
  dplyr::mutate(subject = toString(unique(subject))) %>%
  distinct(PMID, .keep_all = TRUE)

# Remove the secondary journal-name column (column 8).
final <- final[ , -c(8)]

# Normalise word endings so subject labels match the column names used below.
final$subject <- stri_replace_all_regex(str = final$subject, pattern = "endocrinology", replacement = "endocrinological")
final$subject <- stri_replace_all_regex(str = final$subject, pattern = "hematology", replacement = "hematological")

# ---- One 0/1 factor column per disease system -------------------------------
# The search pattern is the column name with underscores turned into spaces
# (e.g. "oral_and_maxillofacial" -> "oral and maxillofacial"), which
# reproduces the hand-written grepl() pattern of every line in the original
# one-column-per-statement version; column order is preserved.
disease_cols <- c("cancer", "cardiovascular", "digestive", "endocrinological",
                  "hematological", "infectious", "musculoskeletal_and_rheumatic",
                  "nephrological_and_urological", "neurological",
                  "obstetrical_and_gynecological", "ophthalmological",
                  "oral_and_maxillofacial", "otorhinolaryngological",
                  "respiratory", "trauma")
for (disease in disease_cols) {
  disease_pattern <- gsub("_", " ", disease, fixed = TRUE)
  final[[disease]] <- as.factor(
    ifelse(grepl(pattern = disease_pattern, x = final$subject, ignore.case = TRUE), 1, 0)
  )
}

# Save and reload the merged, flagged table.
save(final, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/John's Data/Merge All/all.Rdata")
load(file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/John's Data/Merge All/all.Rdata")
|
8c7d51beec8b11ad16e00666ad62369edef35301
|
247946f5456e093a7fe49f57e722477ac9dc010e
|
/R/plot_pvals.R
|
d7f2027465fe590dea8e32e5a6f6ce3ab0b67800
|
[
"MIT"
] |
permissive
|
jdreyf/jdcbioinfo
|
b718d7e53f28dc15154d3a62b67075e84fbfa59b
|
1ce08be2c56688e8b3529227e166ee7f3f514613
|
refs/heads/master
| 2023-08-17T20:50:23.623546
| 2023-08-03T12:19:28
| 2023-08-03T12:19:28
| 208,874,588
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,068
|
r
|
plot_pvals.R
|
#' Plot p-values
#'
#' Draw a level plot (heat map) of a matrix of p-values, annotating every
#' cell with the rounded p-value and conventional significance stars
#' ("***" < 0.001, "**" < 0.01, "*" < 0.05).
#'
#' @param pvals A matrix of p-values; all entries must lie in [0, 1].
#' @param name Base name for an output PDF ("<name>.pdf"). If `NA`
#'   (default), the plot is drawn on the current graphics device.
#' @param width Width of the PDF in inches.
#' @param height Height of the PDF in inches.
#' @return NULL
plot_pvals <- function(pvals, name=NA, width=8, height=7) {
  stopifnot(pvals>=0, pvals<=1)
  # Transpose once up front so the displayed orientation matches the input.
  pvals <- t(pvals)
  signifSymbols <- c("***", "**", "*", "")
  signifCutpoints <- c(0, 0.001, 0.01, 0.05, 1)
  # Significance symbol per cell; symnum() is applied column by column.
  # Renamed from `signif` to avoid shadowing base::signif().
  sigMarks <- pvals
  for (i in seq_len(ncol(pvals))) {   # seq_len() is safe for zero columns
    sigMarks[, i] <- c(stats::symnum(pvals[, i], corr=FALSE, na=FALSE,
                                     cutpoints=signifCutpoints, symbols=signifSymbols))
  }
  # Cell label = rounded p-value followed by its stars.
  plotLabels <- pvals
  for (i in seq_len(nrow(pvals))) {
    for (j in seq_len(ncol(pvals))) {
      plotLabels[i, j] <- paste0(round(pvals[i, j], 3), sigMarks[i, j])
    }
  }
  posLab <- 1
  axisTicks <- c(1, 0)
  cols <- grDevices::colorRampPalette(rev(c("white", "cornsilk1", "gold", "forestgreen", "darkgreen")))
  # Colour-scale range: observed range padded slightly, clamped to [0, 1].
  maxp <- max(pvals)
  minp <- min(pvals)
  iUpperRange <- min(1, maxp + 0.01)
  iLowerRange <- max(0, minp - 0.01)
  # Panel function: draw the heat map, then overlay the text labels.
  labels <- function(x, y, z, ...) {
    lattice::panel.levelplot(x, y, z, ...)
    lattice::ltext(x, y, labels=plotLabels, cex=1, col="black", font=1)
  }
  if (!is.na(name)){
    grDevices::pdf(paste0(name, ".pdf"), width=width, height=height)
    # Close the device even if levelplot() fails below.
    on.exit(grDevices::dev.off())
  }
  l <- lattice::levelplot(pvals, xlab=list(label="", cex=1, rot=0, col="black", font=2),
                          ylab=list(label="", cex=1, rot=0, col="black", font=2), panel=labels,
                          pretty=TRUE, par.settings=list(panel.background=list(col="white")),
                          scales=list(x=list(cex=1, rot=0, col="black", font=2), y=list(cex=1, rot=0, col="black", font=2),
                                      tck=axisTicks, alternating=posLab), aspect="fill",
                          col.regions=cols, cuts=100, at=seq(iLowerRange, iUpperRange, 0.01),
                          main=list(label="p-Values", cex=2, rot=0, col="black", font=2),
                          colorkey=list(space="right", labels=list(cex=1)))
  graphics::plot(l)
}
|
96d038540735fb2b63bb8a689dd47365b42466be
|
27912a635b637c2cb729e257cf5f2277887c8f35
|
/7.MCMC/code/basicMCMC.R
|
13d3d1b019878d34f9bbaed6cd9b745f03e1a3ec
|
[] |
no_license
|
timothyfrasier/stats-2019
|
75e45292360fcbc088b6cc7dd45a1a344524d3ff
|
2d74d63af0ac71217b7a3e95a27605a8744cc57b
|
refs/heads/master
| 2020-04-15T06:04:20.335414
| 2019-03-28T09:06:06
| 2019-03-28T09:06:06
| 164,447,800
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,959
|
r
|
basicMCMC.R
|
###############################
# Code for teaching MCMC #
# Methods #
# #
# Requires: #
# The number of steps to take #
###############################
#-----------------------------#
# Basic Chain (No Peaks) #
#-----------------------------#
#' Simple random-walk chain on a 21 x 21 grid, for teaching MCMC.
#'
#' Starts at a random point on one of the four edges of the square
#' [-10, 10] x [-10, 10], then at each step moves x by +/-1 and y by
#' +/-0.5 (staying put when a move would leave the square), and finally
#' plots the resulting path.
#'
#' @param nSteps Number of steps the chain should take (>= 1).
#' @return NULL, invisibly (called for its plotting side effect).
basicMCMC <- function(nSteps) {
  #--- Settings for MCMC: the chain lives in [-10, 10] x [-10, 10] ---#
  xmin = -10
  xmax = 10
  ymin = -10
  ymax = 10
  #--- Preallocate the full chain ---#
  yOut <- rep(0, nSteps)
  xOut <- rep(0, nSteps)
  #--- Candidate coordinate values ---#
  yVals <- seq(from = ymin, to = ymax, by = 1)
  xVals <- seq(from = xmin, to = xmax, by = 1)
  #--- Random starting position on one of the four edges ---#
  #--- startaxis: 1 = x-edge, 2 = y-edge; startpos: 1 = min, 2 = max ---#
  choice <- c(1, 2)
  maxmin <- c(1, 2)
  startaxis <- sample(choice, 1, replace = TRUE)
  startpos <- sample(maxmin, 1, replace = TRUE)
  if (startaxis == 1) {
    if (startpos == 1) {
      xOut[1] <- min(xVals)
    } else if (startpos == 2) {
      xOut[1] <- max(xVals)
    }
    yOut[1] <- sample(yVals, 1, replace = TRUE)
  } else if (startaxis == 2) {
    if (startpos == 1) {
      yOut[1] <- min(yVals)
    } else if (startpos == 2) {
      yOut[1] <- max(yVals)
    }
    # BUG FIX: the original did `xOut <- sample(...)`, replacing the whole
    # preallocated vector with a length-1 value; only element 1 should be set.
    xOut[1] <- sample(xVals, 1, replace = TRUE)
  }
  #--- Step through the chain ---#
  # seq_len(nSteps - 1) + 1 is empty when nSteps == 1, avoiding the
  # backwards 2:1 loop (and the xOut[0] error) of the original 2:nSteps.
  for (i in seq_len(nSteps - 1) + 1) {
    #--- Pick next x-value: +/-1, rejected at the boundary ---#
    addsub <- c(1, 2)
    choice <- sample(addsub, 1, replace = TRUE)
    if (choice == 1) {
      if ((xOut[i - 1] + 1) <= max(xVals)) {
        xOut[i] <- (xOut[i - 1] + 1)
      } else {
        xOut[i] <- xOut[i - 1]
      }
    } else if (choice == 2) {
      if ((xOut[i - 1] - 1) >= min(xVals)) {
        xOut[i] <- (xOut[i - 1] - 1)
      } else {
        xOut[i] <- xOut[i - 1]
      }
    }
    #--- Pick next y-value: +/-0.5, rejected at the boundary ---#
    choice <- sample(addsub, 1, replace = TRUE)
    if (choice == 1) {
      if ((yOut[i - 1] + 0.5) <= max(yVals)) {
        yOut[i] <- (yOut[i - 1] + 0.5)
      } else {
        yOut[i] <- yOut[i - 1]
      }
    } else if (choice == 2) {
      if ((yOut[i - 1] - 0.5) >= min(yVals)) {
        yOut[i] <- (yOut[i - 1] - 0.5)
      } else {
        yOut[i] <- yOut[i - 1]
      }
    }
  }
  #--- Plot the chain path ---#
  plot(xOut, yOut, ylim = c(min(yVals), max(yVals)), xlim = c(min(xVals), max(xVals)), ylab = "Y Values", xlab = "X Values", main = paste("N =", nSteps), type = "o", pch = 16)
}
|
c676bf86841b254cc10a2be6110af66e352c769e
|
d2f9feb944e8b4315b79e10ac9bf569857aeef2b
|
/engine/simple_html_checker.R
|
298f1efd3a274025620001911f5179ec99b2c7dd
|
[] |
no_license
|
alucas69/CanvasQuizR
|
9c73b9dbe0999f52576ce8066fcd836512900717
|
de189a1df9ca3b65d144586f4396e5abb5cad91b
|
refs/heads/master
| 2022-11-06T21:32:17.298853
| 2020-07-02T09:51:54
| 2020-07-02T09:51:54
| 272,904,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
simple_html_checker.R
|
# only checks for stray "<" or ">"
simple_html_checker= function(vs_text) {
text= paste(vs_text, collapse = " ")
maxlen= stri_length(text)
# count < and > and <...>
position1= stri_locate_all(text, regex = ">")[[1]]
number_gt= nrow(position1) - is.na(position1[1,1])
position1= stri_locate_all(text, regex = "<")[[1]]
number_lt= nrow(position1) - is.na(position1[1,1])
position2= stri_locate_all(text, regex = "<[^<]+>")[[1]]
number_tags= nrow(position2) - is.na(position2[1,1])
# check for stray < or >
if (number_lt != number_tags) stop(c("ERROR: STRAY < IN HTML", vs_text))
if (number_gt != number_tags) stop(c("ERROR: STRAY > IN HTML", vs_text))
if (number_tags == 0) return()
# get html tags
tag= rep(NA, number_tags)
for (i1 in 1:number_tags) {
position1= position2[i1,]
# skip comments
if (!(stri_sub(text, position1[1], min(maxlen, position1[1]+3)) == "<!--")) {
# extract tag
tag1= stri_sub(text, position1[1]+1, min(maxlen, position1[2]-1))
position1= stri_locate(tag1, regex = "[^ ]+")
tag[i1]= stri_sub(tag1, position1[1], position1[2])
}
}
# check matching html tags
simple_html_matching_tag_checker(tag[!is.na(tag)])
}
|
ec0529072250828d09c663b4610183b97d9ac0f1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pheno/examples/connectedSets.Rd.R
|
10a44bc1f68a1edb0c392c664642c600df8a75e5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
connectedSets.Rd.R
|
# Example extracted from the pheno package documentation.
# NOTE(review): requires the pheno package, which bundles the 'Simple' data.
library(pheno)
### Name: connectedSets
### Title: Connected sets in a matrix
### Aliases: connectedSets
### Keywords: design models

### ** Examples

# Find the connected sets of the bundled example matrix.
data(Simple)
connectedSets(Simple)
|
345d9cf47027dfdafe21733e17dc43f30d38da21
|
c67ed6bfca50b35228ef31a477865e0063701836
|
/site_visit/CFQ_plots.R
|
a022206c0bef0c9c63e0b897ac0bec377c5f6b90
|
[] |
no_license
|
joetidwell/QES2
|
1bbfdbc4d5e901162064e14f2c37a8df58b8e350
|
1f2741e27c8ce7a58c486473f15d980236c70a55
|
refs/heads/master
| 2020-12-24T16:49:56.005345
| 2015-12-09T17:50:29
| 2015-12-09T17:50:29
| 32,096,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,605
|
r
|
CFQ_plots.R
|
library(ggplot2)
library(foreach)
library(doMC)
registerDoMC()
# NOTE(review): project-local sources; kRootPath is presumably defined in
# global_vars.R — the script cannot run without them.
source("~/ACE/global_vars.R")
source(file.path(kRootPath, "util", "load_data.R"), chdir=T)
source(file.path(kRootPath, "fitting", "interval_fitting_funcs.R"), chdir=T)
source(file.path(kRootPath, "forecast", "method_consensus_dist.R"), chdir=T)
theme_set(theme_classic())
options(stringsAsFactors = FALSE)
path.mydata <- "~/git/QES2/data"
# Load data from grofo
load(file.path(path.mydata,"tmp.RData"))
# Boxplots of S.R.r by condition, one facet per IFP.
p <- ggplot(data=ThetaM, aes(x=type, y=S.R.r)) +
  geom_boxplot(aes(color=dist), outlier.size=0, notch=TRUE) +
  facet_wrap(~ifp_id)
p
# Order IFPs by mean daily Brier score, then plot per-condition scores.
tmp <- MDBS[,mean(BS,na.rm=TRUE),by=ifp_id]
setkey(tmp,V1)
MDBS[,ifp_id:=factor(ifp_id,levels=tmp$ifp_id)]
ggplot(data=MDBS[is.finite(BS)], aes(x=ifp_id, y=BS, color=type)) +
  geom_hline(yintercept=.5) +
  geom_point(size=5.4, color="black") +
  geom_point(size=5) +
  ylim(0,2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(x="IFP", y="Mean Daily Brier Score") +
  scale_color_discrete(name="Condition")
# Same plot, aggregated by fitted distribution family instead of condition.
tmp <- MDBS[,mean(BS,na.rm=TRUE),by=ifp_id]
setkey(tmp,V1)
MDBS[,ifp_id:=factor(ifp_id,levels=tmp$ifp_id)]
ggplot(data=MDBS[is.finite(BS),list(BS=mean(BS)),by=c("ifp_id","dist")], aes(x=ifp_id, y=BS, color=dist)) +
  geom_hline(yintercept=.5) +
  geom_point(size=5.4, color="black") +
  geom_point(size=5) +
  ylim(0,2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(x="IFP", y="Mean Daily Brier Score") +
  scale_color_discrete(name="Distribution")
# Hand-entered "cond1h" benchmark Brier scores, one per IFP (NaN = missing).
cond1h <- c(NaN,NaN,.159,.714,.202,.227,.451,.168,.323,.382,.188,.137,.221,.099,.886,.160,.546,.164,.182,.688,.345,.192,.236,.323,.277,.289,.427,.127,.210,.190,.768,.658,.502,.768,.924,.462)
tmp <- MDBS[,mean(BS,na.rm=TRUE),by=ifp_id]
setkey(tmp,V1)
MDBS[,ifp_id:=factor(ifp_id,levels=tmp$ifp_id)]
blerg <- MDBS[is.finite(BS),list(BS=mean(BS)),by=c("ifp_id","dist")]
tmp[,V1:=cond1h]
tmp[,BS:=V1]
tmp[,V1:=NULL]
tmp$dist <- "cond1h"
blerg <- rbind(blerg,tmp)
blerg[,dist:=factor(dist,levels=c("beta","gamma","normal",""))]
blerg<- blerg[!is.nan(BS)]
# Collapse distribution families into a two-level Consensus/ULinOP label.
# NOTE(review): the ordered() levels use "UlinOP" (lowercase l) while the
# data uses "ULinOP" — looks like a typo; verify before relying on ordering.
blerg[dist!="",dist:="Consensus"]
blerg[is.na(dist),dist:="ULinOP"]
blerg[,dist:=ordered(dist,levels=c("UlinOP","Consensus"))]
ggplot(data=blerg, aes(x=ifp_id, y=BS, color=dist)) +
  geom_hline(yintercept=.5) +
  geom_point(size=5.4, color="black") +
  geom_point(data=blerg[dist==""], aes(x=ifp_id, y=BS), color="steelblue",size=5) +
  geom_point(data=blerg[dist!=""], aes(x=ifp_id, y=BS), color="firebrick",size=5) +
  ylim(0,2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(x="Forecasting Question", y="Mean Daily Brier Score")
# scale_color_manual(name="Blah", values=c("firebrick","firebrick","firebrick","white"))
# Variant of the previous plot using the default colour mapping.
ggplot(data=blerg, aes(x=ifp_id, y=BS, color=dist)) +
  geom_hline(yintercept=.5) +
  geom_point(size=5.4, color="black") +
  # geom_point(data=blerg[dist==""], aes(x=ifp_id, y=BS), color="steelblue",size=5) +
  # geom_point(data=blerg[dist!=""], aes(x=ifp_id, y=BS), color="firebrick",size=5) +
  geom_point(size=5) +
  ylim(0,2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(x="Forecasting Question", y="Mean Daily Brier Score") +
  scale_color_manual(name="Source",values=c("firebrick","steelblue"))
# Same per-distribution summary for the extended data set MDBS.ex.
tmp <- MDBS.ex[,mean(BS,na.rm=TRUE),by=ifp_id]
setkey(tmp,V1)
MDBS.ex[,ifp_id:=factor(ifp_id,levels=tmp$ifp_id)]
ggplot(data=MDBS.ex[is.finite(BS),list(BS=mean(BS)),by=c("ifp_id","dist")], aes(x=ifp_id, y=BS, color=dist)) +
  geom_hline(yintercept=.5) +
  geom_point(size=5.4, color="black") +
  geom_point(size=5) +
  ylim(0,2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(x="IFP", y="Mean Daily Brier Score") +
  scale_color_discrete(name="Distribution")
# Reshape `blah` to long format and map factor levels to a numeric `col`.
# NOTE(review): `blah` must already exist in the workspace at this point.
blah
library(reshape2)
blah <- melt(blah)
tmp <- blah[,mean(value, na.rm=TRUE),by=ifp_id]
blerg
setkey(tmp,V1)
blah[,ifp_id:=factor(ifp_id,levels=tmp$ifp_id)]
blah[,col:=as.numeric(variable)]
blah[col==2,col:=1.5]
blah[col==3,col:=3]
blah[col==4,col:=5]
blerg2 <- blerg[dist=="ULinOP"]
blerg2[,value:=BS]
blerg2[,variable:=dist]
# Overlay the ULinOP points (black ring with white core) on the
# variance-inflation comparison.
ggplot(data=blah[is.finite(value) & col!=1.5,], aes(x=ifp_id, y=value, color=variable)) +
  geom_hline(yintercept=.5) +
  # geom_point(size=5.4, color="black") +
  geom_point(data=blerg2,size=5,color="black") +
  geom_point(data=blerg2,size=4.6,color="white") +
  geom_point(size=5) +
  # geom_line(size=1.25) +
  ylim(0,2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(x="IFP", y="Mean Daily Brier Score") +
  scale_color_manual(name="Variance\nInflation\nFactor", values=c("mistyrose","lightpink","firebrick"))
# scale_color_manual(name="Variance\nInflation\nFactor", values=c("#deebf7","#9ecae1","#3182bd"))
# Interactive inspection leftovers.
blah
MDBS[ifp_id=="1432-0"]
# Rolling gamma forecast plot for IFP 1442-0 (random condition): median
# predicted date with a 1%-99% band, plus outcome and cutpoint lines.
tmp <- ThetaM[ifp_id=="1442-0" & type=="random",]
tmp.ln <- data.table(value=c(as.numeric(as.Date("2014-10-09")),
                             as.numeric(as.Date("2015-06-01"))),
                     threshold=c("Outcome","GJP Cutpoint"))
pdata <- tmp[,list(median.date=roll.date+qgamma(.5,par.1,par.2),
                   roll.date=roll.date,
                   type=type),]
pdata[,threshold:="Forecast"]
pdata[,c("ymin","ymax"):=tmp[,list(roll.date+qgamma(.01,par.1,par.2),
                                   roll.date+qgamma(.99,par.1,par.2))]]
pdata[,linetype:=c("dashed")]
pdata[,fill:=c("grey")]
p <- ggplot(pdata, aes(x=roll.date,y=median.date)) +
  geom_ribbon(aes(ymin=ymin, ymax=ymax,linetype=linetype, fill=fill), alpha=.2, size=.3, show.guide=FALSE) + geom_line(size=1) +
  geom_hline(data=tmp.ln, aes(yintercept=value, color=threshold), size=1) +
  scale_color_manual(name="", values=c("firebrick","steelblue")) +
  scale_fill_manual(values="black", guide=FALSE) +
  scale_linetype_manual(values="dashed", guide=FALSE) +
  labs(x="Forecast Date", y="Predicted Date") +
  coord_cartesian(xlim=c(as.Date("2014-08-20"),as.Date("2014-08-25")))
p
# Rolling gamma forecast plot for IFP 1415-0 (random condition), analogous
# to the 1442-0 plot above.
tmp <- ThetaM[ifp_id=="1415-0" & type=="random",]
tmp.ln <- data.table(value=c(as.numeric(as.Date("2014-08-26")),
                             as.numeric(as.Date("2014-10-01"))),
                     threshold=c("Outcome","GJP Cutpoint"))
# Median predicted date per forecast date, from the fitted gamma parameters.
pdata <- tmp[,list(median.date=roll.date+qgamma(.5,par.1,par.2),
                   roll.date=roll.date,
                   type=type),]
pdata[,threshold:="Forecast"]
# 1%-99% prediction band.
pdata[,c("ymin","ymax"):=tmp[,list(roll.date+qgamma(.01,par.1,par.2),
                                   roll.date+qgamma(.99,par.1,par.2))]]
pdata[,linetype:=c("dashed")]
pdata[,fill:=c("grey")]
ggplot(pdata, aes(x=roll.date,y=median.date, color=threshold)) +
  geom_ribbon(aes(ymin=ymin, ymax=ymax,linetype=linetype,fill=fill), alpha=.2, size=.3, show.guide=FALSE) + geom_line(size=1) +
  geom_hline(data=tmp.ln, aes(yintercept=value, color=threshold), size=1) +
  scale_color_manual(name="", values=c("black","firebrick","steelblue")) +
  scale_fill_manual(values="black", guide=FALSE) +
  scale_linetype_manual(values="dashed", guide=FALSE) +
  labs(x="Forecast Date", y="Predicted Date") +
  # BUG FIX: the original line ended with an extra ")", which made the
  # whole file unparseable.
  coord_cartesian(xlim=c(as.Date("2014-08-20"),as.Date("2014-08-25")))
p
# NOTE(review): the following layer additions were orphaned (not attached
# to any plot object) and would error at runtime; kept commented out.
#  geom_hline(yintercept=as.numeric(as.Date("2014-10-09"))) +
#  geom_hline(yintercept=as.numeric(as.Date("2015-06-01")))
load("roll.Rdata")
tmp <- MDBS[,mean(BS,na.rm=TRUE),by=ifp_id]
setkey(tmp,V1)
MDBS[,ifp_id:=factor(ifp_id,levels=tmp$ifp_id)]
ggplot(data=MDBS[is.finite(BS)], aes(x=ifp_id, y=BS, color=type)) +
geom_hline(yintercept=.5) +
geom_point(size=5.4, color="black") +
geom_point(size=5) +
ylim(0,2) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
labs(x="IFP", y="Mean Daily Brier Score") +
scale_color_discrete(name="Condition")
# Append a hand-entered benchmark series ("Inkling Mean") so it can be
# plotted alongside the model conditions.
# NOTE(review): the BS values are hard-coded and assumed to line up with
# the order of unique(MDBS$ifp_id) -- verify before trusting the plot.
pdata <- rbind(MDBS,
              data.table(type="Inkling Mean",
                         ifp_id=unique(MDBS$ifp_id),
                         dist=NA,
                         BS=c(.025, .067, .195, .043, .491, .564, .092, 1.246, .411, .287, .156),
                         N=NA))
# Same Brier-score plot as above, now comparing methods (model vs. the
# Inkling benchmark) with distinct color/alpha/shape per method.
ggplot(data=pdata[is.finite(BS)], aes(x=ifp_id, y=BS, color=type, group=type, alpha=type, shape=type)) +
  geom_hline(yintercept=.5) +
  geom_point(size=5.4, color="black") +
  geom_point(size=5) +
  # geom_line(size=1.25) +
  ylim(0,2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(x="IFP", y="Mean Daily Brier Score") +
  scale_color_manual(name="Method", values=c("black","#d95f02","#1b9e77")) +
  scale_alpha_manual(name="Method",values=c(.5,1,1)) +
  scale_shape_manual(name="Method",values=c(1,16,16))
# Prettify condition labels for plot legends.
# NOTE(review): these relabels run *after* the plot above was built, so
# they only affect later plots -- exploratory ordering, not a pipeline.
pdata[type=="rolling", type:="Rolling"]
pdata[type=="rollingCtl", type:="Rolling Control"]
# Throwaway toy data to experiment with legend appearance for the fan
# chart (ribbon + colored reference lines).
x <- 1:100
ymin <- rep(1,100)
ymax <- rep(2,100)
y <- rep(1.5,100)
# NOTE(review): `=` used for top-level assignment here; `<-` is the
# convention used elsewhere in this script.
fill=rep("Forecast w/ 95% CI",100)
col = rep(c("IFP Cutpoint","Outcome"),times=c(20,80))
blah <- data.table(x,y,ymin,ymax,fill,col)
# Draft plot -- several layers left commented out from experimentation.
ggplot(blah, aes(x=x,y=y,ymin=ymin,ymax=ymax, color=col)) +
  # geom_ribbon() +
  geom_line(size=1) +
  # scale_fill_manual(values="grey") +
  # scale_linetype_manual(values=c("solid","dashed"))
  scale_color_manual(values=c("firebrick","steelblue"))
# Final fan chart: median predicted date with 1%-99% ribbon (mapped
# via the constant linetype/fill columns added to pdata earlier) and
# reference lines from tmp.ln; forecast line drawn uncolored here so
# only the two reference lines take legend colors.
ggplot(pdata, aes(x=roll.date,y=median.date)) +
  # NOTE(review): show.guide is deprecated in current ggplot2
  # (use show.legend).
  geom_ribbon(aes(ymin=ymin, ymax=ymax,linetype=linetype, fill=fill), alpha=.2, size=.3, show.guide=FALSE) + geom_line(size=1) +
  geom_hline(data=tmp.ln, aes(yintercept=value, color=threshold), size=1) +
  scale_color_manual(name="", values=c("firebrick","steelblue")) +
  scale_fill_manual(values="black", guide=FALSE) +
  scale_linetype_manual(values="dashed", guide=FALSE) +
  labs(x="Forecast Date", y="Predicted Date") +
  coord_cartesian(xlim=c(as.Date("2014-08-20"),as.Date("2014-08-25")))
# Scratch work: shift a 5-point probability weight vector one step --
# drop the leading mass, halve the trailing mass, duplicate the halved
# tail, then renormalize so the weights sum to one.
a <- c(.1, .2, .4, .2, .1)[-1]
a[4] <- a[4] / 2
a <- a[c(1, 2, 3, 4, 4)]
a <- a / sum(a)
# Geometric (1/2^k) weights, printed for comparison.
1/2^c(1:5)
|
5d16c43f93eba8ce83d63d91fc0bad96793686e3
|
4320dcc8598eb1bf08ee2ebd71dcd2558fb579d8
|
/man/gn_search_all.Rd
|
e87616b19c0e1b18d7d13ff19b1a9ddb7c609198
|
[] |
no_license
|
jacob-ogre/us.geonames
|
74716ee395fc44aa4b472ff0b71b4f2a35e593aa
|
94b2f8b5a8adb415c8c351312685a545e6aabf09
|
refs/heads/master
| 2021-01-20T10:29:47.349100
| 2017-10-24T18:36:08
| 2017-10-24T18:36:08
| 100,292,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 756
|
rd
|
gn_search_all.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search.R
\name{gn_search_all}
\alias{gn_search_all}
\title{Search text for all geonames using \link{fastmatch}}
\usage{
gn_search_all(text, ngram_min = 1, ngram_max = 7)
}
\arguments{
\item{text}{Text to be searched for geonames; should be 'clean,' i.e., no
eol characters, multi-spaces replaced with singles, etc.}
\item{ngram_min}{Min ngram length to create of each \code{texts} (default = 1)}
\item{ngram_max}{Max ngram length to create of each \code{texts} (default = 7)}
}
\description{
Fast reverse-lookup search of all 2.27M place names in \code{geonames}.
Whereas a direct search matches each of the 2.27M geonames against the
\code{text}, this function tokenizes \code{text} into ngrams (of lengths
\code{ngram_min} to \code{ngram_max}) and looks those up against the place names.
}
|
c879df3e54561d047520d3239360290fb5d1f803
|
f5171500752e258406718a0d2f33e027e97e9225
|
/Simulators/Hardware/scripts/mf.gen.r
|
8a761010c7821a6288ade024e1db249f73932d2b
|
[] |
no_license
|
UFCCMT/behavioural_emulation
|
03e0c84db0600201ccb29a843a4998dcfc17b92a
|
e5fa2f1262d7a72ab770d919cc3b9a849a577267
|
refs/heads/master
| 2021-09-25T17:55:33.428634
| 2018-10-24T15:55:37
| 2018-10-24T15:55:37
| 153,633,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,245
|
r
|
mf.gen.r
|
# Script preamble: working directory and default input/output file names
# for a single conversion run.
# NOTE(review): setwd() to an absolute, user-specific path makes this
# script non-portable; prefer running from the project root or passing
# paths as arguments.
wd <- "C:/Users/Krishna/Desktop/Research/BEO_TILE/memory files/8x8 for threads 0,1,6,7/appBEOs"
in.file <- "appBEO_iROM_0.txt"
out.file <- "appBEO_iROM_0.mif"
setwd(wd)
getwd()
# Convert a textual appBEO instruction listing into an Altera/Intel .mif
# (Memory Initialization File) image.
#
# Each line of `in.file` is one instruction, packed into 32-bit words:
#   advt <t>                         -> opcode 1 + 28-bit advance time
#   send <tag> <did> <stime> <ntime> -> two words: opcode 8 + tag/did/stime,
#                                       then a 16-bit next-time word
#   recv <tag>                       -> opcode 4 + tag, zero-padded
#   noop                             -> one all-zero word
#   done                             -> opcode -1 (all ones) + zeros
# Remaining ROM words up to `depth` are zero-filled.  Stops with an error
# on any unrecognized line.
#
# Args:
#   in.file:  path to the appBEO text listing
#   out.file: path of the .mif file to write
mif.gen <- function(in.file, out.file){
  require(compositions)  # provides binary(); NOTE(review): library() preferred for a hard dependency
  # Bit widths of the packed fields in each instruction word.
  op.w    <- 4    # opcode
  tag.w   <- 6    # message tag
  did.w   <- 10   # destination id
  stime.w <- 12   # send time
  ntime.w <- 16   # next time (second word of a "send")
  advt.w  <- 28   # advance-time operand
  inst.w  <- 32   # full instruction width
  depth   <- 256  # ROM depth in words
  # .mif header.
  mif <- c(paste("WIDTH=",inst.w,";",sep=""),paste("DEPTH=",depth,";",sep=""), "", "ADDRESS_RADIX=UNS;", "DATA_RADIX=BIN;", "", "CONTENT BEGIN")
  ln.n <- 0  # next ROM address to write
  for( inst in readLines(in.file)){
    s.inst <- strsplit(inst," ")[[1]]
    # Fix: scalar && (short-circuiting) instead of vectorized & in these
    # scalar if() conditions.
    if(identical(s.inst[1],"advt") && (length(s.inst) == 2)){
      mif <- c(mif, paste(" ", ln.n, " : ", binary(1,mb=(op.w-1)) , binary(as.numeric(s.inst[2]),mb=(advt.w-1)), ";", sep="" ))
      ln.n <- ln.n + 1
    }else if(identical(s.inst[1],"send") && (length(s.inst) == 5)){
      # "send" occupies two consecutive ROM words.
      mif <- c(mif, paste(" ", ln.n, " : ", binary(8,mb=(op.w-1)) , binary(as.numeric(s.inst[2]),mb=(tag.w-1)) , binary(as.numeric(s.inst[3]),mb=(did.w-1)) , binary(as.numeric(s.inst[4]),mb=(stime.w-1)), ";", sep="" ))
      ln.n <- ln.n + 1
      mif <- c(mif, paste(" ", ln.n, " : ", binary(0,mb=(inst.w-ntime.w-1)) , binary(as.numeric(s.inst[5]),mb=(ntime.w-1)), ";", sep="" ))
      ln.n <- ln.n + 1
    }else if(identical(s.inst[1],"recv") && (length(s.inst) == 2)){
      mif <- c(mif, paste(" ", ln.n, " : ", binary(4,mb=(op.w-1)) , binary(as.numeric(s.inst[2]),mb=(tag.w-1)) , binary(0,mb=(inst.w-op.w-tag.w-1)), ";" , sep="" ))
      ln.n <- ln.n + 1
    }else if(identical(s.inst[1],"noop") && (length(s.inst) == 1)){
      mif <- c(mif, paste(" ", ln.n, " : ", binary(0,mb=(inst.w-1)), ";", sep=""))
      ln.n <- ln.n + 1
    }else if(identical(s.inst[1],"done") && (length(s.inst) == 1)){
      mif <- c(mif, paste(" ", ln.n, " : ", binary(-1,mb=(op.w-1)) , binary(0,mb=(advt.w-1)), ";", sep="" ))
      ln.n <- ln.n + 1
    }else{
      stop(inst)  # unrecognized instruction line
    }
  }
  # Zero-fill the unused ROM words.
  # Fix: the original `for(i in ln.n:(depth-1))` counts *down* when
  # ln.n == depth (a:b runs backwards for a > b), emitting two spurious
  # words past the ROM depth; a while loop pads only when needed.
  while (ln.n < depth) {
    mif <- c(mif, paste(" ", ln.n, " : ", binary(0,mb=(inst.w-1)), ";", sep="" ))
    ln.n <- ln.n + 1
  }
  mif <- c(mif, "END;")
  writeLines(mif,out.file)
}
mif.gen("appBEO_iROM_0.txt","appBEO_iROM_0.mif")
|
bc79eca5a55c1957dcba8d27667d2ab3e7c29664
|
165edd6be58684759ba6d45da6b121997ca768c9
|
/plot3.R
|
2adb5221a8938327088ec106a72d6e5701a0b29e
|
[] |
no_license
|
xinyudong93/ExData_Plotting1
|
ce095af380cabd9b46fa3017616c2a244ee422e7
|
64b3887819f7ba14d3b29495dfdb89b0b40b0216
|
refs/heads/master
| 2023-08-21T19:15:31.904381
| 2014-11-07T15:26:27
| 2014-11-07T15:26:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
plot3.R
|
# plot3.R -- plots the three energy sub-metering series for 2007-02-01/02
# from the UCI "household power consumption" data set into plot3.png.
# Fix: spelled out read.table's `stringsAsFactors` argument in full; the
# original `stringsAsFactor=` relied on fragile partial argument matching.
householdtable<-read.table("household_power_consumption.txt",stringsAsFactors=FALSE,sep=";",header=TRUE)
# Keep only the two target days (dates stored as d/m/Y strings).
extractedSet<-householdtable$Date=='1/2/2007'|householdtable$Date=='2/2/2007'
subSet<-householdtable[extractedSet,]
png("plot3.png")
# NOTE(review): as.difftime() is a misuse here -- its format argument is
# meant for time-of-day strings, not full date-times; the canonical
# approach is strptime()/as.POSIXct().  The hard-coded axis positions
# below (-2836 etc.) compensate for the odd difftime values this
# produces, so both would have to change together.
plot(as.difftime(paste(subSet$Date,subSet$Time),"%d/%m/%Y %H:%M:%S"),as.numeric(subSet$Sub_metering_1),type="l",axes=FALSE,xlab=NA,ylab="Energy sub metering")
points(as.difftime(paste(subSet$Date,subSet$Time),"%d/%m/%Y %H:%M:%S"),as.numeric(subSet$Sub_metering_2), col = "red", type = "l")
points(as.difftime(paste(subSet$Date,subSet$Time),"%d/%m/%Y %H:%M:%S"),as.numeric(subSet$Sub_metering_3), col = "blue", type = "l")
# Manually drawn axes: weekday labels at hand-tuned difftime positions.
axis(1,at=c(-2836.0,-2835.0,-2834.0),labels=c("Thu","Fri","Sat"))
axis(2)
box()
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lty=1)
dev.off()
|
522afdcb127dbb12073016eb850b0794abffb507
|
c903ed6ec9e5181ca7e045f288a8e40410d18934
|
/man/clean.Rd
|
05a33153caed7cbf7d1107b42d7feb34ca26829d
|
[] |
no_license
|
hrbrmstr/subtools
|
df92a1e75f18e734950ee247a295f0ed21cf41d4
|
ff4469d9302f90ff1fa4c0182aeb61633d0f411e
|
refs/heads/master
| 2020-06-01T11:05:54.669427
| 2019-06-07T15:13:23
| 2019-06-07T15:13:23
| 190,758,490
| 0
| 0
| null | 2019-06-07T14:35:59
| 2019-06-07T14:35:58
| null |
UTF-8
|
R
| false
| true
| 961
|
rd
|
clean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_subtitles.R
\name{cleanTags}
\alias{cleanTags}
\alias{cleanCaptions}
\alias{cleanPatterns}
\title{Clean subtitles}
\usage{
cleanTags(x, format = "srt", clean.empty = TRUE)
cleanCaptions(x, clean.empty = TRUE)
cleanPatterns(x, pattern, clean.empty = TRUE)
}
\arguments{
\item{x}{a \code{Subtitles} or \code{MultiSubtitles} object.}
\item{format}{the original format of the \code{Subtitles} objects.}
\item{clean.empty}{logical. Should remaining empty lines ("") be deleted after cleaning?}
\item{pattern}{a character string containing a regular expression to be matched and cleaned.}
}
\value{
A \code{Subtitles} or \code{MultiSubtitles} object.
}
\description{
Functions to clean subtitles. \code{cleanTags} cleans formatting tags.
\code{cleanCaptions} cleans close captions.
\code{cleanPatterns} provides a more general and flexible cleaning based on regular expressions.
}
|
552bd4c806dccfa712c40ff0f37cf67296d34632
|
c201d8f03eb195d1c16dc7e7bdbd5eb1777aaa79
|
/man/process_comments.Rd
|
9860e44eda4623138652a5eaf516812e1aa06930
|
[
"MIT"
] |
permissive
|
lizbethvj/search_reddit
|
824f44c04790b9ff8c66d0ec080e13e3f2eaf51a
|
74491dd77006d1f68d310076d39aac22b2ed00bf
|
refs/heads/master
| 2023-04-12T07:21:33.792490
| 2021-05-03T22:50:59
| 2021-05-03T22:50:59
| 360,658,537
| 0
| 0
| null | 2021-05-02T19:30:29
| 2021-04-22T19:23:25
|
R
|
UTF-8
|
R
| false
| true
| 574
|
rd
|
process_comments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_comments.R
\name{process_comments}
\alias{process_comments}
\title{Process Comments for tidytext}
\usage{
process_comments(comments, other_stop_words = "gt")
}
\arguments{
\item{comments}{data frame, result of \code{extract_comments} function}
\item{other_stop_words}{character vector of words to be removed, e.g. for Reddit it's 'gt', an artifact of inline HTML in the comments; for multiple words use c("word1", "word2")}
}
\value{
data frame
}
\description{
Process Comments for tidytext
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.