blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cee17c33c8a2ef5a0bf5284c4ff6f75eb7391352
|
5b345b8a1c60a40853dc67543b4b23635ca52af8
|
/R/oblicz_stale_czasowe.R
|
5b616cb38dbba6bddcdd8b4d73f86c63eff44d66
|
[] |
no_license
|
tzoltak/MLAKdane
|
9dd280e628a1434ef3e0433a7adab8ee6653e258
|
3ff0567b98729648cd54cbb118d55d6bcd5d7bd3
|
refs/heads/master
| 2021-01-12T08:24:27.554590
| 2016-11-14T15:39:47
| 2016-11-14T15:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 654
|
r
|
oblicz_stale_czasowe.R
|
#' Computes time-period constants and converts existing period constants back
#' to dates
#' @param dane a ZDAU data frame (or a derivative of one)
#' @param data_badania date of the end of the examined period
#' @return data.frame
#' @export
#' @import dplyr
oblicz_stale_czasowe <- function(dane, data_badania) {
  # mutate() evaluates its arguments sequentially, so `nokr`, `mcdyp` and
  # `rokdyp` still see `data_zak` as a period number before the last two
  # lines convert `data_rozp`/`data_zak` into dates.
  # (The deprecated mutate_()/lazyeval-formula interface was replaced with
  # plain mutate(); removed from dplyr >= 1.0.)
  dane <- dane %>%
    mutate(
      nokr = data2okres(data_badania) - data_zak,
      mcstart = okres2miesiac(data_rozp),
      rokstart = okres2rok(data_rozp),
      mcdyp = okres2miesiac(data_zak),
      rokdyp = okres2rok(data_zak),
      data_rozp = okres2data(data_rozp),
      data_zak = okres2data(data_zak)
    )
  return(dane)
}
|
7237504a8f57d137d42244c49b3206bbd79f3bda
|
acc4881b822ffa781e47e55a2c8c56df0100440d
|
/man/model.fake.par.Rd
|
08c30f974bd5ba746132bc7d996b189fb4215a9d
|
[] |
no_license
|
cran/sgr
|
693368d448056bb68616c471f69fc06292226829
|
d1df9176782321241b0c5718a010ce87130b6892
|
refs/heads/master
| 2023-04-06T05:36:43.974635
| 2022-04-14T13:30:02
| 2022-04-14T13:30:02
| 17,699,634
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,643
|
rd
|
model.fake.par.Rd
|
\name{model.fake.par}
\alias{model.fake.par}
\title{
Internal function.
}
\description{
Set different instances of the conditional replacement distribution.
}
\usage{
model.fake.par(fake.model = c("uninformative", "average", "slight", "extreme"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fake.model}{A character string
indicating the model for the conditional replacement distribution.
The options are: \code{uninformative} (default option) [\code{gam = c(1,1)} and \code{del = c(1,1)}]; \code{average} [\code{gam = c(3,3)} and \code{del = c(3,3)}];
\code{slight} [\code{gam = c(1.5,4)} and \code{del = c(4,1.5)}]; \code{extreme} [\code{gam = c(4,1.5)} and \code{del = c(1.5,4)}].}
}
\value{
Gives a list with \eqn{\gamma} and \eqn{\delta} parameters.
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
Massimiliano Pastore
}
%\note{
%% ~~further notes~~
%}
\references{
Lombardi, L., Pastore, M. (2014). sgr: A Package for Simulating Conditional Fake Ordinal Data. \emph{The R Journal}, 6, 164-177.
Pastore, M., Lombardi, L. (2014). The impact of faking on Cronbach's Alpha for
dichotomous and ordered rating scores. \emph{Quality & Quantity}, 48, 1191-1211.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
% \code{\link{dgBetaD}}, \code{\link{pfake}}, \code{\link{pfakegood}}, \code{\link{pfa%kebad}}
%}
\examples{
model.fake.par() # default
model.fake.par("average")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{utility}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
2d5db523dd65751720114850542158dd2324595e
|
7a10e3e78d2e6f276ce8358b0b196363b880b902
|
/ggplotGraphics2.R
|
1ba4cda638677ea3460606a4c15b837418d0f0ff
|
[] |
no_license
|
dhackenburg/Bio381_2018
|
508792c1afd5073656060d6d02ced00bad620974
|
f609f3cabb1c925215cd09662b9882169b76aceb
|
refs/heads/master
| 2021-05-05T09:54:08.286930
| 2018-07-21T15:15:39
| 2018-07-21T15:15:39
| 117,878,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,997
|
r
|
ggplotGraphics2.R
|
# ggplot graphics
# 5 April 2018
# DMH
# preliminaries
library(ggplot2)
library(ggthemes)
library(patchwork)
library(TeachingDemos)
# seed the RNG from a character string (TeachingDemos helper)
char2seed("10th Avenue Freeze-Out")
d <- mpg
str(d)
# create 4 individual graphs
# graph 1: scatter plot of city mileage vs displacement, with a smoother
g1 <- ggplot(data=d, mapping=aes(x=displ,y=cty)) +
geom_point() +
geom_smooth()
print(g1)
# graph 2: bar chart of counts per fuel type
g2 <- ggplot(data=d,mapping=aes(x=fl,fill=I("tomato"),color=I("black"))) + geom_bar(stat="count") + theme(legend.position="none")
print(g2)
# graph 3: histogram of engine displacement
g3 <- ggplot(data=d, mapping=aes(x=displ,fill=I("royalblue"),color=I("black"))) + geom_histogram()
print(g3)
# graph 4: boxplots of city mileage by fuel type
g4 <- ggplot(data=d,mapping=aes(x=fl,y=cty,fill=fl)) + geom_boxplot() + theme(legend.position="none")
print(g4)
# patchwork for awesome multipanel graphs
# place two plots horizontally
g1 + g2
# place 3 plots vertically
g1 + g2 + g3 + plot_layout(ncol=1)
# change relative area of each plot
g1 + g2 + plot_layout(ncol=1,heights=c(2,1))
g1 + g2 + plot_layout(ncol=2,widths=c(2,1))
# add a spacer plot (under construction)
g1 + plot_spacer() + g2
# set up nested plots (braces group sub-layouts)
g1 + {
g2 + {
g3 +
g4 +
plot_layout(ncol=1)
}
} +
plot_layout(ncol=1)
g1+g2 + g3 + plot_layout(ncol=1)
# / and | operators for very intuitive layouts
(g1 | g2 | g3)/g4
(g1 | g2)/(g3 | g4)
# swapping axis orientation within a plot
g3a <- g3 + scale_x_reverse()
g3b <- g3 + scale_y_reverse()
g3c <- g3 + scale_x_reverse() + scale_y_reverse()
(g3 | g3a)/(g3b | g3c)
# switch orientation of coordinates
(g3 + coord_flip() | g3a + coord_flip())/(g3b + coord_flip() | g3c + coord_flip())
# ggsave for creating and saving plots
ggsave(filename="MyPlot.pdf",plot=g3,device="pdf",width=20,height=20,units="cm",dpi=300)
# mapping of variables to aesthetics
m1 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,color=class)) + geom_point()
print(m1)
m2 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,shape=class,color=class)) + geom_point()
m2
# mapping of a discrete variable to point size
m3 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,size=class,color=class)) + geom_point()
m3
# mapping a continuous variable to point size
m4 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,size=hwy,color=hwy)) + geom_point()
m4
# map a continuous variable to color
m5 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,color=hwy)) + geom_point()
m5
# mapping two variables to two different aesthetics
m6 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,shape=class,color=hwy)) + geom_point()
m6
# mapping 3 variables onto shape, size and color
m7 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,shape=drv,color=fl,size=hwy)) + geom_point()
m7
# mapping a variable to the same aesthetic for two different geoms
m8 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty,color=drv)) + geom_point() + geom_smooth(method="lm") # geom_smooth adds a regression line with its confidence interval
m8
# faceting for excellent visualization in a set of related plots
m9 <- ggplot(data=mpg, mapping=aes(x=displ,y=cty)) + geom_point()
m9 + facet_grid(class~fl)
m9 + facet_grid(class~fl,scales="free_y") # frees the y axis per panel row, easing comparison within a row
m9 + facet_grid(class~fl,scales="free")
# facet on only a single variable
m9 + facet_grid(.~class)
m9 + facet_grid(class~.)
# use facet wrap for unordered graphs
m9 + facet_wrap(~class)
m9 + facet_wrap(~class + fl)
m9 + facet_wrap(~class + fl,drop=FALSE)
# use facet in combination with aesthetics
m10 <- ggplot(data=mpg,mapping=aes(x=displ,y=cty,color=drv)) + geom_point()
m10 + facet_grid(.~class)
m11 <- ggplot(data=mpg,mapping=aes(x=displ,y=cty,color=drv)) + geom_smooth(method="lm",se=FALSE) # se=FALSE suppresses the standard-error confidence band
m11 + facet_grid(.~class)
# fitting with boxplots over a continuous variable
m12 <- ggplot(data=mpg,mapping=aes(x=displ,y=cty)) + geom_boxplot()
m12 + facet_grid(.~class)
m13 <- ggplot(data=mpg,mapping=aes(x=displ,y=cty,group=drv,fill=drv)) + geom_boxplot()
m13
m13 + facet_grid(.~class)
|
0da61db862ade69365c6e0b969e65646c7f2a85b
|
d816b9a672e7fcd18f34d9f41426b0678715da41
|
/man/hdi.Rd
|
cf17a5989ef4c26c5f46d4b4926dc2a232883ee0
|
[] |
no_license
|
cran/hdi
|
dd85bac2a284df87cf54ecc670101de626fe212e
|
3f51705e4e07701de8cc58fa12c2dab62cd2cf9d
|
refs/heads/master
| 2021-07-05T10:34:05.280375
| 2021-05-27T12:10:02
| 2021-05-27T12:10:02
| 17,696,611
| 2
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,134
|
rd
|
hdi.Rd
|
\name{hdi}
\alias{hdi}
\title{Function to perform inference in high-dimensional (generalized) linear models}
\description{Perform inference in high-dimensional (generalized) linear
models using various approaches.}
\usage{
hdi(x, y, method = "multi.split", B = NULL, fraction = 0.5,
model.selector = NULL, EV = NULL, threshold = 0.75,
gamma = seq(0.05, 0.99, by = 0.01),
classical.fit = NULL,
args.model.selector = NULL, args.classical.fit = NULL,
verbose = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{Design matrix (without intercept).}
\item{y}{Response vector.}
\item{method}{Multi-splitting ("multi.split") or stability-selection
("stability").}
\item{B}{Number of sample-splits (for "multi.split") or sub-sample
    iterations (for "stability"). Default is 50 ("multi.split") or 100
    ("stability"). Ignored otherwise.}
\item{fraction}{Fraction of data used at each of the B iterations.}
\item{model.selector}{Function to perform model selection. Default is
\code{\link{lasso.cv}} ("multi.split") and
\code{\link{lasso.firstq}} ("stability"). Function must have at
least two arguments: x (the design matrix) and y (the response
vector). Return value is the index vector of selected columns. See
\code{\link{lasso.cv}} and \code{\link{lasso.firstq}} for
examples. Additional arguments can be passed through
\code{args.model.selector}.}
\item{EV}{(only for
    "stability"). Bound(s) for the expected number of false positives. Can
    be a vector.}
\item{threshold}{(only for "stability"). Bound on selection frequency.}
\item{gamma}{(only for "multi.split"). Vector of gamma-values.}
\item{classical.fit}{(only
for "multi.split"). Function to calculate (classical)
p-values. Default is \code{\link{lm.pval}}. Function must
have at least two arguments: x (the design matrix) and y (the
response vector). Return value is the vector of p-values. See
\code{\link{lm.pval}} for an example. Additional arguments can be
passed through \code{args.classical.fit}.}
\item{args.model.selector}{Named list of further arguments for
function \code{model.selector}.}
\item{args.classical.fit}{Named list of further arguments for function
\code{classical.fit}.}
\item{verbose}{Should information be printed out while computing
(logical).}
\item{...}{Other arguments to be passed to the underlying functions.}
}
%\details{}
\value{
\item{pval}{(only for "multi.split"). Vector of p-values.}
\item{gamma.min}{(only for "multi.split"). Value of gamma where
minimal p-values was attained.}
\item{select}{(only for "stability"). List with selected predictors
for the supplied values of EV.}
\item{EV}{(only for "stability"). Vector of corresponding values of EV.}
\item{thresholds}{(only for "stability"). Used thresholds.}
\item{freq}{(only for "stability"). Vector of selection frequencies.}
}
\references{
Meinshausen, N., Meier, L. and \enc{Bühlmann}{Buhlmann}, P. (2009)
P-values for high-dimensional regression.
\emph{Journal of the American Statistical Association} \bold{104}, 1671--1681.
Meinshausen, N. and \enc{Bühlmann}{Buhlmann}, P. (2010)
Stability selection (with discussion).
\emph{Journal of the Royal Statistical Society: Series B} \bold{72}, 417--473.
}
\author{Lukas Meier}
\seealso{
\code{\link{stability}}, \code{\link{multi.split}}
}
\examples{
x <- matrix(rnorm(100 * 200), nrow = 100, ncol = 200)
y <- x[,1] * 2 + x[,2] * 2.5 + rnorm(100)
## Multi-splitting with lasso.firstq as model selector function
fit.multi <- hdi(x, y, method = "multi.split",
model.selector =lasso.firstq,
args.model.selector = list(q = 10))
fit.multi
fit.multi$pval.corr[1:10] ## the first 10 p-values
## Stability selection
fit.stab <- hdi(x, y, method = "stability", EV = 2)
fit.stab
fit.stab$freq[1:10] ## frequency of the first 10 predictors
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
\keyword{regression}% __ONLY ONE__ keyword per line
|
95eb4fffe2b4441f642b4779a6d41e72d14f91ea
|
2a2d771ab408218a642f8639c5c2bfc683aece21
|
/man/splitDateTime.Rd
|
eb5a441977516d503912ae3a3d52b2c78e275a8e
|
[] |
no_license
|
mmiche/esmprep
|
7a525f6d3dfc5365f3c1ef4040c28225bef89e0f
|
8cd3330d9621ba6e69b2d9aa8df62d97eb988a95
|
refs/heads/master
| 2021-01-20T10:28:47.764859
| 2019-07-05T11:15:49
| 2019-07-05T11:15:49
| 101,635,503
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,270
|
rd
|
splitDateTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitDateTime.R
\name{splitDateTime}
\alias{splitDateTime}
\title{splitDateTime}
\usage{
splitDateTime(refOrEsDf = NULL, refOrEs = NULL,
RELEVANTINFO_ES = NULL, RELEVANTVN_ES = NULL,
RELEVANTVN_REF = NULL, dateTimeFormat = "ymd_HMS")
}
\arguments{
\item{refOrEsDf}{a data.frame. Either the reference dataset or the event sampling raw dataset (already merged to a single dataset).}
\item{refOrEs}{a character string. Enter "REF" if the argument refOrEs is the reference dataset, enter "ES" if it is the event sampling dataset.}
\item{RELEVANTINFO_ES}{a list. This list is generated by function \code{\link{setES}}.}
\item{RELEVANTVN_ES}{a list. This list is generated by function \code{\link{setES}} and it is extended once either by function \code{\link{genDateTime}} or by function \code{\link{splitDateTime}}.}
\item{RELEVANTVN_REF}{a list. This list is generated by function \code{\link{setREF}} and it is extended once either by function \code{\link{genDateTime}} or by function \code{\link{splitDateTime}}.}
\item{dateTimeFormat}{a character string. Choose the current date-time format, "ymd_HMS" (default), "mdy_HMS", or "dmy_HMS".}
}
\value{
The dataset that was passed as first argument with four additional columns, i.e. the separate date and time objects of the combined date-time objects of both ESM start and ESM end. See \strong{Details} for more information.
}
\description{
splitDateTime splits a date-time object into its components date and time.
}
\details{
Splitting up a date-time object means to separate it into a date-object, e.g. 2007-10-03 and a time-object, e.g. 12:00:00.
}
\examples{
# o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o
# Prerequisites in order to execute splitDateTime. Start ------------
# keyLsNew is delivered with the package. Remove the separate date
# and time for both start and end in each of the ESM datasets.
keyLsNewDT <- sapply(keyLsNew, function(x) {
x <- x[,-match(c("start_date", "start_time",
"end_date", "end_time"), names(x))]
return(x) } )
relEs <- relevantESVN(svyName="survey_name", IMEI="IMEI",
START_DATETIME="ES_START_DATETIME", END_DATETIME="ES_END_DATETIME")
imeiNumbers <- as.character(referenceDf$imei)
surveyNames <- c("morningTestGroup", "dayTestGroup", "eveningTestGroup",
"morningControlGroup", "dayControlGroup", "eveningControlGroup")
RELEVANT_ES <- setES(4, imeiNumbers, surveyNames, relEs)
RELEVANTINFO_ES <- RELEVANT_ES[["RELEVANTINFO_ES"]]
RELEVANTVN_ES <- RELEVANT_ES[["RELEVANTVN_ES"]]
# referenceDfNew is delivered with the package. Remove the separate
# date and time for both start and end.
referenceDfNewDT <- referenceDfNew[,-match(c("start_date", "start_time",
"end_date", "end_time"), names(referenceDfNew))]
relRef <- relevantREFVN(ID="id", IMEI="imei", ST="st",
START_DATETIME="REF_START_DATETIME", END_DATETIME="REF_END_DATETIME")
RELEVANTVN_REF <- setREF(4, relRef)
# Prerequisites in order to execute splitDateTime. End --------------
# ------------------------------------------------------
# Run function 7 of 29; see esmprep functions' hierarchy.
# ------------------------------------------------------
# Applying function to reference dataset (7a of 29)
referenceDfList <- splitDateTime(referenceDfNewDT, "REF", RELEVANTINFO_ES, RELEVANTVN_ES,
RELEVANTVN_REF)
# Extract reference dataset from output
referenceDfNew <- referenceDfList[["refOrEsDf"]]
names(referenceDfNew)
# Extract extended list of relevant variables names of reference dataset
RELEVANTVN_REF <- referenceDfList[["extendedVNList"]]
# Applying function to raw ESM dataset(s) (7b of 29)
# keyLs is the result of function 'genKey'.
keyList <- splitDateTime(keyLsNewDT, "ES", RELEVANTINFO_ES, RELEVANTVN_ES,
RELEVANTVN_REF)
# Extract list of raw ESM datasets from output
keyLsNew <- keyList[["refOrEsDf"]]
# Extract extended list of relevant variables names of raw ESM datasets
RELEVANTVN_ES <- keyList[["extendedVNList"]]
# o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o=o
}
\seealso{
Exemplary code (fully executable) in the documentation of \code{\link{esmprep}} (function 27 of 29).\cr \code{splitDateTime} is the reverse function of \code{\link{genDateTime}}.
}
|
fcdd8f863da69fc17f308f0517cf031cabf39a5a
|
369fd863417f6a3bade3e0b7f90302e0fde76815
|
/sbtest5download/PdfDownload/ui.R
|
cd10aba8746a02291629acf132f5e8c76a8d5059
|
[] |
no_license
|
jeffnorville/shinysb1
|
f4338e3e0c4020694cc305cea2ff4507eee143a4
|
f02f3b78a232903254e41aa6dbbfebaefed1e18e
|
refs/heads/master
| 2020-04-06T03:33:24.348633
| 2016-09-15T14:21:59
| 2016-09-15T14:21:59
| 57,279,961
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 557
|
r
|
ui.R
|
# IMPREX download doc test
# Shiny UI: a sidebar with a "save as PDF" toggle plus size sliders, and a
# main panel showing the plot.
library(shiny)
pageWithSidebar(
  headerPanel("Output to PDF"),
  sidebarPanel(
    checkboxInput('returnpdf', 'output pdf?', FALSE),
    # the size controls and download link are only shown once the user
    # ticks the PDF checkbox
    conditionalPanel(
      condition = "input.returnpdf == true",
      strong("PDF size (inches):"),
      sliderInput(inputId="w", label = "width:", min=3, max=20, value=8, width=100, ticks=FALSE),
      sliderInput(inputId="h", label = "height:", min=3, max=20, value=6, width=100, ticks=FALSE),
      br(),
      downloadLink('pdflink')
    )
  ),
  # the original wrapped mainPanel() inside mainPanel(), nesting the layout
  # container twice; a single call is sufficient
  mainPanel(plotOutput("myplot"))
)
|
986b1f35b620e18588ad92c01ad14f0e1fbc189b
|
caf56f313d6e34f4da4c5a0a29d31ff86262533a
|
/R/tibble.R
|
1760626ce8b1e356c531ec38b0dcb2b917290a89
|
[] |
no_license
|
bhive01/tibble
|
c00b4894e4067a2d6443a33808649bf327367b3a
|
7c0aca252cba66ff02e48e9a9dffea816ffe4d4f
|
refs/heads/master
| 2021-01-17T04:56:06.995980
| 2016-03-19T00:43:30
| 2016-03-19T00:43:30
| 54,232,754
| 0
| 1
| null | 2016-03-19T00:43:30
| 2016-03-18T21:34:01
|
R
|
UTF-8
|
R
| false
| false
| 1,329
|
r
|
tibble.R
|
#' @useDynLib tibble
#' @importFrom Rcpp sourceCpp
#' @import assertthat
#' @importFrom utils head tail
#' @aliases NULL
#' @section Getting started:
#' See \code{\link{tbl_df}} for an introduction,
#' \code{\link{data_frame}} and \code{\link{frame_data}} for construction,
#' \code{\link{as_data_frame}} for coercion,
#' and \code{\link{print.tbl_df}} and \code{\link{glimpse}} for display.
"_PACKAGE"
#' @name tibble-package
#' @section Package options:
#' Display options for \code{tbl_df}, used by \code{\link{trunc_mat}} and
#' (indirectly) by \code{\link{print.tbl_df}}.
#' \describe{
# NOTE: the roxygen \item tags are interleaved with the list entries below so
# that each option's documentation sits next to its default value; the outer
# parentheses make the assignment auto-print when sourced interactively.
(op.tibble <- list(
#' \item{\code{tibble.print_max}}{Row number threshold: Maximum number of rows
#' printed. Set to \code{Inf} to always print all rows. Default: 20.}
tibble.print_max = 20L,
#' \item{\code{tibble.print_min}}{Number of rows printed if row number
#' threshold is exceeded. Default: 10.}
tibble.print_min = 10L,
#' \item{\code{tibble.width}}{Output width. Default: \code{NULL} (use
#' \code{width} option).}
tibble.width = NULL
#' }
))
# Look up a display option: prefer the "tibble."-prefixed option, fall back to
# the legacy "dplyr."-prefixed one, and finally to the package default stored
# in `op.tibble`.
tibble_opt <- function(x) {
  for (prefix in c("tibble.", "dplyr.")) {
    value <- getOption(paste0(prefix, x))
    if (!is.null(value)) {
      return(value)
    }
  }
  op.tibble[[paste0("tibble.", x)]]
}
|
46dd347ddd7677618e260e62c71d3a9a2c8f8ece
|
d60a4a66919a8c54d29a4677574b418107b4131d
|
/man/REDWINE.Rd
|
f9a1be2bfc6e4cddb90510ca4c62211303bc8146
|
[] |
no_license
|
cran/tsapp
|
65203e21a255e832f0ad9471f9ee308793eb7983
|
f2679a3d5ee0e3956a4ba013b7879324f77cf95f
|
refs/heads/master
| 2021-11-12T21:18:18.835475
| 2021-10-30T10:30:02
| 2021-10-30T10:30:02
| 248,760,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 576
|
rd
|
REDWINE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/series.r
\docType{data}
\name{REDWINE}
\alias{REDWINE}
\title{Monthly sales of Australian red wine (1000 l)}
\format{
REDWINE is a univariate time series of length 187; start January 1980, frequency =12
\describe{
\item{REDWINE}{Monthly sales of Australian red wine }
}
}
\source{
R package tsdl <https://github.com/FinYang/tsdl>
}
\usage{
REDWINE
}
\description{
Monthly sales of Australian red wine (1000 l)
}
\examples{
data(REDWINE)
## maybe tsp(REDWINE) ; plot(REDWINE)
}
\keyword{datasets}
|
0b9633a20d1737b24b3945a52dab43f5bb9ac7dc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hmm.discnp/examples/predict.hmm.discnp.Rd.R
|
99d389070612427e2694054c4cc54de047658a3c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 620
|
r
|
predict.hmm.discnp.Rd.R
|
library(hmm.discnp)
### Name: predict.hmm.discnp
### Title: Predicted values of a discrete non-parametric hidden Markov
###   model.
### Aliases: predict.hmm.discnp
### Keywords: models
### ** Examples
# 2x2 transition probability matrix (each row sums to 1); passed as tpm=
P <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)
# 5x2 matrix passed as Rho= — presumably per-state emission probabilities;
# see ?rhmm to confirm the expected orientation
R <- matrix(c(0.5,0,0.1,0.1,0.3,
0.1,0.1,0,0.3,0.5),5,2)
set.seed(42)
# 20 simulated sequences with random lengths between 250 and 350
ll1 <- sample(250:350,20,TRUE)
y1 <- rhmm(ylengths=ll1,nsim=1,tpm=P,Rho=R,drop=TRUE)
# fit a 2-state HMM to the simulated data (at most 10 iterations)
fit <- hmm(y1,K=2,verb=TRUE,keep.y=TRUE,itmax=10)
fv <- fitted(fit)
set.seed(176)
# fresh data simulated from the same model, used for out-of-sample prediction
ll2 <- sample(250:350,20,TRUE)
y2 <- rhmm(ylengths=ll2,nsim=1,tpm=P,Rho=R,drop=TRUE)
pv <- predict(fit,y=y2)
|
e0dc5bf4982dae091d10dc3cc1434adb71df355d
|
303ee8c30e03e6bf734e69e1e00f43fefaf3bda4
|
/AllCharts/PieChart.R
|
7ba06dbb05f8b63ccd2f7f998832d6c7c3220d27
|
[] |
no_license
|
zt2730/Rplot
|
d2d57c331283d309dd8ae1d41425874ee432e291
|
a4979f63029b26912c43eb4d631e04c489ca7328
|
refs/heads/master
| 2021-01-01T03:33:35.002731
| 2016-05-24T21:37:27
| 2016-05-24T21:37:27
| 59,609,059
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
PieChart.R
|
library(ggplot2)
# Pie chart: a single stacked bar transformed into polar coordinates
data <- textConnection("Category,Value
Category A,5
Category B,4
Category C,3
Category D,2
Category E,1
")
# `header = TRUE` spelled out (was `h=T`: partial argument matching combined
# with the reassignable T alias)
data <- read.csv(data, header = TRUE)
p <- ggplot(aes(x=factor(1), fill=Category, weight=Value), data=data)
p + geom_bar(width = 1) +
  coord_polar(theta="y") +
  scale_fill_discrete("Legend Title") +
  labs(x="X Label", y="Y Label", title="An Example Pie Chart")
# full output: http://www.yaksis.com/static/img/03/large/PieChart.png
|
4e4444437ff4f03407d8c1fb769b8c98627aaa1e
|
e786517480475f327d99a4638e1b787004166d77
|
/handwriting/rf_and_bagging.R
|
6221bca611331a64b7335d92d664f3f72c92d3a8
|
[] |
no_license
|
asterix135/kaggle
|
217d8c41ba0832698e90d4730b95437d94f47d22
|
45971047b42b8120e756b1608a6154946156e6d2
|
refs/heads/master
| 2021-01-10T14:00:54.158976
| 2015-12-11T01:32:26
| 2015-12-11T01:32:29
| 46,736,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,476
|
r
|
rf_and_bagging.R
|
#####
# Building some alternate models
#####
# Make sure working directory is where the data is saved
# NOTE(review): hard-coded absolute path only works on the author's machine.
if (getwd() != "/Users/christophergraham/Documents/Code/kaggle/handwriting") {
setwd("/Users/christophergraham/Documents/Code/kaggle/handwriting")
}
require(caret)
require(e1071)
num_data <- read.csv('train.csv')
num_data$label <- as.factor(num_data$label)
set.seed(2943587)
# 70/30 stratified split; the 30% holdout is then split again into
# test/validation sets
train_crit <- createDataPartition(y=num_data$label, p=0.7, list = FALSE)
training <- num_data[train_crit,]
testing <- num_data[-train_crit,]
test_crit <- createDataPartition(y=testing$label, p=0.7, list=FALSE)
validation <- testing[-test_crit,]
# NOTE(review): this indexes `training` with `test_crit`, but `test_crit` was
# built on `testing` — almost certainly meant `testing[test_crit,]`. As
# written the test set overlaps the training set; confirm before trusting
# any test accuracy.
testing <- training[test_crit,]
#PCA pre-processing
# 1. Get rid of zero-variance variables
drop_cols <- nearZeroVar(training)
training <- training[,-drop_cols]
testing <- testing[,-drop_cols]
validation <- validation[,-drop_cols]
# Build Simple RF Model
rf_model <- train(label ~ ., data=training, method='rf')
# PCA
preObj <- preProcess(training[,-1], method=c('pca'), thresh=0.99)
# where do we have all 0s or zero variance?
# NOTE(review): `num_pca` is never defined anywhere in this file, so the next
# two lines fail at runtime. It looks like prcomp()-style output (`$x`) was
# confused with caret's preProcess object (`preObj`, used via predict()) —
# confirm which was intended.
train_pca <- cbind(label = training$label, num_pca$x[,485])
test_pca <- predict(num_pca, testing)
test_pca <- data.frame(cbind(label = testing$label, test_pca))
test_rf_pred <- predict(rf_model, test_pca)
# Better: use k-fold = ask prof about this
train_control <- trainControl(method = 'cv', number =10)
rf_model2 <- train(label ~ ., data=training, trControl = train_control,
method='rf')
|
d4779a89c08c174d3aec3d0c3cbee6499a7bfd35
|
8244df3775912c290eaf3df9a457019dc2d7b6a9
|
/kmz/move_kmz_destination.R
|
0f1d4c2a8f5cf2f336a1220eab1b1d141633db78
|
[] |
no_license
|
CIAT-DAPA/cwr_verticaltask
|
6e247e3ec32a600d13c4beb3986154d68a37f5b2
|
63da126832048383cea0a8b5b62641ef105fc5b5
|
refs/heads/master
| 2021-01-21T21:38:58.859729
| 2016-05-31T15:01:29
| 2016-05-31T15:01:29
| 34,528,073
| 1
| 1
| null | 2015-11-26T14:46:42
| 2015-04-24T16:01:07
|
Java
|
UTF-8
|
R
| false
| false
| 4,820
|
r
|
move_kmz_destination.R
|
###################### Dependencies #######################
library(parallel)
###################### Configuration #######################
# Root folder holding one sub-folder per crop (source of the KMZ files).
work_home <- ""
# Destination root the KMZ files are copied into.
work_destination <- ""
# When TRUE, destination folders are created and existing files overwritten.
work_force <- FALSE
# Cores for the (currently commented-out) mclapply run.
work_cores <- 10
###################### Internal Variables #######################
root_dirs <- NULL
# Extension used to match files; NOTE(review): matching is case-sensitive —
# confirm the files on disk really use upper-case ".KMZ".
file_extension <- ".KMZ"
folder_richness_gap <- "gap_richness"
folder_richness_species <-"species_richness"
folder_taxon_distribution <-"models"
folder_taxon_priorities <-"gap_spp"
###################### Functions #######################
# Create <work_destination>/<path> when forcing is enabled and it is absent.
# Fix: the original called file.path(c(work_destination, path)), which returns
# the unmodified 2-element vector instead of a joined path, so file.exists()
# produced a length-2 logical and the directory was never created (and `&&`
# errors on it in R >= 4.3).
force_folder <- function(path){
  target <- file.path(work_destination, path)
  if (work_force && !file.exists(target)) {
    dir.create(target)
  }
}
# Copy every file under `from` whose name matches `extension` into
# <to>/<file-name-without-extension>/, writing a *_NOT_COPIED_KMZ.RUN marker
# into the crop folder whenever the target directory turned out not to exist.
# NOTE(review): the pattern paste0("*", extension) is a glob, but list.files()
# expects a regex ("\\.KMZ$"); as written it relies on regex quirks — confirm
# it matches only the intended files.
file_copy_ext <- function(from, to, extension,crop){
files <- list.files(path=from,pattern=paste0("*",extension))
copy <- lapply(files,function(file){
from_full_path <- file.path(from,file)
# target sub-directory is the file name without its extension ...
temp_dir <- gsub(extension, "", file)
# ... except the richness folders, where underscores become hyphens
if(temp_dir == folder_richness_species || temp_dir == folder_richness_gap){
temp_dir <- gsub("_", "-", temp_dir)
}
to_full_path <- file.path(to,temp_dir,file)
file.copy(from=from_full_path, to=to_full_path, overwrite = work_force, recursive = FALSE, copy.mode = TRUE)
# file.copy silently fails when the target directory does not exist, so
# record a marker file and log the miss
if(!file.exists(file.path(to,temp_dir))){
x<-to_full_path
fpath<-paste0(work_destination,"/",crop)
sname<-gsub(paste0(fpath,"/"),"",to)
write.csv(x,paste0(fpath,"/",sname,"_",file,"_","NOT_COPIED_KMZ.RUN"),row.names = F,quote=F)
cat(file.path(to,temp_dir),"|NOT MATCH|", "\n")
}else{
# x<-to_full_path
# write.csv(x,paste0(to,"/",temp_dir,"/","COPIED_KMZ.RUN"))
cat(file.path(to,temp_dir),"|MATCH|", "\n")
}
})
}
# Copy every KMZ category for one crop into the destination tree.
# Each step pairs the label printed for progress with the sub-folder
# (relative to the crop directory) holding that category's KMZ files.
process_crop <- function(crop){
  print(paste0(crop," starts to move in destination folder"))
  force_folder(crop)
  if(file.exists(file.path(work_destination,crop))){
    steps <- list(
      list(label = " SPECIES_RICHNESS processing",
           rel = folder_richness_species),
      list(label = " GAP_RICHNESS HPS processing",
           rel = file.path(folder_richness_gap, "HPS")),
      list(label = " MODELS processing",
           rel = folder_taxon_distribution),
      list(label = " GAP_SPP HPS processing",
           rel = file.path(folder_taxon_priorities, "HPS")),
      list(label = " GAP_SPP MPS processing",
           rel = file.path(folder_taxon_priorities, "MPS")),
      list(label = " GAP_SPP LPS processing",
           rel = file.path(folder_taxon_priorities, "LPS")),
      list(label = " GAP_SPP NFCR processing",
           rel = file.path(folder_taxon_priorities, "NFCR"))
    )
    for (step in steps) {
      print(paste0(crop, step$label))
      file_copy_ext(file.path(work_home, crop, step$rel),
                    file.path(work_destination, crop, step$rel),
                    file_extension, crop)
    }
    print(paste0(crop," have moved"))
  } else {
    print(paste0(crop," have not moved (CHECK IT)"))
  }
}
###################### Process #######################
crops_dirs<-dir(work_home)
# Parallel variant kept for reference; NOTE(review): it iterates root_dirs
# (which is NULL) rather than crops_dirs — the sequential call below is the
# one that actually runs.
#crops_processed <- mclapply(root_dirs, process_crop, mc.cores=work_cores)
crops_processed <- lapply(crops_dirs, process_crop)
################ Removing old .RUN files###############
# work_destination <- ""
# work_home <- ""
# crops_dirs<-dir(work_home)
# lapply(crops_dirs,function(crop){
#
# W_PATH<-paste0(work_destination,"/",crop);gc()
# l_files<-list.files(W_PATH,pattern = ".RUN",recursive = T);gc()
# if(length(l_files)>0){
# cat("Processing ",as.character(crop),"\n")
# cat("removing .RUN files for ",as.character(crop),"\n")
# cat("removing ",l_files,"\n")
# file.remove(l_files);gc()
#
# }else{
# cat("Skipping ",as.character(crop),"\n")
# }
# })
work_destination <- ""
# NOTE(review): `l` lists leftover .RUN marker files, but nothing removes them
# here — the removal loop above is commented out; confirm intent.
l<-list.files(work_destination,pattern = ".RUN",recursive = T);gc()
|
8fc82be2d4b7509a587f2ba4afa454b5148aa555
|
ab9cfa948b2b005aab7c00f72b3a461e9252a5d4
|
/plot5.R
|
41b717b299e6db8ffd3f7514ae603316b649a3c4
|
[] |
no_license
|
doctapp/ExData_Plotting2
|
136ee5fde0756ce69fc9762ee328d9dbd0c0a529
|
7997921fffba5df755b0cea17d67b871dce454de
|
refs/heads/master
| 2016-09-01T20:38:47.872972
| 2014-11-23T16:54:56
| 2014-11-23T16:54:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,637
|
r
|
plot5.R
|
require(ggplot2)
require(plyr)
# Download (if needed) and load the NEI/SCC emission data sets.
# Returns a list with two data frames:
#   NEI - PM2.5 emission records (Emissions and year coerced to numeric)
#   SCC - source classification code table
# NOTE(review): unzip() only runs when the zip is absent; if the zip exists
# but the .rds files were never extracted, readRDS() below will fail —
# consider also checking file.exists("summarySCC_PM25.rds").
get_data <- function() {
  # Download the data if not already downloaded
  zipfile <- "exdata-data-NEI_data.zip"
  if (!file.exists(zipfile)) {
    url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
    download.file(url, destfile = zipfile)
    unzip(zipfile)
  }
  NEI <- readRDS("summarySCC_PM25.rds")
  SCC <- readRDS("Source_Classification_Code.rds")
  # Ensure numeric types for aggregation/plotting downstream.
  NEI$Emissions <- as.numeric(NEI$Emissions)
  NEI$year <- as.numeric(NEI$year)
  return(list(NEI=NEI, SCC=SCC))
}
# Load the data
data <- get_data()
NEI <- data$NEI
SCC <- data$SCC
# Checks if column refers to motor vehicles
# Vectorized predicate: TRUE for entries whose text names a motor-vehicle
# source (diesel/gasoline/motor/highway followed by "vehicle"), case-insensitive.
is_motor <- function(col) {
  motor_pattern <- "(diesel|gasoline|motor|highway).*vehicle"
  grepl(motor_pattern, col, ignore.case = TRUE)
}
# Get the SCCs which refer to motor vehicles
motor_scc <- SCC[is_motor(SCC$Short.Name),]
# Get the NEI subset for Baltimore (fips 24510).
# NOTE(review): NEI$fips is stored as character in the source data; the
# comparison with the number 24510 only works via implicit coercion of the
# numeric to "24510" — the explicit form NEI$fips == "24510" is safer.
city_data <- NEI[NEI$fips==24510,]
# Keep only records whose SCC is a motor-vehicle source.
motor <- city_data[city_data$SCC %in% motor_scc$SCC,]
# Aggregate total emissions by year (plyr::ddply)
emissions_by_year <- ddply(motor, .(year), summarize, total = sum(Emissions))
# Open a PNG graphics device; all plotting below goes to plot5.png.
png(file = "plot5.png", width = 480, height = 480, units = "px", bg = "transparent")
# Scatter of yearly totals with a linear trend line.
g <- ggplot(emissions_by_year, aes(year, total))
p <- g +
  geom_point(size = 3) +
  geom_smooth(method = "lm") +
  ggtitle("Total PM2.5 Emissions for Motor Vehicles in Baltimore per Year") +
  ylab("Total Emissions")
print(p)
# Close the device, flushing the file to disk.
dev.off()
|
849f34730b4936ba01935470b54217e74e90efff
|
98a06c9667a439fa92ddf393bfe685156121327b
|
/R/profile.dataset.R
|
92ac43149abe27b40d5f520a43e8774872ed04d2
|
[
"Apache-2.0"
] |
permissive
|
mjfii/Profile-Dataset
|
f11b0f7169462de28fab13eddf6c2ba928081652
|
378d3276ac5976e91081a47fd8046e0b14bb912e
|
refs/heads/master
| 2021-01-22T08:02:32.186668
| 2017-02-13T22:11:54
| 2017-02-13T22:11:54
| 81,870,272
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
profile.dataset.R
|
library(reshape2)
library(ggplot2)
# Return a single class label for x: the LAST element of class(x).
# (e.g. an ordered factor has class c("ordered", "factor") and yields "factor".)
single.class <- function(x) {
  cls <- class(x)
  cls[[length(cls)]]
}
# Build a per-column profile of a data frame:
#   density        - count of non-missing values
#   sparsity       - count of missing (NA) values
#   unique.vals    - count of distinct values (NA counts as one value)
#   cardnality     - unique.vals / total rows, rounded to 5 decimals
#   class          - single class label (via single.class)
#   NonNumbers     - count of NaN values (numeric columns only)
#   InfinateValues - count of infinite values (numeric columns only)
# Column names (including the "cardnality"/"InfinateValues" spellings) are
# kept for backward compatibility with existing callers.
profile.data.frame <- function(pdf) {
  density     <- vapply(pdf, function(y) sum(!is.na(y)), integer(1))
  sparsity    <- vapply(pdf, function(y) sum(is.na(y)), integer(1))
  unique.vals <- vapply(pdf, function(y) length(unique(y)), integer(1))
  profile <- data.frame(density, sparsity, unique.vals)
  profile$cardnality <- round(profile$unique.vals / (profile$density + profile$sparsity), 5)
  profile$class <- sapply(pdf, single.class)
  # is.nan()/is.infinite() error on character (and other non-numeric) input,
  # so guard each column; the original version errored on data frames that
  # contained character columns. Numeric columns behave exactly as before.
  count_if_numeric <- function(y, test) {
    if (is.numeric(y)) sum(test(y)) else 0L
  }
  profile$NonNumbers <- vapply(pdf, count_if_numeric, integer(1), test = is.nan)
  profile$InfinateValues <- vapply(pdf, count_if_numeric, integer(1), test = is.infinite)
  return(profile)
}
# Demo: profile the ggplot2 diamonds data set and histogram every numeric column.
data.set <- data.frame(diamonds, stringsAsFactors = FALSE)
data.profile <- profile.data.frame(data.set)
# Column names whose detected class is integer or numeric.
numeric.attributes <- row.names(data.profile[data.profile$class %in% c('integer','numeric'), ])
# Long format: one (variable, value) row per cell, for faceting.
d <- melt(data.set[ , numeric.attributes])
ggplot(d, aes(x = value)) +
  facet_wrap(~variable, scales = 'free_x', ncol=3) +
  geom_histogram(bins = 30)
|
bd6f4b48025030381704017a57c134818cc7fdd0
|
3d6d4f7e6c2213e43eeb206d23f74bc38c604e19
|
/R_Functions/Functions Folder V2/Old Functions/import_4C.R
|
c552490c076be85e0f6950f25bdd96c5445d21e4
|
[] |
no_license
|
dmcmill/4C_QuantWritingAnalysis
|
64fa2d0f91f84237f73de313bf986e8662a61bc1
|
e3fc6644fab293b50a708e4d8a5f921bf0abd101
|
refs/heads/master
| 2021-06-17T00:16:24.002550
| 2017-06-05T20:41:10
| 2017-06-05T20:41:10
| 75,655,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,999
|
r
|
import_4C.R
|
##This script will read a worksheet in 4C format from the Master Excel Data Sheet (MEDS) into a local dataframe. The dataframe will be modified so that each student is associated with a single matrix containing his/her 4C score for a specific assignment. The dataframe will then be added to the master 4C dataframe.
##arguments, 1) excel file name, 2) worksheet name, 3) directory
# Read one 4C worksheet from the Master Excel Data Sheet and merge it into
# the global CCCCdf data frame (side effect via <<- and save.CCCCdf()).
# Arguments: filename (Excel file), worksheet (sheet name),
# directory (folder containing the file, default "Data").
# NOTE(review): loadWorkbook/readWorksheet presumably come from XLConnect,
# and CCCCdf/save.CCCCdf must already exist in the calling environment —
# confirm those dependencies before reuse.
import.4C <- function(filename, worksheet, directory = "Data"){
  ##This section reads the specified 4C worksheet into a local data frame called "sheet." It pads the "score" column of sheet with leading 0's to the left (up to a width of 4) and then orders the rows of sheet by student IDcode.
  wb <- loadWorkbook(paste(directory,"/",filename,sep=""))
  sheet <- readWorksheet(wb, worksheet)
  sheet <- na.omit(sheet)
  # %04d left-pads each score to 4 digits so every digit position exists.
  sheet$score <- sprintf("%04d", sheet$score)
  sheet <- sheet[order(sheet$IDcode),]
  ##This section converts the "score" column of sheet into 4 seperate columns for "comp", "calc", "context", and "clarity", respectively.
  j <- 0
  for(i in sheet[,3]){
    j <- j + 1
    # Split the 4-character score string into its individual digits.
    temp <- as.numeric(unlist(strsplit(as.character(i),"")))
    sheet[j, 3] <- temp[1]
    sheet[j, 4] <- temp[2]
    sheet[j, 5] <- temp[3]
    sheet[j, 6] <- temp[4]
  }
  sheet[, 3] <- sapply(sheet[, 3], as.numeric)
  colnames(sheet) <- c("IDcode", "statementnum", "comp", "calc", "context", "clarity")
  ##This section creates four new variables: 1) IDs, which is the full list of all student IDcodes from the specified worksheet (including repeats), 2) uniqueIDS, which will be used to store a complete list of unique student IDcodes, 3) cccclist, which will be used to store the corresponding list of student's 4C scores in matrix form, and 4) j,a counter initialized at 0.
  IDs <- as.character((sheet[,1])) ## list of all student ID's
  uniqueIDs <- list()
  cccclist <- list()
  j <- 0
  ##This section loops through UNIQUE student IDs and saves them to the list "uniqueIDS."
  ##It also converts each student's 4C scores into a single matrix, where the rows of the
  ##matrix are the statement number, and the columns of the matrix are "comp", "calc",
  ##"context", and "clarity", respectively. Each matrix is added to the list "cccclist".
  for(i in unique(IDs)){
    j <- j+1
    uniqueIDs[[j]] <- i
    tempframe <- subset.data.frame(sheet, sheet$IDcode==i)
    tempframe <- tempframe[order(tempframe$statementnum),]
    ccccmatrix <- as.matrix(subset.data.frame(tempframe,select = comp:clarity))
    rownames(ccccmatrix) <- 1:(nrow(ccccmatrix))
    cccclist[[j]] <- ccccmatrix
  }
  ##This section binds the two parallel lists (uniqueIDS and cccclist) into a dataframe named "df" with column names of "IDcode" and the specified worksheet name, respectively.
  df <- as.data.frame(cbind(uniqueIDs, cccclist))
  colnames(df) <- c("IDcode", paste("c_",worksheet,sep=""))
  ##### This section merges the df data frame with the CCCCdf datafame. The all=TRUE argument specifies that all values will be kept even if they do not appear in both dataframes.
  merge(CCCCdf,df,all=TRUE)->CCCCdf
  # Reorder so IDcode stays first and the remaining columns are alphabetical.
  CCCCdf<-CCCCdf[,c(colnames(CCCCdf[1]),sort(colnames(CCCCdf[-1])))]
  ##### This sections calls upon the save.CCCCdf function to save the CCCCdf data frame in multiple formats to the disk. This include saving backup versions in /Data/CCCC_backups with a timestamp
  save.CCCCdf()
  ##### Finally, this last command returns the new CCCCdf to the global environment.
  CCCCdf<<-(CCCCdf)
}
|
e671cc454249b1ad6bb0e6adca75603c2f2f3d7a
|
8b3cd7ee200564b65db2d76ca8ab953466e091e2
|
/man/cull.backfaces.Rd
|
d63f76ec0633d954205d76c143b8187f8b18e2b4
|
[] |
no_license
|
alicejenny/project011
|
3df759dfb96e5a7276bde4dd315bbc81f812a98c
|
7de1339bc4c148bfe41264acb3da9307a605e863
|
refs/heads/master
| 2021-01-10T14:30:09.381069
| 2015-07-15T20:17:40
| 2015-07-15T20:17:40
| 36,937,861
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 305
|
rd
|
cull.backfaces.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cullbackfaces.R
\name{cull.backfaces}
\alias{cull.backfaces}
\title{Cull Backfaces}
\usage{
cull.backfaces()
}
\description{
Cull the backfaces of a point cloud based on vertex normals.
}
\examples{
cull.backfaces()
}
|
ca074c3ca420ed10956248a2fb4fa196ee161ab4
|
26e26aca4102f40bc848120c4ebc99bb40d4a3c1
|
/R/Archive/Other Codes/62-FEI-Urban.R
|
bcf25db1d58506000e8de8747b762ccbe06260ab
|
[] |
no_license
|
IPRCIRI/IRHEIS
|
ee6c00dd44e1e4c2090c5ef4cf1286bcc37c84a1
|
1be8fa815d6a4b2aa5ad10d0a815c80a104c9d12
|
refs/heads/master
| 2023-07-13T01:27:19.954174
| 2023-07-04T09:14:58
| 2023-07-04T09:14:58
| 90,146,792
| 13
| 6
| null | 2021-12-09T12:08:58
| 2017-05-03T12:31:57
|
R
|
UTF-8
|
R
| false
| false
| 8,333
|
r
|
62-FEI-Urban.R
|
# FEI method
#
#
#
# Copyright © 2018:Arin Shahbazian
# Licence: GPL-3
#
rm(list=ls())
starttime <- proc.time()
cat("\n\n================ FEI method =====================================\n")
library(yaml)
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(reldist)
library(Hmisc)
library(dplyr)
library(data.table)
library(stringr)
# Calories
MinCalories <- 2300
MinCalories2 <- MinCalories^2
load(file = paste0(Settings$HEISProcessedPath,"Y","95","MyDataUrban.rda"))
#Seperate big cities
MyDataUrban[,sum(Weight*Size),by=ProvinceCode][order(V1)]
MyDataUrban[,HHIDs:=as.character(HHID)]
MyDataUrban[,ShahrestanCode:=as.integer(str_sub(HHIDs,2,5))]
MyDataUrban[,sum(Weight*Size),by=ShahrestanCode][order(V1)][330:387]
MyDataUrbanTehran<-MyDataUrban[ProvinceCode==23]
MyDataUrbanTehran[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrbanTabriz<-MyDataUrban[ProvinceCode==3]
MyDataUrbanTabriz[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrbanAhvaz<-MyDataUrban[ProvinceCode==6]
MyDataUrbanAhvaz[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrbanShiraz<-MyDataUrban[ProvinceCode==7]
MyDataUrbanShiraz[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrbanMashhad<-MyDataUrban[ProvinceCode==9]
MyDataUrbanMashhad[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrbanEsfahan<-MyDataUrban[ProvinceCode==10]
MyDataUrbanEsfahan[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrbanKaraj<-MyDataUrban[ProvinceCode==30]
MyDataUrbanKaraj[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrbanKermanshah<-MyDataUrban[ProvinceCode==5]
MyDataUrbanKermanshah[,sum(Weight*Size),by=ShahrestanCode]
MyDataUrban<-MyDataUrban[ShahrestanCode==2301,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==303,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==603,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==707,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==916,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==1002,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==3001,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==2301,ProvinceCode:=as.numeric(ShahrestanCode)]
MyDataUrban<-MyDataUrban[ShahrestanCode==502,ProvinceCode:=as.numeric(ShahrestanCode)]
load(file = "dt4Urban.rda")
# Attach the per-province cluster assignments (dt2 comes from dt4Urban.rda).
MyDataUrban <- merge(MyDataUrban, dt2, by = c("ProvinceCode"), all.x = TRUE)
# Keep the analysis columns and sort by cluster, then per-capita
# non-durable expenditure (ascending).
Urb <- MyDataUrban[, .(Percentile = as.integer(Percentile), Per_Daily_Calories,
                       Total_Exp_Month_Per, Total_Exp_Month_Per_nondurable,
                       ProvinceCode, Weight, cluster)]
Urb <- Urb[order(cluster, Total_Exp_Month_Per_nondurable)]
# Drop households with no recorded calorie intake.
Urb <- Urb[Per_Daily_Calories != 0]
# Cumulative survey weight within each cluster (households already sorted
# by expenditure, so this gives a weighted CDF position).
Urb$cumWeightcluster <- ave(Urb$Weight, Urb$cluster, FUN = cumsum)
# BUG FIX: the original read Urb$cumWeight, which only resolved to the
# cumWeightcluster column through silent $-partial-matching on data frames.
Urb$ux <- ave(Urb$cumWeightcluster, by = list(Urb$cluster), FUN = max)
# Weighted within-cluster percentile, rounded up to an integer 1..100.
Urb <- Urb[, clusterPercentile := Urb$cumWeightcluster / Urb$ux]
Urb <- Urb[, clusterPercentile := clusterPercentile * 100]
Urb <- Urb[, clusterPercentile := ceiling(clusterPercentile)]
######### calculate Urban Pov Line #########
d <- Urb
setnames(d,c("pct","cal","exp","ndx","prov","w","cluster","cumw","ux","clusterpct"))
d2 <- d [clusterpct<86]
#plot(cal~exp,data=d)
#plot(cal~exp,data=dx2)
#plot(log(cal)~log(exp),data=d)
#plot(log(cal)~log(exp),data=d2)
d$cal2<-d$cal^2
d2$cal2<-d2$cal^2
dx <- d[,lapply(.SD, mean, na.rm=TRUE),by=.(clusterpct,cluster)]
dx2 <- d2[,lapply(.SD, mean, na.rm=TRUE),by=.(clusterpct,cluster)]
############Urban-all############
#Nonlog-d
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,d[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,d[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
assign(nam2,nam3)
}
summary(model1)
MyDataUrbanCluster<-MyDataUrban[cluster==1]
MyDataUrbanCluster[,Poor:=ifelse(Total_Exp_Month_Per_nondurable < Urban1PovLine1,1,0)]
MyDataUrbanCluster[,sum(HIndivNo),by=cluster][order(cluster)]
MyDataUrbanCluster[,sum(Poor),by=cluster][order(cluster)]
MyDataUrban[,sum(Weight),by=cluster][order(cluster)]
#log-d
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,d[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(log(exp) ~ log(cal) , weights = w, data=assign(nam,d[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
nam3<-exp(nam3)
assign(nam2,nam3)
}
summary(model1)
#Nonlog-d2
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,d2[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,d2[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
assign(nam2,nam3)
}
summary(model1)
#log-d2
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,d2[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(log(exp) ~ log(cal) , weights = w, data=assign(nam,d2[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
nam3<-exp(nam3)
assign(nam2,nam3)
}
######### calculate Urban Pov Line- Percentile #########
d <- Urb
setnames(d,c("pct","cal","exp","ndx","prov","w","cluster","cumw","ux","clusterpct"))
d2 <- d [clusterpct<86]
#plot(cal~exp,data=d)
#plot(cal~exp,data=d2)
#plot(log(cal)~log(exp),data=d)
#plot(log(cal)~log(exp),data=d2)
d$cal2<-d$cal^2
d2$cal2<-d2$cal^2
dx <- d[,lapply(.SD, mean, na.rm=TRUE),by=.(clusterpct,cluster)]
dx2 <- d2[,lapply(.SD, mean, na.rm=TRUE),by=.(clusterpct,cluster)]
############Urban-all############
#Nonlog-d
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,dx[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,dx[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
assign(nam2,nam3)
}
#log-d
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,dx[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(log(exp) ~ log(cal) , weights = w, data=assign(nam,dx[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
nam3<-exp(nam3)
assign(nam2,nam3)
}
#Nonlog-d2
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,dx2[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,dx2[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
assign(nam2,nam3)
}
#log-d2
for(clus in 1:4){
nam <- paste0("Urb",clus)
assign(nam,dx2[cluster==clus])
# save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
model1 <- lm(log(exp) ~ log(cal) , weights = w, data=assign(nam,dx2[cluster==clus]))
summary(model1)
nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
nam2 <- paste0("Urban1PovLine",clus)
nam3<-exp(nam3)
assign(nam2,nam3)
}
endtime <- proc.time()
cat("\n\n============================\nIt took ")
cat(endtime-starttime)
|
69e192cd3eb2867c07d9b73c5d295f84e7265732
|
ce6c631c021813b99eacddec65155777ca125703
|
/R/mdlKM.R
|
00b0564db3f4f7893d3b1488a944aae284c2f638
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
Zhenglei-BCS/smwrQW
|
fdae2b1cf65854ca2af9cd9917b89790287e3eb6
|
9a5020aa3a5762025fa651517dbd05566a09c280
|
refs/heads/master
| 2023-09-03T04:04:55.153230
| 2020-05-24T15:57:06
| 2020-05-24T15:57:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
mdlKM.R
|
#' @title Estimate Statistics
#'
#' @description Support function for computing Kaplan-Meier statistics for
#' left-censored data. The data are negated so that left-censoring becomes
#' right-censoring, which \code{survfit} can handle directly.
#'
#' @importFrom survival survfit Surv
#' @param x an object of class "lcens" (left-censored data) to compute.
#' @param group optional grouping variable; if supplied, a separate curve is
#' fitted per group.
#' @param conf.int the confidence interval level (default 0.95).
#' @return An object of class "survfit", or \code{list(NoData=TRUE)} when
#' every observation is missing.
#' @keywords misc
#' @export
mdlKM <- function(x, group, conf.int=.95) {
  ##
  # Observed values are in column 1 of the S4 .Data slot; censor.codes
  # flags which observations are censored.
  pvalues <- x@.Data[, 1]
  rvalues <- x@censor.codes
  ## remove NAs from data
  Good.data <- !is.na(pvalues)
  if(sum(Good.data) > 0) { # At least one value
    pvalues <- -pvalues[Good.data] # reverse data: left-censored -> right-censored
    rvalues <- !rvalues[Good.data] # reverse sense for survfit (TRUE = event)
    if(missing(group)) {
      retval <- survfit(Surv(pvalues, rvalues) ~ 1, conf.int=conf.int, conf.type="plain")
    } else {
      # Subset group to the same non-missing rows before fitting per group.
      group <- group[Good.data]
      retval <- survfit(Surv(pvalues, rvalues) ~ group, conf.int=conf.int, conf.type="plain")
    }
  }
  else # no data: signal the caller with a sentinel list instead of erroring
    retval <- list(NoData=TRUE)
  return(retval)
}
|
ca6ee109db6f7ee7d2f7fbd1d07745824a0f244f
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/tensorBSS/R/tJADERotate.R
|
698ce754b32f0642504f362a8b2c79e5cdca0391
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 912
|
r
|
tJADERotate.R
|
# Jointly diagonalize the m-mode TJADE matrices of tensor x for each of its
# r non-observation modes, rotating x mode by mode.
#
# Arguments:
#   x       - data tensor; the last dimension indexes observations.
#   k       - optional vector of band widths per mode; NULL means use the
#             full mode dimension. k[m] == 0 skips mode m (identity rotation).
#   maxiter - maximum iterations for the joint diagonalizer frjd().
#   eps     - convergence tolerance for frjd().
# Returns: list(x = rotated tensor, U = list of per-mode rotation matrices).
# NOTE: frjd, mModeTJADEMatrix and tensorTransform are package-internal.
tJADERotate <-
function(x, k = NULL, maxiter, eps){
  r <- length(dim(x)) - 1
  rotateStack <- vector("list", r)
  for(m in 1:r){
    pm <- dim(x)[m]
    if(is.null(k)){
      this_k <- pm
    }
    else{
      this_k <- k[m]
    }
    if(this_k > 0){
      # Collect the m-mode JADE matrices for all (i, j) pairs inside the
      # band |i - j| < this_k, then bind once. The original grew ijStack
      # with rbind() inside the double loop, which is O(n^2) in copies.
      ijList <- list()
      for(i in 1:pm){
        for(j in 1:pm){
          if(abs(i - j) < this_k){
            ijList[[length(ijList) + 1L]] <- mModeTJADEMatrix(x, m, i, j)
          }
        }
      }
      ijStack <- do.call(rbind, ijList)  # NULL when the band is empty, as before
      # frjd returns the joint diagonalizer V; its transpose is the rotation.
      rotateStack[[m]] <- t(frjd(ijStack, maxiter=maxiter, eps=eps)$V)
      # Apply the rotation to mode m before processing the next mode.
      x <- tensorTransform(x, rotateStack[[m]], m)
    }
    else{
      # Skipped mode: identity rotation keeps the interface uniform.
      rotateStack[[m]] <- diag(pm)
    }
  }
  return(list(x = x, U = rotateStack))
}
|
75d9f52c0737f31600e561ad52f7818eb22e4a05
|
9d2996ee9ca0f2d7cbacedc163fede388a937d59
|
/R/app.R
|
fe7b36e37ad17d727db5281f2e8f4f9cf0a503b0
|
[] |
no_license
|
KaitlanM/MemoryMeasurer-App
|
66f88412b52b0ec7d111f41b6d41d0a3998e8354
|
f3570faeb0289c1ada7b5b2a56c365c1ba5a8a2c
|
refs/heads/master
| 2020-05-25T18:12:25.961430
| 2019-05-31T21:22:01
| 2019-05-31T21:22:01
| 187,924,419
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,100
|
r
|
app.R
|
#' @import shiny plotrix lubridate
#'
source("~/MemoryMeasurer/R/scoring-words.R")
source("~/MemoryMeasurer/R/load-words.R")
source("~/MemoryMeasurer/R/draw_circle_plot.R")
ui <- navbarPage(title = "Memory Measurer",
tabPanel("Instructions",
tags$h1("This is the Memory Measurer!"),
tags$h4(
tags$p("The goal of the task is to remember as many words as you can."),
tags$p("Start by choosing your difficulty level and how much time you would like to spend.
Then", tags$strong("memorize!")),
tags$p("In between the memorizing and the reciting, there will be a", tags$em("distractor"), "task --
don't let it stump you!"),
tags$p("Good luck and have fun!")
)
),
tabPanel("Memorizing Phase",
sidebarLayout(position = "left",
sidebarPanel(
selectInput(inputId = "sylChoice", # User customizes word difficulty
label = "Word Difficulty",
choices = c("Easy -- One Syllable" = "easy",
"Medium -- Two Syllables" = "medium",
"Hard -- Three Syllables" = "hard"),
selected = "medium"),
numericInput(inputId = "timerIn", # Choose how much time to spend
label = "Seconds",
value = 30,
min = 0,
max = 120,
step = 1),
numericInput(inputId = "numWords", # Choose the number of words
label = "Number of Words",
value = 15,
min = 5,
max = 100,
step = 1),
actionButton(inputId = "start",
label = "Start!")
),
mainPanel(
tags$h4(textOutput(outputId = "timeleft")), # Print how much time is left
tags$h2("Memorize the following words:"),
column(tableOutput(outputId = "wordTable"), width = 6) # Show the words
)
)
),
tabPanel("Intermediate Task",
sliderInput(inputId = "circleGuess", # User can guess the number of circles
label = "Count the circles and indicate on the slider how many there are.",
min = 0, max = 50, value = 0),
plotOutput(outputId = "circles"), # Show the circles
actionButton(inputId = "circleDone", label = "Done"),
verbatimTextOutput(outputId = "circleAccuracy") # Feedback for guess
),
tabPanel("Reciting",
textInput(inputId = "wordsRemembered", # User types remembered words
label = "Please type the words that you remember and press the Submit button after
each one",
value = ""),
actionButton(inputId = "submitWord",
label = "Submit"),
tableOutput(outputId = "tableRemembered"), # Typed words appear underneath
actionButton(inputId = "finishSubmit",
label = "I'm Finished"),
verbatimTextOutput(outputId = "scoreText") # Feedback about score
)
)
server <- function(input, output, session){
# Memorizing Phase -----------------------------------------------------------
### Loading the word data and tabling them
allWords <- NULL
observeEvent(input$start, {
allWords <<- load_words(wordLength = input$sylChoice)
})
displayWords <- eventReactive(input$start, {
wordData <<- sample(allWords, size = input$numWords)
})
output$wordTable <- renderTable({
data.frame(matrix(displayWords(), ncol = 5))
})
### Timer (adapted from https://stackoverflow.com/questions/49250167/how-to-create-a-countdown-timer-in-shiny)
timer <- reactiveVal(30)
activeTimer <- reactiveVal(FALSE)
observe({
invalidateLater(1000, session)
isolate({
if(activeTimer()) {
timer(timer() - 1)
if(timer() < 1){
output$wordTable <- renderTable({
data.frame(matrix(ncol = 0, nrow = 0))
})
activeTimer(FALSE)
showModal(modalDialog(
title = "Important!", "Time's Up!"
))
}
}
})
})
observeEvent(input$start, {activeTimer(TRUE)})
observeEvent(input$start, {timer(input$timerIn)})
observeEvent(input$start, {
output$timeleft <- renderText({
paste("Time left: ", lubridate::seconds_to_period(timer()))
})
})
# Intermediate Phase ---------------------------------------------------------
### Plot random circles for the intermediate task
numCirc <- sample(20:30, 1) # The number of circles
# Some circles may overlap, so the user has a buffer of two when counting
numCircTolerance <- seq(from = (numCirc - 2), to = (numCirc + 2), by = 1)
output$circles <- renderPlot({
draw_circle_plot(numCirc)
})
### Give the user feedback about whether the count was accurate or not.
accuracyText <- NULL
makeReactiveBinding("accuracyText")
observe({
if (input$circleGuess %in% numCircTolerance) {
accuracyText <<- ("That's correct! Move on to the Reciting tab.")
} else {
accuracyText <<- ("That's not correct. Try again.")
}
})
observeEvent(input$circleDone, {
output$circleAccuracy <- renderText(accuracyText)
})
# Reciting Phase -------------------------------------------------------------
### Print the user's words into a table
data <- matrix()
# Wait for click to record word
userWords <- eventReactive(input$submitWord, {
data <<- rbind(data, input[["wordsRemembered"]])
return(data)
})
observeEvent(input$submitWord, {
output$tableRemembered <- renderTable({
userData <<- data.frame(userWords())[-1, , drop = FALSE]
colnames(userData) <- ("Guesses")
return(userData)
})
})
### Evaluate the words for accuracy and output data
observeEvent(input$finishSubmit, {
output$scoreText <- renderText({paste("Your score is", scoring(system = wordData, user = data,
wordLength = input$sylChoice), "words. Good job!")})
write.csv(c(input$sylChoice,
input$timerIn,
input$numWords,
scoring(system = wordData, user = data, wordLength = input$sylChoice)),
file = "UserScore.csv",
row.names = c("Difficulty", "Time", "Number of words", "Score"))
})
}
runApp(
shinyApp(ui = ui, server = server)
)
MemoryMeasurer:::runApp()
|
e24c5aedf75c5c78ac4295ea0ed303a3e3d828e9
|
7c96b6eb387314abde40c3998b76784097c06092
|
/Governers.R
|
a8a7b2c8596ce2c7b2851b625c8e345ec6536a4b
|
[] |
no_license
|
SanjayPJ/RBI-Governers-
|
960c467a0965d72c2d4ed09b7d7680753dbdb746
|
e944d6495d3025e3d1f67cad6c3449d1ffc69f28
|
refs/heads/master
| 2020-08-10T12:37:48.402045
| 2019-10-10T17:46:56
| 2019-10-10T17:46:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,400
|
r
|
Governers.R
|
# Scrape the Wikipedia list of RBI Governors, tidy the backgrounds column
# and plot the counts. BUG FIX: the original only attached robotstxt/curl/
# rvest but also used magrittr, dplyr, tidyr, forcats and ggplot2 functions,
# so it failed in a fresh R session; all required packages are loaded here.
library(robotstxt)
library(curl)
library(rvest)
library(magrittr)  # extract2()
library(dplyr)     # count(), pull(), rename()
library(tidyr)     # separate()
library(forcats)   # fct_collapse(), fct_count()
library(ggplot2)   # plotting

# Confirm scraping the page is permitted by robots.txt.
paths_allowed(
  paths = c("https://en.wikipedia.org/wiki/List_of_Governors_of_Reserve_Bank_of_India")
)
# Since the o/p is TRUE we can go ahead with the extraction of data
rbi_guv <- read_html("https://en.wikipedia.org/wiki/List_of_Governors_of_Reserve_Bank_of_India")
rbi_guv
table <- rbi_guv %>%
  html_nodes("table") %>%
  html_table()
View(table)
# There are three tables in the web page; the second holds the profiles.
profile <- rbi_guv %>%
  html_nodes("table") %>%
  html_table() %>%
  extract2(2)
# Split "Term in office" into length and unit, keep name + term, sort
# by term length (descending).
profile_1 <- profile %>%
  separate(`Term in office`, into = c("term", "days")) %>%
  select(Officeholder, term) %>%
  arrange(desc(as.numeric(term)))
background <- profile %>%
  count(Background)
# Collapse the many background labels into broad categories and count them.
backgrounds <- profile %>%
  pull(Background) %>%
  fct_collapse(
    Bureaucrats = c("IAS officer", "ICS officer",
                    "Indian Administrative Service (IAS) officer",
                    "Indian Audit and Accounts Service officer",
                    "Indian Civil Service (ICS) officer"),
    `No Info` = c(""),
    `RBI Officer` = c("Career Reserve Bank of India officer")
  ) %>%
  fct_count() %>%
  rename(background = f, count = n)
backgrounds %>%
  ggplot() +
  geom_col(aes(background, count), fill = "blue") +
  xlab("Background") + ylab("Count") +
  ggtitle("Background of RBI Governors")
|
4940bd58ea065cc6578fde729bd5388f8807cdaf
|
311fad25897b2153154a7e2bc92325a7dde4eb98
|
/app.R
|
32064616cb382386832c0f6b43f6391d146e0146
|
[
"MIT"
] |
permissive
|
debruine/bfrr
|
b1a83056a01c4ea7db05bf14c7c08ff0fa6308b2
|
9b80a9933b38b2f1afaa340fad978fa6818b8c90
|
refs/heads/master
| 2020-12-20T05:18:06.639719
| 2020-03-06T20:39:59
| 2020-03-06T20:39:59
| 235,974,846
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,491
|
r
|
app.R
|
## app.R ##
library(shiny)
library(shinyjs)
library(shinydashboard)
library(dplyr)
library(ggplot2)
## Functions ----
source("R/Bf.R")
source("R/bfrr.R")
source("R/plot.bfrr.R")
source("R/summary.bfrr.R")
source("R/default.R")
source("R/likelihood.R")
source("R/utils-pipe.R")
ggplot2::theme_set(theme_bw(base_size = 20))
## UI ----
ui <- dashboardPage(
dashboardHeader(title = "bfrr"),
dashboardSidebar(
sidebarMenu(
#actionButton("reset", "Reset Parameters"),
# input ----
numericInput("sample_mean", "sample mean", value = 0, step = 0.05),
numericInput("sample_se", "sample standard error", value = 0.1, min = 0.001, step = 0.01),
numericInput("sample_df", "sample df", value = 99, min = 1, step = 1),
selectInput("model", "model", choices = c("normal", "uniform"), selected = "normal"),
numericInput("theory_mean", "H1 mean", value = 0, step = 0.05),
numericInput("theory_sd", "H1 SD", value = 1, min = 0.01, step = 0.01),
selectInput("tail", "tails", choices = c(1, 2), selected = 2),
numericInput("criterion", "criterion", value = 3, min = 1.01, step = 1),
selectInput("precision", "precision", choices = c(0.01, .025, .05, .1, .25, .5), selected = .05)
)
),
dashboardBody(
useShinyjs(),
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "custom.css")
),
h3("Robustness Regions for Bayes Factors"),
textOutput("summary"),
plotOutput("plot"),
p("Try setting the sample mean to 0.25, the H1 mean to 0.5, and tails to 1, then see what happens when you increase the criterion."),
p("This app is under development. Don't trust anything yet!")
)
)
## server ----
# Shiny server: recompute the robustness region whenever any input changes,
# render its plot, and return the captured summary text.
# NOTE(review): assigning output$plot inside the renderText expression works
# but is unidiomatic — a shared reactive({ bfrr(...) }) consumed by separate
# renderText/renderPlot blocks would avoid re-registering the plot output.
server <- function(input, output, session) {
  output$summary <- renderText({
    rr <- bfrr(sample_mean = input$sample_mean,
             sample_se = input$sample_se,
             sample_df = input$sample_df,
             model = input$model,
             mean = input$theory_mean,
             sd = input$theory_sd,
             # selectInput values arrive as strings; coerce to numeric here.
             tail = as.numeric(input$tail),
             criterion = input$criterion,
             rr_interval = NA,
             precision = as.numeric(input$precision))
    output$plot <- renderPlot(plot(rr))
    # renderText wants a character vector; capture the printed summary.
    capture.output(summary(rr))
  })
}
shinyApp(ui, server)
|
4b48749fa5a0014acc2bd7b0097ac09ef5eb1754
|
7206275c2c45d8dd8c2bd35e74802452c14066c7
|
/alphaimpute/3_Imputed_GWAS_Run_log_Lambs.R
|
a5227f6a0d23eeb4aad50264e9aad6b44a7d9cd4
|
[] |
no_license
|
sejlab/Soay_Immune_GWAS
|
bdf4a994f7ed1e7e0a27cf1a559096a0133478f4
|
cc8025817218f11d944c16d7b45fc392a58df6df
|
refs/heads/master
| 2020-05-17T09:44:59.090210
| 2019-10-16T12:30:27
| 2019-10-16T12:30:27
| 183,641,012
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,971
|
r
|
3_Imputed_GWAS_Run_log_Lambs.R
|
# Setup for the imputed-SNP GWAS on lamb IgE (log scale): load packages,
# data objects, and restrict to the lamb IgEmp model/phenotypes.
# NOTE(review): asreml is a proprietary package; setwd() in a script is
# fragile — prefer absolute paths or here::here(). Also prefer TRUE over T.
library(asreml)
library(reshape)
library(GenABEL)
library(plyr)
setwd("alphaimpute/")
# Loads full.imputedgenos.log.lambs, full.mapfile, ainv, etc.
load("Imputed_GWAS_log_Lambs.RData", verbose = T)
# Loads models and BEASTX.
load("BEAST_GWAS.RData")
models <- subset(models, LambAdult == "Lambs" & Response == "IgEmp")
# Drop zero phenotypes before log-transforming (log10(0) is -Inf).
BEASTX <- subset(BEASTX, IgEmp != 0)
BEASTX$IgEmp <- log10(BEASTX$IgEmp)
# First column of the genotype table must be "ID" for the joins below.
names(full.imputedgenos.log.lambs)[1] <- "ID"
gc()
for(h in 1:nrow(full.mapfile)){
print(paste("Running SNP", h))
genotab <- full.imputedgenos.log.lambs[,c(1, which(names(full.imputedgenos.log.lambs) == full.mapfile$V2[h]))]
names(genotab)[2] <- "SNP"
ped.results.ranef <- NULL
ped.results.fixef <- NULL
ped.results.wald <- NULL
ped.results.n <- NULL
for(i in 1:nrow(models)){
x.vec <- as.character(models$Model[i])
x.vec <- gsub(",random=~", "+", x.vec)
x.vec <- strsplit(x.vec, split = "\\+")[[1]]
x.vec[grep("ped", x.vec)] <- "ID"
x.vec[grep("ide", x.vec)] <- "ID"
x.vec <- c(x.vec, as.character(models$Response[i]))
x.vec <- unique(x.vec[-which(x.vec == 1)])
if(models$LambAdult[i] %in% c("Lambs", "Adults")){
x.data <- subset(BEASTX, LambAdult == models$LambAdult[i])
} else {
x.data <- BEASTX
}
x.data <- join(x.data, genotab)
x.data <- na.omit(x.data[,x.vec])
#x.data <- droplevels(subset(x.data, ID %in% dimnames(grminv)[[1]]))
eval(parse(text = paste0("fit1 <- asreml(fixed=", models$Response[i], "~",
models$Model[i],"
, data=x.data, ginverse=list(ID=ainv),
workspace = 500e+6, pworkspace = 500e+6,
maxiter = 100, trace = F)")))
ped.results.ranef <- rbind(ped.results.ranef,
cbind(Trait = models$Response[i],
LambAdult = models$LambAdult[i],
ASReml.EstEffects(fit1)))
x <- data.frame(summary(fit1, all = T)$coef.fixed)
x$variable <- row.names(x)
ped.results.fixef <- rbind(ped.results.fixef,
cbind(Trait = models$Response[i],
LambAdult = models$LambAdult[i],
x))
rm(x)
x <- data.frame(wald.asreml(fit1))
x$variable <- row.names(x)
ped.results.wald <- rbind(ped.results.wald,
cbind(Trait = models$Response[i],
LambAdult = models$LambAdult[i],
x))
ped.results.n <- rbind(ped.results.n,
data.frame(Trait = models$Response[i],
LambAdult = models$LambAdult[i],
table(x.data$SNP),
table(unique(subset(x.data, select = c(ID, SNP)))$SNP)))
rm(fit1, x.data, x.vec, x)
}
ped.results.ranef$SNP.Name <- full.mapfile$V2[h]
ped.results.fixef$SNP.Name <- full.mapfile$V2[h]
ped.results.wald$SNP.Name <- full.mapfile$V2[h]
ped.results.n$SNP.Name <- full.mapfile$V2[h]
restab.ranef <- rbind(restab.ranef, ped.results.ranef)
restab.fixef <- rbind(restab.fixef, ped.results.fixef)
restab.wald <- rbind(restab.wald, ped.results.wald)
restab.n <- rbind(restab.n, ped.results.n)
rm(ped.results.ranef, ped.results.fixef, ped.results.wald, ped.results.n, genotab, i)
if(h %in% seq(1, 10000, 100)) save(restab.ranef, restab.fixef, restab.wald, restab.n, file = paste0("GWAS_full_log_Lambs.RData"))
}
save(restab.ranef, restab.fixef, restab.wald, restab.n, file = paste0("GWAS_full_log_Lambs.RData"))
|
c4318321671167bd1fae320cd49d8c3346e1fd09
|
1b676b2d613bf67d8bec3079b3e9c0c4abb2213b
|
/R/rotation2d.R
|
5ee82c8c1546f912f1fdcea12d6575ffb17b36e4
|
[] |
no_license
|
cran/denpro
|
995a97a3eb39a8a75d75b1fc5b17ab8d497675a0
|
503a536c5b2963f0615a9eacf65aa5a84765d6c6
|
refs/heads/master
| 2016-09-06T13:58:43.857283
| 2015-04-24T00:00:00
| 2015-04-24T00:00:00
| 17,695,458
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
rotation2d.R
|
# Rotate a set of 2-dimensional points counter-clockwise by `alpha` radians.
#
# dendat: n x 2 matrix (or coercible object) of point coordinates, one per row.
# alpha:  rotation angle in radians.
#
# Returns an n x 2 matrix of the rotated coordinates.
rotation2d <- function(dendat, alpha) {
  co <- cos(alpha)
  si <- sin(alpha)
  # Standard 2-D rotation matrix, built column-major in a single call.
  rotmat <- matrix(c(co, si, -si, co), nrow = 2)
  t(rotmat %*% t(dendat))
}
|
78c490f71d402ce08ec683eefa27c4e3647a49b2
|
71df6d25207ba173a45b29484d5a0594737cb48f
|
/auctions/send_update.R
|
ac2c60478f0b00f85cef8b46cfd4c13e782ac6eb
|
[] |
no_license
|
filipstachura/home-scripts
|
d7ffcf2f94199042df65a8da9ae8249e472e3d76
|
db0e10f04281320bd4ebf23a584a571610a8ff33
|
refs/heads/master
| 2021-08-30T12:06:36.621560
| 2017-12-17T21:31:36
| 2017-12-17T21:31:36
| 113,757,814
| 0
| 0
| null | 2017-12-17T12:30:54
| 2017-12-10T14:44:38
|
JavaScript
|
UTF-8
|
R
| false
| false
| 796
|
r
|
send_update.R
|
library(lubridate)
library(purrr)
library(purrrlyr)
library(dplyr)
source('../mails/send_mail.R', chdir = TRUE)
# Convert a raw price string such as "123;45 zł" into a numeric value:
# swap the ";" decimal separator for ".", strip the " zł" currency suffix,
# then parse and round to two decimal places. Vectorized over `price`.
parse_price <- function(price) {
  normalized <- gsub(";", ".", price, fixed = TRUE)
  normalized <- gsub(" zł", "", normalized, fixed = TRUE)
  round(as.numeric(normalized), 2)
}
# Read the scraped listings from "export.csv", sort them by parsed price, and
# render each row as an HTML anchor ("price: name" linking to the offer URL),
# returning a single "<html>...</html>" string for the mail body.
# NOTE(review): by_row() is from purrrlyr and stores its per-row results in the
# `.out` list-column; the `{.$.out}` step extracts that column -- confirm the
# purrrlyr version keeps this behaviour.
prepare_content <- function() {
data <- read.csv("export.csv", stringsAsFactors = FALSE)
content <- data %>%
mutate(price = map_dbl(price, parse_price)) %>%
arrange(price) %>%
by_row(function(row) {
paste("<a href='", row$url, "'>", format(row$price, nsmall = 2), ": ", row$name, "</a><br/>")
}) %>%
{.$.out} %>%
as.list() %>%
do.call(paste, .)
paste("<html>", content, "</html>")
}
# Build the HTML digest from the exported listings and mail it via
# send_update() (sourced from ../mails/send_mail.R above) with today's date
# in the subject line.
content <- prepare_content()
title <- paste("Housing:", today())
send_update(title, content)
|
90f2b663bd6863e5ff3956f62fcc5baf60465e7f
|
dab3c57e18228e58418fadea86362f366fa0d3ee
|
/R/imports.R
|
3704a8574642a0f705dcbcda2903af475e8e921c
|
[] |
no_license
|
gravesee/binnr2
|
01446f0a43d8cce1c8aa9b09e36bd492fff70d1c
|
02050367d2e1893a0bd6987291463f9d63bce7e9
|
refs/heads/master
| 2021-06-10T05:37:18.777779
| 2016-01-22T23:19:27
| 2016-01-22T23:19:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 50
|
r
|
imports.R
|
#' @useDynLib binnr2
NULL
#' @import glmnet
NULL
|
91661b9c545a88d32ecc7162368da10ade8a5788
|
f44fd21032067475ce3e61651ee8ad4dd60d6300
|
/Machine Learning/Markov Chains/RentalCar/RentalCar.R
|
67d2121657dc18baa0f48337590bb9ff3e803d92
|
[] |
no_license
|
ribartra/alexhwoods.com
|
a52d1bb814cf33374d69b439c2c860915437b5e5
|
7bb8c084e6592f89a16aa1372c241c5a2300c196
|
refs/heads/master
| 2021-10-28T18:32:18.698430
| 2019-04-24T13:47:57
| 2019-04-24T13:47:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,261
|
r
|
RentalCar.R
|
# Suppose a car rental agency has three locations in Ottawa: Downtown location (labeled A), East end location (labeled B) and a West end location (labeled C). The agency has a group of delivery drivers to serve all three locations. The agency's statistician has determined the following:
#
# 1. Of the calls to the Downtown location, 30% are delivered in Downtown area, 30% are delivered in the East end, and 40% are delivered in the West end
#
# 2. Of the calls to the East end location, 40% are delivered in Downtown area, 40% are delivered in the East end, and 20% are delivered in the West end
#
# 3. Of the calls to the West end location, 50% are delivered in Downtown area, 30% are delivered in the East end, and 20% are delivered in the West end.
#
# After making a delivery, a driver goes to the nearest location to make the next delivery. This way, the location of a specific driver is determined only by his or her previous location.
#
# We model this problem with the following matrix:
library(markovchain)

rentalStates <- c("Downtown", "East", "West")
# Row i holds the delivery-destination probabilities for calls to location i.
# (byrow = TRUE spelled out instead of the reassignable shorthand T.)
rentalTransition <- matrix(c(0.3, 0.3, 0.4,
                             0.4, 0.4, 0.2,
                             0.5, 0.3, 0.2),
                           byrow = TRUE, nrow = 3, dimnames = list(rentalStates, rentalStates))

mcRental <- new("markovchain", states = rentalStates, byrow = TRUE,
                transitionMatrix = rentalTransition, name = "Rental Cars")

# We can access the transition matrix just by calling the mc object
mcRental[1] # the probabilities that we go Downtown, East, and West, given that we are currently Downtown
plot(mcRental) # we can plot it
transitionProbability(mcRental, "Downtown", "West") # the prob that a driver will go from downtown to West

# Here is a question to set up some of the functions
# Given we are downtown, what is the probability we will be downtown in two trips?
# FIX: these intermediates were previously named a, b and c; `c` masked
# base::c(), so they are renamed to avoid shadowing a core function.
# We can go Downtown -> Downtown,
p_via_downtown <- 0.3 * 0.3
# East -> Downtown (note that to we have to get the probability of going Downtown from the East location),
p_via_east <- 0.3 * 0.4
# West -> Downtown (same logic here)
p_via_west <- 0.4 * 0.5
p_via_downtown + p_via_east + p_via_west # The probability that we will be downtown in 2 trips.

# That isn't something you want to be doing, especially if you want the probabilities after 20 trips.
# In turns out though, we can get the same results by squaring the matrix.
mcRental ^ 2
# We can do this for any number of trips, where the number of trips is the exponent.
mcRental ^ 20 # notice how where you are starts to become irrelevant, as the number of trips increases.
# It's also important to note that the transition matrix T ^ n, will converge as n increases,
# given that there are no 0's or 1's in our initial matrix.
# So if we had 70 drivers, how many drivers would be at the West location after 30 trips?
# This distribution, the converged probabilities of each state, where the location at which you start
# is irrelevant (because n is sufficiently large), is called the stationary distribution.
# We can access it using the steadyStates() method.
mcRental ^ 30
70*steadyStates(mcRental)
# Now let's look at some of the other methods that the markovchain package has
summary(mcRental)
conditionalDistribution(mcRental, "Downtown")
|
396bcd446c028aa02544a7dc409805ed736130e0
|
70fe269c7eed2af3a23402a2031e3d2e549170d5
|
/Json practice.R
|
8c20d79e8ba84577107f7b5bb71f4d2dcfde03c3
|
[] |
no_license
|
VetMomen/Getting-and-cleaning-data
|
1f08e116d1dbeb2a84d6e2b055e81f4f3d9908dc
|
27e87ade198b9da236fe39cbe6301950d342cb98
|
refs/heads/master
| 2020-04-02T14:12:16.231607
| 2018-11-13T21:46:02
| 2018-11-13T21:46:02
| 154,515,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
r
|
Json practice.R
|
# Convert a data frame to JSON and back again (round-trip demo on mtcars).
# NOTE(review): toJSON()/fromJSON() are not base R -- presumably jsonlite
# (or rjson) is attached elsewhere in the session; confirm before sourcing
# this file on its own.
mtcarJ<-toJSON(mtcars,pretty = T)
mtcar<-fromJSON(mtcarJ)
#######################################
|
73bc0e3dd387c2d20fa1ecbcb4888a3b95d8faf4
|
ada7b6a9c28e9c1f4f7eff6f9f04b7b0775eb50b
|
/man/OEFPIL.Rd
|
db5cd02cd60a3bf03878152b0607b9e6d72058f7
|
[] |
no_license
|
stazam/OEFPIL-
|
b68d57895e26fb99b74019ce6f91b0b8c944c7c9
|
8667a4509df875e6c5cdc9b96b95087ec3a8dc21
|
refs/heads/main
| 2023-07-03T12:11:40.204486
| 2021-08-18T17:01:43
| 2021-08-18T17:01:43
| 342,588,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,400
|
rd
|
OEFPIL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OEFPIL.R
\name{OEFPIL}
\alias{OEFPIL}
\title{Optimal Estimation of Parameters by Iterated Linearization}
\usage{
OEFPIL(data, form, start.val, CM, max.iter = 100, see.iter.val = FALSE,
save.file.name, th, signif.level, useNLS = TRUE)
}
\arguments{
\item{data}{a data file can be any object of type \code{data.frame} with 2 named columns or \code{list} with 2 elements.}
\item{form}{an object of class \code{\link[stats]{formula}} (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ‘Details’.}
\item{start.val}{a named list of starting values of estimating parameters.}
\item{CM}{a covariance matrix of \code{data} (See 'Details' for the information about required structure.).}
\item{max.iter}{maximum number of iterations.}
\item{see.iter.val}{logical. If \code{TRUE}, all the partial results of the algorithm are displayed and saved. The default value is \code{FALSE}.}
\item{save.file.name}{a name of the file for saving results. If missing, no output file is saved.}
\item{th}{a numerical value, indicating threshold necessary for the iteration stoppage. The default value is \code{.Machine$double.eps ^ (2 / 3)}.}
\item{signif.level}{a significance level for the confidence interval. If missing, the default value 0.05 is used.}
\item{useNLS}{logical. If \code{TRUE} (the default value), function will set up starting parameters calculated by \code{\link{nlsLM}} function (nonlinear least square estimation).}
}
\value{
Returns an object of class \code{"OEFPIL"}. It is a list containing the following components
\item{name_Est}{estimations of model parameters.}
\item{name_upgraded.start.val}{modified starting values of estimating parameters (result from \code{\link{nlsLM}} function).}
\item{cov.m_Est}{estimated covariance matrix of parameters.}
\item{cov.m_nlsLM}{a covariance matrix of starting values of parameters from \code{\link{nlsLM}} function (if \code{useNLS} was set to \code{TRUE}).}
\item{it_num}{number of iterations.}
\item{name_previous.step}{the parameter values from the previous iterative step.}
\item{CI_parameters}{a list of confidence intervals for estimated parameters (a significance level is based on \code{signif.level} argument).}
\item{logs}{warnings or messages of events, which happen during the run of the algorithm.}
\item{contents}{a list of outputs as original values of data and other characteristics, which are usable in plotting or other operations with model results.}
If \code{useNLS} argument is set to \code{FALSE}, the \code{name_upgraded.start.val} are the same as \code{start.values} (no \code{nlsLM} procedure for starting value fitting is performed).
}
\description{
Function for computing optimal estimate of parameters of a nonlinear function by iterated linearization (using Taylor expansion). The model considers measurements errors in both (dependent and independent) variables.
}
\details{
Models for OEPFIL function are specified symbolically. A typical model has the form \code{y ~ f(x, a_1,...,a_n)}, where
\itemize{\item \code{y} is the (numerical) response vector,
\item \code{x} is the predictor,
\item terms \code{a_1,...,a_n} are parameters of specified model.}
Function \code{f} is known nonlinear function with continuous second partial derivatives with respect to \code{x} and parameters \code{a_1,...a_n} (for more details see \emph{Kubacek}).
All calculations are performed assuming normality of a response vector and measurements errors.
In the \code{data} entry of type \code{data.frame}, both columns must be named as variables in formula. The same holds for elements of \code{list}.
A choice of \code{start.val} is important for the convergence of the algorithm. If the \code{OEFPIL} algorithm does not converge, starting values modified by \code{nlsLM} function (\code{useNLS = TRUE}) are recommended (see Example 3).
The \code{CM} has to be a \code{2n} covariance matrix (where \code{n} is length of \code{data}) of following structure: first \code{n} elements of the diagonal correspond to the variance of independent variable (x) and other to the variance of dependent variable (y).
If argument \code{CM} is missing, the input covariance matrix is set to a diagonal variance matrix with sample variance on the main diagonal.
}
\note{
The symbol \code{pi} is reserved for Ludolf's constant (the number pi). Consequently, naming one of the model's parameters \code{pi} results in a constant entry in the model.
}
\examples{
##Example 1 - Use of OEFPIL function for steam data from MASS library
library(MASS)
steamdata <- steam
colnames(steamdata) <- c("x","y")
k <- nrow(steamdata)
CM <- diag(rep(5,2*k))
st1 <- OEFPIL(steamdata, y ~ b1 * 10 ^ (b2 * x/ (b3 + x)),
list(b1 = 5, b2 = 8, b3 = 200), CM, useNLS = FALSE)
## Displaying results using summary function
summary(st1)
## Plot of estimated function
plot(st1, signif.level = 0.05)
## Example 2 - Use of OEFPIL for nanoindentation data "silica2098.RData"
## (which is part of the OEFPIL package)
## Preparing arguments for OEFPIL function
max.iter = 100
see.iter.val = FALSE
signif.level = 0.05
useNLS = TRUE
## Creating a list with starting values for parameters
start.val <- list(alpha=0.1, m=1.5, hp=0.9)
names(start.val) <- c("alpha", "m", "hp")
## Imputed formula
form <- Load ~ alpha * (Depth - hp) ^ m
k <- length(silica2098[,1])
CM <- diag(c(rep(0.5^2,k),rep(0.001^2,k)))
## Use of OEFPIL function with defined arguments
output.form <- OEFPIL(silica2098, form, start.val, CM = CM, max.iter = max.iter,
see.iter.val = see.iter.val, signif.level = signif.level, useNLS = useNLS)
## Displaying results with summary (the result is the same as in NanoIndent.OEFPIL function)
summary(output.form)
}
\references{
Kubacek, L. and Kubackova, L. (2000) \emph{Statistika a metrologie}. Univerzita Palackeho v Olomouci.
Koning, R., Wimmer, G. and Witkovsky, V. (2014) \emph{Ellipse fitting by nonlinear constraints to demodulate quadrature homodyne interferometer signals and to determine the statistical uncertainty of the interferometric phase}.
Measurement Science and Technology.
}
\seealso{
\code{\link{NanoIndent.OEFPIL}} and function \code{\link[minpack.lm]{nlsLM}} from \code{minpack.lm} package for nonlinear least square algorithms.
}
|
a877e3eb0f99bc5405e8694b74c5cb1d5ea6c9af
|
a8dae99358913f006416494901fb13ce88071020
|
/files for github/df_intronCounts_genEx.R
|
46e10a8cd7e145657d4353a8e1dadb076a99671b
|
[] |
no_license
|
nishika/SRA_Annotation
|
b31570ece50ab90a3eb8ea35c3e25320669f3eb2
|
fce1fe1d347fe4fa9ae5fb0b032e937f2283ba23
|
refs/heads/master
| 2020-05-17T21:20:49.008274
| 2015-07-29T20:08:07
| 2015-07-29T20:08:07
| 39,899,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,730
|
r
|
df_intronCounts_genEx.R
|
# This script creates a data frame that includes expression and total intron
# counts of a particular gene.
# 'sumcall' will be used to measure gene expression. See the 'sumcall' script
# (found in this package); 'sumcall' yields 'df_gene', which contains the 500
# summed gene expression values.

# 'numbers_of_introns.tsv' contains file names in the format
# 'junctions.ERP001942_ERS185251_ERX162774_ERR188116-1-1.bed'. For Leek Lab
# purposes, path = '/home/other/nkarbhar/sratissue/numbers_of_introns.tsv'.
doc <- read.table("numbers_of_introns.tsv", header=TRUE)
# 'counts' contains the number of reads mapping to each chromosome from each of 500 bigWig files.
counts <-read.table("counts.tsv",sep="\t",header=TRUE, stringsAsFactors=FALSE)

######################## load gene expression data frame ################################
# Data frame containing expression of the query gene, summed across all 500
# files (see the "sumcall" script).
load("df_gene.Rda")

############ create data frame with total intron counts and summed gene expression ########
# Vector of the 500 bigWig file names; ideally 'doc' and 'files' are always
# the same length, if updated.
files <- scan("sra_samples.txt", what="", sep="\n")

# Extract the run accession from each junction-file name in column 1 of 'doc',
# e.g. "junctions.<accession>.bed" -> "<accession>".
# FIX: replaces a 1:length(files) loop that grew 'namesvec' one element at a
# time (and an unused seed value) with a type-safe vapply over seq_along().
namesvec <- vapply(seq_along(files), function(i) {
  strsplit(as.character(doc[i, 1]), "junctions.|.bed")[[1]][2]
}, character(1))

doc$file <- namesvec
# Since 'doc' contains total intron counts, and 'df_gene' contains expression,
# 'df_introns_ex' now contains all desired values, keyed on the run accession.
df_introns_ex <- merge(doc, df_gene, by = 'file')
|
63050c63182c990df5a99b72a383cfc781434425
|
cef2a9c5c283d31cabb2afec875a4912ebed2a5a
|
/scripts/r_scripts/4.1_fourth_site.R
|
7bf582b0cd531fb2e9929558eb0f9c977ba57e29
|
[] |
no_license
|
la466/fourth_site
|
a15620c08be57e90cedf62cc4b9470b302107a70
|
8eecac178f26cb43f4086d4da862d810e428f4a7
|
refs/heads/master
| 2020-12-02T17:48:48.660463
| 2017-09-05T08:42:27
| 2017-09-05T08:42:27
| 96,431,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,697
|
r
|
4.1_fourth_site.R
|
# Remove all variables
# (NOTE(review): rm(list = ls()) is a scripting convention here; it clears the
# interactive workspace and should not be kept if this file is ever sourced
# from another script.)
closeAllConnections()
rm(list=ls(all=TRUE))
# Create directory if not already created
dir.create(file.path("outputs/", "r_outputs/"), showWarnings = FALSE)
dir.create(file.path("outputs/", "graphs/"), showWarnings = FALSE)
dir.create(file.path("outputs/", "graphs/tiff/"), showWarnings = FALSE)
################
# File Inputs
################
# Per-genome nucleotide ratios/proportions at site 4, start-codon A ratios and
# chi-square results (with/without overlapping genes), produced by earlier
# pipeline steps under outputs/ratio_testing/site_4/.
site_4_ratios <- read.csv('outputs/ratio_testing/site_4/_site_4_ratios.csv', head=T)
site_4_ratios_t4 <- read.csv('outputs/ratio_testing/site_4/_site_4_ratios_t4.csv', head=T)
site_4_ratios_abs <- read.csv('outputs/ratio_testing/site_4/_site_4_abs_A_count.csv', head=T)
site_4_starts <- read.csv('outputs/ratio_testing/site_4/_site_4_start_codon_A_ratios.csv', head=T)
chitest <- read.csv('outputs/ratio_testing/site_4/_chisquare_site_4.csv', head=T)
no_overlaps <- read.csv('outputs/ratio_testing/site_4/_chisquare_site_4_no_overlap.csv', head=T)
################
# Functions
################
# Re-save the TIFF file at `filepath` in place with LZW compression
# (shrinks files that ggsave() wrote uncompressed).
compress_tiff <- function(filepath) {
  library(tiff)
  image_data <- readTIFF(filepath)
  writeTIFF(image_data, filepath, compression = "LZW")
}
# Publication-style ggplot2 theme built on ggthemes::theme_foundation:
# bold centered title, black axis lines, light grey major gridlines, no panel
# border/background, legend drawn inside the plot area (top right by default).
#
# base_size: base font size passed through to theme_foundation.
# Returns a theme object to be added to a ggplot with `+`.
theme_Publication <- function(base_size=16) {
library(grid)
library(ggthemes)
(theme_foundation(base_size=base_size)
+ theme(
plot.title = element_text(face = "bold" , size = rel(1), hjust =0.5, vjust = 0.5),
text = element_text(),
panel.background = element_rect(colour = NA),
plot.background = element_rect(colour = NA),
panel.border = element_rect(colour = NA),
axis.title.y = element_text(angle=90,vjust =2),
axis.title.x = element_text(vjust = -3),
axis.text = element_text(),
axis.line = element_line(colour="black"),
axis.ticks = element_line(),
panel.grid.major = element_line(colour="#f0f0f0"),
panel.grid.minor = element_blank(),
legend.key = element_rect(colour = NA),
legend.position = c(0.9,0.9),
legend.background = element_rect(fill=NA),
legend.title=element_blank(),
plot.margin=unit(c(10,5,5,5),"mm"),
strip.background=element_rect(colour="#f0f0f0",fill="#f0f0f0")
))
}
################
# Tests
################
# All summary statistics below are redirected to a text report via sink();
# the closing sink() restores console output.
sink("outputs/r_outputs/4.1_fourth_site.txt")
cat("\n===============================================================\n")
cat("Number of genomes with fourth site A > 0.25\n")
cat("===============================================================\n")
genomes_a4 <- sum(site_4_ratios$Prop_A4 > 0.25, na.rm=TRUE)
cat(sprintf("%s / %s\n", genomes_a4, length(site_4_ratios$Prop_A4)))
cat("\n===============================================================\n")
cat("Number of genomes with A4 > 1\n")
cat("===============================================================\n")
genomes_a4 <- sum(site_4_ratios$A_Ratio > 1, na.rm=TRUE)
cat(sprintf("%s / %s\n", genomes_a4, length(site_4_ratios$A_Ratio)))
cat(sprintf("%s\n", genomes_a4 / length(site_4_ratios$A_Ratio)))
cat("\n===============================================================\n")
cat("Number of genomes with significant A4 > 1\n")
cat("===============================================================\n")
# Bonferroni-correct the chi-square p-values before counting significant genomes.
chitest$padj <- p.adjust(chitest$pval, method="bonferroni")
sum(ifelse(chitest$padj < 0.05, 1, 0))
cat("\n===============================================================\n")
cat("Number of genomes with significant A4 > 1 removing overlaps\n")
cat("===============================================================\n")
no_overlaps$padj <- p.adjust(no_overlaps$pval, method="bonferroni")
sum(ifelse(no_overlaps$padj < 0.05, 1, 0))
cat("\n===============================================================\n")
cat("Max A4 prop\n")
cat("===============================================================\n")
max_a4 <- max(site_4_ratios$Prop_A4, na.rm=TRUE)
cat(sprintf("%s - %s\n", max_a4, site_4_ratios$Genus[site_4_ratios$Prop_A4 == max_a4]))
# Are the A ratios at the fourth site correlated with GC3 content?
cat("\n===============================================================\n")
cat("Are the A4 ratios correlated with GC3 content?\n")
cat("===============================================================\n")
shapiro.test(site_4_ratios$GC3)
shapiro.test(site_4_ratios$A_Ratio)
cor.test(site_4_ratios$GC3, site_4_ratios$A_Ratio, method=c("spearman"))
cat("\n===============================================================\n")
cat("Number of genomes with T4 > 1\n")
cat("===============================================================\n")
# FIX: the three counts below previously used "=" for assignment; switched to
# "<-" for consistency with the rest of the script.
genomes_t4 <- sum(site_4_ratios$T_Ratio > 1, na.rm=TRUE)
cat(sprintf("%s / %s\n", genomes_t4, length(site_4_ratios$T_Ratio)))
cat(sprintf("%s\n", genomes_t4 / length(site_4_ratios$T_Ratio)))
cat("\n===============================================================\n")
cat("Number of genomes with C4 > 1\n")
cat("===============================================================\n")
genomes_c4 <- sum(site_4_ratios$C_Ratio > 1, na.rm=TRUE)
cat(sprintf("%s / %s\n", genomes_c4, length(site_4_ratios$C_Ratio)))
cat(sprintf("%s\n", genomes_c4 / length(site_4_ratios$C_Ratio)))
cat("\n===============================================================\n")
cat("Number of genomes with G4 > 1\n")
cat("===============================================================\n")
genomes_g4 <- sum(site_4_ratios$G_Ratio > 1, na.rm=TRUE)
cat(sprintf("%s / %s\n", genomes_g4, length(site_4_ratios$G_Ratio)))
cat(sprintf("%s\n", genomes_g4 / length(site_4_ratios$G_Ratio)))
# Are the A4 ratios significantly greater than T4 ratios?
cat("\n=======================================================\n")
cat("Are the A4 ratios significantly greater than T4 ratios?\n")
cat("=======================================================\n")
shapiro.test(site_4_ratios$A_Ratio)
shapiro.test(site_4_ratios$T_Ratio)
wilcox.test(site_4_ratios$A_Ratio, site_4_ratios$T_Ratio, paired=TRUE)
mean_differences <- mean(site_4_ratios$A_Ratio - site_4_ratios$T_Ratio)
cat(sprintf("Mean ratio difference: %s\n\n", mean_differences))
mean_A4_ratios <- mean(site_4_ratios$A_Ratio)
mean_T4_ratios <- mean(site_4_ratios$T_Ratio)
cat(sprintf("Mean A4 ratio: %s\n", mean_A4_ratios))
cat(sprintf("Mean T4 ratio: %s\n", mean_T4_ratios))
# Start codon use?
cat("\n=======================================================\n")
cat("Start codon A ratios?\n")
cat("=======================================================\n")
cat(sprintf("ATG A4 ratios: %s +- %s\n", mean(site_4_starts$atg_a4_ratio), sd(site_4_starts$atg_a4_ratio)))
cat(sprintf("GTG A4 ratios: %s +- %s\n", mean(site_4_starts$gtg_a4_ratio), sd(site_4_starts$gtg_a4_ratio)))
# FIX: this line reports ttg_a4_ratio but was labelled "GTG" -- corrected to "TTG".
cat(sprintf("TTG A4 ratios: %s +- %s\n", mean(site_4_starts$ttg_a4_ratio), sd(site_4_starts$ttg_a4_ratio)))
sink()
################
# Plots
################
library(tiff)
library(ggplot2)
# Produce scatter of the A use: per-genome proportion of A-starting second
# codons (blue) and A-starting codons overall (black) against GC3, each with
# a linear fit; saved as PDF and LZW-compressed TIFF.
lab1 <- expression(paste(italic(A),"-starting second codons", sep=""))
lab2 <- expression(paste(italic(A),"-starting codons", sep=""))
plot <- ggplot(site_4_ratios) +
geom_point(aes(x=GC3, y=Prop_A4, colour="points1")) +
geom_smooth(aes(x=GC3, y=Prop_A4,colour="points1"),method = "lm", se = FALSE, size=0.8) +
geom_point(aes(x=GC3, y=Prop_A_Codons, colour="points2")) +
geom_smooth(aes(x=GC3, y=Prop_A_Codons, color="points2"),method = "lm", se = FALSE, size=0.8) +
scale_x_continuous(breaks = seq(0, 1, 0.1), limits=c(0.1, 1)) +
scale_y_continuous(breaks = seq(0, 1, 0.1), limits=c(0.1, 0.7)) +
labs(x="GC3", y="Genome proportion") +
scale_colour_manual(name=element_blank(), values=c(points1="blue", points2="black"), labels=c(lab1, lab2)) +
theme_Publication(base_size=16) +
theme(legend.position = c(0.25,0.1), legend.text=element_text(size=16), legend.text.align = 0)
ggsave('4.1_A_proportions.pdf', path="outputs/graphs/", plot = plot, dpi=600, width=180, height=180, units="mm")
ggsave('4.1_A_proportions.tiff', path="outputs/graphs/tiff/", plot = plot, dpi=350, width=180, height=180, units="mm")
compress_tiff('outputs/graphs/tiff/4.1_A_proportions.tiff')
# Kernel Density plots
# Build a kernel-density plot of the per-genome proportion of CDSs carrying
# each nucleotide (A/C/G/T) at the given codon position.
#
# site: integer position index (4-7 in this analysis); used both to locate
#       the input CSV and to pick the Prop_<base><site> columns.
# Returns a ggplot object (nothing is written to disk here).
generate_density_plot <- function(site){
  library(tidyr)
  library(dplyr)
  library(ggplot2)
  ratio_file_path <- paste('outputs/ratio_testing/site_', site, '/_site_', site, '_ratios.csv', sep='')
  file <- read.csv(ratio_file_path, head=T)
  # Column names of the per-base proportion columns for this site.
  aprop <- paste('Prop_A', site, sep='')
  cprop <- paste('Prop_C', site, sep='')
  gprop <- paste('Prop_G', site, sep='')
  tprop <- paste('Prop_T', site, sep='')
  aprops <- file[aprop]
  cprops <- file[cprop]
  gprops <- file[gprop]
  tprops <- file[tprop]
  restrict_columns <- data.frame(aprops, cprops, gprops, tprops)
  # BUG FIX: the columns are bound in A, C, G, T order, but the names were
  # previously set to c('A', 'C', 'T', 'G'), which swapped the G and T
  # labels (and colours) in the plot legend.
  colnames(restrict_columns) <- c('A', 'C', 'G', 'T')
  # Reshape to long form: one row per (base, proportion) pair.
  data <- restrict_columns %>% gather()
  colnames(data)<- c("base", 'prop')
  # (Removed an unused `data.f <- factor(data, ...)` intermediate: factor() on
  # a data frame was meaningless and its result was never used.)
  cols <- c("#56B4e9", "#e69f00","#c979a7","#009e73")
  data$prop <- as.numeric(data$prop)
  data$base <- as.factor(data$base)
  kplot <- ggplot(data, aes(x=prop)) +
    geom_density(aes(group=base, colour=base), size=1, show.legend=FALSE, lty=1) +
    scale_x_continuous(breaks = seq(0, 1, 0.1), limits=c(0, 1)) +
    labs(x=paste("Proportion of CDSs with nucleotide"), y="Density") +
    stat_density(aes(x=prop, colour=base), geom="line",position="identity") +
    guides(colour = guide_legend(override.aes = list(size=1.2)))+
    scale_colour_manual(values=cols)+
    ggtitle(paste('Site', site)) +
    theme_Publication(base_size=13) +
    theme(legend.position = c(0.85,0.85), legend.title=element_blank())
  return(kplot)
}
# Generate the density plot for each analysed codon position (sites 4-7).
plot4 <- generate_density_plot(4)
plot5 <- generate_density_plot(5)
plot6 <- generate_density_plot(6)
plot7 <- generate_density_plot(7)
# (Retained dead code: an earlier per-site save helper, superseded by the
# combined 2x2 panel written below.)
# save_plot <- function(site, kplot) {
# ggsave(paste('4.1_site_', site , '_proportion_kernel_density.tiff', sep=""), path="outputs/graphs/tiff/", plot = kplot, dpi=600)
# tiff <- readTIFF(paste('outputs/graphs/tiff/4.1_site_', site, '_proportion_kernel_density.tiff', sep=""))
# writeTIFF(tiff, paste('outputs/graphs/tiff/4.1_site_', site, '_proportion_kernel_density.tiff', sep=""), compression="LZW")
# }
# save_plot(4, plot4)
# save_plot(5, plot5)
# save_plot(6, plot6)
# save_plot(7, plot7)
library(gridExtra)
# Arrange the four site plots in a 2x2 grid and save as PDF + compressed TIFF.
plot <- grid.arrange(plot4, plot5, plot6, plot7, nrow=2)
ggsave('4.1_sites_proportion_kernel_densities.pdf', path="outputs/graphs/", plot = plot, dpi=400)
ggsave('4.1_sites_proportion_kernel_densities.tiff', path="outputs/graphs/tiff/", plot = plot, dpi=600, width=180, height=180, units="mm")
compress_tiff('outputs/graphs/tiff/4.1_sites_proportion_kernel_densities.tiff')
print('Outputs in outputs/r_outputs/4.1_fourth_site.txt')
print('Graphs in outputs/graphs')
|
80cd820c97a390386379aa101ca4b8a8d3c07860
|
e91d3e01663cea7314679cad9d7fae8e4387253a
|
/Cross_validation_CDA.R
|
183b932f59bbd3b5771c9e035225cde0bf46eb94
|
[] |
no_license
|
HelloFloor/CaseStudyLineair2018
|
736a95aec14240e19027ac04bb7d724adc4693de
|
37b8d1600e45a688f504a30e4a7d58e733f7ea46
|
refs/heads/master
| 2020-04-07T08:50:36.713519
| 2019-01-21T12:38:03
| 2019-01-21T12:38:03
| 158,229,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,851
|
r
|
Cross_validation_CDA.R
|
################################################################################
# Run-Time Environment: R version 3.4.2
# Author: Ilse van Beelen
# Script: Cross_validation_CDA.R
# Purpose of script: Cross-validation of final model CDA
# Datafiles used: Clean_data_CDA_2018-12-14.csv;
# Data downloaded: Data downloaded from statline.cbs.nl
#
# Date: 2018-12-17
# Version: V.1.0
################################################################################
#### Libraries ####
library(car)
#### Set up ####
rm(list = ls()) # empty work space (script convention; drop if sourced from elsewhere)
Data <- read.csv("1_clean_data/Cleandata_CDA_2018-12-14.csv",
header = T, stringsAsFactors = F)
# Recode covariates and build the binomial response
Data$Non_west <- as.factor(Data$Non_west) # needs to be recognized as factor
row.names(Data) <- Data$Muni # change rownames to the municipalities
# CDA vote share per 1000 voters; Voted_other is its complement, so together
# they form the (successes, failures) pair for the binomial GLM below.
Data$CDA_perc <- round(Data$CDA * 1000, digits = 0)
Data$Voted_other <- 1000 - Data$CDA_perc
#### Final model ####
# Note: the stray leading "+" before Non_west is a harmless unary plus.
final_model <- glm(cbind(CDA_perc, Voted_other) ~ Urban_index + High_edu_perc +
+Non_west + Perc_60plus,
family=binomial,data = Data)
summary(final_model)
#### Make folds ####
# 10-fold cross-validation of final_model. Fold labels are assigned by
# repeating 1:K over the rows and shuffling, so fold sizes differ by at most 1.
K <- 10
index <- rep(1:K, floor(nrow(Data)/K)+1)[1:nrow(Data)]
summary(as.factor(index))
fold.index <- sample(index)

# Mean squared error between observed and predicted vote shares.
Loss <- function(x, y){
  sum((x-y)^2)/length(x)
}

loss <- numeric(K)
for (k in 1:K){
  training <- Data[fold.index!=k, ]
  validation <- Data[fold.index==k, ]
  # BUG FIX: the model must be refitted on the training folds. Previously
  # `training.fit <- final_model` reused the fit on the FULL data set, so the
  # "cross-validated" loss was really an in-sample loss.
  training.fit <- update(final_model, data = training)
  validation.predict <- predict(training.fit, newdata=validation, type='response')
  loss[k] <- Loss(validation$CDA, validation.predict)
}

# Average CV loss across folds (folds are near-equal in size, so a plain mean
# is used rather than a size-weighted one).
mean(loss)
|
968696118d0ae3d6bf60c99585f700132661aa52
|
5dd398427794e8b4df1096460c66412696bef039
|
/man/iowaSW97_06small.Rd
|
05cbff5d1c624cfd84e21dc7ccdb83c29b656f59
|
[] |
no_license
|
cran/CARrampsOcl
|
f2dd8d9f1df5f58f50c5e7601b7bafe331c2a49e
|
83866543a9ed921ba23e39ce606783eb0b58ba54
|
refs/heads/master
| 2021-01-19T08:07:44.348036
| 2013-08-18T00:00:00
| 2013-08-18T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,355
|
rd
|
iowaSW97_06small.Rd
|
\name{iowaSW97_06small}
\alias{iowaSW97_06small}
\docType{data}
\title{Southwest Iowa 10-year normalized difference vegetation index NDVI values}
%% ~~ data name/kind ... ~~
\description{
Normalized difference vegetation index (NDVI) values derived from satellite
image data from southwest Iowa and eastern Nebraska in July of each
year from 1997 through 2006. These are 2040 values,
representing NDVI at each pixel on a rectangle with 17 rows and 12 columns
at each of 10 times.
}
\usage{data(iowaSW97_06small)}
\format{
A vector of 2040 integer values. The data are in row-major order within
year.
}
%%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%%}
\source{
http://glcf.umiacs.umd.edu/data/gimms/
}
\references{
Pinzon, J., Brown, M.E. and Tucker, C.J., 2005. Satellite time series correction of orbital drift artifacts using empirical mode decomposition. In: N. Huang (Editor), Hilbert-Huang Transform: Introduction and Applications, pp. 167-186.
Tucker, C.J., J. E. Pinzon, M. E. Brown, D. Slayback, E. W. Pak, R. Mahoney, E. Vermote and N. El Saleous (2005), An Extended AVHRR 8-km NDVI Data Set Compatible with MODIS and SPOT Vegetation NDVI Data. International Journal of Remote Sensing, Vol 26:20, pp 4485-5598.
}
\seealso{
\code{\link{plot3Q}}
}
\examples{
data(iowaSW97_06small)
}
\keyword{datasets}
|
9311f0900fd423b9ca01e4a27038728e79e915b9
|
7e83da9f8716e394e68d82229d486a43f83cad4e
|
/01-data_cleaning-post-strat1.R
|
d21b19f9ea02d2f8b1ea22a2767607bedbe65348
|
[] |
no_license
|
Juntonglin/problemset-3
|
599c779d67270bd5f78f48ba27ec820356f36baf
|
ff8a8e0222b7ba17805fb7f81c657bfa5fcb1a5b
|
refs/heads/main
| 2023-01-03T20:02:10.674300
| 2020-11-03T04:54:39
| 2020-11-03T04:54:39
| 308,939,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,445
|
r
|
01-data_cleaning-post-strat1.R
|
#### Preamble ####
# Purpose: Prepare and clean the survey data downloaded from census data
# Author: Juntong Lin
# Date: 22 October 2020
# Contact: juntong.lin@mail.utoronto.ca
# License: MIT
# Pre-requisites:
# - Need to have downloaded the ACS data and saved it to inputs/data
# - Don't forget to gitignore it!
#### Workspace setup ####
library(haven)
library(tidyverse)
# Read in the raw data.
# NOTE(review): path is relative to the working directory; the preamble says
# the raw file should live in inputs/data -- confirm the path is correct.
raw_data2 <- read_csv("usa_00001.csv.gz")
# Add the labels
raw_data2 <- labelled::to_factor(raw_data2)
# Just keep some variables that may be of interest (change
# this depending on your interests)
reduced_data2 <-
  raw_data2 %>%
  select(STATEICP,
         HHINCOME,
         PERWT,
         SEX,
         RACE,
         AGE,
         EMPSTAT)
#### What's next? ####
# Clean these variables to make them comparable to survey data
# Binary recodes below: employment = 1 when EMPSTAT == 1, gender = 1 when
# SEX == 1, race = 1 when RACE == 1 -- presumably "employed"/"male"/"white"
# per the IPUMS/ACS codebook; verify against the codebook.
# household_income and education are binned into the survey's answer
# categories via cut().
reduced_data2 <-
  reduced_data2 %>%
  mutate(age = AGE,
         employment = ifelse(EMPSTAT==1, 1, 0),
         gender = ifelse(SEX == 1, 1, 0),
         race = ifelse(RACE == 1, 1, 0),
         household_income = cut(HHINCOME,
                                c(-Inf, seq(15000,100000, 5000), seq(125000, 200000, 25000), 250000, Inf),
                                include.lowest = TRUE, right = FALSE,
                                labels = c("Less than $14,999","$15,000 to $19,999","$20,000 to $24,999","$25,000 to $29,999",
                                           "$30,000 to $34,999","$35,000 to $39,999","$40,000 to $44,999","$45,000 to $49,999",
                                           "$50,000 to $54,999","$55,000 to $59,999","$60,000 to $64,999","$65,000 to $69,999",
                                           "$70,000 to $74,999","$75,000 to $79,999","$80,000 to $84,999","$85,000 to $89,999",
                                           "$90,000 to $94,999","$95,000 to $99,999","$100,000 to $124,999","$125,000 to $149,999",
                                           "$150,000 to $174,999","$175,000 to $199,999","$200,000 to $249,999","$250,000 and above")),
         education = cut(EDUC, c(-1,2,5,6,9,10,11),
                         labels = c("Middle School or less",
                                    "Completed some high school",
                                    "High school graduate",
                                    "Completed some college, but no degree",
                                    "College Degree (such as B.A., B.S.)",
                                    "More than College"))) %>%
  # join table to make state name two letters
  inner_join(pscl::state.info %>%
               as_tibble() %>%
               mutate(state = state.abb[match(state,state.name)]) %>%
               rename(STATEICP = icpsr)) %>%
  mutate(state = factor(state, levels = unique(.$state))) %>%
  subset(select = c(age, employment, gender, race, household_income, education, state, PERWT)) %>%
  # filter out don't know and NA
  na.omit()
## Here I am only splitting cells by age, but you
## can use other variables to split by changing
## count(age) to count(age, sex, ....)
# Post-stratification cells: weighted population count (sum of person
# weights PERWT) per combination of the demographic variables.
reduced_data3 <-
  reduced_data2 %>%
  group_by(age, employment, gender, race, household_income, education, state) %>%
  summarise(n = sum(PERWT))
reduced_data3 <-
  reduced_data3 %>%
  # Only want >= 18, legal to vote
  filter(age >= 18)
# Saving the census data as a csv file in my
# working directory
write_csv(reduced_data3, "census_data.csv")
|
22446fc9a6866d0cbec4850e801944bdeea08c32
|
3159605ba0ef744fe785a747340dea82d95d56dd
|
/Project_2/helper_functions.R
|
c88afdd6526b59f0b88ca4a89168ca668bc97eaf
|
[] |
no_license
|
datasci611/bios611-projects-fall-2019-mlfoste1
|
cf3df1b49fbc542d6ba0132f8c922c386b3a81de
|
87eede980d17e8e9d0d08d80362909c4b58b9590
|
refs/heads/master
| 2022-02-15T08:48:57.578492
| 2022-01-12T01:43:38
| 2022-01-12T01:43:38
| 207,409,446
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,004
|
r
|
helper_functions.R
|
library(tidyverse)
library(dplyr)
library(stringr)
#Create dataset-----------------------------------------
#load data file
# UMD (Urban Ministries of Durham) services log, pulled straight from GitHub;
# '**' marks missing values in the raw TSV.
UMD_df = read_tsv("https://raw.githubusercontent.com/biodatascience/datasci611/gh-pages/data/project1_2019/UMD_Services_Provided_20190719.tsv", na = '**')
#visuals
#Replace spaces in field names with underscores
spaceless <- function(x) {colnames(x) <- gsub(" ", "_", colnames(x));x}
rename_UMD_df <- spaceless(UMD_df)
#Convert date field to date data type; Remove unrelated fields; limit date range to 2007 to 2016 to match open dataset; limit food_provided_for to 1 to 10
final_UMD_df = rename_UMD_df %>%
  mutate(Date_of_service = as.Date(UMD_df$Date, "%m/%d/%Y")) %>%
  select(-'Date', -'Client_File_Merge', -'Bus_Tickets_(Number_of)', -'Notes_of_Service', -'Referrals', -'Financial_Support', -`Type_of_Bill_Paid`, -`Payer_of_Support`, -'Field1', -'Field2', -'Field3') %>%
  subset(Date_of_service > "2006-12-31" & Date_of_service < "2017-01-01")
#select distinct clientfilenum and max family size determined by food provided for
group_UMD_df = final_UMD_df %>%
  group_by(Client_File_Number) %>%
  filter(Food_Provided_for > 0, Food_Provided_for <= 10) %>%
  summarise(max_Food_Provided_for = max(Food_Provided_for)) %>%
  drop_na(max_Food_Provided_for)
#Create groups
# Bin max family size into labelled categories; "8+" pools sizes 8-10.
group_UMD_df$Family_Size <- cut(group_UMD_df$max_Food_Provided_for, breaks = c(0,1,2,3,4,5,6,7,10), labels=c("Individual","2","3","4","5","6","7","8+"))
group_UMD_df$Indv_Family <- cut(group_UMD_df$max_Food_Provided_for, breaks = c(0,1,10), labels=c("Individual","Family"))
#view(group_UMD_df)
#Counts by Family Size (2007-2016)
ggplot(group_UMD_df, aes(x=Family_Size)) +
  geom_bar(aes(fill=Family_Size)) +
  xlab("Family Size") +
  ggtitle('Counts by Family Size (2007-2016)')
#Individuals Versus Families (2007-2016)
ggplot(group_UMD_df, aes(x=Indv_Family), group = Indv_Family) +
  geom_bar(aes(fill=Indv_Family)) +
  xlab("Family Size") +
  ggtitle('Individuals Versus Families (2007-2016)')
#view(group_UMD_df)
#join to original data set to append Date_of_Service
#view by each family size
# Distinct clients served per year within each family-size group.
family_size_group_UMD_df = inner_join(group_UMD_df, final_UMD_df, by = c("Client_File_Number" = "Client_File_Number"), suffix = c(".x", ".y")) %>%
  mutate(Year_of_Service = format(Date_of_service, "%Y")) %>%
  group_by(Family_Size, Year_of_Service) %>%
  summarise(n=n_distinct(Client_File_Number))
#view(family_size_group_UMD_df)
#Total Number Services by Family Size by Year
ggplot(family_size_group_UMD_df, aes(x=Year_of_Service, y=n, group=Family_Size)) +
  geom_line(aes(color=Family_Size)) +
  scale_x_discrete() +
  xlab("Year_of_Service") +
  ylab("Number Serviced") +
  ggtitle('Total Number Serviced by Family Size by Year')
#view by individual vs family
# Same yearly distinct-client counts, collapsed to Individual vs Family.
Indv_Family_group_UMD_df = inner_join(group_UMD_df, final_UMD_df, by = c("Client_File_Number" = "Client_File_Number"), suffix = c(".x", ".y")) %>%
  mutate(Year_of_Service = format(Date_of_service, "%Y")) %>%
  group_by(Indv_Family, Year_of_Service) %>%
  summarise(n=n_distinct(Client_File_Number))
#view(Indv_Family_group_UMD_df)
#Individual vs Family by Year
ggplot(Indv_Family_group_UMD_df, aes(x=Year_of_Service, y=n, group=Indv_Family)) +
  geom_line(aes(color=Indv_Family)) +
  scale_x_discrete() +
  xlab("Year_of_Service") +
  ylab("Number Serviced") +
  ggtitle('Individual vs Family by Year')
#----------------------------------------------------------------------------------------------
#Create dataset-----------------------------------------
#load data file: Durham County Point-in-Time (PIT) homelessness counts
Durham_PIT_df = read_csv("https://raw.githubusercontent.com/datasci611/bios611-projects-fall-2019-mlfoste1/master/Project1/data/external/Durham%20County_Homeless_Point%20In%20Time.csv", na = '**')
#view(Durham_PIT_df)
# Keep only the two measures of interest (individuals vs people in families).
final_PIT_df = Durham_PIT_df %>%
  select('year','measures','count_') %>%
  filter(measures=="Homeless Individuals" | measures=="Homeless People in Families")
#view(final_PIT_df)
#group: recode the PIT measure names to match the UMD Individual/Family labels
final_PIT_df$Indv_Family<-NA
final_PIT_df$Indv_Family[final_PIT_df$measures=="Homeless Individuals"] <- "Individual"
final_PIT_df$Indv_Family[final_PIT_df$measures=="Homeless People in Families"] <- "Family"
# Restrict to years 2007-2016 to match the UMD date window above.
final_PIT_df$Year_of_service <- cut(final_PIT_df$year, breaks = c(2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016), labels=c("2007","2008","2009","2010","2011","2012","2013","2014","2015","2016"))
final_PIT_df$n <- as.numeric(final_PIT_df$count_)
#Individuals Versus Families (2007-2016)
indv_fam_plot = ggplot(final_PIT_df, aes(x=Indv_Family, y=n)) +
  geom_bar(stat="identity", aes(fill=Indv_Family)) +
  xlab("Family Size") +
  ggtitle('Durham PIT - Individuals Versus Families (2007-2016)')
#Total Number Services by Family Size by Year
fam_size_plot = ggplot(final_PIT_df, aes(x=Year_of_service, y=n, group=Indv_Family)) +
  geom_line(aes(color=Indv_Family)) +
  scale_x_discrete() +
  xlab("Year_of_Service") +
  ggtitle('Durham PIT - Total Number Services by Family Size by Year')
#---------------------------------------------------------------------------------
#Overlap data
#Number serviced vs number reported
# Plots, for a given year, the number of people serviced by UMD (Individual
# vs Family) overlaid with the counts reported in the Durham PIT survey.
# NOTE(review): this function reads `final_PIT_df` and
# `Indv_Family_group_UMD_df` from the global environment rather than taking
# them as arguments -- it only works after the script above has been run.
serv_reprt_plot = function(yearinput){
  # PIT counts for the requested year.
  final_PIT_df_plot = final_PIT_df %>%
    filter(year==yearinput)
  #view(final_PIT_df_plot)
  # UMD distinct-client counts for the same year.
  Indv_Family_group_UMD_df_plot = Indv_Family_group_UMD_df %>%
    filter(Year_of_Service==yearinput)
  #view(Indv_Family_group_UMD_df_plot)
  # Overlay UMD serviced counts and PIT reported counts as bars; x-axis
  # labels are suppressed since the fill legend identifies the groups.
  ggplot(Indv_Family_group_UMD_df_plot, aes(x=Indv_Family, y=n, group = Indv_Family)) +
    geom_bar(aes(fill=Indv_Family), stat="Identity") +
    geom_bar(data=final_PIT_df_plot, aes(x=measures, y=n, fill=measures), stat="Identity") +
    xlab("") +
    ylab("Number of People Serviced or Reported") +
    theme(axis.title.x=element_blank(),
          axis.text.x=element_blank(),
          axis.ticks.x=element_blank())
}
|
721e423f5d94de51a42d9482c5641c4b932ea03e
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/FDX/R/print_fun.R
|
5c8a24aac074bb40f7391d55bc92f23dae1f3d63
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,121
|
r
|
print_fun.R
|
#'@title Printing FDX results
#'
#'@description
#'Prints the results of discrete FDX analysis, stored in a \code{FDX}
#'S3 class object.
#'
#'@return
#'The respective input object is invisibly returned via \code{invisible(x)}.
#'
#'@param x an object of class "\code{FDX}".
#'@param ... further arguments to be passed to or from other methods.
#' They are ignored in this function.
#'
#'@template example
#'@examples
#'
#'DPB.crit <- DPB(raw.pvalues, pCDFlist, critical.values = TRUE)
#'print(DPB.crit)
#'
#'@method print FDX
#'@importFrom stats p.adjust
#'@export
## S3 method for class 'FDX'
print.FDX <- function(x, ...){
  # Total number of tested hypotheses and number rejected by this procedure.
  m <- length(x$Data$raw.pvalues)
  k <- x$Num.rejected
  # For comparison, recompute the rejections of the corresponding continuous
  # procedure (Lehmann-Romano or Guo-Romano), chosen by the method name.
  if(grepl("Lehmann", x$Method)){
    n <- continuous.LR(x$Data$raw.pvalues, x$FDP.threshold, x$Exceedance.probability, TRUE, FALSE)$Num.rejected
    orig <- "Lehmann-Romano"
  }
  else{
    n <- continuous.GR(x$Data$raw.pvalues, x$FDP.threshold, x$Exceedance.probability, TRUE, FALSE)$Num.rejected
    orig <- "Guo-Romano"
  }
  # print title (i.e. algorithm)
  cat("\n")
  cat("\t", x$Method, "\n")
  # print dataset name(s)
  cat("\n")
  cat("Data: ", x$Data$data.name, "\n")
  # print short results overview
  cat("Number of tests =", m, "\n")
  # Rejection count plus the FDX guarantee P(FDP > threshold) <= probability;
  # the rep(" ", ...) padding aligns the second line under the first.
  cat("Number of rejections = ", k, " when controlling FDP at level ", x$FDP.threshold, " with probability ",
      x$Exceedance.probability, ",\n", paste(rep(" ", 24 + nchar(as.character(k))), collapse = ""),
      "i.e. P(FDP > ", x$FDP.threshold, ") <= ", x$Exceedance.probability, "\n", sep = "")
  # The continuous-procedure comparison is only shown for discrete methods.
  if(!grepl("Continuous", x$Method))
    cat("Original", orig, "rejections =", n, "\n")
  # Benjamini-Hochberg rejections at the same level, as a further reference.
  cat("Original Benjamini-Hochberg rejections =", sum(p.adjust(x$Data$raw.pvalues, "BH") <= x$FDP.threshold),
      "at level", x$FDP.threshold, "\n")
  if(k){
    # Report the largest rejected (weighted, if applicable) p-value.
    if(!grepl("Weighted", x$Method))
      cat("Largest rejected p value: ", max(x$Rejected), "\n")
    else
      cat("Largest rejected weighted p value: ", max(x$Weighted[x$Indices]), "\n")
  }
  cat("\n")
  # Return the input invisibly, as print methods conventionally do.
  invisible(x)
}
|
4965697b72e8907cb4468e46745ce42e9e2d096b
|
9bb6b5a33eb6f6fed4022a36d49e64d7b9879389
|
/code/old/XX_try-diff-stats.R
|
c23ac261e64e4787b9cace714d374badd184297b
|
[] |
no_license
|
vanichols/ghproj_pfiweeds
|
1a4d9f396a9785342cabbe385e51953bfccdbabe
|
5a6eedd35606d67d289d16bd64b2f83b47a62453
|
refs/heads/master
| 2023-02-18T23:28:20.165748
| 2021-01-08T22:52:09
| 2021-01-08T22:52:09
| 247,970,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,080
|
r
|
XX_try-diff-stats.R
|
##################################
# Author: Gina Nichols (vnichols@iastate.edu)
# Created: Dec 30 2019
# Last modified: march 23 2020 (trying to recreate old analysis where things were sig)
#
# Purpose: do 'official' stats for manuscript
#
# Inputs: td_GHspecies, td_GHsum, td-all-ryebm2008-2019
#
# Outputs:
#
# Notes:
#
#
####################################

# (rm(list = ls()) removed: wiping the user's workspace is a side effect a
# script should not have; run in a fresh session instead.)

library(tidyverse)
library(lme4) #--for mixed models
library(lmerTest) #--to get significances
library(broom)
library(emmeans)

# what is spread in locs? -------------------------------------------------
dat <- read_csv("data/tidy/td-GHsum.csv")
#dat <- read_csv("_data/tidy/td-GHsum.csv")

# Per-location range and mean of total seeds per m2.
dat %>%
  group_by(loc) %>%
  summarise(min = min(totseeds_m2),
            max = max(totseeds_m2),
            mean = mean(totseeds_m2))

# matt doesn't use a ratio ------------------------------------------------
# One experimental-unit id per loc/system/rep; recode cover-crop treatment so
# the factor levels sort with "aryecc" (rye cover crop) first.
dstat_matt <-
  dat %>%
  unite(loc, cropsys, col = "loc_sys") %>%
  mutate(rep2 = paste0(loc_sys, rep),
         cc_trt2 = recode(cc_trt,
                          no = "none",
                          rye = "aryecc"))

#--full data set
m1 <- lmer(log(totseeds_m2) ~ loc_sys * cc_trt2 + (1|rep2), data = dstat_matt)
anova(m1)
emmeans(m1, pairwise ~ cc_trt2|loc_sys, type = "response")

#--outlier removed
# BUG FIX: this previously filtered the non-existent object `dstat`;
# the data frame used throughout this section is `dstat_matt`.
m2 <- lmer(log(totseeds_m2) ~ loc_sys * cc_trt2 + (1|rep2),
           data = filter(dstat_matt, totseeds_m2 < 15000))
anova(m2)
emmeans(m2, pairwise ~ cc_trt2|loc_sys, type = "response")

#--examples to help
#pigs.emm.s <- emmeans(pigs.lm, "source")
#pairs(pigs.emm.s)
#emm_s.t <- emmeans(noise.lm, pairwise ~ size | type, )

# make a ratio ------------------------------------------------------------
# Wide format: one row per rep with rye and no-cover seed counts; response
# ratio rat = rye/no.
datr <-
  dat %>%
  spread(cc_trt, value = totseeds_m2) %>%
  mutate(rat = (rye/no))

#--fit models w/ratio
# NOTE: only one obs for each rep, so can't include 'rep' in the model
# use a transformation in the actual model
mr1 <- lmer(log(rat) ~ cropsys + (1|loc), data = datr)
mr2 <- lm(log(rat) ~ cropsys, data = datr)
summary(mr2)

mr1em <- emmeans(mr1, "cropsys", weights = "proportional") #--this results in silage not being sig...
mr1em <- emmeans(mr2, "cropsys")

# get CIs/pvals, from https://cran.r-project.org/web/packages/emmeans/vignettes/confidence-intervals.html
test(mr1em)
res92 <- as_tibble(confint(mr1em, level = .925, type = "response")) %>%
  mutate(CL = "92.5%")
res95 <- as_tibble(confint(mr1em, level = .95, type = "response")) %>%
  mutate(CL = "95%")
res <- bind_rows(res92, res95) %>%
  mutate(cropsys = str_to_title(cropsys))

# NOTE(review): output path uses the "_data" prefix while the input above
# reads from "data/tidy" -- confirm which directory layout is current.
res %>% write_csv("_data/smy/sd_stats-lrr.csv")

# cc bio vs ratio? --------------------------------------------------------
ccbio <- read_csv("_data/smy/sd_ccbio-metrics.csv") %>%
  rename(loc = location,
         cropsys = crop_sys)

bior <- datr %>% left_join(ccbio)

# nabove1, almost sig
cc1 <- lmer(log(rat) ~ nabove1 + (1|loc), data = bior)
summary(cc1)
confint(cc1)
# mean is sig, but come on....
# nabove2, shan, ccbio_cv, shan_hill, not
#cc4 <- lmer(log(rat) ~ shan_hill + (1|loc), data = bior)
#summary(cc4)
|
03846b897597d814858f53f746b5f10e158e25d3
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/developers/sdarticle.R
|
f9bb7fa77cfad3286f9981f143abb9dc367c0460
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,026
|
r
|
sdarticle.R
|
#
# sdarticle.R, 8 Jan 17
# Data from:
# Knowledge Organization and Skill Differences in Computer Programmers
# Katherine B. McKeithen and Judith S. Reitman and Henry H. Ruster and Stephen C. Hirtle
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones

source("ESEUR_config.r")

library("plyr")

# Two stacked panels: recall performance for normally-organized vs scrambled
# programs.
plot_layout(2, 1)

pal_col=rainbow(3)

# Plot lines-recalled vs trial number, one line per skill level.
# NOTE(review): relies on the globals `lread` (shared y-axis range across
# both panels) and `col_order` (legend entries), both created below before
# this function is called.
plot_perf=function(df)
{
plot(df$trial, df$lines, type="n",
	ylim=range(lread$lines),
	xlab="Trial", ylab="Lines recalled")

# One connected line per skill level, colored by the precomputed col column.
d_ply(df, .(level), function(df) lines(df$trial, df$lines, type="b", col=df$col))
legend(x="topleft", legend=rev(col_order$level), bty="n", fill=rev(col_order$col), cex=1.2)
}

lread=read.csv(paste0(ESEUR_dir, "developers/sdarticle.csv.xz"), as.is=TRUE)
# Map each skill level to one of the palette colors.
lread$col=pal_col[as.factor(lread$level)]
col_order=unique(data.frame(level=lread$level, col=lread$col))
col_order$col=as.character(col_order$col)

normal=subset(lread, organization == "normal")
scrambled=subset(lread, organization != "normal")

plot_perf(normal)
plot_perf(scrambled)
|
b0d9d9d649d47d3ef4f93ae0f5356ecb87e8426e
|
1b8eedf870f07fd6316154c09241ecd7c9089943
|
/analysis/features.R
|
dc064f50da94210240b3b513d46349cdff9517c2
|
[] |
no_license
|
dtkaczyk/dark-or-light
|
24b604f8e44fb733f97df6bac399520d1003e8ec
|
ecf83d08601fe117420536017062522d14e3fc55
|
refs/heads/master
| 2021-01-09T08:03:02.898751
| 2017-02-02T05:48:06
| 2017-02-02T05:48:06
| 65,752,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,652
|
r
|
features.R
|
# Column-name vectors grouping the engineered features by type; used to
# select feature subsets elsewhere in the analysis.
basicFeatures <- c("Red", "Green", "Blue")
# Pairwise and triple products of the RGB channels.
multFeatures <- c("MultRedGreen", "MultGreenBlue", "MultRedBlue", "MultRedGreenBlue")
# Channel ratios (both orientations) plus the triple ratio.
ratioFeatures <- c("RatioRedGreen", "RatioGreenRed", "RatioRedBlue", "RatioBlueRed",
                   "RatioBlueGreen", "RatioGreenBlue", "RatioRedGreenBlue")
# Squares and square roots of the individual channels.
sqFeatures <- c("SqRed", "SqGreen", "SqBlue", "SqRootRed", "SqRootGreen", "SqRootBlue")
# Append derived color features to `data`: squares, square roots, pairwise
# and triple products, and ratios of the Red, Green and Blue columns.
# All new columns are computed from the original channels only, so a single
# transform() call reproduces the sequential assignments exactly (same
# values, same column order).
extractFeatures <- function(data) {
  transform(
    data,
    SqRed = Red^2,
    SqGreen = Green^2,
    SqBlue = Blue^2,
    SqRootRed = sqrt(Red),
    SqRootGreen = sqrt(Green),
    SqRootBlue = sqrt(Blue),
    MultRedGreen = Red * Green,
    MultGreenBlue = Green * Blue,
    MultRedBlue = Red * Blue,
    MultRedGreenBlue = Red * Green * Blue,
    RatioRedGreen = Red / Green,
    RatioGreenRed = Green / Red,
    RatioRedBlue = Red / Blue,
    RatioBlueRed = Blue / Red,
    RatioBlueGreen = Blue / Green,
    RatioGreenBlue = Green / Blue,
    RatioRedGreenBlue = Red / Green / Blue
  )
}
# Rank every feature column by how strongly it separates the light ("L")
# and dark ("D") luminance classes, using a two-sample Kolmogorov-Smirnov
# test per column. Returns a data frame with columns Feature and PValue,
# sorted by ascending p-value (smaller = stronger separation).
evaluateFeatures <- function(data) {
  light_rows <- data[data$Lum == "L", ]
  dark_rows <- data[data$Lum == "D", ]
  # Every column except the class label is treated as a feature.
  feature_names <- setdiff(colnames(data), "Lum")
  p_values <- vapply(
    feature_names,
    function(feature) ks.test(light_rows[[feature]], dark_rows[[feature]])$p.value,
    numeric(1)
  )
  ranked <- data.frame(
    Feature = names(p_values),
    PValue = p_values
  )
  ranked <- ranked[order(ranked$PValue), ]
  row.names(ranked) <- NULL
  ranked
}
|
003f3330a6c5871ef3f60b9bbd7f424a2f967317
|
3fefe890b546e1b9cbdc6daeed56f9ee121bbfd1
|
/man/fetch_all_deputados.Rd
|
e945f32c4d9269d320f509d0cf02a5ddd4c90db5
|
[] |
no_license
|
analytics-ufcg/rcongresso
|
0cc0078aebbdd57047e1d21c93e56b60128d2fd0
|
d34877d8f7e7ef4da1ad9053d5391f9be02c2828
|
refs/heads/master
| 2021-12-24T07:32:39.539758
| 2021-10-18T14:45:43
| 2021-10-18T14:45:43
| 100,041,012
| 53
| 12
| null | 2021-06-28T18:06:16
| 2017-08-11T14:37:06
|
R
|
UTF-8
|
R
| false
| true
| 707
|
rd
|
fetch_all_deputados.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deputados.R
\name{fetch_all_deputados}
\alias{fetch_all_deputados}
\alias{fetch_ids_deputados}
\title{Fetches details about all deputies}
\usage{
fetch_all_deputados(ids_dep)
fetch_ids_deputados(legislatura_base = .LEGISLATURA_INICIAL)
}
\arguments{
\item{ids_dep}{Dataframe containing all deputies' IDs}
\item{legislatura_base}{Initial legislature from which to return deputies}
}
\value{
Dataframe containing details about the deputies

Dataframe containing all deputies' IDs
}
\description{
Fetches details about deputies from the 40th legislature to the current one

Fetches all deputies' IDs from the given legislature to the current one
}
|
1baa5b654950a121c52f1fc73e7b364bacb3b254
|
f7794399168afc3d4a16f0514e04b7e1e9c09202
|
/R/imports.R
|
74a25dcaba4d3dec62f8825e624f99b61f82b7b5
|
[] |
no_license
|
ryapric/fcf
|
b90d8942f654406ed5ef773506270232fd0c9f00
|
ceee2aa566151fc28bf373a17f8a701c5419be93
|
refs/heads/master
| 2020-04-07T22:21:09.092856
| 2018-11-26T22:04:33
| 2018-11-26T22:04:33
| 158,766,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55
|
r
|
imports.R
|
# Package-level roxygen import directives: when roxygenized, these add
# dplyr, rvest and xml2 to the generated NAMESPACE. The trailing NULL is
# the conventional placeholder object the roxygen block attaches to.
#' @import dplyr
#' @import rvest
#' @import xml2
NULL
|
d789470e18af9a0e5c0c09102e0095c19d0c9237
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/haploR/vignettes/haplor-vignette.R
|
dc32d111bc278e7d4392476b8460a033e949d4dd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,546
|
r
|
haplor-vignette.R
|
# R code chunks extracted from the haploR package vignette; the `## ----`
# lines are the original knitr chunk headers and options. Chunks marked
# eval=FALSE were not executed when the vignette was built.
## ---- message=FALSE, echo=FALSE------------------------------------------
#library(knitcitations)
#cleanbib()
#options("citation_format" = "pandoc")
#r<-citep("10.1093/nar/gkr917")
#r<-citep("10.1101/gr.137323.112")
#r<-citep("10.1093/bioinformatics/btv402")
#write.bibtex(file="references.bib")
## ---- echo=TRUE, eval=FALSE----------------------------------------------
# install.packages("haploR", dependencies = TRUE)
## ---- echo=TRUE, eval=FALSE----------------------------------------------
# devtools::install_github("izhbannikov/haplor")
## ---- echo=TRUE, message=FALSE-------------------------------------------
library(haploR)
x <- queryHaploreg(query=c("rs10048158","rs4791078"))
x
## ---- echo=TRUE, message=FALSE-------------------------------------------
# Keep only proxies in strong LD (r2 > 0.9) with the query SNPs.
subset.high.LD <- x[as.numeric(x$r2) > 0.9, c("rsID", "r2", "chr", "pos_hg38", "is_query_snp", "ref", "alt")]
subset.high.LD
## ---- echo=TRUE, message=FALSE, eval=FALSE-------------------------------
# require(openxlsx)
# write.xlsx(x=subset.high.LD, file="subset.high.LD.xlsx")
## ---- echo=TRUE, message=FALSE-------------------------------------------
x[, c("Motifs", "rsID")]
x[, c("eQTL", "rsID")]
## ---- echo=TRUE, message=FALSE-------------------------------------------
library(haploR)
x <- queryHaploreg(file=system.file("extdata/snps.txt", package = "haploR"))
x
## ---- echo=TRUE, message=FALSE-------------------------------------------
library(haploR)
# Getting a list of existing studies:
studies <- getStudyList()
# Let us look at the first element:
studies[[1]]
# Let us look at the second element:
studies[[2]]
# Query HaploReg to explore results from
# this study:
x <- queryHaploreg(study=studies[[1]])
x
## ---- echo=TRUE, eval=FALSE, message=FALSE-------------------------------
# library(haploR)
# tables <- getExtendedView(snp="rs10048158")
# tables
## ---- echo=TRUE, message=FALSE-------------------------------------------
library(haploR)
x <- queryRegulome(c("rs4791078","rs10048158"))
x$res.table
x$bad.snp.id
## ---- echo=TRUE, message=FALSE-------------------------------------------
library(haploR)
ldmat <- LDlink.LDmatrix(snps=c("rs77264218", "rs11229158", "rs10896659", "rs10896702", "rs2042592"), population="AFR")
ldmat
# Stylish matrix R2
stylish.matrix.r2 <- makeStylishLDmatrix(ldmat$matrix.r2)
stylish.matrix.r2
# Stylish matrix D'
stylish.matrix.Dprime <- makeStylishLDmatrix(ldmat$matrix.dprime)
stylish.matrix.Dprime
## ---- echo=TRUE----------------------------------------------------------
sessionInfo()
|
bd16f2ecc95b8db1e477392be7f2f90476279724
|
da240952753caf3a3b79e777b1bfe24140aaba86
|
/ZAnc/make_rf_outliers_by_pop.R
|
77e643c990d952cc5e77a8f9dec028835a934c58
|
[] |
no_license
|
cooplab/hilo
|
ea5ea9d472ee7cf2cab17aa83e8f568c54fce34c
|
64483aaf0abd40d25846969b8732e07abf9b7667
|
refs/heads/master
| 2023-08-18T13:03:07.458675
| 2021-09-20T20:12:10
| 2021-09-20T20:12:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,521
|
r
|
make_rf_outliers_by_pop.R
|
#!/usr/bin/env Rscript
library(dplyr)
library(tidyr)
library(bedr)
# this script identifies high introgression ancestry outlier regions
# in an individual population and shared across pops
# and outputs regions files for outliers
# from focal pop only (1pop)
# and from focal pop + at least 3 other pops (4pop)
# to be used with angsd -rf
# load variables from Snakefile (commented lines show example values)
bed_sites = snakemake@input[["bed_sites"]]
# bed_sites = "local_ancestry/results/thinnedSNPs/HILO_MAIZE55_PARV50/K3/whole_genome.bed"
genome_file = snakemake@input[["genome"]]
# genome_file = "data/refMaize/Zea_mays.AFPv4.dna.chr.autosome.lengths"
anc_maize = snakemake@input[["anc_maize"]]
# anc_maize = "local_ancestry/results/ancestry_hmm/HILO_MAIZE55_PARV50/K3/Ne10000_yesBoot/anc/maize.pops.anc.RData"
meta_maize = snakemake@input[["meta_maize"]]
# meta_maize = "local_ancestry/results/ancestry_hmm/HILO_MAIZE55_PARV50/K3/Ne10000_yesBoot/anc/maize.pop.meta.RData"
anc_mexicana = snakemake@input[["anc_mexicana"]]
# anc_mexicana = "local_ancestry/results/ancestry_hmm/HILO_MAIZE55_PARV50/K3/Ne10000_yesBoot/anc/mexicana.pops.anc.RData"
meta_mexicana = snakemake@input[["meta_mexicana"]]
# meta_mexicana = "local_ancestry/results/ancestry_hmm/HILO_MAIZE55_PARV50/K3/Ne10000_yesBoot/anc/mexicana.pop.meta.RData"
dir_out = snakemake@params[["dir_out"]]
# dir_out = paste0("ZAnc/results/HILO_MAIZE55_PARV50/K3/Ne10000_yesBoot")
focal_pop = snakemake@params[["focal_pop"]]
# focal_pop = "pop362"
meta_file = snakemake@input[["meta"]]
# meta_file = "samples/HILO_MAIZE55_PARV50_meta.RData"
# is the focal population sympatric maize or mexicana?
# (load() brings `meta` into scope from the RData file)
load(meta_file)
meta_sympatric = meta %>%
  filter(symp_allo == "sympatric") %>%
  dplyr::select(popN, zea, symp_allo, LOCALITY, ELEVATION) %>%
  filter(!duplicated(.)) %>%
  mutate(pop = paste0("pop", popN))
zea = meta_sympatric$zea[meta_sympatric$pop == focal_pop]
# load ancestry and population metadata files
# based on whether the focal population is sympatric maize or mexicana
# (load() brings `anc` and `meta_pops` into scope)
if (zea == "maize"){
  load(anc_maize)
  load(meta_maize)
  # always count outliers for minor (introgressed) ancestry
  introgressing_ancestry = "mexicana"
  meta_pops$alpha_local_ancestry = meta_pops$alpha_local_ancestry_mexicana
}
if (zea == "mexicana"){
  load(anc_mexicana)
  load(meta_mexicana)
  introgressing_ancestry = "maize"
  meta_pops$alpha_local_ancestry = meta_pops$alpha_local_ancestry_maize
}
sites <- read.table(bed_sites, header = F, stringsAsFactors = F, sep = "\t") %>%
  data.table::setnames(c("chr", "start", "end", "length"))
# what is 2sd above the mean introgressed ancestry threshold for each pop?
# (column-wise over sites; one threshold per population)
meta_pops$sd2 <- apply(anc[[introgressing_ancestry]], 2, mean) + 2*apply(anc[[introgressing_ancestry]], 2, sd)
# find pop outliers in observed data
# One row per (site, population); top_sd2 flags sites where that population's
# introgressed ancestry exceeds its own mean + 2 SD threshold.
anc_outliers <- data.frame(anc[[introgressing_ancestry]], stringsAsFactors = F) %>%
  cbind(., sites) %>%
  tidyr::pivot_longer(., cols = colnames(anc[[introgressing_ancestry]]), names_to = "pop", values_to = "anc") %>%
  left_join(., meta_pops, by = "pop") %>%
  mutate(top_sd2 = anc > sd2) %>%
  arrange(ELEVATION)
# add a column for how many populations are an outlier at that position
# and filter to only include outliers that involve the focal population
anc_outliers_by_pop <- anc_outliers %>%
  group_by(chr, start, end) %>%
  summarise(outlier_pops = sum(top_sd2)) %>%
  ungroup() %>%
  left_join(anc_outliers, ., by = c("chr", "start", "end")) %>%
  filter(pop == focal_pop & top_sd2) # must be an outlier in focal pop for that row
# merge adjacent outlier regions for single (focal) population
# (outlier in exactly one population, i.e. private to the focal pop)
regions_1pop = anc_outliers_by_pop %>%
  filter(outlier_pops == 1) %>%
  dplyr::select(chr, start, end) %>% # only keep region
  mutate(chr = as.character(chr)) %>%
  as.data.frame(., stringsAsFactors = F)
regions_1pop_merged = bedr(
  input = list(i = regions_1pop),
  method = "merge",
  check.chr = F,
  params = paste("-sorted -header -g", genome_file)
)
# merge adjacent outlier regions shared between focal population and 3 or more other populations
regions_4pop = anc_outliers_by_pop %>%
  filter(outlier_pops >= 4) %>%
  dplyr::select(chr, start, end) %>% # only keep region
  mutate(chr = as.character(chr)) %>%
  as.data.frame(., stringsAsFactors = F)
regions_4pop_merged = bedr(
  input = list(i = regions_4pop),
  method = "merge",
  check.chr = F,
  params = paste("-sorted -header -g", genome_file)
)
# print regions files (rf)
# start + 1 converts from 0-based bed starts to 1-based region coordinates
# -- presumably the format angsd -rf expects; confirm against angsd docs.
regions_1pop_merged %>%
  mutate(region = paste0(chr, ":", start + 1, "-", end)) %>% # format for angsd regions file
  dplyr::select(region) %>% # only print region
  write.table(., file = paste0(dir_out, "/", focal_pop, ".1pop.outliers.regions"),
              col.names = F, row.names = F, sep = "\t", quote = F)
regions_4pop_merged %>%
  mutate(region = paste0(chr, ":", start + 1, "-", end)) %>% # format for angsd regions file
  dplyr::select(region) %>% # only print region
  write.table(., file = paste0(dir_out, "/", focal_pop, ".4pop.outliers.regions"),
              col.names = F, row.names = F, sep = "\t", quote = F)
# print bed files (same merged regions, kept in 0-based bed coordinates)
dplyr::select(regions_1pop_merged, chr, start, end) %>%
  write.table(., file = paste0(dir_out, "/", focal_pop, ".1pop.outliers.bed"),
              col.names = F, row.names = F, sep = "\t", quote = F)
dplyr::select(regions_4pop_merged, chr, start, end) %>%
  write.table(., file = paste0(dir_out, "/", focal_pop, ".4pop.outliers.bed"),
              col.names = F, row.names = F, sep = "\t", quote = F)
|
ff5dcd3ea68ccee2f35b8712ba610a45c85eee0e
|
da316d00f89f9481e7f3381326651594328c5061
|
/functions/getCryptoHistoricalPrice.R
|
fe3c304034578cbeae123ee123f308d8a6639e1e
|
[] |
no_license
|
strebuh/crypto_currencies_models
|
ab7a796459953dbfc2a65aed2cb4a24d75058ec4
|
ee5b0f54c86b9279dd8436f2a836d51cf8142875
|
refs/heads/master
| 2023-08-12T17:23:01.771012
| 2021-10-17T00:27:16
| 2021-10-17T00:27:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 633
|
r
|
getCryptoHistoricalPrice.R
|
# Scrape the full OHLC + volume/market-cap history for one cryptocurrency
# from coinmarketcap.com.
#
# x: the coin's URL slug as used by the site, e.g. "bitcoin".
# Returns a tibble sorted by ascending Date, with character price columns
# coerced to numeric (thousands separators stripped).
#
# NOTE(review): this screen-scrapes live HTML; `.[[3]]` hard-codes which
# table on the page holds the prices, so any site redesign silently breaks
# or changes the result -- verify against the current page layout.
getCryptoHistoricalPrice <- function(x){
  # this function scraps the OHLC historical crypto prices from www.coinmarketcap.com
  # NOTE(review): library() inside a function attaches packages as a side
  # effect; consider loading tidyverse at the top of the script instead.
  library(tidyverse)
  # Date range 20130428..21000101 effectively requests the full history.
  paste0("https://coinmarketcap.com/currencies/",
         x,
         "/historical-data/?start=20130428&end=21000101") %>%
    xml2::read_html() %>%
    rvest::html_table(.) %>%
    .[[3]] %>% # third table on the page = historical prices (brittle)
    as_tibble() %>%
    rename(Open = `Open*`,
           Close = `Close**`,
           MarketCap = `Market Cap`) %>%
    mutate(Date = as.Date(Date, format = "%b %d, %Y")) %>%
    # Strip thousands separators, then coerce remaining character columns to numeric.
    mutate_if(is.character, function(x) as.numeric(gsub(",", "", x))) %>%
    arrange(Date) %>%
    return()
}
|
bd178d4aadd8583fb787baf94d30390da8d729c8
|
2d4b32b315ef275119df1be0ea7daa350bb3e3f4
|
/fxScrap/loopedScrapingFunction.R
|
8a405955fc8c1e6dee48074e94055429e3d8e731
|
[] |
no_license
|
muchDS/FX-TS
|
ae190efc7ed1df5ccd8325ec4f8a0a9732c1b3fd
|
2ae13f3cd88cf490178d3c88d60f1ecd3913be27
|
refs/heads/master
| 2021-01-22T07:57:00.315744
| 2017-09-10T12:40:03
| 2017-09-10T12:40:03
| 92,585,590
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,267
|
r
|
loopedScrapingFunction.R
|
# Performs one scrape-and-store iteration: fetches major-indices prices from
# investing.com and FX rates from marketwatch.com through a (possibly
# rotating) proxy, then inserts one row into the fxtimeseriestable DB table.
#
# connectionObject: open DBI connection (re-created via connectMeToDB on error)
# FXCssTagsVector / MICssTagsVector: CSS selectors passed to scrapSingleTable
# rowId: primary-key value for the inserted row
# localProxyDF / localInitProxyIP / localInitProxyPort: proxy pool + seed proxy
#
# Returns list(ip, port) of the proxy that worked, or NULL if either page
# fetch failed. Each iteration is padded to take at least ~10 seconds.
loopedScrapingFunction <- function(connectionObject, FXCssTagsVector, MICssTagsVector, rowId, localProxyDF, localInitProxyIP, localInitProxyPort){
  source("readPage.R")
  startTime <- Sys.time()
  # If the connection has gone stale, reconnect. NOTE(review): `<<-` rebinds
  # connectionObject in the enclosing environment, not the local argument.
  tryCatch(dbGetInfo(connectionObject), error = function(e){print("reconnecting");Sys.time();connectionObject <<- connectMeToDB();Sys.time()} )
  recordTime <- as.POSIXlt(Sys.time())
  # Approximate EDT by shifting local time back 6 hours.
  # NOTE(review): a fixed 6-hour offset ignores DST -- confirm this is intended.
  recordTimeEDT <- recordTime
  recordTimeEDT$hour <- recordTimeEDT$hour - 6
  # NOTE(review): this condition can never be TRUE -- wday cannot equal both
  # 5 and 16. The second comparison was presumably meant to test
  # recordTimeEDT$hour (stop scraping after Friday 16:00 EDT market close?).
  # Confirm intent before fixing. `scrapMe` is a caller-scope flag set via `<<-`.
  if(recordTimeEDT$wday == 5 && recordTimeEDT$wday == 16){scrapMe <<- FALSE}
  recordTimeEDT <- paste0(recordTimeEDT, " EDT")
  #MIRatesPage <- read_html("https://www.investing.com/indices/major-indices")
  iterKillCommand <- FALSE
  # Fetch both pages; any fetch error marks this iteration as failed.
  # The FX fetch reuses the proxy that succeeded for the MI fetch.
  tryCatch(
    MIRatesPage <- readPage("https://www.investing.com/indices/major-indices", localProxyDF ,localInitProxyIP, localInitProxyPort),
    error = function(e){print("MIError");iterKillCommand <<- TRUE}
  )
  tryCatch(
    FXRatesPage <- readPage("http://www.marketwatch.com/investing/currencies/tools", localProxyDF, MIRatesPage$ip, MIRatesPage$port),
    error = function(e){print("FXError");iterKillCommand <<- TRUE}
  )
  #print(paste0(iterKillCommand, " ", MIRatesPage$ip, " ", FXRatesPage$ip, " ",is.null(MIRatesPage$page), " ",is.null(FXRatesPage$page)))
  # Abort this iteration if either fetch failed or returned an empty result.
  if(iterKillCommand ||
     length(MIRatesPage$ip) == 0 ||
     length(FXRatesPage$ip) == 0 ||
     is.null(MIRatesPage$page) ||
     is.null(FXRatesPage$page)){print("empty iter")
    return(NULL)
  }
  listToReturn <- list(FXRatesPage$ip, FXRatesPage$port)
  names(listToReturn) <- c("ip", "port")
  # Build the VALUES(...) payload: id, CEST timestamp, scraped FX and MI
  # table values, then the EDT timestamp.
  insertString <- paste0(rowId, ", '", recordTime, " CEST'",
                         ", ",
                         scrapSingleTable(FXRatesPage$page, FXCssTagsVector, "#rates th", "FX"),
                         ", ",
                         scrapSingleTable(MIRatesPage$page, MICssTagsVector, "#cr_12 th", "MI"),
                         ", '", recordTimeEDT,"'")
  dbGetQuery(connectionObject, paste0("INSERT INTO fxtimeseriestable VALUES (", insertString,")"))
  # Throttle: pad every iteration to at least ~10 seconds of wall time.
  waitTime <- ifelse(10 - (Sys.time() - startTime) < 0, 0, 10 - (Sys.time() - startTime))
  Sys.sleep(waitTime)
  return(listToReturn)
}
|
b31cd474e1630e857ff97fc136a7edef250d3088
|
9b3cff0dd9a6e0402747cb68083f71bd3705ebe1
|
/man/checkData.Rd
|
f4b120dc1318aed46ffb159545c886dc693352e5
|
[] |
no_license
|
cran/MPR.genotyping
|
f3656d7e298f5999b80e62ac15f2ac29c25c65d7
|
9d4112d6ddf825f9701d5631b3123b19ef39b67f
|
refs/heads/master
| 2021-05-05T06:34:14.060691
| 2018-01-24T17:24:42
| 2018-01-24T17:24:42
| 118,804,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 623
|
rd
|
checkData.Rd
|
\name{checkData}
\alias{checkData}
\docType{data}
\title{
Data for check
}
\description{
this is used to check up the genotype results in my example.
}
\usage{data("checkData")}
\format{
The format is:
chr [1:11948, 1:2] "A" "A" "C" "T" "A" ...
- attr(*, "dimnames")=List of 2
..$ : chr [1:11948] "0500000526A" "0500000556A" "0500000559G" "0500000591G" ...
..$ : chr [1:2] "ZS97" "MH63"
}
\details{
you can use "table(checkData[ids,1]==alleleA)"
}
\source{
http://www.ncpgr.cn/supplements/MPR_genotyping/MPR_genotyping.tar.gz
}
\examples{
#load data
data(checkData)
}
\keyword{datasets}
|
b4d8264d747c35ec655d103774ce2bf6e6869009
|
0e7a9c1aad4673d965f406accab79d887ce67843
|
/app/data/2016-01-14/format/consistancy_check_V2.R
|
24902d77002aa5ac55347f35406adee6558836bb
|
[] |
no_license
|
anandgavai/ANDI
|
6207d0442eebdf4d149c2bcb2c2f3a4be29920c5
|
6e4555f14cd45767c54ef4d700bf907aed8d0a34
|
refs/heads/master
| 2020-12-15T17:03:08.632888
| 2016-05-23T20:11:28
| 2016-05-23T20:11:28
| 39,182,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,531
|
r
|
consistancy_check_V2.R
|
library (gdata)
library (dplyr)
require(RJSONIO)
library (jsonlite)
#df = read.xls ("//home//anandgavai//ANDI//app//data//2016-01-14//format//ANDI_betaTemplate_0303.xlsx", sheet = 1, header = TRUE)
df = read.csv ("//home//anandgavai//ANDI//app//data//2016-01-14//format//ANDI_betaTemplate_11_03_16.csv", header = TRUE)
#Step1:
# count number of rows
dimen<-dim(df)[1]
#Step2:
# count unique combination of ID1, ID2, ID3
IDCheck<-dim(unique(df[,c('category.short.name','ID1','ID2','ID3','ID4','SPSS.name')]))[1]
#Step 3:
#Check for special characters in a column
## if Step1 match with Step2 the columns are unique else raise a flag
if(dimen==IDCheck){
print ("SUCCESS: catetory.short.name,ID1, ID2, ID3, ID4 and SPSSname are unique")
}else{
print("ERROR: Check category.short.name, ID1, ID2, ID3, ID4 and SPSSnames as the do not seem consistant !!")
}
### Now Replace all spaces with "_" to create an identifier concatinating ID1, ID2, ID3 and catenory shortname
category.short.name <- gsub(" ","_",df$category.short.name)
ID1<-gsub(" ","_",df$ID1)
ID2<-gsub(" ","_",df$ID2)
ID3<-gsub(" ","_",df$ID3)
ID4<-gsub(" ","_",df$ID4)
SPSSname<-gsub(" ","_",df$SPSS.name)
### This is my file
df<-cbind(ID, df)
d <-df[,1:3]
MyData<-read.csv ("//home//anandgavai//ANDI//app//data//2016-01-14//format//MyData.csv", header = TRUE)
MyData<-d
makeList<-function(x){
# if(ncol(x)>2){
listSplit<-split(x[,2:3],x[1],drop=T)
lapply(
names(listSplit),function(y){
list(data.frame(id="",value="",label=c(y)),children=data.frame(id="",value="",label="",makeList(listSplit[[y]])))
})
# }
# else{
lapply(seq(nrow(x[1])),function(y){
#list(id="",value="",label=y,children=list(id="",value="",label=x[,1][y],makeList(listSplit[[y]])))
browser()
list(label=x[,1][y])
})
# }
}
jsonOut<-toJSON(makeList(MyData))
cat(jsonOut)
write(jsonOut,"//home//anandgavai//ANDI//app//data//2016-01-14//format//MyData.json")
list1<-split(subset(MyData,select=c(-category.short.name)),MyData$category.short.name)
list2<-lapply(list1,function(x){
split(subset(x,select=c(-ID1)),x$ID1,drop=TRUE)
})
list3<-lapply(list2,function(x){
lapply(x,function(y){
split(subset(y,select=c(-ID2)),y$ID2,drop=TRUE)
})
})
jsonOut<-toJSON(list(MyData=list3))
jsonOut1<-gsub('([^\n]*?): \\{\n "Percentage"','\\{"name":\\1,"Percentage"',jsonOut)
jsonOut2<-gsub('"([^"]*?)": \\{','"name":"\\1","children":\\{',jsonOut1)
|
9f3a4d505d52485edd993e6401b20ab32c899089
|
1e76886c729c7e0ae15cf18102fe0f614f9297e0
|
/R/threshold_perf.R
|
9ea652b3ae92059d02ea8cda7a27d8da5c342d1e
|
[
"MIT"
] |
permissive
|
tidymodels/probably
|
2abe267ef49a3595d29dd7fdbdf7c836b3103c8d
|
c46326651109fb2ebd1b3762b3cb086cfb96ac88
|
refs/heads/main
| 2023-07-10T13:09:55.973010
| 2023-06-27T17:11:22
| 2023-06-27T17:11:22
| 148,365,953
| 87
| 12
|
NOASSERTION
| 2023-06-27T17:11:24
| 2018-09-11T19:02:58
|
R
|
UTF-8
|
R
| false
| false
| 7,151
|
r
|
threshold_perf.R
|
#' Generate performance metrics across probability thresholds
#'
#' `threshold_perf()` can take a set of class probability predictions
#' and determine performance characteristics across different values
#' of the probability threshold and any existing groups.
#'
#' Note that that the global option `yardstick.event_first` will be
#' used to determine which level is the event of interest. For more details,
#' see the Relevant level section of [yardstick::sens()].
#'
#' The default calculated metrics are:
#' - [yardstick::j_index()]
#' - [yardstick::sens()]
#' - [yardstick::spec()]
#' - `distance = (1 - sens) ^ 2 + (1 - spec) ^ 2`
#'
#' If a custom metric is passed that does not compute sensitivity and
#' specificity, the distance metric is not computed.
#'
#' @param .data A tibble, potentially grouped.
#'
#' @param truth The column identifier for the true two-class results
#' (that is a factor). This should be an unquoted column name.
#'
#' @param estimate The column identifier for the predicted class probabilities
#' (that is a numeric). This should be an unquoted column name.
#'
#' @param ... Currently unused.
#'
#' @param na_rm A single logical: should missing data be removed?
#'
#' @param thresholds A numeric vector of values for the probability
#' threshold. If unspecified, a series
#' of values between 0.5 and 1.0 are used. **Note**: if this
#' argument is used, it must be named.
#'
#' @param metrics Either `NULL` or a [yardstick::metric_set()] with a list of
#' performance metrics to calculate. The metrics should all be oriented towards
#' hard class predictions (e.g. [yardstick::sensitivity()],
#' [yardstick::accuracy()], [yardstick::recall()], etc.) and not
#' class probabilities. A set of default metrics is used when `NULL` (see
#' Details below).
#'
#' @param event_level A single string. Either `"first"` or `"second"` to specify
#' which level of `truth` to consider as the "event".
#'
#' @return A tibble with columns: `.threshold`, `.estimator`, `.metric`,
#' `.estimate` and any existing groups.
#'
#' @examples
#' library(dplyr)
#' data("segment_logistic")
#'
#' # Set the threshold to 0.6
#' # > 0.6 = good
#' # < 0.6 = poor
#' threshold_perf(segment_logistic, Class, .pred_good, thresholds = 0.6)
#'
#' # Set the threshold to multiple values
#' thresholds <- seq(0.5, 0.9, by = 0.1)
#'
#' segment_logistic %>%
#' threshold_perf(Class, .pred_good, thresholds)
#'
#' # ---------------------------------------------------------------------------
#'
#' # It works with grouped data frames as well
#' # Let's mock some resampled data
#' resamples <- 5
#'
#' mock_resamples <- resamples %>%
#' replicate(
#' expr = sample_n(segment_logistic, 100, replace = TRUE),
#' simplify = FALSE
#' ) %>%
#' bind_rows(.id = "resample")
#'
#' resampled_threshold_perf <- mock_resamples %>%
#' group_by(resample) %>%
#' threshold_perf(Class, .pred_good, thresholds)
#'
#' resampled_threshold_perf
#'
#' # Average over the resamples
#' resampled_threshold_perf %>%
#' group_by(.metric, .threshold) %>%
#' summarise(.estimate = mean(.estimate))
#'
#' @export
threshold_perf <- function(.data, ...) {
UseMethod("threshold_perf")
}
#' @rdname threshold_perf
#' @export
threshold_perf.data.frame <- function(.data,
truth,
estimate,
thresholds = NULL,
metrics = NULL,
na_rm = TRUE,
event_level = "first",
...) {
if (is.null(thresholds)) {
thresholds <- seq(0.5, 1, length = 21)
}
if (is.null(metrics)) {
metrics <-
yardstick::metric_set(yardstick::sensitivity,
yardstick::specificity,
yardstick::j_index)
}
measure_sens_spec <- check_thresholded_metrics(metrics)
obs_sel <- tidyselect::eval_select(
expr = enquo(truth),
data = .data
)
probs_sel <- tidyselect::eval_select(
expr = enquo(estimate),
data = .data
)
obs <- names(obs_sel)
probs <- names(probs_sel)
rs_ch <- dplyr::group_vars(.data)
rs_ch <- unname(rs_ch)
obs_sym <- sym(obs)
probs_sym <- sym(probs)
if (length(rs_ch) == 0) {
rs_ch <- NULL
rs_id <- NULL
} else {
rs_id <- syms(rs_ch)
}
if (length(probs) > 1 | length(obs) > 1) {
cli::cli_abort(
"{.arg truth} and {.arg estimate} should only be single columns."
)
}
if (!inherits(.data[[obs]], "factor")) {
cli::cli_abort("{.arg truth} should be a factor.")
}
if (length(levels(.data[[obs]])) != 2) {
cli::cli_abort("{.arg truth} should be a 2 level factor.")
}
if (!is.numeric(.data[[probs]])) {
cli::cli_abort("{.arg estimate} should be numeric.")
}
.data <- dplyr::rename(.data, truth = !!obs_sym, prob = !!probs_sym)
if (!is.null(rs_id)) {
.data <- dplyr::select(.data, truth, prob, !!!rs_id)
} else {
.data <- dplyr::select(.data, truth, prob)
}
if (na_rm) {
.data <- stats::na.omit(.data)
}
.data <- .data %>%
expand_preds(
threshold = thresholds,
inc = c("truth", "prob", rs_ch)
) %>%
dplyr::mutate(
alt_pred = recode_data(
obs = truth,
prob = prob,
threshold = .threshold,
event_level = event_level
)
)
if (!is.null(rs_id)) {
.data <- .data %>% dplyr::group_by(!!!rs_id, .threshold)
} else {
.data <- .data %>% dplyr::group_by(.threshold)
}
.data_metrics <- metrics(
.data,
truth = truth,
estimate = alt_pred,
event_level = event_level
)
if (measure_sens_spec) {
# Create the `distance` metric data frame
# and add it on
sens_vec <- .data_metrics %>%
dplyr::filter(.metric == "sens") %>%
dplyr::pull(.estimate)
dist <- .data_metrics %>%
dplyr::filter(.metric == "spec") %>%
dplyr::mutate(
.metric = "distance",
# .estimate is specificity currently. This recodes as distance
.estimate = (1 - sens_vec) ^ 2 + (1 - .estimate) ^ 2
)
.data_metrics <- dplyr::bind_rows(.data_metrics, dist)
}
.data_metrics
}
expand_preds <- function(.data, threshold, inc = NULL) {
threshold <- unique(threshold)
nth <- length(threshold)
n_data <- nrow(.data)
if (!is.null(inc))
.data <- dplyr::select(.data, tidyselect::all_of(inc))
.data <- .data[rep(1:nrow(.data), times = nth), ]
.data$.threshold <- rep(threshold, each = n_data)
.data
}
check_thresholded_metrics <- function(x) {
y <- dplyr::as_tibble(x)
if (!all(y$class == "class_metric")) {
rlang::abort("All metrics must be of type 'class_metric' (e.g. `sensitivity()`, ect)")
}
# check to see if sensitivity and specificity are in the lists
has_sens <-
any(y$metric %in% c("sens", "sensitivity")) &
any(y$metric %in% c("spec", "specificity"))
has_sens
}
utils::globalVariables(
c(
".",
".threshold",
"alt_pred",
"prob",
"statistic",
"value",
".metric",
".estimate",
"distance"
)
)
|
54b0f4054259c0d9a17180e0693092722171a9ee
|
55654e444839976992cc3556ed54ae8f911fb413
|
/plot1.R
|
4d316203e5f32333c00e5e1dace6122f1df38c71
|
[] |
no_license
|
justin-price/ExData_Plotting1
|
e66ad2835cd3f19a02299c2867b59999704f6da0
|
ce7fcb38880d567301ca1f0391af27cffb213318
|
refs/heads/master
| 2020-12-15T10:11:19.787049
| 2020-01-27T01:55:53
| 2020-01-27T01:55:53
| 235,071,384
| 0
| 0
| null | 2020-01-20T10:06:20
| 2020-01-20T10:06:18
| null |
UTF-8
|
R
| false
| false
| 631
|
r
|
plot1.R
|
headers = read.table("household_power_consumption.txt",
sep=";",header = F, nrows = 1, as.is = T)
# reading index 2007-02-01 to 2007-02-02 only
power_consumption <- read.table("household_power_consumption.txt",
sep = ";", header = F, skip = 66637, nrows = 2880)
colnames(power_consumption) <- headers
png("plot1.png",width = 480, height = 480)
with(power_consumption,hist(Global_active_power,
col = "Red",
xlab = "Global Active Power (kilowatts)",
main = "Global Active Power"))
dev.off()
|
36fd66b8515b2e58405d49323ef13f56590cf476
|
6a8d76365adc20d81fd8016da8f2fc2e6635273d
|
/script.R
|
fe2394398052012a49973879656ebf7cea187efd
|
[] |
no_license
|
davidbiol/Missing-data-talk
|
94644b193978a8e2d67bd2e03c34c18339f7f6df
|
955b8f54a1cebdfd8dfda385edd4a7eef6256f0d
|
refs/heads/master
| 2023-08-29T08:52:15.707643
| 2021-09-22T21:54:00
| 2021-09-22T21:54:00
| 406,864,311
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,022
|
r
|
script.R
|
## Instalar paquetes de visualización de datos faltantes
install.packages("visdat")
install.packages("VIM")
## Instalar paquete en desarrollo de estimación de datos faltantes
# install.packages("remotes")
remotes::install_github("davidbiol/empire")
# library(visdat)
# library(VIM)
# Library(empire)
#-------------------------------------------------------------------------------
data(sleep, package = "VIM") #Cargar el dataset sleep
# Análisis descriptivo
summary(sleep) #Resumen del data set
#Correlación
visdat::vis_cor(sleep, na_action = "complete.obs")
#Gráficas
visdat::vis_dat(sleep, sort_type = FALSE)
visdat::vis_miss(sleep)
VIM::aggr(sleep)
VIM::matrixplot(sleep, sortby = 2)
empire::count_miss(data = sleep) #Número de datos faltantes
empire::pos_miss(data = sleep) #Posición fila-columna de los datos faltantes
#-------------------------------------------------------------------------------
## Técnicas de estimación de datos faltantes
# Eliminación de casos
new_sleep <- sleep[complete.cases(sleep),]
print(new_sleep)
summary(new_sleep)
VIM::aggr(new_sleep)
# Imputación con la media
new_sleep <- empire::impute_mean(data = sleep)
new_sleep$positions
new_sleep$imp_values
new_sleep$new_data
#Imputación con la mediana
new_sleep <- empire::impute_median(data = sleep)
new_sleep$imp_values
new_sleep$new_data
# Estimación por regresión lineal múltiple
new_airquality <- empire::estimate_mlr(data = airquality[,1:4], diff = 10e-8)
new_airquality$positions
new_airquality$est_values
new_airquality$new_data
# Ahora con el dataset sleep
new_sleep <- empire::estimate_mlr(data = sleep[,1:7])
# Estimación por regresión lineal múltiple penalizada
new_sleep <- empire::estimate_ridge(data = sleep[,1:7], diff = 10, ridge_alpha = 0)
new_sleep$est_values
new_sleep$new_dat
#-------------------------------------------------------------------------------
#' Cómo contactarme?
#'
#' @name David
#' @last-name Gutiérrez-Duque
#' @email davidgd2015@gmail.com
#' @github davidbiol
|
082cdbfafeff4a5cbbc62a338c45e48a8fe36b1a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/runner/tests/length_run.R
|
81ce2d5ca45599b0431b785bed93337a712bf148
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 522
|
r
|
length_run.R
|
context("Running length")
k <- sample(1:5,10, replace=T)
idx <- cumsum(sample(c(1,2,3), 10, replace=T))
test_that("length_run constant k",{
x1 <- rep(NA, 10)
x2 <- rep(NA, 10)
for(i in 1:10)
for(j in i:1)
if(idx[j] <= (idx[i]-3)){
x1[i] <- i - j
break
}
for(i in 1:10)
for(j in i:1)
if(idx[j] <= (idx[i]-k[i])){
x2[i] <- i - j
break
}
expect_identical( length_run(k=3, idx = idx), x1 )
expect_identical( length_run(k=k, idx = idx), x2 )
})
|
35f2dbde8c7b8fc1e53c6f68c4dd7d14dde0e299
|
cafa52c05f020af31985cfd1b8e2c676ea6e3baa
|
/lib/SmallRNA/proportionTest.R
|
62acb75973675329a786eb6e5c66a0160a7d8dfc
|
[
"Apache-2.0"
] |
permissive
|
shengqh/ngsperl
|
cd83cb158392bd809de5cbbeacbcfec2c6592cf6
|
9e418f5c4acff6de6f1f5e0f6eac7ead71661dc1
|
refs/heads/master
| 2023-07-10T22:51:46.530101
| 2023-06-30T14:53:50
| 2023-06-30T14:53:50
| 13,927,559
| 10
| 9
|
Apache-2.0
| 2018-09-07T15:52:27
| 2013-10-28T14:07:29
|
Perl
|
UTF-8
|
R
| false
| false
| 3,368
|
r
|
proportionTest.R
|
# rm(list=ls())
# outFile='output'
# parSampleFile1='fileList2.txt'
# parSampleFile2=""
# parSampleFile3=''
# parSampleFile4=''
# parFile1='RA_4949_mouse.Category.Table.csv'
# parFile2=''
# parFile3=''
#setwd("C:/projects/composition_test")
library(reshape2)
library(ggplot2)
library(DirichletReg)
library(pheatmap)
comp<-read.csv(parFile1,row.names=1, check.names=F)
getSampleInGroup<-function(groupDefineFile, samples, useLeastGroups=FALSE,onlySamplesInGroup=FALSE){
allGroupData<-read.delim(groupDefineFile,as.is=T,header=F)
if(ncol(allGroupData) < 3){
allGroupData$V3<-"all"
}
result<-NULL
for(title in unique(allGroupData$V3)){
groupData<-allGroupData[allGroupData$V3 == title,]
if(useLeastGroups){
groupData<-groupData[which(groupData$V1 %in% samples),]
groups<-lapply(unique(groupData$V2), function(x){
nrow(groupData[groupData$V2==x,])
})
discardGroups<-NULL
groupNames=unique(groupData$V2)
for(i in c(1:length(groupNames))){
sampleI<-groupData[groupData$V2==groupNames[i], "V1"]
for(j in c(i+1:length(groupNames))){
sampleJ<-groupData[groupData$V2==groupNames[j], "V1"]
if(all(sampleI %in% sampleJ)){
discardGroups<-c(discardGroups, groupNames[i])
break
}else if(all(sampleJ %in% sampleI)){
discardGroups<-c(discardGroups, groupNames[j])
}
}
}
groupData<-groupData[!(groupData$V2 %in% discardGroups),]
}
groupData$V2<-factor(groupData$V2)
res<-NULL
gnameChanged<-FALSE
for(sample in samples){
stog<-groupData[groupData$V1==sample,,drop=F]
if(nrow(stog) == 1){
group<-stog[1,2]
}else if(nrow(stog) > 1){
groups<-stog$V2[order(stog$V2)]
group<-paste(groups, collapse=":")
gnameChanged<-TRUE
}else{
group<-"Unknown"
gnameChanged<-TRUE
}
res<-rbind(res, data.frame(V1=sample, V2=group, V3=title))
}
if (onlySamplesInGroup) {
#remvoe "Unknown" group
res<-res[which(res$V2!="Unknown"),]
}
result<-rbind(result, res)
}
return(result)
}
gs<-getSampleInGroup(parSampleFile1, colnames(comp), onlySamplesInGroup=TRUE)
comp<-comp[,gs$V1]
rownames(gs)<-gs$V1
group<-gs[colnames(comp), "V2"]
comp_data<-comp[7:15,]
comp_data<-t(comp_data)/apply(comp_data,2,sum)
##different from any groups.
data<-data.frame(group=group)
data$sample=DR_data(comp_data)
model1<-DirichReg(sample~group,data)
model2<-DirichReg(sample~1,data )
ares<-anova(model1,model2)
df <- data.frame(matrix(unlist(ares[1:6]), nrow=length(ares[1:6]), byrow=T))
rownames(df)<-names(ares)[1:6]
colnames(df)<-c("model_base", "model_group")
write.csv(df, file=paste0(outFile, ".anova.csv"))
plotdata<-data.frame(comp_data)
plotdata$Sample<-rownames(plotdata)
plotdata$Group<-group
mplotdata<-melt(plotdata,id.vars=c("Sample", "Group"), variable.name = "smallRNA", value.name="Proportion")
png(paste0(outFile, ".boxplot.png"), width=3000, height=2000, res=300)
g<-ggplot(mplotdata, aes(x=Group, y=Proportion, color=Group)) + geom_boxplot() + facet_wrap(~smallRNA, scales="free_y") + theme_bw() + theme(strip.background = element_blank())
print(g)
dev.off()
png(paste0(outFile, ".heatmap.png"), width=2000, height=2000, res=300)
pheatmap(t(comp_data))
dev.off()
|
6daac75a8189f31ba614f5d73e21bbc0f7b57f9e
|
37fb539825eb513562fd580e4c3573c141e774fd
|
/Plot3.R
|
7a5eacca50ceabc528aa5abe2061c5bf153ee9d1
|
[] |
no_license
|
DonMof/ExData_Plotting1
|
4d68053f0a392a907292fb824acf2e2f0a84e704
|
7752a532f0fd88a0290fec79f9f5b5a74a7f6331
|
refs/heads/master
| 2020-03-23T06:00:06.350065
| 2018-07-30T06:04:31
| 2018-07-30T06:04:31
| 141,182,372
| 0
| 0
| null | 2018-07-16T19:11:13
| 2018-07-16T19:11:12
| null |
UTF-8
|
R
| false
| false
| 1,675
|
r
|
Plot3.R
|
plot3week1 <- function () {
#install.packages("dplyr")
#install.packages("data.table") # install it
library(dplyr)
library(data.table)
library(lubridate)
# Week 1 plot 1 file
# Read in the zip file
power_data <- read.table("household_power_consumption.txt",sep=";",header=T)
subsetofpd <- data.frame()
for (i in 1:2075259) {
if ((power_data[i,"Date"]=="1/2/2007") | (power_data[i,"Date"]=="2/2/2007")) {
subsetofpd <- rbind(subsetofpd,power_data[i,])
}
}
png(file="plot3.png")
subsetofpd$newdate <-paste(as.character(subsetofpd[,"Date"]), as.character(subsetofpd[,"Time"]))
#Plot all of the graphs together.
plot(as.POSIXlt(subsetofpd$newdate,format="%d/%m/%Y%t%H:%M:%S"),as.numeric(as.character(subsetofpd$Sub_metering_1)),typ="l",ylim=c(0,40),xlab=" ", ylab="Energy Sub Metering")
#Include the additional data via lines and points specifications
points(as.POSIXlt(subsetofpd$newdate,format="%d/%m/%Y%t%H:%M:%S"),as.numeric(as.character(subsetofpd$Sub_metering_3)),col="blue",typ="l")
lines(as.POSIXlt(subsetofpd$newdate,format="%d/%m/%Y%t%H:%M:%S"),as.numeric(as.character(subsetofpd$Sub_metering_3)),col="blue")
points(as.POSIXlt(subsetofpd$newdate,format="%d/%m/%Y%t%H:%M:%S"),as.numeric(as.character(subsetofpd$Sub_metering_2)),col="red",typ="l")
lines(as.POSIXlt(subsetofpd$newdate,format="%d/%m/%Y%t%H:%M:%S"),as.numeric(as.character(subsetofpd$Sub_metering_2)),col="red",typ="l")
#Add the legend
legend("topright",legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"),
col=c("black","red", "blue"),lty=1)
dev.off()
}
|
d3a4c367f89e23bbdbf9b333abc8d559f1779adb
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/output/sources/authors/992/colbycol/ncol.colbycol.R
|
392954923e4c00889b56de74d128192e6e5c4eb2
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 477
|
r
|
ncol.colbycol.R
|
#################################################################
#
# File: ncol.colbycol.r
# Purpose: Gets the number of columns in a colbycol object
#
# Created: 20090509
# Author: Carlos J. Gil Bellosta
#
# Modifications:
#
#################################################################
ncol.colbycol <- function( x )
{
if( class( x ) != "colbycol" )
stop("An object of class colbycol is required.")
length( colnames( x ) )
}
|
b430e5b6d502cb5ef96b73492f444e472fba1972
|
c504360a5e3127560c9c3038610664bacf431e33
|
/R_code/stab_results.R
|
87ea0b1c5f4beb624c599dd7ca15274e4e1dff17
|
[] |
no_license
|
gauzens/Intertidal_food_webs
|
3c2342d79da9e54a88d2f575d0dd227a35f9f92b
|
800d0363354e2b70ebde14d5e34c3d24476bfa93
|
refs/heads/master
| 2020-11-24T23:42:57.532551
| 2020-04-25T11:22:58
| 2020-04-25T11:22:58
| 228,392,473
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,071
|
r
|
stab_results.R
|
rm(list = ls())
library(nlme)
detach(tab)
library(ggplot2)
library(measurements)
library(RColorBrewer)
library(viridis)
library("ggsci")
library(car)
error.bars<-function(x,y,xbar,ybar, coul)
{arrows(x,y-ybar,x,y+ybar,code=3,angle=90,length=0.05, col=coul)
#arrows(x-xbar,y,x+xbar,y,code=3,angle=90,length=0.05, col=coul)
}
mmToInches = function(x){
return(x/25.4)
}
tab = read.csv('/home/bg33novu/projects/WarmingWebs/WarmingHPC/outputs/warming_exp_k5.csv', header = FALSE)
names(tab) = c('name', 'region', 'repl', 'depth', 'area', 'tempsee', 'init_temp', 'richness', 'warming', 'nb_ext_nn_basal', 'nb_ext', 'resilience', 'oi', 'tl', 'connectance')
tab$prop_ext = 1 - tab$nb_ext/tab$richness
tab$mean.temp = round(ave(tab$init_temp, tab$region, FUN = mean), 1)
radius2 = tab$area / pi
tab$size = (1/6) * pi * tab$depth *(3*radius2 + tab$depth*tab$depth)
latitude = tab$tempsee
latitude[grepl('Portugal_txt', tab$name)] = '38 42 38'
latitude[grepl('Canada_txt/PP', tab$name)] = '48 29 33'
latitude[grepl('Canada_txt/SF', tab$name)] = '48 36 43'
latitude[grepl('England_txt/MB', tab$name)] = '50 21 24'
latitude[grepl('England_txt/W', tab$name)] = '50 19 00'
latitude[grepl('Portugal_txt/CR', tab$name)] = '38 42 38'
latitude[grepl('Portugal_txt/RV', tab$name)] = '39 17 11'
latitude[grepl('Mad_txt/PC', tab$name)] = '32 46 32'
latitude[grepl('Mad_txt/RM', tab$name)] = '32 38 44'
latitude[grepl('Mad_txt/RM', tab$name)] = '32 38 44'
latitude[grepl('Brasil\\(SP\\)_txt/', tab$name)] = '-23 35 00'
latitude[grepl('Brasil\\(CE\\)_txt/FX', tab$name)] = '3 13 04'
latitude[grepl('Brasil\\(CE\\)_txt/GJ', tab$name)] = '3 14 14'
latitude[grepl('Moz_txt', tab$name)] = '-25 58 36'
unique(cbind.data.frame(tab$name, latitude))
latitude2 = as.numeric(conv_unit(latitude, "deg_min_sec", "dec_deg"))
latitude2[grepl('Portugal_txt/L1', tab$name)] = 39.1508
latitude2[grepl('Portugal_txt/L2', tab$name)] = 39.1508
latitude2[grepl('Portugal_txt/L3', tab$name)] = 39.245223
latitude2[grepl('Portugal_txt/L4', tab$name)] = 39.245223
tab$latitude = latitude2
latitude3 = ave(latitude2, tab$region, FUN = mean)
tab$latitudem = latitude3
rm(latitude)
attach(tab)
inits = unique(init_temp)
warm = unique(warming)
netws = unique(name)
#### reading info about topology and environment######
data = read.csv('/home/bg33novu/projects/WarmingWebs/results/network_topologies.csv', header = T)
names(data) = c("name", "temp_sea", "temp", "area", "detph" ,"elevation", "Nb species", "Nb links", "Connectance", "Mean omnivory",
"PredPreyRatio", "Mean generalism", "% basal", "%intermediate", "%top", "Mean TL", "Mean TL top species", "Avg path length" )
library(ggplot2)
head(data)
# size = data$area*data$detph
# considering pools as spherical caps:
radius2 = data$area / pi
size = (1/6) * pi * data$detph *(3*radius2 + data$detph*data$detph)
data$min.sea.temp = NA
data$max.sea.temp = NA
data$mean.sea.temp = NA
data$mean.summer = NA
data$min.summer = NA
data$max.summer = NA
#########################
seetemps = read.table('~/projects/WarmingWebs/R_code2/see_temps', header = T, sep = ',')
for (name in seetemps$name){
cat(name, '\n')
xx = grep(pattern = name, x = data$name, fixed = TRUE)
data$min.sea.temp[xx] = seetemps$Min[seetemps$name == name]
data$max.sea.temp[grep(pattern = name, x = data$name, fixed = TRUE)] = seetemps$Max[seetemps$name == name]
data$mean.sea.temp[grep(pattern = name, x = data$name, fixed = TRUE)] = seetemps$Mean[seetemps$name == name]
data$mean.summer[grep(pattern = name, x = data$name, fixed = TRUE)] = seetemps$MeanSummer[seetemps$name == name]
data$min.summer[grep(pattern = name, x = data$name, fixed = TRUE)] = seetemps$MaxSummer[seetemps$name == name]
data$max.summer[grep(pattern = name, x = data$name, fixed = TRUE)] = seetemps$MinSummer[seetemps$name == name]
}
data$amplitude = data$max.sea.temp - data$min.sea.temp
#### doing correspondences between the two dataframes ########
tab$name = gsub('/web2.txt', '', tab$name)
tab$name = gsub("\\[u'", '', tab$name)
tab$name = gsub("'", '', tab$name)
data$name[1]
which(tab$name == data$name[1])
tab$name[which(tab$name == data$name[1])]
tab$name[1]
names(tab)
tab = merge(tab, data[,c(1,6,25)])
detach(tab)
attach(tab)
# length(names(data))
#####################################################
#####################################################
##################33 starting effects: ##############
#####################################################
tab.init = tab.init = tab[tab$warming == 0, ]
plot(tab.init$prop_ext ~ tab.init$init_temp, ylab = "Persistence", xlab = "Initial web temperature")
lines(tapply(tab.init$prop_ext, tab.init$init_temp, mean) ~ sort(inits))
tapply(tab.init$prop_ext, tab.init$mean.temp, mean)
plot(tab.init$prop_ext ~ tab.init$tempsee, xlab = "Persistence", ylab = "local sea temperature")
points(as.numeric(names(tapply(tab.init$prop_ext, tab.init$tempsee, mean))), tapply(tab.init$prop_ext, tab.init$tempsee, mean), col = 'red', pch = 16)
boxplot(tab.init$prop_ext ~ tab.init$tempsee, new = TRUE)
pdf('/homes/bg33novu/projects/WarmingWebs/paper/figures/init_persistence.pdf')
df.init = data.frame(Persistence = tab.init$prop_ext, init_temp = tab.init$tempsee, region = tab.init$region)
ggplot(df.init, aes(group = init_temp, y = Persistence, x = init_temp)) +
geom_boxplot()+
geom_point()+
xlab("Average summer temperature")
dev.off()
see2 = (df.init$init_temp)^2
df.init$see2 = see2
summary(lm(df.init$Persistence ~ df.init$init_temp))
summary(lm(df.init$Persistence ~ see2))
summary(lme(Persistence ~ see2, random = ~1|region, data = df.init))
# ------------------------------------------------------------------------------------------------ #
######### plot prop of extinctions #########
df = data.frame(Persistence = tab$prop_ext, Warming = tab$warming, init_temp = tab$tempsee, mean.temp = tab$tempsee)
pdf('/home/bg33novu/projects/WarmingWebs/paper/figures/Fig.3NCC_color_blind.pdf', width = mmToInches(82), height = mmToInches(62))
ggplot(df,
aes(x=Warming, y=Persistence, group = as.factor(mean.temp), fill = as.factor(mean.temp), colour = as.factor(mean.temp)),
)+
stat_summary(geom="point", fun.y=mean, cex = 0.4)+
geom_smooth(method = 'lm', alpha = 0.5, cex = 0.2)+
# scale_color_manual(values=cbPalette, name = paste('Average \nsummer temp.'))+
# scale_fill_manual(values=cbPalette, name = paste('Average \nsummer temp.'))+
# guides(fill=guide_legend(ncol=2))+
# guides(color=guide_legend(ncol=2))+
scale_fill_viridis_d(option = "plasma", name = paste('Average \nsummer \nsea temp.:'))+
scale_color_viridis_d(option = "plasma", name = paste('Average \nsummer \nsea temp.:'))+
theme_classic()+
theme(
axis.text.x = element_text(size = 8),
axis.title.x = element_text(size = 10),
axis.text.y = element_text(size = 8),
axis.title.y = element_text(size = 10),
legend.text = element_text(size = 6),
legend.title = element_text(size = 10),
plot.margin = unit(c(1.1,0.1,0.1,0.8), "cm"),
legend.key.size=unit(0.5,"cm")
# legend.title = '',
# legend.text =
# legend.position = 'bottom'
)
dev.off()
# Figure: persistence vs. warming with one point/regression line per initial
# (local) temperature group. Relies on `df` and `tab` built earlier in the
# script; palette scales are commented out so ggplot defaults apply here.
# NOTE(review): no pdf() device is opened before this ggplot, so the
# dev.off() below may close an unintended device — confirm.
ggplot(df,
       aes(x=Warming, y=Persistence, group = init_temp, colour = init_temp),
)+
  # one point per warming level = mean persistence across replicates
  stat_summary(geom="point", fun.y=mean, cex = 0.4)+
  geom_smooth(method = 'lm', alpha = 0.5, cex = 0.2)+
  # scale_color_manual(values=cbPalette, name = paste('Local \ntemp.'))+
  # scale_fill_manual(values=cbPalette, name = paste('Local \ntemp.'))+
  # guides(fill=guide_legend(ncol=2))+
  # guides(color=guide_legend(ncol=2))+
  theme_classic()+
  theme(
    axis.text.x = element_text(size = 8),
    axis.title.x = element_text(size = 10),
    axis.text.y = element_text(size = 8),
    axis.title.y = element_text(size = 10),
    legend.text = element_text(size = 6),
    legend.title = element_text(size = 10),
    plot.margin = unit(c(1.1,0.1,0.1,0.8), "cm"),
    legend.key.size=unit(0.5,"cm")
    # legend.title = '',
    # legend.text =
    # legend.position = 'bottom'
  )
dev.off()
# detach(tab)
# ggsave('~/projects/WarmingWebs/plots/short_gradientggplot.pdf')
########################## plot using average pool temperature #######################
# Same figure, but grouped by average pool temperature; written to the
# color-blind-safe Fig. 3 PDF (viridis "plasma" palette).
df = data.frame(Persistence = tab$prop_ext, Warming = tab$warming, mean.temp = tab$mean.temp)
pdf('/home/bg33novu/projects/WarmingWebs/paper/figures/Fig.3NCC_color_blind.pdf', width = mmToInches(82), height = mmToInches(62))
ggplot(df,
       aes(x=Warming, y=Persistence, group = as.factor(mean.temp), fill = as.factor(mean.temp), colour = as.factor(mean.temp)),
)+
  stat_summary(geom="point", fun.y=mean, cex = 0.4)+
  geom_smooth(method = 'lm', alpha = 0.5, cex = 0.2)+
  # scale_color_manual(values=cbPalette, name = paste('Average \nsummer temp.'))+
  # scale_fill_manual(values=cbPalette, name = paste('Average \nsummer temp.'))+
  # guides(fill=guide_legend(ncol=2))+
  # guides(color=guide_legend(ncol=2))+
  scale_fill_viridis_d(option = "plasma", name = paste('Average \npool temp.:'))+
  scale_color_viridis_d(option = "plasma", name = paste('Average \npool temp.:'))+
  theme_classic()+
  theme(
    axis.text.x = element_text(size = 8),
    axis.title.x = element_text(size = 10),
    axis.text.y = element_text(size = 8),
    axis.title.y = element_text(size = 10),
    legend.text = element_text(size = 6),
    legend.title = element_text(size = 10),
    plot.margin = unit(c(1.1,0.1,0.1,0.8), "cm"),
    legend.key.size=unit(0.5,"cm")
    # legend.title = '',
    # legend.text =
    # legend.position = 'bottom'
  )
dev.off()
# Logit-transform persistence (a proportion in [0, 1]) for the linear mixed
# models fitted below. car::logit() handles the transformation.
library(car)
library(MASS)
library(nlme)
library(tidyr)
logit_prop = logit(prop_ext)
tab$logit_prop = logit_prop
#####################################################
##### initial network structures ####################
#####################################################
# Rows at zero experimental warming describe the initial food-web structure.
# FIX: removed the duplicated assignment (`tab.init = tab.init = ...`).
tab.init = tab[tab$temp_incr == 0, ]
# Mean trophic level, omnivory index and connectance per initial temperature.
TLs = tapply(tab.init$TL, sort(tab.init$temp), mean, na.rm = TRUE)
ois = tapply(tab.init$oi, sort(tab.init$temp), mean, na.rm = TRUE)
Cs = tapply(tab.init$C, sort(tab.init$temp), mean)
Ls = nb_s * nb_s * Cs  # number of links: L = S^2 * C
# NOTE(review): `slopes`, `temps`, `colors` and `nb_s` must have been defined
# earlier in the script; they are not created in this section.
plot(slopes ~ sort(temps), col = colors)
plot(slopes ~ nb_s, col = colors, xlab = "richness")
plot(TLs ~ sort(temps), col = colors, ylab = "trophic levels",xlab = "init temp")
plot(slopes ~ TLs, col = colors, ylab = "slopes",xlab = "init TL")
plot(slopes ~ ois, col = colors, ylab = "slopes",xlab = "init oi")
plot(slopes ~ Cs, col = colors, ylab = "slopes",xlab = "connectance")
plot(slopes ~ Ls, col = colors, ylab = "slopes",xlab = "Number of links")
plot(ois ~ sort(temps))
# How do the persistence-warming slopes relate to initial structure?
model = lm(slopes ~ TLs*Cs + ois)
step.AIC = stepAIC(model)
anova(lm(slopes ~ TLs*ois*Cs))
model.best = lm(slopes ~ TLs + ois)
anova(model.best)
# Alternative persistence models (raw proportion response).
model0 = lm(prop_ext ~ temp*temp_incr)
model1 = lm(prop_ext ~ as.factor(temp)*temp_incr)
model2 = glm(prop_ext ~ temp*temp_incr, family = quasi)
model3 = lme(prop_ext ~ temp*temp_incr, random = ~ 1 |name, data = tab)
######### plot total number of extinctions, per FW #########
# ylimit = c(min(nb_ext), max(nb_ext))
ylimit = c(0,2)
plot(nb_ext ~ temp_incr, ylim = ylimit, col = 'white')
plot(nb_ext ~ temp_incr, col = 'white')
# lines(tapply(nb_ext, temp_incr, mean) ~ warming, ylim = ylimit)
# One mean-extinction curve per food web.
# NOTE(review): `tab[name == netw,]` uses a bare `name`, so it only works if
# `tab` was attach()ed (or `name` exists globally) — confirm. Each plot()
# call inside the loop opens a fresh plot, overwriting the previous one;
# the second `col = netw` is dead (overwritten by `col = i` next iteration).
i = 1
for (netw in netws){
  # cat(netw)
  # cat('\n')
  col = i
  data = tab[name == netw,]
  yaxis = tapply(data$nb_ext, data$temp_incr, mean)
  plot(yaxis ~ warming, col = col)
  col = netw
  i = i+1
}
#############################################################################
################### Statistical models ####################################
#############################################################################
logit.prop_ext = logit(tab$prop_ext)
# first define a variable for beach:
# Clean the python-style replicate labels (u'...') down to a beach code.
tab$beach = gsub("u'", "", tab$repl)
tab$beach = gsub("'", "", tab$beach)
unique(tab$beach)
unique(cbind.data.frame(tab$region, tab$beach))
tab$beach = gsub("\\d+$", "", tab$beach)  # strip trailing replicate digits
# unique(tab$beach)
tab$beach[grep("PP+", tab$beach)] = "PP"
tab$beach[grep("SF+", tab$beach)] = "SF"
# unique(tab$beach)
# unique(cbind.data.frame(tab$region, tab$beach))
# tab$beach[grepl('FX', tab$beach)]
# Candidate random-effect structures; only the LAST assignment is used
# (random intercept per food web `name`).
random_model = ~ warming |region/beach/name # need to change the control here
random_model = ~ 1 | region/beach/name
random_model = ~ 1 | name
# Full fixed-effect model (sea temperature x warming + covariates).
# NOTE(review): the next line is an exact duplicate of this one.
model.elev = lme(logit.prop_ext ~ amplitude + elevation + size + abs(latitude) + tempsee*warming, random = random_model, data = tab)
model.elev = lme(logit.prop_ext ~ amplitude + elevation + size + abs(latitude) + tempsee*warming, random = random_model, data = tab)
B.c = BIC(model.elev)
# stepAIC(model.elev)
# minus one variable
# Manual backward selection by BIC; suffix letters mark the DROPPED terms
# (a = amplitude, e = elevation, s = size, l = latitude).
model.s = lme(logit.prop_ext ~ amplitude + elevation + abs(latitude) + tempsee*warming, random = random_model, data = tab)
B.s = BIC(model.s)
model.l = lme(logit.prop_ext ~ amplitude + elevation + size + tempsee*warming, random = random_model, data = tab)
B.l = BIC(model.l)
model.a = lme(logit.prop_ext ~ elevation + size + abs(latitude) + tempsee*warming, random = random_model, data = tab)
B.a = BIC(model.a)
# minus 2
model.ae = lme(logit.prop_ext ~ size + abs(latitude) + tempsee*warming, random = random_model, data = tab)
B.ae = BIC(model.ae)
model.as = lme(logit.prop_ext ~ elevation + abs(latitude) + tempsee*warming, random = random_model, data = tab)
B.as = BIC(model.as)
model.al = lme(logit.prop_ext ~ elevation + size + tempsee*warming, random = random_model, data = tab)
B.al = BIC(model.al)
model.es = lme(logit.prop_ext ~ amplitude + abs(latitude) + tempsee*warming, random = random_model, data = tab)
B.es = BIC(model.es)
model.el = lme(logit.prop_ext ~ amplitude + size + tempsee*warming, random = random_model, data = tab)
B.el = BIC(model.el)
model.sl = lme(logit.prop_ext ~ amplitude + elevation + tempsee*warming, random = random_model, data = tab)
B.sl = BIC(model.sl)
# minus3
model.aes = lme(logit.prop_ext ~ abs(latitude) + tempsee*warming, random = random_model, data = tab) # <===========
B.aes = BIC(model.aes)
model.ael = lme(logit.prop_ext ~ size + tempsee*warming, random = random_model, data = tab)
B.ael = BIC(model.ael)
model.esl = lme(logit.prop_ext ~ amplitude + tempsee*warming, random = random_model, data = tab)
B.esl = BIC(model.esl)
#minus4
model.aesl = lme(logit.prop_ext ~ tempsee*warming, random = random_model, data = tab)
B.aesl = BIC(model.aesl)
BIC(lme(logit.prop_ext ~ abs(latitude)*tempsee*warming, random = random_model, data = tab))
# without interaction:
model.add = lme(logit.prop_ext ~ tempsee+warming, random = random_model, data = tab)
B.add = BIC(model.add)
model.w = lme(logit.prop_ext ~ warming, random = random_model, data = tab)
B.w = BIC(model.w)
# Side-by-side BIC table for all candidate models.
xx = rbind(c('B.c', 'B.s', 'B.l', 'B.a', 'B.ae', 'B.as', 'B.al', 'B.es', 'B.el', 'B.sl', 'B.aes', 'B.ael', 'B.esl', 'B.aesl'),
           c(B.c, B.s, B.l, B.a, B.ae, B.as, B.al, B.es, B.el, B.sl, B.aes, B.ael, B.esl, B.aesl))
xx[2, ] = round(as.numeric(xx[2, ]), 2)
# stepAIC(model.elev)
anova(model.aesl)
summary(model.aesl)
BIC(model.elev)
AIC(model.aesl)
#########################################################################################
################ make linear models for each of the individual locations ################
#########################################################################################
# Fit a persistence~warming mixed model separately for each local sea
# temperature, and print one tab-separated row per location
# (region, mean temperature, F, p, slope). Uses the globals `tab`,
# `tempsee`, `prop_ext` and `random_model` from the enclosing script.
output = function(){
  # Fit the lme on the current `subtab` and return c(F, p, slope).
  # When every persistence value is 1, the logit is infinite and the model
  # cannot be fitted, so NAs are returned instead.
  stats = function(){
    # print(as.character(unique(subtab$region)))
    if (min(subtab$prop_ext) < 1){
      x = lme(logit_prop ~ warming, random = random_model, data = subtab)
      a = anova(x)  # computed once instead of twice
      return(c(a$`F-value`[2], a$`p-value`[2], summary(x)$tTable[2,1]))
    }else{
      # FIX: return three NAs (F, p, slope) so that `res[3]` below stays
      # aligned; the original returned only two values here.
      return(c(NA, NA, NA))
    }
  }
  i = 0
  cat('region \t mean_temp \t Fvalue \t pvalue \t coeff\n')
  for (temp in sort(unique(tempsee))){
    i = i + 1
    subtab = tab[tab$tempsee == temp,]
    subtab$logit_prop = logit(prop_ext[tab$tempsee == temp])
    # cat(unique(subtab$mean.temp), '\t')
    # Convergence failures become a single NA row rather than aborting.
    res = tryCatch(stats(), error = function(e)
      return(NA)
    )
    cat(substr(as.character(unique(subtab$region)), 3, 12), '\t', unique(subtab$tempsee), '\t', res[1], '\t', res[2], '\t', res[3], '\n' )
  }
}
library(stringr)
# Random intercepts nested within beach for the per-location fits.
random_model = ~ 1 | beach/name
# Capture output()'s printed table and parse it back into a data frame.
results.stats = as.data.frame(capture.output(output()), col.names = F)
results.stats = results.stats %>% separate('capture.output(output())',
                                           c('region', 'mean_temp', 'Fvalue', 'pvalue', 'coeff'),
                                           sep = '\t')
results.stats = results.stats[-1,]  # drop the header row
results.stats
plot(results.stats$coeff ~ results.stats$mean_temp, xlab = "Local see temperature", ylab = "slope of the persistence to warming regression")
summary(lm(results.stats$coeff ~ as.numeric(results.stats$mean_temp)))
# check how intercepts depends on initial temperature
plot(lm(results.stats$coeff ~ as.numeric(results.stats$mean_temp)))
m = list()
output()
# test if even Mozambique (warmest site, 23.8 C) shows a significant increase
test = tab[tab$mean.temp == 23.8, ]
model.test = lme(logit_prop ~ warming, random = ~1|name, data = test)
summary(model.test)
anova(model.test)
####################################################
######3 model based on pool temperature (for SI) ###
####################################################
# Same manual backward BIC selection as above, but using average pool
# temperature (mean.temp) instead of sea temperature; only the LAST
# random_model assignment is used.
random_model = ~ warming |region/beach/name # need to change the control here
random_model = ~ 1 | region/beach/name
random_model = ~ 1 | name
# NOTE(review): the next line is an exact duplicate of this one.
model.elev = lme(logit.prop_ext ~ amplitude + elevation + size + abs(latitude) + mean.temp*warming, random = random_model, data = tab)
model.elev = lme(logit.prop_ext ~ amplitude + elevation + size + abs(latitude) + mean.temp*warming, random = random_model, data = tab)
B.c = BIC(model.elev)
# stepAIC(model.elev)
# minus one variable (suffix letters mark the dropped terms:
# a = amplitude, e = elevation, s = size, l = latitude)
model.s = lme(logit.prop_ext ~ amplitude + elevation + abs(latitude) + mean.temp*warming, random = random_model, data = tab)
B.s = BIC(model.s)
model.l = lme(logit.prop_ext ~ amplitude + elevation + size + mean.temp*warming, random = random_model, data = tab)
B.l = BIC(model.l)
model.a = lme(logit.prop_ext ~ elevation + size + abs(latitude) + mean.temp*warming, random = random_model, data = tab)
B.a = BIC(model.a)
# minus 2
model.ae = lme(logit.prop_ext ~ size + abs(latitude) + mean.temp*warming, random = random_model, data = tab)
B.ae = BIC(model.ae)
model.as = lme(logit.prop_ext ~ elevation + abs(latitude) + mean.temp*warming, random = random_model, data = tab)
B.as = BIC(model.as)
model.al = lme(logit.prop_ext ~ elevation + size + mean.temp*warming, random = random_model, data = tab)
B.al = BIC(model.al)
model.es = lme(logit.prop_ext ~ amplitude + abs(latitude) + mean.temp*warming, random = random_model, data = tab)
B.es = BIC(model.es)
model.el = lme(logit.prop_ext ~ amplitude + size + mean.temp*warming, random = random_model, data = tab)
B.el = BIC(model.el)
model.sl = lme(logit.prop_ext ~ amplitude + elevation + mean.temp*warming, random = random_model, data = tab)
B.sl = BIC(model.sl)
# minus3
model.aes = lme(logit.prop_ext ~ abs(latitude) + mean.temp*warming, random = random_model, data = tab) # <===========
B.aes = BIC(model.aes)
model.ael = lme(logit.prop_ext ~ size + mean.temp*warming, random = random_model, data = tab)
B.ael = BIC(model.ael)
model.esl = lme(logit.prop_ext ~ amplitude + mean.temp*warming, random = random_model, data = tab)
B.esl = BIC(model.esl)
#minus4
model.aesl = lme(logit.prop_ext ~ mean.temp*warming, random = random_model, data = tab)
B.aesl = BIC(model.aesl)
BIC(lme(logit.prop_ext ~ abs(latitude)*mean.temp*warming, random = random_model, data = tab))
# without interaction:
model.add = lme(logit.prop_ext ~ mean.temp+warming, random = random_model, data = tab)
B.add = BIC(model.add)
model.w = lme(logit.prop_ext ~ warming, random = random_model, data = tab)
B.w = BIC(model.w)
# BIC comparison table across all candidates.
xx = rbind(c('B.c', 'B.s', 'B.l', 'B.a', 'B.ae', 'B.as', 'B.al', 'B.es', 'B.el', 'B.sl', 'B.aes', 'B.ael', 'B.esl', 'B.aesl'),
           c(B.c, B.s, B.l, B.a, B.ae, B.as, B.al, B.es, B.el, B.sl, B.aes, B.ael, B.esl, B.aesl))
xx[2, ] = round(as.numeric(xx[2, ]), 2)
xx
summary(model.aesl)
anova(model.aesl)
# stepAIC(model.elev)
# Exploratory versions of the persistence figure.
# NOTE(review): `tab$latitudem` — possibly a typo for `latitude`; confirm the
# column exists in `tab`. `cbPalette` must be defined earlier in the script.
df = data.frame(Persistence = tab$prop_ext, Warming = tab$warming, init_temp = tab$tempsee, mean.temp = tab$latitudem)
ggplot(df,
       aes(x=Warming, y=Persistence, group = as.factor(mean.temp), fill = as.factor(init_temp), colour = as.factor(init_temp)),
)+
  stat_summary(geom="point", fun.y=mean, cex = 0.4)+
  geom_smooth(method = 'lm', alpha = 0.3, cex = 0.2)+
  scale_color_manual(values=cbPalette, name = paste('Average \nsummer temp.'))+
  scale_fill_manual(values=cbPalette, name = paste('Average \nsummer temp.'))+
  # guides(fill=guide_legend(ncol=2))+
  # guides(color=guide_legend(ncol=2))+
  theme_classic()+
  theme(
    axis.text.x = element_text(size = 8),
    axis.title.x = element_text(size = 10),
    axis.text.y = element_text(size = 8),
    axis.title.y = element_text(size = 10),
    legend.text = element_text(size = 6),
    legend.title = element_text(size = 10),
    plot.margin = unit(c(1.1,0.1,0.1,0.8), "cm"),
    legend.key.size=unit(0.5,"cm")
    # legend.title = '',
    # legend.text =
    # legend.position = 'bottom'
  )
# Same plot with init_temp pre-converted to a factor and a viridis palette.
df = data.frame(Persistence = tab$prop_ext, Warming = tab$warming, init_temp = as.factor(tab$tempsee), mean.temp = tab$latitudem)
ggplot(df,
       aes(x=Warming, y=Persistence, group = init_temp, fill = init_temp, colour = init_temp),
)+
  stat_summary(geom="point", fun.y=mean, cex = 0.5)+
  geom_smooth(method = 'lm', alpha = 0.5, cex = 0.2)+
  scale_fill_viridis_d(option = "plasma")+
  scale_color_viridis_d(option = "plasma")+
  # scale_color_manual(values= scale_color_brewer(palette = "Dark2"), name = paste('Average \nsummer temp.'))+
  # scale_fill_manual(values= scale_color_brewer(palette = "Dark2"), name = paste('Average \nsummer temp.'))+
  # guides(fill=guide_legend(ncol=2))+
  # guides(color=guide_legend(ncol=2))+
  theme_classic()+
  # scale_color_brewer(palette = "YlGnBu")+
  # scale_fill_brewer(palette = "YlGnBu")+
  # scale_color_futurama()+
  theme(
    axis.text.x = element_text(size = 8),
    axis.title.x = element_text(size = 10),
    axis.text.y = element_text(size = 8),
    axis.title.y = element_text(size = 10),
    legend.text = element_text(size = 6),
    legend.title = element_text(size = 10),
    plot.margin = unit(c(1.1,0.1,0.1,0.8), "cm"),
    legend.key.size=unit(0.5,"cm")
    # legend.title = '',
    # legend.text =
    # legend.position = 'bottom'
  )
# scale_color_brewer(palette = "Dark2")
#######################################
### old stuff
# AIC / BIC comparisons
# model_c = lme(logit(prop_ext) ~ tempsee*warming*abs(latitude)*size, random = ~ 1 |name, data = tab)
# model_s = lme(logit(prop_ext) ~ tempsee*warming*abs(latitude), random = ~ 1 |name, data = tab)
# model_l = lme(logit(prop_ext) ~ tempsee*warming*size, random = ~ 1 |name, data = tab)
# model_t = lme(logit(prop_ext) ~ warming*abs(latitude)*size, random = ~ 1 |name, data = tab)
#
# model_ls = lme(logit(prop_ext) ~ tempsee*warming, random = ~ 1 |name, data = tab)
# model_lt = lme(logit(prop_ext) ~ warming*size, random = ~ 1 |name, data = tab)
# model_ts = lme(logit(prop_ext) ~ warming*abs(latitude), random = ~ 1 |name, data = tab)
#
# model_lst = lme(logit(prop_ext) ~ warming, random = ~ 1 |name, data = tab)
#
#
# model_corrected = lme(logit(prop_ext) ~ tempsee*warming + abs(latitude), random = ~ 1 |name, data = tab)
# model_see = lme(logit(prop_ext) ~ tempsee*warming + latitude, random = ~ 1 |name, data = tab)
# model_bis = lme(logit(prop_ext) ~ tempsee*latitude, random = ~ 1 |name, data = tab)
#
# AIC(model_see)
# AIC(model_corrected)
# # c = complete, s = minus size, l = minus lattitude ...
# mod.names = c('c', 's', 'l', 't', 'ls', 'lt', 'ts', 'null')
# bics = c(BIC(model_c), BIC(model_s), BIC(model_l), BIC(model_t), BIC(model_ls), BIC(model_lt), BIC(model_ts), BIC(model_lst))
# aics = c(AIC(model_c), AIC(model_s), AIC(model_l), AIC(model_t), AIC(model_ls), AIC(model_lt), AIC(model_ts), AIC(model_lst))
# cbind(mod.names, aics, bics)
#
# model.best = model_ls
#
# library(lmerTest)
# model_c.bis = lme(logit(prop_ext) ~ tempsee*warming*abs(latitude)*size*elevation, random = ~ 1 |name, data = tab, method = 'ML')
# stepAIC(model_c.bis)
# AIC(model_ls)
# BIC(model_ls)
#
# model.try = lme(logit(prop_ext) ~ tempsee + warming + abs(latitude) + size +
# elevation + tempsee:warming + tempsee:abs(latitude) + warming:abs(latitude) +
# tempsee:size + warming:size + abs(latitude):size + tempsee:elevation +
# warming:elevation + abs(latitude):elevation + size:elevation +
# tempsee:warming:abs(latitude) + tempsee:warming:size + warming:abs(latitude):size +
# tempsee:warming:elevation + tempsee:abs(latitude):elevation +
# warming:abs(latitude):elevation + tempsee:size:elevation +
# warming:size:elevation + tempsee:warming:abs(latitude):elevation +
# tempsee:warming:size:elevation, random =~1 |name, data = tab, method = 'ML')
#
# AIC(model.try)
# BIC(model.try)
#
#
# model.intelligent = lme(logit(prop_ext) ~ elevation + size + tempsee*warming, random = ~ 1 |name, data = tab)
# BIC(model.intelligent)
# BIC(model_ls)
# anova(model.intelligent)
#
# model = lme(logit(prop_ext) ~ init_temp*warming, random = ~ 1 |name, data = tab)
# model2 = lme(logit(prop_ext) ~ init_temp*warming*size, random = ~ 1 |name, data = tab)
# model3 = lme(logit(prop_ext) ~ init_temp*warming + size + size:warming, random = ~ 1 |name, data = tab)
# model4 = lme(logit(prop_ext) ~ init_temp*warming + size, random = ~ 1 |name, data = tab)
# model5 = lme(logit(prop_ext) ~ init_temp+warming, random = ~ 1 |name, data = tab)
#
# #checking elevation
# model.elev = lme(logit(prop_ext) ~ init_temp*warming*elevation*size*abs(latitude), random = ~ 1 |name, data = tab)
#
# BIC(model.elev)
# BIC(model.best)
# BIC(lme(logit(prop_ext) ~ init_temp*warming+elevation, random = ~ 1 |name, data = tab))
#
|
50300c3b36dc32e8626d3a89c1ba8275c44ba57e
|
5f66de9c67ebbf11de219b15663f631335584914
|
/Estatítica/BasePaises_Discritivas.R
|
5401ee521934a9c47b776eac5128db9062362fd9
|
[] |
no_license
|
ZecaRueda/FIAP4IA
|
da319cab3a7cbad3198d6f897530257c0bd7bab4
|
6edd82e08d64a55d9bf3ac193ce996a35f58ef90
|
refs/heads/master
| 2020-04-11T10:18:17.156568
| 2018-12-06T23:30:09
| 2018-12-06T23:30:09
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,444
|
r
|
BasePaises_Discritivas.R
|
# Clear the R workspace
rm(list=ls(all=TRUE))
# Show up to 2 decimal places (penalise scientific notation)
options("scipen" = 2)
# Read the CSV file (first column = row names, ';' separator)
paises <- read.csv("C:/Users/logonrmlocal/Documents/paulofranco/FIAP4IA/DADOS_Papercsv_1.csv", row.names=1, sep=";")
fix(paises)
# Check the variable formats
str(paises)
# Descriptive statistics
summary(paises)
mean(paises$p100ms) # mean
median(paises$p100ms) # median
quantile(paises$p100ms,type=4) # quartiles
quantile(paises$p100ms,.65,type=4) # exact percentile
quantile(paises$p100ms,seq(.01,.99,.01))
range(paises$p100ms) # range (min, max)
diff(range(paises$p100ms)) # difference between the largest and smallest value
min(paises$p100ms) # minimum value of x
max(paises$p100ms) # maximum value of x
var(paises$p100ms) # variance
sd(paises$p100ms) # standard deviation
# Coefficient of variation (%) for selected events
CV_p100ms<-sd(paises$p100ms)/mean(paises$p100ms)*100 # coefficient of variation
CV_p100ms
CV_p200ms<-sd(paises$p200ms)/mean(paises$p200ms)*100
CV_p200ms
CV_p800mm<-sd(paises$p800mm)/mean(paises$p800mm)*100
CV_p800mm
# Histogram and boxplot side by side
par (mfrow=c(1,2))
hist(paises$p100ms)
boxplot(paises$p100ms)
par (mfrow=c(1,1))
# Generate the histograms in a 4-row by 2-column layout
par (mfrow=c(4,2))
hist(paises$p100ms)
hist(paises$p200ms)
hist(paises$p400ms)
hist(paises$p800mm)
hist(paises$p1500mm)
hist(paises$p3000mm)
hist(paises$pmaratm)
par (mfrow=c(1,1))
hist(paises$p100ms ,col=c("pink"), col.main="darkgray", prob=T , main="p100ms")
# Boxplots for every event
par (mfrow=c(3,3))
boxplot(paises$p100ms)
boxplot(paises$p200ms)
boxplot(paises$p400ms)
boxplot(paises$p800mm)
boxplot(paises$p1500mm)
boxplot(paises$p3000mm)
boxplot(paises$pmaratm)
par (mfrow=c(1,2))
boxplot(paises$pmaratm,col = "dark red")
boxplot(paises$pmaratm,range = 2.5)
par (mfrow=c(1,1))
?boxplot
# Outliers per event (the $out component of boxplot.stats)
boxplot.stats(paises$p100ms)
boxplot.stats(paises$p200ms)$out
boxplot.stats(paises$p400ms)$out
boxplot.stats(paises$p800mm)$out
boxplot.stats(paises$p1500mm)$out
boxplot.stats(paises$p3000mm)$out
boxplot.stats(paises$pmaratm)$out
# Pairwise scatterplots: p100ms against the other events
par (mfrow=c(2,3))
plot (paises$p100ms,paises$p200ms)
plot (paises$p100ms,paises$p400ms)
plot (paises$p100ms,paises$p800mm)
plot (paises$p100ms,paises$p1500mm)
plot (paises$p100ms,paises$p3000mm)
plot (paises$p100ms,paises$pmaratm)
par (mfrow=c(2,3))
plot (paises$p200ms,paises$p400ms)
plot (paises$p200ms,paises$p800mm)
plot (paises$p200ms,paises$p1500mm)
plot (paises$p200ms,paises$p3000mm)
plot (paises$p200ms,paises$pmaratm)
par (mfrow=c(2,2))
plot (paises$p400ms,paises$p800mm)
plot (paises$p400ms,paises$p1500mm)
plot (paises$p400ms,paises$p3000mm)
plot (paises$p400ms,paises$pmaratm)
par (mfrow=c(2,3))
plot (paises$p800mm,paises$p1500mm)
plot (paises$p800mm,paises$p3000mm)
plot (paises$p800mm,paises$pmaratm)
plot (paises$p1500mm,paises$p3000mm)
plot (paises$p1500mm,paises$pmaratm)
plot (paises$p3000mm,paises$pmaratm)
par (mfrow=c(1,1))
# Upper-panel function for pairs(): prints the pairwise correlation in the
# panel, with text size proportional to |r|. `cex.cor` optionally fixes the
# base text size; when missing it is derived from the label width.
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) {
  usr <- par("usr")
  on.exit(par(usr))                     # restore user coordinates on exit
  par(usr = c(0, 1, 0, 1))              # panel-local coordinates
  r <- cor(x, y, use = "pairwise.complete.obs")
  txt <- format(c(r, 0.123456789), digits = digits)[1]
  txt <- paste0(prefix, txt)
  # FIX: the original assigned `cex` only when cex.cor was missing, but then
  # always used `cex` — so supplying cex.cor raised "object 'cex' not found".
  if (missing(cex.cor))
    cex.cor <- 0.8 / strwidth(txt)
  # abs(r) makes the printed correlations proportional in size
  text(0.5, 0.5, txt, cex = cex.cor * abs(r))
}
#pdf(file = "grafico.pdf")
# Scatterplot matrix: smoothed scatterplots below the diagonal,
# correlation values (panel.cor above) over the diagonal.
pairs(paises, lower.panel=panel.smooth, upper.panel=panel.cor)
|
f3419a78c4a0b6f18ef6074ccadac39ab54a9673
|
ce787bd5433526b83f1ea4e0912aca346f181ae7
|
/man/chi.mle.Rd
|
ce87c81d931f430bcf2b2da4078f4f9e26b0d979
|
[] |
no_license
|
wangyf/rseismNet
|
d43cc77382276cbba8ff225d4e748a5c3a93c65b
|
34264097f3c1fe3ca5f78d8ae673599903418e1c
|
refs/heads/master
| 2023-03-17T11:42:02.864201
| 2019-07-06T07:26:21
| 2019-07-06T07:26:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,899
|
rd
|
chi.mle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fmd.R
\name{chi.mle}
\alias{chi.mle}
\title{\eqn{\chi}-value}
\usage{
chi.mle(m, mc, mbin = 0.1)
}
\arguments{
\item{m}{a numeric vector of earthquake magnitudes}
\item{mc}{the completeness magnitude value}
\item{mbin}{the magnitude binning value (if not provided, \code{mbin = 0.1})}
}
\value{
The numeric value of \eqn{\chi}.
}
\description{
Estimate the \eqn{\chi}-value (i.e. slope) of the incomplete part of the
elemental frequency-magnitude distribution with \eqn{\chi} = \eqn{\kappa} - \eqn{\beta},
\eqn{\kappa} representing the earthquake detection parameter (Mignan, 2012).
}
\details{
\eqn{\chi} is estimated similarly to \eqn{\beta}, by using the maximum likelihood
estimation method (Aki, 1965).
}
\examples{
theta <- list(kappa = 2 * log(10), beta = log(10), mc = 2)
m.angular <- efmd.sim(1e4, theta)
mdistr <- fmd(m.angular)
plot(mdistr$mi, mdistr$Ni, log = "y")
points(mdistr$mi, mdistr$ni)
chi <- chi.mle(m.angular, theta$mc)
chi + theta$beta # = kappa
beta <- beta.mle(m.angular, theta$mc)
abline(v = theta$mc, lty = "dotted", col = "red")
abline(a = log10(mdistr$ni[which(mdistr$mi >= theta$mc)[1]]) +
beta / log(10) * theta$mc, b = -beta / log(10), col = "red")
abline(a = log10(mdistr$ni[which(mdistr$mi <= theta$mc)[length(which(mdistr$mi <= theta$mc))]]) -
chi / log(10) * theta$mc, b = chi / log(10), col = "red")
}
\references{
Aki, K. (1965), Maximum likelihood estimate of b in the formula log N =
a - bM and its confidence limits, Bull. Earthquake Res. Inst. Univ. Tokyo, 43, 237-239
Mignan, A. (2012), Functional shape of the earthquake frequency-magnitude
distribution and completeness magnitude, J. Geophys. Res., 117, B08302,
\href{http://onlinelibrary.wiley.com/doi/10.1029/2012JB009347/full}{doi: 10.1029/2012JB009347}
}
\seealso{
\code{beta.mle}; \code{efmd.sim}; \code{mc.val}
}
|
7af35a42f56f3af082d12f99c3437d84ba4fb7d4
|
2c9cb01e8fee85a5d4c1184bb9f7db1eda5bbaa5
|
/R/greenampt.R
|
35b086019cb3d2279c7e1ee20f9b15d974ef19fe
|
[] |
no_license
|
Mactavish11/vadose
|
8cc191528d148caee32b42aee9e45c7d1cc34945
|
d8466273720822c8c1a6d6313175a20652bd5892
|
refs/heads/master
| 2023-03-17T19:45:15.855983
| 2018-02-19T14:04:57
| 2018-02-19T14:04:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,218
|
r
|
greenampt.R
|
#' Green and Ampt infiltration parameter optmisation in R
#'
#' @description This function optimises Green and Ampt (1911)
#' infiltration parameters: Ks and G. It also predicts infiltration.
#'
#' @inheritParams philip
#' @inheritParams OFEST
#' @inheritParams BEST
#' @inheritParams lass3
#' @inheritParams ksat
#' @inheritParams vg
#' @param Ks Hydraulic conductivity
#' @param G Green and Ampt parameter that is equivalent to Sorptivity
#' @author George Owusu
#'
#' @references
#' Green, W. A., & Ampt, G. A., 1911..4,1-24. (1911). Studies on soil physics:1.
#' The flow of air and water through soils. Journal of Agricultural Science, 4(1-24).
#' @return
#' \itemize{
#' \item{Ks:} { Hydraulic conductivity [L]}
#' \item{G:} { Green and Ampt parameter that is equivalent to Sorptivity [LT^0.5]}
#' \item{predict:}{predicted infiltration}
#' \item{output:} { output of the group simulation}
#' }
#' @export
#'
#' @examples
#' data=read.csv(system.file("ext","sys","exampleBEST.csv",package="vadose"))
#' greenampt1<-greenampt(data=data,time="time",I="I")
#' #print(gof(greenampt1))
#' #plot(greenampt1)
#' predict(greenampt1)
#' coef(greenampt1)
#'
#' #group simulation
#' data=read.csv(system.file("ext","sys","infiltration2.csv",package="vadose"))
#' greenampt1g<-greenampt(data=data,time="minutes",I="CumInfil",group="ID")
#' coef(greenampt1g)
#' #generic function ######################################
#' @rdname greenampt
#generic function
greenampt<-function(data,time,I,Ks=0.1,G=0.1,group=NULL) UseMethod ("greenampt")############
#' @export
#' @rdname greenampt
#default function
# Default method: optimises Green & Ampt parameters Ks (hydraulic
# conductivity) and G (sorptivity-like term) via nlxb() nonlinear least
# squares, optionally per group, and computes predicted infiltration.
# Returns a "greenampt" object consumed by the predict/plot/summary/print/
# coef methods below.
greenampt.default<-function(data,time,I,Ks=0.1,G=0.1,group=NULL)###################
{
  # suppress warnings from the optimiser
  options(warn=-1)
  # Build a data frame when raw vectors were supplied instead of `data`.
  if(is.null(data)){
    data=data.frame(cbind(I,time))
    names(data)=c("I","time")
  }
  # If `data` already has I/time columns, use those column names.
  if(!is.null(data)){
    if(!is.null(data$I)){
      I="I"
    }
    if(!is.null(data$time)){
      time="time"
    }
  }
  # declare the group output in case the group variable is not null
  addoutput=NULL
  # starting values for the optimisation ######################
  ones <- c(Ks=Ks, G = G) # all ones start
  # determine whether it is rate or cumulative data
  rate="yes"
  f=NULL
  # Decreasing series = rate data: rejected (cumulative input required).
  # NOTE(review): everything after the return() below is unreachable dead
  # code (apparently a disabled rate-to-cumulative conversion).
  if(data[[I]][length(data[[I]])]<data[[I]][1])
  {
    return (print("the function accepts only cumulative infiltration in cm"))
    data$f=data[I]
    #data[I]=cumsum(data[I])
    f="yes"
  }
  if(data[[I]][length(data[[I]])]>data[[I]][1])
  {
    # cumulative formula: I = Ks*t + G*log(1 + I/G) ###############
    greenamptF<-paste(I,"~Ks*",time,"+G*log(1+(",I,"/G))")
    rate="no"
  }
  else
  {
    # rate formula #######################################
    greenamptF<-paste(I,"~G*Ks*",time,"^(G-1)")
  }
  # check for grouped data and execute the optimisation function
  if(is.null(group)){
    greenampt<- nlxb(greenamptF, start = ones, trace = FALSE, data = data)
    print(greenampt)
  }
  else
  {
    # one fit per group level
    aggdata =row.names(table(data[group]))
    # accumulator for per-group results ####################################
    addoutput=data.frame(groupid=factor(),time=numeric(),observed=numeric(),predict=numeric(),Ks=numeric(),G=numeric())
    i=1
    while(i<=length(aggdata)){
      print(paste("Group Number:",aggdata[i]))
      single=data[data[group]==aggdata[i],]
      # fit this group's subset
      greenampt<- nlxb(greenamptF, start = ones, trace = FALSE, data = single)
      print(greenampt)
      print("....................................................................................")
      # fitted Green & Ampt parameters #################################
      Ks=coef(greenampt)[1]
      G=coef(greenampt)[2]
      groupdata=single[[group]]
      time2=single[[time]]
      # predicted cumulative infiltration ##############################
      predict2=Ks*(time2)+(G*log(1+(single[[I]]/G)))
      if(rate=="yes"){
        # predicted infiltration rate ###############################
        predict2=G*Ks*(time2^(G-1))
      }
      # NOTE(review): rbind inside a loop grows the frame each iteration;
      # fine for small group counts.
      addoutput=rbind(addoutput,data.frame(groupid=groupdata,time=time2,observed=single[[I]],predict=predict2,Ks=Ks,G=G))
      i=i+1
    }
  }
  # predictions for ungrouped data ############################
  if(is.null(group)){
    Ks=coef(greenampt)[1]
    time2=data[[time]]
    I=data[[I]]
    time=data[[time]]
    G=coef(greenampt)[2]
    # NOTE(review): the fit has only two parameters (Ks, G), so
    # coef(greenampt)[3] is NA; this overwrites the observed I just read
    # above and propagates NA into `predict`. Looks like leftover code —
    # confirm intent before relying on ungrouped predictions.
    I=coef(greenampt)[3]##########################################
    predict=Ks*(time2)+(G*log(1+(I/G)))
    predict3=Ks *((G/I)+1)
    if(!is.null(f)){
      # unreachable in practice: `f` is only set in the dead branch above
      predict=predict3
      I=data$f[[1]]
    }
    if(rate=="yes"){
      # rate prediction #####################################
      predict=G*Ks*(time2^(G-1))
    }
  }
  else
  {
    # grouped case: expose the per-group columns directly
    G=addoutput$G
    I=data[[I]]
    time=data[[time]]
    Ks=addoutput$Ks
    predict=addoutput$predict
    time=addoutput$time
  }
  # assemble the return object ########################################
  factor<-list(greenampt=greenampt,data=data,time=time,I=I,Ks=Ks,G=G,group=group,
               predict=predict,rate=rate,formular=greenamptF,addoutput=addoutput,output=addoutput)
  factor$call<-match.call()
  class(factor)<-"greenampt"
  factor
}
#' @export
#' @rdname greenampt
#predict function#########################
# Predict infiltration from a fitted "greenampt" object. With `time = NULL`
# the stored predictions are printed; with `time` supplied the Green & Ampt
# equation is re-evaluated at those times (per group when grouped).
# NOTE(review): scalar `&` is used where `&&` would be conventional; it works
# here because both operands are length-1.
predict.greenampt<-function(object,time=NULL,...)
{
  x<-object
  if(is.null(object$group)){
    # default: stored (time, predict) pairs from the fit
    predict=as.data.frame(cbind(x$time,x$predict))
    names(predict)=c("time","predict")
    # rate prediction at user-supplied times ####################
    if(!is.null(time)&object$rate=="yes"){####################
      predict=object$G*object$Ks*(time^(object$G-1))
      predict=predict[[1]]
    }
    # cumulative prediction at user-supplied times ###############
    if(!is.null(time)&object$rate!="yes"){
      predict=object$Ks*(time)+(object$G*log(1+(object$I/object$G)))
      predict=predict[[1]]
    }
    print((predict))
  }
  else######################################
  {
    # grouped object: default is the full per-group output table
    predict=object$addoutput
    # rate, averaged per group #################################
    if(!is.null(time)&object$rate=="yes"){
      predict=object$G*object$Ks*(time^(object$G-1))
      predict2= (data.frame(cbind(object$addoutput$groupid,predict)))
      names(predict2)=c("groupid","predict")
      predict=aggregate(predict2$predict,by=list(predict2$groupid),FUN=mean)
      colnames(predict)=c("Group","Predict")
      predict$Group=row.names(table(object$addoutput$groupid))
    }
    # cumulative, averaged per group ###############################
    if(!is.null(time)&object$rate!="yes"){
      predict=object$Ks*(time)+(object$G*log(1+(object$I/object$G)))
      predict2= (data.frame(cbind(object$addoutput$groupid,predict)))
      names(predict2)=c("groupid","predict")
      predict=aggregate(predict2$predict,by=list(predict2$groupid),FUN=mean)
      colnames(predict)=c("Group","Predict")
      predict$Group=row.names(table(object$addoutput$groupid))
    }
    print(predict)
  }
}
#plot function
#' @export
#' @rdname greenampt
# Plot observed infiltration (points) with the fitted curve (red line).
# Ungrouped: single panel titled with R^2. Grouped: one panel per group in an
# automatically chosen mfrow layout (unless `layout` is supplied).
plot.greenampt<-function(x,xlab="Time(Minutes)",ylab="Cumulative (cm)",main=NULL,layout=NULL,...)
{
  object<-x
  x<-object
  G=x$G
  time=x$time
  I=x$I
  rate=x$rate
  op=par()  # snapshot of graphics parameters, restored after grouped plots
  # switch the default y label for rate fits
  if(rate=="yes"&ylab=="Cumulative (cm)"){
    ylab="rate(cm/mins)"
  }
  if(is.null(x$group)){
    # single fit: R^2 between observed and predicted shown in the title
    r2=cor(I,x$predict)^2
    if(is.null(main)){
      main=paste("R2=",round(r2,4))
    }
    plot(time,I,xlab=xlab,ylab=ylab,main=main,...)
    #plot(time,I)
    par(new=T)
    predict=x$predict
    lines(time,predict,col="red")
  }
  else
  {
    aggdata =row.names(table(object$addoutput$groupid))
    data=object$addoutput
    # choose a grid layout from the number of groups
    if(is.null(layout)){
      lengthD=length(aggdata)
      if(lengthD==2){
        op=par(mfrow=c(1,2),mar=c(2, 2, 2, 2))
      }
      if(lengthD==3){
        op=par(mfrow=c(1,3),mar=c(2, 2, 2, 2))
      }
      if(lengthD==4){
        op=par(mfrow=c(2,2),mar=c(4, 4, 2, 2))
      }
      if(lengthD==5){
        op=par(mfrow=c(2,3),mar=c(2, 2, 2, 2))
      }
      if(lengthD==6){
        op=par(mfrow=c(3,3),mar=c(2, 2, 2, 2))
      }
      if(lengthD>6){
        op=par(mfrow=c(round(lengthD/2),round(lengthD/2)),mar=c(2, 2, 2, 2))
      }
    }
    #print(length(aggdata))
    # one panel per group
    i=1
    while(i<=length(aggdata)){
      #label=aggdata[i]
      #print (label)
      single=data[data["groupid"]==aggdata[i],]
      I=single$observed
      predict=single$predict
      time=single$time
      r2=cor(I,predict)^2
      title=NULL
      if(is.null(main)){
        title=paste(aggdata[i],"(R2=",round(r2,4),")")
      }
      plot(time,I,main=main,xlab=xlab,ylab=ylab,...)
      title(title)
      par(new=T)
      lines(time,predict,col="red")
      i=i+1
    }
    par(op)
  }
}
#' @export
#' @rdname greenampt
#summary function
summary.greenampt<-function(object,...)
{
  # Ungrouped fit: print the summary of the underlying nlxb model.
  # Grouped fit: print the mean Ks/G estimates per group.
  if (is.null(object$group)) {
    fit <- object$greenampt
    print(summary(fit))
  } else {
    params <- aggregate(cbind(Ks, G) ~ groupid, data = object$addoutput, mean)
    print(params)
  }
}
#print function
#' @export
#' @rdname greenampt
print.greenampt<-function(x,...)
{
  # Ungrouped: print the fitted nlxb object.
  # Grouped: print the mean Ks/G per group.
  if (is.null(x$group)) {
    print(x$greenampt)
  } else {
    group_coefs <- aggregate(cbind(Ks, G) ~ groupid, data = x$addoutput, mean)
    print(group_coefs)
  }
}
#' @export
#' @rdname greenampt
# Coefficient method: for an ungrouped fit, print (and return) the fitted
# coefficients of the underlying model; for a grouped fit, the per-group
# mean Ks and G.
coef.greenampt <- function(object, ...)
{
  if (is.null(object$group)) {
    result <- coef(object$greenampt)
  } else {
    result <- aggregate(cbind(Ks, G) ~ groupid,
                        data = object$addoutput, mean)
  }
  print(result)
}
|
5682758afd7ffa83e426a0596119f8c929dcc7ff
|
24de8feb7a5c21c5b32536f5c0e048945ca522e5
|
/script.R
|
74107acb932d18e87db68754b3d1fd36f06deb77
|
[] |
no_license
|
thomasantonakis/Practical-Machine-Learning-CP
|
cec430fbc9034fadde509330acc43c1c7f4d3eab
|
3cd2debe41142c4671bfe92b7c3e3f6a59a3ce05
|
refs/heads/master
| 2021-01-16T20:42:17.890661
| 2014-11-22T17:52:59
| 2014-11-22T17:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,842
|
r
|
script.R
|
# Coursera Practical Machine Learning course project: fit a random forest on
# PCA-reduced sensor data (pml-training.csv) and predict the 20 graded test
# cases. Requires caret and randomForest; downloads are cached on disk.
# Libraries
library(caret)
library(randomForest)
# Downloading the data (skipped when the files already exist locally)
if(!file.exists("data")){
dir.create("data")
}
trainUrl<-"https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
testUrl<-"https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
if(!file.exists("data/train.csv")){
download.file(trainUrl, destfile="./data/train.csv", method="auto")
}
if(!file.exists("data/test.csv")){
download.file(testUrl, destfile="./data/test.csv", method="auto")
}
dateDownloaded<-date()
# loading the data; blanks and "NA" are read as missing
# NOTE(review): the stray second comma below is an empty positional argument,
# so header simply keeps its default of TRUE.
data<-read.csv("./data/train.csv", , na.strings = c("NA", ""))
final_test<-read.csv("./data/test.csv", na.strings = c("NA", ""))
# Exploring the data
str(data)
names(data)
summary(data$classe)
# Cross Validation: seeded 70/30 train/test split of the labelled data
set.seed(0)
inTrain = createDataPartition(y=data$classe, p=0.7, list=FALSE)
training = data[inTrain,]
testing = data[-inTrain,]
dim(training);dim(testing)
# Clearing out variables with too many missing values.
missingvals = sapply(training, function(x) {sum(is.na(x))})
table(missingvals)
# 100 columns have 13767 missing values, we must filter them out from all dataframes.
tbexcluded<- names(missingvals[missingvals !=0])
training = training[, !names(training) %in% tbexcluded]
testing = testing[, !names(testing) %in% tbexcluded]
# final_test = final_test[, !names(final_test) %in% tbexcluded]
str(training)
# Clearing variables with not much sense like time stamps, usernames, row names etc
training = training[, - c(1:7)]
testing = testing[, - c(1:7)]
# Still 53 variables, let's do PCA
dim(training)
# Model Building
# Principal Components: keep enough components to explain 95% of the variance
# (column 53 is the outcome "classe" and is excluded from the projection)
preProc <- preProcess(training[,-53],method="pca",thresh = 0.95)
preProc
# Forget about initial variables, we now use the Principal Components. (25)
trainTransformed <- predict(preProc, training[,-53])
testTransformed <- predict(preProc, testing[,-53])
dim(trainTransformed)
# Random Forest (randomForest directly; caret::train was slower, kept for reference)
#modelFit <- train(training$classe ~ ., data = trainTransformed, method="rf")
modelFit <- randomForest(trainTransformed,training$classe, do.trace = TRUE)
modelFit
##Accuracy: out-of-sample performance on the held-out 30%
predicts<-predict(modelFit,testTransformed)
confusionMatrix(testing$classe,predicts)
###################################
####### Submission ##############
###################################
# Clean the test data with the same column filters applied to the training set
final_test = final_test[, !names(final_test) %in% tbexcluded]
dim(final_test)
final_test = final_test[, - c(1:7)]
dim(final_test)
final_final_test <- predict(preProc, final_test[,-53])
answers <- predict(modelFit,final_final_test)
submission<-as.character(answers)
# Write each predicted answer to its own file problem_id_<i>.txt in the
# working directory (the Coursera submission format).
#
# x: character (or coercible) vector of answers, one file per element.
#
# Fix: iterate with seq_along() instead of 1:length(x) -- for an empty
# input 1:0 yields c(1, 0) and the old loop wrote two bogus files.
pml_write_files = function(x){
  for (i in seq_along(x)) {
    filename <- paste0("problem_id_", i, ".txt")
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
}
# Generate the submission files, one per predicted answer
pml_write_files(submission)
|
97ddc08a0cf60a412dbddce85b9b030d97ddf881
|
f0167ebc6323c601e75de50252c62f44d306ab2e
|
/R/survey_prep/r2f_score.R
|
33acf2afdd78ea868712eff5e9c400619ae4e38a
|
[] |
no_license
|
mattdblanchard/PhD-study-2
|
bfd21386c2d84948953fb3555de400e94b71a5f9
|
fa2c8eb2cddb6aaf40b0e65959cf67fae480e437
|
refs/heads/master
| 2021-07-05T14:14:07.137015
| 2021-05-18T01:29:10
| 2021-05-18T01:29:10
| 242,681,922
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,083
|
r
|
r2f_score.R
|
# Score the Resistance-to-Framing (R2F) task: combine the positive- and
# negative-frame datasets (prepared by the sourced scripts, which define
# r2f_pos / r2f_neg) and compute per-participant framing-susceptibility
# scores as the mean absolute difference between frames.
source("R/survey_prep/r2f_neg.R")
source("R/survey_prep/r2f_pos.R")
# Need to create a new itemnum variable that is uniform across both frames
# some participants did not complete both frames of R2F
# need to remove these teams and calculate vars using those that completed both Pos & Neg
# used the following code to identify these teams
p_uid <- unique(r2f_pos$uid)
n_uid <- unique(r2f_neg$uid)
unique(r2f_neg %>% filter(!uid %in% p_uid) %>% select(uid))
unique(r2f_pos %>% filter(!uid %in% n_uid) %>% select(uid))
# Renumber items 1..n within participant and drop incomplete teams
# (the hard-coded group ids below were identified by the checks above)
r2f_neg <- r2f_neg %>%
group_by(uid) %>%
mutate(ItemNum = 1:n(),
frame = "N") %>%
select(-R2FItemNum) %>%
filter(group != "18080610_1") %>%
filter(group != "17102710_2")
r2f_pos <- r2f_pos %>%
group_by(uid) %>%
mutate(ItemNum = 1:n(),
frame = "P") %>%
select(-R2FItemNum) %>%
filter(group != "18081413_1") %>%
filter(group != "18110716_1") %>%
filter(group != "19013010_1") %>%
filter(group != "17102710_2")
r2f <- rbind(r2f_pos, r2f_neg)
# Per item: |positive-frame response - negative-frame response|, then the
# participant score is the mean over items (higher = more frame-susceptible)
r2f.uid <- r2f %>%
group_by(uid, ItemNum) %>%
summarise(ind.resp = abs(Ind_Stimulus.RESP[frame == "P"] - Ind_Stimulus.RESP[frame == "N"]),
grp.resp = abs(Grp_Stimulus.RESP[frame == "P"] - Grp_Stimulus.RESP[frame == "N"])) %>%
group_by(uid) %>%
summarise(r2f.ind = mean(ind.resp, na.rm = TRUE),
r2f.grp = mean(grp.resp, na.rm = TRUE))
# to check which uids differ between the two r2f frames
# p <- unique(r2f_pos$uid)
# n <- unique(r2f_neg$uid)
#
# r2f_pos %>% filter(!uid %in% n) %>% select(uid)
# r2f_pos %>% filter(str_detect(uid, "18041213")) %>% select(uid, ResponseID, StartDate)
# r2f_neg %>% filter(str_detect(uid, "18041213")) %>% select(uid, ResponseID, StartDate)
# groups less influenced by framing
# t.test(r2f$ind_resp, r2f$grp_resp, paired = TRUE)
# Reliability - currently not working
# # FOR INDIVIDUALS
# x <- r2f %>%
# drop_na() %>%
# select(uid, ItemNum, Grp_Stimulus.RESP) %>%
# # mutate(n = 1:n()) %>%
# spread(ItemNum, Grp_Stimulus.RESP) %>%
# select(-uid)
# #
# psych::alpha(x)$total$raw_alpha
|
9c1c9f1df51bdafc9cbfec8c60fe7dc99ab705c3
|
9beb6005d6581bb534b6eef49ed82296499518a7
|
/16_Modelo_Estadistico_Regresion_R.R
|
4442e9e0e81d51e4ff5ff06b2a064ad1129e5d95
|
[] |
no_license
|
BidartMG/R-Mas-Scripts-Practicas
|
68ca1c635d235cfcbe932afdba4e3235299cc6e8
|
af53bff823d372206cfcc6b51867b1d25a6ef980
|
refs/heads/master
| 2022-12-25T06:52:48.642663
| 2020-09-29T00:22:18
| 2020-09-29T00:22:18
| 297,231,643
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 837
|
r
|
16_Modelo_Estadistico_Regresion_R.R
|
# Models: simple linear regression walkthrough on the built-in Orange dataset
# load package for data analysis
library(tidyverse)
# load the dataset into the environment
data("Orange")
# preview the data
head(Orange)
# Problem / question:
# On average, how large will the circumference of an orange
# tree be 800 days after planting?
# First, eyeball a hand-picked line over the scatterplot
Orange %>%
ggplot(aes(x = age,
y = circumference)) +
geom_point() +
geom_abline(intercept = 10,
slope = 0.1,
col = 'blue')
# "best" simple linear regression fit
lm(circumference ~ age, data = Orange)
# Re-plot using the fitted intercept/slope printed by lm() above
# (hard-coded here for teaching purposes)
Orange %>%
ggplot(aes(x = age,
y = circumference)) +
geom_point() +
geom_abline(intercept = 17.3997,
slope = 0.1068,
col = 'blue') +
geom_vline(xintercept = 800,
col = 'red')
# Predict the circumference at 800 days from the fitted equation
dias <- 800
medida <- 0.1068 * dias + 17.3997
print(medida)
|
7d1345693da56c131213a25c6ba38f502550f5d4
|
fb508866590bcd29193226f8100a3dc77f923c93
|
/R/dm.bc.script.R
|
3dc77357c128c0e21ccbadcae0c8be3bc3674942
|
[
"MIT"
] |
permissive
|
bostasie/WFCTSI-Public
|
45f8ebcf4c15e810c27f31d72a176e0cf7fa6482
|
8d4c13f0c077af5ec8cf69ce5098500a2e76b71d
|
refs/heads/master
| 2020-05-26T04:54:51.213043
| 2018-05-10T14:43:41
| 2018-05-10T14:43:41
| 84,992,771
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,583
|
r
|
dm.bc.script.R
|
######################
#MERGE DATA FROM I2B2#
######################
#import i2b2 file, merge and clean data in preparation for model
# data set predefined as patients with diabetes type II between ages of 40 and 90 on any medication
#outcome- bladder cancer
#exposure - tzd (thiazolidinedione medication)
#covariates - gender, race, age, bmi, smoking status
#baseline - 2015-01-01
###################
#About R interface#
###################
#import file into workspace
#install.packages("readxl")
library(readxl)
#finding help
#https://cran.r-project.org/web/packages/
#https://cran.r-project.org/web/packages/readxl/readxl.pdf
??readxl
#Write function to read multiple sheets
# Read every sheet of an Excel workbook into a named list of data frames.
# All columns are imported as character, empty/whitespace-only cells become
# NA, and surrounding whitespace is trimmed.
read_excel_allsheets <- function(filename) {
  sheet_names <- readxl::excel_sheets(filename)
  result <- lapply(sheet_names, function(sheet) {
    readxl::read_excel(filename, sheet = sheet,
                       col_types = "text",  # import everything as character
                       col_names = TRUE,    # first row holds the column names
                       na = c("", " "),     # treat blanks as missing
                       trim_ws = TRUE)      # strip surrounding whitespace
  })
  names(result) <- sheet_names
  result
}
#call function, apply it to your file (a named list of sheets is returned)
bc.data<-read_excel_allsheets("~/workshop/dm.bladder.cancer.xlsx")
#bc.data<-read_excel_allsheets("Z:/klenoir/dm.bladder.cancer.xlsx") #path example
#this does not require a function
#bc.data<-read.csv(file="Z:/klenoir/dm.bladder.cancer.csv", header=T, colClasses="character", na.strings=c("", "#N/A", "NULL")))
##############
#DEMOGRAPHICS#
##############
#pull out patients and clean demographics, calculate age at baseline
#############
#REVIEW DATA#
#############
#separate into data frames and make names user friendly
#list vs data frame (class)
pts<-as.data.frame(bc.data$Patients)
names(pts)<-c("MRN","DOB","gender","race")
#General data views, checks
head(pts) #see top of data
tail(pts) #see bottom of data
##########
#PRACTICE# class, str, how to call "columns", vector
##########
#Do we have unique set of pts (no duplicates)?
length(unique(pts$MRN))
nrow(pts)
#or
length(unique(pts$MRN))==nrow(pts)
###################
#Set baseline date#
###################
#If medication is active on this date, time to first occurrence of bladder diagnosis
pts$baseline<-as.Date("2015-01-01")
#Date format and examples
#https://www.stat.berkeley.edu/~s133/dates.html
#handy date packages: lubridate (add/subtract years/months/days), chron
#Calculate Age at baseline (parse the "m/d/Y H:M:S" text into a Date first)
pts$dob<-as.Date(as.POSIXlt(pts$DOB, format="%m/%d/%Y %H:%M:%S"))
pts$age<-pts$baseline-pts$dob
#look at the top of the data set, what's wrong? (age is a difftime in days)
#get years, make it numeric, round it (no decimals)
pts$age<-round(as.numeric((pts$baseline-pts$dob)/365.25),0)
#Formatting
#as.numeric
#as.character
#as.Date
str(pts$age) #numeric type
#histogram of age
##########
#PRACTICE#
##########
#visualize race
#install.packages("ggplot2")
library(ggplot2) #http://www.cookbook-r.com/Graphs/
#review composition
table(pts$race)
#visualize with ggplot
g <- ggplot(pts, aes(race)) +geom_bar()
###########
#DIAGNOSES#
###########
#use list of icd9 and 10 codes to find patients who were diagnosed with bladder cancer
#merge this into demographic file
#calculate CCI (Charlson Comorbidity Index) from icd codes
diagcodes<-as.data.frame(bc.data$Diagnoses)
names(diagcodes)<-c("mrn", "visit", "visit_date", "diag9", "diag10", "diag_desc")
#Find those with bladder cancer
#http://www.icd10data.com/
#icd9 codes: "188"
#icd10 codes: "C67"
#default grepl function catches 188 and x188.x (substring match, not anchored)
#returns T/F argument
#says: go row by row, and if there is a 188 code in diag9 column or a C67 in the diag10 column,
#return "true". If not, return "false"
#make a new column called "bc" for the result
diagcodes$bc<-sapply(1:nrow(diagcodes), function(x) ifelse(grepl("188", diagcodes[x,"diag9"], ignore.case=T) |
grepl("C67", diagcodes[x,"diag10"], ignore.case=T),T,F))
#check logic
#for all that are true, did I get the expected codes?
check<-diagcodes[diagcodes$bc==T,] #get these where bladder cancer is T
table(check$diag9)
#################
#PRACTICE# #check icd10 codes that we identified
##########
#get bladder cancer cases only (format date first)
diagcodes$diag_date<-as.Date(as.POSIXlt(diagcodes$visit_date, format="%m/%d/%Y %H:%M:%S"))
#get where my bc identification column is T and where there is not an NA value
bc.cases<-diagcodes[!is.na(diagcodes$bc) & diagcodes$bc==T,]
#get earliest date of diagnosis: order by date to bring earliest to top
bc.cases<-bc.cases[order(bc.cases$diag_date,decreasing=F),]
#get only the top instance (first diagnosis per patient)
bc.cases<-bc.cases[!duplicated(bc.cases$mrn),]
#bc.cases<-bc.cases[duplicated(bc.cases$mrn)==F,] #some people like this alternative to the ! sign
#merge with patients (left join: keep all patients, flag bc cases)
pts.bc<-merge(pts,bc.cases[,c("mrn","bc","diag_date")], by.x="MRN", by.y="mrn", all.x=T, all.y=F)
summary(pts.bc)
#only get cases where diagnosis date occurs after the baseline (we want to keep those without a diagnosis date)
nrow(pts.bc)
pts.bc<-pts.bc[is.na(pts.bc$diag_date) | (pts.bc$diag_date>pts.bc$baseline),]
#Calculate CCI
#https://github.com/bostasie/WFCTSI-Public
library(WFCTSI)
??CCI
pts.bc<-CCI(patients=pts.bc, diagcodes=diagcodes, ID.patients = "MRN",
ID.diagcodes = "mrn", dx.diagcodes = "diag9",
dx.diagcodes2 = "diag10", dx.date = "diag_date",
st.date = "1800-01-01", end.date = "baseline",
weights = "original", method = "quan")
#everyone should have a fake score of at least 2 (diabetes)...
#but this is fake data, fewer codes so it doesn't crash
######
#LABS#
######
#Find most recent creatinine value (will be used to calculate GFR) that occurred within a year prior to baseline
#merge it with demographics
labs<-as.data.frame(bc.data$Labs)
names(labs)<-c("mrn", "visit", "date_time", "lab_code", "lab_desc",
"txt_value", "num_value", "units")
labs$lab_date<-as.Date(as.POSIXlt(labs$date_time, format="%m/%d/%Y %H:%M:%S"))
#view codes
table(labs$lab_code)
creat<-c("CMEP:CREATININE","CREATININE")
creat_labs<-labs[labs$lab_code %in% creat,] #matches exact
#install.packages("lubridate")
library(lubridate)
#install.packages("survival")
library(survival)
#neardate function gives closest date before baseline (or/and after)
indx1<-neardate(pts.bc$MRN, creat_labs$mrn, pts.bc$baseline, creat_labs$lab_date, best= "prior")
temp1<-creat_labs[indx1, c("num_value","units","lab_date")]
pts.bc<-cbind(pts.bc,temp1) #cbind - order must be same for two
names(pts.bc)[names(pts.bc)=="num_value"]<-"creat"
#restrict to labs within one year prior to baseline:
#if the lab date is within one year prior to baseline, use it, otherwise, give it NA
pts.bc$creat_year<-ifelse(pts.bc$lab_date>=(pts.bc$baseline-years(1)), pts.bc$creat,NA)
pts.bc$creat_year<-as.numeric(pts.bc$creat_year)
#how do you test/break it down? Test second row
#pts.bc$lab_date[2]
#pts.bc$baseline[2]
#pts.bc$baseline[2]-years(1)
#pts.bc$lab_date[2]>=(pts.bc$baseline[2]-years(1))
#normal range 0.5-1.2mg/dL, check for outliers
##########
#PRACTICE#
##########
#histogram of pts.bc
#replace outliers with na, anything over 100 for now
pts.bc$creat_year<-ifelse(pts.bc$creat_year>100,NA,pts.bc$creat_year)
######
#MEDS#
######
#identify if patient was on tzd med at baseline (if active), merge information with pts
#In this data set, all meds are presumed active without end date
meds<-as.data.frame(bc.data$Meds)
names(meds)<-c("mrn", "visit", "start", "end",
"ndc_rxnorm", "desc", "txt_val", "num_val", "units")
#format date
meds$date_med<-as.Date(as.POSIXlt(meds$start, format="%m/%d/%Y %H:%M:%S"))
#helpful medication information
#https://mor.nlm.nih.gov/download/rxnav/RxNavDoc.html
#meds list for which to search:
#pioglitazone/Actos
#rosiglitazone/Avandia #search by drug or Rxnorm
#look for med descriptions "like" these names (case-insensitive substring match)
tzdmeds<-c("piogli","rosigli")
meds$tzd<-sapply(1:nrow(meds), function(x) grepl(paste(tzdmeds, collapse="|"), meds[x,"desc"], ignore.case=T))
#does the same thing, but might use the method above in the case for searching for many alternatives
#meds$tzdsame<-sapply(1:nrow(meds), function(x) grepl("pioglit|rosiglit", meds[x,"desc"], ignore.case=T))
table(meds[meds$tzd==T,][,"desc"]) #looks like we got the expected
unique(meds[meds$tzd==F,][,"desc"]) #anything missed?
#get earliest instance and remove everything else
#subset where tzd is T, and do not get NA's
tzdmeds<-meds[!is.na(meds$tzd) & meds$tzd==T,]
tzdmeds<-tzdmeds[order(tzdmeds$date_med,decreasing=F),]
tzdmeds<-tzdmeds[!duplicated(tzdmeds$mrn),]
#neardate function gives closest date before baseline (or/and after)
indx2<-neardate(pts.bc$MRN, tzdmeds$mrn, pts.bc$baseline, tzdmeds$date_med, best= "prior")
temp2<-tzdmeds[indx2, c("tzd", "date_med")]
pts.bc<-cbind(pts.bc,temp2) #cbind - order must be same for two
################
#SMOKING STATUS#
################
#find if patient has ever smoked before (categorize, as prior, never, or missing data) and merge with data
smoke<-as.data.frame(bc.data$Smoking)
names(smoke)<-c("mrn", "visit", "date_time", "history", "desc",
"txt", "num", "unit")
#format date
smoke$date_smoke<-as.Date(as.POSIXlt(smoke$date_time, format="%m/%d/%Y %H:%M:%S"))
unique(smoke$desc)
#only get smoking status before baseline
smoke<-smoke[(!is.na(smoke$date_smoke) & smoke$date_smoke<=as.Date("2015-01-01")),]
#categorize into ever/never/missing
#Decide how to code, unknown and missing category?
#if unknown or missing, assume non-smoking?
#for these purposes we'll code as missing
unique(smoke$desc)
unknown<-"Unknown"
never<-c("Never", "Never Used", "Never Smoker")
ever<-c("Former User","Former Smoker", "Quit", "Yes", "Current Some Day Smoker", "Current User",
"Current Every Day Smoker")
smoke$smoke<-ifelse(smoke$desc %in% never, "never",
ifelse(smoke$desc %in% ever, "smoke",
ifelse(is.na(smoke$desc) | smoke$desc %in% unknown, "missing","oops")))
#anything with an oops means you missed it. You could just stop at the second ifelse statement
#and code all else as missing
#table(smoke$smoke)
#if multiple instances per person, get smoke over never, and never over unknown
#(works because "smoke" > "never" > "missing" alphabetically when sorted decreasing)
smoke<-smoke[order(smoke$smoke,decreasing=T),]
#demonstration of ordering if not alphabetical, missing on bottom, never in middle, smoke on top
#smoke<-smoke[order(smoke$smoke=="missing",smoke$smoke=="never",smoke$smoke=="smoke"),]
#remove duplicates (get first instance on top)
length(unique(smoke$mrn))==nrow(smoke)
smoke<-smoke[!duplicated(smoke$mrn),]
length(unique(smoke$mrn))==nrow(smoke)
pts.bc<-merge(pts.bc,smoke[,c("mrn","smoke")], by.x="MRN", by.y="mrn", all.x=T, all.y=T)
##########################
# SAVE WORK AND SUMMARIZE#
##########################
#we now have diabetes patients with exposure and covariates
#ready for cleaning and cph model
#Save the data frame with which you have been working as an Rdata object
#save(pts.bc,file="~/workshop/pts.bc.Rdata")
#saves all of the objects you have created/loads the same way as above
#save.image("Z:/R/klenoir/pts.bc.workspace.Rdata")
#load that data frame you previously saved
load("~/workshop/pts.bc.Rdata")
#Review Data
summary(pts.bc)
#are there any na's that we need to fix for model? How many are true?
summary(is.na(pts.bc))
#is everything in the format that we want for model?
str(pts.bc)
#reformat a few things
##############
#CLEAN/FORMAT#
##############
#create outcome variable - time to development of bc in days
pts.bc$time.to<-as.numeric(pts.bc$diag_date-pts.bc$baseline)
#fictitious end date due to lack of last follow-up data
#2016-12-28 (last diagnosis date) use 1/1/2017 as end of follow up date
pts.bc$time.to.bc<-ifelse(is.na(pts.bc$time.to),
as.numeric(as.Date("2017-01-01")-pts.bc$baseline), #time difference from when stop following
pts.bc$time.to) #otherwise use the time to diagnosis
#outcome - develop bc (TRUE) or not (FALSE)
pts.bc$bc<-ifelse(pts.bc$bc==F | is.na(pts.bc$bc),F, T)
pts.bc$bc<-as.factor(pts.bc$bc)
#smoking status: NA's become "missing"
pts.bc$smoke<-ifelse(is.na(pts.bc$smoke), "missing", pts.bc$smoke)
pts.bc$smoke<-as.factor(pts.bc$smoke)
#make gender a factor
pts.bc$gender<-as.factor(pts.bc$gender)
#exposure - make factor and NA's become FALSE
pts.bc$tzd<-as.factor(ifelse(pts.bc$tzd==F | is.na(pts.bc$tzd),F, T))
summary(pts.bc)
#missing values for creat_year - impute
########
#IMPUTE#
########
#select variables for imputation
dput(names(pts.bc)) #can be helpful to copy and paste
row.names(pts.bc)<-pts.bc$MRN
pts.bc.preimp<-pts.bc[,c("gender", "race","age", "bc",
"CCIo", "creat_year", "tzd", "smoke",
"time.to.bc")]
#install.packages("mice")
library(mice) #multivariate imp by chained equations
set.seed(454) #allows you to reproduce the imputation
pts.bc.imp<-mice(pts.bc.preimp,10) #10 imputations
#complete with 1st iteration (single imputed data set, for demonstration)
pts.imp<-complete(pts.bc.imp,1)
#model hates attributes of factor variables caused by imputation function
#correct all factors to remove extra attributes
pts.imp[,c("tzd","smoke","bc","gender")]<-lapply(pts.imp[,c("tzd","smoke","bc","gender")],
function(x) factor(x,ordered=F))
#####
#GFR#
#####
#calculate GFR after imputation of creatinine
pts.imp$MRN<-rownames(pts.imp)
gfrset<-pts.imp[,c("MRN","creat_year")]
#just need a place holder date - quirk of function to be fixed
gfrset$date<-as.Date(Sys.Date())
#remove creat_year in pts, another quirk
pts.imp$creat_year<-NULL
#apply gfr function (from the WFCTSI package loaded above)
library(WFCTSI)
pts.imp.gfr<-gfr(patients = pts.imp, labs = gfrset, ID.patients = "MRN",
ID.labs = "MRN", lab.name="none",
ord.value = "creat_year", result.date = "date",
gender = "gender", race = "race", age = "age")
#######
#MODEL#
#######
#When multiple imputations are available,
#we typically pool these sets for the final model.
#for the sake of demonstration, we will use the first imputed set only
#multiple imputed sets are demonstrated below
#rms not working with newest R version
#installed directly from harrell's git hub as the version not yet updated on CRAN
#install.packages("rms")
#install.packages("devtools")
library(devtools)
#install_github("harrelfe/rms") #prompts install of Rtools
library(rms)
dd<-datadist(pts.imp.gfr)
options(datadist='dd')
#Cox model: restricted cubic splines (3 knots) for continuous covariates
#NOTE(review): the lone comma after "+tzd," is an empty positional argument
bc.develop<-cph(formula=Surv(time=time.to.bc, bc=="TRUE") ~ rcs(age, 3) + gender
+ rcs(CCIo, 3) + smoke + rcs(GFR,3)
+tzd, #exposure
,data=pts.imp.gfr, x=T, y=T, surv=T)
#view model
bc.develop
bc.develop$terms #has many parts with $
#Hazard Ratios
summary(bc.develop)
########################
#DESCRIPTIVE STATISTICS#
########################
#install.packages("epiDisplay")
library(epiDisplay)
#examine by tzd exposure
pts.imp.gfr$tzd<-as.character(pts.imp.gfr$tzd)
describebc<-tableStack(dataFrame=pts.imp.gfr,
c(gender, race, age, bc,
CCIo, smoke,
time.to.bc, GFR),
by=tzd, #breakdown by this variable
total.column=TRUE,
var.labels = TRUE,
#means=T,
medians=T,
na.rm =T)
#write descriptive statistics to csv file
#write.csv(x=describebc,file= "~/workshop/describebc_20170508.csv", row.names=T, col.names=T)
######################################
#ADVANCED CPH with MULTIPLE IMPUTATION#
######################################
#add a for loop to utilize all imputed sets
#use pts.bc.imp - imputed data sets created above
#create empty list to hold iterations of cph models
#for each imputed data set
bc.multi<-list()
#create for loop the length of number of data sets
for (i in 1:10){
#complete set i and then complete loop
#complete set i+1 and then complete loop
#and so on
pts.imp<-complete(pts.bc.imp,i)
#reformat all factors to remove attributes generated in mice
#quirk of new R
pts.imp[,c("tzd","smoke","bc","gender")]<-lapply(pts.imp[,c("tzd","smoke","bc","gender")],
function(x) factor(x,ordered=F))
#calculate GFR after imputation of creatinine
#for each imputed set
pts.imp$MRN<-rownames(pts.imp)
gfrset<-pts.imp[,c("MRN","creat_year")]
#just need a place holder date - quirk of function to be fixed
gfrset$date<-as.Date(Sys.Date())
#remove creat_year in pts, another quirk
pts.imp$creat_year<-NULL
#apply gfr function
pts.imp.gfr<-gfr(patients = pts.imp, labs = gfrset, ID.patients = "MRN",
ID.labs = "MRN", lab.name="none",
ord.value = "creat_year", result.date = "date",
gender = "gender", race = "race", age = "age")
#prepare for cph
dd<-datadist(pts.imp.gfr)
options(datadist='dd')
#store model i in the list of 1:10
bc.multi[[i]]<-cph(formula=Surv(time=time.to.bc, bc=="TRUE")
~ rcs(age, 3) + gender
+ rcs(CCIo, 3) + smoke + rcs(GFR,3)
+tzd, #exposure
,data=pts.imp.gfr, x=T, y=T, surv=T)
}
#import pool.mi function from a txt file
Sys.setlocale('LC_ALL','C') #fix warning below with this
source(file="~/workshop/poolMI.txt")
#Warning messages:
#1: In grepl("\n", lines, fixed = TRUE) :
#input string 4 is invalid in this locale # text file you are reading in contains a character that is not available
#pool the 10 fitted models into a single set of estimates
pool.cph<-poolMI(bc.multi)
#hazard ratios
summary(pool.cph)
###############
#VIEWING MODEL#
###############
#view parts of pool.cph
pool.cph$sformula
#view first cph generated from first imputed data set
bc.multi[[1]]
####################################################
#ADVANCED CPH with MULTIPLE IMPUTATION# ALTERNATIVE#
####################################################
#another option for multiple imputed data sets
#is to use the fit.mult.impute function
#quirk is that gfr should be imputed instead of calculated after imputation
#demonstrated without GFR
longdata<-complete(pts.bc.imp, action="long", include=T)
dd<-datadist(longdata)
options(datadist="dd")
patients.imp2<-as.mids(longdata)
fit.mult.impute(formula=Surv(time=time.to.bc, bc=="TRUE")
~ rcs(age, 3) + gender
+ rcs(CCIo, 3) + smoke #+ rcs(GFR,3)
+tzd,
fitter=cph, x=T, y=T, xtrans=patients.imp2)
|
39e7c012762e8e7ba6d70a5a4a69fe90bb136ae1
|
5debcf7061d78d9cfd29372e9d6cb505c166a1d3
|
/statsSensitivity.R
|
604b9d771373e33cc451133dc79de641544149e3
|
[
"MIT"
] |
permissive
|
NadineJac/gaitEEGfootprint
|
481222df5438d49a302b12acf51cf02dabe72a30
|
8dcad94adf409968274342a1f4553db19724e4b6
|
refs/heads/master
| 2023-06-11T10:58:11.958004
| 2023-05-30T09:07:30
| 2023-05-30T09:07:30
| 241,641,737
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,478
|
r
|
statsSensitivity.R
|
# statistical comparisons of the footprint sensitivity analysis
#
# directory: */derivates/footprint/group/results
#
# developed in R (v4.0.1) by Nadine Jacobsen, nadine.jacobsen@uol.de
# June 2020, last revision July 1, 2020

#PATH = file.path("E:", "nadine", "test_footprint_BIDS") # add your path to the BIDS dataset here
PATH = "D:/DATA_D/test_footprint_scripts_PC"

# libraries
library(effsize) # needed for cohens d, if not installed use: install.packages("effsize")
library(xlsx) #save results as excel file, s.a.

# work inside the group-level results folder of the BIDS derivatives
setwd(file.path(PATH, "derivates", "footprint", "group", "results"))

# footprint distances -----------------------------------------------------
# One-sample tests of whole-footprint distances against zero.
# In: footprintDistances.txt (output of "calcFootprintDistances.m")
# Out: "statsSensitvity.xlsx" sheet "footprintDistances"

# load data
distFoot <- read.csv("footprintDistances.txt", T)

# data frame for storing results (one row per comparison plus a note row)
statsDistFoot <- data.frame(
  Comp = double(),
  p.shapiro = double(),
  M = double(),
  SD = double(),
  T.stat = double(),
  df = integer(),
  p = double(),
  p.adj = double(),
  d = double()
)

# columns 3:4 hold the two reported comparisons (raw2ICA, ASR2ICA)
for (var in 3:4) {
  # col 2 (raw2ASR) not reported in manuscript
  # Descriptives
  statsDistFoot[var - 1, "Comp"] <- colnames(distFoot[var])
  statsDistFoot[var - 1, "M"] <- round(mean(distFoot[, var]), 2)
  statsDistFoot[var - 1, "SD"] <- round(sd(distFoot[, var]), 2)
  # assess normality of distances (kept for reference, not run;
  # note the original stored the p-value in "Comp" -- likely meant "p.shapiro")
  # tmp <- shapiro.test(distFoot[,var])
  # statsDistFoot[var-1,"Comp"]<-round(tmp$p.value,3)
  # perform one-sample t-test (one-sided: distances expected > 0)
  StudentModel <-
    t.test(distFoot[, var], mu = 0, alternative = "greater")
  statsDistFoot[var - 1, "T.stat"] <-
    round(StudentModel$statistic, 2)
  statsDistFoot[var - 1, "df"] <- StudentModel$parameter
  statsDistFoot[var - 1, "p"] <- round(StudentModel$p.value, 3)
  # correct for 2 comparisons (only raw2ICA and ASR2ICA reported)
  statsDistFoot[var - 1, "p.adj"] <-
    round(p.adjust(StudentModel$p.value, method = "holm", n = 2), 3)
  # calculate effect size (one-sample Cohen's d: second arg NA)
  CohenD <- cohen.d(distFoot[, var], NA)
  statsDistFoot[var - 1, "d"] <- round(CohenD$estimate, 2)
}

# add note in the row after the last comparison (loop leaves var == 4)
statsDistFoot[var, "Comp"] <-
  "Note. p-value adjusted for 2 comparisons (Bonferroni-Holm)"

# save results
write.xlsx(statsDistFoot,
           "statsSensitivity.xlsx",
           sheetName = "footprintDistances",
           append = T)

# single feature distances ------------------------------------------------
# In: gait_footprint_before/ _after/ _afterASR.txt (output of "calculateFootprint.m")
# Out: several sheets in "statsSensitivity.xlsx"

## descriptives ________________________
# Out: sheets "features before", "features afterASR", "features after"
COND <- c("before", "afterASR", "after")

# set up dfs; desFeatures is overwritten per condition before each write
desFeatures <- data.frame(
  Feature = double(),
  Mdn = double(),
  M = double(),
  SD = double()
)

for (c in 1:3) {
  # did not report comparison raw2ASR
  # load data
  dat1 <-
    read.csv(paste("gait_footprint_", COND[c], ".txt", sep = ""), T)
  # columns 2:8 are the seven footprint features, labelled A-G below
  for (var in 2:8) {
    # descriptives
    # save descriptors
    desFeatures[var-1, "Feature"] <- LETTERS[var-1]
    desFeatures[var-1, "Mdn"] <- round(median(dat1[, var]), 2)
    desFeatures[var-1, "M"] <- round(mean(dat1[, var]), 2)
    desFeatures[var-1, "SD"] <- round(sd(dat1[, var]), 2)
  }
  write.xlsx(
    desFeatures,
    "statsSensitivity.xlsx",
    sheetName = paste("features", COND[c]),
    append = T
  )
}

## stats_______________________________________________________
# Paired comparisons of each feature between processing stages.
# Out: sheets "features raw2ICA", "features ASR2ICA"
COND1 <- c("before", "afterASR", "before")
COND2 <- c("after", "after", "afterASR")
CONDname <- c("raw2ICA", "ASR2ICA", "raw2ASR")

statsFeatures <- data.frame(
  Feature = double(),
  M.diff = double(),
  SD.diff = double(),
  p.shapiro = double(),
  test.stat = double(),
  p = double(),
  p.adj = double(),
  eff.size = double()
)

## if differences are not normally distributed, use Wilcoxon signed rank (dep. samples)
for (c in 1:2) {
  # did not report comparison raw2ASR
  # load data
  dat1 <-
    read.csv(paste("gait_footprint_", COND1[c], ".txt", sep = ""), T)
  dat2 <-
    read.csv(paste("gait_footprint_", COND2[c], ".txt", sep = ""), T)
  for (var in 2:8) {
    # comparisons; descriptive diffs use dat2 - dat1
    statsFeatures[var-1, "Feature"] <- LETTERS[var-1]
    statsFeatures[var-1, "M.diff"] <-
      round(mean(dat2[, var] - dat1[, var]), 2)
    statsFeatures[var-1, "SD.diff"] <-
      round(sd(dat2[, var] - dat1[, var]), 2)
    # assess normality of differences w shapiro-wilk
    # (sign of the difference does not affect the Shapiro-Wilk result)
    tmp <- shapiro.test(dat1[, var] - dat2[, var])
    statsFeatures[var-1, "p.shapiro"] <- round(tmp$p.value, 3)
    if (tmp$p.value < 0.05) {
      # wilcoxon signed rank test, dependent samples
      wilcoxModel <- wilcox.test(dat1[, var], dat2[, var], paired = TRUE)
      # approximate Z from the two-sided p-value; r = Z / sqrt(N)
      Z <- qnorm(wilcoxModel$p.value / 2)
      r <- Z / sqrt(nrow(dat1))
      statsFeatures[var-1, "test.stat"] <-
        round(wilcoxModel$statistic, 2)
      statsFeatures[var-1, "p"] <- round(wilcoxModel$p.value, 3)
      statsFeatures[var-1, "eff.size"] <- round(r, 2)
    } else {
      # two-sided dependent samples student t-test
      StudentModel <- t.test(dat2[, var], dat1[, var], paired = TRUE)
      statsFeatures[var-1, "test.stat"] <-
        round(StudentModel$statistic, 2)
      statsFeatures[var-1, "p"] <- round(StudentModel$p.value, 3)
      # cohen d
      d <- cohen.d(dat2[, var], dat1[, var], paired = TRUE)
      statsFeatures[var-1, "eff.size"] <- round(d$estimate, 2)
    }
  }
  # correct for multiple comparisons (n = 14 since we will report two comparisons of 7 features each)
  # NOTE(review): on the 2nd loop pass the note row's NA p is included;
  # p.adjust propagates the NA, so results are unaffected -- confirm intended.
  statsFeatures$p.adj <-
    p.adjust(statsFeatures$p, method = "holm", n = 14)
  # add note (loop leaves var == 8, i.e. the row after feature G)
  statsFeatures[var, "Feature"] <-
    "Note. two-sided, dependent samples t-test, effect size: Cohen's d, if p-shapiro<.05: dependent samples, Wilcoxon signed-rank test, Effect size: R. p-value adjusted for 14 comparisons (Bonferroni-Holm)"
  # save results
  write.xlsx(
    statsFeatures,
    "statsSensitivity.xlsx",
    sheetName = paste("features", CONDname[c]),
    append = T
  )
}

# clear the workspace (NOTE(review): rm(list = ls()) in a script is an
# anti-pattern -- it also wipes any objects the caller had; confirm needed)
rm(list = ls())
|
d61164eac98089bb1d4238e2849a96d1a5723c37
|
2f60f4273dcf277a9579cc0cb10a5182f59280c6
|
/WEEKS-8_9_R/02-read_data.R
|
1c20c184bddaf0387efb2a3dd91335eeb0f431d0
|
[] |
no_license
|
chirlas24/Master_Data_Science
|
b82bb40c3d51f2a288ec7dc76721a2e4dbaf1f20
|
719fc2c4328478a1ad8fa51232a338ad35b65829
|
refs/heads/master
| 2020-04-08T00:54:28.541418
| 2019-04-28T17:27:30
| 2019-04-28T17:27:30
| 158,873,183
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,743
|
r
|
02-read_data.R
|
##########################################################################
# Jose Cajide - @jrcajide
# Master Data Science: Reading data
##########################################################################

# Install any listed packages that are not already present.
list.of.packages <- c("R.utils", "tidyverse", "doParallel", "foreach", "sqldf")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)

# Base R: Do not run (slow on the large flights file)
# flights <- read.csv("data/flights/2007.csv")

airports <- read.csv("data/airports.csv")

# Reading data ------------------------------------------------------------

# readr: timed read of one year of flights
library(readr)
?read_csv
ptm <- proc.time()
flights <- read_csv('data/flights/2007.csv', progress = T)
proc.time() - ptm
print(object.size(get('flights')), units='auto')

# data.table
# NOTE(review): removes the installed package on purpose so the
# source-install instructions below can be demonstrated -- confirm intended.
remove.packages("data.table")
# Notes:
# http://www.openmp.org/
# https://github.com/Rdatatable/data.table/wiki/Installation
#
# Linux & Mac:
# install.packages("data.table", type = "source", repos = "http://Rdatatable.github.io/data.table")
#
# install.packages("data.table")
library(data.table)
ptm <- proc.time()
flights <- fread("data/flights/2007.csv")
proc.time() - ptm

# Reading multiple files --------------------------------------------------

( data_path <- file.path('data','flights') )
( files <- list.files(data_path, pattern = '*.csv', full.names = T) )
# compare single-threaded vs multi-threaded fread over all yearly files
system.time( flights <- lapply(files, fread) )
system.time( flights <- lapply(files, fread, nThread=4) )

# What is flights? (a list of data.tables, one per file)
class(flights)
flights <- rbindlist(flights)

# Parallel reading --------------------------------------------------------

# library(parallel)
# system.time(flights <- mclapply(files, data.table::fread, mc.cores = 8))

library(doParallel)
registerDoParallel(cores = detectCores() - 1)
library(foreach)
# foreach + %dopar%: read files concurrently, then combine
system.time( flights <- foreach(i = files, .combine = rbind) %dopar% read_csv(i) )
system.time( flights <- data.table::rbindlist(foreach(i = files) %dopar% data.table::fread(i, nThread=8)))
print(object.size(get('flights')), units='auto')
unique(flights$Year)

# Reading big files -------------------------------------------------------

# Some times system commands are faster
system('head -5 data/flights/2008.csv')
readLines("data/flights/2008.csv", n=5)

# Num rows
length(readLines("data/flights/2008.csv")) # Not so big files
nrow(data.table::fread("data/flights/2008.csv", select = 1L, nThread = 2)) # Using fread on the first column

# Reading only what I need: filter rows with SQL while reading
library(sqldf)
jfk <- sqldf::read.csv.sql("data/flights/2008.csv",
                           sql = "select * from file where Dest = 'JFK'")
head(jfk)
# or read only selected columns
data.table::fread("data/flights/2008.csv", select = c("UniqueCarrier","Dest","ArrDelay" ))

# Using other tools (csvkit; paths are machine-specific)
# shell: csvcut ./data/airlines.csv -c Code,Description
data.table::fread('/Library/Frameworks/Python.framework/Versions/2.7/bin/csvcut ./data/airports.csv -c iata,airport' )
# shell: head -n 100 ./data/flights/2007.csv | csvcut -c UniqueCarrier,Dest,ArrDelay | csvsort -r -c 3
data.table::fread('head -n 100 ./data/flights/2007.csv | /Library/Frameworks/Python.framework/Versions/2.7/bin/csvcut -c UniqueCarrier,Dest,ArrDelay | /Library/Frameworks/Python.framework/Versions/2.7/bin/csvsort -r -c 3')

# Dealing with larger than memory datasets
# Using a DBMS: stream a filtered subset into an SQLite table
# sqldf("attach 'flights_db.sqlite' as flights")
# sqldf("DROP TABLE IF EXISTS flights.delays")
read.csv.sql("./data/flights/2008.csv",
             sql = c("attach 'flights_db.sqlite' as flights",
                     "DROP TABLE IF EXISTS flights.delays",
                     "CREATE TABLE flights.delays as SELECT UniqueCarrier, TailNum, ArrDelay FROM file WHERE ArrDelay > 0"),
             filter = "head -n 100000")
db <- dbConnect(RSQLite::SQLite(), dbname='flights_db.sqlite')
dbListTables(db)
delays.df <- dbGetQuery(db, "SELECT UniqueCarrier, AVG(ArrDelay) AS AvgDelay FROM delays GROUP BY UniqueCarrier")
delays.df
unlink("flights_db.sqlite")
dbDisconnect(db)

# Chunks ------------------------------------------------------------------

# read_csv_chunked: filter while reading, 50k rows at a time
library(readr)
f <- function(x, pos) subset(x, Dest == 'JFK')
jfk <- read_csv_chunked("./data/flights/2008.csv",
                        chunk_size = 50000,
                        callback = DataFrameCallback$new(f))

# Importing a file into a DBMS:
db <- DBI::dbConnect(RSQLite::SQLite(), dbname='flights_db.sqlite')
dbListTables(db)
dbWriteTable(db,"jfkflights",jfk) # insert the in-memory df into the database
dbGetQuery(db, "SELECT count(*) FROM jfkflights")
dbRemoveTable(db, "jfkflights")
rm(jfk)

##########################################################################
# Ex: Coding exercise: Using read_csv_chunked, read ./data/flights/2008.csv by chunks while sending data into a RSQLite::SQLite() database
##########################################################################

db <- DBI::dbConnect(RSQLite::SQLite(), dbname='flights_db.sqlite')
# side-effect callback: append each chunk to the "flights" table
writetable <- function(df,pos) {
  dbWriteTable(db,"flights",df,append=TRUE)
}
readr::read_csv_chunked(file="./data/flights/2008.csv", callback=SideEffectChunkCallback$new(writetable), chunk_size = 50000)

# Check: row count in DB matches the file
num_rows <- dbGetQuery(db, "SELECT count(*) FROM flights")
num_rows == nrow(data.table::fread("data/flights/2008.csv", select = 1L, nThread = 2))
dbGetQuery(db, "SELECT * FROM flights LIMIT 6")
dbRemoveTable(db, "flights")
dbDisconnect(db)

# sqlite3 /Users/jose/Documents/GitHub/master_data_science/flights_db.sqlite
# sqlite> .tables
# sqlite> SELECT count(*) FROM flights;

# Basic functions for data frames -----------------------------------------

names(flights)
str(flights)
nrow(flights)
ncol(flights)
dim(flights)
|
9dc2dcaa871234e2b2f7498dce636cf93039e6a0
|
0438fa2503105ab4ac26171bb5c018120007d386
|
/R/oolong_intro.R
|
f780bc63f3bdaa0f5dccf4892648cb3d0f08ca78
|
[] |
no_license
|
bachl/workshop_topicmodels
|
be611105bca4beef63c77c3d64e1e9e511cb19d1
|
7819121d81bc034961cc0ba73471ebb864c84b0b
|
refs/heads/master
| 2022-11-21T08:18:51.878185
| 2020-07-21T09:12:41
| 2020-07-21T09:12:41
| 273,678,089
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,302
|
r
|
oolong_intro.R
|
## ---- oolong-intro
# Human validation of a topic model with the oolong package:
# word intrusion and topic intrusion tests on model 37.
m37 = read_rds("R/data/model37.rds")
out = read_rds("R/data/out.rds")

# Create a validation test
m37_oolong = create_oolong(
  input_model = m37, # model we want to test
  input_corpus = out$meta$txt, # corpus the model is based on; we can take it from "out" for stminsights
  use_frex_words = TRUE, # use FREX features in both tests
  n_top_terms = 5, # number of correct features in the word intrusion test
  difficulty = 0.5, # difficulty of the word intrusion test; 0.5 = frexweight we used for interpretation
  bottom_terms_percentile = 0.4, # definition of the intruder words; here: theta < 0.4
  n_topiclabel_words = 10, # number of features shown as labels in the topic intrusion test
  n_top_topics = 2, # number of top topics shown for a document
  exact_n = 5 # number of documents for the topic intrusion test (alternatively frac for a fraction); code more documents in a real test, only 5 here so the demo does not take too long
)

# Run the tests; completed interactively in the Viewer
m37_oolong$do_word_intrusion_test()
m37_oolong$do_topic_intrusion_test()

# Lock (finish) the test
m37_oolong$lock()

# Test results
m37_oolong_res = m37_oolong %>% summarise_oolong()
m37_oolong_res
|
1e713b0a0c5595ef1d02126e2920cc9984e2a4bc
|
3b6b122a29011054de8dfd7e4fd2b2087be4407c
|
/man/mle_foot.Rd
|
06ba71fc0c254b28e2c5e1bb15971e9973784d12
|
[] |
no_license
|
LeoEgidi/footBayes
|
e0845ec52d934e848af595af87200043391062c1
|
c3c9b3dd49fe2aa75ab379d60f9ac1d8bbbfa3be
|
refs/heads/master
| 2022-12-16T18:45:46.955707
| 2022-12-13T14:36:37
| 2022-12-13T14:36:37
| 219,478,427
| 34
| 5
| null | 2022-11-11T13:24:32
| 2019-11-04T10:47:59
|
R
|
UTF-8
|
R
| false
| true
| 2,647
|
rd
|
mle_foot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mle_foot.R
\name{mle_foot}
\alias{mle_foot}
\title{Fit football models with Maximum Likelihood}
\usage{
mle_foot(data, model, predict, ...)
}
\arguments{
\item{data}{A data frame, or a matrix containing the following mandatory items: season, home team, away team,
home goals, away goals.}
\item{model}{The type of model used to fit the data.
One among the following: \code{"double_pois"},
\code{"biv_pois"}, \code{"skellam"}, \code{"student_t"}.}
\item{predict}{The number of out-of-sample matches. If missing, the function returns
the fit for the training set only.}
\item{...}{Optional arguments for MLE fit algorithms.}
}
\value{
MLE and 95\% profile likelihood deviance confidence intervals for the
model's parameters: attack, defence, home effect and goals' correlation.
}
\description{
ML football modelling for the most famous models:
double Poisson, bivariate Poisson, Skellam and student t.
}
\details{
See documentation of \code{stan_foot} function for model details.
MLE can be obtained only for static models, with no time-dependence.
Likelihood optimization is performed via the \code{BFGS} method
of the \code{\link{optim}} function.
}
\examples{
\donttest{
if(requireNamespace("engsoccerdata")){
require(engsoccerdata)
require(tidyverse)
require(dplyr)
italy <- as_tibble(italy)
italy_2008<- italy \%>\%
dplyr::select(Season, home, visitor, hgoal,vgoal) \%>\%
dplyr::filter( Season=="2008")
mle_fit <- mle_foot(data = italy_2008,
model = "double_pois")
}
}
}
\references{
Baio, G. and Blangiardo, M. (2010). Bayesian hierarchical model for the prediction of football
results. Journal of Applied Statistics 37(2), 253-264.
Egidi, L., Pauli, F., and Torelli, N. (2018). Combining historical data
and bookmakers' odds in modelling football scores. Statistical Modelling, 18(5-6), 436-459.
Gelman, A. (2014). Stan goes to the World Cup. From
"Statistical Modeling, Causal Inference, and Social Science" blog.
Karlis, D. and Ntzoufras, I. (2003). Analysis of sports data by using bivariate poisson models.
Journal of the Royal Statistical Society: Series D (The Statistician) 52(3), 381-393.
Karlis, D. and Ntzoufras,I. (2009). Bayesian modelling of football outcomes: Using
the Skellam's distribution for the goal difference. IMA Journal of Management Mathematics 20(2), 133-145.
Owen, A. (2011). Dynamic Bayesian forecasting models
of football match outcomes with estimation of the
evolution variance parameter. IMA Journal of Management Mathematics, 22(2), 99-113.
}
\author{
Leonardo Egidi \email{legidi@units.it}
}
|
3bc9ed253a3e13d368e8e245a9faa05a6fa66e44
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.serverlessapplicationrepository/man/list_application_dependencies.Rd
|
fcd125a21b0cdadfa483802855b0a25782c042d0
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 965
|
rd
|
list_application_dependencies.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/paws.serverlessapplicationrepository_operations.R
\name{list_application_dependencies}
\alias{list_application_dependencies}
\title{Retrieves the list of applications nested in the containing application}
\usage{
list_application_dependencies(ApplicationId, MaxItems = NULL,
NextToken = NULL, SemanticVersion = NULL)
}
\arguments{
\item{ApplicationId}{[required] The Amazon Resource Name (ARN) of the application.}
\item{MaxItems}{The total number of items to return.}
\item{NextToken}{A token to specify where to start paginating.}
\item{SemanticVersion}{The semantic version of the application to get.}
}
\description{
Retrieves the list of applications nested in the containing application.
}
\section{Accepted Parameters}{
\preformatted{list_application_dependencies(
ApplicationId = "string",
MaxItems = 123,
NextToken = "string",
SemanticVersion = "string"
)
}
}
|
2bbc0877e0a0a1505f60515b6d54c8e0f11fe684
|
30d2ed023fed988d04dbb83e66edba3df96dad74
|
/redd_substrate/redd_substrate_srv.R
|
5de37f43da10c2a6ece9ef03cf80905355266c6f
|
[
"MIT"
] |
permissive
|
arestrom/Chehalis
|
7949a449a0e4ec603b98db72b9fbdefd0c39529a
|
c4d8bfd5c56c2b0b4b58eee3af7eb1a6b47b2695
|
refs/heads/master
| 2023-05-31T05:58:51.247660
| 2021-06-29T17:26:19
| 2021-06-29T17:26:19
| 295,434,189
| 0
| 0
|
MIT
| 2021-05-26T22:11:54
| 2020-09-14T14:03:02
|
HTML
|
UTF-8
|
R
| false
| false
| 20,901
|
r
|
redd_substrate_srv.R
|
#========================================================
# Generate lut select ui's
#========================================================
# Substrate level dropdown: lookup values pulled from the database, with a
# blank first entry so the user can leave the selection empty.
output$substrate_level_select = renderUI({
  req(valid_connection == TRUE)
  level_choices = c("", get_substrate_level(pool)$substrate_level)
  selectizeInput("substrate_level_select",
                 label = "Substrate Level",
                 choices = level_choices,
                 selected = NULL,
                 width = "150px")
})
# Substrate type dropdown: lookup values pulled from the database, with a
# blank first entry so no type is pre-selected.
output$substrate_type_select = renderUI({
  req(valid_connection == TRUE)
  type_choices = c("", get_substrate_type(pool)$substrate_type)
  selectizeInput("substrate_type_select",
                 label = "Substrate Type",
                 choices = type_choices,
                 selected = NULL,
                 width = "150px")
})
#========================================================
# Primary datatable for redd_substrate
#========================================================

# Primary DT datatable for redd substrate records of the selected redd
# encounter; only rendered when a survey, survey event, and redd
# encounter row are all selected on the data entry tab.
output$redd_substrates = renderDT({
  req(input$tabs == "data_entry")
  req(input$surveys_rows_selected)
  req(input$survey_events_rows_selected)
  req(input$redd_encounters_rows_selected)
  req(!is.na(selected_redd_encounter_data()$redd_encounter_id))
  # caption describing species, stream, date, and river-mile span
  start_dt = format(selected_survey_data()$survey_date, "%m/%d/%Y")
  redd_substrate_title = glue("{selected_survey_event_data()$species} redd substrate data for {input$stream_select} on ",
                              "{start_dt} from river mile {selected_survey_data()$up_rm} ",
                              "to {selected_survey_data()$lo_rm}")
  redd_substrate_data = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id) %>%
    select(substrate_level, substrate_type, substrate_pct,
           created_dt, created_by, modified_dt, modified_by)
  # Generate table (single-row selection; styled header)
  datatable(redd_substrate_data,
            colnames = c("Substrate Level", "Substrate Type", "Substrate Percent", "Created Date",
                         "Created By", "Modified Date", "Modified By"),
            selection = list(mode = 'single'),
            options = list(dom = 'ltp',
                           pageLength = 5,
                           lengthMenu = c(1, 5, 10, 20),
                           scrollX = T,
                           initComplete = JS(
                             "function(settings, json) {",
                             "$(this.api().table().header()).css({'background-color': '#9eb3d6'});",
                             "}")),
            caption = htmltools::tags$caption(
              style = 'caption-side: top; text-align: left; color: black; width: auto;',
              htmltools::em(htmltools::strong(redd_substrate_title))))
})

# Create redd substrate DT proxy object, used to refresh the table
# in place after insert/edit/delete without re-rendering it
redd_substrate_dt_proxy = dataTableProxy(outputId = "redd_substrates")
#========================================================
# Collect redd substrate values from selected row for later use
#========================================================

# Reactive holding the full record for the currently selected row of the
# redd substrate datatable; consumed by the update and delete actions.
selected_redd_substrate_data = reactive({
  req(input$tabs == "data_entry")
  req(input$surveys_rows_selected)
  req(input$survey_events_rows_selected)
  req(input$redd_encounters_rows_selected)
  req(input$redd_substrates_rows_selected)
  req(!is.na(selected_redd_encounter_data()$redd_encounter_id))
  redd_substrate_data = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id)
  redd_substrate_row = input$redd_substrates_rows_selected
  # NOTE(review): the datatable above selects created_dt / modified_dt but
  # this reactive reads created_date / modified_date -- confirm that
  # get_redd_substrate() returns columns under both spellings.
  selected_redd_substrate = tibble(redd_substrate_id = redd_substrate_data$redd_substrate_id[redd_substrate_row],
                                   substrate_level = redd_substrate_data$substrate_level[redd_substrate_row],
                                   substrate_type = redd_substrate_data$substrate_type[redd_substrate_row],
                                   substrate_pct = redd_substrate_data$substrate_pct[redd_substrate_row],
                                   created_date = redd_substrate_data$created_date[redd_substrate_row],
                                   created_by = redd_substrate_data$created_by[redd_substrate_row],
                                   modified_date = redd_substrate_data$modified_date[redd_substrate_row],
                                   modified_by = redd_substrate_data$modified_by[redd_substrate_row])
  return(selected_redd_substrate)
})

#========================================================
# Update select inputs to values in selected row
#========================================================

# Sync the three entry widgets to the values of the clicked table row.
observeEvent(input$redd_substrates_rows_selected, {
  srsdat = selected_redd_substrate_data()
  updateSelectizeInput(session, "substrate_level_select", selected = srsdat$substrate_level)
  updateSelectizeInput(session, "substrate_type_select", selected = srsdat$substrate_type)
  updateNumericInput(session, "substrate_pct_input", value = srsdat$substrate_pct)
})
#========================================================
# Insert operations: reactives, observers and modals
#========================================================

# Disable the "New" button once four rows of substrate exist for the
# selected encounter -- the lookup table only has four categories.
observe({
  req(!is.na(selected_redd_encounter_data()$redd_encounter_id))
  # referenced only to take reactive dependencies so the button state
  # re-evaluates after every insert or delete
  input$insert_redd_substrate
  input$delete_redd_substrate
  redd_substrate_data = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id)
  if (nrow(redd_substrate_data) >= 4L) {
    shinyjs::disable("substrate_add")
  } else {
    shinyjs::enable("substrate_add")
  }
})
# Reactive collecting the current widget values as a one-row tibble staged
# for insert; blank dropdown selections map to NA lookup ids.
redd_substrate_create = reactive({
  req(input$tabs == "data_entry")
  req(input$surveys_rows_selected)
  req(input$survey_events_rows_selected)
  req(input$redd_encounters_rows_selected)
  req(!is.na(selected_redd_encounter_data()$redd_encounter_id))
  # Redd_encounter_id the new row will be attached to
  redd_encounter_id_input = selected_redd_encounter_data()$redd_encounter_id
  # Substrate level: resolve display value to its lookup id
  substrate_level_input = input$substrate_level_select
  if ( substrate_level_input == "" ) {
    substrate_level_id = NA
  } else {
    substrate_level_vals = get_substrate_level(pool)
    substrate_level_id = substrate_level_vals %>%
      filter(substrate_level == substrate_level_input) %>%
      pull(substrate_level_id)
  }
  # Substrate_type: resolve display value to its lookup id
  substrate_type_input = input$substrate_type_select
  if ( substrate_type_input == "" ) {
    substrate_type_id = NA
  } else {
    substrate_type_vals = get_substrate_type(pool)
    substrate_type_id = substrate_type_vals %>%
      filter(substrate_type == substrate_type_input) %>%
      pull(substrate_type_id)
  }
  # audit columns: UTC timestamp and OS user name
  new_redd_substrate = tibble(redd_encounter_id = redd_encounter_id_input,
                              substrate_level = substrate_level_input,
                              substrate_level_id = substrate_level_id,
                              substrate_type = substrate_type_input,
                              substrate_type_id = substrate_type_id,
                              substrate_pct = input$substrate_pct_input,
                              created_dt = lubridate::with_tz(Sys.time(), "UTC"),
                              created_by = Sys.getenv("USERNAME"))
  return(new_redd_substrate)
})
# Preview table of the staged insert values, shown inside the confirm modal
output$redd_substrate_modal_insert_vals = renderDT({
  redd_substrate_modal_in_vals = redd_substrate_create() %>%
    select(substrate_level, substrate_type, substrate_pct)
  # Generate table (static: no paging, sorting, or row names)
  datatable(redd_substrate_modal_in_vals,
            rownames = FALSE,
            options = list(dom = 't',
                           scrollX = T,
                           ordering = FALSE,
                           initComplete = JS(
                             "function(settings, json) {",
                             "$(this.api().table().header()).css({'background-color': '#9eb3d6'});",
                             "}")))
})

# Modal for new redd substrate rows: validates the staged values and, if
# all checks pass, offers the insert button. Checks: all fields filled,
# no duplicate level, no duplicate type, total percent <= 100.
observeEvent(input$substrate_add, {
  new_redd_substrate_vals = redd_substrate_create()
  new_level = redd_substrate_create()$substrate_level
  existing_substrate = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id)
  old_levels = existing_substrate$substrate_level
  existing_pct = sum(existing_substrate$substrate_pct)
  new_substrate_pct = existing_pct + new_redd_substrate_vals$substrate_pct
  new_type = redd_substrate_create()$substrate_type
  old_types = existing_substrate$substrate_type
  showModal(
    tags$div(id = "redd_substrate_insert_modal",
             # Verify required fields have data...none can be blank
             if ( is.na(new_redd_substrate_vals$substrate_level) |
                  is.na(new_redd_substrate_vals$substrate_type) |
                  is.na(new_redd_substrate_vals$substrate_pct) ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 paste0("Values are required in all fields"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # Verify no levels are repeated
             } else if ( new_level %in% old_levels ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 glue("The '{new_level}' substrate level has already been entered. Please select a different substrate level"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # Verify no substrate types are repeated
             } else if ( new_type %in% old_types ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 glue("The '{new_type}' substrate type has already been entered. Please select a different substrate type"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # Verify substrate pct does not exceed 100
             } else if ( new_substrate_pct > 100 ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 glue("The substrate_percent total exceeds 100%. Please select a different value or edit previous values"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # All checks passed: show confirmation dialog with preview table
             } else {
               modalDialog (
                 size = 'l',
                 title = glue("Insert new redd substrate data to the database?"),
                 fluidPage (
                   DT::DTOutput("redd_substrate_modal_insert_vals"),
                   br(),
                   br(),
                   actionButton("insert_redd_substrate", "Insert substrate data")
                 ),
                 easyClose = TRUE,
                 footer = NULL
               )
             }
    ))
})
# Reactive exposing only the columns of the staged record that map
# directly onto database fields for the insert statement.
redd_substrate_insert_vals = reactive({
  select(redd_substrate_create(),
         redd_encounter_id, substrate_level_id, substrate_type_id,
         substrate_pct, created_by)
})
# Write the new record to the database, toast success or failure, close
# the modal, and refresh the datatable through its proxy.
observeEvent(input$insert_redd_substrate, {
  tryCatch({
    redd_substrate_insert(pool, redd_substrate_insert_vals())
    shinytoastr::toastr_success("New substrate data was added")
  }, error = function(e) {
    shinytoastr::toastr_error(title = "Database error", conditionMessage(e))
  })
  removeModal()
  post_redd_substrate_insert_vals = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id) %>%
    select(substrate_level, substrate_type, substrate_pct,
           created_dt, created_by, modified_dt, modified_by)
  replaceData(redd_substrate_dt_proxy, post_redd_substrate_insert_vals)
}, priority = 99999)
#========================================================
# Edit operations: reactives, observers and modals
#========================================================

# Reactive collecting the current widget values as a one-row tibble staged
# for update of the selected row; blank dropdowns map to NA lookup ids.
redd_substrate_edit = reactive({
  req(input$tabs == "data_entry")
  req(input$surveys_rows_selected)
  req(input$survey_events_rows_selected)
  req(input$redd_encounters_rows_selected)
  req(!is.na(selected_redd_substrate_data()$redd_substrate_id))
  # Redd_substrate_id of the row being edited
  redd_substrate_id_input = selected_redd_substrate_data()$redd_substrate_id
  # Substrate level: resolve display value to its lookup id
  substrate_level_input = input$substrate_level_select
  if ( substrate_level_input == "" ) {
    substrate_level_id = NA
  } else {
    substrate_level_vals = get_substrate_level(pool)
    substrate_level_id = substrate_level_vals %>%
      filter(substrate_level == substrate_level_input) %>%
      pull(substrate_level_id)
  }
  # Substrate_type: resolve display value to its lookup id
  substrate_type_input = input$substrate_type_select
  if ( substrate_type_input == "" ) {
    substrate_type_id = NA
  } else {
    substrate_type_vals = get_substrate_type(pool)
    substrate_type_id = substrate_type_vals %>%
      filter(substrate_type == substrate_type_input) %>%
      pull(substrate_type_id)
  }
  # audit columns: UTC timestamp and OS user name
  edit_redd_substrate = tibble(redd_substrate_id = redd_substrate_id_input,
                               substrate_level = substrate_level_input,
                               substrate_level_id = substrate_level_id,
                               substrate_type = substrate_type_input,
                               substrate_type_id = substrate_type_id,
                               substrate_pct = input$substrate_pct_input,
                               modified_dt = lubridate::with_tz(Sys.time(), "UTC"),
                               modified_by = Sys.getenv("USERNAME"))
  return(edit_redd_substrate)
})

# Preview table of the staged update values, shown inside the confirm modal
output$redd_substrate_modal_update_vals = renderDT({
  redd_substrate_modal_up_vals = redd_substrate_edit() %>%
    select(substrate_level, substrate_type, substrate_pct)
  # Generate table (static: no paging, sorting, or row names)
  datatable(redd_substrate_modal_up_vals,
            rownames = FALSE,
            options = list(dom = 't',
                           scrollX = T,
                           ordering = FALSE,
                           initComplete = JS(
                             "function(settings, json) {",
                             "$(this.api().table().header()).css({'background-color': '#9eb3d6'});",
                             "}")))
})
# Edit modal: validates the staged edit against the selected row and the
# other existing rows. Checks: exactly one row selected, at least one
# value changed, no duplicate type, total percent <= 100.
observeEvent(input$substrate_edit, {
  old_redd_substrate_vals = selected_redd_substrate_data() %>%
    select(substrate_level, substrate_type, substrate_pct)
  current_type = old_redd_substrate_vals$substrate_type
  # set_na normalizes values so old/new rows compare cleanly
  old_redd_substrate_vals[] = lapply(old_redd_substrate_vals, set_na)
  new_redd_substrate_vals = redd_substrate_edit() %>%
    mutate(substrate_pct = as.integer(substrate_pct)) %>%
    select(substrate_level, substrate_type, substrate_pct)
  new_redd_substrate_vals[] = lapply(new_redd_substrate_vals, set_na)
  existing_substrate = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id)
  # percent total excludes the row being edited, then adds its new value
  existing_pct = sum(existing_substrate$substrate_pct) - old_redd_substrate_vals$substrate_pct
  new_substrate_pct = existing_pct + new_redd_substrate_vals$substrate_pct
  new_type = redd_substrate_edit()$substrate_type
  old_types = existing_substrate$substrate_type
  showModal(
    tags$div(id = "redd_substrate_update_modal",
             if ( !length(input$redd_substrates_rows_selected) == 1 ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 paste("Please select a row to edit!"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # Verify at least one value has been edited
             } else if ( isTRUE(all_equal(old_redd_substrate_vals, new_redd_substrate_vals)) ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 paste("Please change at least one value!"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # Verify no substrate types are repeated (the row's own
               # current type is allowed to stay the same)
             } else if ( !new_type == current_type & new_type %in% old_types ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 glue("The '{new_type}' substrate type has already been entered. Please select a different substrate type"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # Verify substrate pct does not exceed 100
             } else if ( new_substrate_pct > 100 ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 glue("The substrate_percent total exceeds 100%. Please select a different value or edit previous values"),
                 easyClose = TRUE,
                 footer = NULL
               )
               # All checks passed: confirmation dialog with preview table
             } else {
               modalDialog (
                 size = 'l',
                 title = "Update individual redd data to these new values?",
                 fluidPage (
                   DT::DTOutput("redd_substrate_modal_update_vals"),
                   br(),
                   br(),
                   actionButton("save_substrate_edits", "Save changes")
                 ),
                 easyClose = TRUE,
                 footer = NULL
               )
             }
    ))
})

# Apply the update to the database, toast the outcome, close the modal,
# and refresh the datatable through its proxy.
observeEvent(input$save_substrate_edits, {
  tryCatch({
    redd_substrate_update(pool, redd_substrate_edit())
    shinytoastr::toastr_success("Substrate data was edited")
  }, error = function(e) {
    shinytoastr::toastr_error(title = "Database error", conditionMessage(e))
  })
  removeModal()
  post_redd_substrate_edit_vals = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id) %>%
    select(substrate_level, substrate_type, substrate_pct,
           created_dt, created_by, modified_dt, modified_by)
  replaceData(redd_substrate_dt_proxy, post_redd_substrate_edit_vals)
}, priority = 9999)
#========================================================
# Delete operations: reactives, observers and modals
#========================================================

# Preview table of the row about to be deleted, shown inside the modal
output$redd_substrate_modal_delete_vals = renderDT({
  redd_substrate_modal_del_id = selected_redd_substrate_data()$redd_substrate_id
  redd_substrate_modal_del_vals = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id) %>%
    filter(redd_substrate_id == redd_substrate_modal_del_id) %>%
    select(substrate_level, substrate_type, substrate_pct)
  # Generate table (static: no paging, sorting, or row names)
  datatable(redd_substrate_modal_del_vals,
            rownames = FALSE,
            options = list(dom = 't',
                           scrollX = T,
                           ordering = FALSE,
                           initComplete = JS(
                             "function(settings, json) {",
                             "$(this.api().table().header()).css({'background-color': '#9eb3d6'});",
                             "}")))
})

# Delete modal: warn if no row is selected, otherwise ask for confirmation
observeEvent(input$substrate_delete, {
  redd_substrate_id = selected_redd_substrate_data()$redd_substrate_id
  showModal(
    tags$div(id = "redd_substrate_delete_modal",
             if ( length(redd_substrate_id) == 0L ) {
               modalDialog (
                 size = "m",
                 title = "Warning",
                 glue("Please select a row to delete"),
                 easyClose = TRUE,
                 footer = NULL
               )
             } else {
               modalDialog (
                 size = 'l',
                 title = "Are you sure you want to delete this redd substrate data from the database?",
                 fluidPage (
                   DT::DTOutput("redd_substrate_modal_delete_vals"),
                   br(),
                   br(),
                   actionButton("delete_redd_substrate", "Delete substrate data")
                 ),
                 easyClose = TRUE,
                 footer = NULL
               )
             }
    ))
})

# Perform the delete, toast the outcome, close the modal, and refresh the
# datatable through its proxy.
observeEvent(input$delete_redd_substrate, {
  tryCatch({
    redd_substrate_delete(pool, selected_redd_substrate_data())
    shinytoastr::toastr_success("Substrate data was deleted")
  }, error = function(e) {
    shinytoastr::toastr_error(title = "Database error", conditionMessage(e))
  })
  removeModal()
  redd_substrates_after_delete = get_redd_substrate(pool, selected_redd_encounter_data()$redd_encounter_id) %>%
    select(substrate_level, substrate_type, substrate_pct,
           created_dt, created_by, modified_dt, modified_by)
  replaceData(redd_substrate_dt_proxy, redd_substrates_after_delete)
})
|
ddbf033063698068e96205662514c93bac74c2a5
|
158782c06de5cf63cb4ded26991acdea475894da
|
/R/ClassificationTreeScript.R
|
6c59814d9f30d7a5680a907e53cda18e7e98023e
|
[] |
no_license
|
jackmoorer/Project
|
459bda911bd6a46c637f6935f09469f11ab00264
|
9d48f93fdd113c35a6d79f3422ff5b024f51d800
|
refs/heads/master
| 2021-03-24T09:33:16.049563
| 2017-12-12T05:29:21
| 2017-12-12T05:29:21
| 112,793,017
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,993
|
r
|
ClassificationTreeScript.R
|
#Title: Classification Tree Script
#Discription: Run code for Classification Tree part of project
#load packages
library(ggplot2)
library(tree)
library(ROCR)
#read in clean data
train <- read.csv("../data/clean_train.csv", header = TRUE)
#remove numeric predictor
train_tree <- train[,-ncol(train)]
#intialize basic classification tree
classification_tree <- tree(Over50k ~., data = train_tree)
#cross validation based on prune.misclass
set.seed(100)
classification_tree_cv <- cv.tree(classification_tree, FUN = prune.misclass)
#report cross validation for prune.misclass
sink("../output/training_results/cv-prune-misclass-classification-tree.txt")
print(classification_tree_cv)
sink()
#prepare cv plots
Size <- classification_tree_cv$size
K <- classification_tree_cv$k
Dev <- classification_tree_cv$dev
Misclass <- data.frame(Size, K, Dev)
#report cv plot for size
pdf("../images/training_plots/cv-prine-misclass-size-vs-error.pdf")
ggplot(data = Misclass, aes(x = Size, y = Dev)) + geom_point() + geom_line() + ggtitle("Size of Tree vs Error for CV Misclass")
dev.off()
#report cv plot for cost complexicity
pdf("../images/training_plots/cv-prine-misclass-k-vs-error.pdf")
ggplot(data = Misclass, aes(x = K, y = Dev)) + geom_point() + geom_line() + ggtitle("Cost-Complexity vs Error for CV Misclass")
dev.off()
#use default method for cv
set.seed(200)
classification_tree_cv_default <- cv.tree(classification_tree, FUN = prune.tree)
#report cross validation for prune.misclass
sink("../output/training_results/cv-prune-tree-classification-tree.txt")
print(classification_tree_cv_default)
sink()
#prepare cv plots
Size <- classification_tree_cv_default$size
K <- classification_tree_cv_default$k
Dev <- classification_tree_cv_default$dev
default <- data.frame(Size, K, Dev)
#report cv plot for size
pdf("../images/training_plots/cv-prine-tree-size-vs-error.pdf")
ggplot(data = default, aes(x = Size, y = Dev)) + geom_point() + geom_line() + ggtitle("Size of Tree vs Error for CV Default Method")
dev.off()
#report cv plot for cost complexicity
pdf("../images/training_plots/cv-prine-tree-k-vs-error.pdf")
ggplot(data = default, aes(x = K, y = Dev)) + geom_point() + geom_line() + ggtitle("Cost-Complexity vs Error for Default Method")
dev.off()
#get hyper-parameter size
names <- classification_tree_cv_default$size
values <- classification_tree_cv_default$dev
names(values) <- names
size <- as.numeric(names(which.min(values)))
#build tree
set.seed(4)
prune_classification_tree <- prune.misclass(classification_tree, best = size)
#report results of tree
sink("../output/training_results/pruned-classification-tree.txt")
print(prune_classification_tree)
print(" ")
print(summary(prune_classification_tree))
sink()
#show classification tree plot
pdf("../images/training_plots/classification-tree-plot.pdf")
plot(prune_classification_tree)
text(prune_classification_tree, pretty = 0)
dev.off()
#build confusion matrix
real_train <- train_tree$Over50k
train_preds <- predict(prune_classification_tree, train_tree, type = "class")
#report confusion matrix
sink("../output/training_results/classification-tree-train-confusion-matrix.txt")
print(table(train_preds, real_train))
sink()
#get error rate
err_rate <- mean(train_preds != real_train)
#report error rate
sink("../output/training_results/train-error-rate-classification-tree.txt")
print("Train Error Rate for Classification Tree")
print(err_rate)
sink()
#prepare roc
train_probs <- predict(prune_classification_tree, train_tree)
train_prediction <- prediction(train_probs[,2], real_train)
train_performance <- performance(train_prediction, measure = "tpr", x.measure = "fpr")
pdf("../images/training_plots/train-ROC-classificaiotn-tree.pdf")
plot(train_performance, main = "Train ROC Curve for Classification Tree")
abline(a=0, b=1, lty=2)
dev.off()
#get auc
auc <- performance(train_prediction, measure="auc")@y.values[[1]]
#report auc
sink("../output/training_results/classification-tree-train-auc.txt")
print("Train AUC for Classifcation Tree")
print(auc)
sink()
#This part of the script deals with Test Performance
#read in data
test <- read.csv("../data/clean_test.csv", header = TRUE)
#prepare data
test_tree <- test[, -ncol(test)]
test_tree_preds <- test_tree[, -ncol(test_tree)]
real_test <- test_tree$Over50k
#get test error rate
ct_test_preds <- predict(prune_classification_tree, test_tree_preds, type = "class")
test_err_rate <- mean(ct_test_preds != real_test)
#report test error rate
sink("../output/test_results/classificaton-tree-test-error-rate.txt")
print("Classification tree test error rate")
print(test_err_rate)
sink()
#create test confusion matrix
confusionMatrix <- table(ct_test_preds, real_test)
#report test confusion matrix
sink("../output/test_results/classification-tree-test-confusion-matrix.txt")
print("Classification Tree Confusion Matrix")
print(confusionMatrix )
sink()
#get sensitivity and specificity
sensitivity <- confusionMatrix[2, 2]/(confusionMatrix[2, 2] + confusionMatrix[1, 2])
specificity <- confusionMatrix[1, 1]/(confusionMatrix[1, 1] + confusionMatrix[2, 1])
#report sensitivity and specificity
sink("../output/test_results/classification-tree-sensitivity-specificity.txt")
print("Classification Tree Sensitivity:")
print(sensitivity)
print("Classification Tree Specificity:")
print(specificity)
sink()
#prepare roc cruve
ct_test_probs <- predict(prune_classification_tree, test_tree_preds)
ct_test_prediction <- prediction(ct_test_probs[,2], real_test)
ct_test_performance <- performance(ct_test_prediction, measure = "tpr", x.measure = "fpr")
#plot roc
pdf("../images/test_plots/classification-tree-test-roc-curve.pdf")
plot(ct_test_performance, main="Test ROC Classification Tree")
abline(a=0, b=1, lty=2)
dev.off()
#get test auc
test_auc <- performance(ct_test_prediction, measure="auc")@y.values[[1]]
#report test auc
# FIX: write into ../output/test_results/ like every other test artifact in
# this script (error rate, confusion matrix, sensitivity/specificity);
# previously this single file landed in ../output/ directly.
sink("../output/test_results/classification-tree-test-auc.txt")
print("Classification Test AUC")
print(test_auc)
sink()
|
474dc4784cf5f772e624f4615062980d0c203341
|
dcaf3f2986f96a68f9f7f351a9be2ff31f37acbf
|
/server.r
|
183863babf347f9259e6a2508b999e0d5a19d8dd
|
[] |
no_license
|
glesica/exploring-phillips
|
de2aa9dab834dda48490220441fecbf43a2a26f9
|
61671d2b336892a954c597006cf0e767a4dae4aa
|
refs/heads/master
| 2016-09-06T03:10:46.463617
| 2013-09-08T18:19:09
| 2013-09-08T18:19:09
| 12,630,982
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
server.r
|
# Shiny server for the Phillips-curve explorer.  One reactive holds the
# dataset filtered to the user's year range / cluster count; every output
# (scatter plot, two histograms, CSV download) derives from it.
shinyServer(function(input, output) {
  # Dataset restricted to the selected years and cluster count
  phillips_data <- reactive({
    load.phillips(
      input$yrs[1], input$yrs[2],
      clusters = input$clusters, df = full.df
    )
  })

  # Inflation vs. unemployment scatter with optional lag and labels
  output$phillipsPlot <- renderPlot({
    plot.phillips(phillips_data(), lag = input$lag, labs = input$labs)
  })

  # Helper: histogram of one column of the current dataset
  draw_hist <- function(column, label) {
    hist(phillips_data()[[column]], xlab = label,
         main = paste("Histogram of", label))
  }

  output$inflationHist <- renderPlot(draw_hist("Inflation", "Inflation Rate"))
  output$unemploymentHist <- renderPlot(draw_hist("Unemployment", "Unemployment Rate"))

  # Download of the currently filtered dataset as CSV
  output$downloadData <- downloadHandler(
    filename = "exploring_phillips.csv",
    content = function(file) write.csv(phillips_data(), file)
  )
})
|
b1421a96d778c95be069dc880aee81620b79473b
|
47a8dff9177da5f79cc602c6d7842c0ec0854484
|
/man/CellSelector.Rd
|
23713204f4c7c14afa46740ad1f59cd82bddef79
|
[
"MIT"
] |
permissive
|
satijalab/seurat
|
8949973cc7026d3115ebece016fca16b4f67b06c
|
763259d05991d40721dee99c9919ec6d4491d15e
|
refs/heads/master
| 2023-09-01T07:58:33.052836
| 2022-12-05T22:49:37
| 2022-12-05T22:49:37
| 35,927,665
| 2,057
| 1,049
|
NOASSERTION
| 2023-09-01T19:26:02
| 2015-05-20T05:23:02
|
R
|
UTF-8
|
R
| false
| true
| 1,248
|
rd
|
CellSelector.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{CellSelector}
\alias{CellSelector}
\alias{FeatureLocator}
\title{Cell Selector}
\usage{
CellSelector(plot, object = NULL, ident = "SelectedCells", ...)
FeatureLocator(plot, ...)
}
\arguments{
\item{plot}{A ggplot2 plot}
\item{object}{An optional Seurat object; if passed, will return an object
with the identities of selected cells set to \code{ident}}
\item{ident}{An optional new identity class to assign the selected cells}
\item{...}{Ignored}
}
\value{
If \code{object} is \code{NULL}, the names of the points selected;
otherwise, a Seurat object with the selected cells identity classes set to
\code{ident}
}
\description{
Select points on a scatterplot and get information about them
}
\examples{
\dontrun{
data("pbmc_small")
plot <- DimPlot(object = pbmc_small)
# Follow instructions in the terminal to select points
cells.located <- CellSelector(plot = plot)
cells.located
# Automatically set the identity class of selected cells and return a new Seurat object
pbmc_small <- CellSelector(plot = plot, object = pbmc_small, ident = 'SelectedCells')
}
}
\seealso{
\code{\link{DimPlot}} \code{\link{FeaturePlot}}
}
\concept{visualization}
|
b453fd14cea1a83d348976a8f298fc324b06284f
|
d474efb74fd5268fd908a2a5394a8ecc97e28f3b
|
/R/git/medips.R
|
a62420c4ad5e703e4e4cdcd5aa72d88670af2681
|
[] |
no_license
|
bradleycolquitt/seqAnalysis
|
e1f2fbefa867ee11a51801aeeaf57ebc357a0057
|
d2c37fb0609754a0ec4e263dda27681717087523
|
refs/heads/master
| 2021-01-01T05:42:03.060788
| 2017-05-28T02:47:58
| 2017-05-28T02:47:58
| 2,284,790
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,776
|
r
|
medips.R
|
library(MEDIPS)
library(BSgenome.Mmusculus.UCSC.mm9)
# Convert a MEDIPSset into a GRanges object restricted to a fixed set of
# mouse chromosomes (chr1-chr18, chrX, chrY).
#
# Args:
#   medips.object: a MEDIPSset (MEDIPS package) holding binned genome data.
# Returns:
#   A GRanges with one range per retained bin (width = bin_size) and the
#   raw read counts attached as the 'reads' metadata column.
# NOTE(review): mm9 has 19 autosomes but the filter stops at chr18 (compare
# createEmptyMedips below, which uses 1:19) -- confirm chr19 exclusion is
# intentional.
medipsToGRanges <- function(medips.object) {
  chrs <- c(paste("chr", 1:18, sep=""), "chrX", "chrY")
  which.idx <- genome_chr(medips.object) %in% chrs
  genome.chr <- Rle(genome_chr(medips.object)[which.idx])
  genome.rng <- IRanges(start=genome_pos(medips.object)[which.idx], width=bin_size(medips.object))
 # seqlen <- as.vector(chr_lengths(medips.object)[chr_names(medips.object) %in% chrs], mode='integer'
  return(GRanges(seqnames=genome.chr, ranges=genome.rng,
                 reads=genome_raw(medips.object)[which.idx]))
}
# Load an aligned-read file and run the standard MEDIPS preprocessing
# pipeline against mm9: binning, CpG position lookup, coupling-vector
# computation, calibration, and normalization.
#
# Args:
#   fname:    path to an aligned-sequence file readable by MEDIPS.
#   extend:   read extension length in bp (default 350).
#   bin.size: genomic bin size in bp (no default; caller must supply).
# Returns:
#   A calibrated and normalized MEDIPSset.
loadMedips <- function(fname, extend = 350, bin.size) {
  d <- MEDIPS.readAlignedSequences(BSgenome = "BSgenome.Mmusculus.UCSC.mm9", file=fname)
  d <- MEDIPS.genomeVector(data=d, bin_size=bin.size, extend=extend)
  d <- MEDIPS.getPositions(data=d, pattern="CG")
  d <- MEDIPS.couplingVector(data=d, fragmentLength=700, func="count")
  d <- MEDIPS.calibrationCurve(data=d)
  d <- MEDIPS.normalize(data=d)
  return(d)
}
# Build an empty MEDIPSset for mm9 carrying only genome metadata
# (chromosome names and lengths) -- no read or normalization data.
#
# Args:
#   bin.size: bin size in bp.  NOTE(review): currently unused in the body;
#             confirm whether it should be stored on the returned object.
# Returns:
#   A new 'MEDIPSset' with genome_name, chr_names and chr_lengths set.
createEmptyMedips <- function(bin.size){
  BSgenome <- "BSgenome.Mmusculus.UCSC.mm9"
  chromosomes <- paste("chr",c(1:19,"X","Y"),sep="")
  # Look up the genome data object exported by the (already attached)
  # BSgenome package
  dataset <- get(ls(paste("package:",BSgenome,sep="")))
  chr_lengths=as.numeric(sapply(chromosomes, function(x){as.numeric(length(dataset[[x]]))}))
  d <- new('MEDIPSset',genome_name=BSgenome,chr_names=chromosomes,chr_lengths=chr_lengths)
  return(d)
}
# Render a MEDIPS calibration plot for one chromosome to a PNG file
# (via the GDD graphics device).
#
# Args:
#   dip.data: a calibrated MEDIPSset.
#   fname:    output PNG path.
#   chr:      chromosome to plot (e.g. "chr1").
plotCalibrateDip <- function(dip.data, fname, chr) {
  GDD(file=fname, type="png", width=1200, height=900)
  MEDIPS.plotCalibrationPlot(data=dip.data, linear=T, plot_chr=chr)
  dev.off()
}
# Run the MEDIPS coverage analysis and write the coverage plot to a PNG.
# Side effect note: the analysis result is folded back into dip.data and
# returned, so callers should capture the return value to keep it.
#
# Args:
#   dip.data: a MEDIPSset.
#   fname:    output PNG path.
# Returns:
#   dip.data augmented with the coverage-analysis results.
plotCoverageAnalysis <- function(dip.data, fname) {
  GDD(file=fname, type="png", width=1600, height=1200)
  dip.data <- MEDIPS.coverageAnalysis(data=dip.data, extend=350, no_iterations=10)
  MEDIPS.plotCoverage(dip.data)
  dev.off()
  return(dip.data)
}
# Restrict a MEDIPSset to the chromosomes listed in a region-of-interest
# table.
#
# Args:
#   medips.obj: a MEDIPSset.
#   roi:        a data frame (or list) whose $chr component names the
#               chromosomes to keep.
# Returns:
#   The MEDIPSset with its per-bin genome vectors, chromosome names and
#   lengths subset to the ROI chromosomes; returned unchanged when
#   chr_names already equals roi$chr exactly.
subsetMedipsByROI <- function(medips.obj, roi) {
  # Only subset when the chromosome sets differ
  if (!identical(chr_names(medips.obj), roi$chr)) {
    idx <- which(genome_chr(medips.obj) %in% roi$chr)
    if (length(idx) == 0) {
      stop("No chr in common")
    }
    medips.obj@chr_lengths <- chr_lengths(medips.obj)[chr_names(medips.obj) %in% roi$chr]
  } else {
    # Sentinel: NULL idx means "nothing to do" below
    idx = NULL
  }
  if (!is.null(idx)) {
    # Subset all per-bin genome vectors in lockstep with the same index
    medips.obj@genome_chr <- genome_chr(medips.obj)[idx]
    medips.obj@genome_pos <- genome_pos(medips.obj)[idx]
    medips.obj@genome_raw <- genome_raw(medips.obj)[idx]
    medips.obj@genome_norm <- genome_norm(medips.obj)[idx]
    # chr_names rebuilt from the already-subset genome_chr vector
    medips.obj@chr_names <- unique(genome_chr(medips.obj))
  }
  return(medips.obj)
}
# Downsample a MEDIPSset to a coarser resolution by aggregating adjacent
# bins into larger windows.
#
# Args:
#   medips.obj:  a MEDIPSset at the original resolution.
#   window.size: new window size in bp.  Assumed to be a whole multiple of
#                the current bin_size (scale.factor is used as an integer).
# Returns:
#   The MEDIPSset with bin_size and the genome_chr/genome_pos/genome_raw/
#   genome_norm vectors rebuilt at the new resolution.
# Depends on rescale.vector() (defined elsewhere in this project) to
# aggregate each chromosome's per-bin values by scale.factor.
reduceMedipsResolution <- function(medips.obj, window.size) {
  scale.factor <- window.size / medips.obj@bin_size
  # Number of coarse windows per chromosome (last window may be partial)
  reduced.chr.len <- vector(length=length(medips.obj@chr_names), mode="integer")
  for (i in 1:length(medips.obj@chr_names)) {
    curr.chr <- medips.obj@chr_names[i]
    reduced.chr.len[i] <- ceiling(medips.obj@chr_lengths[i] / window.size)
  }
  scaled.size <- sum(reduced.chr.len)
  # Cumulative end offset of each chromosome in the flattened output
  chr.pos <- cumsum(reduced.chr.len)
  # Preallocate the full-genome output vectors
  genome.chr <- vector(scaled.size, mode="integer")
  genome.pos <- vector(scaled.size, mode="integer")
  genome.raw <- vector(scaled.size, mode="integer")
  genome.norm <- vector(scaled.size, mode="numeric")
  medips.obj@bin_size <- window.size
  for (i in 1:length(medips.obj@chr_names)) {
    curr.chr <- medips.obj@chr_names[i]
    # Index range of this chromosome's windows in the output vectors
    if (i == 1) {
      idx.rng <- 1:reduced.chr.len[i]
    } else {
      idx.rng <- (chr.pos[i-1]+1):(chr.pos[i-1]+reduced.chr.len[i])
    }
    genome.chr[idx.rng] <- curr.chr
    genome.pos[idx.rng] <- seq(1, medips.obj@chr_lengths[i], window.size)
    genome.raw[idx.rng] <- rescale.vector(medips.obj@genome_raw[medips.obj@genome_chr == curr.chr], scale.factor)
    genome.norm[idx.rng] <- rescale.vector(medips.obj@genome_norm[medips.obj@genome_chr == curr.chr], scale.factor)
  }
  medips.obj@genome_chr <- genome.chr
  medips.obj@genome_pos <- genome.pos
  medips.obj@genome_raw <- genome.raw
  medips.obj@genome_norm <- genome.norm
  return(medips.obj)
}
# use Fisher's exact test?
# Fit a per-window Poisson GLM to test for differential enrichment between
# two MEDIPS sets.  WARNING: fits one GLM per genome window, which is very
# slow genome-wide (original author note: "TOO SLOW").
#
# Args:
#   medips1, medips2: MEDIPSsets with identical window layouts.
# Returns:
#   A matrix with one row per window and columns:
#     pval -- chi-squared p-value of the sample term in the Poisson GLM
#     fc   -- fold change (currently NA; computation is disabled below)
diffMeth.glm <- function(medips1, medips2) {
  require(utils)
  cat("Computing differential enrichment\n")
  num.windows <- length(genome_raw(medips1))
  pb <- txtProgressBar(min=1, max=num.windows, style=3)
  # FIX: preallocate result vectors.  The originals were grown implicitly,
  # and fold.changes was never filled, so `out[,2] <- fold.changes` failed
  # with a zero-length replacement.
  glm.pval <- rep(NA_real_, num.windows)
  fold.changes <- rep(NA_real_, num.windows)
  # Total read counts serve as library-size offsets in the GLM
  cs <- c(sum(genome_raw(medips1)), sum(genome_raw(medips2)))
  sample.f <- factor(c(1, 2))
  for (i in seq_len(num.windows)) {
    setTxtProgressBar(pb, i)
    s1 <- genome_raw(medips1)[i]
    s2 <- genome_raw(medips2)[i]
    data <- as.vector(unlist(c(s1, s2)))
    glm.curr <- glm(data ~ 1 + sample.f, offset=log(cs), family="poisson")
    # P-value of the sample factor from the analysis-of-deviance table
    glm.pval[i] <- anova(glm.curr, test="Chisq")[5][2,1]
  #  fold.changes[i] <- exp(glm.curr$coefficients[1])/(exp(glm.curr$coefficients[1]+glm.curr$coefficients[2]))
  }
  close(pb)
  out <- matrix(ncol=2, nrow=num.windows)
  out[,1] <- glm.pval
  out[,2] <- fold.changes
  colnames(out) <- c("pval", "fc")
  # FIX: the original returned the undefined name 'output'
  return(out)
}
# Serialize a MEDIPSset into the on-disk "dipdata" layout: a directory
# named <dset.name>.dipdata containing a chromosome metadata CSV plus a
# genome_data.txt table with one row per bin (chr, pos, raw, norm).
# Any existing directory of the same name is removed first (overwrite).
#
# Args:
#   medips.obj: the MEDIPSset to save.
#   dset.name:  dataset name; used for the directory and metadata file.
# Depends on chrVecToNum() (defined elsewhere) to encode chromosome names
# as numeric codes for the first column.
saveMedipsForDipData <- function(medips.obj, dset.name) {
  data.path <- paste(dset.name, "dipdata", sep=".")
  # Overwrite semantics: clear any previous export
  if (file.exists(data.path)) {
    unlink(data.path, recursive=TRUE)
  }
  dir.create(data.path)
  chrs <- data.frame(name=dset.name, chr.names=medips.obj@chr_names, chr.lengths=medips.obj@chr_lengths, bin.size=medips.obj@bin_size)
  write.table(chrs, file=paste(data.path, dset.name, sep="/"), sep=",")
  chrs.num <- chrVecToNum(medips.obj@genome_chr)
  all.data <- matrix(0, length(chrs.num), 4)
  colnames(all.data) <- c("chr", "pos", "raw","norm")
  all.data[,1] <- chrs.num
  all.data[,2] <- medips.obj@genome_pos
  all.data[,3] <- medips.obj@genome_raw
  all.data[,4] <- medips.obj@genome_norm
  write.table(as.data.frame(all.data), file=paste(data.path, "genome_data.txt", sep="/"), sep=",", quote=FALSE, row.names=FALSE)
}
|
7133fb58df1f068714037a5db997f2339c2fee8a
|
f997b825b89a191ef89709870065d375dd84358d
|
/man/datastamp-package.Rd
|
a4947ff72e2e853d457afc7c48075a799eb62daf
|
[
"MIT"
] |
permissive
|
teunbrand/datastamp
|
1557b85d423c7a892696a4900c3e239870d1bf72
|
ebd6c3bc3a3bd9efe08bc615cc7d9e38ea252ebd
|
refs/heads/master
| 2022-12-28T19:44:54.396093
| 2020-10-19T08:49:09
| 2020-10-19T08:49:09
| 304,337,082
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 834
|
rd
|
datastamp-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datastamp-package.R
\docType{package}
\name{datastamp-package}
\alias{datastamp}
\alias{datastamp-package}
\title{datastamp: Stamping data with metadata}
\description{
The goal of datastamp is to make it easy to attach some metadata to
R objects. This can be convenient if you want to make `.RData` or `.rds`
objects self-documenting, by attaching time of creation or the path to the
script used to generate the data.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/teunbrand/datastamp}
\item Report bugs at \url{https://github.com/teunbrand/datastamp/issues}
}
}
\author{
\strong{Maintainer}: Teun van den Brand \email{tahvdbrand@gmail.com} (\href{https://orcid.org/0000-0002-9335-7468}{ORCID})
}
\keyword{internal}
|
f7a3d2f4b9a229f64cc55523b41f5ba968758eba
|
633f3cf081277fcc3fad31e19b6717dfe2b11915
|
/Bayesian_MS/BEDms/OptimDataFilesGenerator.R
|
52e2fddd67c18475bc6f4ffa5cd46ac25ab67367
|
[] |
no_license
|
csynbiosysIBioEUoE/ODMSiSY_2020_SI
|
a26620cf8cd908ec9b988c2908c4c2d1a0433cf3
|
5f9353a64fa7a0ca92bfd007d1c201c0aaf90fa2
|
refs/heads/master
| 2022-12-24T16:13:20.689940
| 2020-09-08T12:10:04
| 2020-09-08T12:10:04
| 276,348,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,680
|
r
|
OptimDataFilesGenerator.R
|
################################# Generate Data Files for Optimum Experiments #################################
# Function used to generate the necessary CSV Data Files (Events_Inputs, Inputs and Observables) in order to generate plots
# and simulate the three different models to a specific set of inputs using the functions enclosed in Predictions&Analysis.
# The CSV files generated have the same structure as the ones extracted from Lugagne et.al.
# As arguments it takes fileName (name to be taged to the different CSV files as a marker) and iputs (a vector with the input
# values ordered as IPTG1, aTc1, IPTG2, aTc2, ..., IPTGn, aTcn) and this can have any desired length (from 1 step experiment
# to n steps experiment) extracted from the optimisation results.
# Generate the three CSV data files (Observables, Events_Inputs, Inputs)
# describing a 24-hour optimal experiment, in the same layout as the
# Lugagne et al. datasets.
#
# Args:
#   fileName: prefix tag for the three generated CSV files.
#   inputs:   numeric vector of input levels ordered as
#             IPTG1, aTc1, IPTG2, aTc2, ..., IPTGn, aTcn; its length
#             (divided by 2) sets the number of equal-duration steps.
# Side effects:
#   Reads "Optim_Observables.csv" from the working directory and writes
#   <fileName>_Observables.csv, <fileName>_Events_Inputs.csv and
#   <fileName>_Inputs.csv there.  The CSV column layout is format-critical
#   for downstream simulation code, so the code below is left unchanged.
OptimFileGener <- function(fileName = "Optim", inputs = c(0.8100001, 6.0000001)){
  ########################### Generate Observables file
  baseFile <- read.csv("Optim_Observables.csv")
  write.csv(baseFile, file = paste(fileName, "_Observables.csv", sep = ""), row.names = FALSE)
  ########################### Generate Events_Inputs file
  FinalTime <- rep(24*60, length(inputs)/2) # Time vector of the experiment (24h)
  Switchingtimes <- seq(0, 24*60, length = ((length(inputs)/2)+1)) # Switching times where each step has the same duration
  Switchingtimes <- Switchingtimes[1:length(FinalTime)] # Drop the final time point; keep only step starts
  IPTGpre <- rep(1, length(inputs)/2) # Input values for the 24h incubation
  aTcpre <- rep(0, length(inputs)/2)
  IPTG <- c() # Input values for the experiment
  aTc <- c()
  # Split the interleaved inputs vector into separate IPTG / aTc vectors
  for(j in seq(1, length(inputs), 2)){
    IPTG <- c(IPTG, inputs[j])
    aTc <- c(aTc, inputs[j+1])
  }
  # Write results in a CSV file
  allTog <- cbind(Switchingtimes, FinalTime, IPTGpre, aTcpre, IPTG, aTc)
  write.csv(allTog, file = paste(fileName, "_Events_Inputs.csv", sep = ""), row.names = FALSE)
  ########################### Generate Inputs file
  # Same as the previous file, but instead of ordered by events ordered by time points
  time <- seq(0, 24*60, length = (24*60)+1)
  IPTGpre <- rep(1, (24*60)+1)
  aTcpre <- rep(0, (24*60)+1)
  IPTG <- c()
  aTc <- c()
  j <- 1
  Switchingtimes <- seq(0, 24*60, length = ((length(inputs)/2)+1))
  # Expand each step's input level over its minute-by-minute time span;
  # the first step includes its left endpoint, later steps do not (to
  # avoid duplicating the shared boundary minute).
  for(i in seq(1, length(inputs), 2)){
    if(i == 1){
      ip <- rep(inputs[i], Switchingtimes[j+1]-Switchingtimes[j]+1)
      at <- rep(inputs[i+1], Switchingtimes[j+1]-Switchingtimes[j]+1)
    } else {
      ip <- rep(inputs[i], Switchingtimes[j+1]-Switchingtimes[j])
      at <- rep(inputs[i+1], Switchingtimes[j+1]-Switchingtimes[j])
    }
    IPTG <- c(IPTG, ip)
    aTc <- c(aTc, at)
    j <- j+1
  }
  allTog2 <- cbind(time, IPTGpre, aTcpre, IPTG, aTc)
  write.csv(allTog2, file = paste(fileName, "_Inputs.csv", sep = ""), row.names = FALSE)
}
######### Generate Posterior Predictive Distributions files if required
# NOTE(review): everything below runs at source() time and references
# `fileName`, which is only defined as an argument of OptimFileGener above.
# Sourcing this file as-is will fail with "object 'fileName' not found"
# unless a global `fileName` already exists -- confirm whether these calls
# should live inside the function or take an explicit argument.
#### MODEL 1 ####
# Path needs to be modified according to the user
source('D:/PhD/GitHub/FOSBE2019_Paper/Predictions&Analysis/PostPredCheckSimulM1.R')
# Inputs to the function need to be modified according to the user results
PPCSimul1(fileName, "ALL_Model1.stan")
#### MODEL 2 ####
# Path needs to be modified according to the user
source('D:/PhD/GitHub/FOSBE2019_Paper/Predictions&Analysis/PostPredCheckSimulM2.R')
# Inputs to the function need to be modified according to the user results
PPCSimul2(fileName, "ALL_Model2.stan")
#### MODEL 3 ####
# Path needs to be modified according to the user
source('D:/PhD/GitHub/FOSBE2019_Paper/Predictions&Analysis/PostPredCheckSimulM3.R')
# Inputs to the function need to be modified according to the user results
PPCSimul3(fileName, "ALL_Model3.stan")
|
c3115927d43618480a381dc69050a8b2377726c6
|
73613f0527f130ed04c641b8408620fca49cd8e8
|
/R/tabulate_chemo_effects.R
|
785c2c2cb86ae154727b133e8cbdea90b59344da
|
[] |
no_license
|
cobriniklab/rb_exome
|
86b7be48dbc518059ffdb9e715cb6c782cdb475e
|
8ffe630d119fac1ba7fea0816bbcd7420d41dc7d
|
refs/heads/main
| 2022-12-23T10:38:22.642801
| 2022-03-19T00:17:39
| 2022-03-19T00:17:39
| 151,138,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 861
|
r
|
tabulate_chemo_effects.R
|
##' Compare mean per-sample SNV counts before and after chemotherapy
##'
##' Counts SNVs per sample in \code{reynolds_snv}, then averages those
##' counts separately for diagnosis samples and for the three known
##' post-chemotherapy cell-line samples ("194-CL", "196-CL", "203-CL").
##'
##' @title Tabulate chemotherapy effects on SNV burden
##' @param reynolds_snv A data frame of SNVs with one row per variant and
##'   a \code{sample} column identifying the sample of origin.
##' @return A named list with elements \code{dx} and \code{post_chemo},
##'   each a one-row data frame holding the mean per-sample SNV count
##'   (\code{mean_count}) for that group.
##' @author whtns
##' @export
tabulate_chemo_effects <- function(reynolds_snv) {
  # Hard-coded identifiers of the post-chemotherapy samples
  reynolds_post_chemo_samples = c("194-CL", "196-CL", "203-CL")
  # SNV count per sample
  reynolds_mean_var <-
    reynolds_snv %>%
    dplyr::group_by(sample) %>%
    dplyr::count()
  # Mean count across diagnosis (non-post-chemo) samples
  dx_sample_mean <-
    reynolds_mean_var %>%
    dplyr::filter(!sample %in% reynolds_post_chemo_samples) %>%
    dplyr::ungroup() %>%
    dplyr::summarize(mean_count = mean(n))
  # Mean count across post-chemo samples
  post_chemo_sample_mean <-
    reynolds_mean_var %>%
    dplyr::filter(sample %in% reynolds_post_chemo_samples) %>%
    dplyr::ungroup() %>%
    dplyr::summarize(mean_count = mean(n))
  list(dx = dx_sample_mean, post_chemo = post_chemo_sample_mean)
}
|
e93ef53a2f02071149996d7f9075c84dca335320
|
3e3ab1934554bb4dd1ba876f69fa636ce17f21f7
|
/Biostatistics-with-R/Intermediate Linear Regression - Predictors of Body Fat.R
|
84514ae0597ef5dfafdf985d35caf29159f794c4
|
[] |
no_license
|
MadzivaDuane/Academic-Projects
|
12f4d87e72dcb9885188f533b4cd82b26f3a4b64
|
78fa58467236ca23ea5364ba399dea3f27a467ab
|
refs/heads/master
| 2023-04-20T18:40:27.423875
| 2023-04-04T22:52:24
| 2023-04-04T22:52:24
| 267,905,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,878
|
r
|
Intermediate Linear Regression - Predictors of Body Fat.R
|
#Predictors of Body Fat
data <- read.csv("~/Documents/Academics/Other/BIS 505 Biostats for PH II/Datasets/body_fat.csv")
body_fat <- data; head(body_fat); dim(body_fat)
#website on using cook's distance or dffts to identify outliers:
#https://cran.r-project.org/web/packages/olsrr/vignettes/influence_measures.html
#general relationships between variables
pairs(body_fat, pch = 16)
boxplot_body_fat <- boxplot(body_fat$Percent_Body_Fat)
print(body_fat[which(body_fat$Percent_Body_Fat == boxplot_body_fat$out),])
print(paste0("The outlier is: ", boxplot_body_fat$out, " which is row 216"))
boxplot(body_fat$Density)
#correlation matrix to remove variables that are colinear
library(corrplot)
library(RColorBrewer)
correlation_matrix<-cor(body_fat) #only include numeric variables
corrplot(correlation_matrix, type="upper", order="hclust",col=brewer.pal(n=6, name="RdYlBu"), addCoef.col = "black", number.cex = 0.75)
#initial model
initial_model <- lm(Percent_Body_Fat ~ Age+Weight+Height+Neck_Circ+Chest_Circ+Abdomen_Circ+Hip_Circ+
Thigh_Circ+Knee_Circ+Ankle_Circ+Bicep_Circ+Forearm_Circ+Wrist_Circ, data = body_fat)
summary(initial_model)
#diagnositcs and evaluation of mode: also outlier analysis
par(mfrow = c(2,2))
for (i in 1:4){
plot(initial_model, i)
} #Row 39 is an outlier by cook's distance
#additional outlier identifier
par(mfrow = c(1,1))
library(car)
influencePlot(initial_model, main="Influence Plot")
print("Row 39 and 224 are outliers")
#other useful diagnostic methods: using OLSSR
library(olsrr)
#website for instructions: https://cran.r-project.org/web/packages/olsrr/vignettes/influence_measures.html
ols_plot_cooksd_bar(initial_model)
ols_plot_cooksd_chart(initial_model)
ols_plot_dffits(initial_model)
ols_plot_resid_stand(initial_model)
ols_plot_resid_lev(initial_model)
#removes outliers and remove colinear variables
final_body_fat <- body_fat[-c(39, 224),]
#confirm removal of outlier rows
dim(body_fat); dim(final_body_fat)
#removes colinear variables
#includes both pair plots and correlations
library(GGally)
potential_predictors_data <- final_body_fat[, -c(1,2)]
ggpairs(potential_predictors_data)
#perform Farrar – Glauber test : https://datascienceplus.com/multicollinearity-in-r/
library(mctest)
omcdiag(potential_predictors_data, final_body_fat$Percent_Body_Fat)
#this confirms that of the 6 tests, 5 confirmed the presence of colinearity
#now whcih variables are the problem?
imcdiag(x = potential_predictors_data, y = final_body_fat$Percent_Body_Fat)
#from this, potential cause of colinearity are: Weight, Chest_Circ, Abdomen_Circ,
#Hip_Circ, Thigh_Circ, Knee_Circ
#conduct pairwise t-test for independence and find which variable pairs are statistically significant
library(ppcor)
pairwise_independecen_test <- pcor(potential_predictors_data, method = "pearson")
print(paste0("Maxmimum estimate is: ",max(pairwise_independecen_test$estimate[pairwise_independecen_test$estimate < 1])))
print(paste0("Minimum estimate is: ",min(pairwise_independecen_test$estimate[pairwise_independecen_test$estimate < 1])))
print(paste0("Hence range of estimates is: ", min(pairwise_independecen_test$estimate[pairwise_independecen_test$estimate < 1]),
" to ", max(pairwise_independecen_test$estimate[pairwise_independecen_test$estimate < 1])))
#model for OLSRR package
olsrr_model <- lm(Percent_Body_Fat ~ ., data = final_body_fat[, -c(1)])
summary(olsrr_model)
ols_best_subset_interactions <- ols_step_best_subset(olsrr_model)
print(ols_best_subset_interactions)
#most parsimonious model:
best_model <- lm(Percent_Body_Fat ~ Age+Weight+Neck_Circ+Abdomen_Circ+Thigh_Circ+Ankle_Circ+Bicep_Circ+Forearm_Circ+Wrist_Circ
, data = final_body_fat[, -c(1)])
summary(best_model)
#final model diagnostics
# FIX: these diagnostics are labelled "final model" and sit directly after
# fitting best_model, but previously plotted initial_model (the full,
# pre-selection fit).  Run them on the selected parsimonious model instead.
par(mfrow = c(1,2))
for (i in 1:2){
  plot(best_model, i)
}
par(mfrow = c(1,1))
|
43078230d0aa0c7082cacd10e47821e57347064f
|
90d339192c3d427dfbc9363e7b1bb637fe831b55
|
/man/sam.gen.ncpen.Rd
|
048d0183aa6baa4442437666801de231b8caad8b
|
[] |
no_license
|
zeemkr/ncpen
|
acd4c57fb3d78a8063ca2473306097488c298039
|
e17a0f5f2869d3993a3323e7a269dbb1819201ac
|
refs/heads/master
| 2021-03-16T09:22:17.260173
| 2018-11-19T00:20:49
| 2018-11-19T00:20:49
| 107,593,275
| 9
| 0
| null | 2018-11-19T00:17:27
| 2017-10-19T20:09:23
|
C++
|
UTF-8
|
R
| false
| true
| 2,127
|
rd
|
sam.gen.ncpen.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncpen_cpp_wrap.R
\name{sam.gen.ncpen}
\alias{sam.gen.ncpen}
\title{sam.gen.ncpen: generate a simulated dataset.}
\usage{
sam.gen.ncpen(n = 100, p = 50, q = 10, k = 3, r = 0.3,
cf.min = 0.5, cf.max = 1, corr = 0.5, seed = NULL,
family = c("gaussian", "binomial", "multinomial", "cox", "poisson"))
}
\arguments{
\item{n}{(numeric) the number of samples.}
\item{p}{(numeric) the number of variables.}
\item{q}{(numeric) the number of nonzero coefficients.}
\item{k}{(numeric) the number of classes for \code{multinomial}.}
\item{r}{(numeric) the ratio of censoring for \code{cox}.}
\item{cf.min}{(numeric) value of the minimum coefficient.}
\item{cf.max}{(numeric) value of the maximum coefficient.}
\item{corr}{(numeric) strength of correlations in the correlation structure.}
\item{seed}{(numeric) seed number for random generation. Default does not use seed.}
\item{family}{(character) model type.}
}
\value{
An object with list class containing
\item{x.mat}{design matrix.}
\item{y.vec}{responses.}
\item{b.vec}{true coefficients.}
}
\description{
Generate a synthetic dataset based on the correlation structure from generalized linear models.
}
\details{
A design matrix for regression models is generated from the multivariate normal distribution with a correlation structure.
Then the response variables are computed with a specific model based on the true coefficients (see references).
Note the censoring indicator locates at the last column of \code{x.mat} for \code{cox}.
}
\examples{
### linear regression
sam = sam.gen.ncpen(n=200,p=20,q=5,cf.min=0.5,cf.max=1,corr=0.5)
x.mat = sam$x.mat; y.vec = sam$y.vec
head(x.mat); head(y.vec)
}
\references{
Kwon, S., Lee, S. and Kim, Y. (2016). Moderately clipped LASSO.
\emph{Computational Statistics and Data Analysis}, 92C, 53-67.
Kwon, S. and Kim, Y. (2012). Large sample properties of the SCAD-penalized maximum likelihood estimation on high dimensions.
\emph{Statistica Sinica}, 629-653.
}
\seealso{
\code{\link{ncpen}}
}
\author{
Dongshin Kim, Sunghoon Kwon, Sangin Lee
}
|
d2160b3efd7816d7457bdf0abd490b644710c70f
|
1b88a1b82041a657fe526f4d670bab12dfce4b14
|
/Assignment_two/Plot.1.R
|
1f87c4f8543baf0a9d7150119c9e9327a8388e0e
|
[] |
no_license
|
3Dan3/Coursera-Exploratory-Data-Analysis
|
05746f046429aef88c14748140086bb4930166f3
|
9c4737fef0ddd5eb965184df5140697ec6885139
|
refs/heads/master
| 2021-01-21T01:52:36.162793
| 2017-07-07T18:27:21
| 2017-07-07T18:27:21
| 96,565,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 909
|
r
|
Plot.1.R
|
### Data ###
# Script: downloads the NEI PM2.5 emissions dataset and renders Plot 1 --
# a bar plot of total US PM2.5 emissions per year, saved as plot1.png.

#Load required packages
library(downloader)
suppressPackageStartupMessages(library(dplyr))
#Download and store data into R
dataset_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download(dataset_url, dest = "data.zip", mode = "wb")
unzip("data.zip", exdir = ".")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Convert to tibble format for better on-screen printing.
NEI <- tbl_df(NEI)
SCC <- tbl_df(SCC)
### Plot 1 ###
# Sum emissions by year (scaled to millions of tons) and draw the bar plot
# directly into the PNG device
png('plot1.png')
NEI %>%
  select(Emissions, year) %>%
  group_by(year) %>%
  summarise (total=sum(Emissions)/10^6) %>%
  select(year, total) %>%
  with(barplot(total, names.arg = year, col = "brown", xlab="Year",
               ylab=expression("PM" [2.5]* " Emissions (Millions of Tons)"),
               main=expression("Total PM" [2.5]*" Emissions From All US Sources")) )
dev.off()
|
4c4a125ae0642ac3b427db965bf26ccf1f6ba9a0
|
499a9e4122dd4524b5fdb7a262ad64ae1e072314
|
/Script/Old/data_combineUGA-VL.R
|
f89a5f06906fc225c7163eee0d26a33865b2b173
|
[] |
no_license
|
vincentlinderhof/NutritionUGA
|
9dbda98e8f59c7f72ee55885054ca21188873465
|
78d0a32c4b753428ef0c76db282c437e670d1b1a
|
refs/heads/master
| 2020-09-10T04:51:51.614791
| 2016-11-28T11:17:50
| 2016-11-28T11:17:50
| 67,415,327
| 0
| 1
| null | 2016-11-28T10:06:17
| 2016-09-05T11:30:11
|
R
|
UTF-8
|
R
| false
| false
| 2,643
|
r
|
data_combineUGA-VL.R
|
# -------------------------------------
# creating a panel dataset and a
# balanced panel dataset with the waves
# of the UGA data (three waves)
# -------------------------------------
#Tom
#dataPath <- "C:/Users/Tomas/Documents/LEI/"
# LEI server dataPath
#Vincent at home
dataPath <- "D:/Models/CIMMYT/UGA/Data"
setwd("D:/Models/CIMMYT/UGA")
library(dplyr)
options(scipen=999)
# get all three waves, the output of the UGA_***.R script files
#UGA2009 <- readRDS(file.path(dataPath, "data/UGA/UGA2009.rds"))
#UGA2010 <- readRDS(file.path(dataPath, "data/UGA/UGA2010.rds"))
#UGA2011 <- readRDS(file.path(dataPath, "data/UGA/UGA2011.rds"))
# get all three waves, the output of the UGA_***.R script files
# for Vincent at home
UGA2009 <- readRDS(file.path(dataPath, "UGA2009.rds"))
UGA2010 <- readRDS(file.path(dataPath, "UGA2010.rds"))
UGA2011 <- readRDS(file.path(dataPath, "UGA2011.rds"))
# -------------------------------------
# example: select only maize farmers:
# filter on household head and
# crop_code = 130 (maize)
# -------------------------------------
# 2009
# maize09 <- UGA2009
# maize09 <- filter(maize09, status %in% "HEAD", crop_code %in% 130)
#
# # 2010
# maize10 <- UGA2010
# maize10 <- filter(maize10, status %in% "HEAD", crop_code %in% 130)
#
# # 2011
# maize11 <- UGA2011
# maize11 <- filter(maize11, status %in% "HEAD", crop_code %in% 130)
# -------------------------------------
# unlike TZA data there is no need to
# use a panel key to link households
# and individuals
# -------------------------------------
hhid2009 <- unique(UGA2009$hhid2009)
hhid2010 <- unique(UGA2010$hhid2010)
hhid2011 <- unique(UGA2011$hhid2011)
table(hhid2009 %in% hhid2010)
table(hhid2009 %in% hhid2011)
# so all we do is change the names of the variables
# to a standard name across all years
UGA2009 <- rename(UGA2009, hhid=hhid2009, indidy=indidy1)
UGA2010 <- rename(UGA2010, hhid=hhid2010, indidy=indidy2)
UGA2011 <- rename(UGA2011, hhid=hhid2011, indidy=indidy3)
# -------------------------------------
# Some waves of the data have variables
# that were not available in others.
# -------------------------------------
# get all name variables that are common to the three waves
good <- Reduce(intersect, list(names(UGA2009), names(UGA2010), names(UGA2011)))
# select only those names common in all three waves
UGA2009_2 <- UGA2009[, good]
UGA2010_2 <- UGA2010[, good]
UGA2011_2 <- UGA2011[, good]
# new full dataset: stack the three waves (same columns) and put the
# household/individual identifiers first
fullData <- rbind(UGA2009_2, UGA2010_2, UGA2011_2) %>%
  select(hhid, indidy, everything())
# NOTE(review): clearing the workspace with rm(list=ls()) is a side effect
# on anyone sourcing this script -- consider removing.
rm(list=ls()[!ls() %in% c("fullData", "dataPath")])
#Vincent
# NOTE(review): this path is relative to the setwd() call at the top of the
# script, not to dataPath -- confirm the intended output location.
saveRDS(fullData, "data/UGAfulldata.rds")
|
276ba4b2edc8cd152bf05b22b38c57595d633757
|
dc3665fa074c42cd25d3eca313b90f4ae4482520
|
/weight_from_string_list.R
|
b7d8a330a9765512fee11048cabe88041814b2f4
|
[] |
no_license
|
andfdiazrod/darkweb_functions
|
5f6a350e6902bfbb9a9ce8886425ed62c48dbf3e
|
b8f20f47c916494103a9f7f2f418ed2a39f80b6d
|
refs/heads/master
| 2022-05-16T02:01:45.786947
| 2019-11-29T16:53:37
| 2019-11-29T16:53:37
| 216,660,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,962
|
r
|
weight_from_string_list.R
|
#' Parse free-text weight descriptions into grams.
#'
#' Scans each string for a number followed by a weight unit (g, oz, mg, kg,
#' ...) and converts the first parsable match to grams. Strings with no
#' parsable weight produce an all-NA row. Requires stringr (str_extract,
#' str_extract_all, str_locate) to be attached by the caller.
#'
#' @param string_list character vector of product descriptions.
#' @return data.frame with columns `weight` (numeric, in the stated unit),
#'   `unit` (character) and `weight_in_grams` (numeric).
weight_from_string_list <- function(string_list){
  # Unit tokens and their to-gram conversion factors; both are re-sorted by
  # token length (longest first) so e.g. "grams" matches before "g".
  weight_words_1 <- c('g\'s','gr','g.','g ','gs','gz','g','gm','gram','grams','gramme','oz','ounce','ounces','mg', 'kg','kilo')
  conversion <- c(1,1,1,1,1,1,1,1,1,1,1,28.3495,28.3495,28.3495,1/1000,1000,1000)[order(nchar(weight_words_1),decreasing = TRUE)]
  weight_words_1 <- weight_words_1[order(nchar(weight_words_1),decreasing = TRUE)]
  weight_words_sorted <- paste(weight_words_1,collapse = '|')
  # Accumulator starts with a dummy all-NA header row, dropped at the end.
  weight_in_grams <- matrix(ncol=3)
  for(str in string_list){
    position_unit_cut <- c(1,1)
    str <- tolower(str)
    # Heuristic: "3 5" style number pairs that look like "3.5" get re-spaced
    # so the whole-number part is picked up by the pattern below.
    replace_1 <- '[0-9\\.]+\\s+[0-9\\.]+'
    find_replace_1 <- strsplit(str_extract_all(str,replace_1)[[1]],' ')
    for(num_temp in find_replace_1){
      if(length(num_temp)==2 &
         (sum(as.numeric(num_temp)<50,na.rm=TRUE)==2 |
          0 %in% num_temp)){
        str_temp_1 <- paste0(num_temp,collapse='.')
        str <- sub(str_temp_1,paste0(' ',str_temp_1),str)
      }
    }
    # (An `if(FALSE){...}` dead block from the original was removed here.)
    # Normalise common word forms of quantities before matching.
    str <- sub('half','0.5',str)
    str <- sub('full','1',str)
    str <- sub('deux','2',str)
    str <- sub('1 one','1',str)
    str <- sub('1 single','1',str)
    str <- sub('\\.\\.\\.','',sub('\\.\\.\\.','',str))
    str <- sub('qtr','1/4',str)
    str <- sub('eight','1/8',str)
    str <- sub('8 ball','3.5 grams',str)
    # Scan left-to-right: find the next unit token, look for a number right
    # before it; if none is adjacent, advance past the token and retry.
    weight_not_found <- TRUE
    while(weight_not_found){
      str <- substr(str,position_unit_cut[2],nchar(str))
      # NOTE(review): tokens such as 'g.' are used as regexes, so '.' matches
      # any character -- behaviour kept from the original.
      unit <- str_extract(str, weight_words_sorted)
      position_unit_cut <- str_locate(str,unit)
      if(!is.na(unit)){
        pattern <- paste0("([0-9\\.]+)\\s*",unit)
        match <- regexec(pattern, str)
        adjacent_words <- unlist(regmatches(str, match))[-1]
        weight <- na.omit(suppressWarnings(as.numeric(adjacent_words)))
        if(length(weight) != 0 & unit %in% weight_words_1){
          # (The original re-tested `unit %in% weight_words_1` here in a
          # nested if whose else-branch was unreachable; removed.)
          weight_grams <- weight[1] * conversion[which(unit == weight_words_1)]
          weight_in_grams <- rbind(weight_in_grams, c(weight[1], unit, weight_grams))
          weight_not_found <- FALSE
        } else if(nchar(str)==1){
          weight_in_grams <- rbind(weight_in_grams,NA)
          weight_not_found <- FALSE
        }
      } else {
        weight_in_grams <- rbind(weight_in_grams,NA)
        weight_not_found <- FALSE
      }
    }
  }
  # BUG FIX: drop the header row with drop=FALSE -- with a single input the
  # original collapsed to a vector and the colnames<- call then failed.
  weight_in_grams <- weight_in_grams[-1, , drop = FALSE]
  colnames(weight_in_grams) <- c('weight', 'unit','weight_in_grams')
  # BUG FIX: the original assigned as.numeric() back INTO the character
  # matrix, which silently re-coerced to character, so the returned "numeric"
  # columns were strings. Convert after building the data.frame instead.
  out <- data.frame(weight_in_grams, stringsAsFactors = FALSE)
  out$weight <- as.numeric(out$weight)
  out$weight_in_grams <- as.numeric(out$weight_in_grams)
  return(out)
}
|
f1aa9ec68bbf684850cb386961b9ba6bafc62e8f
|
d0b099dca80322316a1dd5083bb0bad993d9c206
|
/scripts/preprocess.R
|
4b18b1b24e94a3c046cd4c3382150d9e90c4ba6b
|
[
"BSD-3-Clause"
] |
permissive
|
lmsac/BacteriaMS-mixture
|
c84c4e189a826ad6ebcf1fbd5f342f1efcb9bdb1
|
0b1379ad4ccda74386f8b8f27d09b12447cb0250
|
refs/heads/master
| 2021-07-13T11:30:28.183455
| 2020-05-25T08:22:51
| 2020-05-25T08:22:51
| 142,287,626
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 956
|
r
|
preprocess.R
|
#' setwd(...)
# Batch preprocessing of mass-spectrometry text files in the working
# directory: crop the m/z range, subtract baseline, normalise intensity,
# pick peaks, write a peak list per file, and archive the raw file.
# NOTE(review): crop.mz, get.baseline, subtract.baseline,
# normalize.intensity and find.peaks are project functions defined
# elsewhere in the package -- not visible here.
local({
# Tunable parameters for this batch.
mz.lower = 4000
mz.upper = 12000
window = 0.015
offset = 0.5
# Output/archive directories (dir.create warns if they already exist).
dir.create('raw')
# dir.create('normalized')
dir.create('peaklists')
# Process every .txt spectrum in the working directory.
lapply(list.files(pattern = '.txt'), function(file) {
raw = read.table(file)
raw = crop.mz(raw, mz.lower = mz.lower, mz.upper = mz.upper)
baseline = get.baseline(raw, window = window, offset = offset)
subbase = subtract.baseline(raw, baseline)
normalized = normalize.intensity(subbase)
peaklist = find.peaks(normalized, window = window, offset = offset)
# Optionally also persist the normalised spectrum (disabled).
# write.table(
# normalized,
# file = paste0('normalized/normalized ', file),
# col.names = F,
# row.names = F,
# quote = F
# )
# Peak list written as bare whitespace-separated columns.
write.table(
peaklist,
file = paste0('peaklists/peaklist ', file),
col.names = F,
row.names = F,
quote = F
)
# Move the processed raw file out of the way so reruns skip it.
file.rename(file, paste0('raw/', file))
file
})
})
|
212bfd3d92cf46fdc021cabfb14f3f06c61a0386
|
936617f15596e0cebec03c21457d3182c79c45ba
|
/datanalysis/.Rproj.user/8E5AEE1C/sources/per/t/DDA697CC-contents
|
eb0a7c57aa3f561a888dc9f07b5e8969c330c8c2
|
[] |
no_license
|
raizaoliveira/dados-mestrado
|
2967925f0902aa4cdff1e233894dd2edc8045cfd
|
e446303d04b084c07f0f87c82fca60328197d1cd
|
refs/heads/master
| 2022-01-12T03:27:39.205961
| 2019-06-12T19:35:00
| 2019-06-12T19:35:00
| 190,767,552
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,629
|
DDA697CC-contents
|
# Aggregate dependency-change CSVs: for each input file, sum the
# deleted/added/changed/unchanged dependency counts over consecutive runs
# of rows and write one summary CSV per input file.
# NOTE(review): input path is hardcoded to a Windows directory; run only
# on the original author's machine or edit `folder` first.
read_files <- function() {
# NOTE(review): miscTools is loaded but never visibly used in this block.
require(miscTools)
folder <- "D:\\Camila\\Documentos\\IMPACTO\\datanalysis\\CAP3\\"
file_list <- list.files(path=folder, pattern="*.csv")
for (l in 1:length(file_list)){
# Headerless CSV; V1..V4 forced to character, blanks read as NA.
variabilities <- read.csv(paste(folder, file_list[l], sep=''), na.strings = c("","NA"), header=FALSE, colClasses=c("V1"="character","V2"="character","V3"="character","V4"="character"))
print(variabilities)
col1 <- c(variabilities$V1)
col2 <- c(variabilities$V2)
col3 <- c(variabilities$V3)
col4 <- c(variabilities$V4)
col7 <- c(variabilities$V7)
# Per-row dependency counts to be accumulated.
depDel <- c(variabilities$V5)
depAdd <- c(variabilities$V6)
depCh <- c(variabilities$V8)
depNCh <- c(variabilities$V9)
add = del = pres = change = notchange = 0
aux = 1 ;
x = 1
for(i in 1:length(depNCh)){
del = del + depDel[i];
add = add + depAdd[i];
change = change + depCh[i];
notchange = notchange + depNCh[i];
aux = aux + 1;
# Emit a summary row once `aux` reaches 50 -- i.e. every 49 input rows
# (aux starts at 1). NOTE(review): running totals are never reset after
# emitting, so later rows accumulate over ALL previous groups -- looks
# intentional from the code but verify against the expected output.
if (aux == 50){
result <- c(col1[i], col2[i],col3[i], col4[i], col7[i], del, add, change, notchange)
# Build the header only for the first emitted row (x == 1); x is then
# set to 10 so subsequent rows append via rbind.
if(x == 1){
smoke <- matrix(c(result),ncol=length(result),byrow=TRUE)
colnames(smoke) <- c("Date","Evolution","Variabilities","TotalDependencies","Preserved","DependenciesDeleted","DependenciesAdditions","DependenciesChanged","DependenciesNotModified")
smoke <- as.table(smoke)
}
if(x > 1){
smoke <- rbind(smoke, result)
}
x = 10
aux = 1
}
}
# One output CSV per input, written to the current working directory.
write.csv(smoke, file = file_list[l],row.names=FALSE)
}
}
read_files()
|
|
d4746bfc61c0d7859fa3437bf85282baf15ba05a
|
f90071514fd6defd84cbba44946b51a1c23f8f76
|
/plot4.R
|
9067cfcf2d522d616e13678fada25ba9cba665e3
|
[] |
no_license
|
nokka09/exploringdataanalysiswk1
|
8fccb88647f4dcd127c1c8ed311ad34345243cb9
|
caae38cd281860d50bcf6eb3f76c8f2ccd41b879
|
refs/heads/master
| 2020-11-25T06:37:11.346154
| 2019-12-17T05:46:16
| 2019-12-17T05:46:16
| 228,541,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,117
|
r
|
plot4.R
|
# Exploratory Data Analysis assignment, plot 4: download the UCI household
# power-consumption data and save a 2x2 panel of line charts for
# 2007-02-01/02 to plot4.png.
# First we create a directory for our dataset
if(!file.exists("./dataset")){dir.create("./dataset")}
# Then we download the file for the dataset
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile = "./dataset/sourcedata.zip")
# Next we unzip the downloaded dataset within the same directory of the download
unzip(zipfile = "./dataset/sourcedata.zip",exdir = "./dataset")
# Read the data; "?" encodes missing values in this file
electric_power_consumption <- read.csv("./dataset/household_power_consumption.txt", sep = ";",
                                       header = TRUE, stringsAsFactors=FALSE, dec=".", na.strings = "?")
# Subset to the two days of interest (dates are dd/mm/yyyy in the raw file)
epc_feb_1and2 <- subset(electric_power_consumption, Date %in% c("1/2/2007","2/2/2007"))
# Build a POSIXct date-time column from the separate Date and Time fields
epc_feb_1and2$Date <- as.Date(epc_feb_1and2$Date, "%d/%m/%Y")
datetime <- paste(as.Date(epc_feb_1and2$Date), epc_feb_1and2$Time)
epc_feb_1and2$Date_time <- as.POSIXct(datetime)
# BUG FIX: the PNG device must be opened BEFORE any drawing. The original
# plotted to the screen device, then opened the PNG device and immediately
# closed it, so plot4.png was saved empty.
png(filename = "plot4.png", width = 480, height = 480, units = "px")
# 2x2 panel: Global Active Power, Voltage, sub-metering, Global Reactive Power
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(epc_feb_1and2, {
  plot(Global_active_power~Date_time, type="l", ylab="Global Active Power (kilowatts)", xlab="")
  plot(Voltage~Date_time, type="l", ylab="Voltage (volt)", xlab="")
  plot(Sub_metering_1~Date_time, type="l", ylab="Global Active Power (kilowatts)", xlab="")
  lines(Sub_metering_2~Date_time,col="red")
  lines(Sub_metering_3~Date_time,col="blue")
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=3, bty="n",
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(Global_reactive_power~Date_time, type="l",
       ylab="Global Rective Power (kilowatts)",xlab="")
})
# Close the device to flush plot4.png to disk
dev.off()
|
73077a73bdeac115c0eb0d6c5974ef4fdfdaa040
|
77c10327c17b9f60397f91a7259d3e0ec45f1fbe
|
/integration/integrate.R
|
e807a6cdaec2848b3da67dd9d1f7470d91cec44c
|
[] |
no_license
|
jasminalbert/Albert_ReumanRepo
|
5c435dbf897871d6850ba9bdd295c256085a253d
|
e00f75a49568b122dd04e4a5e7de7bbaeb72cce5
|
refs/heads/master
| 2023-07-17T21:55:01.727810
| 2021-09-01T17:28:00
| 2021-09-01T17:28:00
| 243,134,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,933
|
r
|
integrate.R
|
# Exploratory notebook-style script: numeric integration of a Gaussian pdf
# times a log growth-rate term, in 1D (stats::integrate) and 2D
# (cubature::cuhre). NOTE(review): several statements below reference
# undefined names and will error when sourced top-to-bottom -- flagged inline.
? integrate
# NOTE(review): install.packages inside a script runs on every source.
install.packages("cubature")
library(cubature)
# Univariate normal density with mean mu and sd sigma.
pdf1 <- function(x, mu=0, sigma=1){
pdf1 <- 1/(sigma*sqrt(2*pi))*exp((-1/2)*((x-mu)/sigma)^2)
return(pdf1)
}
# Growth-rate term g(u) = log(1 - delta + delta * exp(u)).
gr <- function(u, delta=0.5){
gr <- log(1-delta+delta*exp(u))
return(gr)
}
# Integrand: normal density times growth-rate term.
integrand_rsharp1 <- function(u, mu=0, sigma=1, delta=0.5){
pdf1 <- (1/(sigma*sqrt(2*pi))*exp((-1/2)*((u-mu)/sigma)^2))
gr <- log(1-delta+delta*exp(u))
return(pdf1*gr)
}
# NOTE(review): `integrand_rsharp` (no "1") is never defined -- the next two
# calls error; presumably integrand_rsharp1 was meant.
curve(integrand_rsharp, from=0, to=250)
x <- seq(-250,250,1)
y <- integrand_rsharp(x, sigma = 1)
plot(x,y, type="l")
integrate(integrand_rsharp1, -500,500, mu=0, sigma=1.9)
integrate(pdf1, -Inf, Inf)
integrate(gr,-500,500)
gr(1)
mu1 <- 0.5; mu2 <- 0.4
# NOTE(review): the next four lines have empty right-hand sides, so the
# parser chains them into one assignment whose RHS uses b1sharp/b2 before
# they exist -- this errors at run time. They read as placeholders for
# simulated normal noise terms.
b1 <- #normal noise 1 #mean mu1 var sigma^2
b2 <- #normal noise 2 #mean mu2 var sigma^2
b1sharp <- #normal noise 1 sharp #mean mu1 var sigma^2
b2sharp <- #normal noise 2 sharp #mean mu2 var sigma^2
u_r1sharp <- b1sharp - b2 #normal with mean mu1-mu2 and var 2sigma^2
u_r2sharp <- b2sharp - b2 #normal with mean 0 and var 2sigma^2
# use to plug into rsharp
#term1 of rbar1
# First term of rbar1: density times constant growth term at u = mu1-mu2.
integrand_r1t1 <- function(u, mu=0, sigma=1, delta=0.5, mu1=0.5, mu2=0.4){
pdf1 <- (1/(sigma*sqrt(2*pi))*exp((-1/2)*((u-mu)/sigma)^2))
grt1 <- log(1-delta + delta*exp(mu1-mu2))
return(pdf1*grt1)
}
integrate(integrand_r1t1, -Inf, 0)
# Bivariate independent-normal density over vars = c(u1, u2).
pdf2 <- function(vars, mu1, mu2, sigma=1){
u1 <- vars[1]
u2 <- vars[2]
pdf2 <- (1/(2*pi*(sigma)^2))*exp((-1/2)*(((u1-mu1)/sigma)^2+((u2-mu2)/sigma)^2))
return(pdf2)
}
# NOTE(review): cuhre with infinite limits -- presumably needs a transform;
# verify against cubature documentation.
cuhre(pdf2, lowerLimit=c(-Inf, -Inf), upperLimit=c(Inf, Inf), mu1=0.5, mu2=0.5)
# Second term of rbar1: mean-zero bivariate density times shifted growth term.
integrand_r1t2 <- function(vars, mu1, mu2, sigma, delta){
u1 <- vars[1]
u2 <- vars[2]
pdf2 <- (1/(2*pi*(sigma)^2))*exp((-1/2)*(((u1)/sigma)^2+((u2)/sigma)^2))
#pdf with mean 0
grt2 <- log(1-delta+delta*exp(u1-u2+mu1-mu2))
return(pdf2*grt2)
}
cuhre(integrand_r1t2, lowerLimit=c(0, 0), upperLimit=c(700, 700), mu1=0.9, mu2=0.9, sigma=2, delta=0.5) #wtf
# NOTE(review): `int` is never assigned above (the cuhre result was not
# captured), so this line errors.
int$integral
|
fc9a11ba6db25d05c57f8f050a8cc0127886fd0b
|
4848ca8518dc0d2b62c27abf5635952e6c7d7d67
|
/R/V_STL_sh_3si.R
|
b46ba8ebaa38126b1c01d117494cf5a5795a30a7
|
[] |
no_license
|
regenesis90/KHCMinR
|
ede72486081c87f5e18f5038e6126cb033f9bf67
|
895ca40e4f9953e4fb69407461c9758dc6c02cb4
|
refs/heads/master
| 2023-06-28T00:29:04.365990
| 2021-07-22T04:44:03
| 2021-07-22T04:44:03
| 369,752,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,674
|
r
|
V_STL_sh_3si.R
|
#' Straight-through Traffic Using a Shared Left-turn Lane on Access Road with Only Straight and Left-turn Shared Lanes at 3-way Signalized Intersection
#'
#' Computes, for an access road with only straight and left-turn movements at
#' a three-way signalized intersection with a shared left-turn lane, the
#' straight-through traffic arriving before the first left turn, following
#' <Formula 8-34> in KHCM(2013), p.245. Invalid inputs return an explanatory
#' error string instead of a number.
#' @param V_L Left Turn Traffic Volume(vph)
#' @param V_TH Straight-through traffic (vph)
#' @param E_L Forward conversion factor for left turn. See \code{\link{E_L_si}}
#' @param N Total number of access lanes (excluding dedicated left-turn lanes).
#' @param L_H Loss of saturation headway time due to roadside friction on right-turn lanes at signal intersections. See \code{\link{L_H_si}}
#' @keywords straight-through traffic volume public left-turn signalized intersection
#' @seealso \code{\link{E_L_si}}, \code{\link{lane_group_3si}}, \code{\link{L_H_si}}
#' @export V_STL_sh_3si
#' @examples
#' V_STL_sh_3si(V_L = 291, V_TH = 999, E_L = 1.09, N = 4, L_H = 2.2)
V_STL_sh_3si <- function(V_L = NULL, V_TH = NULL, E_L = NULL, N = NULL, L_H = NULL){
  # Guard clauses replicate the original nested checks in the same order of
  # precedence; each failure yields the same message string as before.
  if (!(V_L >= 0 & V_TH >= 0)) {
    return('Error : [V_L], [V_TH] must be >= 0(vph). Please check that.')
  }
  if (!(E_L > 0)) {
    return('Error : [E_L] must be positive. Please check that.')
  }
  if (!(N >= 1)) {
    return('Error : [N] must be >= 1 and integer. Please check that.')
  }
  if (!(L_H >= 0)) {
    return('Error: [L_H] must be positive. Please check that.')
  }
  # Formula 8-34: per-lane straight-through volume after removing the
  # through-equivalent of left turns on the other N-1 lanes.
  (V_TH - E_L * V_L * (N - 1) + L_H / 1.63) / N
}
|
312699eede8bf053875356fff6771046707ef565
|
7d105c9c74252ed1005f6cd3af441960eb888287
|
/man/download_schellens_et_al_2015_sup_1.Rd
|
0303faa714c364bf24b5c4db1d7b047e98c69d8a
|
[
"MIT"
] |
permissive
|
richelbilderbeek/bianchi_et_al_2017
|
8a892d50b1a8f92c6e5b8fc2e1d2a6ac82ee3872
|
2e1460fe84dd3650108755273942e96201d21206
|
refs/heads/master
| 2022-12-26T12:40:37.420782
| 2022-12-13T09:14:18
| 2022-12-13T09:14:18
| 253,762,495
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,022
|
rd
|
download_schellens_et_al_2015_sup_1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_schellens_et_al_2015_sup_1.R
\name{download_schellens_et_al_2015_sup_1}
\alias{download_schellens_et_al_2015_sup_1}
\title{Downloads the XLSX file by Schellens et al., 2015}
\usage{
download_schellens_et_al_2015_sup_1(
url = "http://richelbilderbeek.nl/schellens_et_al_2015_s_1.xlsx",
xlsx_filename = file.path(rappdirs::user_data_dir(appname = "bianchietal2017"),
"schellens_et_al_2015_s_1.xlsx"),
verbose = FALSE
)
}
\arguments{
\item{url}{the download URL. Note that the original URL is
\url{https://doi.org/10.1371/journal.pone.0136417.s005},
which redirects to an unknown (and hence unusable)
actual download URL}
\item{xlsx_filename}{the XLSX filename}
\item{verbose}{set to TRUE for more output}
}
\value{
the XLSX filename of the downloaded file
}
\description{
Downloads the XLSX file by Schellens et al., 2015
}
\seealso{
use \link{get_schellens_et_al_2015_sup_1} to
read the table as a \link[tibble]{tibble}
}
|
6c83f23d22c0e74e1c6037aa2d00cb810ee26dd9
|
71c4400f7cd574bf38c8a0ec1fe355e3648d62f5
|
/Pierre_Casco_HW9.R
|
fa3040d877d78cea2d974a6e56a7ba1ee864b7c3
|
[] |
no_license
|
PierreCasco/SVM-lab
|
35b35f37e5f0c426a0dce4d14d174663271215f2
|
a84a11a39d1dc95cfa80659452feaa684ff92e0c
|
refs/heads/master
| 2020-04-27T15:17:10.963868
| 2019-03-14T00:52:00
| 2019-03-14T00:52:00
| 174,440,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,937
|
r
|
Pierre_Casco_HW9.R
|
# Homework script: fit KSVM / SVM / LM models to airquality Ozone, compare
# prediction-error plots, then repeat with a binary "goodOzone" target.
# NOTE(review): ggplot2 and gridExtra are used below but only gridExtra is
# ever attached (L'library' calls at top load arules/kernlab) -- the script
# presumably relies on them being pre-loaded; verify before running.
library('arules')
library('kernlab')
#Load the air quality dataset
aq <- airquality
# NOTE(review): zero-imputation of NAs (incl. Ozone) biases the models;
# kept as the assignment apparently intended.
aq[is.na(aq)] <- 0
#Study data set
str(aq)
#Prepare train and test data sets
numrows <- nrow(aq)
cutoff <- (numrows/3*2)
# Random 2/3 train / 1/3 test split.
randIndex <- sample(1:numrows[1])
aq.train <- aq[randIndex[1:cutoff],]
aq.test <- aq[randIndex[(cutoff+1):numrows],]
#Build model using KSVM
model <- ksvm(Ozone ~ Solar.R + Temp, data = aq.train)
#Test the model on the testing dataset and compute RMSE
svmPred <- predict(model, aq.test, type="votes")
# NOTE(review): sqrt(mean(e)^2) is |mean error|, not RMSE -- true RMSE would
# be sqrt(mean(e^2)). Same issue in RMSE2/RMSE3 below; kept as written.
RMSE <- sqrt(mean(aq.test$Ozone - svmPred)^2)
#Plot: point size/colour encode absolute prediction error
g1 <- ggplot(aq.test,aes(x=Temp,y=Wind)) +
geom_point(aes(size=(abs(aq.test$Ozone - svmPred)), colour = (abs(aq.test$Ozone - svmPred))))
#Build model using SVM in the e1071 package
library('e1071')
model2 <- svm(Ozone ~ Solar.R + Temp, data = aq.train)
#Test model2 on the testing dataset and compute RMSE
svmPred2 <- predict(model2, aq.test, type = "votes")
RMSE2 <- sqrt(mean(aq.test$Ozone - svmPred2)^2)
#Plot
g2 <- ggplot(aq.test,aes(x=Temp,y=Wind)) +
geom_point(aes(size=(abs(aq.test$Ozone - svmPred2)), colour = (abs(aq.test$Ozone - svmPred2))))
#Build model using LM
# NOTE(review): the linear model uses Temp + Wind while the SVMs above use
# Solar.R + Temp -- comparison is not apples-to-apples; confirm intent.
model3 <- lm(formula = Ozone ~ Temp + Wind, data = aq.train)
summary(model3)
#Test model2 on the testing dataset and compute RMSE
svmPred3 <- predict(model3, aq.test, type = "response")
RMSE3 <- sqrt(mean(aq.test$Ozone - svmPred3)^2)
#Plot
g3 <- ggplot(aq.test,aes(x=Temp,y=Wind)) +
geom_point(aes(size=(abs(aq.test$Ozone - svmPred3)), colour = (abs(aq.test$Ozone - svmPred3))))
#Plot all 3 charts
library('gridExtra')
grid.arrange(g1,g2,g3)
#Create good Ozone variable, 0 if < average, 1 if >= average
goodCutoff = mean(aq$Ozone)
aq2 <- aq
aq2$goodOzone <- ifelse(aq2$Ozone >= goodCutoff, 1, 0)
#Prepare train and test data sets with new data set (same random split)
aq2.train <- aq2[randIndex[1:cutoff],]
aq2.test <- aq2[randIndex[(cutoff+1):numrows],]
#Build model using KSVM with new good Ozone data
model4 <- ksvm(goodOzone ~ Solar.R + Temp, data = aq2.train)
#Test the model on the testing dataset
svmPred4 <- predict(model4, aq2.test, type="votes")
#Plot
g4 <- ggplot(aq2.test,aes(x=Temp,y=Wind)) +
geom_point(aes(size=(abs(aq2.test$Ozone - svmPred4)), colour = goodOzone))
#Build model using SVM with new good Ozone data
model5 <- svm(goodOzone ~ Solar.R + Temp, data = aq2.train)
#Test the model on the testing dataset
svmPred5 <- predict(model5, aq2.test, type="votes")
#Plot
g5 <- ggplot(aq2.test,aes(x=Temp,y=Wind)) +
geom_point(aes(size=(abs(aq2.test$Ozone - svmPred5)), colour = goodOzone))
#Build model using Naive Bayes with new good Ozone data
model6 <- naiveBayes(goodOzone ~ Solar.R + Temp, data = aq2.train)
#Test the model on the testing dataset
svmPred6 <- predict(model6, aq2.test)
#Plot
# NOTE(review): g6 reuses svmPred5 instead of svmPred6 -- looks like a
# copy-paste slip, so the Naive Bayes panel actually shows SVM errors.
# (Fixing it needs svmPred6 coerced from factor; confirm intent first.)
g6 <- ggplot(aq2.test,aes(x=Temp,y=Wind)) +
geom_point(aes(size=(abs(aq2.test$Ozone - svmPred5)), colour = goodOzone))
#Plot all 3 charts
grid.arrange(g4,g5,g6)
|
f52805b6ececdb8967e14c72a238b023a5f6c50f
|
d5e085247744171e340504e0bf8720e2c7f82b1b
|
/R/fars_summarize_years.R
|
f3fd442076fff19b92cc7c5807c90b31640c06e6
|
[] |
no_license
|
jcpsantiago/FARSr
|
9a1bfd0588585e8b1247c63a0ca055f81421e886
|
908bb763e6abe85d7f7d39b9429df4b54472bcc8
|
refs/heads/master
| 2020-05-22T03:16:57.059092
| 2017-03-11T17:46:21
| 2017-03-11T17:46:21
| 84,594,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 675
|
r
|
fars_summarize_years.R
|
#' Number of accidents per year and month.
#'
#' Reads the FARS data for every year in \code{years} (via
#' \code{fars_read_years}) and tabulates the number of accidents per month,
#' one column per year. Invalid years produce an error message from
#' \code{fars_read_years}.
#'
#' @param years A vector with the years of interest.
#'
#' @return A tibble with one row per month and one column per year in
#'   \code{years}, holding accident counts for the whole country.
#'
#' @import dplyr
#' @import tidyr
#'
#' @export
fars_summarize_years <- function(years) {
  # Placeholders so R CMD check does not flag the NSE column names below.
  year <- MONTH <- NULL
  # One data frame per requested year, stacked into a single table.
  yearly_tables <- fars_read_years(years)
  stacked <- dplyr::bind_rows(yearly_tables)
  # Count accidents per (year, month), then pivot years into columns.
  monthly_counts <- dplyr::summarize(dplyr::group_by(stacked, year, MONTH), n = n())
  tidyr::spread(monthly_counts, year, n)
}
|
23e6037c4464e1b1bc8c7d8793101501fa2c0f61
|
de17eb3f7b45bb9ea5e2db8a2bcd1f13fe3689b9
|
/server.R
|
6016f06f9656c8d5abc1f5f651f4e2c0c419df96
|
[] |
no_license
|
kuzmenkov111/Shiny-login-page
|
5c04781d3e55863ded64d9d56636a91638d536c9
|
9c8a83e60397b811e43df6231963074eb185b563
|
refs/heads/master
| 2020-08-22T13:50:28.379179
| 2019-08-01T13:14:44
| 2019-08-01T13:14:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,652
|
r
|
server.R
|
# Shiny login-page app: top-level setup shared by all sessions.
library(shiny)
library(V8)
library(sodium)
library(openssl)
library(rJava) #For sending an email from R
library(mailR) #For sending an email from R
library(DBI)
library(pool)
library(RSQLite)
#Database: a pooled SQLite connection, closed when the app stops.
sqlite_path = "www/sqlite/users"
pool <- dbPool(drv = RSQLite::SQLite(), dbname=sqlite_path)
onStop(function() {
poolClose(pool)
})
#Create table user in DB (passwords stored as SHA-256 digests by server()).
# NOTE(review): no user_id column is defined here, but server() reads one
# on login -- see the guard/comment there.
dbExecute(pool, 'CREATE TABLE IF NOT EXISTS user (user_name TEXT, country TEXT, email TEXT, password TEXT)')
#Countries: pipe-separated "abbr|country" list rendered into a named list
#for the registration selectize input.
countries.list <- read.table("www/countries.txt", header = FALSE, sep = "|",
stringsAsFactors = FALSE, quote = "",
col.names = c("abbr", "country"))
choice.country <- as.list(as.character(countries.list$country))
names(choice.country) <- countries.list$country
server <- function(input, output, session) {
  # -------------------------------------------------------------------
  # Login / registration logic. `pool` and `choice.country` come from the
  # top of this file; isValidEmail() and js$reset2() are assumed to be
  # defined elsewhere in the app -- TODO confirm.
  # -------------------------------------------------------------------
  loggedIn <- reactiveVal(value = FALSE)          # is a user logged in?
  user <- reactiveValues(name = NULL, id = NULL)  # active user's details

  # Runs only when the Login button is pressed.
  observeEvent(input$butLogin, {
    req(input$username, input$pwInp)  # both fields must be filled in
    # Parameterised lookup by user name OR email (injection-safe).
    query <- sqlInterpolate(pool,
      "select * from user where user_name=?user or email=?email;",
      user = input$username, email = input$username)
    user_data <- dbGetQuery(pool, query)
    if(nrow(user_data) > 0){ # If the active user is in the DB then logged in
      # Passwords are stored as SHA-256 digests; compare digests.
      if(sha256(input$pwInp) == user_data[1, "password"]){
        user$name <- user_data[1, "user_name"]
        # BUG FIX: the user table has no user_id column (see CREATE TABLE
        # at the top of this file), so indexing it unconditionally raised
        # "undefined columns selected" on every successful login.
        user$id <- if ("user_id" %in% names(user_data)) user_data[1, "user_id"] else NA
        loggedIn(TRUE)
        toggleModal(session, "window", toggle = "close")
        output$App_Panel <- renderUI({
          span(
            strong(paste("welcome", user$name, "|")),
            actionLink(inputId = "logout", "Logout")
          )
        })
      }
    } else {
      loggedIn(FALSE)
    }
  })

  # Red message under the login form after a failed attempt.
  output$login_status <- renderUI({
    if(input$butLogin == 0){
      return(NULL)
    } else {
      if(!loggedIn()){
        return(span("The Username or Password is Incorrect", style = "color:red"))
      }
    }
  })

  # Registration dialog.
  observeEvent(input$create_account, {
    showModal(
      modalDialog(title = "Create an account", size = "m",
                  textInput(inputId = "new_user", label = "Username"),
                  textInput(inputId = "new_email", label = "Email"),
                  selectizeInput(inputId = 'country', 'Country', choices = choice.country),
                  passwordInput(inputId = "new_pw", label = "Password"),
                  passwordInput(inputId = "new_pw_conf", label = "Confirm password"),
                  checkboxInput(inputId = "terms", label = a("I, agree for terms and conditions",target="_blank",href="Disclaimer-TermsandConditions.html")),
                  actionButton(inputId = "register_user", label = "Submit"),
                  uiOutput("register_status"),
                  footer = actionButton("dismiss_modal",label = "Dismiss")
      )
    )
    register_user()
  })

  observeEvent(input$dismiss_modal,{
    removeModal()
  })

  # Validates the registration form and inserts the new account.
  # Returns a shiny tag with a red (failure) or green (success) message.
  register_user <- eventReactive(input$register_user, {
    if(!isTruthy(input$new_user) | !isTruthy(input$new_email) | !isTruthy(input$new_pw) ){
      return(span("Fill required information correctly", style = "color:red"))
    }
    if (!isValidEmail(input$new_email)){
      return(span("Please provide a valid email address", style = "color:red"))
    }
    if (sha256(input$new_pw)!=sha256(input$new_pw_conf)){
      return(span("Entered passwords do not match.", style = "color:red"))
    }
    if (!input$terms){
      return(span("Please tick the box to show that you agree with terms and conditions", style = "color:red"))
    }
    # Reject duplicate user name or email (parameterised query).
    query <- sqlInterpolate(pool,
      "select * from user where user_name=?user or email=?email;",
      user = input$new_user, email = input$new_email)
    users_data <- dbGetQuery(pool, query)
    if(nrow(users_data) > 0){
      return(span("User already exists", style = "color:red"))
    }
    new_hash <- sha256(input$new_pw)
    new_user <- input$new_user
    # SECURITY FIX: the original built the INSERT by paste0()-ing raw form
    # input straight into SQL -- an injection hole (and it broke on any
    # value containing a quote). Use a parameterised statement instead.
    insert_sql <- sqlInterpolate(pool,
      "INSERT INTO user (user_name, country, email, password) VALUES (?user, ?country, ?email, ?pw);",
      user = new_user, country = input$country,
      email = input$new_email, pw = new_hash)
    dbExecute(pool, insert_sql)
    print("- New user added to database")
    # Optional welcome e-mail via mailR::send.mail -- kept disabled, as in
    # the original (fill in SMTP credentials to enable).
    # isolate({send.mail(from = "....@gmail.com", to = input$new_email, ...)})
    # Registration does not log the user in. (In the original this call sat
    # after the return statement and was unreachable.)
    loggedIn(FALSE)
    return(span("Your registration was successful. An email with your credential is sent to the registred email adrress", style = "color:green"))
  })

  # Show the registration outcome inside the modal.
  output$register_status <- renderUI({
    if(input$register_user == 0){
      return(NULL)
    } else {
      isolate(register_user())
    }
  })

  # Clear session state and reset the UI on logout.
  observeEvent(input$logout, {
    user$name <- NULL
    user$id <- NULL
    loggedIn(FALSE)
    js$reset2()  # shinyjs/V8 helper defined in the UI -- TODO confirm
  })
}
|
57882f06763f502adbbf96f49ee961e1dc510298
|
1e48c563b2b9c723ed2a234df90f1dcd9338e6c3
|
/R/request.R
|
131b13ecd6843f1bcdaac1edfb5e6f3619de8240
|
[] |
no_license
|
bobjansen/mattR
|
e87f254d9bc54022a83ea4dc7eb1561528e3c956
|
9dfba5bd5436e0954b5dab77ba283f1adc94c13d
|
refs/heads/master
| 2021-01-20T06:04:58.701879
| 2018-04-03T19:47:09
| 2018-04-03T19:49:20
| 101,481,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,240
|
r
|
request.R
|
#' Extract Parameters from a request
#'
#' Collects request parameters from three sources, in order of increasing
#' precedence: the query string (GET), the POST body, and named capture
#' groups of the routing regular expression matched against the path.
#'
#' @param request The request object from which to extract the parameters.
#' @return A list of extracted parameters.
#'
#' @import shiny
#' @importFrom utils modifyList hasName
#' @export
extractParameters <- function(request) {
  # 1. Query-string (GET) parameters, when present.
  params <- list()
  if ("QUERY_STRING" %in% names(request)) {
    params <- shiny::parseQueryString(request[["QUERY_STRING"]])
  }
  # 2. POST body parameters override same-named GET parameters.
  if (request[["REQUEST_METHOD"]] == "POST") {
    body_lines <- request[["rook.input"]]$read_lines()
    if (length(body_lines) > 0) {
      params <- modifyList(params, shiny::parseQueryString(body_lines))
    }
  }
  # 3. Named capture groups the router matched against PATH_INFO.
  has_captures <- hasName(request, "RegExpMatch") &&
    hasName(attributes(request[["RegExpMatch"]]), "capture.names")
  if (has_captures) {
    match_info <- request[["RegExpMatch"]]
    starts <- attr(match_info, "capture.start")
    widths <- attr(match_info, "capture.length")
    captured <- substring(request[["PATH_INFO"]], starts, starts + widths - 1)
    names(captured) <- attr(match_info, "capture.names")
    # split() groups repeated capture names into multi-valued entries.
    params <- modifyList(params, split(unname(captured), names(captured)))
  }
  params
}
|
9a6b18666c4a98fbeb8f231f260ab02c653eb8cb
|
b5cad150733fd310e8bf8d8802a6aa94cf735ff3
|
/R/do.transform.R
|
a86d2828d11061a3613123bbf990fbf177d11bee
|
[] |
no_license
|
yannabraham/cytoCore
|
187242e0d24c53275da9c203e2cb43a2affb75a2
|
6cd18d96ec5aa52c6c4308e8e12109e6309fad00
|
refs/heads/master
| 2021-01-10T01:12:50.951843
| 2015-11-26T22:05:28
| 2015-11-26T22:05:28
| 46,948,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 592
|
r
|
do.transform.R
|
# Apply a transformation function column-wise to selected channels of a
# flowFrame `ff`. Channels are chosen either by name (`cols`) or by
# parameter type (`type`); exactly one must be supplied. Unknown names or
# types trigger a warning but the matching subset is still transformed.
# Returns the frame with range metadata corrected via correct.range().
do.transform <-
function(ff,cols=NULL,type=NULL,fun=arcsinhTransform()) {
  if (!is.null(cols)) {
    if (!all(cols %in% parameters(ff)$name)) {
      warning("Some column names are not found")
    }
    selected <- which(parameters(ff)$name %in% cols)
  } else if (!is.null(type)) {
    if (!all(type %in% parameters(ff)$type)) {
      warning("Some types are not found")
    }
    selected <- which(parameters(ff)$type %in% type)
  } else {
    stop("You must provide either a list of columns or a list of types to transform")
  }
  # Transform each selected channel; drop=FALSE keeps a matrix for apply().
  exprs(ff)[, selected] <- apply(exprs(ff)[, selected, drop = FALSE], 2, fun)
  correct.range(ff)
}
|
6b6ca1607ff4b3b4525b10bac8063c891f0c3b2e
|
bc1665e1cbe713412e707020da4bf4b46b755796
|
/fleschkincaid/esda_flesch.R
|
c34888c21c9f33443178a51e84137b2e9302c166
|
[] |
no_license
|
rheimann/UMBC
|
421480eef9cbe7106f5e5b0f7743d567616747a2
|
51e4aa8537e0a24d1c72ceff4c34d814d1a868be
|
refs/heads/master
| 2020-12-24T14:27:03.890648
| 2014-05-09T00:54:40
| 2014-05-09T00:54:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,669
|
r
|
esda_flesch.R
|
#### ESDA Example Fletch Kincaid ####
# Exploratory spatial data analysis of a county-level Flesch-Kincaid
# readability index derived from Twitter: choropleths, spatial weights,
# global and local Moran's I.
# NOTE(review): install.packages inside a script runs on every source.
install.packages("spdep", dependencies=TRUE)
require(spdep)
require(maptools)
# County polygons joined with the Twitter readability scores.
geo.fk <- readShapePoly("/Users/heimannrichard/Google Drive/GIS Data/flesch_kincaid/TwitterReadingCNTYJoin.shp",
proj4string=CRS('+proj=longlat +datum=NAD83'))
# colnames(geo.fk)
# pairs.default(geo.fk)
# scatter.smooth(geo.fk$MEANflecMC, geo.fk$AGE_18_21)
hist(geo.fk$MEANflesch)
hist(geo.fk$MEANflecMC)
install.packages("RColorBrewer")
require(RColorBrewer)
## Create blue-red-state palette
br.palette <- colorRampPalette(c("blue", "red"), space = "rgb")
br.palette(5)
# spplot, easy but not very flexible option.
# NOTE(review): `edge.col` and `sp` are not quantile() arguments -- they are
# swallowed by `...`; presumably `probs=` was intended for `sp`.
spplot(geo.fk, "MEANflesch", at=quantile(geo.fk$MEANflesch, edge.col = "white",
sp=c(0,.25, .5, .75, 1), na.rm=TRUE), col.regions=br.palette(100), main="Cloropleth", sub="flesch-kincaid")
dev.off()
# NOTE(review): `cols` is only defined several lines below -- this plot and
# legend error if the script is run strictly top-to-bottom.
plot(geo.fk,col=cols,border=NA)
legend(x="bottom",cex=.7,fill=attr(cols,"palette"), bty="n",legend=names(attr(cols, "table")),
title="Flesch Kincaid Reading Index (Twitter, 2013)",ncol=5)
## Plot binary mean center (Red/Blue)
data.fk <- geo.fk
cols <- ifelse(data.fk$MEANflecMC > 0,"red","blue")
par(mar=rep(0,4))
plot(geo.fk,col=cols,border=NA)
legend(x="bottom",cex=.7,fill=c("red","blue"),bty="n",legend=c("Losers","Winners"),
title="Winners and Losers - The Flesch Kincaid Reading Index (Twitter, 2013)",ncol=2)
dev.off()
## Create matrix of polygon centroids
map_crd <- coordinates(geo.fk)
## Contiguity Neighbors
# B is the basic binary coding,
# W is row standardised (sums over all links to n),
# C is globally standardised (sums over all links to n),
# U is equal to C divided by the number of neighbours (sums over all links to unity),
# S is the variance-stabilizing coding scheme proposed by Tiefelsdorf et al. 1999, p. 167-168 (sums over all links to n).
nb.fk <- poly2nb(geo.fk, queen=T)
# 30-nearest-neighbour graph built from mean tweet coordinates.
nb_30nn <- knn2nb(knearneigh(cbind(geo.fk$MEANlong, geo.fk$MEANlat), k=30, zero.policy=TRUE))
# W_cont_el <- poly2nb(geo.fk, queen=T)
# NOTE(review): W_cont_el is only defined in the commented-out line above,
# so this nb2listw call errors -- presumably nb.fk was meant.
geo.fk_queen <- nb2listw(W_cont_el, style="W", zero.policy=TRUE)
fk.30nn <- nb2listw(neighbours=nb_30nn, style="W", zero.policy=TRUE)
## Plot the connections
par(mar=rep(0,4))
plot(nb_30nn, coords=map_crd, pch=19, cex=0.1, col="red")
summary(nb.fk)
summary(nb_30nn)
# Moran's I statistic (permutation test, 999 simulations)
moran.mc(x=geo.fk$MEANflesch, listw=fk.30nn, nsim=999, zero.policy=TRUE)
# correlogram of spatial autocorrelation up to 4th-order neighbours
plot(sp.correlogram(neighbours=nb.fk, var=geo.fk$MEANflesch, order=4, method="I", style="W", zero.policy=TRUE))
# local Moran's I analysis - LISA, FDR-adjusted two-sided p-values
local.mi <- localmoran(x=geo.fk$MEANflesch, listw=fk.30nn, alternative="two.sided", p.adjust.method="fdr", zero.policy=TRUE)
class(local.mi)
colnames(local.mi)
summary(local.mi)
# Moran's I statistic (Ii) or column 5 [,1]
geo.fk$lmi <- local.mi[,1]
# Moran's I p-value (Pr) or column 5 [,5]
geo.fk$lmi.p <- local.mi[,5]
# Moran's I z-value (Z.Ii) or column 4 [,4]
geo.fk$lmi.z <- local.mi[,4]
hist(geo.fk$lmi.z)
# Categorical significance / z-score bands for mapping.
geo.fk$lmi.p.sig <- as.factor(ifelse(local.mi[,5]<.001, "Sig p<.001", ifelse(local.mi[,5]<.05,"Sig p<.05", "NS" )))
geo.fk$lmi.svalue <- as.factor(ifelse(local.mi[,4]< -2, "Z SCORE < 2", ifelse(local.mi[,4]< 2,"Z SCORE > 2", "Z SCORE 2 < X >2" )))
geo.fk$lmi.svalue
spplot(geo.fk, "lmi", at=summary(geo.fk$lmi), col.regions=brewer.pal(5, "RdBu"), main="Local Moran's I")
spplot(geo.fk, "lmi.svalue", col.regions=c("white", "#E6550D","#FDAE6B"))
GES 673 ESDA with Flesch Kincaid Index using Twitter
========================================================
Big social data is driven by a social dimension, and ultimately analyzes data that can serve — either directly or as a proxy — for other, more substantive variables. The Flesch-Kincaid index, which you may be familiar with from Microsoft Word, has long provided a readability index for documents. In a sense, the Flesch-Kincaid index measures linguistic standard. A sizable body of research suggests that how we read, write, and speak relates to our ability to learn. Understanding the spatial variation and neighborhood structure of linguistic standard is therefore a useful direction of research.
```{r}
#### ESDA Example Flesch Kincaid Index using Twitter ####
# install.packages("spdep", dependencies=TRUE)
require(spdep)
# install.packages("maptools", repos="http://cran.us.r-project.org")
require(maptools)
# install.packages("RColorBrewer")
require(RColorBrewer)
```
Load data:
```{r}
# load county shapefile
geocnty.fk <- readShapePoly("/Users/heimannrichard/Google Drive/GIS Data/flesch_kincaid/TwitterReadingCNTYJoin.shp",
proj4string=CRS('+proj=longlat +datum=NAD83'))
```
```{r}
# load 3 digit zip shapefile
geozip.fk <- readShapePoly("/Users/heimannrichard/Google Drive/GIS Data/TwitterReading3ZIPJoin.shp",
proj4string=CRS('+proj=longlat +datum=NAD83'))
```
```{r}
# histogram MEANflesch (mean center FleschKincaid) on geocnty
hist(geocnty.fk$MEANflesch)
hist(geocnty.fk$MEANflecMC)
# histogram MEANflesch (mean center FleschKincaid) on geozip
hist(geozip.fk$MEANflesch)
hist(geozip.fk$MEANflecMC)
```
```{r, fig.height=12, fig.width=14}
# map of FK at the county level
spplot(geocnty.fk, "MEANflesch", at=quantile(geocnty.fk$MEANflesch,
p=c(0,.25, .5, .75, 1), na.rm=TRUE),
col.regions=brewer.pal(5, "Reds"),
main="County Level Flesch Kincaid", sub="Flesch Kincaid Index using Twitter")
```
```{r, fig.height=12, fig.width=14}
# map of FK at the 3-digit zip level
spplot(geozip.fk, "MEANflesch", at=quantile(geozip.fk$MEANflesch,
p=c(0,.25, .5, .75, 1), na.rm=TRUE),
col.regions=brewer.pal(5, "Reds"),
main="3 digit Zipcode Level Flesch Kincaid", sub="Flesch Kincaid Index using Twitter")
# Create blue-state red-state palette
br.palette <- colorRampPalette(c("blue", "pink"), space = "rgb")
pal <- br.palette(n=5)
# classIntervals()/findColours() come from classInt, which was never
# attached above -- load it before use so the chunk is self-contained.
require(classInt)
var <- geozip.fk$MEANflesch
classes_fx <- classIntervals(var, n=5, style="fixed", fixedBreaks=c(0, 10, 25, 50, 75, 100), rtimes = 1)
cols <- findColours(classes_fx, pal)
par(mar=rep(0,4))
# BUG FIX: the original plotted with col=pal, which recycled the 5-colour
# palette across polygons in storage order instead of using the per-polygon
# class colours computed by findColours(); use `cols` so the map matches
# the legend built from attr(cols, "table").
plot(geozip.fk,col=cols,border=NA)
legend(x="bottom", cex=.7, fill=attr(cols,"palette"), bty="n",legend=names(attr(cols, "table")),
title="FK Index using Twitter", ncol=5)
```
```{r}
# Queen-contiguity neighbour lists (shared edge OR vertex) for each
# geography; summary() reports link counts and any no-neighbour polygons.
nb.cntyfk <- poly2nb(geocnty.fk, queen=T)
summary(nb.cntyfk)
nb.zipfk <- poly2nb(geozip.fk, queen=T)
summary(nb.zipfk)
```
```{r}
# Binary ("B") spatial weights from the contiguity lists; zero.policy=TRUE
# tolerates polygons with no neighbours (islands) instead of erroring.
sw.cntyfk <- nb2listw(neighbours=nb.cntyfk, style="B", zero.policy=TRUE)
plot(geocnty.fk)
plot(sw.cntyfk, coordinates(geocnty.fk), add=T, col="red")
sw.zipfk <- nb2listw(neighbours=nb.zipfk, style="B", zero.policy=TRUE)
plot(geozip.fk)
# BUG FIX: original called coordinates(geo.fk), but `geo.fk` is never
# defined anywhere in this document; the zip-level weights must be drawn
# over the zip-level polygons, geozip.fk.
plot(sw.zipfk, coordinates(geozip.fk), add=T, col="red")
```
```{r, fig.height=12, fig.width=14}
# Global Moran's I with 499 Monte-Carlo permutations for each geography.
moran.mc(x=geocnty.fk$MEANflesch, listw=sw.cntyfk, nsim=499, zero.policy=TRUE)
moran.mc(x=geozip.fk$MEANflesch, listw=sw.zipfk, nsim=499, zero.policy=TRUE)
```
```{r, fig.height=12, fig.width=14}
# Spatial correlograms: Moran's I at neighbour orders 1..6.
plot(sp.correlogram(neighbours=nb.cntyfk, var=geocnty.fk$MEANflesch,
order=6, method="I", style="B", zero.policy=TRUE))
plot(sp.correlogram(neighbours=nb.zipfk, var=geozip.fk$MEANflesch,
order=6, method="I", style="B", zero.policy=TRUE))
```
```{r}
# Local Moran's I (LISA) with two-sided tests and FDR-adjusted p-values.
local_cnty.mi <- localmoran(x=geocnty.fk$MEANflesch, listw=sw.cntyfk, alternative="two.sided", p.adjust.method="fdr",
zero.policy=TRUE)
local_zip.mi <- localmoran(x=geozip.fk$MEANflesch, listw=sw.zipfk, alternative="two.sided", p.adjust.method="fdr",
zero.policy=TRUE)
```
```{r}
# Inspect the result matrices (columns include Ii and the p-value).
class(local_cnty.mi)
colnames(local_cnty.mi)
class(local_zip.mi)
colnames(local_zip.mi)
summary(local_cnty.mi)
summary(local_zip.mi)
```
```{r}
# Copy local Moran's I statistic (column 1) and its p-value (column 5)
# onto the spatial objects so spplot can map them.
geocnty.fk$lmi <- local_cnty.mi[,1]
geocnty.fk$lmi.p <- local_cnty.mi[,5]
##
geozip.fk$lmi <- local_zip.mi[,1]
geozip.fk$lmi.p <- local_zip.mi[,5]
# Three-level significance factor: p<.001, p<.05, otherwise "NS".
geocnty.fk$lmi.p.sig <- as.factor(ifelse(local_cnty.mi[,5]<.001, "Sig p<.001", ifelse(local_cnty.mi[,5]<.05,"Sig p<.05", "NS" )))
##
geozip.fk$lmi.p.sig <- as.factor(ifelse(local_zip.mi[,5]<.001, "Sig p<.001", ifelse(local_zip.mi[,5]<.05,"Sig p<.05", "NS" )))
```
```{r, fig.height=12, fig.width=14}
# Map the LISA statistic itself, classed by its five-number summary.
spplot(geocnty.fk, "lmi", at=summary(geocnty.fk$lmi), col.regions=brewer.pal(5, "RdBu"), main="Local Moran's I")
##
spplot(geozip.fk, "lmi", at=summary(geozip.fk$lmi), col.regions=brewer.pal(5, "RdBu"), main="Local Moran's I")
```
```{r, fig.height=12, fig.width=14}
# Map the significance classes (white = not significant).
spplot(geocnty.fk, "lmi.p.sig", col.regions=c("white", "#E6550D","#FDAE6B"))
##
spplot(geozip.fk, "lmi.p.sig", col.regions=c("white", "#E6550D","#FDAE6B"))
```
|
0b5b41519a1d64a16ff1e86540b32e86e671ed87
|
0be6957e9e66f84aa906f351a0a8c48260cb15ba
|
/meansd.R
|
38ddb50622b9f7456065a7a1ceff344a77e31f93
|
[] |
no_license
|
nate-koser/Yoruba-project
|
426e57be3a85572137b04a5465ff3fff8919ceb0
|
a7401c1fabc660fa75f23dd96d0d11a07377296c
|
refs/heads/master
| 2022-05-08T23:15:23.411763
| 2019-06-23T19:37:56
| 2019-06-23T19:37:56
| 173,513,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,947
|
r
|
meansd.R
|
# Descriptive statistics for the CV (monosyllable) tokens. Relies entirely
# on the data frames (Ltones/Mtones/Htones, one per tone level) created by
# datatidy.R; each call below is meant to be run interactively so its
# result prints to the console.
source("datatidy.R")
#CV----------------------------------------------------------------------------------------
#means, sd, various ------------------------------------------------------------------------
#mean + sd vowel durations
# NOTE(review): each mean() below uses target_dur but the paired sd() uses
# f0_2 -- a different variable. Confirm this pairing is intentional (the
# "avg + sd duration" section further down uses target_voweldur for both).
mean(Ltones$target_dur, na.rm = T)
sd(Ltones$f0_2, na.rm = T)
mean(Mtones$target_dur, na.rm = T)
sd(Mtones$f0_2, na.rm = T)
mean(Htones$target_dur, na.rm = T)
sd(Htones$f0_2, na.rm = T)
#minimum f0_2
min(Ltones$f0_2, na.rm = T)
min(Mtones$f0_2, na.rm = T)
min(Htones$f0_2, na.rm = T)
#median f0_2
median(Ltones$f0_2, na.rm = T)
median(Mtones$f0_2, na.rm = T)
median(Htones$f0_2, na.rm = T)
#avg Ltone f0 first vs. second half
mean(Ltones$avg_f0_half1, na.rm = T)
mean(Ltones$avg_f0_half2, na.rm = T)
#avg Ltone f0 slice 1 vs. slice 4
mean(Ltones$f0_1, na.rm = T)
mean(Ltones$f0_4, na.rm = T)
#avg + sd f0 per tone
mean(Ltones$avg_f0, na.rm = T)
sd(Ltones$avg_f0, na.rm = T)
mean(Mtones$avg_f0, na.rm = T)
sd(Mtones$avg_f0, na.rm = T)
mean(Htones$avg_f0, na.rm = T)
sd(Htones$avg_f0, na.rm = T)
#f1-f0
mean(Ltones$avg_f1minusf0, na.rm = T)
sd(Ltones$avg_f1minusf0, na.rm = T)
mean(Mtones$avg_f1minusf0, na.rm = T)
sd(Mtones$avg_f1minusf0, na.rm = T)
mean(Htones$avg_f1minusf0, na.rm = T)
sd(Htones$avg_f1minusf0, na.rm = T)
#avg + sd duration
mean(Ltones$target_voweldur, na.rm = T)
sd(Ltones$target_voweldur, na.rm = T)
mean(Mtones$target_voweldur, na.rm = T)
sd(Mtones$target_voweldur, na.rm = T)
mean(Htones$target_voweldur, na.rm = T)
sd(Htones$target_voweldur, na.rm = T)
#avg + sd HNR
mean(Ltones$avg_hnr, na.rm = T)
sd(Ltones$avg_hnr, na.rm = T)
mean(Mtones$avg_hnr, na.rm = T)
sd(Mtones$avg_hnr, na.rm = T)
mean(Htones$avg_hnr, na.rm = T)
sd(Htones$avg_hnr, na.rm = T)
#avg + sd spec
mean(Ltones$avg_spec, na.rm = T)
sd(Ltones$avg_spec, na.rm = T)
mean(Mtones$avg_spec, na.rm = T)
sd(Mtones$avg_spec, na.rm = T)
mean(Htones$avg_spec, na.rm = T)
sd(Htones$avg_spec, na.rm = T)
# Per-slice statistics: the *_1..*_4 columns are measurements at four
# equal time slices across the vowel.
#mean L hnr by slice
mean(Ltones$hnr_1, na.rm = T)
mean(Ltones$hnr_2, na.rm = T)
mean(Ltones$hnr_3, na.rm = T)
mean(Ltones$hnr_4, na.rm = T)
#mean M hnr by slice
mean(Mtones$hnr_1, na.rm = T)
mean(Mtones$hnr_2, na.rm = T)
mean(Mtones$hnr_3, na.rm = T)
mean(Mtones$hnr_4, na.rm = T)
#mean L spec by slice
mean(Ltones$specTilt_1, na.rm = T)
mean(Ltones$specTilt_2, na.rm = T)
mean(Ltones$specTilt_3, na.rm = T)
mean(Ltones$specTilt_4, na.rm = T)
# f1-f0 per slice, computed as difference of the two means (not the mean
# of per-token differences) -- these differ when NA patterns differ
# between f1_i and f0_i.
#mean L f1f0 by slice
mean(Ltones$f1_1, na.rm = T) - mean(Ltones$f0_1, na.rm = T)
mean(Ltones$f1_2, na.rm = T) - mean(Ltones$f0_2, na.rm = T)
mean(Ltones$f1_3, na.rm = T) - mean(Ltones$f0_3, na.rm = T)
mean(Ltones$f1_4, na.rm = T) - mean(Ltones$f0_4, na.rm = T)
#mean H f1f0 by slice
mean(Htones$f1_1, na.rm = T) - mean(Htones$f0_1, na.rm = T)
mean(Htones$f1_2, na.rm = T) - mean(Htones$f0_2, na.rm = T)
mean(Htones$f1_3, na.rm = T) - mean(Htones$f0_3, na.rm = T)
mean(Htones$f1_4, na.rm = T) - mean(Htones$f0_4, na.rm = T)
#mean M f1f0 by slice
mean(Mtones$f1_1, na.rm = T) - mean(Mtones$f0_1, na.rm = T)
mean(Mtones$f1_2, na.rm = T) - mean(Mtones$f0_2, na.rm = T)
mean(Mtones$f1_3, na.rm = T) - mean(Mtones$f0_3, na.rm = T)
mean(Mtones$f1_4, na.rm = T) - mean(Mtones$f0_4, na.rm = T)
#mean L jitt by slice
mean(Ltones$jitter_1, na.rm = T)
mean(Ltones$jitter_2, na.rm = T)
mean(Ltones$jitter_3, na.rm = T)
mean(Ltones$jitter_4, na.rm = T)
#mean M jitt by slice
mean(Mtones$jitter_1, na.rm = T)
mean(Mtones$jitter_2, na.rm = T)
mean(Mtones$jitter_3, na.rm = T)
mean(Mtones$jitter_4, na.rm = T)
#mean H jitt by slice
mean(Htones$jitter_1, na.rm = T)
mean(Htones$jitter_2, na.rm = T)
mean(Htones$jitter_3, na.rm = T)
mean(Htones$jitter_4, na.rm = T)
# Descriptive statistics for the CVCV (disyllable) tokens. Column suffixes
# _v1/_v2 denote first vs. second vowel; data-frame suffixes like _v11,
# _v21 split tokens further -- presumably syllable x speaker subsets built
# in datatidy.R (TODO confirm the exact coding against datatidy.R).
#CVCV--------------------------------------------------------------------------------------
#avg + sd f0 per tone by syllable
mean(Ltones_v1$avg_f0_v1, na.rm = T)
sd(Ltones_v1$avg_f0_v1, na.rm = T)
mean(Mtones_v1$avg_f0_v1, na.rm = T)
sd(Mtones_v1$avg_f0_v1, na.rm = T)
mean(Htones_v1$avg_f0_v1, na.rm = T)
sd(Htones_v1$avg_f0_v1, na.rm = T)
mean(Ltones_v2$avg_f0_v2, na.rm = T)
sd(Ltones_v2$avg_f0_v2, na.rm = T)
mean(Mtones_v2$avg_f0_v2, na.rm = T)
sd(Mtones_v2$avg_f0_v2, na.rm = T)
mean(Htones_v2$avg_f0_v2, na.rm = T)
sd(Htones_v2$avg_f0_v2, na.rm = T)
#avg + sd f0 by speaker
mean(Ltones_v11$avg_f0_v1, na.rm = T)
sd(Ltones_v11$avg_f0_v1, na.rm = T)
mean(Mtones_v11$avg_f0_v1, na.rm = T)
sd(Mtones_v11$avg_f0_v1, na.rm = T)
mean(Htones_v11$avg_f0_v1, na.rm = T)
sd(Htones_v11$avg_f0_v1, na.rm = T)
mean(Ltones_v21$avg_f0_v2, na.rm = T)
sd(Ltones_v21$avg_f0_v2, na.rm = T)
mean(Mtones_v21$avg_f0_v2, na.rm = T)
sd(Mtones_v21$avg_f0_v2, na.rm = T)
mean(Htones_v21$avg_f0_v2, na.rm = T)
sd(Htones_v21$avg_f0_v2, na.rm = T)
mean(Ltones_v12$avg_f0_v1, na.rm = T)
sd(Ltones_v12$avg_f0_v1, na.rm = T)
mean(Mtones_v12$avg_f0_v1, na.rm = T)
sd(Mtones_v12$avg_f0_v1, na.rm = T)
mean(Htones_v12$avg_f0_v1, na.rm = T)
sd(Htones_v12$avg_f0_v1, na.rm = T)
mean(Ltones_v22$avg_f0_v2, na.rm = T)
sd(Ltones_v22$avg_f0_v2, na.rm = T)
mean(Mtones_v22$avg_f0_v2, na.rm = T)
sd(Mtones_v22$avg_f0_v2, na.rm = T)
mean(Htones_v22$avg_f0_v2, na.rm = T)
sd(Htones_v22$avg_f0_v2, na.rm = T)
#avg + sd HNR
mean(Ltones_v1$avg_hnr_v1, na.rm = T)
sd(Ltones_v1$avg_hnr_v1, na.rm = T)
mean(Mtones_v1$avg_hnr_v1, na.rm = T)
sd(Mtones_v1$avg_hnr_v1, na.rm = T)
mean(Htones_v1$avg_hnr_v1, na.rm = T)
sd(Htones_v1$avg_hnr_v1, na.rm = T)
mean(Ltones_v2$avg_hnr_v2, na.rm = T)
sd(Ltones_v2$avg_hnr_v2, na.rm = T)
mean(Mtones_v2$avg_hnr_v2, na.rm = T)
sd(Mtones_v2$avg_hnr_v2, na.rm = T)
mean(Htones_v2$avg_hnr_v2, na.rm = T)
sd(Htones_v2$avg_hnr_v2, na.rm = T)
#avg + sd HNR by speaker
mean(Ltones_v11$avg_hnr_v1, na.rm = T)
sd(Ltones_v11$avg_hnr_v1, na.rm = T)
mean(Mtones_v11$avg_hnr_v1, na.rm = T)
sd(Mtones_v11$avg_hnr_v1, na.rm = T)
mean(Htones_v11$avg_hnr_v1, na.rm = T)
sd(Htones_v11$avg_hnr_v1, na.rm = T)
mean(Ltones_v21$avg_hnr_v2, na.rm = T)
sd(Ltones_v21$avg_hnr_v2, na.rm = T)
mean(Mtones_v21$avg_hnr_v2, na.rm = T)
sd(Mtones_v21$avg_hnr_v2, na.rm = T)
mean(Htones_v21$avg_hnr_v2, na.rm = T)
sd(Htones_v21$avg_hnr_v2, na.rm = T)
mean(Ltones_v12$avg_hnr_v1, na.rm = T)
sd(Ltones_v12$avg_hnr_v1, na.rm = T)
mean(Mtones_v12$avg_hnr_v1, na.rm = T)
sd(Mtones_v12$avg_hnr_v1, na.rm = T)
mean(Htones_v12$avg_hnr_v1, na.rm = T)
sd(Htones_v12$avg_hnr_v1, na.rm = T)
mean(Ltones_v22$avg_hnr_v2, na.rm = T)
sd(Ltones_v22$avg_hnr_v2, na.rm = T)
mean(Mtones_v22$avg_hnr_v2, na.rm = T)
sd(Mtones_v22$avg_hnr_v2, na.rm = T)
mean(Htones_v22$avg_hnr_v2, na.rm = T)
sd(Htones_v22$avg_hnr_v2, na.rm = T)
#avg + sd spec
mean(Ltones_v1$avg_spec_v1, na.rm = T)
sd(Ltones_v1$avg_spec_v1, na.rm = T)
mean(Mtones_v1$avg_spec_v1, na.rm = T)
sd(Mtones_v1$avg_spec_v1, na.rm = T)
mean(Htones_v1$avg_spec_v1, na.rm = T)
sd(Htones_v1$avg_spec_v1, na.rm = T)
mean(Ltones_v2$avg_spec_v2, na.rm = T)
sd(Ltones_v2$avg_spec_v2, na.rm = T)
mean(Mtones_v2$avg_spec_v2, na.rm = T)
sd(Mtones_v2$avg_spec_v2, na.rm = T)
mean(Htones_v2$avg_spec_v2, na.rm = T)
sd(Htones_v2$avg_spec_v2, na.rm = T)
#avg + sd spec by speaker
mean(Ltones_v11$avg_spec_v1, na.rm = T)
sd(Ltones_v11$avg_spec_v1, na.rm = T)
mean(Mtones_v11$avg_spec_v1, na.rm = T)
sd(Mtones_v11$avg_spec_v1, na.rm = T)
mean(Htones_v11$avg_spec_v1, na.rm = T)
sd(Htones_v11$avg_spec_v1, na.rm = T)
mean(Ltones_v21$avg_spec_v2, na.rm = T)
sd(Ltones_v21$avg_spec_v2, na.rm = T)
mean(Mtones_v21$avg_spec_v2, na.rm = T)
sd(Mtones_v21$avg_spec_v2, na.rm = T)
mean(Htones_v21$avg_spec_v2, na.rm = T)
sd(Htones_v21$avg_spec_v2, na.rm = T)
mean(Ltones_v12$avg_spec_v1, na.rm = T)
sd(Ltones_v12$avg_spec_v1, na.rm = T)
mean(Mtones_v12$avg_spec_v1, na.rm = T)
sd(Mtones_v12$avg_spec_v1, na.rm = T)
mean(Htones_v12$avg_spec_v1, na.rm = T)
sd(Htones_v12$avg_spec_v1, na.rm = T)
mean(Ltones_v22$avg_spec_v2, na.rm = T)
sd(Ltones_v22$avg_spec_v2, na.rm = T)
mean(Mtones_v22$avg_spec_v2, na.rm = T)
sd(Mtones_v22$avg_spec_v2, na.rm = T)
mean(Htones_v22$avg_spec_v2, na.rm = T)
sd(Htones_v22$avg_spec_v2, na.rm = T)
|
d9c288578ee9bb346608fe3bb6c31d4ee11d3592
|
edecc93bdb59672ef2ae77cd859ef0056e1f1ffc
|
/ui.R
|
7919de43dbc7fc0e22c6e654e55498c463c7717f
|
[] |
no_license
|
Lakshmi-Kovvuri/Data-Science-Capstone-final-project
|
240be42d6124a9e1297abdd3a3b680f1007c0691
|
6205bf03c224bc1d7a12f4084ead63da13e2f83e
|
refs/heads/main
| 2023-01-07T23:42:58.811323
| 2020-11-02T05:23:45
| 2020-11-02T05:23:45
| 308,567,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,889
|
r
|
ui.R
|
# Shiny UI for the word-prediction app: a navbar with three tabs
# (predictor, overview, instructions). suppressWarnings() silences
# package start-up warnings on the hosting platform.
suppressWarnings(library(shiny))
suppressWarnings(library(markdown))
shinyUI(navbarPage("Coursera's Data Science Capstone: Final Project",
# Tab 1: text input on the left, predicted next word on the right.
tabPanel("Next Word Predictor",
HTML("<strong>Author: Lakshmi Kovvuri </strong>"),
br(),
img(src = "headers.png"),
# Sidebar
sidebarLayout(
sidebarPanel(
textInput("inputString", "Type here and click on the 'Predict' button",value = ""),
submitButton('Predict'),
br(),
br()
),
mainPanel(
h2("The suggested next word for your text input is"),
# server-side prediction is rendered into output$prediction
verbatimTextOutput("prediction"),
strong("You entered the following word or phrase as Input to the application:"),
tags$style(type='text/css', '#text1 {background-color: rgba(0,255,0,0.4 ); color: blue;}'),
textOutput('text1')
)
)
),
# Tab 2: static overview page (markdown include is commented out).
tabPanel("Overview",
mainPanel(
img(src = "./headers.png"),
#includeMarkdown("Overview.md")
) ),
# Tab 3: usage instructions (markdown include is commented out).
tabPanel("Instructions",
mainPanel(
#includeMarkdown("README.md")
)
)
)
)
)
|
5038f4d8aff5397eed3dc2269153346a16f3c9a6
|
9cc58a8eb35ba76bfac93e44722d859a10b1f064
|
/pipeline/getDist.R
|
7d1d57e57d57df9af15e7ad2d09ee5ee09bef5c9
|
[] |
no_license
|
gradjitta/WorkingCode
|
8400a3a7c8cd7b1145d382d381cc5ff8c4b0b289
|
64e07620b5894889b0b6a15d12c311839895ca86
|
refs/heads/master
| 2020-06-05T12:15:29.651940
| 2014-12-31T20:51:19
| 2014-12-31T20:51:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
getDist.R
|
getDist <- function(g1, g2, gn, cSize) {
  # Distance from object gn to each of g1 and g2, measured between
  # bounding-box vectors: only positions 1 and 3 of the box vector are
  # used as the x/y components of a Euclidean distance.
  # getBoundingBoxc() is defined in a sibling script, sourced on demand.
  source("getBoundingBoxc.R")
  planar_dist <- function(a, b) {
    delta <- a - b
    sqrt(delta[1]^2 + delta[3]^2)
  }
  box_n <- getBoundingBoxc(gn, cSize)
  c(planar_dist(box_n, getBoundingBoxc(g1, cSize)),
    planar_dist(box_n, getBoundingBoxc(g2, cSize)))
}
|
ab927bdeb5258fea8fed63ca1922b9fee2899bb7
|
b9fb1d757a4faed32cd5b7a8572c45c442ca44ed
|
/report_util.r
|
8a22cb3256674d937fbbd762f069e835c08784c7
|
[] |
no_license
|
nesl/gprstest
|
6518db01b909f6f98c6e1ac2a96eb72a55101557
|
326347b85d01f95e0552f6ecdcfd5de23d28933c
|
refs/heads/master
| 2021-01-22T04:34:30.419109
| 2007-06-14T22:30:47
| 2007-06-14T22:30:47
| 12,227,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,711
|
r
|
report_util.r
|
library(chron)
library(quantreg)
# Read a GPRS download log (CSV with header) and augment it with
# chron-derived time columns used throughout the report:
#   chron     - parsed timestamp ("y-m-d h:m:s" from the time_date field)
#   day       - chron cut to day resolution
#   weekday/hour/minute/second - calendar components
#   phone_id  - sequential row id
#   daytime   - TRUE between 07:00 (inclusive) and 19:00 (exclusive)
#   weekend   - TRUE for Fri, Sat, Sun (Friday is deliberately(?) included;
#               TODO confirm this three-day "weekend" definition)
get.table <- function(table.name) {
table <- read.table(table.name, header = TRUE,sep = ",")
# time_date holds "date time"; split into a two-column character matrix
date.and.time <-
matrix(unlist(strsplit(as.character(table$time_date), " ")),
ncol = 2, byrow = TRUE)
chrons <- chron(date.and.time[,1],
date.and.time[,2],
format = c(dates = "y-m-d",
times = "h:m:s"))
days <- cut(chrons,"day")
weekday <- weekdays(chrons)
hour <- hours(chrons)
minute <- minutes(chrons)
second <- seconds(chrons)
daytime <- hour >= 7 & hour < 19
weekend <- (weekday == "Sat") | (weekday == "Sun") | (weekday == "Fri")
table <- transform(table,
chron = chrons,
day = days,
weekday = weekday,
hour = hour,
minute = minute,
second = second,
phone_id = seq(1:length(chrons)),
daytime = daytime,
weekend = weekend)
return(table)
}
split.table <- function(table, start.chron = 0, end.chron = Inf) {
  # Restrict rows to the half-open time window [start.chron, end.chron),
  # then split by download size and attach quantile-regression residuals
  # (via add.resid) to each subset. Returns list(k1, k10, k50, k100).
  in_window <- start.chron <= table$chron & table$chron < end.chron
  windowed <- table[in_window, ]
  size_bytes <- c(k1 = 1024, k10 = 10240, k50 = 51200, k100 = 102400)
  lapply(as.list(size_bytes), function(bytes) {
    add.resid(windowed[windowed$file_size_bytes == bytes, ])
  })
}
add.resid <- function(table) {
  # Attach a `resid` column: residuals from a median (quantile) regression
  # of download time on timestamp, i.e. de-trended download time.
  fit <- rq(table$time_download ~ table$chron)
  transform(table, resid = resid(fit))
}
get.split.table <- function(table.name,
                            start.chron = 0,
                            end.chron = Inf) {
  # Convenience wrapper: load the log file and split it in one call.
  split.table(get.table(table.name),
              start.chron = start.chron,
              end.chron = end.chron)
}
rbind.tables <- function(t1, t2) {
  # Row-bind two split-table lists (as produced by split.table) element
  # by element, returning a list with the same k1/k10/k50/k100 layout.
  k1 <- rbind(t1$k1, t2$k1)
  # BUG FIX: was rbind(t$k10, t2$k10) -- `t` is not an argument of this
  # function, so the k10 slot silently depended on a global object (or
  # errored); it must combine t1 with t2 like the other three slots.
  k10 <- rbind(t1$k10, t2$k10)
  k50 <- rbind(t1$k50, t2$k50)
  k100 <- rbind(t1$k100, t2$k100)
  return(list(k1=k1,k10=k10,k50=k50,k100=k100))
}
# Overlay scatter plots of download times for the four file sizes on one
# set of axes. foo1/foo10/foo50/foo100 are the per-size tables (plotted
# via their default x/y), lx/ly position the legend, `main` is the title.
# The largest size is plotted first so smaller sizes draw on top of it.
plot.all <- function(foo1, foo10, foo50, foo100, lx, ly, main, ylim=c(0,11)) {
plot(foo100, xlab = "Date", ylab = "Download time (seconds)", col="red", main=main, pch=19,ylim=ylim)
points(foo50, col = "blue", pch=19)
points(foo10, col = "green", pch=19)
points(foo1, col = "black", pch=19)
legend(lx, ly, legend = rev(c("1k", "10k","50k","100k")), fill=rev(c("black","green","blue","red")), bg="white")
}
doit <- function(data) {
  # Fit a linear model of the de-trended download time (`resid`) on
  # time-of-day / weekend indicators and signal strength, print each
  # term's share of the total sum of squares plus the degrees of freedom,
  # then plot the residuals over time.
  fit <- lm(resid ~ daytime + weekend + signal_dbm + daytime:weekend, data = data)
  a <- anova(fit)
  ss <- a[2][[1]]        # "Sum Sq" column of the ANOVA table
  sst <- sum(ss)
  print(ss/sst)          # proportion of variance per term (incl. residual)
  print(a[1])            # degrees of freedom per term
  # BUG FIX: original plotted t$k100, referencing a global `t` rather than
  # the `data` argument already used by lm() above, so the plot could show
  # a completely different dataset (or fail if `t` was absent).
  plot(resid ~ chron, data = data)
}
# Draw a summary plot from a boxplot-stats object (`data` is expected to
# carry $stats, a 5 x 24 matrix of boxplot statistics per hour of day,
# and $conf, the 2 x 24 notch confidence bounds -- presumably the output
# of boxplot(..., plot = FALSE); TODO confirm against the caller).
# Rows of $stats: 1 lower whisker, 2 Q1, 3 median, 4 Q3, 5 upper whisker.
niceplot <- function(data,title){
plot(data$stats[3,], pch=19, ylim=c(-2.0, 4), main=title, xlab="Hour of the Day", ylab="Residual (seconds)", type="o", xaxt="n")
# relabel the x axis as hours 0..23
axis(1,seq(1,24,1), as.character(seq(0,23,1)))
lines(data$stats[4,])
lines(data$stats[2,])
lines(data$stats[5,], col="gray")
lines(data$stats[1,], col="gray")
lines(data$stats[3,], col="red")
lines(data$conf[1,], col="blue")
lines(data$conf[2,], col="blue")
# re-draw the median thicker and on top of everything else
lines(data$stats[3,], col="red", lw="2")
points(data$stats[3,], pch=19)
# NOTE(review): the legend is placed at y=10.5 while ylim is c(-2, 4),
# so it falls outside the plot region and will not be visible unless
# clipping is disabled -- confirm whether this is intentional.
legend(.5, 10.5, legend = c("Largest Non-outlier (< Median + 1.5*IQR)",
" 3rd Quartile",
" Upper 95% Confidence Interval",
" Median",
" Lower 95% Confidence Interval",
" 1st Quartile",
"Smallest Non-outlier (> Median - 1.5*IQR)"),
fill=c("gray", "black", "blue", "red", "blue", "black", "gray"), bg = "white")
}
# Same summary plot as niceplot(), but indexed by signal strength
# (x axis labelled -80 .. -98 dBm over 19 positions) instead of hour of
# day, with a wider y range and the legend at y=15.5 (inside ylim here).
niceplot2 <- function(data,title){
plot(data$stats[3,], pch=19,ylim=c(-4,18), main=title, xlab="Signal Strength (-dBm)", ylab="Residual (seconds)", type="o", xaxt="n")
#axis(1,seq(1,24,1), as.character(seq(0,23,1)))
axis(1,seq(1,19,1), as.character(seq(80,98,1)))
lines(data$stats[4,])
lines(data$stats[2,])
lines(data$stats[5,], col="gray")
lines(data$stats[1,], col="gray")
lines(data$stats[3,], col="red")
lines(data$conf[1,], col="blue")
lines(data$conf[2,], col="blue")
# re-draw the median thicker and on top of everything else
lines(data$stats[3,], col="red", lw="2")
points(data$stats[3,], pch=19)
legend(.5, 15.5, legend = c("Largest Non-outlier (< Median + 1.5*IQR)",
" 3rd Quartile",
" Upper 95% Confidence Interval",
" Median",
" Lower 95% Confidence Interval",
" 1st Quartile",
"Smallest Non-outlier (> Median - 1.5*IQR)"),
fill=c("gray", "black", "blue", "red", "blue", "black", "gray"), bg = "white")
}
|
23b60af539cad5cd5be840ebb6aa723c4e6604bf
|
94bacf8ae33f625e602140d254c11b6fe9edfbbc
|
/man/group.vect.Rd
|
8a819acb0d07902551a8febbb1be59256cfb47a5
|
[] |
no_license
|
cran/varmixt
|
7deb71f4f40c3fccc8fbd6e84a1327e31cfcb6a2
|
3a4d2d30de189eab78e3cdcec090f91c955a7e08
|
refs/heads/master
| 2021-01-22T01:28:33.623587
| 2005-06-17T00:00:00
| 2005-06-17T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 588
|
rd
|
group.vect.Rd
|
\name{group.vect}
\alias{group.vect}
\title{Extraction of the vector of the variance group of each gene}
\description{This function extracts the vector of the variance group of each gene.
A variance group is determined by the variance mixture model.
}
\usage{
group.vect(data)
}
\arguments{
\item{data}{gene expression data object}
}
\details{
The variance group labels are taken from the variance mixture model fit
stored in the gene expression data object (\code{data$stat2$group}).
}
\value{
A vector giving, for each gene, the index of the variance group to which
it was assigned by the mixture model.
}
\references{}
\author{Paul Delmar}
\note{}
\seealso{}
\examples{
## The function is currently defined as
function(data)
{
data$stat2$group
}
}
\keyword{internal}
|
88886f60a43746da1f8b7b35203558dfc4023395
|
5d690f159266b2c0f163e26fcfb9f9e17a0dc541
|
/envi/R/globals.R
|
b8ac621780280b22f26ba400df5555599e49a4e7
|
[] |
no_license
|
albrizre/spatstat.revdep
|
3a83ab87085895712d7109c813dcc8acb55493e9
|
b6fc1e73985b0b7ed57d21cbebb9ca4627183108
|
refs/heads/main
| 2023-03-05T14:47:16.628700
| 2021-02-20T01:05:54
| 2021-02-20T01:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20
|
r
|
globals.R
|
# Declare `k` as a known global so R CMD check does not flag "no visible
# binding for global variable 'k'" -- presumably `k` is bound by
# non-standard evaluation elsewhere in the package (e.g. a foreach/%dopar%
# loop variable); TODO confirm where `k` is actually used.
globalVariables("k")
|
b99156fdb115673c76850f97f7b41a051da6b4d4
|
71aaa0ee806fc83cc5fa7ddd136511bb4b117bfb
|
/first.R
|
6c266f2d175aa56567d0b17b599b93a1bb4c4906
|
[] |
no_license
|
IgorP17/R-folder
|
fe7317a51ea070ca7879fb86467dc127798b7f5a
|
948f23034dc7259c24627103cd6db6a06ec35d9e
|
refs/heads/master
| 2021-09-18T11:51:44.197522
| 2018-07-13T19:12:03
| 2018-07-13T19:12:03
| 125,160,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 51
|
r
|
first.R
|
# Minimal demo script: draw a bar chart of three hard-coded values.
my <- c(20,30,40)
barplot(my)
# NOTE(review): un-commenting the line below would make this file source
# itself recursively; keep it commented (it documents how to run the file).
#source("first.R")
|
75a8101e7bfb8e2653bc5171fe03e1a3b0188426
|
75a69dfdfd593dfecf931d497d48fcba90caf356
|
/R/normalise.R
|
99a884b34aee19b9618fe7414dcee830ffa9fc03
|
[] |
no_license
|
ashley-williams/phenoScreen
|
e05ee774ffe2fa4ae5eca1d23566610be6e00f7c
|
431c0e04aeca27ff76fbc603a51c68f5ee87dbf0
|
refs/heads/master
| 2020-04-25T18:07:40.122452
| 2018-11-22T15:26:39
| 2018-11-22T15:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,466
|
r
|
normalise.R
|
#' normalise against negative control
#'
#' Normalises every feature column against the plate's negative-control
#' wells: each feature value is combined (subtracted or divided, per
#' `method`) with the `average` of that feature over rows whose
#' `compound_col` equals `neg_control`. Metadata columns (identified via
#' `metadata_prefix`) are left untouched. Respects existing dplyr groups.
#'
#' @param data dataframe, can be a grouped dataframe
#' @param compound_col name of column containing compound information
#' @param neg_control name of the negative control compound in `compound_col`
#' @param method how to normalise, either "subtract" or "divide"
#' @param average average function
#' @param metadata_prefix string, prefix of metadata columns
#' @param ... extras arguments passed to average
#'
#' @return a dataframe of the same shape as `data` with feature columns
#'   normalised against the negative control
#'
#' @import dplyr
#' @importFrom stats median
#' @export
normalise <- function(data, compound_col,
neg_control = "DMSO", method = "subtract",
average = median, metadata_prefix = NULL, ...) {
# resolve defaults / helpers defined elsewhere in the package
metadata_prefix = get_metadata_prefix(metadata_prefix)
# %op% is `-` or `/` depending on `method` (errors on anything else)
`%op%` = set_operator(method)
feature_cols = get_feature_cols(data, metadata_prefix)
# capture the unevaluated column for tidy evaluation inside funs()
compound_col_ = enquo(compound_col)
# NOTE(review): funs() is deprecated in dplyr >= 0.8 (use list(~ ...)),
# and `(!!!compound_col_)` splices a single quosure where `!!` would be
# the usual unquote -- both work under the rlang version this package
# was written against; confirm before upgrading dplyr/rlang.
data %>%
mutate_at(
vars(feature_cols),
funs(. %op% average(.[(!!!compound_col_) == neg_control], ...)))
}
# alias for American spelling
normalize = normalise
# internal helper: translate a normalisation method name into its operator
set_operator <- function(method) {
  # Dispatch on the method string; anything unrecognised raises an error
  # so callers cannot silently fall through to a wrong normalisation.
  switch(
    method,
    divide = `/`,
    subtract = `-`,
    stop("Invalid method. Options: divide, subtract.",
         call. = FALSE)
  )
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.