blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4d81a1ce6195fd5c488e5a473de79ef4b4c0b37
|
89b48f1af10fe015001dd8b6adb92b75829f3cdc
|
/man/rtopKrige.Rd
|
1fd157144ca453a3313e4cb9f16e2a3bec2ad38b
|
[] |
no_license
|
cran/rtop
|
561ccd2863299ec410d28abaa99adf0792635367
|
df1ca98e050e57a4c3497d65fe96de3d98469a51
|
refs/heads/master
| 2023-04-06T23:24:15.866989
| 2023-03-31T17:10:02
| 2023-03-31T17:10:02
| 17,699,401
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,406
|
rd
|
rtopKrige.Rd
|
\name{rtopKrige}
\alias{rtopKrige}
\alias{rtopKrige.rtop}
\alias{rtopKrige.SpatialPolygonsDataFrame}
\alias{rtopKrige.STSDF}
\alias{rtopKrige.default}
\title{Spatial interpolation of data with spatial support}
\description{
rtopKrige performs spatial interpolation or cross validation of data with areal support.
}
\usage{
\method{rtopKrige}{rtop}(object, varMatUpdate = FALSE, params = list(), ...)
\method{rtopKrige}{SpatialPolygonsDataFrame}(object, predictionLocations = NULL,
varMatObs, varMatPredObs, varMat, params = list(),
formulaString, sel, ...)
\method{rtopKrige}{STSDF}(object, predictionLocations = NULL,
varMatObs, varMatPredObs, varMat, params = list(),
formulaString, sel, olags = NULL, plags = NULL,
lagExact = TRUE, ...)
\method{rtopKrige}{default}(object, predictionLocations = NULL,
varMatObs, varMatPredObs, varMat, params = list(),
formulaString, sel, wret = FALSE, ...)
}
\arguments{
\item{object}{object of class \code{rtop} or \code{\link[sp]{SpatialPolygonsDataFrame}} or \code{\link[spacetime]{STSDF}}}
\item{varMatUpdate}{logical; if TRUE, also existing variance matrices will
be recomputed, if FALSE, only missing variance matrices will be computed,
see also \code{\link{varMat}}}
\item{predictionLocations}{\code{\link[sp]{SpatialPolygons}} or \code{\link[sp]{SpatialPolygonsDataFrame}} or
\code{\link[spacetime]{STSDF}}
with prediction locations. NULL if cross validation is to be performed.}
\item{varMatObs}{covariance matrix of observations, where diagonal must consist
of internal variance, typically generated from call
to \code{\link{varMat}} }
\item{varMatPredObs}{covariance matrix between observation locations and
prediction locations, typically generated from call
to \code{\link{varMat}}}
\item{varMat}{list covariance matrices including the two above}
\item{params}{a set of parameters, used to modify the default parameters for
the \code{rtop} package, set in \code{\link{getRtopParams}}. Additionally,
it is possible overrule some of the parameters in \code{object$params} by passing
them as separate arguments.}
\item{formulaString}{formula that defines the dependent variable as a linear model
of independent variables, see e.g. \code{\link{createRtopObject}} for more details.}
\item{sel}{array of prediction location numbers, if only a limited number of locations are to be
interpolated/crossvalidated}
\item{wret}{logical; if TRUE, return a matrix of weights instead of the predictions,
useful for batch processing of time series, see also details}
\item{olags}{A vector describing the relative lag which should be applied for the observation locations. See also details}
\item{plags}{A vector describing the relative lag which should be applied for the predictionLocations. See also details}
\item{lagExact}{logical; whether differences in lagtime should be computed exactly or approximate}
\item{...}{from \code{rtopKrige.rtop}, arguments to be passed to
\code{rtopKrige.default}. In \code{rtopKrige.default},
parameters for modification of the object parameters or default parameters.
Of particular interest are \code{cv}, a logical for doing cross-validation,
\code{nmax}, and \code{maxdist} for maximum number of neighbours and
maximum distance to neighbours, respectively, and \code{wlim}, the limit for
the absolute values of the weights. It can also be useful to set \code{singularSolve} if some of the areas are almost similar, see also details below.}
}
\value{
If called with \code{\link[sp]{SpatialPolygonsDataFrame}}, the function returns a \cr
\code{\link[sp]{SpatialPolygonsDataFrame}} with predictions, either at the
locations defined in \cr
\code{predictionLocations}, or as leave-one-out
cross-validation predictions at the same locations as in object if
\code{cv = TRUE}
If called with an rtop-object, the function returns the same object with the
predictions added to the object.
}
\details{
This function is the interpolation routine of the rtop-package.
The simplest way of calling the function is with an rtop-object that
contains the fitted variogram model and all the other necessary data (see
\code{\link{createRtopObject}} or \code{\link{rtop-package}}).
The function will, if called with covariance matrices between observations
and between observations and prediction locations, use these for the interpolation.
If the function is called without these matrices, \code{\link{varMat}} will be
called to create them. These matrices can therefore be reused if necessary,
an advantage as it is computationally expensive to create them.
The interpolation that takes part within \code{rtopKrige.default} is based on
the semivariance matrices between observations and between observations and prediction
locations. It is therefore possible to use this function also to interpolate
data where the matrices have been created in other ways, e.g. based on distances
in physiographical space or distances along a stream.
The function returns the weights rather than the predictions if \code{wret = TRUE}.
This is useful for batch processing of time series, e.g. once the weights are
created, they can be used to compute the interpolated values for each time step.
rtop is able to take some advantage of multiple CPUs, which can be invoked with the
parameter \code{nclus}. When it gets a number larger than one, \code{rtopKrige} will start a cluster with \code{nclus} workers,
if the \code{\link{parallel}}-package has been installed.
The parameter \code{singularSolve} can be used when some areas are almost completely overlapping. In this case, the discretization of them might be equal, and the covariances to other areas will also be equal. The kriging matrix will in this case be singular. When \code{singularSolve = TRUE}, \code{rtopKrige} will remove one of the neighbours, and instead work with the mean of the two observations. An overview of removed neighbours can be seen in the resulting object, under the name \code{removed}.
Kriging of time series is possible when \code{observations} and \code{predictionLocations}
are spatiotemporal objects of type \code{\link[spacetime]{STSDF}}. The interpolation is
still spatial, in the sense that the regularisation of the variograms is just done
using the spatial extent of the observations, not a possible temporal extent, such as
done by Skoien and Bloschl (2007). However, it is possible to make predictions based on observations
from different time steps, through the use of the lag-vectors. These vectors describe a typical "delay"
for each observation and prediction location. This delay could for runoff related variables be similar
to travel time to each gauging location. For a certain prediction location, earlier time steps would be picked for neighbours with shorter travel time and later time steps for neighbours with slower travel times.
The lagExact parameter indicates whether to use a weighted average of two time steps, or just the time step which is closest to the difference in lag times.
The use of lag times should in theory increase the computation time, but might, due to different computation methods, even speed up the computation when the number of neighbours to be used (parameter nmax) is small compared to the number of observations. If computation is slow, it can be useful to test olags = rep(0, dim(observations)[1]) and similar for predictionLocations.
}
\references{
Skoien J. O., R. Merz, and G. Bloschl. Top-kriging - geostatistics on stream networks.
Hydrology and Earth System Sciences, 10:277-287, 2006.
Skoien, J. O. and G. Bloschl. Spatio-Temporal Top-Kriging of Runoff Time Series. Water Resources Research 43:W09419, 2007.
Skoien, J. O., Bloschl, G., Laaha, G., Pebesma, E., Parajka, J., Viglione, A., 2014. Rtop: An R package for interpolation of data with a variable spatial support, with an example from river networks. Computers & Geosciences, 67.
}
\author{ Jon Olav Skoien }
\examples{
\donttest{
# The following command will download the complete example data set
# downloadRtopExampleData()
# observations$obs = observations$QSUMMER_OB/observations$AREASQKM
rpath = system.file("extdata",package="rtop")
if (require("rgdal")) {
observations = readOGR(rpath,"observations")
predictionLocations = readOGR(rpath,"predictionLocations")
} else {
library(sf)
observations = st_read(rpath, "observations")
predictionLocations = st_read(rpath,"predictionLocations")
}
# Setting some parameters; nclus > 1 will start a cluster with nclus
# workers for parallel processing
params = list(gDist = TRUE, cloud = FALSE, nclus = 1, rresol = 25)
# Create a column with the specific runoff:
observations$obs = observations$QSUMMER_OB/observations$AREASQKM
# Build an object
rtopObj = createRtopObject(observations, predictionLocations,
params = params)
# Fit a variogram (function also creates it)
rtopObj = rtopFitVariogram(rtopObj)
# Predicting at prediction locations
rtopObj = rtopKrige(rtopObj)
# Cross-validation
rtopObj = rtopKrige(rtopObj,cv=TRUE)
cor(rtopObj$predictions$observed,rtopObj$predictions$var1.pred)
}
}
\keyword{spatial}
|
429239d5fd8f4de8e3f8f0b7355b51ef321cc0c6
|
fb326720436659033b773c7f8f416696357ff89b
|
/man/pkg-internal.Rd
|
ee61f83f861d1fc9b68beae6701a5b3bc35be65d
|
[] |
no_license
|
cran/Rsampletrees
|
82c63e9ae9e0655a74c00e22f0010f330aa89c03
|
c8ec580b900cbf0fe0f17c1222d9c194e7a573ba
|
refs/heads/master
| 2020-06-02T23:52:40.542064
| 2020-03-02T19:30:02
| 2020-03-02T19:30:02
| 30,210,374
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
rd
|
pkg-internal.Rd
|
\name{changeArgs.default}
\alias{checkfile}
\alias{defaultArgs}
\alias{setWeights}
\alias{summary.treeoutput}
\alias{treeapply}
\alias{writeArgs}
\alias{writeTreeoutput}
\alias{formatArgs}
\alias{changeArgs.default}
\docType{package}
\title{
Internal use functions.
}
\description{
NA
}
\usage{
NA
}
\examples{
\dontrun{
NA
}
}
\keyword{internal}
|
3f4198cc9f7f641221259ffbbc61e5326636e34b
|
5da812b8f96af78dd680df0993e4aeb3d2b87e96
|
/plot1.R
|
3a2bc200c3662607fd041152fbf77b08d03ec777
|
[] |
no_license
|
monzua88/ExploratoryDataAnalysisProject1
|
2ebf855b68f4ce9f8bb21930435a1cf88d2b6ca3
|
bd89df8a9030e8752c3c5e9f93ef57d1229273f0
|
refs/heads/master
| 2021-07-09T15:10:44.432338
| 2017-10-10T16:27:45
| 2017-10-10T16:27:45
| 106,442,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 635
|
r
|
plot1.R
|
# Plot 1: histogram of household global active power ----------------------

# Read the full power-consumption dataset; '?' marks missing values and the
# row count is capped at the known file length.
power_data <- read.csv(
  "household_power_consumption.txt",
  header = TRUE,
  sep = ";",
  na.strings = "?",
  nrows = 2075259,
  check.names = FALSE,
  stringsAsFactors = FALSE,
  comment.char = "",
  quote = '\"'
)

# Keep only the first two days of February 2007.
subset_energy_dates <- subset(power_data, Date %in% c("1/2/2007", "2/2/2007"))

# Draw the histogram (title, axis labels, red bars) on the screen device,
# then copy it to a 480x480 PNG and close the PNG device.
hist(
  subset_energy_dates$Global_active_power,
  main = "Global Active Power",
  xlab = "Global Active Power [kilowatts]",
  ylab = "Frequency",
  col = "Red"
)
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
|
1e3fd53c7fa510f70109a19ad73e0b604c823f0b
|
a79c4eefeafac8bbcf0c12cadc620398590a1a07
|
/app.R
|
f7044a2d0ac47344e8428e52cc751bdb7e134a7b
|
[] |
no_license
|
tnguyenfarm/playership
|
d612a221a5f298b8e79b58b1725c198f3c75ef9b
|
697e5906ef91dd8924ec8b64098cdd0d2f584975
|
refs/heads/master
| 2022-11-14T04:59:22.813989
| 2020-06-25T13:57:34
| 2020-06-25T13:57:34
| 268,881,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,363
|
r
|
app.R
|
# Playership demographic analysis
# Author: Tri Nguyen
# Date: 6/3/2020
#
# Shiny app summarising active-player counts per California zipcode against
# demographic factors, read from the pre-extracted players.csv.
library(shiny)
# library(RODBC)
#
# db <- DBI::dbConnect(odbc::odbc(),
# Driver = "SQL Server",
# Server = "tst-biba-dw",
# Database = "FinbidwDB",
# Trusted_Connection = "True")
# Port = 1433
# This is the Playership database query (kept for reference; the app reads
# the pre-extracted players.csv below instead of querying the database).
sql<-"with Players (playerzip,activeplayersperzip) as
(SELECT
playerzip,count(*)
from finstagedb.[dbo].[StgPlayer]
WHERE
PlayerState='ca'
AND PlayerLastLoginTime > DATEADD(MONTH,6,PlayerCreateDate)
group by playerzip
)
select [ZIP]
,activeplayersperzip
,[AREA]
,[POPcurrent]
,[POPplus5]
,[#OFBUSS]
,[POPSQMI]
,[DAYPOP]
,[URBANPCT]
,[RURALPCT]
,[WHITECOLLAR]
,[18-24%]
,[25-34%]
,[35-44%]
,[45-54%]
,[55-64%]
,[65+%]
,[INCOMEMED]
,[MALEPOP]
,[FEMALEPOP] from Players
join [FinbidwDB].[dbo].[CAZips4Web$] D
on d.zip=Players.playerzip
"
# query <- DBI::dbGetQuery(db, sql)
players <- read.csv('players.csv')
#regressor = lm(formula = activeplayersperzip ~ .,data = players)

# UI: a dropdown selecting which players column (by numeric index) to
# summarise and plot.
ui <- fluidPage(
  titlePanel("Summary of Playership per Zipcode"),
  sidebarLayout(
    sidebarPanel(
      # 'choices' spelled out (the original 'choice=' worked only via
      # partial argument matching).
      selectInput("var", label = "Choose a Demographic Factor",
                  choices = c("Active Players" = 2,
                              "CurrentPopulation" = 4,
                              "ProjectedPopulation" = 5,
                              "Number of Businesses" = 6),
                  selectize = FALSE)
    ),
    mainPanel(
      h2("Summary Factor"),
      verbatimTextOutput("sum"),
      plotOutput("box")
    )
  )
)

# Server: text summary and histogram of the selected column.
server <- function(input, output)
{
  output$sum <- renderPrint({
    summary(players[, as.numeric(input$var)])
  })
  output$box <- renderPlot({
    # BUG FIX: the original computed x <- summary(column) and then called
    # hist(x), i.e. a histogram of the six summary statistics
    # (min/quartiles/mean/max) rather than of the data. Plot the selected
    # column directly.
    hist(players[, as.numeric(input$var)],
         col = "sky blue", border = "purple",
         main = names(players[as.numeric(input$var)]))
    #summarizes the stats
    #writes to csv how the model is performing
    #batch jobs scheduled weekly after the db refresh
  })
}
shinyApp(ui = ui, server = server)
|
3412150f286e59a00a94bed4e59f462656bea64c
|
41b960b564f4a7021d7e219641fa655efda3ec2d
|
/precip/drizzle/dry_month.R
|
05a49e2d3f8232c59a95f9ad19ad676731effd41
|
[] |
no_license
|
diana-lucatero/calibMET
|
5613a87b9c29456d8f0a1349e284115ecacc7e87
|
a6f8f4b6e36b84afbb74b73e53cc2fbe9cde9b74
|
refs/heads/master
| 2020-07-06T10:19:18.426955
| 2016-08-20T18:43:02
| 2016-08-20T18:43:02
| 66,144,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
dry_month.R
|
## Percentage of dry days (obs == 0) per month.
##
## obs:        matrix of daily observations, one row per year; columns are
##             days-of-year ordered by calendar month (assumes row iy spans
##             the full year iy -- TODO confirm with callers).
## nyears:     number of years covered by obs.
## start_year: first calendar year of the record. Defaults to 1990, the
##             value the original version hard-coded; it matters for
##             getting month lengths right in leap years.
##
## Returns an nyears x 12 matrix of percentages.
dry_month <- function(obs, nyears, start_year = 1990){
  ## Compute start and end day-of-year of each month.
  ## BUG FIX: the original hard-coded seq(as.Date("1990/1/1"),
  ## as.Date("2014/12/1"), by = "month") -- always 300 months -- so for any
  ## nyears != 25 the matrix() call silently recycled or truncated the month
  ## lengths. The sequence length now follows nyears (identical result for
  ## the original nyears = 25, start_year = 1990 case).
  first_month <- as.Date(paste0(start_year, "/1/1"))
  days <- days_in_month(seq(from = first_month, by = "month", length.out = nyears * 12))
  days <- matrix(days, nrow = nyears, ncol = 12, byrow = TRUE)
  s1 <- t(apply(days, 1, cumsum))  # last day-of-year of each month
  s0 <- s1 - (days - 1)            # first day-of-year of each month
  ## Percentage of dry days per (year, month) cell.
  dry_p <- array(NaN, dim = c(nyears, 12))
  for (im in 1:12){
    dry_d <- sapply(1:nyears, function(iy) sum(obs[iy, s0[iy, im]:s1[iy, im]] == 0) / days[iy, im])
    dry_p[, im] <- dry_d * 100
  }
  return(dry_p) ## An nyears x 12 matrix with percentages
}
|
c4f85c43752976d44dd1d2a8debe2503fbc9692e
|
cb5f4c98aabcc0c1347c04ae610cf3267dfaac3d
|
/R/find.marg.signal.R
|
4bf3a34d6680c2676af7f392678509858e0a6216
|
[] |
no_license
|
melinabalabala/ARTP2
|
f4e4080097e027aaff8d314982e3f2dd1881bd20
|
b2b97a2c90cad852039b34364f32f3bb50ce722c
|
refs/heads/master
| 2022-02-18T20:26:39.397301
| 2019-08-15T21:39:12
| 2019-08-15T21:39:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,830
|
r
|
find.marg.signal.R
|
# find.marg.signal
#
# Meta-analyse per-study summary statistics to find SNPs with marginal
# association signals, then return every SNP lying within a window of a
# signal SNP so the caller can remove them.
#
# Arguments:
#   sum.stat    - list with 'stat' (one table per study with columns SNP,
#                 RefAllele, EffectAllele, BETA, SE) and 'snps.in.study'
#                 (vector of SNP IDs). Consumed and removed to save memory.
#   allele.info - table with columns SNP, RefAllele, EffectAllele, Chr, Pos
#                 giving the reference allele coding and SNP locations.
#   options     - list; uses $print (logical), $min.marg.p (p-value
#                 threshold) and $window (position window around signals).
#
# Returns: a data frame (Chr, SNP, Pos, comment) of SNPs within
#   options$window of any marginally significant SNP, or NULL if none.
find.marg.signal <- function(sum.stat, allele.info, options){
msg <- paste("Removing SNPs close to marginal signals:", date())
if(options$print) message(msg)
stat <- sum.stat$stat
all.snp <- sort(sum.stat$snps.in.study)
# Free the (potentially large) input list as early as possible.
rm(sum.stat)
gc()
nstudy <- length(stat)
nsnp <- length(all.snp)
# Accumulators for the fixed-effect inverse-variance meta-analysis:
# BETA collects sum(beta_i / se_i^2), SE collects sum(1 / se_i^2).
BETA <- rep(0, nsnp)
SE <- rep(0, nsnp)
names(BETA) <- all.snp
names(SE) <- all.snp
# Reference allele coding, indexed by SNP name for fast lookup below.
RefAllele <- allele.info$RefAllele
EffectAllele <- allele.info$EffectAllele
names(RefAllele) <- allele.info$SNP
names(EffectAllele) <- allele.info$SNP
for(i in 1:nstudy){
s <- stat[[i]][, 'SNP']
# Flip the effect sign when a study's allele coding disagrees with the
# reference coding in allele.info.
stat[[i]]$sgn <- ifelse(stat[[i]][, 'RefAllele'] == RefAllele[s] & stat[[i]][, 'EffectAllele'] == EffectAllele[s], 1, -1)
# lambda has already been adjusted for SE in complete.sum.stat()
# SE has already been added to stat in complete.sum.stat()
BETA[s] <- BETA[s] + stat[[i]][, 'sgn'] * stat[[i]][, 'BETA']/stat[[i]][, 'SE']^2
SE[s] <- SE[s] + 1/stat[[i]][, 'SE']^2
}
# Convert the accumulators into the pooled SE and effect estimate, then a
# 1-df Wald chi-square p-value per SNP.
SE <- sqrt(1/SE)
BETA <- BETA * SE^2
P <- pchisq(BETA^2/SE^2, df = 1, lower.tail = FALSE)
names(P) <- names(BETA)
region <- NULL
if(any(P <= options$min.marg.p)){
# SNPs reaching the marginal-significance threshold, with their positions.
idx.snp <- names(P)[P <= options$min.marg.p]
idx.snp <- allele.info[allele.info$SNP %in% idx.snp, c('Chr', 'SNP', 'Pos')]
region <- NULL
# Collect all SNPs on the same chromosome within +/- options$window of
# each signal SNP, annotating each with the signal it is close to.
for(i in 1:nrow(idx.snp)){
chr <- idx.snp$Chr[i]
pos <- idx.snp$Pos[i]
ai <- allele.info[allele.info$Chr == chr, c('Chr', 'SNP', 'Pos')]
tmp <- ai[ai$Pos >= pos - options$window & ai$Pos <= pos + options$window, ]
tmp$comment <- paste0('Close to ', idx.snp$SNP[i], ' (P = ', formatC(P[idx.snp$SNP[i]], digits=0, format='E'), ')')
region <- rbind(region, tmp)
rm(ai)
}
# A SNP can fall inside several windows; keep its first occurrence only.
region <- region[!duplicated(region$SNP), ]
}
region
}
|
3540e6202a33f9ffd4ff086ccec766ef765d6e55
|
0466d855d7463cad22e2492adf9fadd10038879a
|
/man/getCCM.Rd
|
a03549cbf633f493d124d39e8bb7f70e5ab35dc0
|
[] |
no_license
|
cran/ccmEstimator
|
d337149ba1b2f0db1aba7af17d5951fc69e0c609
|
3d6e3383c6c89c8f0d4a0bee7df926ab36ed264a
|
refs/heads/master
| 2023-08-11T01:06:45.392123
| 2021-09-28T11:20:02
| 2021-09-28T11:20:02
| 411,340,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,927
|
rd
|
getCCM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCCM.R
\name{getCCM}
\alias{getCCM}
\title{Comparative Causal Mediation Analysis}
\usage{
getCCM(Y,T1,T2,M,data = NULL,
noInteraction = TRUE,sigLevel = 0.05,
boots = 1000)
}
\arguments{
\item{Y}{numeric outcome variable. Should be a vector if a data frame is not provided through the \code{data} argument, or the ("character") name of the variable in the data frame if provided.}
\item{T1}{binary indicator for first treatment. Should be a vector if a data frame is not provided through the \code{data} argument, or the ("character") name of the variable in the data frame if provided.}
\item{T2}{binary indicator for second treatment. Should be a vector if a data frame is not provided through the \code{data} argument, or the ("character") name of the variable in the data frame if provided.}
\item{M}{numeric mediator variable. Should be a vector if a data frame is not provided through the \code{data} argument, or the ("character") name of the variable in the data frame if provided.}
\item{data}{an optional data frame containing the variables to be used in analysis.}
\item{noInteraction}{logical. If \code{TRUE} (the default), the assumption of no interaction between the treatments and mediator is employed in the analysis.}
\item{sigLevel}{significance level to use in construction of confidence intervals. Default is 0.05 (i.e. 95 percent confidence intervals).}
\item{boots}{number of bootstrap resamples taken for construction of confidence intervals.}
}
\value{
A \code{ccmEstimation} object, which contains the estimates and confidence intervals for the two comparative causal mediation analysis estimands, as well as the ATE and ACME for each treatment.
Note, however, that the individual ACME estimates are reported only for descriptive purposes, as the comparative causal mediation analysis methods are not designed to produce unbiased or consistent estimates of the individual ACMEs (see Bansak 2020 for details). Users should consider alternative methods if interested in individual ACME estimates.
User should input the \code{ccmEstimation} object into the \code{summary()} function to view the estimation results.
Note also that the comparative causal mediation analysis results and interpretation of the results
will be printed in the console.
}
\description{
Function to perform comparative causal mediation analysis to compare the mediation effects of different treatments via a common mediator.
Function requires two separate treatments (as well as a control condition) and one mediator.
}
\details{
Function will automatically assess the comparative causal mediation analysis scope conditions
(i.e. for each comparative causal mediation estimand, a numerator and denominator that are both estimated with the desired statistical significance and of the same sign).
Results will be returned for each comparative causal mediation estimand only if scope conditions are met for it.
See "Scope Conditions" section in Bansak (2020) for more information.
Results will also be returned for the ATE and ACME for each treatment.
If \code{noInteraction = TRUE} (the default setting), function will automatically assess the possibility of interactions between treatments and mediator and return a warning in case evidence of such interactions are found.
}
\examples{
#Example from application in Bansak (2020)
data(ICAapp)
set.seed(321, kind = "Mersenne-Twister", normal.kind = "Inversion")
ccm.results <-
getCCM(Y = "dapprp", T1 = "trt1", T2 = "trt2", M = "immorp", data = ICAapp,
noInteraction = TRUE, sigLevel = 0.05, boots = 1000)
summary(ccm.results)
}
\references{
Bansak, K. (2020). Comparative causal mediation and relaxing the assumption of no mediator-outcome confounding: An application to international law and audience costs. Political Analysis, 28(2), 222-243.
}
\author{
Kirk Bansak and Xiaohan Wu
}
|
dd710366d594af61caeb9ffc8530334fd7d53f66
|
6de0009ee8554669ec9dab10b50ef60631886e5e
|
/LGAtoLHD.R
|
c2a560950de7fa3f5ce1c9267c050d05f13fe7f8
|
[] |
no_license
|
JKaur1992/STD_AT2
|
8bfc48af62e344407029404445e95c625bb327ce
|
b22f6f93118cfaa32c0efb2f1283688ba010c1c9
|
refs/heads/master
| 2020-05-16T16:40:50.883337
| 2019-05-27T08:38:40
| 2019-05-27T08:38:40
| 183,170,013
| 3
| 5
| null | 2019-05-20T12:59:55
| 2019-04-24T07:12:34
|
R
|
UTF-8
|
R
| false
| false
| 1,118
|
r
|
LGAtoLHD.R
|
# Build lookup lists of NSW Local Government Areas (LGAs) and Local Health
# Districts (LHDs) so they can be mapped to each other by hand.

library("tidyverse")
library("data.table")
library(readr)
library(readxl)

# LGA/postcode mappings, restricted to New South Wales.
lga_map <- read_csv("Australia_lga_postcode_mappings_2016.csv") %>%
  filter(State == "New South Wales")

# Alcohol-attributable hospitalisation trend data, dropping rows without a year.
alcohol_hospitalisations <- read_csv("beh_alcafhos_lhn_trend.csv") %>%
  filter(year != "")

# Unique LGA and LHD names.
LGAs <- unique(lga_map$`LGA region`)
LHDs <- unique(alcohol_hospitalisations$`Local Health Districts`)

all_LGA <- data.table(LGA = LGAs)
all_LHDs <- data.table(LHD = LHDs)

# Export both lists to .csv files for the manual mapping step.
write_csv(all_LGA, path = "all_LGAs.csv")
write_csv(all_LHDs, path = "all_LHDs.csv")

# The LGA -> LHD mapping is then built manually using the NSW government
# health site https://www.health.nsw.gov.au/lhd/Pages/nbmlhd.aspx,
# particularly the pdf map at
# https://www.health.nsw.gov.au/lhd/Documents/lhd-wall-map.pdf.
# For future - find a way to do this by scraping the data somehow!!

# Read back in the manually created mapping file.
LGA_LHD_Map <- read_excel("LGAtoLHD.xlsx")
|
9691bb22a5f1344a89610d41ce378538532466da
|
86a30d3ca8e3bbf0bf77ec7e856596e619b9d70b
|
/code/scripts_joel_PC/data_pre_processing/make_mixed_data_no_clusters.R
|
6b4dcfb5dc0449017a50279be51a41582bf36917
|
[] |
no_license
|
EpiCompBio/Barracudas
|
c8692874a7565d3703d9f55dfdec729339f195d7
|
2bc606f3cfd8eabab900fdf22c35295ddf27efd2
|
refs/heads/master
| 2020-04-23T23:01:53.827985
| 2019-05-07T00:39:37
| 2019-05-07T00:39:37
| 171,521,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
make_mixed_data_no_clusters.R
|
################################################################################
# LOADING LIBRARIES
################################################################################
# Load each requested package, installing any that are missing first.
# NOTE(review): installing packages at runtime is a side effect; kept as-is.
using<-function(...) {
libs<-unlist(list(...))
# require() returns FALSE (rather than erroring) for packages not installed.
req<-unlist(lapply(libs,require,character.only=TRUE))
need<-libs[req==FALSE]
if(length(need)>0){
install.packages(need)
lapply(need,require,character.only=TRUE)
}
}
# MASS is loaded but not referenced below -- presumably kept for related
# scripts; verify before removing.
using("MASS")
################################################################################
# WORKING DIRECTORY AND SOURCING FUNCTIONS
################################################################################
# NOTE(review): hard-coded, machine-specific working directory; the relative
# output path in saveRDS() below depends on it.
setwd("C:/Users/JOE/Documents/Imperial College 2018-2019/Translational Data Science/Barracudas")
# Alternative seeds tried during development:
#15
#5
#25
set.seed(15)
# Simulate 3000 rows: three continuous columns, three binary columns and one
# categorical column. cbind() of mixed types produces a character matrix, so
# every column starts out as character in the data frame; the numeric columns
# are converted back below (the binary/categorical columns stay character).
mixed_data=as.data.frame(cbind(rnorm(n=3000, mean = 0, sd = 3),
rnorm(n=3000, mean = 1, sd = 1),
rnorm(n=3000, mean = 3, sd = 2),
rbinom(n=3000,prob=.4,size=1),
rbinom(n=3000,prob=.7,size=1),
rbinom(n=3000,prob=.5,size=1),
sample(c("Category1","Category2","Category3"),3000, replace=TRUE, prob=c(0.4, 0.3, 0.3)))
)
# Restore the first three (continuous) columns to numeric.
for (k in 1:3) {
mixed_data[,k]=as.numeric(as.character(mixed_data[,k]))
}
colnames(mixed_data)=c("Cont1","Cont2","Cont3","Binary1","Binary2","Binary3","Cat1")
saveRDS(mixed_data,"../data/processed_example_NO_clustering/example_mixed_data_NO_clustering.rds")
|
1c817a71a83f32007701c33d913a2fad707fbe7b
|
ca673b7b73f66a951e5de6850f49bd3bc25f0277
|
/R/do.region.analysis.R
|
305b7d9c91e88caca112031e99de12c6dd58a7cb
|
[
"MIT"
] |
permissive
|
tomashhurst/SpectreMAP
|
a810a65df77dea2ac08150386833fc37ef88a03c
|
6510a655f2ac2846b8057d657d02b435c8c7cb79
|
refs/heads/master
| 2023-06-02T10:45:41.864503
| 2021-06-17T23:36:57
| 2021-06-17T23:36:57
| 289,188,867
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,199
|
r
|
do.region.analysis.R
|
#' do.region.analysis
#'
#' For each sample, cross-tabulate cell populations against spatial regions
#' and derive four wide-format summaries per sample: raw counts per region,
#' counts per region area (cells per 100 um^2), the distribution of each
#' population across regions (row percentages), and the composition of each
#' region (column percentages). All four are column-bound into one row per
#' sample and stacked across samples.
#'
#' @param dat data.table of cells with (at least) the sample, population and
#'   region columns named below.
#' @param sample.col Character; column of \code{dat} identifying the sample.
#' @param pop.col Character; column of \code{dat} with the population label.
#' @param region.col Character; column of \code{dat} with the region label.
#' @param area.table table with one row per sample (keyed by
#'   \code{sample.col}) and one column per region giving the region's area.
#'   Units assumed such that count/area*10000 = cells per 100 um^2 -- TODO
#'   confirm.
#' @param func Currently unused -- kept for interface compatibility.
#'
#' @return data.table with one row per sample and one column per
#'   (measure, region, population) combination.
#'
#' @import data.table
#'
#' @export
do.region.analysis <- function(dat,
sample.col,
pop.col,
region.col,
area.table,
func = 'mean'){
### Test data
# dat <- cell.dat
# sample.col <- "Patient"
# pop.col <- "cell_type_annot"
# region.col <- "regions_annot"
#
# area.table <- area.res
#
# func <- 'mean'
### Function for NAs
###################### NA to 0 ###################
# Replace NA with 0 in-place (data.table::set mutates by reference, so the
# caller's table is modified and no value needs to be returned).
do.rmv.na = function(dat) {
# either of the following for loops
# by name :
for (j in names(dat))
set(dat,which(is.na(dat[[j]])),j,0)
# or by number (slightly faster than by name) :
# for (j in seq_len(ncol(dat)))
# set(dat,which(is.na(dat[[j]])),j,0)
}
################################################
### Setup
pops <- unique(dat[[pop.col]])
samples <- unique(dat[[sample.col]])
regions <- unique(dat[[region.col]])
### Counts of pops per region in each sample
all.counts <- data.table()
for(i in samples){
# i <- samples[[1]]
## Subset sample data
samp.dat <- dat[dat[[sample.col]] == i,]
## Preparation
samp.counts <- as.data.table(regions)
names(samp.counts)[1] <- "REGION"
## Loop for each population across regions
# reg.res becomes a populations x regions count table, one region column
# added per iteration; missing combinations are set to 0.
reg.res <- as.data.table(pops)
names(reg.res)[1] <- "POPULATIONS"
for(a in regions){
# a <- regions[[1]]
reg.dat <- samp.dat[samp.dat[[region.col]] == a,]
counts <- reg.dat[, .(count = .N), by = pop.col]
names(counts)[1] <- "POPULATIONS"
names(counts)[2] <- a
reg.res <- do.add.cols(reg.res, "POPULATIONS", counts, "POPULATIONS")
do.rmv.na(reg.res)
}
## Additional versions -- distribution and composition
# Type A -- for each cell type, where is it located (row proportions) -- DISTRIBUTION
a.res <- data.table()
for(o in c(1:nrow(reg.res))){
# o <- 1
nme <- reg.res[o,1]
rw.ttl <- sum(reg.res[o,-1])
res <- reg.res[o,-1]/rw.ttl
res <- res*100
a.res <- rbind(a.res, cbind(nme, res))
rm(o)
rm(nme)
rm(rw.ttl)
rm(res)
}
# Rows with a zero total divide to NaN/NA; reset those to 0.
do.rmv.na(a.res)
a.res
# Type B -- for each region, what cells are in it (column proportions) -- COMPOSITION
b.res <- as.data.table(reg.res[,1])
for(o in c(2:length(names(reg.res)))){
# o <- 2
nme <- names(reg.res)[o]
col.ttl <- sum(reg.res[,..o])
res <- reg.res[,..o]/col.ttl
res <- res*100
b.res <- cbind(b.res, res)
rm(o)
rm(nme)
rm(col.ttl)
rm(res)
}
do.rmv.na(b.res)
b.res
## Wrap up
# Each summary below is melted to long form, given a descriptive "measure"
# label per (region, population) pair, then recast to a single wide row.
# COUNTS
reg.res
reg.res.long <- melt(setDT(reg.res), id.vars = c("POPULATIONS"), variable.name = "REGION")
reg.res.long
reg.res.long.new <- data.table()
reg.res.long.new$measure <- paste0("Cell counts in ", reg.res.long$REGION, " - ", reg.res.long$POPULATIONS, " | Cells per region")
reg.res.long.new$counts <- reg.res.long$value
reg.res.long.new
reg.res.long.new <- dcast(melt(reg.res.long.new, id.vars = "measure"), variable ~ measure)
reg.res.long.new$variable <- i
names(reg.res.long.new)[1] <- sample.col
# COUNTS / AREA
# Divide each region's counts by that region's area for this sample.
reg.res.by.area <- reg.res
for(u in regions){
# u <- regions[[1]]
ar <- area.table[area.table[[sample.col]] == i, u, with = FALSE]
ar <- ar[[1]]
reg.res.by.area[[u]] <- reg.res.by.area[[u]] / ar * 10000 # per 100 um^2
}
reg.res.area.long <- melt(setDT(reg.res.by.area), id.vars = c("POPULATIONS"), variable.name = "REGION")
reg.res.area.long
reg.res.area.long.new <- data.table()
reg.res.area.long.new$measure <- paste0("Cells per area in ", reg.res.area.long$REGION, " - ", reg.res.area.long$POPULATIONS, " | Cells per 100 um^2 of region")
reg.res.area.long.new$counts <- reg.res.area.long$value
reg.res.area.long.new
reg.res.area.long.new <- dcast(melt(reg.res.area.long.new, id.vars = "measure"), variable ~ measure)
# Drop the sample column here (and below): it is already the first column
# of reg.res.long.new, and the four wide tables are cbind-ed at the end.
reg.res.area.long.new$variable <- NULL
# DISTRIBUTION
a.res
a.res.long <- melt(setDT(a.res), id.vars = c("POPULATIONS"), variable.name = "REGION")
a.res.long
a.res.long.new <- data.table()
a.res.long.new$measure <- paste0("Distribution of ", a.res.long$POPULATIONS, " in ", a.res.long$REGION, " | Percent of cell type in sample")
a.res.long.new$counts <- a.res.long$value
a.res.long.new
a.res.long.new <- dcast(melt(a.res.long.new, id.vars = "measure"), variable ~ measure)
a.res.long.new$variable <- NULL
# a.res.long.new$variable <- i
# names(a.res.long.new)[1] <- sample.col
# COMPOSITION
b.res
b.res.long <- melt(setDT(b.res), id.vars = c("POPULATIONS"), variable.name = "REGION")
b.res.long
b.res.long.new <- data.table()
b.res.long.new$measure <- paste0("Composition of ", b.res.long$REGION, " - ", b.res.long$POPULATIONS, " | Percent of cells in region")
b.res.long.new$counts <- b.res.long$value
b.res.long.new
b.res.long.new <- dcast(melt(b.res.long.new, id.vars = "measure"), variable ~ measure)
b.res.long.new$variable <- NULL
#b.res.long.new$variable <- i
#names(b.res.long.new)[1] <- sample.col
## Adjustments to add it to complete dataset
# long.temp <- melt(setDT(reg.res), id.vars = c("POPULATIONS"), variable.name = "REGION")
# long.temp
#
# long <- data.table()
# long$measure <- paste0(long.temp$POPULATIONS, " in ", long.temp$REGION)
# long$counts <- long.temp$value
#
# long <- dcast(melt(long, id.vars = "measure"), variable ~ measure)
# long$variable <- i
# names(long)[1] <- sample.col
## Add to 'add.counts'
# One wide row per sample: counts | counts-per-area | distribution | composition.
all.counts <- rbind(all.counts, cbind(reg.res.long.new, reg.res.area.long.new, a.res.long.new, b.res.long.new))
# rm(reg.res.long.new)
# rm(a.res.long.new)
# rm(b.res.long.new)
message("... Sample ", i, " complete")
}
return(all.counts)
}
|
b5d68f3e3eb6296ad97a7ae9b3ce28f3093bc747
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.analytics/man/kafkaconnect_create_custom_plugin.Rd
|
d84210df7a1f8ee5346c3a768bf5ac4ac306ee56
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 821
|
rd
|
kafkaconnect_create_custom_plugin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kafkaconnect_operations.R
\name{kafkaconnect_create_custom_plugin}
\alias{kafkaconnect_create_custom_plugin}
\title{Creates a custom plugin using the specified properties}
\usage{
kafkaconnect_create_custom_plugin(
contentType,
description = NULL,
location,
name
)
}
\arguments{
\item{contentType}{[required] The type of the plugin file.}
\item{description}{A summary description of the custom plugin.}
\item{location}{[required] Information about the location of a custom plugin.}
\item{name}{[required] The name of the custom plugin.}
}
\description{
Creates a custom plugin using the specified properties.
See \url{https://www.paws-r-sdk.com/docs/kafkaconnect_create_custom_plugin/} for full documentation.
}
\keyword{internal}
|
6e6f7fab97c8c661838cc85e0b9447c377c1e0ea
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mathgraph/examples/sort.mathgraph.Rd.R
|
94183e8f90612f53f6646932419161149f2df65d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 292
|
r
|
sort.mathgraph.Rd.R
|
# Extracted example for mathgraph::sortmathgraph (generated from the .Rd file).
library(mathgraph)
### Name: sortmathgraph
### Title: Sort a Mathematical Graph
### Aliases: sortmathgraph
### Keywords: math
### ** Examples
# Combine two formula-specified graphs, then sort; the node=/edge= flags
# presumably toggle node- and edge-sorting separately -- see ?sortmathgraph.
jjmg <- c(mathgraph(~ 4:2 * 1:3), mathgraph(~ 3:5 / 1:3))
sortmathgraph(jjmg)
sortmathgraph(jjmg, node=FALSE)
sortmathgraph(jjmg, edge=FALSE)
|
d1be6c01a5849f3fed1123fe305df2c42198ab7a
|
5e4dd55f765889ea962f81767aa7154c5c930f8a
|
/man/Plot.Stability.Rd
|
83a184dad412050fa5587a6484da3f5a04c69be0
|
[
"MIT"
] |
permissive
|
antonio-pgarcia/RRepast
|
84dd5634437d75e1431ecfabb484abe0a5573f19
|
0f4e2c35f5dc8abbc2a46818cdf1e61f280b1129
|
refs/heads/master
| 2021-01-17T06:48:40.924107
| 2020-02-18T15:36:23
| 2020-02-18T15:36:23
| 44,882,973
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 512
|
rd
|
Plot.Stability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rrepast-plots.R
\name{Plot.Stability}
\alias{Plot.Stability}
\title{Plot stability of output}
\usage{
Plot.Stability(obj, title = NULL)
}
\arguments{
\item{obj}{An instance of Morris Object \code{\link{AoE.Morris}}}
\item{title}{Chart title, may be null}
}
\value{
The resulting ggplot2 plot object
}
\description{
Generate a plot for visually assessing the stability of the
coefficient of variation as a function of simulation sample size.
}
|
407ba341fa3160b45d7d55cedf2acdc460599f14
|
c21a7b8017a9066be15912188756c1e79ba480a3
|
/tests/testthat/test-utils.R
|
7e1192dfbd1719e697f7abe45ea8a852056a14e8
|
[
"MIT"
] |
permissive
|
LCBC-UiO/eprimeParser
|
dbd9f429da034cb2ea95ae7d4d3f27cba101bb08
|
0ceb471b16bb4d7450af4dfec00b38a97a9c6359
|
refs/heads/main
| 2022-10-27T12:58:55.651414
| 2019-10-04T14:25:57
| 2019-10-04T14:25:57
| 164,306,571
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,418
|
r
|
test-utils.R
|
test_that("date change works", {
expect_equal(date_change("03-12-2019"),
structure(17967, class = "Date"))
expect_equal(date_change("12-24-2019"),
structure(18254, class = "Date"))
})
test_that("stopping without message works", {
expect_error(stop_quietly())
})
test_that("adding leading zeroes works", {
expect_equal(leading_zero(3, 4),
"0003")
expect_equal(leading_zero("01", 2),
"01")
expect_equal(leading_zero("000009", 3),
"009")
})
test_that("adding leading zeroes works", {
expect_equal(fix_ids(1100333, orig_path = NULL),
1100333)
expect_error(fix_ids(333, orig_path = NULL),
"CrossProject_ID")
expect_equal(fix_ids(333, orig_path = "some/path/HUK/data"),
1100333)
expect_equal(fix_ids(333, orig_path = "some/path/nevrodev/data"),
1000333)
expect_equal(fix_ids(333, orig_path = "some/path/NCP/data"),
1200333)
})
# File changing ----
task <- "Antisaccade"
out_dir <- "test_folder"
in_dir <- "test_files/"
# in_dir <- "tests/testthat/test_files/"
# Reset the output folder and rebuild the shared fixtures used by the
# file-handling tests below: the MOAS table read from <in_dir>/moas.tsv,
# plus the log templates and folder paths for `task` under `out_dir`.
# NOTE(review): relies on package-internal eprime_setup_logs() /
# eprime_setup_paths(); their return shapes are not visible here.
setup_tests <- function(task, in_dir, out_dir){
unlink(out_dir, recursive = TRUE)
MOAS <- readr::read_tsv(paste0(in_dir, "moas.tsv"))
logs <- eprime_setup_logs(out_dir, task, quietly = TRUE)
paths <- eprime_setup_paths(out_dir, task, logs, quietly = TRUE)
list(MOAS = MOAS, paths = paths, logs = logs)
}
unlink(out_dir, recursive = TRUE)
files <- setup_tests(task, in_dir, out_dir)
ff_df <- eprime_find(in_dir, task, files$paths, files$logs, quietly = TRUE)
test_that("copy_raw works", {
expect_output(copy_raw(ff_df[1,], in_dir, files$paths, files$logs, quietly = FALSE),
"Copying")
expect_true(file.exists(paste0(files$paths$raw_etxt,"/", ff_df$files_date_time[1], ".txt")))
expect_true(file.exists(paste0(files$paths$raw_edat,"/", ff_df$files_date_time[1], ".edat2")))
expect_output(copy_raw(ff_df[2,], in_dir, files$paths, files$logs, quietly = FALSE),
"found multiple")
expect_true(file.exists(paste0(files$paths$raw_etxt,"/", ff_df$files_date_time[2], ".txt")))
expect_output(copy_raw(ff_df[3,], in_dir, files$paths, files$logs, quietly = FALSE),
"cannot find")
expect_true(file.exists(paste0(files$paths$raw_etxt,"/", ff_df$files_date_time[3], ".txt")))
})
test_that("copy_file works", {
expect_output(copy_file(paste0(files$paths$raw_etxt,"/", ff_df$files_date_time[3], ".txt"),
paste0(files$paths$etxt,"/", ff_df$files_date_time[3], ".txt"),
files$paths, files$logs, gsub("FILE",ff_df$files_date_time[3], files$logs$file)),
"Copying")
expect_true(file.exists(paste0(files$paths$etxt,"/", ff_df$files_date_time[3], ".txt")))
expect_output(copy_file(paste0(files$paths$raw_etxt,"/", ff_df$files_date_time[3], ".txt"),
paste0(files$paths$etxt,"/", ff_df$files_date_time[3], ".txt"),
files$paths, files$logs, gsub("FILE",ff_df$files_date_time[3], files$logs$file)),
"already exists")
expect_error(copy_file(paste0(files$paths$etxt,"/", ff_df$files_date_time[3], ".txt"),
paste0(files$paths$etxt,"/", ff_df$files_date_time[3], ".txt"),
files$paths, files$logs, gsub("FILE",ff_df$files_date_time[3], files$logs$file)),
"")
})
test_that("move_file works", {
expect_error(move_file("sub-1100920_ses-01_Antisaccade_2017-08-29_11-44-36.txt",
"sub-1100920_ses-01_Antisaccade_2017-08-29_11-44-36.txt",
files$paths, files$logs, gsub("FILE",ff_df$files_date_time[3], files$logs$file), quietly = FALSE),
"")
expect_output(move_file(paste0(files$paths$error, "/sub-1100920_ses-01_Antisaccade_2017-08-29_11-44-36.txt"),
paste0(files$paths$error, "/sub-1100920_ses-01_Antisaccade_2017-08-29_11-44-36_est.txt"),
files$paths, files$logs, gsub("FILE",ff_df$files_date_time[3], files$logs$file), quietly = FALSE),
"Moving")
expect_true(file.exists(paste0(files$paths$error, "/sub-1100920_ses-01_Antisaccade_2017-08-29_11-44-36_est.txt")))
copy_file(paste0(files$paths$raw_etxt, "/sub-1100863_ses-03_Antisaccade_2017-07-14_14-46-09.txt"),
paste0(files$paths$etxt, "/sub-1100863_ses-03_Antisaccade_2017-07-14_14-46-09.txt"),
files$paths, files$logs, gsub("FILE",ff_df$files_date_time[3], files$logs$file), quietly = TRUE)
expect_output(move_file(paste0(files$paths$raw_etxt, "/sub-1100863_ses-03_Antisaccade_2017-07-14_14-46-09.txt"),
paste0(files$paths$etxt, "/sub-1100863_ses-03_Antisaccade_2017-07-14_14-46-09.txt"),
files$paths, files$logs, gsub("FILE",ff_df$files_date_time[2], files$logs$file), quietly = FALSE),
"already exists")
expect_true(file.exists(paste0(files$paths$error, "/sub-1100863_ses-03_Antisaccade_2017-07-14_14-46-09.txt")))
})
test_that("update_filenames works", {
ff <- list.files(files$paths$raw_etx)
expect_output(update_filenames(ff[1], gsub("\\.txt", "testing.txt", ff[1]),
files$paths, files$logs, quietly = FALSE),
"Altering file names to reflect correct session")
ff <- list.files(files$paths$main, pattern="testing",
recursive = TRUE, full.names = TRUE)
expect_true(file.exists(ff[1]))
})
# File extention handling
test_that("getExtension works", {
expect_equal(getExtension("test/path/to/somethere/file_with_123_in_it.pdf"),
"pdf")
expect_equal(getExtension("test/path/file_with_123_in_it.md"),
"md")
})
test_that("barename works", {
expect_equal(barename("test/path/to/somethere/file.md"),
"file")
expect_equal(barename("test/path/to/somethere/file_with_123_in_it.pdf"),
"file_with_123_in_it")
})
test_that("choose_option works", {
expect_equal(choose_option(1), 1)
expect_equal(choose_option(2), 2)
expect_equal(choose_option(0), 0)
expect_equal(choose_option(3, choices = c("these", "are", "choices"),
title = "this is title"), 3)
})
unlink(out_dir, recursive = TRUE)
|
a2ec80f29b53d817ea585e10af97c7c6c6f5b4fb
|
f7408683a4b9f3ea36e6c56588f257eba9761e12
|
/man/fpca.face.Rd
|
c73a9feb56a9c54cc7504840ff8f067d76be5063
|
[] |
no_license
|
refunders/refund
|
a12ad139bc56f4c637ec142f07a78657727cc367
|
93cb2e44106f794491c7008970760efbfc8a744f
|
refs/heads/master
| 2023-07-21T21:00:06.028918
| 2023-07-17T20:52:08
| 2023-07-17T20:52:08
| 30,697,953
| 42
| 22
| null | 2023-06-27T15:17:47
| 2015-02-12T10:41:27
|
R
|
UTF-8
|
R
| false
| true
| 5,796
|
rd
|
fpca.face.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fpca.face.R
\name{fpca.face}
\alias{fpca.face}
\title{Functional principal component analysis with fast covariance estimation}
\usage{
fpca.face(
Y = NULL,
ydata = NULL,
Y.pred = NULL,
argvals = NULL,
pve = 0.99,
npc = NULL,
var = FALSE,
simul = FALSE,
sim.alpha = 0.95,
center = TRUE,
knots = 35,
p = 3,
m = 2,
lambda = NULL,
alpha = 1,
search.grid = TRUE,
search.length = 100,
method = "L-BFGS-B",
lower = -20,
upper = 20,
control = NULL,
periodicity = FALSE
)
}
\arguments{
\item{Y, ydata}{the user must supply either \code{Y}, a matrix of functions
observed on a regular grid, or a data frame \code{ydata} representing
irregularly observed functions. See Details.}
\item{Y.pred}{if desired, a matrix of functions to be approximated using
the FPC decomposition.}
\item{argvals}{numeric; function argument.}
\item{pve}{proportion of variance explained: used to choose the number of
principal components.}
\item{npc}{how many smooth SVs to try to extract; if \code{NULL} (the
default) the hard thresholding rule of Gavish and Donoho (2014) is used (see
Details, References).}
\item{var}{logical; should an estimate of standard error be returned?}
\item{simul}{logical; if \code{TRUE} curves will we simulated using
Monte Carlo to obtain an estimate of the \code{sim.alpha} quantile at each
\code{argval}; ignored if \code{var == FALSE}}
\item{sim.alpha}{numeric; if \code{simul==TRUE}, quantile to estimate at
each \code{argval}; ignored if \code{var == FALSE}}
\item{center}{logical; center \code{Y} so that its column-means are 0? Defaults to
\code{TRUE}}
\item{knots}{number of knots to use or the vectors of knots; defaults to 35}
\item{p}{integer; the degree of B-splines functions to use}
\item{m}{integer; the order of difference penalty to use}
\item{lambda}{smoothing parameter; if not specified smoothing parameter is
chosen using \code{\link[stats]{optim}} or a grid search}
\item{alpha}{numeric; tuning parameter for GCV; see parameter \code{gamma}
in \code{\link[mgcv]{gam}}}
\item{search.grid}{logical; should a grid search be used to find \code{lambda}?
Otherwise, \code{\link[stats]{optim}} is used}
\item{search.length}{integer; length of grid to use for grid search for
\code{lambda}; ignored if \code{search.grid} is \code{FALSE}}
\item{method}{method to use; see \code{\link[stats]{optim}}}
\item{lower}{see \code{\link[stats]{optim}}}
\item{upper}{see \code{\link[stats]{optim}}}
\item{control}{see \code{\link[stats]{optim}}}
\item{periodicity}{Option for a periodic spline basis. Defaults to FALSE.}
}
\value{
A list with components
\enumerate{
\item \code{Yhat} - If \code{Y.pred} is specified, the smooth version of
\code{Y.pred}. Otherwise, if \code{Y.pred=NULL}, the smooth version of \code{Y}.
\item \code{scores} - matrix of scores
\item \code{mu} - mean function
\item \code{npc} - number of principal components
\item \code{efunctions} - matrix of eigenvectors
\item \code{evalues} - vector of eigenvalues
}
if \code{var == TRUE} additional components are returned
\enumerate{
\item \code{sigma2} - estimate of the error variance
\item \code{VarMats} - list of covariance function estimate for each
subject
\item \code{diag.var} - matrix containing the diagonals of each matrix in
\item \code{crit.val} - list of estimated quantiles; only returned if
\code{simul == TRUE}
}
}
\description{
A fast implementation of the sandwich smoother (Xiao et al., 2013)
for covariance matrix smoothing. Pooled generalized cross validation
at the data level is used for selecting the smoothing parameter.
}
\examples{
#### settings
I <- 50 # number of subjects
J <- 3000 # dimension of the data
t <- (1:J)/J # a regular grid on [0,1]
N <- 4 #number of eigenfunctions
sigma <- 2 ##standard deviation of random noises
lambdaTrue <- c(1,0.5,0.5^2,0.5^3) # True eigenvalues
case = 1
### True Eigenfunctions
if(case==1) phi <- sqrt(2)*cbind(sin(2*pi*t),cos(2*pi*t),
sin(4*pi*t),cos(4*pi*t))
if(case==2) phi <- cbind(rep(1,J),sqrt(3)*(2*t-1),
sqrt(5)*(6*t^2-6*t+1),
sqrt(7)*(20*t^3-30*t^2+12*t-1))
###################################################
######## Generate Data #############
###################################################
xi <- matrix(rnorm(I*N),I,N);
xi <- xi \%*\% diag(sqrt(lambdaTrue))
X <- xi \%*\% t(phi); # of size I by J
Y <- X + sigma*matrix(rnorm(I*J),I,J)
results <- fpca.face(Y,center = TRUE, argvals=t,knots=100,pve=0.99)
###################################################
#### FACE ########
###################################################
Phi <- results$efunctions
eigenvalues <- results$evalues
for(k in 1:N){
if(Phi[,k] \%*\% phi[,k]< 0)
Phi[,k] <- - Phi[,k]
}
### plot eigenfunctions
par(mfrow=c(N/2,2))
seq <- (1:(J/10))*10
for(k in 1:N){
plot(t[seq],Phi[seq,k]*sqrt(J),type="l",lwd = 3,
ylim = c(-2,2),col = "red",
ylab = paste("Eigenfunction ",k,sep=""),
xlab="t",main="FACE")
lines(t[seq],phi[seq,k],lwd = 2, col = "black")
}
}
\references{
Xiao, L., Li, Y., and Ruppert, D. (2013).
Fast bivariate \emph{P}-splines: the sandwich smoother,
\emph{Journal of the Royal Statistical Society: Series B}, 75(3), 577-599.
Xiao, L., Ruppert, D., Zipunnikov, V., and Crainiceanu, C. (2016).
Fast covariance estimation for high-dimensional functional data.
\emph{Statistics and Computing}, 26, 409-421.
DOI: 10.1007/s11222-014-9485-x.
}
\seealso{
\code{\link{fpca.sc}} for another covariance-estimate based
smoothing of \code{Y}; \code{\link{fpca2s}} and \code{\link{fpca.ssvd}}
for two SVD-based smoothings.
}
\author{
Luo Xiao
}
|
fe36fccd96de58ad151cfc0657b0ad8afb47ff4d
|
ce68a85c4a6c5d474a6a574c612df3a8eb6685f7
|
/src/dg/financial/courseware/QF13.R
|
0b5244bfb3b74e45e80077dfd969a02fbc227228
|
[] |
no_license
|
xenron/sandbox-da-r
|
c325b63114a1bf17d8849f076bfba22b6bdb34a3
|
c217fdddc26ed523b3860e2000afc699afac55a2
|
refs/heads/master
| 2020-04-06T06:58:17.049181
| 2016-08-24T06:16:32
| 2016-08-24T06:16:32
| 60,466,314
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,321
|
r
|
QF13.R
|
###### Screening A-share stocks for tradable pairs (begin) ######
# (Section header translated from Chinese.)
# Load the price panel; first column holds dates, remaining columns prices.
prc <- read.csv("D:/R Quant/data/other/prc.csv",header=TRUE)
prc.xts <- xts(prc[, -1], order.by=as.Date(prc[, 1]))
# - Find pairs ---------
# Ur means unit root test.
# Here uses Engle Granger Two Step Method to test if co-integration.
# All unordered pairs (i, j) of the N stocks, one pair per row.
N<-33
No.ij <- t(combn(1:N,2))
head(No.ij)
# St: first out-of-sample index; rows 1..(St-1) form the estimation window.
St <- 601
pairs.test.res <- t(mapply(No.ij[, 1], No.ij[, 2], FUN=function(i,j){
#Pre Data
stk.i <- prc.xts[1:(St-1), i]
stk.j <- prc.xts[1:(St-1), j]
# Engle-Granger step 1: regress one price series on the other.
cor.pair <- cor(stk.i, stk.j)
reg.pair <- lm(stk.i~ stk.j)
coefs <- coef(reg.pair)
error <- as.numeric(reg.pair$residuals)
# Engle-Granger step 2: ADF test (no drift/trend) on the residuals.
ur <- ur.df(error,type="none",lags=5,selectlags="AIC")
# Notice that ur.df() returns a s4 class:urca,
# we should use @ instead of $
ur.stat <- ur@teststat
ur.signif <- ur@cval
# Stars mark how stringent a tabulated critical value the statistic beats
# (cval[1] is the most stringent level tabulated by ur.df).
signif.level <- ""
if ( ur.stat < ur.signif[1] ) { signif.level <- "***"
} else if ( ur.stat < ur.signif[2] ) { signif.level <- "**"
} else if ( ur.stat < ur.signif[3] ) { signif.level <- "*"
} else { signif.level <- "" }
# Flag a pair as tradable: stationary residuals at the strictest level
# AND correlation above 0.85.
Flag <- 0
if( ur.stat < ur.signif[1] && cor.pair > 0.85 ) { Flag <- 1 }
ret <- c(i, j, names(stk.i), names(stk.j), cor.pair,
coefs[1], coefs[2], ur.stat, signif.level, Flag)
return(ret)
}))
pairs.test.res <- data.frame(pairs.test.res)
names(pairs.test.res) <- c("No.i","No.j","Nme.i","Nme.j","Corr",
"Alpha", "Beta",
"Ur.stat", "Signif.level","Flag")
head(pairs.test.res)
# Candidate pairs that passed both screens.
head( pairs.test.res[pairs.test.res$Flag == 1, ] )
### Screening A-share stocks for tradable pairs (end) ###
###### Copula high-frequency futures arbitrage strategy example (begin) ######
# (Section header translated from Chinese.)
library(xts)
library(copula)
# First column of each file: tick prices for silver (ag) and gold (au) futures.
ag <- read.csv("D:/R Quant/Data/High Freq/ag0613.csv", header=TRUE)[, 1]
au <- read.csv("D:/R Quant/Data/High Freq/au0613.csv", header=TRUE)[, 1]
# Visual check: both series standardised onto one chart.
# NOTE(review): "Standadized" typo lives in the plot title string (runtime text).
plot(scale(ag), type='l', col='darkgreen', main="Standadized Prices")
lines(scale(au), col='darkred')
# Log returns of a price series: r[t] = log(p[t+1] / p[t]).
# Returns a numeric vector one element shorter than the input.
logReturn <- function(prc) {
  previous <- prc[-length(prc)]
  current <- prc[-1]
  log(current / previous)
}
# Per-tick log returns for both contracts, and their joint scatter.
ret.ag <- logReturn(ag)
ret.au <- logReturn(au)
plot(ret.ag, ret.au, pch=20, col='darkgreen', main="Scatterplot of the Returns")
# Gumbel copula pieces. Note the parameterisation: exponent 1/a inside the
# sum and a outside, i.e. `a` plays the role of 1/theta relative to the
# textbook Gumbel form C(u,v) = exp(-((-log u)^theta + (-log v)^theta)^(1/theta)).

# Log of the copula CDF: -((-log u)^(1/a) + (-log v)^(1/a))^a.
fGumbel <- function(u, v, a) {
  s <- (-log(u))^(1 / a) + (-log(v))^(1 / a)
  -(s^a)
}

# Copula CDF C(u, v).
Gumbel <- function(u, v, a) {
  exp(fGumbel(u, v, a))
}

# Partial derivative dC/du (conditional CDF of v given u).
p1.Gumbel <- function(u, v, a) {
  s <- (-log(u))^(1 / a) + (-log(v))^(1 / a)
  exp(fGumbel(u, v, a)) * (-log(u))^(-1 + 1 / a) * s^(-1 + a) / u
}

# Partial derivative dC/dv (conditional CDF of u given v).
p2.Gumbel <- function(u, v, a) {
  s <- (-log(u))^(1 / a) + (-log(v))^(1 / a)
  exp(fGumbel(u, v, a)) * (-log(v))^(-1 + 1 / a) * s^(-1 + a) / v
}
# Mispricing indices for the latest observation of two return series.
# Transforms both series through their empirical CDFs, fits a bivariate
# Gumbel copula by ML, and returns the two copula partial derivatives
# evaluated at the most recent pair of pseudo-observations.
# Assumes r1 and r2 have equal length (size is taken from r1 only) --
# TODO confirm with callers.
Misp.Idx<-function(r1, r2){
# Empirical CDFs of each return series.
frac1 <- ecdf(r1)
frac2 <- ecdf(r2)
size<-length(r1)
xpar.1 <- c()
xpar.2 <- c()
for(i in 1:size)
{
xpar.1[i] <- frac1(r1[i])
xpar.2[i] <- frac2(r2[i])
}
# Pseudo-observations in (0, 1) for copula fitting.
u0 <- pobs( cbind(xpar.1, xpar.2) )
# Fit a bivariate Gumbel copula by maximum likelihood (start value 3).
gumbel.cop<- gumbelCopula(3,dim=2)
fit.ml <- fitCopula(gumbel.cop, u0)
alpha <- fit.ml@estimate
# NOTE(review): fitCopula() estimates the standard Gumbel theta, while
# p1/p2.Gumbel use exponent 1/a inside the sum (i.e. a = 1/theta). Passing
# the estimate straight through may be a parameter mismatch -- confirm
# whether `a <- 1/alpha` was intended.
a <- alpha
# Evaluate the copula partials at the latest observation.
u <- frac1(r1[size])
v <- frac2(r2[size])
# NOTE(review): eval() is redundant; p1/p2.Gumbel already return values.
mis1 <- eval(p1.Gumbel(u,v,a))
mis2 <- eval(p2.Gumbel(u,v,a))
return( c(mis1, mis2) )
}
# test:
# Misp.Idx(ret.ag[1:300], ret.au[1:300])
# --- Backtest state ---------------------------------------------------------
# misp.idx.1/2: recorded mispricing indices; trd.prc.1/2: prices at which
# trades were opened; position / position.t: position history and current
# position (1 / -1 / 0 flat); profit: realised P&L per closed round trip.
misp.idx.t <- c()
misp.idx.1 <- c()
misp.idx.2 <- c()
trd.prc.1 <- c()
trd.prc.2 <- c()
position <- c()
position.t <- 0
profit <- c()
# m0: notional split across the two legs; d1..d6: entry/exit thresholds on
# the two mispricing indices; k: rolling estimation-window length (ticks).
m0 <- 100000
d1 <- 0.27
d2 <- 0.2
d3 <- 1.7
d4 <- 0.9
d5 <- 0.1
d6 <- 0.5
p1 <- ag
p2 <- au
# NOTE(review): p10/p20 are initialised here but never used below.
p10 <- 0
p20 <- 0
k <- 200
for(i in (k+1):(length(p1)-1)){
# Next-tick prices at which any trade would execute.
p.10 <- p1[i+1]
p.20 <- p2[i+1]
# Rolling window of k log returns ending at tick i.
r1 <- logReturn(p1[(i-k):i])
r2 <- logReturn(p2[(i-k):i])
misp.idx.t <- Misp.Idx(r1,r2)
misp.idx.1 <- c(misp.idx.1, misp.idx.t[1])
misp.idx.2 <- c(misp.idx.2, misp.idx.t[2])
# Skip the tick (carry the position) if the copula fit produced NaNs.
if( is.nan(misp.idx.t[1]) || is.nan(misp.idx.t[2]) ){
position.t <- position.t
position <- c(position, position.t)
}
else{
if( position.t == 0 ){
# Flat: open a +1 position when the indices cross the (d1, d2) entry band.
# NOTE(review): vectorised & here vs && below -- harmless on scalars,
# but inconsistent.
if( misp.idx.t[1] > d1 & misp.idx.t[2] < d2 ){
trd.prc.1 <- c(trd.prc.1, p.10)
trd.prc.2 <- c(trd.prc.2, p.20)
# Leg sizes: m0 split in inverse proportion to the two prices,
# long one leg and short the other.
vol.1 <- m0*p.20/(p.10+p.20)
vol.2 <- -m0*p.10/(p.10+p.20)
position.t <- 1
position <- c(position, position.t)
}
else {
# Opposite signal (d3, d4 band): open a -1 position with legs reversed.
if( misp.idx.t[1] < d3 && misp.idx.t[2] > d4 ){
trd.prc.1 <- c(trd.prc.1, p.10)
trd.prc.2 <- c(trd.prc.2, p.20)
vol.1 <- -m0*p.20/(p.10+p.20)
vol.2 <- m0*p.10/(p.10+p.20)
position.t <- -1
position <- c(position, position.t)
}
else{
position.t <- position.t;
position <- c(position, position.t)
}
}
}
else{
# In a position: close once the relevant index reverts past d5 / d6 and
# realise P&L against the recorded entry prices.
if((misp.idx.t[1] < d5 && position.t == 1) || (misp.idx.t[2] > d6 && position.t == -1)){
profit.t <- position.t*( vol.1*(trd.prc.1[length(trd.prc.1)]-p.10) + vol.2*(p.20-trd.prc.2[length(trd.prc.2)]) )
profit <- c(profit, profit.t)
position.t <- 0
position <- c(position, position.t)
}
else { # do nothing
position.t <- position.t
position <- c(position, position.t)
}
}
}
}
# Summary: hit ratio, cumulative equity curve and P&L distribution.
win <- sum(profit > 0)
lose <- sum(profit <= 0)
win.ratio <- win/(win+lose)
win.ratio
plot(cumsum(profit), type='l')
plot(profit, type='l')
hist(profit, 50)
mean(profit)
## Copula high-frequency futures arbitrage strategy example (end)
|
1185f54427ec4d36a6b574f06c785996cb40dbd6
|
7ba3c22fc7c41ef0948b19c89e6f574b0f40f0f3
|
/plot3.R
|
6a933e943ccbb07620b35fcd0b27f8e513682d7a
|
[] |
no_license
|
emilieprang/ExData_Plotting1
|
3a7538228e99205a0a2e576b665e2d6bea450437
|
877ef75c6b91af763ebae9e2727ac18736699d55
|
refs/heads/master
| 2022-12-19T04:39:06.590939
| 2020-09-14T11:48:04
| 2020-09-14T11:48:04
| 295,367,163
| 0
| 0
| null | 2020-09-14T09:26:19
| 2020-09-14T09:26:18
| null |
UTF-8
|
R
| false
| false
| 966
|
r
|
plot3.R
|
# Plot 3: energy sub-metering over 2007-02-01 and 2007-02-02, saved as a PNG.

# Work from the project directory (kept from the original script).
setwd("/Users/emilienielsen/Documents/Coursera/Project1")

# Load the full household power-consumption data set.
power_data <- read.table("household_power_consumption.txt",
                         header = TRUE, sep = ";")

# Keep only the two days of interest (dates stored as d/m/yyyy strings).
two_days <- subset(power_data,
                   power_data$Date == "1/2/2007" | power_data$Date == "2/2/2007")

# Make sure the three sub-metering columns are numeric.
for (col in c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) {
  two_days[[col]] <- as.numeric(two_days[[col]])
}

# Draw the three series into a 480x480 PNG with weekday tick labels.
png(filename = "plot3.png", width = 480, height = 480)
plot(two_days$Sub_metering_1, type = "n", xaxt = "n",
     xlab = "", ylab = "Energy sub metering")
lines(two_days$Sub_metering_1, col = "black")
lines(two_days$Sub_metering_2, col = "red")
lines(two_days$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))
axis(1, at = c(1, 1 + 1440, 2880), labels = c("Thu", "Fri", "Sat"))
dev.off()
|
1f4c5402ad70d3d6f82f0bbaa121e3538e570aac
|
be80300e5a23c8cebd14cda7af261c2686686010
|
/global.R
|
ee9cab5a2cacdd190b6c402107f3fedae06bec20
|
[] |
no_license
|
kfvargas/prueba
|
ef99e88532ccb1de7f97f07adcf9d9633d96fa2b
|
cc21ce2bcbc59b216f185e500f5bc28ced52a582
|
refs/heads/master
| 2022-02-12T02:02:54.550392
| 2019-07-22T12:56:54
| 2019-07-22T12:56:54
| 197,819,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,159
|
r
|
global.R
|
# global.R: package loading and data preparation shared by the Shiny app.
library(shiny)
library(shinydashboard)
library(dplyr)
library(data.table)
library("readxl")
library(lubridate)
# NOTE(review): data.table is loaded twice (also four lines above).
library(data.table)
library(tidyverse)
library(DT)
library(plotly)
# Product-consumption table; 'consumo' coerced to integer.
consumo_producto<-fread("data/consumo_producto.txt", sep = ";", header = T)
consumo_producto$consumo<-as.integer(consumo_producto$consumo)
# Card master table plus demographics workbook.
train<-fread("data/train.csv", header = T)
train_demografica<-read_xlsx("data/train_test_demograficas.xlsx")
# Strip embedded blanks from numeric-looking columns before coercion,
# and parse the issue date (d/m/Y).
train$Disponible.Avances<-as.integer(gsub(" ","",train$Disponible.Avances))
train$Limite.Cupo<-as.integer(gsub(" ","",train$Limite.Cupo))
train$Fecha.Expedicion<-as.Date(train$Fecha.Expedicion,"%d/%m/%Y")
# Join demographics with card data on the shared id.
# NOTE(review): by = c("id","id") lists the same key twice; by = "id" is
# presumably intended -- confirm.
data <- train_demografica %>%
inner_join(train, by = c("id","id")) %>%
select (Fecha.Expedicion,categoria,segmento,nivel_educativo,edad,estrato, Disponible.Avances, Limite.Cupo)
# Cancellation subset: rows with an ANO_MES value (YYYYMM per the substring
# calls below), converted to a first-of-month Date.
data_cancelacion<-train_demografica %>%
inner_join(train, by = c("id","id")) %>%
select(categoria,segmento,nivel_educativo,edad,estrato,Disponible.Avances,ANO_MES) %>%
filter(!is.na(ANO_MES)) %>%
mutate(fecha=as.Date(paste("01",substring(ANO_MES,5,6),substring(ANO_MES,1,4), sep = "/"), "%d/%m/%Y"))
|
4a5c150b9f8ee9af01c1b936fd953dc80481a8a8
|
e247a3e98934585db6716c1e22bd62743554b0ec
|
/man/winsor.mean.Rd
|
0920c3d6c5f5e8ace09b493de73122dac5c71fd6
|
[] |
no_license
|
husseingb/SciencePo
|
37b9de88189055ab3ca2439e6a14bfc65d6fea9f
|
eacfc7a8a8af0bf1edc75fb4f8ea2a5f20d2459e
|
refs/heads/master
| 2020-09-17T10:02:40.633828
| 2013-06-12T00:00:00
| 2013-06-12T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,659
|
rd
|
winsor.mean.Rd
|
\encoding{latin1}
\name{winsor.mean}
\alias{winsor.mean}
\title{ Winsorized Mean
}
\description{This function computes winsorized mean. The winsorization consists of recoding the top k values.
}
\usage{
winsor.mean(x, k = 1, na.rm = TRUE)
}
\arguments{
\item{x}{ is the vector to be winsorized.
}
\item{k}{is an integer for the quantity of outlier elements that should be replaced to the computation purpose.
}
\item{na.rm}{A logical value indicating whether NA values should be stripped before the computations.
}
}
\details{Winsorizing a vector produces different results than trimming it. Trimming discards the extreme values, whereas Winsorizing replaces them with certain percentiles instead.
}
\value{ An object of the same type as \code{x}.
}
\references{
%TODO include a paper that I red about winsorization and robustness, but could not remember at that time.
Dixon, W. J., and Yuen, K. K. (1999) Trimming and winsorization: A review. \emph{The American Statistician,} \bold{53(3),} 267--269.
Wilcox, R. R. (2012) \emph{Introduction to robust estimation and hypothesis testing.} Academic Press, 30-32.
Statistics Canada (2010) \emph{Survey Methods and Practices.}
}
\author{Daniel Marcelino
}
\note{One might want winsorize estimators, but note that Winsorization tends to be used for one-variable situations, it is rarely used in the multivariate sample survey situation.
}
\seealso{\code{\link{detail}}.
}
\examples{
x <- rnorm(100)
winsor.mean(x)
# see this function in context.
detail(x)
}
\keyword{ descriptive stats }
\keyword{ average }
\keyword{winsorization}
\keyword{outliers}
|
3fcb89d9322b712e076057e04dd5a8de21efa784
|
bcb9aea78c90f9e243ddf3a82524796b0ecf52a8
|
/man/runGenericDiscovery.Rd
|
6dfccfb6b5012a2fe9ddeb5a931b43b2fd78a3b8
|
[
"MIT"
] |
permissive
|
MassDynamics/MassExpression
|
ab35451ac89662b992d3633b5c06bea5ce2c521c
|
24b59e6cb7afc07b8b4b59473c72dca93582fe41
|
refs/heads/main
| 2023-05-10T15:38:58.488481
| 2023-05-04T22:43:13
| 2023-05-04T22:43:13
| 377,716,166
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,292
|
rd
|
runGenericDiscovery.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runGenericDiscovery.R
\name{runGenericDiscovery}
\alias{runGenericDiscovery}
\title{This function orchestrates the MassExpression workflow (could be called by a workflow step)}
\usage{
runGenericDiscovery(
experimentDesign,
proteinIntensities,
normalisationMethod = "None",
species,
labellingMethod,
fitSeparateModels = TRUE,
returnDecideTestColumn = FALSE,
conditionSeparator = " - "
)
}
\arguments{
\item{experimentDesign}{data.frame. Experiment design provided in input by the user. Required columsn are: `SampleName` and `Condition`.}
\item{proteinIntensities}{data.frame. Wide matrix of intensities. Rows are proteins and columns are SampleNames. Required column: `ProteinId`.}
\item{normalisationMethod}{Normalisation method. One of "None" or "Median".}
\item{species}{Species. One of 'Human', 'Mouse', 'Yeast', 'Other'}
\item{labellingMethod}{One of 'LFQ' or 'TMT'}
\item{fitSeparateModels}{logical. TRUE to fit separate limma models for each pairwise comparisons
(e.g. filtering and `lmFit` are run separately by comparison).}
\item{returnDecideTestColumn}{logical. If TRUE the row data of the `CompleteIntensityExperiment` will contain the output from
`limma::decideTests`. If FALSE a single model is run for all contrasts.}
\item{conditionSeparator}{string. String used to separate up and down condition in output.}
}
\value{
List of two SummarisedExperiment objects: `IntensityExperiment`
containing the raw intensities and `CompleteIntensityExperiment` including
imputed intensities and the results of the limma DE analysis.
}
\description{
This function orchestrates the MassExpression workflow (could be called by a workflow step)
}
\examples{
design <- fragpipe_data$design
intensities <- fragpipe_data$intensities
parameters <- fragpipe_data$parameters
normalisation_method <- parameters[parameters[,1] == "UseNormalisationMethod",2]
species <- parameters[parameters[,1] == "Species",2]
labellingMethod <- parameters[parameters[,1] == "LabellingMethod",2]
listIntensityExperiments <- runGenericDiscovery(experimentDesign = design,
proteinIntensities = intensities,
normalisationMethod = normalisation_method,
species = species,
labellingMethod = labellingMethod)
}
|
40aaf2feaf279de83fb7552c94aa92c99ef4c5b1
|
639b8ca98fe73eb7732322ea2260031286f4aedc
|
/qcaeval.old/frontend_plots.R
|
71cec9c07b9d2e0a043d5f5655c15ad90dfe6b7c
|
[] |
no_license
|
cbengibson/QCArevision2
|
373d443b390597c10561b1ef1fdb700bc80db4bb
|
2b50bad8e79cc194af50490cf357bcbb6b54f785
|
refs/heads/master
| 2020-06-16T08:27:20.749639
| 2017-03-01T23:02:28
| 2017-03-01T23:02:28
| 75,122,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 954
|
r
|
frontend_plots.R
|
# Proportion of OUT == 1 by distance, drawn for three CTH levels.
library(plyr)

load("final_data_set.Rdata")
df <- ldply(data.list, data.frame)

# Random subsample of a third of the rows (with replacement, as in the
# original analysis). NOTE: no set.seed(), so runs are not reproducible.
sam <- sample(seq_len(nrow(df)), nrow(df) / 3, replace = TRUE)
df <- df[sam, ]

# For one CTH slice, return a two-column matrix: distance and the proportion
# of rows with OUT == 1 at that distance.
# (Replaces three copy-pasted loops; also drops the hard-coded nrow = 5 so
# the matrix adapts to however many distinct distances are present.)
prop_by_dist <- function(dat) {
  dists <- unique(dat$dist)
  prop <- matrix(nrow = length(dists), ncol = 2)
  for (k in seq_along(dists)) {
    rows <- dat$dist == dists[k]
    prop[k, 1] <- dists[k]
    prop[k, 2] <- sum(dat$OUT[rows] == 1, na.rm = TRUE) / sum(rows)
  }
  prop
}

# CTH = 0.8 sets the axes; the other two levels are overlaid.
prop <- prop_by_dist(df[df$CTH == 0.8, ])
prop
plot(prop[, 1], prop[, 2], type = "l", ylim = c(0, 1))

prop <- prop_by_dist(df[df$CTH == 0.9, ])
prop
lines(prop[, 1], prop[, 2], col = "red")

prop <- prop_by_dist(df[df$CTH == 1, ])
prop
lines(prop[, 1], prop[, 2], col = "blue")
|
5e4c9a36f08606d14a10dbadd693036a0c4d0405
|
bca358dc83ae998f0e8478b7b3f06d21a6325ea0
|
/Pedro_Insurance.R
|
a1fdf5ad91c5281d27bc07135a38518fd19ed2e3
|
[] |
no_license
|
cbradsky/healthinsuranceanalysis
|
6cbc7973f71ca0afee94c34ab10e94efce0bc11c
|
9ad4024641edb75bf343f120a581a6c19ccb75bf
|
refs/heads/master
| 2022-08-01T05:02:58.321731
| 2020-05-23T03:07:36
| 2020-05-23T03:07:36
| 265,984,621
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,381
|
r
|
Pedro_Insurance.R
|
# Exploratory analysis of health-insurance charges (ECON 386 data set).
library(readxl)
Insurance<- read_excel("/Users/pedroandrade/Documents/ECON 386.xlsx")
View(Insurance)
# Dummy-code the categorical variables as 0/1 integers.
Insurance$isFemale <- as.integer(Insurance$sex == "female")
Insurance$isSmoker <- as.integer(Insurance$smoker == "yes")
Insurance$isNorthwest <- as.integer(Insurance$region == "northwest")
Insurance$isNortheast <- as.integer(Insurance$region == "northeast")
Insurance$isSouthwest <- as.integer(Insurance$region == "southwest")
Insurance$isSoutheast <- as.integer(Insurance$region == "southeast")
summary(Insurance)
?cor
# Pairwise correlation of each predictor with charges.
cor(Insurance$age, Insurance$charges)
cor(Insurance$bmi, Insurance$charges)
cor(Insurance$children, Insurance$charges)
cor(Insurance$isFemale, Insurance$charges)
cor(Insurance$isSmoker, Insurance$charges)
cor(Insurance$isSouthwest, Insurance$charges)
cor(Insurance$isSoutheast, Insurance$charges)
cor(Insurance$isNorthwest, Insurance$charges)
cor(Insurance$isNortheast, Insurance$charges)
# No-intercept regression of charges on bmi, with in-sample RMSE.
plot(charges~bmi, Insurance)
model1<-lm(charges~ 0 + bmi, Insurance)
summary(model1)
RSS<-c(crossprod(model1$residuals))
MSE<-RSS/length(model1$residuals)
RMSE1<-sqrt(MSE)
RMSE1
##Splitting the data##
# 70/30 train/test split via random group labels.
# NOTE(review): no set.seed(), so the split is not reproducible.
ind<-sample(2, nrow(Insurance), replace=TRUE, prob=c(0.7,0.3))
training<-Insurance[ind==1,]
testing<-Insurance[ind==2,]
dim(training)
dim(testing)
##model using training data##
trainmodel<-lm(charges~0+bmi, training)
summary(trainmodel)
trainmodel$coefficients
confint(trainmodel)
##Prediction##
pred<-predict(trainmodel,testing)
head(pred)
head(testing$charges)
View(pred)
# Out-of-sample RMSE (note the n - 1 denominator used here).
RMSE=sqrt(sum((pred-testing$charges)^2)/(length(testing$charges)-1))
RMSE
# Binary outcome: charges at or above the sample mean.
x<-mean(Insurance$charges)
chargesbin<-ifelse(Insurance$charges >= x, 1, 0)
View(chargesbin)
# Logistic regression of the binary outcome on bmi (no intercept).
lrmodel<-glm(chargesbin~0+bmi, data = Insurance, family = "binomial")
summary(lrmodel)
signal <- predict(lrmodel, Insurance)
# Manual inverse-logit of the linear predictor.
pred_prob <- (1/(1 + exp(-signal)))
View(pred_prob)
?exp
confint(lrmodel)
confint.default(lrmodel)
# Coefficient with confidence interval, also exponentiated to an odds ratio.
point_conf_table<-cbind(lrmodel$coefficients, confint(lrmodel))
point_conf_table
exp(point_conf_table)
?sample
##trying to use bmi as binary variable##
# Re-fit with bmi dichotomised at its mean (intercept included this time).
bmimean<-mean(Insurance$bmi)
Insurance$cat.bmi<-ifelse(Insurance$bmi >= bmimean, 1, 0)
View(Insurance)
lrmodel2<-glm(chargesbin~cat.bmi, Insurance, family = "binomial")
summary(lrmodel2)
confint(lrmodel2)
confint.default(lrmodel2)
point_conf_table<-cbind(lrmodel2$coefficients, confint(lrmodel2))
point_conf_table
exp(point_conf_table)
|
95c710e6037c15f814b4a9d9eec3616295c5e659
|
292067fed8e3a82199771e5c78874c6ae0ce4fe0
|
/cachematrix.R
|
a5507ced3585699a1b7605bef712f60b9ad2c3e6
|
[] |
no_license
|
csaezcalvo/ProgrammingAssignment2
|
a2de262bec5c96889a9e80752bd1de6eabdc2a10
|
50ebfd97d3665807bca8d6d2a6cf694aeb18b9ba
|
refs/heads/master
| 2021-01-11T16:47:29.955689
| 2017-01-21T21:21:02
| 2017-01-21T21:21:02
| 79,673,008
| 0
| 0
| null | 2017-01-21T21:08:17
| 2017-01-21T21:08:17
| null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
cachematrix.R
|
## The first function makeCacheMatrix creates an object representing a matrix
## that can cache its inverse and the second function computes its inverse
## Build a matrix wrapper that can cache its inverse.
## The returned list exposes four closures sharing one environment:
##   set(y)        replace the stored matrix and invalidate the cache
##   get()         return the stored matrix
##   setinv(inverse) store a computed inverse in the cache
##   getinv()      return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # new matrix invalidates any cached inverse
    },
    get = function() x,
    setinv = function(inverse) inv <<- inverse,
    getinv = function() inv
  )
}
## Return the inverse of the matrix held by a "CacheMatrix" object `x`
## (as built by makeCacheMatrix). If an inverse was already cached, it is
## returned directly; otherwise it is computed with solve(), stored in the
## cache, and returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cache data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
a54e054b2d7784be856e5b356c53d4b5bec0864d
|
8769749bb0a919299b66db7aaafa400d1d412469
|
/arrowhead_tads/plot.stage_specific_tad.overlap_mTAD.r
|
81a17c337e1724877e06d3a5d0d32b259de0eee3
|
[] |
no_license
|
bioinfx/cvdc_scripts
|
e9e113fae866d2d3f0c2515fae1b410b2d7a3eeb
|
d33757f9d02fa6d503b5cb65336c0e4e410caa78
|
refs/heads/master
| 2022-03-19T00:15:13.372417
| 2019-12-05T04:38:48
| 2019-12-05T04:38:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
plot.stage_specific_tad.overlap_mTAD.r
|
# Compare stage-specific TAD classes by the fraction conserved in mouse (mTADs)
# and plot the result with one-sided proportion-test p-values.
# NOTE(review): uses ggplot2 but never loads it here — presumably attached by
# the calling environment; confirm before running standalone.
setwd("../../analysis/tads/stage_specific_tads")
# Hard-coded counts: total TADs per class and the number overlapping a mouse TAD.
name = c("stable","CM-","CM+","ES+","ES+HERVH+")
total = c(11096,302,76, 354, 34)
# Hit counts are averaged over two replicate overlap measurements (hence the /2);
# the last class has a single value.
hit = c( (6309+6238)/2,
(171+173)/2,
(33+40)/2,
(96+91)/2,
6)
dat = data.frame(name,total,hit)
dat$ratio = dat$hit/dat$total
# One-sided ("less") proportion test of each stage-specific class (rows 2-5)
# against the "stable" class (row 1); pval[1] keeps the placeholder value 1.
pval = 1
for ( i in 2:5) {
test = matrix(c(dat[i,3],dat[i,2]-dat[i,3],dat[1,3],dat[1,2]-dat[1,3]),ncol=2)
pval[i] = prop.test(test,alternative="l")$p.value
}
# Bar plot of conservation fractions, annotated with the four p-values.
pdf("conservation_with_mouse.pdf",height=3,width=3)
ggplot(dat) + geom_bar(aes(x=1:nrow(dat),y=ratio),stat="identity",fill="black") +
scale_x_continuous(breaks=1:nrow(dat), labels=name) + xlab("") +
annotate("text", x=(1+2:5)/2,y= 0.6+0.05*1:4, label=format(pval[2:5],scientific=T, digits=2)) +
theme_bw() + ylab("Fraction of TAD conserved in mouse") +
theme( axis.text.x = element_text(angle=45,hjust=1) )
dev.off()
|
8b1bdf9fe05987dee821b30e1e5ee5eb8ea12d69
|
441d37f4560544747687187fd88250edcfeba9f8
|
/R/nplcm_fit_NoReg_BrS_Nest.R
|
2004ba892a4b053ac7f42a6ee36aecea0705e914
|
[
"MIT"
] |
permissive
|
swihart/nplcm
|
4afccd1ff9a7b5c61d3f080f192697c80403ee76
|
d36bac6a9615f2fbc7881706f2b062cf61fedd4a
|
refs/heads/master
| 2020-12-11T09:09:43.284150
| 2015-05-13T02:15:08
| 2015-05-13T02:15:08
| 35,523,147
| 0
| 0
| null | 2015-05-13T02:12:59
| 2015-05-13T02:12:59
| null |
UTF-8
|
R
| false
| false
| 5,981
|
r
|
nplcm_fit_NoReg_BrS_Nest.R
|
#' Fit nested partially-latent class model (low-level)
#'
#' Features:
#' \itemize{
#' \item no regression;
#' \item bronze- (BrS) measurements;
#' \item conditional dependence;
#' \item all pathogens have BrS measurements.
#' }
#'
#' @inheritParams nplcm
#' @importFrom R2WinBUGS bugs
#' @return WinBUGS fit results.
#'
#' @export
nplcm_fit_NoReg_BrS_Nest <-
  function(Mobs,Y,X,model_options,mcmc_options){
    # Generic wrapper around R2WinBUGS::bugs(); MCMC settings default to the
    # values supplied in mcmc_options. Returns NULL if the WinBUGS call fails.
    call.bugs <- function(data, inits, parameters,m.file,
                          bugsmodel.dir = mcmc_options$bugsmodel.dir,
                          winbugs.dir = mcmc_options$winbugs.dir,
                          nitermcmc = mcmc_options$n.itermcmc,
                          nburnin = mcmc_options$n.burnin,
                          nthin = mcmc_options$n.thin,
                          nchains = mcmc_options$n.chains,
                          dic = FALSE,
                          is.debug = mcmc_options$debugstatus,
                          workd= mcmc_options$result.folder,...) {
      m.file <- paste(bugsmodel.dir, m.file, sep="");
      f.tmp <- function() {
        ##winbugs
        gs <- bugs(data, inits, parameters,
                   model.file = m.file,
                   working.directory=workd,
                   n.chains = nchains,
                   n.iter = nitermcmc,
                   n.burnin = nburnin,
                   n.thin = nthin,
                   bugs.directory=winbugs.dir,
                   DIC=dic,
                   debug=is.debug,...);
        gs;
      }
      bugs.try <- try(rst.bugs <- f.tmp(), silent=FALSE);
      # Fix: use inherits() instead of `class(x) == "try-error"` — objects can
      # carry more than one class, in which case the `==` comparison is
      # length > 1 and misbehaves inside if().
      if (inherits(bugs.try, "try-error")) {
        rst.bugs <- NULL;
      }
      rst.bugs;
    }
    #-------------------------------------------------------------------#
    # prepare data:
    # parsing is kept for its input-validation side effect in assign_model().
    parsing <- assign_model(Mobs,Y,X,model_options)
    # Case/control counts (Y == 1 indicates a case).
    Nd <- sum(Y==1)
    Nu <- sum(Y==0)
    cat("==True positive rate (TPR) prior(s) for ==\n",
        model_options$M_use,"\n",
        " is(are respectively): \n",
        model_options$TPR_prior,"\n")
    cause_list <- model_options$cause_list
    pathogen_BrS_list <- model_options$pathogen_BrS_list
    pathogen_SSonly_list <- model_options$pathogen_SSonly_list
    # get the count of pathogens:
    # number of all BrS available pathogens:
    JBrS <- length(pathogen_BrS_list)
    # number of all SS only pathogens:
    JSSonly <- length(pathogen_SSonly_list)
    # number of all causes possible: singletons, combos, NoA, i.e.
    # the number of rows in the template:
    Jcause <- length(cause_list)
    # Cause-by-pathogen indicator template; last row is all zeros for controls.
    template <- rbind(as.matrix(rbind(symb2I(c(cause_list),
                                             c(pathogen_BrS_list,pathogen_SSonly_list)))),
                      rep(0,JBrS+JSSonly)) # last row for controls.
    # fit model :
    # plcm - BrS + SS and SSonly:
    # Stack case rows on top of control rows for the BrS measurement matrix.
    MBS.case <- Mobs$MBS[Y==1,]
    MBS.ctrl <- Mobs$MBS[Y==0,]
    MBS <- as.matrix(rbind(MBS.case,MBS.ctrl))
    #MSS.case <- Mobs$MSS[Y==1,1:JBrS]
    #MSS.case <- as.matrix(MSS.case)
    #SS_index <- which(colMeans(is.na(MSS.case))<0.9)#.9 is arbitrary; any number <1 will work.
    #JSS <- length(SS_index)
    #MSS <- MSS.case[,SS_index]
    # set priors:
    alpha <- eti_prior_set(model_options)
    TPR_prior_list <- TPR_prior_set(model_options,Mobs,Y,X)
    alphaB <- TPR_prior_list$alphaB
    betaB <- TPR_prior_list$betaB
    #alphaS <- TPR_prior_list$alphaS
    #betaS <- TPR_prior_list$betaS
    # if (parsing$measurement$SSonly){
    #   MSS.only.case <- Mobs$MSS[Y==1,(1:JSSonly)+JBrS]
    #   MSS.only <- as.matrix(MSS.only.case)
    #   alphaS.only <- TPR_prior_list$alphaS.only
    #   betaS.only <- TPR_prior_list$betaS.only
    # }
    # Number of subclasses in the nested model.
    K <- model_options$k_subclass
    # Assemble inits, data, and monitored parameters, then run WinBUGS.
    # Monitored parameters depend on whether individual predictions (Icat)
    # and/or posterior predictive draws (MBS.new) were requested.
    mybugs <- function(...){
      inits <- function(){list(pEti = rep(1/Jcause,Jcause),
                               r0 = c(rep(.5,K-1),NA),
                               r1 = cbind(matrix(rep(.5,Jcause*(K-1)),
                                                 nrow=Jcause,ncol=K-1),
                                          rep(NA,Jcause)),
                               alphadp0 = 1)};
      data <- c("Nd","Nu","JBrS","Jcause",
                "alpha","template","K",
                #"JSS","MSS",
                #"MSS.only","JSSonly","alphaS.only","betaS.only",
                "MBS","alphaB","betaB");
      # Scalar logical conditions: use && (short-circuit) rather than
      # elementwise & inside if().
      if (mcmc_options$individual.pred==FALSE && mcmc_options$ppd==TRUE){
        parameters <- c("pEti","Lambda","Eta","alphadp0","MBS.new",
                        "ThetaBS.marg","PsiBS.marg","PsiBS.case",
                        "ThetaBS","PsiBS")
      } else if(mcmc_options$individual.pred==TRUE && mcmc_options$ppd==TRUE){
        parameters <- c("pEti","Lambda","Eta","alphadp0","Icat","MBS.new",
                        "ThetaBS.marg","PsiBS.marg","PsiBS.case",
                        "ThetaBS","PsiBS")
      } else if (mcmc_options$individual.pred==TRUE && mcmc_options$ppd==FALSE){
        parameters <- c("pEti","Lambda","Eta","alphadp0","Icat",
                        "ThetaBS.marg","PsiBS.marg","PsiBS.case",
                        "ThetaBS","PsiBS")
      } else if (mcmc_options$individual.pred==FALSE && mcmc_options$ppd==FALSE){
        parameters <- c("pEti","Lambda","Eta","alphadp0",
                        "ThetaBS.marg","PsiBS.marg","PsiBS.case",
                        "ThetaBS","PsiBS")
      }
      rst.bugs <- call.bugs(data, inits, parameters,...);
      rst.bugs
    }
    # Choose the BUGS model file with or without posterior predictive draws;
    # the fit object is the function's (invisible) return value.
    if (mcmc_options$ppd==TRUE){
      gs <- mybugs("model_NoReg_BrS_nplcm_ppd.bug")
    } else {
      gs <- mybugs("model_NoReg_BrS_nplcm.bug")
    }
  }
|
0fcd41bd942f1241dab940ba4264de8f303837e4
|
ce2deb9f1b4e02f16592ed78acad49bec5a674c8
|
/projects/acn/src/.trash/acn_cnm_live.R
|
41b3617cc6420f75c284a1e7892bef53d69bede3
|
[] |
no_license
|
RavenDaddy/Brent-Thomas-Tripp-
|
e748b9e48ebd96bfc9ca11124283fb2acde7b86a
|
fe81f7952e67ab093d9e455b9bf520178c724383
|
refs/heads/master
| 2017-12-15T11:41:30.341714
| 2017-01-02T01:02:36
| 2017-01-02T02:31:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,170
|
r
|
acn_cnm_live.R
|
###Arthropod Co-occurrence Networks
###Null modeling to get ses and p-values
########################################################################
print('Loading packages')
# NOTE(review): cscore() and nullCom() used below are presumably defined in
# seenetR.R — confirm; they are not part of vegan or pbapply.
source('../../lichen_coo/src/seenetR.R')
library(vegan)
library(pbapply)
########################################################################
print('Loading data..')
pit <- read.csv('~/projects/dissertation/projects/art_coo/data/arth_cooc_PIT_Lau.csv')
# Normalize tree labels ("x.0y" -> "x.y"), drop genotype 1007, zero-fill NAs.
pit$tree <- sub('\\.0','\\.',pit$tree)
pit <- pit[pit$geno!='1007',]
pit[is.na(pit)] <- 0
#merge categories
#pemphigus mergers
# Collapse the Pemphigus columns into combined counts (pb, pb.pred),
# then drop the now-redundant source columns plus 'mite'.
pit$pb.upper <- pit$pb.upper + pit$pb.woody
pb <- pit$pb.upper + pit$pb.lower
pit <- cbind(pit,pb=pb)
pit$pb.pred <- pit$pb.pred + pit$pb.hole + pit$pb.woody.pred
pit <- pit[,colnames(pit)!='pb.woody'&colnames(pit)!='pb.woody.pred'&colnames(pit)!='pb.hole'&colnames(pit)!='mite'&colnames(pit)!='pb.upper'&colnames(pit)!='pb.lower']
#remove fungal
pit <- pit[,colnames(pit)!='fungal']
#remove species with less than 10 observations
# First 6 columns are metadata; keep only species columns with total count > 10.
pit.com <- pit[,-1:-6]
pit.com <- pit.com[,apply(pit.com,2,sum)>10]
print(colnames(pit.com))
#separate live and senescing leaves
# Keep live-leaf rows only and split the community matrix by tree x genotype.
liv <- pit.com[pit[,1]=='live',]
pit.l <- split(liv,paste(pit$tree[pit[,1]=='live'],pit$geno[pit[,1]=='live']))
########################################################################
print('Tree level co-occurrence')
# Observed C-score per tree; null C-scores only where >1 species is present.
obs.cs <- unlist(lapply(pit.l,cscore))
acn.sim <- pblapply(pit.l,function(x) if (sum(sign(apply(x,2,sum)))>1){nullCom(x)}else{NA})
acn.cs <- pblapply(acn.sim,function(x) if (any(is.na(x[[1]]))){NA}else{lapply(x,cscore)})
acn.cs <- pblapply(acn.cs,unlist)
# Standardized effect size (SES) and lower-tail p-value against the null
# distribution of C-scores for each tree.
acn.ses <- obs.cs*0
acn.p <- obs.cs*0
for (i in 1:length(acn.ses)){
print(i)
acn.ses[i] <- (obs.cs[i] - mean(acn.cs[[i]])) / sd(acn.cs[[i]])
acn.p[i] <- length(acn.cs[[i]][acn.cs[[i]]<=obs.cs[i]])/length(acn.cs[[i]])
}
print('Writing output')
dput(acn.cs,'../data/acn_cs_live.Rdata')
dput(acn.ses,'../data/acn_ses_live.Rdata')
dput(acn.p,'../data/acn_pval_live.Rdata')
|
d891ebcae317c6533ca996506fbdbb3cf40e8c90
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/MESS/man/clotting.Rd
|
95bcb022de829eadc60c9523e0c64a1b4cb776ac
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,696
|
rd
|
clotting.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MESS-package.R
\docType{data}
\name{clotting}
\alias{clotting}
\title{Blood clotting for 158 rats}
\format{
A data frame with 158 observations on the following 6 variables.
\describe{ \item{rat}{a numeric vector} \item{locality}{a
factor with levels \code{Loc1} \code{Loc2}} \item{sex}{a factor with
levels \code{F} \code{M}} \item{weight}{a numeric vector}
\item{PCA0}{a numeric vector with percent blood clotting activity at
baseline} \item{PCA4}{a numeric vector with percent blood clotting
activity on day 4} }
}
\source{
Ann-Charlotte Heiberg, project at The Royal Veterinary and
Agricultural University, 1999. \cr Added by Ib M. Skovgaard <ims@life.ku.dk>
}
\description{
Blood clotting activity (PCA) is measured for 158 Norway rats from two
locations just before (baseline) and four days after injection of an
anticoagulant (bromadiolone). Normally this would cause reduced blood
clotting after 4 days compared to the baseline, but these rats are known to
possess anticoagulant resistance to varying extent. The purpose is to relate
anticoagulant resistance to gender and location and perhaps weight. Dose of
injection is, however, administered according to weight and gender.
}
\examples{
data(clotting)
dim(clotting)
head(clotting)
day0= transform(clotting, day=0, pca=PCA0)
day4= transform(clotting, day=4, pca=PCA4)
day.both= rbind(day0,day4)
m1= lm(pca ~ rat + day*locality + day*sex, data=day.both)
anova(m1)
summary(m1)
m2= lm(pca ~ rat + day, data=day.both)
anova(m2)
## Log transformation suggested.
## Random effect of rat.
## maybe str(clotting) ; plot(clotting) ...
}
\keyword{datasets}
|
a4ff42b85ab3e7c8d5fb2f87dc3919f846d79621
|
4ed740aeec1366c7647bd599406f65ef78f7786b
|
/man/abbreviations.Rd
|
2fb1c48cc81ed7038747cb5bef3c043a1a033317
|
[] |
no_license
|
trinker/qdap2
|
00f97557a43eeee487c6a11074f940f0204d042c
|
b9244fe90c5f5cec9cd891b1ba1f55b157467e5f
|
refs/heads/master
| 2021-01-01T17:21:41.171478
| 2013-01-29T03:55:12
| 2013-01-29T03:55:12
| 7,884,841
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 376
|
rd
|
abbreviations.Rd
|
\docType{data}
\name{abbreviations}
\alias{abbreviations}
\title{Small Abbreviations Data Set}
\format{A data frame with 14 rows and 2 variables}
\description{
A dataset containing abbreviations and their qdap
friendly form.
}
\details{
\itemize{ \item abv. Common transcript abbreviations
\item rep. qdap representation of those abbreviations }
}
\keyword{datasets}
|
c84817ccd05bab0862bbe51aef68f3d17b54013e
|
99d08ce85ab007262c2cf0e85826be3519969aac
|
/unbiased_dgp/analysis_main.R
|
59403bd5b9089087d71d797b4b3f9079a4e6a16a
|
[] |
no_license
|
AlbertogGarcia/defor_econometrics
|
e042f220153796fc18b85591895fcfb8203e9327
|
d368194410f2153ca159b439506598f0d8d09aa0
|
refs/heads/master
| 2023-07-06T09:49:25.160781
| 2023-07-02T20:51:41
| 2023-07-02T20:51:41
| 186,687,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,541
|
r
|
analysis_main.R
|
library(tidyverse)
library(tictoc)
library(here)
library(DeclareDesign)
library(survival)
library(ggplot2)
library(dplyr)
library(ggfortify)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Parameterization
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Simulation design: n Monte Carlo runs over a nobs-pixel landscape observed
# for `years` periods, with property/county aggregation at three cell sizes.
years = 6
nobs = 150^2
n = 500
cellsize_small = 5
cellsize_med = 10
cellsize_large = 30
ppoints = 225
cpoints = 25
avg_parea = nobs/ppoints
avg_carea = nobs/cpoints
# Disturbance standard deviations: pixel (v), county/area (a), property (p).
std_v = 0.5
std_a = 0
std_p = 0
# here are the landscape characteristics in this parameterization
# note that the bias for the TWFE model will be equal to the pre-treatment difference in deforestation rtes, which is 0.03
base_0 = .02
base_1 = .05
trend = -.005
ATT = -.01
###############################################################################################################
######## Baseline showing aggregation resolves pixel fixed effects issue
###############################################################################################################
source(here::here('unbiased_dgp', 'specifications.R'))
# Convert the target baseline rates, trend, and ATT (probability scale) into
# probit-scale coefficients b0..b3 via qnorm/pnorm; repeated below whenever a
# sigma changes, since the coefficients depend on the total disturbance sd.
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
set.seed(0930)
# summary function that estimates all of the different specifications
aggregation <- specifications(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, std_p, std_c = 0.0, cellsize_small, cellsize_med, cellsize_large, ppoints, cpoints, nestedprops = FALSE, proptreatassign = FALSE)
summary_long <- aggregation$summary_long
library(rio)
export(summary_long, "unbiased_dgp/results/summary_long.rds")
###############################################################################################################
######## Introduction pixel level unobservables, which impact non-random selection
###############################################################################################################
std_a = 0.1
# we'll need to recompute the parameters if we change the value of sigma_p
# we'll need to compute the parameters
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
# summary function that estimates all of the different specifications
aggregation_0 <- specifications(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, std_p, std_c = 0.0, cellsize_small, cellsize_med, cellsize_large, ppoints, cpoints, nestedprops = FALSE, proptreatassign = FALSE)
summary_long_0 <- aggregation_0$summary_long
export(summary_long_0, "unbiased_dgp/results/summary_selection.rds")
###############################################################################################################
######## Adding in property level disturbances
###############################################################################################################
# Sweep the property-level disturbance sd over 0.1 / 0.2 / 0.3, re-deriving
# the probit coefficients each time.
#### 0.1
std_p = 0.1
# we'll need to compute the parameters
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
aggregation_1 <- specifications(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, std_p, std_c = 0.0, cellsize_small, cellsize_med, cellsize_large, ppoints, cpoints, nestedprops = FALSE, proptreatassign = FALSE)
summary_long_1 <- aggregation_1$summary_long
##### 0.2
std_p = 0.2
# we'll need to compute the parameters
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
#ATT = pnorm(b0+b1+b2_1+b3, 0, (std_a^2+std_v^2 )^.5) - pnorm(b0+b1+b2_1, 0, (std_a^2+std_v^2 )^.5)
aggregation_2 <- specifications(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, std_p, std_c = 0.0, cellsize_small, cellsize_med, cellsize_large, ppoints, cpoints, nestedprops = FALSE, proptreatassign = FALSE)
summary_long_2 <- aggregation_2$summary_long
#### 0.3
std_p = 0.3
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
aggregation_3 <- specifications(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, std_p, std_c = 0.0, cellsize_small, cellsize_med, cellsize_large, ppoints, cpoints, nestedprops = FALSE, proptreatassign = FALSE)
summary_long_3 <- aggregation_3$summary_long
# Stack all four property-disturbance scenarios into one results table.
summary_full <- rbind(summary_long_0, summary_long_1, summary_long_2, summary_long_3)
export(summary_full, "unbiased_dgp/results/summary_full.rds")
###############################################################################################################
######## alternative parameterization
###############################################################################################################
# Alternative 1: swap the treated/untreated baseline rates.
std_p = 0.3
std_a = 0.1
base_0 = .05
base_1 = .02
trend = -.005
ATT = -.01
# we'll need to compute the parameters
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
set.seed(0930)
cellsize = cellsize_med
aggregation_alt <- specifications(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, std_p, std_c = 0.0, cellsize_small, cellsize_med, cellsize_large, ppoints, cpoints, nestedprops = FALSE, proptreatassign = FALSE)
summary_long_alt <- aggregation_alt$summary_long
export(summary_long_alt, "unbiased_dgp/results/summary_long_alt.rds")
# Alternative 2: positive trend and positive ATT.
base_0 = .02
base_1 = .05
trend = .005
ATT = .01
# we'll need to compute the parameters
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
aggregation_alt2 <- specifications(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, std_p, std_c = 0.0, cellsize_small, cellsize_med, cellsize_large, ppoints, cpoints, nestedprops = FALSE, proptreatassign = FALSE)
summary_long_alt2 <- aggregation_alt2$summary_long
export(summary_long_alt2, "unbiased_dgp/results/summary_long_alt2.rds")
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#### weighting analysis
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
source(here::here('unbiased_dgp', 'heterogeneous_propertyarea.R'))
# we start with our base parameterization without property level perturbations
std_a = 0
std_v = 0.5
std_p = 0.0
# std_b3 governs heterogeneity of the treatment effect across properties.
std_b3 = .05
ppoints = 225
# here are the landscape characteristics in this parameterization
# note that the bias for the TWFE model will be equal to the pre-treatment difference in deforestation rtes, which is 0.03
base_0 = .02
base_1 = .05
trend = -.005
ATT = -.01
# we'll need to compute the parameters
std_avp = (std_a^2+std_v^2+std_p^2)^.5
std_avpt = (std_a^2+std_v^2+std_p^2+std_b3^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
set.seed(0930)
weights <- heterogeneous_propertyarea(n, nobs, years, b0, b1, b2_0, b2_1, std_a, std_v, std_p, std_b3, given_ATT = ATT, cellsize = 10, ppoints, cpoints)
summary_pweights <- weights$summary_long
library(rio)
export(summary_pweights, "unbiased_dgp/results/summary_pweights.rds")
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#### outcome analysis
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
source(here::here('unbiased_dgp', 'outcome_fcn.R'))
# we start with our base parameterization without property level perturbations
std_a = 0
std_v = 0.5
std_p = 0
# here are the landscape characteristics in this parameterization
# note that the bias for the TWFE model will be equal to the pre-treatment difference in deforestation rtes, which is 0.03
base_0 = .02
base_1 = .05
trend = -.005
ATT = -.01
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
set.seed(0930)
outcomes <- outcome_fcn(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v, cellsize = 10)
outcome <- outcomes$coeff_bias
library(rio)
export(outcome, "unbiased_dgp/results/outcomes.rds")
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#### DID keeping vs. dropping obs
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
source(here::here('unbiased_dgp', 'DID_keep.R'))
set.seed(0930)
# Reuses the b0..b3 computed in the outcome-analysis section above.
keeps <- DID_keep(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v)
keeps <- keeps$did_keeps
library(rio)
export(keeps, "unbiased_dgp/results/keeps.rds")
set.seed(0930)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
######## show TWFE is equivalent to dropping all pixels deforested in first period
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
base_0 = .02
base_1 = .05
trend = -.005
ATT = -.01
std_avp = (std_a^2+std_v^2+std_p^2)^.5
b0 = qnorm(base_0, mean = 0, sd = std_avp)
b1 = qnorm(base_1, mean = 0, sd = std_avp) - b0
b2_0 = qnorm(trend + base_0, mean = 0, sd = std_avp) - b0
b2_1 = qnorm(trend + base_1, mean = 0, sd = std_avp) - b0 - b1
b3 = qnorm( pnorm(b0+b1+b2_1, mean = 0, sd = std_avp) + ATT , mean = 0, sd = std_avp) - (b0 + b1 + b2_1)
# NOTE(review): TWFE_expost() and rmse() are not defined in this file;
# presumably provided by the sourced scripts or an attached package — confirm.
estimator_comp <- TWFE_expost(n, nobs, years, b0, b1, b2_0, b2_1, b3, std_a, std_v)
summary_coeff <- estimator_comp$summary_long %>%
mutate_at(vars(bias), as.numeric)
summary_wide <- summary_coeff %>%
group_by(model)%>%
summarise(RMSE = rmse(bias, 0),
q25 = quantile(bias, probs = .25),
q75 = quantile(bias, probs = .75),
Bias = mean(bias))
export(summary_wide, "unbiased_dgp/results/twfe_comp.rds")
|
4c3757b395094c3c4cdbb4db7a5fa150ac58a2f2
|
a3f9b39352ae4409dab117b1a1c129a8778585fb
|
/EngPopxIMDxAge.R
|
3de30e0f4e287bc46995d0941b7d961e35b1a872
|
[] |
no_license
|
VictimOfMaths/Routine-Data
|
01a7a416b4f0bde909a0e15518c6cf767739f362
|
466ed22342dcb8ec941806497385f2b7f7e1d8ca
|
refs/heads/master
| 2023-07-20T10:29:15.453387
| 2023-07-17T11:52:15
| 2023-07-17T11:52:15
| 245,402,797
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,866
|
r
|
EngPopxIMDxAge.R
|
# EngPopxIMDxAge.R
# Downloads ONS LSOA mid-2019 population estimates and the English Index of
# Multiple Deprivation (IMD), then plots the age distribution of the population
# by IMD decile — overall and for each IMD domain separately.
# NOTE(review): rm(list=ls()) retained from the original; clearing the user's
# workspace at the top of a script is generally discouraged.
rm(list=ls())

library(tidyverse)
library(curl)
library(readxl)
library(ggtext)
library(ggridges)
library(paletteer)

#Population data by LSOA (single year of age, mid-2019 estimates)
temp <- tempfile()
temp2 <- tempfile()
source <- "https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2flowersuperoutputareamidyearpopulationestimates%2fmid2019sape22dt2/sape22dt2mid2019lsoasyoaestimatesunformatted.zip"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
unzip(zipfile=temp, exdir=temp2)
LSOApop <- read_excel(file.path(temp2, "SAPE22DT2-mid-2019-lsoa-syoa-estimates-unformatted.xlsx"), sheet=4,
                      range="A5:CT34758")

#Bring in IMD data (for England): overall decile plus the seven domain deciles
temp <- tempfile()
source <- "https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/833973/File_2_-_IoD2019_Domains_of_Deprivation.xlsx"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
EIMD <- read_excel(temp, sheet=2, range="A2:T32845")[,c(1,6,8,10,12,14,16,18,20)]
colnames(EIMD) <- c("LSOAcode", "IMDdecile", "Income", "Employment", "Education, Skills & Training",
                    "Health & Disability", "Crime", "Bariers to Housing and Services", "Living Environment")

data <- merge(LSOApop, EIMD, by.x="LSOA Code", by.y="LSOAcode")

# Reshape to long: one row per LSOA x single year of age; "90+" recoded as 90.
data_long <- data %>%
  gather(age, pop, c(8:98)) %>%
  mutate(age=as.numeric(if_else(age=="90+", "90", age)))

# Total population by age and overall IMD decile.
data_overall <- data_long %>%
  group_by(age, IMDdecile) %>%
  summarise(pop=sum(pop))

tiff("Outputs/EngPopxIMDxAge.tiff", units="in", width=8, height=6, res=500)
ggplot(data_overall, aes(x=age, y=pop, colour=as.factor(IMDdecile)))+
  geom_line()+
  scale_colour_paletteer_d("RColorBrewer::BrBG", direction=-1, name="IMD Decile",
                           labels=c("1 - most deprived", "2", "3", "4", "5", "6", "7", "8",
                                    "9", "10 - least deprived"))+
  scale_x_continuous(name="Age")+
  scale_y_continuous(name="Population")+
  annotate("text", x=88, y=66000, label="Ages 90+ are\ngrouped together", colour="Grey50", size=rel(2.2))+
  theme_classic()+
  labs(title="Younger people in England are more likely to live in more deprived areas",
       subtitle="Age distribution of the English population by deciles of the Index of Multiple Deprivation",
       caption="Data from ONS & DHCLG | Plot by @VictimOfMaths")
dev.off()

# Per-domain population summaries: total population by age and domain decile,
# each tagged with its domain name for faceting.
data_income <- data_long %>%
  group_by(age, Income) %>%
  summarise(pop=sum(pop)) %>%
  rename(decile=Income) %>%
  mutate(domain="Income")

data_employment <- data_long %>%
  group_by(age, Employment) %>%
  summarise(pop=sum(pop)) %>%
  rename(decile=Employment) %>%
  mutate(domain="Employment")

data_education <- data_long %>%
  group_by(age, `Education, Skills & Training`) %>%
  summarise(pop=sum(pop)) %>%
  rename(decile=`Education, Skills & Training`) %>%
  mutate(domain="Education, Skills & Training")

data_health <- data_long %>%
  group_by(age, `Health & Disability`) %>%
  summarise(pop=sum(pop)) %>%
  rename(decile=`Health & Disability`) %>%
  mutate(domain="Health & Disability")

data_crime <- data_long %>%
  group_by(age, Crime) %>%
  summarise(pop=sum(pop)) %>%
  rename(decile=Crime) %>%
  mutate(domain="Crime")

data_housing <- data_long %>%
  group_by(age, `Bariers to Housing and Services`) %>%
  summarise(pop=sum(pop)) %>%
  rename(decile=`Bariers to Housing and Services`) %>%
  mutate(domain="Bariers to Housing and Services")

data_environment <- data_long %>%
  group_by(age, `Living Environment`) %>%
  summarise(pop=sum(pop)) %>%
  rename(decile=`Living Environment`) %>%
  mutate(domain="Living Environment")

# Combine the seven domains into one long data frame.
# Fix: the original listed data_health twice in bind_rows(), duplicating every
# row of the "Health & Disability" domain in the combined data.
data_domains <- bind_rows(data_income, data_employment, data_education, data_health,
                          data_crime, data_housing, data_environment)

tiff("Outputs/EngPopxIMDxAgexDomain.tiff", units="in", width=10, height=8, res=500)
ggplot(data_domains, aes(x=age, y=pop, colour=as.factor(decile)))+
  geom_line()+
  scale_colour_paletteer_d("RColorBrewer::BrBG", direction=-1, name="IMD Decile",
                           labels=c("1 - most deprived", "2", "3", "4", "5", "6", "7", "8",
                                    "9", "10 - least deprived"))+
  scale_x_continuous(name="Age")+
  scale_y_continuous(name="Population")+
  facet_wrap(~domain)+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)))+
  labs(title="The age distribution of deprivation depends on how you measure deprivation",
       subtitle="English population by age across deciles of each domain of the Index of Multiple Deprivation",
       caption="Data from ONS & DHCLG | Plot by @VictimOfMaths")
dev.off()
|
aa38db6ec6d1e4215d0d7245456aa273fc2a8fe0
|
4cd34a0dc01b93df42fb4972a29cbe4f111ad366
|
/ER_exp_res.R
|
b0fcbcec8a62e905735aa0fb19b423d5b4a2f705
|
[] |
no_license
|
ShayanBordbar/ER_scripts
|
06a8692552699d99dd7b362d6e0c0ece69237bc5
|
0cbb484acd3644edd3ce631fabc95e31a08ae3f6
|
refs/heads/main
| 2023-01-11T09:29:27.588125
| 2020-11-12T12:31:00
| 2020-11-12T12:31:00
| 307,785,178
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 79
|
r
|
ER_exp_res.R
|
# plotting ER experimental data
# NOTE(review): the objects below are referenced but never defined in this
# file; presumably they are loaded into the workspace elsewhere before this
# script is run — confirm. Evaluating each name at top level auto-prints it.
aavariant_1
aavariant_2
aavariant_3
aavariant_4
|
f943a8183b9d81dec682ca07523f0782503a6666
|
2e888d2f1e04f076abc5ca085d473970403635d7
|
/man/resASICS-methods.Rd
|
8bf1ced741cfb57d0beef2234ab47ae913a9d7f2
|
[] |
no_license
|
cran/ASICS
|
a2c769dd9661de2fe930dac52dd90293a6704771
|
63fb18f3c3a1e1108b051022af6c4af7756ac3b9
|
refs/heads/master
| 2021-09-05T01:38:33.022053
| 2018-01-23T12:57:21
| 2018-01-23T12:57:21
| 106,000,140
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,106
|
rd
|
resASICS-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resASICS-class.R
\docType{methods}
\name{resASICS-methods}
\alias{resASICS-methods}
\alias{summary,resASICS-method}
\alias{summary.resASICS}
\alias{show,resASICS-method}
\alias{show.resASICS}
\alias{print,resASICS-method}
\alias{print.resASICS}
\alias{plot,resASICS-method}
\alias{plot.resASICS}
\title{S4 methods to represent results of ASICS.}
\usage{
\S4method{summary}{resASICS}(object, ...)
\S4method{show}{resASICS}(object)
\S4method{print}{resASICS}(x)
\S4method{plot}{resASICS}(x, y, xmin = 0.5, xmax = 10, ymin = 0,
ymax = NULL, add_metab = NULL)
}
\arguments{
\item{object}{an object of class resASICS}
\item{...}{not used}
\item{x}{an object of class resASICS}
\item{y}{not used}
\item{xmin, xmax, ymin, ymax}{lower and upper bounds for x and y, respectively}
\item{add_metab}{name of one metabolite to add to the plot. Default to
\code{NULL} (no pure spectrum added to the plot)}
}
\value{
plot the true and recomposed (as estimated by \code{\link{ASICS}})
spectra on one figure. In addition, one pure metabolite spectrum (as
provided in the reference library) can be superimposed to the plot.
}
\description{
S4 methods to represent results of ASICS.
}
\examples{
\dontshow{
lib_file <- system.file("extdata", "library_for_examples.rda",
package = "ASICS")
cur_path <- system.file("extdata", "example_spectra", "AG_faq_Beck01",
package = "ASICS")
to_exclude <- matrix(c(4.5,5.1,5.5,6.5), ncol = 2, byrow = TRUE)
result <- ASICS(path = cur_path, exclusion.areas = to_exclude,
nb.iter.signif = 10, library.metabolites = lib_file)
#Results of ASICS
result
summary(result)
plot(result)
}
\dontrun{
cur_path <- system.file("extdata", "example_spectra", "AG_faq_Beck01",
package = "ASICS")
to_exclude <- matrix(c(4.5,5.1,5.5,6.5), ncol = 2, byrow = TRUE)
result <- ASICS(path = cur_path, exclusion.areas = to_exclude)
result
summary(result)
plot(result)
}
}
\seealso{
\code{\link{ASICS}} \code{\link{resASICS-class}}
}
|
24f4d33a9335316c00f6dc52b309f863c90192c5
|
7c20b3e203339f9880c7eafdab032977fd7055d1
|
/R/pep.massmatch.R
|
25510f60c4fc4f6fd35e0ac406cec9f6cd305d71
|
[] |
no_license
|
goat-anti-rabbit/labelpepmatch.R
|
17832e25c458c1d6738ae052b0ac6677c8ea48fd
|
798b055a6826139af5e1539cb7a60baebf9458f6
|
refs/heads/master
| 2021-01-10T10:47:26.439821
| 2015-11-23T15:00:09
| 2015-11-23T15:00:09
| 36,013,471
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,521
|
r
|
pep.massmatch.R
|
#' Mass match a vector of masses to a database.
#'
#' Mass match peak pairs from a pepmatched object to known databases. Can call inbuilt databases, but you can also use your own local database.
#' @author Rik Verdonck & Gerben Menschaert
#' @seealso \code{\link{pep.id}}, \code{\link{pep.massmatch}}
#' @param input Numeric. Either a single value, a vector of values, or a dataframe or matrix with one column with name MW
#' @param ID_thresh Numeric. Maximal allowed mass difference in ppm for identification.
#' @param presetdb A preset database. For the future (not ready yet)
#' @param db In case no preset database is chosen: a database (table) with the first 3 columns \code{name, MW} and \code{sequence} (as read in with the \code{\link{download_lpm_db}} function) The amino acid sequences have to obey rules when you want to use them for mass recalculation or generation of a decoy database. Amino acids have to be capitalized one letter codes. For details, see \code{\link{calculate_peptide_mass}}
#' @param dbpath Character. In case a local database is used. Should be the filepath of a table, separated by dbseparator, with first tree columns: name, mass in Dalton and sequence.
#' @param dbseparator Character. Column separator of database.
#' @param dbheader Logical. Does the database have a header? Default FALSE
#' @param masscorrection Logical. Should masses be corrected on the basis of identifications? Caution should be payed here, since this will only work with a sufficiently high number of real matches. This rather sketchy feature should be considered something interesting to have a look at, rather than a compulsory part of the pipeline. Always also run without mass correction and compare!!! Default is FALSE.
#' @param FDR Logical. Test false discovery rate of peptide mass match identification by calculating a decoy database and look how many hits you get there. Uses \code{\link{generate_random_db}}.
#' @param iterations How many iterations of FDR should be ran? This might take some time for larger datasets.
#' @param checkdb Look if the masses and sequences in your database make sense. UNDER CONSTRUCTION!!!
#' @param graphics Only applies when FDR is TRUE.
#' @param verbose Logical. If TRUE verbose output is generated during identifications.
#' @export
#' @exportClass pep_massmatched
#' @return An object of class \code{pep_massmatched} that can be used for subsequent analysis using labelpepmatch functions.
### TO DO: plot functie werkt nog niet. Heb ze voorlopig uitgeschakeld.
### TO DO: iets is niet in orde met de input-output structuur. Als dit een standalone functie is, kan de output class niet "pepmatched" zijn! Er was oorspronkelijk een "run=1" parameter, maar die heb ik eruit gehaald omdat hij enkel voorkomt in de uitgehashte stukjes. Dit dient nagekeken te worden!
### TO DO: N_identifications kolom blijft leeg bij standalone gebruik.
pep.massmatch <-
function (input,db,presetdb=NA,dbpath=NA,dbseparator=",",dbheader=FALSE,ID_thresh=10,masscorrection=F,FDR=F,iterations=10,checkdb=F,graphics=F,verbose=F)
{
### Read in database
if (!is.na(presetdb))
{
db<-download_lpm_db(presetdb)
}else if(missing(db)==F){
db<-db
}else{
if(missing(dbpath)){stop("ERROR: no database found")}
db<-read.table(dbpath,sep=dbseparator,header=dbheader)
}
SINGLECOLUMN<-FALSE
if(length(input)==1){masscorrection<-F;input<-as.data.frame(input)}
if(ncol(as.data.frame(input))==1)
{
SINGLECOLUMN<-TRUE
input<-as.data.frame(input)
colnames(input)<-"MW"
input<-cbind.data.frame("MW"=input,"N_identifications"=NA,"pepID"="unknown","pepseq"=NA,"pepmass"=NA,"delta_m"=NA,stringsAsFactors=F)
}
if("MW"%in%colnames(input)==F){stop("No column with column 'MW' found in input\n")}
########################################################################
###Routine to calculate systematic error on experimental mass measure###
########################################################################
### idDifs is a vector containing all the delta observed-theoretical mass
if(masscorrection==TRUE)
{
idDifs <- c()
for (i in 1 : nrow(input))
{
for (j in 1 : nrow(db))
{
if (abs(as.numeric(input$MW[i]) - as.numeric(db[j,2])) <= as.numeric(input$MW[i])*ID_thresh*10e-6)
{
idDifs <- c(idDifs, (as.numeric(input$MW[i]) - as.numeric(db[j,2])))
}
}
}
#print(idDifs)
if(length(idDifs)<10){cat("warning: mass correction is based on less than 10 peptides\n")}
if(length(idDifs)<4){stop("error: mass correction on the basis of 3 or less peptides is impossible, run again with masscorrection is false\n")}
### delta is the mean difference between observed and predicted
delta<- -(mean(idDifs))
### if (meanIdDifs <= 0) delta <- (sd(idDifs)) else delta <- (-sd(idDifs))
if(verbose==TRUE){print(paste(nrow(input),"peak pairs"))}
if(verbose==TRUE){print(paste("Delta is ",delta))}
if(masscorrection==TRUE)
{
if(verbose==TRUE){print(paste(length(idDifs),"identifications before correction"))}
}
### Add two columns and column names to ResultMatrix
deltaBKP<-delta
}else
{delta=0}
########################################################################
### Here the real identifications start ###
########################################################################
### (delta-adjusted) MW's are compared to database of known peptides!
idDifsNew <- NULL
for (i in 1 : nrow(input))
{
for (j in 1 : nrow(db))
{
if(abs((as.numeric(input$MW[i])+delta)- as.numeric(db[j,2])) <= as.numeric(input$MW[i])*ID_thresh*10e-6)
{
idDifsNew <- c(idDifsNew,(as.numeric(input$MW[i])+delta)- as.numeric(db[j,2]))
### print (c("MW",input[i,15],"pepMass",identifMatrix[j,5]))
input$N_identifications [i] <- input$N_identifications[i]+1
input$pepID [i] <- as.character (db[j,1])
input$pepseq [i] <- as.character (db[j,3])
input$pepmass [i] <- as.numeric (db[j,2])
input$delta_m [i] <- as.numeric (db[j,2])-as.numeric(input$MW[i])
#}else
#{ input$pepId [i] <- as.character (input$MW[i]+delta)
}
}
}
### Generate mock db as a decoy
numberofhits <- NULL
FDR_summaryvector<-NULL
FDR_precisionvector<-NULL
dblist<-NULL
if(FDR==T)
{
if(verbose==T){cat("FDR estimation in progress\n")}
dblist<-list()
for (iteration in 1:iterations)
{
if(verbose==T){cat(".")}
decoy<-generate_random_db(db,size=1)
dblist[[iteration]]<-decoy
idDifsDECOY<-NULL
for (i in 1:nrow(input))
{
for(j in 1:nrow(db))
{
if(abs((as.numeric(input$MW[i])+delta)- as.numeric(decoy[j,2])) <= as.numeric(input$MW[i])*ID_thresh*10e-6)
{
idDifsDECOY <- c(idDifsDECOY,(as.numeric(input$MW[i])+delta)- as.numeric(decoy[j,2]))
}
}
}
FDR_precisionvector<-c(FDR_precisionvector,idDifsDECOY)
numberofhits<-c(numberofhits,length(idDifsDECOY))
}
if(verbose==T){cat("\n")}
FDR_mean<-mean(numberofhits)
FDR_median<-median(numberofhits)
FDR_sd<-sd(numberofhits)
FDR_sem<-FDR_sd/sqrt(iterations)
FDR_max<-max(numberofhits)
FDR_min<-min(numberofhits)
FDR_summaryvector<-c("mean"=FDR_mean,"median"=FDR_median,"min"=FDR_min,"max"=FDR_max,"sd"=FDR_sd,"sem"=FDR_sem)
#print(FDR_summaryvector)
if(graphics==T)
{
#hist(numberofhits, col="lightgreen", prob=TRUE, xlab="number of false positives", main=paste("FDR estimation for run ",pepmatched$design[run,1],sep=""))
#curve(dnorm(x, mean=FDR_mean, sd=FDR_sd),col="darkblue", lwd=2, add=TRUE, yaxt="n")
}
}
if(masscorrection==TRUE)
{
deltanew<- -(mean(idDifsNew))
delta <- deltaBKP
}
if(masscorrection==TRUE & verbose == TRUE)
{
print(paste("Delta after correction is ",deltanew))
print(paste(length(idDifsNew),"identifications after correction"))
if(FDR==T)
{
print(idDifsDECOY)
}
}else
{
if(verbose==TRUE)
{
print("No correction used")
print(paste(length(idDifsNew), "identifications"))
if(FDR==T)
{
print(idDifsDECOY)
}
}
}
#finally: fill up isID column
input$isID=as.logical(input$N_identifications)
### Sort by Id and print to screen
identified_peptides<-unique(input$pepID)
identifylist=list(
"matchlist"=input,
"delta"=delta,
"identified_peptides"=identified_peptides,
"FDR_hits"=as.vector(numberofhits),
"FDR_summaryvector"=FDR_summaryvector,
"FDR_precisionvector"=FDR_precisionvector,
"dblist"=dblist
)
class(identifylist)="pep_massmatched"
return(identifylist)
}
|
fdf338c9738f31b8462cd68a440b135bcd5ed813
|
46186b16ffaa98311ca997dee7c91008131e2e7e
|
/ui.R
|
cba905fa075770ff3c879d86b0bd4c6c236dc1c5
|
[] |
no_license
|
SiminaB/ShinyDemo
|
c751146758d7cbeada1ce6f5fa6fa9a8bc52e770
|
b406e7a5ea7527d38b20f79e271f76b6ce9cafa2
|
refs/heads/master
| 2016-09-03T06:35:12.100376
| 2015-05-07T20:33:56
| 2015-05-07T20:33:56
| 35,241,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 928
|
r
|
ui.R
|
# ui.R -- Shiny UI definition: a sidebar of input controls plus a main
# panel with three plot placeholders (rendered by the matching server
# function -- presumably server.R; not visible here, confirm).
library(shiny)

# Define UI for application that draws a histogram
shinyUI(fluidPage(

  # Application title
  titlePanel("Hello World!"),

  # Sidebar with a slider input for the number of bins
  sidebarLayout(
    sidebarPanel(
      # Histogram bin count, exposed to the server as input$bins.
      sliderInput("bins",
                  "Number of bins:",
                  min = 5,
                  max = 50,
                  value = 30),
      # Histogram fill colour, exposed as input$colHist.
      selectInput("colHist", "Histogram color",
                  choices=c("skyblue","darkgray","red")),
      helpText("Here is some plain text"),
      # Plotting symbol code (R pch values 0-25), exposed as input$pch.
      selectInput("pch", "Point type",
                  choices=0:25),
      helpText("Here is more text. More and more and more and more and more.
               Like a whole paragraph's worth...")
    ),

    # Show a plot of the generated distribution
    mainPanel(
      # Output slots filled server-side as output$distPlot, output$distPlot2,
      # output$distPlot3.
      plotOutput("distPlot"),
      plotOutput("distPlot2"),
      plotOutput("distPlot3")
    )
  )
))
|
c1744d32c520801228275cdbd133fdf86896da20
|
0ad14c42e1bdf6597e3358f210fdf7bad6f939b2
|
/R/utils.R
|
2434dda33300256059395dfb1c660b9429efa002
|
[] |
no_license
|
aj2duncan/msmbstyle
|
b6611cb334099285fdd9d735e007589125b5a4c5
|
8e07df39dbfe61450b755dfcaa7e214fab4cafd8
|
refs/heads/master
| 2021-08-29T01:07:43.900133
| 2021-08-26T15:29:39
| 2021-08-26T15:29:39
| 249,950,203
| 0
| 0
| null | 2020-03-25T10:35:51
| 2020-03-25T10:35:50
| null |
UTF-8
|
R
| false
| false
| 2,428
|
r
|
utils.R
|
# Resolve a path inside a package's rmarkdown template "resources"
# directory.
#
# Args:
#   name: template name (directory under inst/rmarkdown/templates).
#   package: package that ships the template.
#   ...: further path components below "resources".
# Returns: the installed file path, or "" when it does not exist
#   (system.file() semantics).
template_resources = function(name, package, ...) {
  resource_dir <- c("rmarkdown", "templates", name, "resources")
  do.call(system.file, c(as.list(resource_dir), list(..., package = package)))
}
# Fixed-string (non-regex) gsub(): every argument is forwarded to
# gsub() with fixed = TRUE, so pattern characters are matched literally.
gsub_fixed = function(...) {
  forwarded <- c(list(...), list(fixed = TRUE))
  do.call(gsub, forwarded)
}
# TRUE when the pandoc binary found by rmarkdown is at least version 2.0.
pandoc2.0 = function() {
  min_version <- "2.0"
  rmarkdown::pandoc_available(min_version)
}
# Return the current value of a session-wide counter and advance it.
#
# The counter lives in a plain text file ("solution_idx") in tempdir(),
# so it persists across calls within one R session and starts at "1"
# when the file is absent.
#
# Returns: the pre-increment counter value as a character scalar
#   (post-increment semantics: the incremented value is written back
#   for the next call).
generate_id2 <- function() {
  counter_file <- file.path(tempdir(), "solution_idx")
  # Plain if/else instead of the vectorized ifelse(): the condition is a
  # scalar, and ifelse() would also lazily mis-handle a multi-line file.
  # Only the first line is meaningful, so read exactly that.
  if (file.exists(counter_file)) {
    id <- readLines(counter_file)[1]
  } else {
    id <- "1"
  }
  # Persist the incremented value for the next call.
  writeLines(text = as.character(as.integer(id) + 1), con = counter_file)
  return(id)
}
## Assemble the HTML <script>/<link> snippet injected into rendered pages.
## Returns one newline-joined string containing:
##   * toggle_visibility(id1, id2): JS helper that shows/hides element id1
##     and swaps the Font Awesome plus/minus icon classes on id2 (used by
##     collapsible blocks).
##   * an onscroll handler that, on viewports narrower than 768px, slides
##     the #navbar out of view while scrolling down and restores it when
##     scrolling up.
##   * Bootstrap 3.3.7 CSS/JS loaded from the maxcdn CDN (the optional
##     theme, jQuery and BootSideMenu lines are deliberately commented out).
toggle_script <- function() {
  return(
    # Solution-toggle helper script.
    paste("<script>
function toggle_visibility(id1, id2) {
var e = document.getElementById(id1);
var f = document.getElementById(id2);
e.style.display = ((e.style.display!='none') ? 'none' : 'block');
if(f.classList.contains('fa-plus-square')) {
f.classList.add('fa-minus-square')
f.classList.remove('fa-plus-square')
} else {
f.classList.add('fa-plus-square')
f.classList.remove('fa-minus-square')
}
}
</script>",
          # Auto-hiding navbar for narrow (mobile) viewports.
          '<script>
var prevScrollpos = window.pageYOffset;
window.onscroll = function() {
if ($(window).width() < 768) {
var currentScrollPos = window.pageYOffset;
if (prevScrollpos > currentScrollPos) {
document.getElementById("navbar").style.top = "0";
} else {
document.getElementById("navbar").style.top = "-50px";
}
prevScrollpos = currentScrollPos;
}
}
</script>',
          # '<script>$("#mySidenav").BootSideMenu({pushBody:false, width:"25%"});</script>',
          # '<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>',
          # Bootstrap assets from CDN (theme stylesheet left disabled).
          '<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
<!-- Optional theme
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap-theme.min.css" integrity="sha384-rHyoN1iRsVXV4nD0JutlnGaslCJuC7uwjduW9SVrLvRYooPp2bWYgmgJQIXwl/Sp" crossorigin="anonymous"> -->
<!-- Latest compiled and minified JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>',
          sep = "\n")
  )
}
|
e9ed707fe89d5859e2b3c4d7e57d28b326825995
|
e4f2d9241c124c4f7665241e7021837f60875868
|
/Calculate.Returns.R
|
2be6f9f21b8894c5286147e9a6fc147d9694a7c0
|
[] |
no_license
|
xiaopz0/theme_invest
|
5104812d748a18638057ee319a5b075f29b047fe
|
788c24effd8add7560b4115ad7b62ba1c05ae382
|
refs/heads/master
| 2021-01-10T01:18:56.850307
| 2016-12-12T02:00:44
| 2016-12-12T02:00:44
| 55,928,007
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,976
|
r
|
Calculate.Returns.R
|
# Build a zoo series of log-returns from a raw returns table.
#
# Dates are taken from a "Date" column when present, otherwise from the
# row names coerced with as.Date(). Return values come from a "Last"
# column when present, otherwise "Settle"; each value r is transformed
# to log(1 + r). NA and infinite entries are zeroed before returning.
#
# Args:
#   returns_: data frame (or matrix-like) with Date/Last/Settle columns.
# Returns: a zoo series of log-returns ordered by date.
Extract.Returns <- function(returns_) {
  dates <- if ("Date" %in% colnames(returns_)) {
    returns_$Date
  } else {
    as.Date(rownames(returns_))
  }
  raw <- if ("Last" %in% colnames(returns_)) returns_$Last else returns_$Settle
  returns <- log(zoo(raw + 1, order.by = dates))
  # Neutralise missing and infinite observations (e.g. log of zero).
  returns[is.na(returns) | is.infinite(returns)] <- 0
  return(returns)
}
# Build a zoo price series from a raw price table.
#
# Dates are taken from a "Date" column when present, otherwise from the
# row names coerced with as.Date(). Prices come from a "Last" column when
# present, otherwise "Settle". NA and infinite entries are set to 0.
#
# Args:
#   price_: data frame (or matrix-like) with Date/Last/Settle columns.
# Returns: a zoo series of prices ordered by date.
Extract.Price <- function(price_) {
  dates <- if ("Date" %in% colnames(price_)) {
    price_$Date
  } else {
    as.Date(rownames(price_))
  }
  level <- if ("Last" %in% colnames(price_)) price_$Last else price_$Settle
  price <- zoo(level, order.by = dates)
  # Neutralise missing and infinite observations.
  price[is.na(price) | is.infinite(price)] <- 0
  return(price)
}
# Merge the Calculate.Returns() series of an asset and its numbered
# variants.
#
# `asset` is the *name* (character scalar) of an object visible from this
# function's environment (in practice the global environment); optional
# variants are looked up under the names "<asset>2" .. "<asset>4" and
# merge()d in column-wise when they exist.
#
# Args:
#   asset: character scalar, base name of the asset objects.
# Returns: the merged return series (a single series when no variants
#   exist).
Calculate.Returns.list <- function(asset) {
  # get()/exists() replace the eval(parse(text = ...)) anti-pattern: same
  # lexical lookup path, without re-parsing arbitrary code.
  list_returns <- Calculate.Returns(get(asset))
  for (i in 2:4) {
    variant <- paste0(asset, i)
    if (exists(variant)) {
      list_returns <- merge(list_returns,
                            Calculate.Returns(get(variant)))
    }
  }
  return(list_returns)
}
# Merge the Extract.Returns() series of an asset and its numbered
# variants.
#
# `asset` is the *name* (character scalar) of an object visible from this
# function's environment (in practice the global environment); optional
# variants are looked up under the names "<asset>2" .. "<asset>4" and
# merge()d in column-wise when they exist.
#
# Args:
#   asset: character scalar, base name of the asset objects.
# Returns: the merged return series (a single series when no variants
#   exist).
Extract.Returns.list <- function(asset) {
  # get()/exists() replace the eval(parse(text = ...)) anti-pattern: same
  # lexical lookup path, without re-parsing arbitrary code.
  list_returns <- Extract.Returns(get(asset))
  for (i in 2:4) {
    variant <- paste0(asset, i)
    if (exists(variant)) {
      list_returns <- merge(list_returns,
                            Extract.Returns(get(variant)))
    }
  }
  return(list_returns)
}
# Excess series: first column minus the (row-wise) mean of the remaining
# columns.
#
# Behaviour by input shape:
#   * no dim (plain vector): returns a zero series of the same shape
#     (x - x).
#   * 2 columns: col 1 minus col 2.
#   * 3 or 4 columns: col 1 minus rowMeans() of the remaining columns,
#     ignoring NAs.
# Any other column count falls through and returns NULL invisibly.
Calculate.Raw <- function(list_returns) {
  n_cols <- dim(list_returns)[2]
  if (is.null(n_cols)) {
    return(list_returns - list_returns)
  }
  if (n_cols == 2) {
    return(list_returns[, 1] - list_returns[, 2])
  }
  if (n_cols == 3) {
    return(list_returns[, 1] - rowMeans(list_returns[, 2:3], na.rm = TRUE))
  }
  if (n_cols == 4) {
    return(list_returns[, 1] - rowMeans(list_returns[, 2:4], na.rm = TRUE))
  }
}
|
ef64aa979ca31f448929a869a65b419b836a2b9e
|
09e486d1029b335c4c103184559f203661bf05da
|
/-/-.R
|
50e0fb20e104c20682949fefb3f086ffe51c1031
|
[] |
no_license
|
shrinivas93/-
|
7c1a46fca93c3060b947e1d8bd1e66e44ae78b74
|
fb0e63b5cbe03af4c2a086baa6d49914fb7626ff
|
refs/heads/master
| 2020-04-06T04:03:38.887326
| 2017-02-24T19:38:36
| 2017-02-24T19:38:36
| 83,063,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,691
|
r
|
-.R
|
# Pipeline: read a MySQL "comments" table, clean the text, render a
# word-cloud JPEG, score comment polarity with naive Bayes, and write the
# scored rows back to a "sentiment" table.
# Console output is redirected to a throwaway temp file for the whole run
# and restored by the final sink().
sink(tempfile())
library(NLP)
library(tm)
library(RColorBrewer)
library(wordcloud)
library(wordcloud2)
library(sentiment)
library(DBI)
library(RMySQL)
# Destination directory for the generated word-cloud images.
output = "D:/wordclouds/"
db =
  dbConnect(
    MySQL(),
    user = "root",
    password = "root",
    dbname = "semicolon",
    host = "localhost"
  )
# NOTE(review): on.exit() only registers cleanup inside a function; at the
# top level of a script it has no effect, so the connection may stay open
# on error -- confirm intended behaviour.
on.exit(dbDisconnect(db))
dt = dbReadTable(db,
                 'comments')
# Cleaning helpers: strip URLs, then everything that is not a letter or
# whitespace.
removeURL = function(x)
  gsub("http[^[:space:]]*", "", x)
removeNumPunct = function(x)
  gsub("[^[:alpha:][:space:]]*", "", x)
# English stopwords plus domain words; "not" is kept (carries sentiment).
myStopwords = c(stopwords('english'), "test", "can")
myStopwords = setdiff(myStopwords, "not")
# Build and normalise the corpus from the comment column.
myCorpus = Corpus(VectorSource(dt$comment))
myCorpus = tm_map(myCorpus, content_transformer(tolower))
myCorpus = tm_map(myCorpus, content_transformer(removeURL))
myCorpus = tm_map(myCorpus, content_transformer(removeNumPunct))
myCorpus = tm_map(myCorpus, stripWhitespace)
myCorpus = tm_map(myCorpus, removeWords, myStopwords)
# Term-document matrix keeping terms of length >= 2; word.freq holds
# per-term totals sorted most-frequent first.
tdm =
  TermDocumentMatrix(myCorpus, control = list(wordLengths = c(2, Inf)))
m = as.matrix(tdm)
word.freq = sort(rowSums(m), decreasing = T)
# Render the cloud into a timestamped JPEG inside `output`.
timestamp = format(Sys.time(), "%d%m%Y_%H%M%S")
jpeg(
  paste(output ,timestamp, ".jpeg"),
  width = 1920,
  height = 1920,
  res = 400,
  quality = 70
)
wordcloud(
  words = names(word.freq),
  freq = word.freq,
  min.freq = 3,
  random.order = FALSE,
  rot.per=0.1,
  colors = brewer.pal(8, "Dark2")
)
dev.off()
# Naive-Bayes polarity per comment (column 4 = best-fit label), mapped to
# a -1 / 0 / +1 score column.
class_pol = classify_polarity(dt$comment , algorithm = 'naive bayes')
polarity = class_pol[, 4]
dt$score = 0
dt$score[polarity == "positive"] = (1)
dt$score[polarity == "negative"] = (-1)
# Persist scored comments back to MySQL, replacing any previous table.
dbWriteTable(
  conn = db,
  name = 'sentiment',
  value = as.data.frame(dt),
  overwrite = TRUE
)
# Restore normal console output.
sink()
|
d8a21287d4bfcd4cfe2eb0ae924cab10eda5013f
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/QRM/man/Credit.Rd
|
1a10e778485eda67eb4444ccc9694c37b383b335
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,769
|
rd
|
Credit.Rd
|
\name{Credit}
\alias{Credit}
\alias{cal.beta}
\alias{cal.claytonmix}
\alias{cal.probitnorm}
\alias{dclaytonmix}
\alias{pclaytonmix}
\alias{rclaytonmix}
\alias{dprobitnorm}
\alias{pprobitnorm}
\alias{rprobitnorm}
\alias{rlogitnorm}
\alias{rtcopulamix}
\alias{fit.binomial}
\alias{fit.binomialBeta}
\alias{fit.binomialLogitnorm}
\alias{fit.binomialProbitnorm}
\alias{momest}
\alias{rbinomial.mixture}
\title{
Credit Risk Modelling
}
\description{
Functions for modelling credit risk:
\itemize{
\item Bernoulli mixture model with prescribed default and joint
default probabilities
\item Bernoulli mixture model with Clayton copula dependencies of
default.
\item Probitnormal Mixture of Bernoullis
\item Beta-Binomial Distribution
\item Logitnormal-Binomial Distribution
\item Probitnormal-Binomial Distribution
}
}
\usage{
cal.beta(pi1, pi2)
cal.claytonmix(pi1, pi2)
cal.probitnorm(pi1, pi2)
dclaytonmix(x, pi, theta)
pclaytonmix(q, pi, theta)
rclaytonmix(n, pi, theta)
rtcopulamix(n, pi, rho.asset, df)
dprobitnorm(x, mu, sigma)
pprobitnorm(q, mu, sigma)
rprobitnorm(n, mu, sigma)
rbinomial.mixture(n = 1000, m = 100,
model = c("probitnorm", "logitnorm", "beta"), ...)
rlogitnorm(n, mu, sigma)
fit.binomial(M, m)
fit.binomialBeta(M, m, startvals = c(2, 2), ses = FALSE, ...)
fit.binomialLogitnorm(M, m, startvals = c(-1, 0.5), ...)
fit.binomialProbitnorm(M, m, startvals = c(-1, 0.5), ...)
momest(data, trials, limit = 10)
}
\arguments{
\item{data}{\code{vector}, numbers of defaults in each time period.}
\item{df}{\code{numeric}, degree of freedom.}
\item{limit}{\code{integer}, maximum order of joint default probability
to estimate.}
\item{M}{\code{vector}, count of successes.}
\item{m}{\code{vector}, count of trials.}
\item{model}{\code{character}, name of mixing distribution.}
\item{mu}{\code{numeric}, location parameter.}
\item{n}{\code{integer}, count of random variates.}
\item{pi}{\code{numeric}, default probability.}
\item{pi1}{\code{numeric}, default probability.}
\item{pi2}{\code{numeric}, joint default probability.}
\item{q}{\code{numeric}, values at which CDF should be evaluated.}
\item{sigma}{\code{numeric}, scale parameter.}
\item{ses}{\code{logical}, whether standard errors should be returned.}
\item{startvals}{\code{numeric}, starting values.}
\item{theta}{\code{numeric}, parameter of distribution.}
\item{trials}{\code{vector}, group sizes in each time period.}
\item{x}{\code{numeric}, values at which density should be evaluated.}
\item{rho.asset}{\code{numeric}, asset correlation parameter.}
\item{...}{ellipsis, arguments are passed down to either mixing
distribution or \code{nlminb()}.}
}
\details{
\code{cal.beta()}: calibrates a beta mixture distribution on unit
interval to give an exchangeable Bernoulli mixture model with
prescribed default and joint default probabilities (see pages 354-355
in QRM).\cr
\code{cal.claytonmix()}: calibrates a mixture distribution on unit
interval to give an exchangeable Bernoulli mixture model with
prescribed default and joint default probabilities. The mixture
distribution is the one implied by a Clayton copula model of default
(see page 362 in QRM).\cr
\code{cal.probitnorm()}: calibrates a probitnormal mixture
distribution on unit interval to give an exchangeable Bernoulli
mixture model with prescribed default and joint default probabilities
(see page 354 in QRM).\cr
\code{dclaytonmix()}, \code{pclaytonmix()}, \code{rclaytonmix()}:
density, cumulative probability, and random generation for a mixture
distribution on the unit interval which gives an exchangeable
Bernoulli mixture model equivalent to a Clayton copula model (see page
362 in QRM).\cr
\code{fit.binomial()}: fits binomial distribution by maximum
likelihood.\cr
\code{dprobitnorm()}, \code{pprobitnorm()}, \code{rprobitnorm()}:
density, cumulative probability and random number generation for
distribution of random variable Q on unit interval such that the
probit transform of Q has a normal distribution with parameters
\eqn{\mu}{mu} and \eqn{\sigma}{sigma} (see pages 353-354 in QRM).\cr
\code{fit.binomialBeta()}: fit a beta-binomial distribution by maximum
likelihood.\cr
\code{fit.binomialLogitnorm()}: fits a mixed binomial distribution
where success probability has a logitnormal distribution. Lower and
upper bounds for the input parameters M and m can be specified by
means of the arguments \code{lower} and \code{upper}, which are passed to
\code{nlminb()}. If convergence occurs at an endpoint of either limit,
one need to reset lower and upper parameter estimators and run the
function again.\cr
\code{fit.binomialProbitnorm()}: Fits a mixed binomial distribution
where success probability has a probitnormal distribution. Lower and
upper bounds for the input parameters M and m can be specified by
means of the arguments \code{lower} and \code{upper}, which are passed to
\code{nlminb()}. If convergence occurs at an endpoint of either limit,
one need to reset lower and upper parameter estimators and run the
function again.\cr
\code{momest()}: calculates moment estimator of default probabilities
and joint default probabilities for a homogeneous group. First
returned value is default probability estimate; second value is
estimate of joint default probability for two firms; and so on (see
pages 375-376 in QRM).\cr
\code{rbinomial.mixture()}: random variates from mixed binomial
distribution (see pages 354-355 and pages 375-377 of QRM).\cr
\code{rlogitnorm()}: Random number generation for distribution of
random variable Q on unit interval such that the probit transform of Q
has a normal distribution with parameters \eqn{\mu}{mu} and
\eqn{\sigma}{sigma} (see pages 353-354 in QRM).\cr
\code{rtcopulamix()}: random generation for mixing distribution on
unit interval yielding Student's t copula model (see page 361 in QRM,
exchangeable case of this model is considered).
}
\seealso{
\code{link[stats]{nlminb}}
}
\examples{
## calibrating models
pi.B <- 0.2
pi2.B <- 0.05
probitnorm.pars <- cal.probitnorm(pi.B, pi2.B)
probitnorm.pars
beta.pars <- cal.beta(pi.B, pi2.B)
beta.pars
claytonmix.pars <- cal.claytonmix(pi.B, pi2.B)
claytonmix.pars
q <- (1:1000) / 1001
q <- q[q < 0.25]
p.probitnorm <- pprobitnorm(q, probitnorm.pars[1],
probitnorm.pars[2])
p.beta <- pbeta(q, beta.pars[1], beta.pars[2])
p.claytonmix <- pclaytonmix(q, claytonmix.pars[1],
claytonmix.pars[2])
scale <- range((1 - p.probitnorm), (1 - p.beta), (1 - p.claytonmix))
plot(q, (1 - p.probitnorm), type = "l", log = "y", xlab = "q",
ylab = "P(Q > q)",ylim=scale)
lines(q, (1 - p.beta), col = 2)
lines(q, (1 - p.claytonmix), col = 3)
legend("topright", c("Probit-normal", "Beta", "Clayton-Mixture"),
lty=rep(1,3),col = (1:3))
## Clayton Mix
pi.B <- 0.0489603
pi2.B <- 0.003126529
claytonmix.pars <- cal.claytonmix(pi.B, pi2.B)
claytonmix.pars
q <- (1:1000) / 1001
q <- q[q < 0.25]
d.claytonmix <- dclaytonmix(q, claytonmix.pars[1], claytonmix.pars[2])
head(d.claytonmix)
## SP Data
data(spdata.raw)
attach(spdata.raw)
BdefaultRate <- Bdefaults / Bobligors
## Binomial Model
mod1a <- fit.binomial(Bdefaults, Bobligors)
## Binomial Logitnorm Model
mod1b <- fit.binomialLogitnorm(Bdefaults, Bobligors)
## Binomial Probitnorm Model
mod1c <- fit.binomialProbitnorm(Bdefaults, Bobligors)
## Binomial Beta Model
mod1d <- fit.binomialBeta(Bdefaults, Bobligors);
## Moment estimates for default probabilities
momest(Bdefaults, Bobligors)
pi.B <- momest(Bdefaults, Bobligors)[1]
pi2.B <- momest(Bdefaults, Bobligors)[2]
## Probitnorm
probitnorm.pars <- cal.probitnorm(pi.B, pi2.B)
q <- (1:1000)/1001
q <- q[ q < 0.25]
d.probitnorm <- dprobitnorm(q, probitnorm.pars[1], probitnorm.pars[2])
p <- c(0.90,0.95,0.975,0.99,0.995,0.999,0.9999,0.99999,0.999999)
sigma <- 0.2 * 10000 / sqrt(250)
VaR.t4 <- qst(p, df = 4, sd = sigma, scale = TRUE)
VaR.t4
detach(spdata.raw)
## Binomial Mixture Models
pi <- 0.04896
pi2 <- 0.00321
beta.pars <- cal.beta(pi, pi2)
probitnorm.pars <- cal.probitnorm(pi, pi2)
n <- 1000
m <- rep(500, n)
mod2a <- rbinomial.mixture(n, m, "beta", shape1 = beta.pars[1],
shape2 = beta.pars[2])
mod2b <- rbinomial.mixture(n, m, "probitnorm",
mu = probitnorm.pars[1],
sigma = probitnorm.pars[2])
}
\keyword{models}
|
265528fb421178b1228c3df4c9237196319764b4
|
91b3ec69b21860a68179cb4060ea1c9b2f2eaaf3
|
/Chap04.R
|
c8dc6848fb14cf2f2bc5fe222bfd639fdd763e92
|
[] |
no_license
|
zer05um2017/SiliconValleyDataScientist
|
95e5596d48540c7afbf7b12413ff5710b9ea4755
|
1236c2b0c3e8fbc508d114523f075526083f9d7b
|
refs/heads/master
| 2020-07-16T11:50:27.789127
| 2019-09-11T05:47:00
| 2019-09-11T05:47:00
| 205,784,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,109
|
r
|
Chap04.R
|
install.packages("gapminder")
help(package = "gapminder")
library(gapminder)
?gapminder
gapminder
head(gapminder)
tail(gapminder)
library(dplyr)
glimpse(gapminder)
gapminder$lifeExp
gapminder$gdpPercap
gapminder[, c('lifeExp', 'gdpPercap')]
gapminder[,c(4,6)]
gapminder %>% select(gdpPercap, lifeExp)
summary(gapminder$lifeExp)
summary(gapminder$gdpPercap)
cor(gapminder$lifeExp, gapminder$gdpPercap)
opar = par(mfrow=c(2,2))
hist(gapminder$lifeExp)
hist(gapminder$gdpPercap, nclass=50)
hist(sqrt(gapminder$gdpPercap), nclass=50)
hist(log10(gapminder$gdpPercap), nclass=50)
plot(log10(gapminder$gdpPercap), gapminder$lifeExp, cex=.5)
par(opar)
cor(gapminder$lifeExp, log10(gapminder$gdpPercap))
library(ggplot2)
library(dplyr)
library(gapminder)
gapminder %>% ggplot(aes(x=lifeExp)) + geom_histogram()
gapminder %>% ggplot(aes(x=gdpPercap)) + geom_histogram()
gapminder %>% ggplot(aes(x=gdpPercap)) + geom_histogram() + scale_x_log10()
gapminder %>% ggplot(aes(x=gdpPercap, y=lifeExp)) + geom_point() + scale_x_log10() + geom_smooth()
library(ggplot2)
?ggplot
example(ggplot)
glimpse(df)
ggplot(gapminder, aes(lifeExp)) + geom_histogram()
gapminder %>% ggplot(aes(lifeExp)) + geom_histogram() # 데이터 셋의 변수명 자동완성지원
?diamonds
?mpg
glimpse(diamonds)
glimpse(mpg)
gapminder %>% ggplot(aes(x=gdpPercap)) + geom_histogram()
gapminder %>% ggplot(aes(x=gdpPercap)) + geom_histogram() + scale_x_log10()
gapminder %>% ggplot(aes(x=gdpPercap)) + geom_freqpoly() + scale_x_log10()
gapminder %>% ggplot(aes(x=gdpPercap)) + geom_density() + scale_x_log10()
summary(gapminder)
diamonds %>% ggplot(aes(cut)) + geom_bar()
table(diamonds$cut)
prop.table(table(diamonds$cut))
round(prop.table(table(diamonds$cut)) * 100, 1)
diamonds %>%
group_by(cut) %>%
tally() %>%
mutate(pct=round(n/sum(n) * 100, 1))
diamonds %>% ggplot(aes(carat, price)) + geom_point()
diamonds %>% ggplot(aes(carat, price)) + geom_point(alpha=.1)
mpg %>% ggplot(aes(cyl, hwy)) + geom_point()
mpg %>% ggplot(aes(cyl, hwy)) + geom_jitter()
pairs(diamonds %>% sample_n(1000))
mpg %>% ggplot(aes(class, hwy)) + geom_boxplot()
unique(mpg$class)
mpg %>% ggplot(aes(class, hwy)) + geom_jitter(col='gray') + geom_boxplot(alpha=.5)
mpg %>% mutate(class=reorder(class, hwy, median)) %>%
ggplot(aes(class, hwy)) +
geom_jitter(col='gray') +
geom_boxplot(alpha=.5)
mpg %>%
mutate(class=factor(class, levels=c("2seater","subcompact","compact","midsize","minivan","suv","pickup"))) %>%
ggplot(aes(class, hwy)) + geom_jitter(col='gray') +
geom_boxplot(alpha=.5)
mpg %>%
mutate(class=factor(class, levels=c("2seater","subcompact","compact","midsize","minivan","suv","pickup"))) %>%
ggplot(aes(class, hwy)) + geom_jitter(col='gray') +
geom_boxplot(alpha=.5) + coord_flip()
library(dplyr)
glimpse(data.frame(Titanic))
xtabs(Freq ~ Class + Sex + Age + Survived, data.frame(Titanic))
?Titanic
Titanic
mosaicplot(Titanic, main = "Survival on the Titanic")
mosaicplot(Titanic, main = "Survival on the Titanic", color=TRUE)
apply(Titanic, c(3, 4), sum)
round(prop.table(apply(Titanic, c(3, 4), sum), margin = 1), 3)
apply(Titanic, c(2,4), sum)
round(prop.table(apply(Titanic, c(2, 4), sum), margin = 1), 3)
t2 = data.frame(Titanic)
t2 %>% group_by(Sex) %>%
summarize(n = sum(Freq),
survivors=sum(ifelse(Survived=="Yes", Freq, 0))) %>%
mutate(rate_survival=survivors/n)
library(tidyverse)
library(gapminder)
gapminder %>% filter(year==2007) %>%
ggplot(aes(gdpPercap, lifeExp)) +
geom_point() + scale_x_log10() +
ggtitle("Gapminder data for 2007")
gapminder %>% filter(year==2007) %>%
ggplot(aes(gdpPercap, lifeExp)) +
geom_point(aes(size=pop, col=continent)) + scale_x_log10() +
ggtitle("Gapminder data for 2007")
gapminder %>%
ggplot(aes(year, lifeExp, group=country)) +
geom_line()
gapminder %>%
ggplot(aes(year, lifeExp, group=country, col=continent)) +
geom_line() + scale_x_log10() +
ggtitle("Gapminder data for 2007")
gapminder %>%
ggplot(aes(year, lifeExp, group=country, col=continent)) +
geom_line() +
facet_wrap(~ continent)
|
de021c5a79be2328b830837d2e93ade3d70e5a30
|
e80f305e8baca14ca6ebae55665e522ecaa25bc3
|
/tests/testthat/test_01_included-data.R
|
030a2ed1298834e3026caea73dc787837368f2ae
|
[
"MIT"
] |
permissive
|
ha0ye/GPEDM
|
9162f56cdf925b30e3eda0110836acef4fad80db
|
2281a6a708f3faa2c8f08560f671d24da1ce330a
|
refs/heads/master
| 2020-07-07T06:53:10.094358
| 2019-08-20T19:19:42
| 2019-08-20T19:19:42
| 203,283,570
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82
|
r
|
test_01_included-data.R
|
context("Check structure of included datasets")
test_that("data is correct", {
})
|
22b75dedeeb24a053493011a49f153075fbfce8d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/plink/vignettes/plink-UD.R
|
a13b403142b7902431557f6d2c05b9a6df5cf9ca
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,257
|
r
|
plink-UD.R
|
### R code from vignette source 'plink-UD.Rnw'
###################################################
### code chunk number 1: plink-UD.Rnw:12-15
###################################################
library("plink")
options(prompt = "R> ", continue = "+ ", width = 70,
digits = 4, show.signif.stars = FALSE, useFancyQuotes = FALSE)
###################################################
### code chunk number 2: plink-UD.Rnw:385-396
###################################################
x <- matrix(c(
0.844, -1.630, 0.249, NA, NA, NA, NA, NA, NA, NA,
1.222, -0.467, -0.832, 0.832, NA, NA, NA, NA, NA, NA,
1.101, -0.035, -1.404, -0.285, 0.541, 1.147, NA, NA, NA, NA,
1.076, 0.840, 0.164, NA, NA, NA, NA, NA, NA, NA,
0.972, -0.140, 0.137, NA, NA, NA, NA, NA, NA, NA,
0.905, 0.522, -0.469, -0.959, NA, 0.126, -0.206, -0.257, 0.336, NA,
0.828, 0.375, -0.357, -0.079, -0.817, 0.565, 0.865, -1.186, -1.199, 0.993,
1.134, 2.034, 0.022, NA, NA, NA, NA, NA, NA, NA,
0.871, 1.461, -0.279, 0.279, NA, NA, NA, NA, NA, NA),
9, 10, byrow = TRUE)
###################################################
### code chunk number 3: plink-UD.Rnw:398-399
###################################################
round(x, 2)
###################################################
### code chunk number 4: plink-UD.Rnw:407-432
###################################################
a <- round(matrix(c(
0.844, NA, NA, NA, NA,
1.222, NA, NA, NA, NA,
1.101, NA, NA, NA, NA,
1.076, NA, NA, NA, NA,
0.972, NA, NA, NA, NA,
0.905, 0.522, -0.469, -0.959, NA,
0.828, 0.375, -0.357, -0.079, -0.817,
1.134, NA, NA, NA, NA,
0.871, NA, NA, NA, NA),
9, 5, byrow = TRUE), 2)
b <- round(matrix(c(
-1.630, NA, NA, NA, NA,
-0.467, -0.832, 0.832, NA, NA,
-0.035, -1.404, -0.285, 0.541, 1.147,
0.840, NA, NA, NA, NA,
-0.140, NA, NA, NA, NA,
0.126, -0.206, -0.257, 0.336, NA,
0.565, 0.865, -1.186, -1.199, 0.993,
2.034, NA, NA, NA, NA,
1.461, -0.279, 0.279, NA, NA),
9, 5, byrow = TRUE), 2)
c <- round(c(0.249, NA, NA, 0.164, 0.137, NA, NA, 0.022, NA), 2)
###################################################
### code chunk number 5: plink-UD.Rnw:434-435
###################################################
list(a = a, b = b, c = c)
###################################################
### code chunk number 6: plink-UD.Rnw:442-443
###################################################
cat <- c(2, 3, 5, 2, 2, 4, 5, 2, 3)
###################################################
### code chunk number 7: plink-UD.Rnw:457-459
###################################################
pm <- as.poly.mod(9, c("drm", "grm", "nrm"),
list(c(1, 4, 5, 8), c(2, 3, 9), 6:7))
###################################################
### code chunk number 8: plink-UD.Rnw:462-464
###################################################
pm <- as.poly.mod(9, c("grm", "drm", "nrm"),
list(c(2, 3, 9), c(1, 4, 5, 8), 6:7))
###################################################
### code chunk number 9: plink-UD.Rnw:490-491
###################################################
pars <- as.irt.pars(x, cat = cat, poly.mod = pm, location = TRUE)
###################################################
### code chunk number 10: plink-UD.Rnw:495-497
###################################################
common <- matrix(c(51:60, 1:10), 10, 2)
common
###################################################
### code chunk number 11: plink-UD.Rnw:502-505 (eval = FALSE)
###################################################
## pars <- as.irt.pars(x = list(x.D, x.E, x.F),
## common = list(common.DE, common.EF), cat = list(cat.D, cat.E, cat.F),
## poly.mod = list(poly.mod.D, poly.mod.E, poly.mod.F))
###################################################
### code chunk number 12: plink-UD.Rnw:510-511 (eval = FALSE)
###################################################
## pars <- combine.pars(x = list(pars.DE, pars.F), common = common.EF)
###################################################
### code chunk number 13: plink-UD.Rnw:576-580
###################################################
cat <- rep(2, 36)
pm <- as.poly.mod(36)
x <- as.irt.pars(KB04$pars, KB04$common, cat = list(cat, cat),
poly.mod = list(pm, pm), grp.names = c("new", "old"))
###################################################
### code chunk number 14: plink-UD.Rnw:583-585
###################################################
out <- plink(x)
summary(out)
###################################################
### code chunk number 15: plink-UD.Rnw:590-592
###################################################
out <- plink(x, rescale = "SL", base.grp = 2)
summary(out)
###################################################
### code chunk number 16: plink-UD.Rnw:597-598
###################################################
ability <- list(group1 = -4:4, group2 = -4:4)
###################################################
### code chunk number 17: plink-UD.Rnw:601-604
###################################################
out <- plink(x, rescale = "SL", ability = ability, base.grp = 2,
weights.t = as.weight(30, normal.wt = TRUE), symmetric = TRUE)
summary(out)
###################################################
### code chunk number 18: plink-UD.Rnw:607-608
###################################################
link.ability(out)
###################################################
### code chunk number 19: plink-UD.Rnw:614-617
###################################################
pm1 <- as.poly.mod(55, c("drm", "gpcm", "nrm"), dgn$items$group1)
pm2 <- as.poly.mod(55, c("drm", "gpcm", "nrm"), dgn$items$group2)
x <- as.irt.pars(dgn$pars, dgn$common, dgn$cat, list(pm1, pm2))
###################################################
### code chunk number 20: plink-UD.Rnw:620-622
###################################################
out <- plink(x)
summary(out)
###################################################
### code chunk number 21: plink-UD.Rnw:625-627
###################################################
out1 <- plink(x, exclude = "nrm")
summary(out1, descrip = TRUE)
###################################################
### code chunk number 22: plink-UD.Rnw:633-640
###################################################
pm1 <- as.poly.mod(41, c("drm", "gpcm"), reading$items[[1]])
pm2 <- as.poly.mod(70, c("drm", "gpcm"), reading$items[[2]])
pm3 <- as.poly.mod(70, c("drm", "gpcm"), reading$items[[3]])
pm4 <- as.poly.mod(70, c("drm", "gpcm"), reading$items[[4]])
pm5 <- as.poly.mod(72, c("drm", "gpcm"), reading$items[[5]])
pm6 <- as.poly.mod(71, c("drm", "gpcm"), reading$items[[6]])
pm <- list(pm1, pm2, pm3, pm4, pm5, pm6)
###################################################
### code chunk number 23: plink-UD.Rnw:659-663
###################################################
grp.names <- c("Grade 3.0", "Grade 4.0", "Grade 4.1", "Grade 5.1",
"Grade 5.2", "Grade 6.2")
x <- as.irt.pars(reading$pars, reading$common, reading$cat, pm,
grp.names = grp.names)
###################################################
### code chunk number 24: plink-UD.Rnw:666-668 (eval = FALSE)
###################################################
## out <- plink(x, method = c("HB", "SL"), base.grp = 4)
## summary(out)
###################################################
### code chunk number 25: plink-UD.Rnw:670-672
###################################################
out <- plink(x, method = c("HB", "SL"), base.grp = 4)
summary(out)
###################################################
### code chunk number 26: plink-UD.Rnw:699-705
###################################################
dichot <- matrix(c(1.2, -1.1, 0.19, 0.8, 2.1, 0.13), 2, 3, byrow = TRUE)
poly <- t(c(0.64, -1.8, -0.73, 0.45))
mixed.pars <- rbind(cbind(dichot, matrix(NA, 2, 1)), poly)
cat <- c(2, 2, 4)
pm <- as.poly.mod(3, c("drm", "gpcm"), list(1:2, 3))
mixed.pars <- as.irt.pars(mixed.pars, cat = cat, poly.mod = pm)
###################################################
### code chunk number 27: plink-UD.Rnw:707-709
###################################################
out <- mixed(mixed.pars, theta = -4:4)
round(get.prob(out), 3)
###################################################
### code chunk number 28: plink-UD.Rnw:745-748
###################################################
pm <- as.poly.mod(36)
x <- as.irt.pars(KB04$pars, KB04$common,
cat = list(rep(2, 36), rep(2, 36)), poly.mod = list(pm, pm))
###################################################
### code chunk number 29: plink-UD.Rnw:750-752
###################################################
out <- plink(x, rescale = "MS", base.grp = 2, D = 1.7,
exclude = list(27, 27), grp.names = c("new", "old"))
###################################################
### code chunk number 30: plink-UD.Rnw:757-760
###################################################
wt <- as.weight(theta = c(-5.21, -4.16, -3.12, -2.07, -1.03, 0.02, 1.06,
2.11, 3.15, 4.20), weight = c(0.0001, 0.0028, 0.0302, 0.1420, 0.3149,
0.3158, 0.1542, 0.0359, 0.0039, 0.0002))
###################################################
### code chunk number 31: plink-UD.Rnw:763-765
###################################################
eq.out <- equate(out, method = c("TSE", "OSE"), weights1 = wt,
syn.weights = c(1, 0), D = 1.7)
###################################################
### code chunk number 32: plink-UD.Rnw:771-772
###################################################
eq.out$tse[1:10,]
###################################################
### code chunk number 33: plink-UD.Rnw:776-777
###################################################
eq.out$ose$scores[1:10,]
###################################################
### code chunk number 34: plink-UD.Rnw:813-818
###################################################
pdf.options(family = "Times")
trellis.device(device = "pdf", file = "IRC.pdf")
tmp <- plot(pars, incorrect = TRUE, auto.key = list(space = "right"))
print(tmp)
dev.off()
###################################################
### code chunk number 35: plink-UD.Rnw:820-821
###################################################
plot(pars, incorrect = TRUE, auto.key = list(space = "right"))
###################################################
### code chunk number 36: plink-UD.Rnw:835-860
###################################################
pm1 <- as.poly.mod(41, c("drm", "gpcm"), reading$items[[1]])
pm2 <- as.poly.mod(70, c("drm", "gpcm"), reading$items[[2]])
pm3 <- as.poly.mod(70, c("drm", "gpcm"), reading$items[[3]])
pm4 <- as.poly.mod(70, c("drm", "gpcm"), reading$items[[4]])
pm5 <- as.poly.mod(72, c("drm", "gpcm"), reading$items[[5]])
pm6 <- as.poly.mod(71, c("drm", "gpcm"), reading$items[[6]])
pm <- list(pm1, pm2, pm3, pm4, pm5, pm6)
grp.names <- c("Grade 3.0", "Grade 4.0", "Grade 4.1", "Grade 5.1", "Grade 5.2", "Grade 6.2")
x <- as.irt.pars(reading$pars, reading$common, reading$cat, pm)
out <- plink(x, method = "SL",rescale = "SL", base.grp = 4, grp.names = grp.names)
pdf.options(family="Times")
pdf("drift_a.pdf", 4, 4.2)
plot(out, drift = "a", sep.mod = TRUE, groups = 4, drift.sd = 2)
dev.off()
pdf.options(family="Times")
pdf("drift_b.pdf", 4, 4.2)
plot(out, drift = "b", sep.mod = TRUE, groups = 4, drift.sd = 2)
dev.off()
pdf.options(family="Times")
pdf("drift_c.pdf", 4, 4.2)
plot(out, drift = "c", sep.mod = TRUE, groups = 4, drift.sd = 2)
dev.off()
###################################################
### code chunk number 37: plink-UD.Rnw:863-864
###################################################
plot(out, drift = "pars", sep.mod = TRUE, groups = 4, drift.sd = 2)
|
5d9c2ba54b8c8b781bd6e8e0f766d5e87c456b4b
|
a3489a94b96f64bd6f3fe166d3f0affecd23c17a
|
/R/acrEularRA.R
|
de1a4def3b829f789efad9cd8c540427cb925c74
|
[
"MIT"
] |
permissive
|
fragla/acreular
|
a9e9f52bab3576b30ce3b498b1de4d0b003c3317
|
60491f59e7332a916a07f782d53206e345941775
|
refs/heads/master
| 2021-08-07T23:22:55.761150
| 2020-06-03T08:02:38
| 2020-06-03T08:02:38
| 186,041,450
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,065
|
r
|
acrEularRA.R
|
#' Create a new acrEularRA class
#'
#' \code{new_acrEularRA} returns an acrEularRA object.
#'
#' @param ljc numeric large joint count. Number of swollen and/or tender
#' large joints.
#' @param sjc numeric small joint count. Number of swollen and/or tender
#' small joints.
#' @param duration numeric patient’s self-report on the maximum duration
#' (in days) of signs and symptoms of any joint that is clinically
#' involved at the time of assessment.
#' @param apr character acute phase reactant levels. "Normal" or "Abnormal"
#' @param serology character CCP and/or rheumatoid factor levels. "Negative",
#' "Low" positive or "High" positive.
#'
#' @return An acrEularRA object.
#'
#' @examples
#' obj <- new_acrEularRA(ljc=8, sjc=12, duration=43, apr="Normal", serology="High")
#'
#' @export
new_acrEularRA <- function(ljc=numeric(), sjc=numeric(), duration=numeric(),
apr=character(), serology=character()) {
value <- list(ljc=ljc, sjc=sjc, duration=duration, apr=apr, serology=serology)
attr(value, "class") <- "acrEularRA"
return(value)
}
#' Helper function for creating an acrEularRA class.
#'
#' Creates an acrEular RA object from different parameters. Converts dates to
#' duration value and serology and acute phase reactant values to
#' classifications.
#'
#' @param ljc large joint count. Numeric between 0 and 10 of total
#' number of swollen and/or tender large joints.
#' @param sjc small joint count. Numeric between 0 and 18 of total
#' number of swollen and/or tender small joints.
#' @param duration numeric patient’s self-report on the maximum duration
#' (in days) of signs and symptoms of any joint that is clinically
#' involved at the time of assessment.
#' @param onset Date signs and symptoms started.
#' @param assessment Date of initial assessment.
#' @param apr character acute phase reactant levels. "Normal" or "Abnormal"
#' @param crp numeric of C-reactive protein test result.
#' @param esr numeric of erythrocyte sedimentation rate test result.
#' @param crp.uln numeric for upper limit of normal for the C-reactive protein test.
#' @param esr.uln numeric for upper limit of normal for the erythrocyte sedimentation
#' rate test.
#' @param serology character CCP and/or rheumatoid factor levels. "Negative",
#' "Low" positive or "High" positive.
#' @param ccp numeric of ccp test result.
#' @param rf numeric of rheumatoid factor test result.
#' @param ccp.uln numeric for upper limit of normal for the ccp test
#' @param rf.uln numeric for upper limit of normal for the RF test
#'
#' @return An acrEularRA object.
#'
#' @examples
#' obj1 <- acrEularRA(ljc=8, sjc=12, duration=43, apr="Normal", serology="High")
#' obj2 <- acrEularRA(ljc=8, sjc=12,
#' onset=as.Date("2010-01-01"), assessment=as.Date("2010-02-13"),
#' crp=5, esr=12, ccp=32, rf=71)
#'
#' all.equal(obj1, obj2)
#'
#' @export
acrEularRA <- function(ljc=numeric(), sjc=numeric(), duration=numeric(),
onset=NULL, assessment=NULL, apr=character(),
crp=numeric(), esr=numeric(), crp.uln=10, esr.uln=15,
serology=character(), ccp=numeric(), rf=numeric(),
ccp.uln=10, rf.uln=20) {
object <- new_acrEularRA()
##Joint
if(length(ljc)==1 && ljc >=0 && ljc <=10) {
object$ljc <- ljc
}
if(length(sjc)==1 && sjc >=0) { #} && sjc <=18) {
object$sjc <- sjc
}
#Duration
if(length(onset)==1 && length(assessment)==1) {
if(!is.na(onset) && !is.na(assessment)) {
object$duration <- datesToDuration(onset, assessment)
}
}
if(!is.na(duration) && length(duration)==1 && duration > 0) {
if(length(object$duration) > 0 && object$duration!=duration) {
stop("duration and onset/assessment parameters used that give different value.")
}
object$duration <- duration
}
##Serology
if((!is.na(ccp) && length(ccp)==1 && ccp>=0) || (!is.na(rf) && length(rf)==1 && rf>=0)) {
object$serology <- serologyClassification(ccp=ccp, rf=rf, ccp.uln=ccp.uln, rf.uln=rf.uln)
}
if(length(serology)==1 && tolower(serology) %in% c("negative", "low", "high")) {
if(length(object$serology) > 0 && object$serology!=serology) {
stop("Serology test results and serology classification give different values.")
}
object$serology <- serology
}
##Acute phase reactants
if((!is.na(crp) && length(crp)==1 && crp>=0) || (!is.na(esr) && length(esr)==1 && esr>=0)) {
object$apr <- aprClassification(crp=crp, esr=esr, crp.uln=crp.uln, esr.uln=esr.uln)
}
if(length(apr)==1 && tolower(apr) %in% c("normal", "abnormal")) {
if(length(object$apr) > 0 && object$apr!=apr) {
stop("Acute phase reactant test results and classification give different values.")
}
object$apr <- apr
}
return(object)
}
#' Calculate acute phase reactant component score
#'
#' Calculate acute phase reactant component score. Converts acute phase
#' reactant status to a numeric score
#'
#' @param object acrEularRA object
#' @examples
#' acreular <- new_acrEularRA(ljc=3,sjc=4,duration=60,apr="Abnormal",serology="High")
#' aprScore(acreular)
#'
#' @export
aprScore <- function(object) {
score <- NA
if(is.na(object$apr) || length(object$apr)==0) {
return(NA)
}
if(object$apr=="Abnormal") {
score <- 1
} else if(object$apr=="Normal") {
score <- 0
} else {
score <- NA
}
return(score)
}
#' Calculate duration component score
#'
#' Calculate duration component score. Converts patients self-reported duration
#' of signs and symptoms (in days) to a numeric score
#'
#' @param object acrEularRA object
#' @examples
#' acreular <- new_acrEularRA(ljc=3,sjc=4,duration=60,apr="Abnormal",serology="High")
#' durationScore(acreular)
#'
#' @export
durationScore <- function(object) {
score <- 0
if(is.na(object$duration) || length(object$duration)==0) {
return(NA)
}
if(object$duration > 42) {
score <- 1
}
return(score)
}
#' Calculate joint component score
#'
#' Calculate joint component score. Converts patients swollen/tender joint
#' counts to a numeric score.
#'
#' @param object acrEularRA object
#' @examples
#' acreular <- new_acrEularRA(ljc=3,sjc=4,duration=60,apr="Abnormal",serology="High")
#' jointScore(acreular)
#'
#' @export
jointScore <- function(object) {
score <- 0
large <- object$ljc
small <- object$sjc
if(length(large)==0 || is.na(large) || length(small)==0 || is.na(small)) {
return(NA)
}
if (large != as.integer(large) || small != as.integer(small)) {
stop("Non-integer joint count value provided.")
}
if (large==1) {
score <- 0
}
if (large >= 2 & large <= 10) {
score <- 1
}
if (small >=1 & small <= 3) {
score <- 2
}
if (small >= 4 & small <= 10) {
score <- 3
}
if (large + small > 10 & small >= 1) {
score <- 5
}
return(score)
}
#' Calculate serology component score
#'
#' Calculate joint component score. Converts patients serology status to
#' a numeric score.
#'
#' @param object acrEularRA object
#' @examples
#' acreular <- new_acrEularRA(ljc=3,sjc=4,duration=60,apr="Abnormal",serology="High")
#' serologyScore(acreular)
#'
#' @export
serologyScore <- function(object) {
score <- NA
if(length(object$serology)==0) {
return(score)
}
if((!is.na(object$serology) & tolower(object$serology) == "negative")) {
score <- 0
}
if((!is.na(object$serology) & tolower(object$serology) == "low")) {
score <- 2
}
if((!is.na(object$serology) & tolower(object$serology) == "high")) {
score <- 3
}
return(score)
}
#' Calculate ACR/EULAR 2010 RA score
#'
#' Calculates ACR/EULAR 2010 RA score from the individual components.
#'
#' @param object acrEularRA object
#' @param na.rm boolean specifying whether to remove NAs from calculation
#' @examples
#' acreular <- new_acrEularRA(ljc=3,sjc=4,duration=60,apr="Abnormal",serology="High")
#' acrEularRAScore(acreular)
#'
#' @export
acrEularRAScore <- function(object, na.rm=FALSE) {
sum(aprScore(object), durationScore(object), jointScore(object), serologyScore(object), na.rm=na.rm)
}
#' Calculate ACR/EULAR 2010 RA classification
#'
#' Calculates ACR/EULAR 2010 RA classification from the individual components.
#'
#' @param object acrEularRA object
#' @examples
#' acreular <- new_acrEularRA(ljc=3,sjc=4,duration=60,apr="Abnormal",serology="High")
#' acrEularRAClassification(acreular)
#'
#' @export
acrEularRAClassification <- function(object) {
classif <- NA
apr <- aprScore(object)
duration <- durationScore(object)
joint <- jointScore(object)
serology <- serologyScore(object)
components <- c(apr=aprScore(object), duration=durationScore(object), joint=jointScore(object), serology=serologyScore(object))
score <- sum(components, na.rm=TRUE)
if(score >= 6) {
classif <- "RA (ACR/EULAR 2010)"
} else {
if(all(!is.na(c(joint, duration, apr, serology)))) {
return("UA")
} else {
max.scores <- c(apr=1, duration=1, joint=5, serology=3)
missing <- names(components)[which(is.na(components))]
if(6 - score > sum(max.scores[missing])) {
classif <- "UA"
} else {
classif <- "More information required"
}
}
}
#classif <- ifelse(score >= 6, "RA (ACR/EULAR 2010)", "UA")
return(classif)
}
#' Calculate serology classification from test scores and ULN
#'
#' Calculates serology classification for CCP and/or rheumatoid factor given
#' the test scores and the upper limit of normal..
#'
#' @param ccp numeric of ccp test result
#' @param rf numeric of rheumatoid factor test result
#' @param ccp.uln numeric for upper limit of normal for the ccp test
#' @param rf.uln numeric for upper limit of normal for the RF test
#' @examples
#' serologyClassification(ccp=9, rf=21, ccp.uln=10, rf.uln=20)
#'
#' @export
serologyClassification <- function(ccp, rf, ccp.uln=10, rf.uln=20) {
#what if only ccp or rf done
ccp <- .serologyClassif(ccp, ccp.uln)
rf <- .serologyClassif(rf, rf.uln)
if(is.na(ccp) & is.na(rf)) {
return(NA)
}
classif <- "Negative"
if((!is.na(ccp) && ccp=="Low") || (!is.na(rf) && rf== "Low")) {
classif <- "Low"
}
if((!is.na(ccp) && ccp=="High") || (!is.na(rf) && rf== "High")) {
classif <- "High"
}
return(classif)
}
.serologyClassif <- function(score, uln) {
if(is.na(score) || !is.numeric(score)) {
return(NA)
}
if(is.na(uln) || !is.numeric(uln)) {
stop("Incorrect serology ULN parameter used.")
}
classification <- "Negative"
if(score > uln * 3) {
classification <- "High"
} else if(score > uln) {
classification <- "Low"
}
return(classification)
}
#' Calculate acute phase reactant classification from test scores and ULN.
#'
#' Calculates acute phase reactant classification for given the C-reactive
#' protein and ESR test scores and the upper limit of normal.
#'
#' @param crp numeric of C-reactive protein test result.
#' @param esr numeric of erythrocyte sedimentation rate test result.
#' @param crp.uln numeric for upper limit of normal for the C-reactive protein test.
#' @param esr.uln numeric for upper limit of normal for the erythrocyte sedimentation
#' rate test.
#' @examples
#' aprClassification(crp=9, esr=16, crp.uln=10, esr.uln=15)
#'
#' @export
aprClassification <- function(crp, esr, crp.uln=10, esr.uln=15) { #esr correct for age and gender?
#what if only ccp or rf done
crp <- .aprClassif(crp, crp.uln)
esr <- .aprClassif(esr, esr.uln)
if(is.na(crp) & is.na(esr)) {
return(NA)
}
classif <- "Normal"
if((!is.na(crp) && crp=="Abnormal") || (!is.na(esr) && esr== "Abnormal")) {
classif <- "Abnormal"
}
return(classif)
}
.aprClassif <- function(score=numeric(), uln=numeric()) {
if(is.na(score) || !is.numeric(score) || length(score)==0) {
return(NA)
}
if(is.na(uln) || !is.numeric(uln) || length(uln)==0) {
stop("Incorrect APR ULN parameter used.")
}
classification <- "Normal"
if(score > uln) {
classification <- "Abnormal"
}
return(classification)
}
|
eb8f37f666deb67ca3b04eeca4dc82b3b25ad5e7
|
012636f34220bbac0081af5ababf12eec60822a5
|
/man/estNBparams.Rd
|
7347a9f995f0d8c6e863002750ccd89d7ec97168
|
[] |
no_license
|
CenterForStatistics-UGent/RCM
|
c718cd843ead868ae814d7834723cf31f8cb0211
|
b1f32241b8582f5dd896239fae68c86f60b56449
|
refs/heads/master
| 2023-04-07T17:08:47.161337
| 2023-03-29T15:19:30
| 2023-03-29T15:19:30
| 107,241,048
| 14
| 1
| null | 2020-04-26T11:12:22
| 2017-10-17T08:39:17
|
R
|
UTF-8
|
R
| false
| true
| 1,470
|
rd
|
estNBparams.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/F_estNBparams.R
\name{estNBparams}
\alias{estNBparams}
\title{A function to estimate the taxon-wise NB-params}
\usage{
estNBparams(
design,
thetas,
muMarg,
psi,
X,
nleqslv.control,
ncols,
initParam,
v,
dynamic = FALSE,
envRange,
allowMissingness,
naId
)
}
\arguments{
\item{design}{an n-by-v design matrix}
\item{thetas}{a vector of dispersion parameters of length p}
\item{muMarg}{an offset matrix}
\item{psi}{a scalar, the importance parameter}
\item{X}{the data matrix}
\item{nleqslv.control}{a list of control elements, passed on to nleqslv()}
\item{ncols}{an integer, the number of columns of X}
\item{initParam}{a v-by-p matrix of initial parameter estimates}
\item{v}{an integer, the number of parameters per taxon}
\item{dynamic}{a boolean, should response function be determined dynamically?
See details}
\item{envRange}{a vector of length 2, giving the range of observed
environmental scores}
\item{allowMissingness}{A boolean, are missing values present}
\item{naId}{The numeric index of the missing values in X
If dynamic is TRUE, quadratic response functions are fitted for every taxon.
If the optimum falls outside of the observed range of environmental scores,
a linear response function is fitted instead}
}
\value{
a v-by-p matrix of parameters of the response function
}
\description{
A function to estimate the taxon-wise NB-params
}
|
609291bca4c6eb4fedd8663f9018dc829500e02c
|
beb5780afbe8cb5dd1abeadf86a21aa6063e62cc
|
/utility/pvs_mat.R
|
278a833e4f9f6515e7c459c2900da341287ebed7
|
[] |
no_license
|
SolbiatiAlessandro/EWMA_RiskMetrics
|
c331692f6d9e1d2b7a76d283a4486d4693d5584f
|
c07675a5e1d8f014cde86a763e2170c679207e78
|
refs/heads/master
| 2021-01-22T09:32:36.967230
| 2016-08-07T15:10:38
| 2016-08-07T15:10:38
| 64,223,457
| 13
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,179
|
r
|
pvs_mat.R
|
#
# Alessandro Solbiati - EWMA_RiskMetrics GITHUB project - 26/06/2016
# reference: Quantitative Finance for R (Bee, Santi 2013)
# reference: RiskMetrics(TM) Technical Document (JPMorgan and Retuters 1996)
#
# --------------------------------------------------------------------------
#
# pvs_mat() : computes pvalues matrix
# rows: confidence level (0.9,0.95,0.99,0.995)
# cols: usage (1,2,3)
#
# Usage:
# my_matrix=pvs_mat(IBM,"start_date","end_date")
# my_matrix
#
pvs_mat <- function(serie,start,end){
pvs <- matrix(nrow=3,ncol=4)
rownames(pvs) <- c("Non-Parametric","Normal Distr","Student T Distr")
colnames(pvs) <- c("conf 0.90","conf 0.95","conf 0.99","conf 0.995")
conf <- 0.9
j <- 1
for(i in 1:3){
pvs[i,j]=as.numeric(EWMA_RiskMetrics(serie,conf,i,start,end,VaR=TRUE)[1,5])
}
conf <- 0.95
j <- 2
for(i in 1:3){
pvs[i,j]=as.numeric(EWMA_RiskMetrics(serie,conf,i,start,end,VaR=TRUE)[1,5])
}
conf <- 0.99
j <- 3
for(i in 1:3){
pvs[i,j]=as.numeric(EWMA_RiskMetrics(serie,conf,i,start,end,VaR=TRUE)[1,5])
}
conf <- 0.995
j <- 4
for(i in 1:3){
pvs[i,j]=as.numeric(EWMA_RiskMetrics(serie,conf,i,start,end,VaR=TRUE)[1,5])
}
pvs
}
|
b220b3c5b80627c499bd0ddf07a3f6b7940c8e6a
|
4318b106169588a392a4aeb45ab73759443d5639
|
/USYD_14_d2u/prepare_trace_data_for_PM.R
|
8585419c583e9a01ff24679ffcf63893c876b9be
|
[] |
no_license
|
abelardopardo/Flipped_course_analysis
|
a1c8b3bba030afad54463aa79a4ec154df69e1a2
|
6ff8c6e3eba53023302c800fe9982b3f7d0e6050
|
refs/heads/master
| 2021-06-22T00:01:58.637256
| 2017-07-06T17:36:56
| 2017-07-06T17:36:56
| 56,097,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,189
|
r
|
prepare_trace_data_for_PM.R
|
########################################################################
## Transform the events data into the format suitable for process mining
########################################################################
all.events <- read.csv(file = "datasets/data2u_sem2_14_events_labeled.csv", stringsAsFactors = F)
str(all.events)
## remove irrelevant columns
all.events <- all.events[,-c(1,3,7)]
###############################
## FILTERING OF THE SOURCE DATA
###############################
## remove activities with action-id equal to "activity-collapse-expand" or "activity-duration"
## these are (currently) not considered
filtered.events <- all.events[!(all.events$action_id %in% c("activity-collapse-expand", "activity-duration")),]
## check that these are really excluded
length(which(filtered.events$action_id == "activity-duration"))
length(which(filtered.events$action_id == "activity-collapse-expand"))
## exclude "resource-view" actions where topic is not one of the course subject topics nor one
## of these: "ORG", "DBOARD"
relevant.topics <- c('CST', 'COD', 'DRM', 'CDL', 'SDL', 'ARC', 'ISA',
'ASP', 'ADM', 'HLP', 'ORG', 'DBOARD')
to.remove <- which((filtered.events$action_id == "resource-view") &
!(filtered.events$topic %in% relevant.topics))
filtered.events <- filtered.events[-to.remove,]
## exclude "embedded-video" actions where the 1st element of the payload is not "PLAY"
## example value of the payload for this action type: "{\"PLAY\":\"F0Ri2TpRBBg\"}"
## f. for transforming the payload value of the embedded-video action
## into a vector of two elements the payload consists of
video.payload.split <- function(x) {
temp <- gsub("\\{\"(\\w+)\":\"(.+)\"\\}", "\\1 \\2", x)
result <- strsplit(temp, " ")
result[[1]]
}
## first, create a new vector by extracting the indices of the observations
## where action_id is "embedded-video" and the 1st element of the payload column is "PLAY"
indices.to.remove <- vector()
counter <- 1
video.events <- filtered.events[filtered.events$action_id=="embedded-video",]
for (i in 1:nrow(video.events)) {
payload.vec <- video.payload.split(video.events[i,4])
if (payload.vec[1]!="PLAY") {
indices.to.remove[counter] <- row.names(video.events[i,])
counter <- counter + 1
}
}
## remove observations with indices in indices.to.remove
indices.to.remove <- as.integer(indices.to.remove)
filtered.events <- filtered.events[ !(row.names(filtered.events) %in% indices.to.remove), ]
#################################################################################
## USE PAYLOAD TO ADD NEW VARIABLES REQUIRED FOR THE MAPPING TO THE TARGER FORMAT
#################################################################################
## f. for transforming the payload value of the "embedded-question" action
## into a vector of two elements the payload consists of
mcq.payload.split <- function(x) {
temp <- gsub("\\{\"([a-zA-Z0-9_-]+)\":[\"]?([-]?\\d)[\"]?\\}", "\\1 \\2", x)
result <- strsplit(temp, " ")
result[[1]]
}
## f. for transforming the payload value of the "exco-answer" action
## into a vector of two elements the payload consists of
exco.payload.split <- function(x) {
temp <- gsub("\\{\"([^:]+)\":\\s\"([a-z]+)\"\\}", "\\1 \\2", x)
result <- strsplit(temp, " ")
result[[1]]
}
filtered.events$payload.part <- vector(mode = "character", length = nrow(filtered.events))
for (i in 1:nrow(filtered.events)) {
if (filtered.events$action_id[i] == "embedded-question") {
temp <- mcq.payload.split(filtered.events$payload[i])
filtered.events$payload.part[i] <- temp[2] # this will be "1", "0", or "-1"
} else {
if (filtered.events$action_id[i] == "exco-answer") {
temp <- exco.payload.split(filtered.events$payload[i])
filtered.events$payload.part[i] <- temp[2] # this will be either "correct" or "incorrect"
}
}
}
## TODO: store the filtered data

######################################################
## TRANSFORMATION OF EVENT DATA INTO THE TARGET FORMAT
######################################################

## remove the payload as it's not needed any more
target.trace <- filtered.events[, -4]
colnames(target.trace)[1:2] <- c('TIMESTAMP', 'CASE_ID')

## topics that correspond to regular course content
subject.topics <- c('CST', 'COD', 'DRM', 'CDL', 'SDL', 'ARC', 'ISA', 'ASP', 'ADM', 'HLP')

## Map every event to its activity name.  The conditions below are mutually
## exclusive (each differs in action_id, or in non-overlapping topic /
## payload.part values), so vectorized assignments replace the original
## row-by-row loop without changing the result.
target.trace$ACTIVITY_NAME <- character(nrow(target.trace))
is.resource <- target.trace$action_id == "resource-view"
is.exco     <- target.trace$action_id == "exco-answer"
is.mcq      <- target.trace$action_id == "embedded-question"

## ORIENT: viewing course-organization resources
target.trace$ACTIVITY_NAME[is.resource & target.trace$topic == "ORG"] <- "ORIENT"
## EXE_CO / EXE_IN: correctly / incorrectly solved exercises
target.trace$ACTIVITY_NAME[is.exco & target.trace$payload.part == "correct"] <- "EXE_CO"
target.trace$ACTIVITY_NAME[is.exco & target.trace$payload.part == "incorrect"] <- "EXE_IN"
## MCQ_CO / MCQ_IN / MCQ_SR: correct (1) / incorrect (0) / solution
## requested (-1) on embedded multiple-choice questions
target.trace$ACTIVITY_NAME[is.mcq & target.trace$payload.part == "1"] <- "MCQ_CO"
target.trace$ACTIVITY_NAME[is.mcq & target.trace$payload.part == "0"] <- "MCQ_IN"
target.trace$ACTIVITY_NAME[is.mcq & target.trace$payload.part == "-1"] <- "MCQ_SR"
## DBOARD_ACCESS: dashboard views
target.trace$ACTIVITY_NAME[target.trace$action_id == "dboard-view"] <- "DBOARD_ACCESS"
# # HOF_ACCESS activity: action-id:resource-view + topic:HOF
# target.trace$ACTIVITY_NAME[is.resource & target.trace$topic == "HOF"] <- "HOF_ACCESS"
## VIDEO_PLAY: the filtering phase removed all video interactions except
## PLAY, so this mapping can be based on the action_id alone
target.trace$ACTIVITY_NAME[target.trace$action_id == "embedded-video"] <- "VIDEO_PLAY"
## CONTENT_ACCESS: viewing resources of any regular subject topic
target.trace$ACTIVITY_NAME[is.resource & target.trace$topic %in% subject.topics] <- "CONTENT_ACCESS"
## keep only the columns/variables that are needed
## NOTE(review): positional indexing — c(1,2,4,5,7) depends on the exact
## column layout produced above (payload dropped, payload.part and
## ACTIVITY_NAME appended); verify if upstream columns change
target.trace <- target.trace[,c(1,2,4,5,7)]
str(target.trace)
colnames(target.trace)[3:4] <- c("WEEK", "TOPIC")
## change the order of columns, to have:
## CASE_ID, ACTIVITY_NAME, TIMESTAMP, WEEK, TOPIC
target.trace <- target.trace[,c(2,5,1,3,4)]
## store the generated trace data
write.csv(target.trace, file = "Intermediate_files/trace_data_w0-16.csv", row.names = F)
###############################################################################
## CREATE NEW TRACE FORMAT WITH SESSIONS REPRESENTING CASES; SO, THE FORMAT IS:
## CASE_ID (SESSION_ID), ACTIVITY, TIMESTAMP, RESOURCE_ID (USER_ID), WEEK
##
## session is defined as a continuous sequence of events/activities where any
## two events are separated not more than 30 minutes
###############################################################################

## load the trace data (without sessions)
target.trace <- read.csv(file = "Intermediate_files/trace_data_w0-16.csv",
                         stringsAsFactors = F)
str(target.trace)
target.trace$TIMESTAMP <- as.POSIXct(target.trace$TIMESTAMP)
## rename the columns to prevent confusion
colnames(target.trace) <- c('user_id', 'activity', 'timestamp', 'week', 'topic')
str(target.trace)

## order the trace data first based on the user_id, then based on the timestamp
target.trace <- target.trace[order(target.trace$user_id, target.trace$timestamp), ]
head(target.trace)

## Assign session ids in one vectorized pass (replaces the original scalar
## loop over 1:(nrow-1)): a new session starts at the first event, whenever
## the user changes, or whenever two consecutive events of the same user are
## 30 or more minutes apart.  cumsum over the boundary indicator reproduces
## the original sequential counter exactly.
n.events <- nrow(target.trace)
same.user <- target.trace$user_id[-1] == target.trace$user_id[-n.events]
gap.mins <- abs(as.numeric(difftime(target.trace$timestamp[-1],
                                    target.trace$timestamp[-n.events],
                                    units = "mins")))
new.session <- c(TRUE, !same.user | gap.mins >= 30)
target.trace$session_id <- cumsum(new.session)

## rename the columns
colnames(target.trace) <- c('RESOURCE_ID', 'ACTIVITY', 'TIMESTAMP', 'WEEK', 'TOPIC', 'CASE_ID')
## change the order of the columns
target.trace <- target.trace[, c(6, 1, 3, 2, 4, 5)]
## write to a file
write.csv(target.trace, file = "Intermediate_files/trace_data_with_sessions_w0-16.csv",
          quote = F, row.names = F)
## filter out sessions that consist of only one event
## (vectorized via table(); the original scalar loop also missed the very
## last session in the file when it contained a single event, because it
## only recorded a session when the CASE_ID changed)
session.sizes <- table(target.trace$CASE_ID)
one.event.sessions <- as.numeric(names(session.sizes)[session.sizes == 1])
filtered.traces <- target.trace[!(target.trace$CASE_ID %in% one.event.sessions), ]
## write data without one-event sessions
write.csv(filtered.traces, file = "Intermediate_files/trace_data_with_sessions_(no-1-event)_w0-16.csv",
          quote = F, row.names = F)
#######################################################################
## EXTRACT TRACE DATA ONLY FOR:
## - THE BEST PERFORMING STUDENTS (the top 25% or 10% of students
##   both on midterm and final exams)
## - THE WORST PERFORMING STUDENTS (the bottom 25% or 10% of students
##   both on midterm and final exams)
#######################################################################

## load the f. for identifying the best and worst performing students
source("util_functions.R")

counts.data <- read.csv(file = "datasets/data2u_sem2_14_student_all_variables.csv")
## extract exam scores (midterm and final exam totals)
scores <- counts.data[, c('user_id', 'SC_MT_TOT', 'SC_FE_TOT')]
## `selection` presumably holds per-group id vectors (top10, worst10,
## top25, worst25) — TODO confirm against util_functions.R
selection <- best.and.worst.performers(scores)

## load the trace data
target.trace <- read.csv(file = "Intermediate_files/trace_data_with_sessions_w0-16.csv",
                         stringsAsFactors = F)

## extract trace data for top 10% students
top10perc.trace <- target.trace[target.trace$RESOURCE_ID %in% selection$top10, ]
table(top10perc.trace$ACTIVITY)
table(top10perc.trace$WEEK)
write.csv(x = top10perc.trace,
          file = "Intermediate_files/top10perc_students_trace_data_with_sessions_w0-16.csv",
          row.names = F, quote = F)

## extract trace data for the bottom 10% students
## (fixed: the original indexed selection$top10$worst10, which fails on an
## atomic vector and could never select the bottom-10% ids)
bottom10perc.trace <- target.trace[target.trace$RESOURCE_ID %in% selection$worst10, ]
table(bottom10perc.trace$WEEK)
table(bottom10perc.trace$ACTIVITY)
write.csv(bottom10perc.trace,
          file = "Intermediate_files/bottom10perc_students_trace_data_with_sessions_w0-16.csv",
          row.names = F, quote = F)

## extract trace data for the bottom 25% students
bottom25perc.trace <- target.trace[target.trace$RESOURCE_ID %in% selection$worst25, ]
table(bottom25perc.trace$WEEK)
table(bottom25perc.trace$ACTIVITY)
write.csv(bottom25perc.trace,
          file = "Intermediate_files/bottom25perc_students_trace_data_with_sessions_w0-16.csv",
          row.names = F, quote = F)
|
79a0b48942badd5dd5d664921821406bd0b347f7
|
4b7337fd69e9a6d4cf91ae31d3cc593d16a613b6
|
/examples/wordpress/R/exec_plot.R
|
c274f4e0af38fd50bb2d1295530da6726db9260d
|
[] |
no_license
|
cmendesce/crawler
|
3d9ae3fff3465f1bc5c1b696f233437e537e9a5b
|
9cc0d81011efa3dcf136ef82d3db919db809593f
|
refs/heads/master
| 2021-01-19T07:02:07.895160
| 2016-04-16T16:28:04
| 2016-04-16T16:28:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,738
|
r
|
exec_plot.R
|
## Summarize capacity-planning results (execution, prediction, price) per
## (heuristic, SLA) pair and render comparison plots into one PDF.
## NOTE(review): hard-coded setwd() ties this script to one machine.
setwd("C:/Users/Matheus/VMS/crawler/repos/crawler/examples/wordpress/R")
## NOTE(review): require() returns FALSE instead of erroring when a package
## is missing; library() would fail fast here.
require(ggplot2)
require(doBy)
roc = read.csv("cap_result.csv", header = TRUE, stringsAsFactors = FALSE)
roc$workload <- factor(roc$workload, order=TRUE)
## all plots below are written to a single PDF device
pdf("Execution_x_Prediction.pdf", width = 9.5, height = 6)
capacitor <- roc
## total EXEC / PREDICT / PRICE per (heuristic, sla) combination
## NOTE(review): the stray "+ +" in the two formulas below is a harmless
## unary plus — almost certainly a typo for a single "+"
graph_base <- summaryBy(capacitor$EXEC ~ capacitor$heuristic + capacitor$sla , capacitor, FUN = c(sum))
graph_base_2 <- summaryBy(capacitor$PREDICT ~ capacitor$heuristic + +capacitor$sla , capacitor, FUN = c(sum))
graph_base_3 <- summaryBy(capacitor$PRICE ~ capacitor$heuristic + +capacitor$sla , capacitor, FUN = c(sum))
graph_base <- merge(graph_base, graph_base_2, by = c("heuristic","sla"))
graph_base <- merge(graph_base, graph_base_3, by = c("heuristic","sla"))
colnames(graph_base) <- c("heuristic","sla", "EXEC", "PREDICT","PRICE")
#U$ 178.5 - 280
## reference frame for a "BF" baseline with constant EXEC = 280 per SLA
## NOTE(review): BF is built but never used by any plot below
BF <- data.frame(c("BF","BF","BF","BF","BF","BF","BF","BF","BF","BF"), c(10000,20000,30000,40000,50000,60000,70000,80000,90000,100000),c(280,280,280,280,280,280,280,280,280,280))
colnames(BF) <- c("heuristic","sla", "EXEC")
graph_base$sla <- factor(graph_base$sla, order=TRUE)
## Plot 1: total execution per SLA, one line per heuristic
ggplot(graph_base, aes(x = sla)) +
geom_point(size=3, aes(colour=heuristic, y = EXEC, shape=heuristic), fill="white") +
scale_shape_manual(values=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16))+
geom_line(aes(group=heuristic, colour=heuristic, y = EXEC), linetype="solid", size=1) +
theme_bw(base_size = 12, base_family = "") +
scale_x_discrete("Sla") +
scale_y_continuous("Execution") +
theme(
title = element_text(face="bold", size = 14),
axis.title = element_text(face="bold", size = 12)
)
## Plot 2: total price (U$/hour) per SLA, one line per heuristic
ggplot(graph_base, aes(x = sla)) +
geom_point(size=3, aes(colour=heuristic, y = PRICE, shape=heuristic), fill="white") +
scale_shape_manual(values=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16))+
geom_line(aes(group=heuristic, colour=heuristic, y = PRICE), linetype="solid", size=1) +
theme_bw(base_size = 12, base_family = "") +
scale_x_discrete("Sla") +
scale_y_continuous("U$/Hour") +
theme(
title = element_text(face="bold", size = 14),
axis.title = element_text(face="bold", size = 12)
)
## Plot 3: relative price vs relative execution, faceted by SLA
## (normalizing constants 178.5 and 280 come from the "U$ 178.5 - 280"
## note above)
ggplot(graph_base, aes(y = PRICE/178.5, x = EXEC/280)) +
geom_point(size=3, aes(colour=heuristic, shape=heuristic), fill="white") +
scale_shape_manual(values=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16))+
facet_grid(sla ~ .) +
scale_y_continuous("Relative Price", limits=c(0, 1)) +
scale_x_continuous("Relative Execution", limits=c(0, 1)) +
theme_bw(base_size = 12, base_family = "") +
theme(
axis.title.x = element_text(face="bold"),
axis.title.y = element_text(face="bold")
)
## Drop a subset of heuristics, then repeat the three plots for the
## remaining ones only
graph_base <- subset(graph_base, heuristic != "CR")
graph_base <- subset(graph_base, heuristic != "OR")
graph_base <- subset(graph_base, heuristic != "PR")
graph_base <- subset(graph_base, heuristic != "RR")
graph_base <- subset(graph_base, heuristic != "RC")
graph_base <- subset(graph_base, heuristic != "RO")
graph_base <- subset(graph_base, heuristic != "RP")
## Plot 4: execution per SLA (filtered heuristics)
ggplot(graph_base, aes(x = sla)) +
geom_point(size=3, aes(colour=heuristic, y = EXEC, shape=heuristic), fill="white") +
scale_shape_manual(values=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16))+
geom_line(aes(group=heuristic, colour=heuristic, y = EXEC), linetype="solid", size=1) +
theme_bw(base_size = 12, base_family = "") +
scale_x_discrete("Sla") +
scale_y_continuous("Execution") +
theme(
title = element_text(face="bold", size = 14),
axis.title = element_text(face="bold", size = 12)
)
## Plot 5: price per SLA (filtered heuristics)
ggplot(graph_base, aes(x = sla)) +
geom_point(size=3, aes(colour=heuristic, y = PRICE, shape=heuristic), fill="white") +
scale_shape_manual(values=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16))+
geom_line(aes(group=heuristic, colour=heuristic, y = PRICE), linetype="solid", size=1) +
theme_bw(base_size = 12, base_family = "") +
scale_x_discrete("Sla") +
scale_y_continuous("U$/Hour") +
theme(
title = element_text(face="bold", size = 14),
axis.title = element_text(face="bold", size = 12)
)
## Plot 6: relative price vs relative execution (filtered heuristics)
ggplot(graph_base, aes(y = PRICE/178.5, x = EXEC/280)) +
geom_point(size=3, aes(colour=heuristic, shape=heuristic), fill="white") +
scale_shape_manual(values=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16))+
facet_grid(sla ~ .) +
scale_y_continuous("Relative Price", limits=c(0, 1)) +
scale_x_continuous("Relative Execution", limits=c(0, 1)) +
theme_bw(base_size = 12, base_family = "") +
theme(
axis.title.x = element_text(face="bold"),
axis.title.y = element_text(face="bold")
)
## close the PDF device
dev.off()
|
f3743a4439fd52aa7cc000262f589835498c2bf7
|
ac85a0ebc0a14bcffa076de6a6225a118f6f70fb
|
/machine_learning/NN_code_release/pcaReduce.R
|
817294531a43528153410802f553aeccb84efd19
|
[
"MIT"
] |
permissive
|
KECB/learn
|
8d5705ec4aece009252d1c2429f9c37e47228e27
|
5b52c5c3ac640dd2a9064c33baaa9bc1885cf15f
|
refs/heads/master
| 2021-01-22T13:23:06.403694
| 2017-12-25T08:51:47
| 2017-12-25T08:51:47
| 100,665,918
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
pcaReduce.R
|
## Run pcaReduce clustering on a single-cell expression matrix.
##
## Command-line arguments:
##   1: vct     - requested number of clusters (numeric)
##   2: indata  - tab-separated expression table (genes in rows, cells in columns)
##   3: inans   - tab-separated ground-truth labels (read but currently unused)
##   4: outfile - output file, one cluster assignment per line
args <- commandArgs(trailingOnly = TRUE)
vct <- as.numeric(args[1])
indata <- args[2]
inans <- args[3]
outfile <- args[4]

b <- read.table(indata, sep = '\t', header = TRUE, row.names = 1)
ans <- read.table(inans, sep = '\t', header = FALSE)

Input <- t(b) # data matrix, cells in rows, genes in columns

## (The original wrapped this in `if(T){...}`: T is a reassignable alias for
## TRUE and the branch was unconditional, so the wrapper is removed.)
library("pcaReduce")
## single run (nbt = 1), starting from q = vct*3 principal components,
## merging clusters by method 'M'
Output_S <- PCAreduce(Input, nbt = 1, q = vct * 3, method = 'M')
## pick the partition column corresponding to the requested cluster count
## NOTE(review): the column index vct*2+2 is inherited from the original;
## confirm against the pcaReduce output layout
res <- Output_S[[1]][, vct * 2 + 2]
write(res, file = outfile, sep = '\n')
|
7b5dae8cded536927514da3fb3a38befc7b0a782
|
9639c79f6aabf27f7b124d820fefd2e2e4a0bcb0
|
/PolynomialRegression/poly.R
|
d6041e8697cb60c38f1529377a3359edcc51cc7f
|
[
"MIT"
] |
permissive
|
tom147465/stream-summarization
|
51c5637e329921e2d4a16151a5182ca262108a31
|
7901cf8f82ff1ee5d93432611ac405956f39aa44
|
refs/heads/master
| 2021-01-25T13:41:30.370717
| 2019-07-13T22:28:53
| 2019-07-13T22:28:53
| 123,605,852
| 0
| 0
|
MIT
| 2018-04-04T15:14:50
| 2018-03-02T16:54:06
|
C
|
UTF-8
|
R
| false
| false
| 1,294
|
r
|
poly.R
|
## Piecewise polynomial approximation of an accelerometer stream.
##
## Greedily grows a window of samples, fitting an order-`order` polynomial
## of time to each axis (x, y, z).  When the worst residual exceeds
## `epsilon`, the last still-acceptable fits are stored as one segment and a
## new window starts at the current sample.
mydata <- read.csv(file="../dataInCsv/running-1000.csv", header = FALSE, sep=",")
# mydata <- read.csv(file="../dataInCsv/walking-1000.csv", header = FALSE, sep=",")

epsilon <- 100   # maximum allowed per-segment residual (infinity norm)
order <- 3       # polynomial order
begin <- 1
i <- order + begin
as <- list()     # segments: list(fit.x, fit.y, fit.z, last sample index)
while (i <= 1000) {
  t <- mydata[begin:i, 1]
  x <- mydata[begin:i, 2]
  y <- mydata[begin:i, 3]
  z <- mydata[begin:i, 4]
  m1 <- lm(x ~ poly(t, order))
  m2 <- lm(y ~ poly(t, order))
  m3 <- lm(z ~ poly(t, order))
  ##### Infinity norm
  error <- max(max(abs(m1$residuals)), max(abs(m2$residuals)), max(abs(m3$residuals)))
  ##### Euclidean norm (alternative)
  # tmp_error = c()
  # j=1
  # for (j in 1:length(m1$residuals)){
  #   tmp_error[j] <- sqrt(m1$residuals[j]^2 + m2$residuals[j]^2 + m3$residuals[j]^2)
  # }
  # error <- max(tmp_error)
  ## compare against epsilon (the original hard-coded 100 here, silently
  ## ignoring the epsilon setting above)
  if (error <= epsilon) {
    ## window still fits well enough: remember the fits and extend it
    fm1 <- m1
    fm2 <- m2
    fm3 <- m3
    i <- i + 1
  } else {
    ## tolerance exceeded: close the segment with the last good fits
    ## NOTE(review): the final (still open) segment is never appended to
    ## `as`; the commented plot code below compensates by using m1 directly
    as[[length(as) + 1]] <- list(fm1, fm2, fm3, i - 1)
    begin <- i
    i <- order + begin
  }
}
##### plot
# plot(mydata$V1, mydata$V2, main="Regression ax order=5", ylab = "Acceleration(mg*2048)", xlab = "Time")
# lines(predict(as[[1]][[1]]), col="red",lwd=3)
# j=2
# for (j in 2:length(as)){
#   lines((as[[j-1]][[4]]+1):as[[j]][[4]],predict(as[[j]][[1]]), col="red",lwd=3)
# }
# lines((as[[length(as)]][[4]]+1):1000, predict(m1), col="red", lwd=3)
|
15c20905ab31d66fc53321f113a0eb62076c84a4
|
77fbc44cb47755eddb857f7f71cd61759c2a91bf
|
/summarizeProtPositions.R
|
bd851a19b6ddf6afc7428fb7900f2930191c55ed
|
[] |
no_license
|
jgmeyerucsd/SUMO-remnant
|
10383b1b2311081e5d78ca7fef2dc95e7815cae0
|
3349797c85e589a649075962943cc8da4643187e
|
refs/heads/master
| 2020-06-02T19:52:27.005328
| 2015-04-05T20:04:36
| 2015-04-05T20:04:36
| 33,084,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,130
|
r
|
summarizeProtPositions.R
|
#mgpl.ave<-summarizeProtPositions()
#mgpl.ave@data[1,]
#mgpl.ave@modindex
### works with peptides containing multiple sites, sets their weighted ratio =0
###
### Summarize modification positions per protein and compute, for every
### unique (protein, position) site, the area-weighted average xpress ratio
### over all peptide rows mapping to that site.  Peptides with more than one
### modification get site index 0 and weighted ratio 0.
### Returns `object` with @modsummary (unique positions per protein),
### @modindex (site index per data row) filled, and a `weighted.ratios`
### column appended to @data.
### NOTE(review): `object` is an S4 object whose class is defined elsewhere;
### the slots used here are @data, @modposition.protein, @modsummary,
### @modindex — confirm against the class definition.
summarizeProtPositions=function(object=mgpl.pos){
proteinIDs<-unique(as.character(object@data[,"protein"]))
prot.pos.list<-list()
### gives a list of proteins with their corresponding unique locations
for(i in 1:length(proteinIDs)){
#print(i)
prot.pos.list[[proteinIDs[i]]]<-unique(unlist(object@modposition.protein[which(object@data$protein==proteinIDs[i])]))
#print(proteinIDs[i])
#print(prot.pos.list[[proteinIDs[i]]])
}
prot.lines.list<-list()
#### gives the lines in object@data that correspond to each protein
for(i in 1:length(proteinIDs)){
#print(i)
prot.lines.list[[proteinIDs[i]]]<-which(object@data$protein==proteinIDs[i])
}
#range(unlist(prot.lines.list))
position.index.list<-list()
#### assign each unique position an index
### loop through the proteins; `index` is a running counter shared across
### proteins, so each (protein, position) site gets a globally unique id
index=1
for(i in 1:length(proteinIDs)){
#print(i)
temp.positions<-prot.pos.list[[which(names(prot.pos.list)==proteinIDs[i])]]
temp.prot.lines<-which(object@data$protein==proteinIDs[i]) ### gives row numbers of mods in object@data
protein.position.list<-object@modposition.protein[temp.prot.lines] ### gives the values of mod positions as vector
### set lines that have multiple mods to 0, excluding multi-site peptides
### from per-site summarization
for(j in 1:length(protein.position.list)){
#print(length(protein.position.list[[j]]))
if(length(protein.position.list[[j]])>1){
protein.position.list[[j]]<-0
}
}
unique.positions<-unique(unlist(protein.position.list))
unique.positions.l<-length(unique.positions)
### loop through the positions and assign those each an index number
for(x in unique.positions){
#print(x)
### if x!=0, do give an index
if(x!=0){
temprowlen<-length(temp.prot.lines[which(protein.position.list==x)])
for(j in 1:temprowlen){
#print(j)
position.index.list[[temp.prot.lines[which(protein.position.list==x)][j]]]<-index
}
index=index+1
}
### if x==0 (peptide with two mods), assign index=0
if(x==0){
temprowlen<-length(temp.prot.lines[which(protein.position.list==x)])
for(j in 1:temprowlen){
#print(j)
position.index.list[[temp.prot.lines[which(protein.position.list==x)][j]]]<-0
}
}
}
}
### which(position.index.list==111)
#### now loop through those values and take weighted averages of the ones with more than 1 line
unique.indexes.len<-length(unique(unlist(position.index.list)))
unique.indexes<-unique(unlist(position.index.list))
#unique.indexes
weighted.ratios<-list()
linesum=0
for(j in 1:unique.indexes.len){
#print(j)
templines<-which(position.index.list==unique.indexes[[j]])
#print(templines)
object@data[templines,]
if(unique.indexes[[j]]>=1){
### divide by temparea: weight each row's xpress ratio by its share of
### the site's total (light + heavy) area
temparea=sum(object@data[templines,"light_area"])+sum(object@data[templines,"heavy_area"])
templines.l<-length(templines)
tempsum<-0
linesum=linesum+templines.l
weights<-rep(0,times=templines.l)
for(i in 1:templines.l){
weights[i]<-(object@data[templines[i],"light_area"]+object@data[templines[i],"heavy_area"])/temparea
}
for(i in 1:templines.l){
tempsum=tempsum+object@data[templines[i],"xpress"]*weights[i]
#print(object@data[templines[i],"light_area"]+object@data[templines[i],"heavy_area"])
#print(tempsum)
}
### every row of the site carries the same weighted average
for(i in 1:templines.l){
weighted.ratios[[templines[i]]]<-tempsum
}
}
### multi-site peptides (index 0) get a weighted ratio of 0
if(unique.indexes[[j]]==0){
templines.l<-length(templines)
for(i in 1:templines.l){
weighted.ratios[[templines[i]]]<-0
}
}
}
object@modsummary<-prot.pos.list
object@modindex<-position.index.list
object@data<-cbind(object@data,weighted.ratios=unlist(weighted.ratios))
print("unique SUMO modified protein IDs")
print(length(proteinIDs))
print("unique SUMO modification sites from single-site peptides")
### NOTE(review): `index` is a scalar counter, so length(index) is always 1;
### the intended count here is probably index - 1
print(length(index))
length(prot.pos.list)
### write something to make these text and put them in one column
#paste(unlist(prot.pos.list[1]))
return(object)
}
|
3b360f459fb8467e399d3c9ce5c4643eb27b9cc5
|
1346a6b53de84b2b3abc4529e0e8cea8f4c5378c
|
/man/chpt.Rd
|
ba0cc4ad49335ea6e687a314459b746c6d4556f1
|
[] |
no_license
|
glacierpoint/rnasteps
|
9b8d1cde957f85b110a2b7aeddf4723ee385140d
|
e3312125fc858832cf0af45e48e819f9d04c2ead
|
refs/heads/master
| 2021-05-16T02:04:46.510036
| 2020-05-01T03:29:57
| 2020-05-01T03:29:57
| 22,822,546
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,760
|
rd
|
chpt.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{chpt}
\alias{chpt}
\title{Fits multiple step models for a given trace}
\usage{
chpt(y, times, w)
}
\arguments{
\item{y}{RNA unwinding trace}
\item{times}{Time points at which RNA trace was recorded}
\item{w}{Sequence of window sizes}
\item{cutoff}{Upper percentile for choosing plausible step locations. Default is 0.9, i.e. only the top 10\% highest values are chosen by default}
\item{cor}{Default is TRUE indicating that the noise is correlated. If FALSE the noise is assumed uncorrelated}
}
\value{
Returns a list with the following elements
\itemize{
\item results - is a list of length equal to the number of models fitted. Each element in this list comprises the elements returned by get.model
\item chpt0 - A list with y, times, w and zstat - the statistics based on which the model is fitted
}
}
\description{
Fits multiple step models for a given trace
}
\examples{
\dontrun{
w<-c(seq(10,90,by=10),seq(100,1000,by=25))
times<-RNA[,2]
chpt1<-chpt(y,times,w)
plot.step(chpt1)
# to save as a pdffile
# plot.step(chpt1,pdfname="RNAFIG1.pdf")
summary(chpt1) # to get bic fit
summary(chpt1,type="aic") # to get aic fit
summary(chpt1,type=5) # to get fit summary of model number 5
plot(chpt1)
mymodel<-get.model(chpt1)
names(mymodel)
get.location(chpt1)
}
}
\references{
Arunajadai SG, Cheng W (2013) Step Detection in Single-Molecule Real Time Trajectories Embedded in Correlated Noise. PLoS ONE 8(3): e59279.
Cheng W, Arunajadai SG, Moffitt JR, Tinoco I Jr, Bustamante C. Single-base pair unwinding and asynchronous RNA release by the hepatitis C virus NS3 helicase. Science. 2011 Sep 23;333(6050):1746-9. doi: 10.1126/science.1206023.
}
\seealso{
get.model, plot.chpt, summary.chpt, get.location
}
|
a84cc5a83f1ec5cd728f628dfe9ac4d6d2819a63
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Introduction_To_Probability_And_Statistics_by_William_Mendenhall,_Robert_J_Beaver,_And_Barbara_M_Beaver/CH15/EX15.2/Ex15_2.R
|
e12f1e9529ff467e7c750efb48f67109cbdba990
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 949
|
r
|
Ex15_2.R
|
## Wilcoxon rank-sum test: is treated kraft paper stronger than standard
## kraft paper?  (n1 = n2 = 10.)  First via the tabulated critical value,
## then via the normal approximation.
standard1 <- c(1.21,1.43,1.35,1.51,1.39,1.17,1.48,1.42,1.29,1.40)
treated2 <- c(1.49,1.37,1.67,1.50,1.31,1.29,1.52,1.37,1.44,1.53)
n1 <- length(standard1)
n2 <- length(treated2)
alpha <- 0.05

## pooled sample; ranks are taken over both groups together
x <- c(standard1, treated2)

## sum of the ranks in positions start..end of the pooled ranking
ranksum <- function(x, start, end) {
  return(sum(x[start:end]))
}

rank <- rank(x)
t1 <- ranksum(rank, 1, n1)      # rank sum of the standard paper
t2 <- n1 * (n1 + n2 + 1) - t1   # rank sum of the treated paper

if (t1 <= 82) { # critical value of T at n1=n2=10 at alpha = 0.05 is 82
  print("Reject the hypothesis")
} else {
  print("Insufficient evidence to conclude that the treated kraft paper is stronger than the standard paper")
}

## Normal approximation.  The mean of the rank-sum statistic T1 is
## n1*(n1 + n2 + 1)/2 (= 105 here); the original code used
## n1*(n1*n2 + 1)/2, which gives 505 and is inconsistent with the t2
## formula above and with the tabulated p-value below.
muo_t <- (n1 * (n1 + n2 + 1))/2
sigma_sqr_t <- ((n1 * n2) * (n1 + n2 + 1))/12
sigma_t <- sqrt(sigma_sqr_t)
z <- (t1 - muo_t)/sigma_t

## one-sided p-value looked up in the normal table (textbook value);
## pnorm() would compute it exactly
p_value <- 0.5 - 0.4292
p_value
if (p_value <= alpha) {
  print("Reject the hypothesis")
} else {
  print("Cannot conclude the treated kraft paper is stronger than the standard paper")
}
|
490529a465033496705e402e6d82952f5cf70dca
|
595a4ead5c1d7761c429d9dde8f3c84e5a6e99ca
|
/man/ANOVA_exact.Rd
|
c9276ed622462b88c6c761c147e51c3cfce7c4ce
|
[
"MIT"
] |
permissive
|
arcaldwell49/Superpower
|
163804ae7682be43c3f7241671948bc73f1706ea
|
ed624538f6b28d9243720994b3428edfe80b8bfa
|
refs/heads/master
| 2023-02-16T19:24:16.312351
| 2023-02-11T00:20:47
| 2023-02-11T00:20:47
| 206,843,269
| 60
| 17
|
NOASSERTION
| 2023-02-10T23:41:36
| 2019-09-06T17:28:44
|
HTML
|
UTF-8
|
R
| false
| true
| 4,744
|
rd
|
ANOVA_exact.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ANOVA_exact.R
\name{ANOVA_exact}
\alias{ANOVA_exact}
\alias{ANOVA_exact2}
\title{Simulates an exact dataset (mu, sd, and r represent empirical, not population, mean and covariance matrix) from the design to calculate power}
\usage{
ANOVA_exact(
design_result,
correction = Superpower_options("correction"),
alpha_level = Superpower_options("alpha_level"),
verbose = Superpower_options("verbose"),
emm = Superpower_options("emm"),
emm_model = Superpower_options("emm_model"),
contrast_type = Superpower_options("contrast_type"),
liberal_lambda = Superpower_options("liberal_lambda"),
emm_comp
)
ANOVA_exact2(
design_result,
correction = Superpower_options("correction"),
alpha_level = Superpower_options("alpha_level"),
verbose = Superpower_options("verbose"),
emm = Superpower_options("emm"),
emm_model = Superpower_options("emm_model"),
contrast_type = Superpower_options("contrast_type"),
emm_comp,
liberal_lambda = Superpower_options("liberal_lambda")
)
}
\arguments{
\item{design_result}{Output from the ANOVA_design function}
\item{correction}{Set a correction of violations of sphericity. This can be set to "none", "GG" Greenhouse-Geisser, and "HF" Huynh-Feldt}
\item{alpha_level}{Alpha level used to determine statistical significance}
\item{verbose}{Set to FALSE to not print results (default = TRUE)}
\item{emm}{Set to FALSE to not perform analysis of estimated marginal means}
\item{emm_model}{Set model type ("multivariate", or "univariate") for estimated marginal means}
\item{contrast_type}{Select the type of comparison for the estimated marginal means. Default is pairwise. See ?emmeans::`contrast-methods` for more details on acceptable methods.}
\item{liberal_lambda}{Logical indicator of whether to use the liberal (cohen_f^2\*(num_df+den_df)) or conservative (cohen_f^2\*den_df) calculation of the noncentrality (lambda) parameter estimate. Default is FALSE.}
\item{emm_comp}{Set the comparisons for estimated marginal means comparisons. This is a factor name (a), combination of factor names (a+b), or for simple effects a | sign is needed (a|b)}
}
\value{
Returns dataframe with simulation data (power and effect sizes!), anova results and simple effect results, plot of exact data, and alpha_level. Note: Cohen's f = sqrt(pes/1-pes) and the noncentrality parameter is = f^2*df(error)
\describe{
\item{\code{"dataframe"}}{A dataframe of the simulation result.}
\item{\code{"aov_result"}}{\code{aov} object returned from \code{\link{aov_car}}.}
\item{\code{"aov_result"}}{\code{emmeans} object returned from \code{\link{emmeans}}.}
\item{\code{"main_result"}}{The power analysis results for ANOVA level effects.}
\item{\code{"pc_results"}}{The power analysis results for the pairwise (t-test) comparisons.}
\item{\code{"emm_results"}}{The power analysis results of the pairwise comparison results.}
\item{\code{"manova_results"}}{Default is "NULL". If a within-subjects factor is included, then the power of the multivariate (i.e. MANOVA) analyses will be provided.}
\item{\code{"alpha_level"}}{The alpha level, significance cut-off, used for the power analysis.}
\item{\code{"method"}}{Record of the function used to produce the simulation}
\item{\code{"plot"}}{A plot of the dataframe from the simulation; should closely match the meansplot in \code{\link{ANOVA_design}}}
}
}
\description{
Simulates an exact dataset (mu, sd, and r represent empirical, not population, mean and covariance matrix) from the design to calculate power
}
\section{Functions}{
\itemize{
\item \code{ANOVA_exact2}: An extension of ANOVA_exact that uses the effect sizes calculated from very large sample size empirical simulation. This allows for small sample sizes, where ANOVA_exact cannot, while still accurately estimating power. However, model objects (emmeans and aov) are not included as output, and pairwise (t-test) results are not currently supported.
}}
\section{Warnings}{
Varying the sd or r (e.g., entering multiple values) violates assumptions of homoscedasticity and sphericity respectively
}
\examples{
## Set up a within design with 2 factors, each with 2 levels,
## with correlation between observations of 0.8,
## 40 participants (who do all conditions), and standard deviation of 2
## with a mean pattern of 1, 0, 1, 0, conditions labeled 'condition' and
## 'voice', with names for levels of "cheerful", "sad", and "human", "robot"
design_result <- ANOVA_design(design = "2w*2w", n = 40, mu = c(1, 0, 1, 0),
sd = 2, r = 0.8, labelnames = c("condition", "cheerful",
"sad", "voice", "human", "robot"))
exact_result <- ANOVA_exact(design_result, alpha_level = 0.05)
}
|
6469cf65f75f981745e7fdb7a039be43e2f8bde4
|
98812f2e5b9efd30c62883d30ad8a989eb34a156
|
/Laurel Messer/Tandem/Code/4times/03c_models_all.R
|
7b0a854201acbeaa434718f0dcb4533fb5d0d313
|
[] |
no_license
|
childhealthbiostatscore/BDC-Code
|
8983b1cc0d1bceab9e7ccb7b8b4d885ba1b45fa8
|
eb48c29233fc3d956167bf2481a5c5ed827ebb72
|
refs/heads/master
| 2023-09-02T09:46:41.745174
| 2023-09-01T20:04:44
| 2023-09-01T20:04:44
| 168,424,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,963
|
r
|
03c_models_all.R
|
######MODELS FOR PRO TANDEM STUDY
###AUTHOR: KRISTEN CAMPBELL
###DATE: 5/4/2020
library(RColorBrewer)
## these sourced scripts define `dat` (and helper functions) — TODO confirm
source('C:/Users/campbkri/Documents/GitHub/BDC-Code/Laurel Messer/Tandem/Code/4times/00_data_4times.R')
source('C:/Users/campbkri/Documents/GitHub/BDC-Code/Laurel Messer/Tandem/Code/4times/01_survey_4times.R')
## keep only the id, covariate, and factor-score columns needed for modeling
dat.model<-dat[,c(which(colnames(dat) %in% c("ExternalReference","B_RESPONDENT","Baseline_A1C",
"Age","Gender","BaselineAGE","duration_of_diabetes_at_baseline_years","cgm_yn","method_cat",
"baseline_factor1","post2m_factor1","post4m_factor1","post6m_factor1",
"baseline_factor2","post2m_factor2","post4m_factor2","post6m_factor2")))]
## carry the baseline scores along as time-invariant covariates
dat.model$factor1_baseline<-dat.model$baseline_factor1
dat.model$factor2_baseline<-dat.model$baseline_factor2
###Mixed modeling:
## wide -> long: one row per subject x time point, with factor1/factor2
## columns (reshape pairs the interleaved varying names with v.names
## column-wise, so each v.name picks up its four time points)
dat.long<-reshape(dat.model,
varying=c("baseline_factor1","baseline_factor2",
"post2m_factor1","post2m_factor2",
"post4m_factor1","post4m_factor2",
"post6m_factor1","post6m_factor2"),
v.names = c("factor1","factor2"),
timevar = "time",
times = c("baseline", "post2m", "post4m","post6m"),
idvar="ExternalReference",direction="long")
dat.long<-dat.long[order(dat.long$ExternalReference,dat.long$time),]
dat.long$time<-factor(dat.long$time)
## NaN == "NaN" compares after character coercion, so this recodes NaN
## scores to NA
dat.long$factor1[dat.long$factor1=="NaN"]<-NA
#A: full trajectories:
num_measures<-function(ID,data){
temp<-lapply(unique(ID), function(x){
dat.temp <- subset(data, ID == x)
##dat.temp <- subset(dat.long,dat.long$ExternalReference=="BDC_0001")
dat.temp$num_factor1<-nrow(subset(dat.temp,!is.na(dat.temp$factor1)))
dat.temp$num_factor2<-nrow(subset(dat.temp,!is.na(dat.temp$factor2)))
dat.temp
dat.temp})
dat<-do.call(rbind,temp)
}
dat.long<-num_measures(dat.long$ExternalReference,dat.long)
#####FACTOR 1 - not normal
## keep rows with an observed factor1 score and an observed baseline score
dat.long.1<-subset(dat.long,!is.na(dat.long$factor1))
dat.long.1<-subset(dat.long.1,!is.na(dat.long.1$factor1_baseline))
hist(dat.long.1$factor1)
## rescale the 1-10 score to [0,1] ...
dat.long.1$factor1_beta<-(dat.long.1$factor1-1)/(10-1)
quantile(dat.long.1$factor1_beta)
## ... then shrink away from the 0/1 boundaries, (y*(n-1)+0.5)/n, so the
## values fit an open-interval beta model — presumably for the SAS beta
## regression fed by the write.csv below; TODO confirm
dat.long.1$factor1_beta_ex<-(dat.long.1$factor1_beta*(nrow(dat.long.1)-1)+0.5)/nrow(dat.long.1)
quantile(dat.long.1$factor1_beta_ex)
hist(dat.long.1$factor1_beta_ex)
#export to SAS:
#write.csv(dat.long.1,"S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Data/data_factor1_05122020.csv")
## same preparation for factor 2
dat.long.2<-subset(dat.long,!is.na(dat.long$factor2))
dat.long.2<-subset(dat.long.2,!is.na(dat.long.2$factor2_baseline))
hist(dat.long.2$factor2)
dat.long.2$factor2_beta<-(dat.long.2$factor2-1)/(10-1)
quantile(dat.long.2$factor2_beta)
dat.long.2$factor2_beta_ex<-(dat.long.2$factor2_beta*(nrow(dat.long.2)-1)+0.5)/nrow(dat.long.2)
quantile(dat.long.2$factor2_beta_ex)
hist(dat.long.2$factor2_beta_ex)
#write.csv(dat.long.2,"S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Data/data_factor2_05122020.csv")
###Read in LSMeans and create tables:
## LSMeans and pairwise-change estimates produced externally (SAS) for the
## two factor models
dat1<-read.csv("S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Data/lsmeans_factor1.csv")
est1<-read.csv("S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Data/estimate_factor1.csv")
dat2<-read.csv("S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Data/lsmeans_factor2.csv")
est2<-read.csv("S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Data/estimate_factor2.csv")
#Tables of change:
## adjusted p-values for within-group changes, one row per delivery method
mod1_inj<-data.frame("Baseline to 2mo"=est1$Adjp[est1$Label=="Injections: baseline to 2mo"],
"2mo to 4mo"=est1$Adjp[est1$Label=="Injections: 2mo to 4mo"],
"4mo to 6mo"=est1$Adjp[est1$Label=="Injections: 4mo to 6mo"])
mod1_nt<-data.frame("Baseline to 2mo"=est1$Adjp[est1$Label=="Non-Tandem: baseline to 2mo"],
"2mo to 4mo"=est1$Adjp[est1$Label=="Non-Tandem: 2mo to 4mo"],
"4mo to 6mo"=est1$Adjp[est1$Label=="Non-Tandem: 4mo to 6mo"])
mod1_t<-data.frame("Baseline to 2mo"=est1$Adjp[est1$Label=="Tandem: baseline to 2mo"],
"2mo to 4mo"=est1$Adjp[est1$Label=="Tandem: 2mo to 4mo"],
"4mo to 6mo"=est1$Adjp[est1$Label=="Tandem: 4mo to 6mo"])
mod1<-rbind(mod1_inj,mod1_nt,mod1_t)
## columns 3, 2, 16:18 of the LSMeans file — presumably method, time, and
## the back-transformed mean with CI bounds; TODO confirm against the file
mod1_data<-dat1[,c(3,2,16:18)]
mod1_data$mu_trans<-round(mod1_data$mu_trans,2)
mod1_data$muUpper_trans<-round(mod1_data$muUpper_trans,2)
mod1_data$muLower_trans<-round(mod1_data$muLower_trans,2)
## same tables for factor 2
mod2_inj<-data.frame("Baseline to 2mo"=est2$Adjp[est2$Label=="Injections: baseline to 2mo"],
"2mo to 4mo"=est2$Adjp[est2$Label=="Injections: 2mo to 4mo"],
"4mo to 6mo"=est2$Adjp[est2$Label=="Injections: 4mo to 6mo"])
mod2_nt<-data.frame("Baseline to 2mo"=est2$Adjp[est2$Label=="Non-Tandem: baseline to 2mo"],
"2mo to 4mo"=est2$Adjp[est2$Label=="Non-Tandem: 2mo to 4mo"],
"4mo to 6mo"=est2$Adjp[est2$Label=="Non-Tandem: 4mo to 6mo"])
mod2_t<-data.frame("Baseline to 2mo"=est2$Adjp[est2$Label=="Tandem: baseline to 2mo"],
"2mo to 4mo"=est2$Adjp[est2$Label=="Tandem: 2mo to 4mo"],
"4mo to 6mo"=est2$Adjp[est2$Label=="Tandem: 4mo to 6mo"])
mod2<-rbind(mod2_inj,mod2_nt,mod2_t)
mod2_data<-dat2[,c(3,2,16:18)]
mod2_data$mu_trans<-round(mod2_data$mu_trans,2)
mod2_data$muUpper_trans<-round(mod2_data$muUpper_trans,2)
mod2_data$muLower_trans<-round(mod2_data$muLower_trans,2)
#Estimated plots
# ---- Figure: Device Satisfaction (factor 1) trajectories ------------------
# Base-graphics figure: one mean trajectory per previous-insulin-method
# group, each with an alpha-blended interval ribbon (muLower_trans to
# muUpper_trans), rendered in greyscale for print.
# NOTE(review): brewer.pal()/col2rgb() require RColorBrewer, assumed to
# be loaded earlier in this script.
tiff("S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Results/DS_model_bw_07082020.tiff",
     units = 'in',width=5,height=7,res=500,compression="lzw")
par(mar=c(5.1,5,4.1,2.1))
# Empty frame; both axes are drawn manually below.
plot(c(1,4),c(7,10),type="n",xlab="Time",ylab="Estimated Device Satisfaction (DS) Score \n (Higher Score = Higher Satisfaction)",xaxt="n",
     main="",frame.plot = F,yaxt="n")
axis(1,at=c(1,2,3,4),c("Baseline","2 months","4 months","6 months"))
axis(2,at=c(7,7.5,8,8.5,9,9.5,10),las=1)
# Injections (MDI): light grey.
col<-col2rgb(brewer.pal(9, "Greys")[4])
# Ribbon between the lower and upper bounds (alpha = 125/255).
polygon(c(1,2,3,4,4,3,2,1),c(dat1$muLower_trans[dat1$method_cat=="Injections"],
                             rev(dat1$muUpper_trans[dat1$method_cat=="Injections"])),
        col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
        border=NA)
points(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Injections"],pch=19,col=brewer.pal(9, "Greys")[4])
lines(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Injections"],col=brewer.pal(9, "Greys")[4])
# Non-Tandem pump: mid grey.
col<-col2rgb(brewer.pal(9, "Greys")[6])
polygon(c(1,2,3,4,4,3,2,1),c(dat1$muLower_trans[dat1$method_cat=="Non-Tandem Pump"],
                             rev(dat1$muUpper_trans[dat1$method_cat=="Non-Tandem Pump"])),
        col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
        border=NA)
points(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Non-Tandem Pump"],pch=19,col=brewer.pal(9, "Greys")[6])
lines(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Non-Tandem Pump"],col=brewer.pal(9, "Greys")[6])
# Tandem pump: near black.
col<-col2rgb(brewer.pal(9, "Greys")[9])
polygon(c(1,2,3,4,4,3,2,1),c(dat1$muLower_trans[dat1$method_cat=="Tandem Pump"],
                             rev(dat1$muUpper_trans[dat1$method_cat=="Tandem Pump"])),
        col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
        border=NA)
points(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Tandem Pump"],pch=19,col=brewer.pal(9, "Greys")[9])
lines(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Tandem Pump"],col=brewer.pal(9, "Greys")[9])
legend("bottomright",c("MDI","Non-Tandem Pump","Tandem Pump"),
       lty=1,pch=19,col=c(brewer.pal(9, "Greys")[4],
                          brewer.pal(9, "Greys")[6],
                          brewer.pal(9, "Greys")[9]),title = "Previous Insulin Method",bty="n")
dev.off()
#########DIABETES BURDEN
# ---- Figure: Diabetes Impact (factor 2) trajectories ----------------------
# Same layout as the DS figure above, driven by the factor-2 LS-means
# (dat2) on a 1..6 score axis.
# NOTE(review): the legend lists groups in a different order here
# (Non-Tandem, Tandem, MDI) than in the DS figure; the colour mapping is
# still correct, but consider matching the order for consistency.
tiff("S:/Shared Projects/Laura/BDC/Projects/Laurel Messer/Tandem/Results/DI_model_bw_07082020.tiff",
     height=7,width=5,units = "in",res=500,compression="lzw")
par(mar=c(5.1,5,4.1,2.1))
# Empty frame; both axes are drawn manually below.
plot(c(1,4),c(1,6),type="n",xlab="Time",ylab="Estimated Diabetes Impact (DI) Score \n (Higher Score = Higher Impact)",xaxt="n",
     main="",frame.plot = F,yaxt="n")
axis(1,at=c(1,2,3,4),c("Baseline","2 months","4 months","6 months"))
axis(2,at=c(1,2,3,4,5,6),las=1)
# Injections (MDI): light grey.
col<-col2rgb(brewer.pal(9, "Greys")[4])
polygon(c(1,2,3,4,4,3,2,1),c(dat2$muLower_trans[dat2$method_cat=="Injections"],
                             rev(dat2$muUpper_trans[dat2$method_cat=="Injections"])),
        col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
        border=NA)
points(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Injections"],pch=19,col=brewer.pal(9, "Greys")[4])
lines(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Injections"],col=brewer.pal(9, "Greys")[4])
# Non-Tandem pump: mid grey.
col<-col2rgb(brewer.pal(9, "Greys")[6])
polygon(c(1,2,3,4,4,3,2,1),c(dat2$muLower_trans[dat2$method_cat=="Non-Tandem Pump"],
                             rev(dat2$muUpper_trans[dat2$method_cat=="Non-Tandem Pump"])),
        col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
        border=NA)
points(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Non-Tandem Pump"],pch=19,col=brewer.pal(9, "Greys")[6])
lines(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Non-Tandem Pump"],col=brewer.pal(9, "Greys")[6])
# Tandem pump: near black.
col<-col2rgb(brewer.pal(9, "Greys")[9])
polygon(c(1,2,3,4,4,3,2,1),c(dat2$muLower_trans[dat2$method_cat=="Tandem Pump"],
                             rev(dat2$muUpper_trans[dat2$method_cat=="Tandem Pump"])),
        col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
        border=NA)
points(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Tandem Pump"],pch=19,col=brewer.pal(9, "Greys")[9])
lines(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Tandem Pump"],col=brewer.pal(9, "Greys")[9])
legend("bottomright",c("Non-Tandem Pump","Tandem Pump","MDI"),
       lty=1,pch=19,col=c(brewer.pal(9, "Greys")[6],
                          brewer.pal(9, "Greys")[9],
                          brewer.pal(9, "Greys")[4]),title = "Previous Insulin Method",bty="n")
dev.off()
###overlaid on boxplots:
# boxplot(dat$baseline_factor1[dat$method_cat=="Injections"],
# dat$post2m_factor1[dat$method_cat=="Injections"],
# dat$post4m_factor1[dat$method_cat=="Injections"],
# dat$post6m_factor1[dat$method_cat=="Injections"],
# xlab="Time Point",xaxt="n",main="Previous Injections",
# ylim=c(1,10))
# axis(1,at=c(1,2,3,4),labels=c("Baseline","2 Month","4 Month","6 Month"))
#
# col<-col2rgb(brewer.pal(3, "Set1")[1])
#
# polygon(c(1,2,3,4,4,3,2,1),c(dat1$muLower_trans[dat1$method_cat=="Injections"],
# rev(dat1$muUpper_trans[dat1$method_cat=="Injections"])),
# col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
# border=NA)
#
# points(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Injections"],pch=19,col=brewer.pal(3, "Set1")[1])
# lines(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Injections"],col=brewer.pal(3, "Set1")[1])
#
#
# boxplot(dat$baseline_factor1[dat$method_cat=="Non-Tandem Pump"],
# dat$post2m_factor1[dat$method_cat=="Non-Tandem Pump"],
# dat$post4m_factor1[dat$method_cat=="Non-Tandem Pump"],
# dat$post6m_factor1[dat$method_cat=="Non-Tandem Pump"],
# xlab="Time Point",xaxt="n",main="Previous Non-Tandem pump",
# ylim=c(1,10))
# axis(1,at=c(1,2,3,4),labels=c("Baseline","2 Month","4 Month","6 Month"))
#
#
# col<-col2rgb(brewer.pal(3, "Set1")[2])
#
# polygon(c(1,2,3,4,4,3,2,1),c(dat1$muLower_trans[dat1$method_cat=="Non-Tandem Pump"],
# rev(dat1$muUpper_trans[dat1$method_cat=="Non-Tandem Pump"])),
# col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
# border=NA)
#
# points(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Non-Tandem Pump"],pch=19,col=brewer.pal(3, "Set1")[2])
# lines(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Non-Tandem Pump"],col=brewer.pal(3, "Set1")[2])
#
#
# boxplot(dat$baseline_factor1[dat$method_cat=="Tandem Pump"],
# dat$post2m_factor1[dat$method_cat=="Tandem Pump"],
# dat$post4m_factor1[dat$method_cat=="Tandem Pump"],
# dat$post6m_factor1[dat$method_cat=="Tandem Pump"],
# xlab="Time Point",xaxt="n",main="Previous Tandem Pump",
# ylim=c(1,10))
# axis(1,at=c(1,2,3,4),labels=c("Baseline","2 Month","4 Month","6 Month"))
#
#
# col<-col2rgb(brewer.pal(3, "Set1")[3])
#
# polygon(c(1,2,3,4,4,3,2,1),c(dat1$muLower_trans[dat1$method_cat=="Tandem Pump"],
# rev(dat1$muUpper_trans[dat1$method_cat=="Tandem Pump"])),
# col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
# border=NA)
# points(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Tandem Pump"],pch=19,col=brewer.pal(3, "Set1")[3])
# lines(c(1,2,3,4),dat1$mu_trans[dat1$method_cat=="Tandem Pump"],col=brewer.pal(3, "Set1")[3])
#
#
# boxplot(dat$baseline_factor2[dat$method_cat=="Injections"],
# dat$post2m_factor2[dat$method_cat=="Injections"],
# dat$post4m_factor2[dat$method_cat=="Injections"],
# dat$post6m_factor2[dat$method_cat=="Injections"],
# xlab="Time Point",xaxt="n",main="Previous Injections",
# ylim=c(1,10))
# axis(1,at=c(1,2,3,4),labels=c("Baseline","2 Month","4 Month","6 Month"))
#
# col<-col2rgb(brewer.pal(3, "Set1")[1])
#
# polygon(c(1,2,3,4,4,3,2,1),c(dat2$muLower_trans[dat2$method_cat=="Injections"],
# rev(dat2$muUpper_trans[dat2$method_cat=="Injections"])),
# col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
# border=NA)
#
# points(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Injections"],pch=19,col=brewer.pal(3, "Set1")[1])
# lines(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Injections"],col=brewer.pal(3, "Set1")[1])
#
#
# boxplot(dat$baseline_factor2[dat$method_cat=="Non-Tandem Pump"],
# dat$post2m_factor2[dat$method_cat=="Non-Tandem Pump"],
# dat$post4m_factor2[dat$method_cat=="Non-Tandem Pump"],
# dat$post6m_factor2[dat$method_cat=="Non-Tandem Pump"],
# xlab="Time Point",xaxt="n",main="Previous Non-Tandem pump",
# ylim=c(1,10))
# axis(1,at=c(1,2,3,4),labels=c("Baseline","2 Month","4 Month","6 Month"))
#
# col<-col2rgb(brewer.pal(3, "Set1")[2])
#
# polygon(c(1,2,3,4,4,3,2,1),c(dat2$muLower_trans[dat2$method_cat=="Non-Tandem Pump"],
# rev(dat2$muUpper_trans[dat2$method_cat=="Non-Tandem Pump"])),
# col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
# border=NA)
#
# points(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Non-Tandem Pump"],pch=19,col=brewer.pal(3, "Set1")[2])
# lines(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Non-Tandem Pump"],col=brewer.pal(3, "Set1")[2])
#
#
# boxplot(dat$baseline_factor2[dat$method_cat=="Tandem Pump"],
# dat$post2m_factor2[dat$method_cat=="Tandem Pump"],
# dat$post4m_factor2[dat$method_cat=="Tandem Pump"],
# dat$post6m_factor2[dat$method_cat=="Tandem Pump"],
# xlab="Time Point",xaxt="n",main="Previous Tandem Pump",
# ylim=c(1,10))
# axis(1,at=c(1,2,3,4),labels=c("Baseline","2 Month","4 Month","6 Month"))
#
# col<-col2rgb(brewer.pal(3, "Set1")[3])
#
# polygon(c(1,2,3,4,4,3,2,1),c(dat2$muLower_trans[dat2$method_cat=="Tandem Pump"],
# rev(dat2$muUpper_trans[dat2$method_cat=="Tandem Pump"])),
# col=rgb(col[1], col[2], col[3], max = 255, alpha = 125, names = "blue50"),
# border=NA)
# points(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Tandem Pump"],pch=19,col=brewer.pal(3, "Set1")[3])
# lines(c(1,2,3,4),dat2$mu_trans[dat2$method_cat=="Tandem Pump"],col=brewer.pal(3, "Set1")[3])
|
72a055dd2a7e705fac383b92bac56ea47cd8c860
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diceR/inst/testfiles/connectivity_matrix/libFuzzer_connectivity_matrix/connectivity_matrix_valgrind_files/1609958689-test.R
|
34347ab3a256c853ad72993e22a38f2f8a1042c0
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,891
|
r
|
1609958689-test.R
|
# Auto-generated fuzzing regression test (libFuzzer / valgrind harness)
# for diceR:::connectivity_matrix.  The input deliberately mixes extreme
# magnitudes, subnormals and NaNs to exercise numerical edge cases; the
# script only checks that the call completes and prints the structure of
# whatever it returns.
testlist <- list(x = c(-1.81742001484345e-130, 3.56078486194419e+175, 6.07590246477201e+144, -5.46354690055813e-108, -2.55409533999453e-126, 1.34518625924781e-284, NaN, 5.25470539658555e-312, NaN, 3.53443932031926e-111, 9.3470424439443e-307, 1.38542983196395e-309, 7.41752795766083e-68, 1.78027637917481e-307, 2.35713802174392e-306, 2.71615461496516e-312, 1.42609294145491e-101, NaN, NaN, 3.39883266988882e-315, 7.29112200597562e-304, 2.23750363635562e-154, 5.48676962125445e-310, -1.35807722332605e-309, 1.08441445059778e-311, -5.18453389182304e-130, 5.48588770660083e+303, 3.02127655160755e-306, NaN, NaN, 5.41114290240681e-312, 2.60698787710058e-312, -1.8033866471139e-130, -1.79199026903378e+268, 7.05704425499948e-304, -5.48745820213966e+303, 3.56470723518021e+59, 3.55259342462103e+59, NaN, 4.63423715564756e-299, 1.96568260790928e-236, 2.28700218383386e-309, 6.27335258267354e+283, NaN, 7.04903371223352e-305, 3.01351536995325e+296, -1.0010786776111e-307, 2.78133171315505e-309, -8.37116099364271e+298, 4.24460690709598e-314, 2.48104144581179e-265, 2.52467545024877e-321, -2.46045035826595e+260, NaN, NaN, 1.88274914064291e-183, 6.96396578678978e-310, NaN, 1.41283359185427e-303, 5.48684451346174e-310, -5.48745808368284e+303, -1.53286278513146e-129, NaN, 3.31031343389594e-312, -2.35351790629134e+130, NaN, 5.91526093083387e-270, 3.19471221495295e-236, 1.98283051335198e-279, 6.69050427972785e-198, NaN, 1.34518630944296e-284, NaN, 1.3851114500267e-309, 2.39021606422747e-306, 1.39067113231181e-309, -1.99999999976723, 7.317830266744e-304, 3.78668712981951e-270, NaN, NaN, 7.07128472236262e-304, NaN, -7.78775850482709e-307, NaN, 1.41283359185427e-303, 5.48684451346174e-310, 1.62636991832101e-260, -5.8648214839349e-148, -8.57207224290277e+303, 5.41108894317552e-312, 6.2733561141161e+283))
result <- do.call(diceR:::connectivity_matrix,testlist)
str(result)
|
4dc95ead1965443890ab7bc641d987c36fa6f24b
|
024d2ee48d6eae806e98cba16819eb2b3fd52f1e
|
/man/clean_data_package.Rd
|
a374a20394d45eb48ccfbbbd5b06b2f77d60526f
|
[] |
no_license
|
Bioconductor/BiocContributions
|
eb4421f440afeb18f6c926fa0de2b6a4778e1949
|
13595f647f6b95af566583ffed0fe9a465147a69
|
refs/heads/master
| 2020-04-10T00:31:58.441908
| 2017-06-27T16:49:41
| 2017-06-27T16:49:41
| 40,479,641
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 834
|
rd
|
clean_data_package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addPackage.R
\name{clean_data_package}
\alias{clean_data_package}
\title{Clean and copy a Data Experiment package}
\usage{
clean_data_package(tarball, svn_pkgs = proj_path("experiment/pkgs"),
svn_data_store = proj_path("experiment/data_store"), data_dirs = c("data",
"inst/extdata"))
}
\arguments{
\item{tarball}{package tarball}
\item{svn_pkgs}{the location of the Data Experiment \sQuote{pkgs} checkout.}
\item{svn_data_store}{the location of the Data Experiment
\sQuote{data_store} checkout.}
\item{data_dirs}{directories inside the package that contain data files;
defaults to \code{c("data", "inst/extdata")}.}
}
\value{
File paths to the copied locations (invisibly).
}
\description{
Clean and copy a Data Experiment package
}
\examples{
\dontrun{
pkg <- system.file(package="BiocContributions",
"testpackages", "RNASeqPower_1.11.0.tar.gz")
clean_data_package(pkg)
}
}
|
bb0fd51dab232bb565aea55344b8d8bf743724ee
|
a2d7f12aee075a70a15e87a5ee994d9dcbec3e41
|
/man/add_chunk.Rd
|
a09bebfb21b32a1988015c432311f55560d88796
|
[] |
no_license
|
iMarcello/chronicle
|
75294b2ec2c918ea16dda97bf487f3e8728b92df
|
2f28134222dd1e9945f40d36f2bba864dbfe3a79
|
refs/heads/master
| 2023-05-31T10:37:59.769876
| 2021-06-25T13:23:45
| 2021-06-25T13:23:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,790
|
rd
|
add_chunk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enchunk.R
\name{add_chunk}
\alias{add_chunk}
\title{Transforms a function call into an Rmarkdown chunk}
\usage{
add_chunk(
report = "",
fun,
params,
chunk_title = NULL,
title_level = 2,
echo = FALSE,
message = FALSE,
warning = FALSE,
fig_width = NULL,
fig_height = NULL,
guess_title = TRUE
)
}
\arguments{
\item{report}{Character string containing all the R Markdown chunks previously added. Default is '', an empty report.}
\item{fun}{Function to call.}
\item{params}{List of parameters to be passed to fun.}
\item{chunk_title}{Title of the Rmarkdown chunk. If NULL, chronicle will try to parse a generic title based on the function and parameters passed using make_title()}
\item{title_level}{Level of the section title of this plot (ie, number of # on Rmarkdown syntax.)}
\item{echo}{Whether to display the source code in the output document. Default is FALSE.}
\item{message}{Whether to preserve messages on rendering. Default is FALSE.}
\item{warning}{Whether to preserve warnings on rendering. Default is FALSE.}
\item{fig_width}{Width of the plot (in inches).}
\item{fig_height}{Height of the plot (in inches).}
\item{guess_title}{If TRUE, tries to generate a generic title for chronicle::make_* family of functions (eg 'Sepal.Length vs Sepal.Width by Species' for make_scatter)}
}
\value{
An rmarkdown chunk as a character string.
}
\description{
Transforms a function call into an Rmarkdown chunk
}
\examples{
library(chronicle)
html_chunk <- add_chunk(fun = chronicle::make_barplot,
params = list(dt = 'iris',
value = 'Sepal.Width',
bars = 'Species'))
cat(html_chunk)
}
|
58f898a358921889662c53bdedd2295ef12324a0
|
d7629ee49c54708846411299b9efe174cffae655
|
/man/plot_weather_data.Rd
|
4c46a381317e0543730ca1a6eb4954797f79968a
|
[
"MIT"
] |
permissive
|
dbandrews/noaastnr
|
9d1a12b4ca2ef710919ab7943908e86caaeff21d
|
522026593c2178a288120809cbcc55ea46c19e6e
|
refs/heads/main
| 2023-03-24T04:56:21.208947
| 2021-03-20T19:33:24
| 2021-03-20T19:33:24
| 350,523,680
| 0
| 0
|
NOASSERTION
| 2021-03-22T23:53:10
| 2021-03-22T23:53:10
| null |
UTF-8
|
R
| false
| true
| 648
|
rd
|
plot_weather_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/noaastnr.R
\name{plot_weather_data}
\alias{plot_weather_data}
\title{Plot weather data}
\usage{
plot_weather_data(obs_df, col_name, time_basis)
}
\arguments{
\item{obs_df}{data.frame of weather station observations, as returned by
\code{get_weather_data()}}
\item{col_name}{name of the observation column to plot, e.g.
\code{"air_temp"}}
\item{time_basis}{temporal aggregation basis for the plot, e.g.
\code{"monthly"}}
}
\value{
'ggplot2'
}
\description{
Visualizes the weather station observations including air temperature,
atmospheric pressure, wind speed, and wind direction changing over time.
}
\examples{
weather_df <- get_weather_data("911650-22536", 2020)
plot_weather_data(obs_df = weather_df, col_name = "air_temp", time_basis = "monthly")
}
|
341113ba4dc5c0036c4b0689b564e72fa20083db
|
a330ecd2f5248e1b8886ed7810343fb5313dca20
|
/man/RelationPict.Rd
|
27bd72050e262d06e2ea932f638f31a372940987
|
[] |
no_license
|
cran/StakeholderAnalysis
|
c890d407b1b2f486b9fc1cac0097320ec2ace8f4
|
7d468a677fd693611743b471ac15f447f76c84b2
|
refs/heads/master
| 2021-01-21T12:58:12.319532
| 2017-11-13T20:18:07
| 2017-11-13T20:18:07
| 102,109,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
rd
|
RelationPict.Rd
|
\name{RelationPict}
\alias{RelationPict}
\title{RelationPict}
\description{Draws a picture of stakeholder relationships}
\usage{RelationPict(path, tofile, MeanImpact, StakeholdClassif)}
\arguments{
\item{path}{path to the directory in which pictures are saved; set path="" when tofile=0}
\item{tofile}{logical. 1=save-to-file. 0=show-on-screen}
\item{MeanImpact}{the Leontief coefficient matrix. The $MeanImpact from the ImpactAnalysis function should be used}
\item{StakeholdClassif}{the result of the StakeholdClassif function}
}
\details{The function draws a picture of stakeholder relationships with arrows and circles in different colours}
\value{A picture of stakeholder relationships}
\author{Sebastian Susmarski, Lech Kujawski, Anna Zamojska, Piotr Zientar}
\examples{
# first import DataExp
data(DataExp)
# then execute PrelCalc(), RespVerif(), AttribIdent(), CollabPotential()
# BenefCost(), StakeholdClassif(), ImpactAnalysis()
PrelCalcExp=PrelCalc(data=DataExp, NoAtt=c(2,11,13,15),NoPow=c(3,8,14,16),
NoUrg=c(4,6,10,12),NoLeg=c(5,7,9,17),NoBen=18:22,NoCos=23:27)
RespVerifExp=RespVerif(CountResponses=PrelCalcExp$CountResponses,
NoStakeholders=PrelCalcExp$NoStakeholders)
AttribIdentExp=AttribIdent(TestedResponses=RespVerifExp,
NoAttrib=PrelCalcExp$NoAttrib, NoStakeholders=PrelCalcExp$NoStakeholders,
NameStakeholders=PrelCalcExp$NameStakeholders)
CollabPotentialExp=CollabPotential(AttribIdent=AttribIdentExp)
BenefCostExp=BenefCost(CountResponses=PrelCalcExp$CountResponses)
StakeholdClassifByMean=StakeholdClassif(BenefCostTest=BenefCostExp$BenefCostTest,
CollabPotential=CollabPotentialExp$Mean,AttribIdent=AttribIdentExp$Mean)
ImpactAnalysisExp=ImpactAnalysis(data=DataExp, BenefCost=BenefCostExp$BenefCostInd,
NoStakeholders=PrelCalcExp$NoStakeholders,
NameStakeholders=PrelCalcExp$NameStakeholders)
# RelationPict()
RelationPict(path="",tofile=0,MeanImpact=ImpactAnalysisExp$MeanImpact,
StakeholdClassif=StakeholdClassifByMean)
}
|
4e9814c3c7f3eab8fed438f51e3b91442ccf9501
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i25-m12-u3-v0.pddl_planlen=132/dungeon_i25-m12-u3-v0.pddl_planlen=132.R
|
592e5fc5adb48895f01aec5d84f4b288d24ee180
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 92
|
r
|
dungeon_i25-m12-u3-v0.pddl_planlen=132.R
|
e2c78cdc7634fb4565b3c695a2be027a dungeon_i25-m12-u3-v0.pddl_planlen=132.qdimacs 94309 893453
|
822b823a6483e027ddd48eb8451e10bc4dec6878
|
098e44f11fdb08c992da7651ba33a4d72d44fbdc
|
/Prostate Cancer.R
|
5042b89b439fd76b5a08a2b4a0da05cffabad7a3
|
[] |
no_license
|
migscanet/ML-Algorithms-Applied-in-Prostate-Cancer-Data
|
3734da05584b37400eeeff9bb88b82392781318f
|
d30fdba1ea5c04c2e98cde89f27f69e5c30ddb96
|
refs/heads/main
| 2023-06-04T15:12:03.716163
| 2021-06-24T03:34:06
| 2021-06-24T03:34:06
| 379,789,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,058
|
r
|
Prostate Cancer.R
|
#Machine Learning Algorithms in Prostate Cancer data
#EDA
#Bar Plot for
# NOTE(review): this EDA section references `df` and `area` before `df`
# is read in and attach()ed below -- run the "Import data" lines first,
# or move this section after them.
ggplot(as.data.frame(df), aes(factor(diagnosis_result), fill = factor(diagnosis_result))) + geom_bar()
#Histogram
library(magrittr)
# Mean tumour area, rounded to 2 decimals.  NOTE(review): `=` is used
# for assignment here; the rest of the script uses `<-`.
mean_area = round(mean(area), 2)
mean_area
# Bar chart of the single overall mean area per diagnosis group.
ggplot(df, aes(x = diagnosis_result, y = mean_area)) +
  geom_bar(stat = "identity")
#Supervised Learning
#Support Vector Machine
#Import data
# Interactive file picker.  NOTE(review): `header = T` -> prefer TRUE;
# `sep = ` passes a missing argument, so read.table falls back to its
# default whitespace separator.
df <- read.table(file.choose(), header = T, sep = )
# Drop the patient identifier -- not a predictor.
df <- subset(df, select = -c(id))
# NOTE(review): attach() is discouraged (name-masking surprises);
# prefer df$column or with(df, ...).
attach(df)
names(df)
head(df)
#define Min-Max normalization function
# Linearly rescale a numeric vector onto [0, 1].
#
# Args:
#   x     : numeric vector.
#   na.rm : drop NAs when locating the range (default TRUE); NA
#           positions are preserved in the output.
#
# Returns a numeric vector the same length as x.  A constant vector maps
# to all zeros (the original (x-min)/(max-min) form returned NaN via
# 0/0), and a vector whose range cannot be determined (all NA, or NAs
# present with na.rm = FALSE) maps to all NA, matching the original's
# NA-propagation behaviour.
min_max_norm <- function(x, na.rm = TRUE) {
  stopifnot(is.numeric(x))
  if (length(x) == 0) {
    return(numeric(0))
  }
  if (all(is.na(x))) {
    # Avoid range()'s "no non-missing arguments" warning.
    return(rep(NA_real_, length(x)))
  }
  rng <- range(x, na.rm = na.rm)
  span <- rng[2] - rng[1]
  if (is.na(span)) {
    # NAs present and na.rm = FALSE: propagate NA for every element.
    return(rep(NA_real_, length(x)))
  }
  if (span == 0) {
    # Constant input: define the rescaled value as 0 (NAs preserved).
    return(as.numeric(x - rng[1]))
  }
  (x - rng[1]) / span
}
#apply Min-Max normalization
# NOTE(review): df[1:9] includes the non-numeric diagnosis_result
# column, so min-max arithmetic on it will fail (or mangle the labels);
# normalize only the numeric predictor columns.
df_norm <- as.data.frame(lapply(df[1:9], min_max_norm))
head(df_norm)
#First we need to divide test and train data
# 70/30 random train/test split.  NOTE(review): no set.seed(), so the
# split (and everything downstream) is not reproducible.
sample.size <- floor(0.70 * nrow(df_norm))
indexes <- sample(seq_len(nrow(df_norm)), size = sample.size)
train <- df_norm[indexes,]
test <- df_norm[-indexes,]
train
test
# Persist the training split (written to the working directory).
write.table(train, file="train.csv")
read.table("train.csv", header = TRUE)
#Run the SVM
library(e1071)
# Linear-kernel C-classification SVM on all eight predictors.
svmdata <- svm(formula= diagnosis_result ~ radius + texture+perimeter+area+smoothness+compactness+symmetry+fractal_dimension
               , data = train, type = "C-classification", kernel="linear")
pred <- predict(svmdata,test)
pred
# Confusion matrix: predicted vs observed diagnosis.
table(pred, test$diagnosis_result)
#ANN
library(nnet)
# Single-hidden-layer neural net (4 units, weight decay 0.05).
# NOTE(review): the nnet argument is spelled MaxNWts; `MaxNWTS` is
# silently swallowed by `...` and has no effect.
nnet1 = nnet(factor(diagnosis_result) ~ radius + texture+perimeter+area+smoothness+compactness+symmetry+fractal_dimension,
             data=train, size=4, decay = 0.05, MaxNWTS = 20)
predict1 <- predict(nnet1, test, type = "class")
table(predict1, test$diagnosis_result)
#Unsupervised Learning
#Hierarhical Clustering
# Single-linkage hierarchical clustering on the predictors only
# (response column dropped); dendrogram plotted.
df.clustering <- subset(train, select = -c(diagnosis_result))
sl.out <- hclust(dist(df.clustering, method="euclidian"), method="single")
plot(sl.out)
#Non-Hierarchical Clustering
# k-means with k = 4.  NOTE(review): no set.seed(), so cluster
# assignments differ between runs.
cl <- kmeans(df.clustering,4)
cl
#To get center and membership of countries
plot(df.clustering, col = cl$cluster)
points(cl$centers, col = 1:2, pch = 8, cex=2)
#PCA
df.pca.train <- subset(train, select = -c(diagnosis_result))
df.pca.train
# NOTE(review): prcomp is run on t(df.pca.train), i.e. observations
# become variables -- confirm the transpose is intended.
prin_comp <- prcomp(t(df.pca.train), scale. = TRUE)
plot(prin_comp$x[,1], prin_comp$x[,2])
# Percent of variance explained per component, for the scree plot.
prin_comp.var <- prin_comp$sdev^2
prin_comp.var.per <- round(prin_comp.var/sum(prin_comp.var)*100, 1)
barplot(prin_comp.var.per, main="Scree Plot", xlab="Principal Component", ylab="Percent Variation")
library(ggplot2)
# PC1/PC2 scatter with row labels.
pca.data <- data.frame(Sample=rownames(prin_comp$x), X=prin_comp$x[,1], Y=prin_comp$x[,2])
pca.data
ggplot(data=pca.data, aes(x=X, y=Y, label=Sample))+
  geom_text()+
  xlab(paste("PC1 - ", prin_comp.var.per[1], "%", sep=""))+
  ylab(paste("PC2 - ", prin_comp.var.per[2], "%", sep=""))+
  theme_bw()+
  ggtitle("PCA Graph")
prin_comp.var.per[3]
prin_comp.var.per[4]
################################
# Inspect the prcomp fit and draw the biplot.
names(prin_comp)
prin_comp$center
prin_comp$scale
prin_comp$rotation
dim(prin_comp$x)
biplot(prin_comp, scale =0)
#compute standard deviation of each principal component
std_dev <- prin_comp$sdev
#compute variance
pr_var <- std_dev^2
#check variance of first 10 components
pr_var
#proportion of variance explained
prop_varex <- pr_var/sum(pr_var)
prop_varex
#scree plot
plot(prop_varex, xlab ="Principal Component", ylab="Proportion Variance Explained", type="b")
#cumulative scree plot
plot(cumsum(prop_varex), xlab="Principal Component", ylab = "Cumulative Proportion of Variance Explained", type="b")
# NOTE(review): train.data is created empty and never used below.
train.data <- data.frame()
#Pca 183
# Manual PCA via eigendecomposition of the correlation matrix.
K <- eigen(cor(train))
K
# NOTE(review): %*% on a data.frame errors; train would need to be a
# numeric matrix (as.matrix(train)) for this projection to run.
print(train%*%K$vec, digits=5)
# NOTE(review): princomp() has no `center` argument -- it is absorbed by
# `...` and ignored (princomp always centers).
pca <- princomp(train, center = TRUE, cor=TRUE, scores=TRUE)
pca$scores
train
library(jmv)
data3 <- read.table(file.choose(), header=T, sep= )
# jmv-style exploratory PCA over the training columns.
pca(train, vars = c('diagnosis_result', 'radius', 'texture','perimeter', 'area', 'smoothness', 'compactness', 'symmetry', 'fractal_dimension'),
    nFactorMethod = "parallel", nFactors = 1, minEigen = 1,
    rotation = "varimax", hideLoadings = 0.3, screePlot =FALSE,
    eigen = FALSE, factorCor =FALSE,
    factorSummary = FALSE,
    kmo =FALSE, bartlett = FALSE)
#######################################
|
263a6107b94ee2150036b082ed370c76aaa0b1fa
|
0e2d99a925a895676411be39c4aa12ca28ffb155
|
/man/pmid2doi-defunct.Rd
|
9c692cc6e4b41afd67c9f59ce0a00f09675fa4e1
|
[
"MIT"
] |
permissive
|
ropensci/rcrossref
|
850c774c9abbb692d8e6b789a891238be68de751
|
bf3fcfe0d7deede6c93847081f84e8ab45ba079e
|
refs/heads/main
| 2023-05-23T00:22:10.594719
| 2023-03-17T11:08:55
| 2023-03-17T11:08:55
| 13,002,070
| 158
| 37
|
NOASSERTION
| 2023-03-17T11:08:57
| 2013-09-21T20:42:30
|
R
|
UTF-8
|
R
| false
| true
| 299
|
rd
|
pmid2doi-defunct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmid2doi.R
\name{pmid2doi}
\alias{pmid2doi}
\alias{doi2pmid}
\title{Get a PMID from a DOI, and vice versa.}
\usage{
pmid2doi(...)
doi2pmid(...)
}
\description{
Get a PMID from a DOI, and vice versa.
}
\keyword{internal}
|
fed8732d923f09929c3f3870c8cc96986f2d75d5
|
0484ddd6f392fecfa542747f550248bba6a9bf2a
|
/data-raw/longbmkr.R
|
65eeea38b6f41a7d968dff6004eca826a549965f
|
[] |
no_license
|
lengning/gClinBiomarker
|
d0115d4a699ca12866b9776c6a3835d9c0ece6c9
|
726d3bb9edbd8ecc450fc650ea7ab9922737629b
|
refs/heads/master
| 2021-10-24T06:16:06.064819
| 2019-03-22T18:25:07
| 2019-03-22T18:25:07
| 125,939,464
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,700
|
r
|
longbmkr.R
|
# Draw n samples from a two-component Gaussian mixture.
#
# Args:
#   n         : number of draws (>= 0).
#   cpct      : mixing proportion in [0, 1] -- the probability that a
#               draw comes from the second component (mu2, sig2).
#   mu1, sig1 : mean / sd of the first component.
#   mu2, sig2 : mean / sd of the second component.
#
# Returns a numeric vector of length n, visibly (the original function
# ended in an assignment, so its result was returned invisibly).
rbimodal <- function (n, cpct, mu1, mu2, sig1, sig2) {
  stopifnot(n >= 0, cpct >= 0, cpct <= 1)
  comp1 <- rnorm(n, mean = mu1, sd = sig1)
  comp2 <- rnorm(n, mean = mu2, sd = sig2)
  # Bernoulli(cpct) selector: 1 -> second component, 0 -> first.
  pick2 <- rbinom(n, size = 1, prob = cpct)
  comp1 * (1 - pick2) + comp2 * pick2
}
#' Simulated longitudinal biomarker data
#'
#' Contains the following variables
#'
#' @format A data frame with 8,500 rows and 8 variables:
#' \describe{
#' \item{pid}{Patient Identifier}
#' \item{trt}{Treatment Arm (1, 0)}
#' \item{sex}{Patient Sex (m, f)}
#' \item{age}{Patient Age}
#' \item{edu}{Patient years of education}
#' \item{bmkr}{Baseline biomarker reading}
#' \item{vm}{Patient visit time in months}
#' \item{ep}{Biomarker endpoint reading}
#' }
#'
"longbmkr"
# Simulate the longbmkr dataset: 1000 patients x 10 visits with a linear
# endpoint trajectory whose slope depends on treatment, sex, education,
# age and the baseline biomarker, then randomly drop 15% of visit rows.
# Requires dplyr/magrittr (loaded earlier in this data-raw script).
# NOTE(review): no set.seed(), so the saved dataset is not reproducible
# from this script.
longbmkr <-
  # build list of parameters
  list(
    patients_n = 1000, # number of patients
    visits_n = 10, # number of visits
    visits_d = 6, # months of separation between visits
    endpt_b = 200, # lambda used for poisson distribution
    endpt_m = 5, # endpoint slope
    endpt_m_sd = 1.5 # endpoint slope standard deviation
  ) %>%
  # set up variables given initial parameter list
  # NOTE(review): runif(1:.$patients_n) relies on runif() taking the
  # length of its first argument; the idiomatic call is
  # runif(.$patients_n).
  (function(.) { data.frame(list(
    pid = rep(1:.$patients_n, each=.$visits_n),
    trt = rep(round(runif(1:.$patients_n)), each=.$visits_n),
    sex = rep(round(runif(1:.$patients_n)), each=.$visits_n),
    age = 18 + rep(runif(1:.$patients_n) * (80 - 18), each=.$visits_n),
    edu = 8 + rep(rpois(.$patients_n, lambda=4), each=.$visits_n),
    bmkr = rep(rbimodal(.$patients_n, 0.5, 1, 2, 0.2, 0.5), each=.$visits_n),
    vm = rep((1:.$visits_n - 1) * .$visits_d, .$patients_n),
    ep_b = rep(rpois(.$patients_n, lambda=.$endpt_b), each=.$visits_n),
    ep_m = rep(.$endpt_m, .$patients_n * .$visits_n),
    ep_s = rep(.$endpt_m_sd, .$patients_n * .$visits_n)
  )) }) %>%
  # build timecourse data and add noise
  group_by(pid) %>%
  #mutate(ep_m = rep(rnorm(1, first(ep_m) * (1 - first(trt)), first(ep_s)), n())) %>%
  ungroup() %>%
  # Endpoint = noisy baseline + visit-month times a slope built from
  # treatment, sex, education, age and biomarker effects.
  # NOTE(review): `bmkr/mean(bmkr) ^ 3` parses as bmkr/(mean(bmkr)^3)
  # because ^ binds tighter than / -- confirm (bmkr/mean(bmkr))^3 was
  # not the intent.
  mutate(ep = rnorm(n(), ep_b, ep_b/10) + vm *
           (1 * rnorm(n(), ep_m, ep_s)
            + (sex - 0.5) * rnorm(n(), 1, 0.2)
            + (mean(edu) - edu)/mean(edu) * rnorm(n(), 0.14, 0.05)
            + (mean(age) - age)/mean(age) * rnorm(n(), 0.18, 0.05)
            + (bmkr/mean(bmkr) ^ 3 ) * rnorm(n(), 0.7, 0.05)
            + (0.5 - trt) * rnorm(n(), 2, 0.2)) ) %>%
  #mutate(ep = rnorm(n(), ep, ep_b * 0.01 * (vm * 0.5 + 12)))%>%
  # Recode sex, drop the intermediate slope columns, keep 85% of rows
  # (10,000 -> 8,500, matching the documented row count), and tidy up.
  mutate(sex = ifelse(sex == 1, "f", "m")) %>%
  select(-ep_m, -ep_s, -ep_b) %>%
  sample_frac(0.85) %>%
  arrange(pid, vm) %>%
  mutate(trt = as.factor(trt),
         sex = as.factor(sex))
# Write the dataset into the package's data/ directory.
usethis::use_data(longbmkr, overwrite = TRUE)
|
0fd469c5fadafe7611127b9c1c27cec20fa688eb
|
97c2cfd517cdf2a348a3fcb73e9687003f472201
|
/R/src/QFPortfolio/tests/testExposure.r
|
b10cef8437e3c3ab6bc5850416ced11257e5f6c6
|
[] |
no_license
|
rsheftel/ratel
|
b1179fcc1ca55255d7b511a870a2b0b05b04b1a0
|
e1876f976c3e26012a5f39707275d52d77f329b8
|
refs/heads/master
| 2016-09-05T21:34:45.510667
| 2015-05-12T03:51:05
| 2015-05-12T03:51:05
| 32,461,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,475
|
r
|
testExposure.r
|
# Exposure tests
#
# Author: RSheftel
###############################################################################
library(QFPortfolio)
testdata <- squish(system.file("testdata", package="QFPortfolio"),'/Exposure/')
# Helper: assert that ep$smash() has the same columns (in any order) and the
# same values as `expected`.
smashCheck <- function(ep,expected){
    checkSame(sort(names(ep$smash())), sort(names(expected)))
    checkSame(all(ep$smash()==expected[,names(ep$smash())]),TRUE)
}
# Exposure() constructs with the default group 'AllSystemsQ'.
test.constructor <- function(){
    checkInherits(Exposure(), "Exposure")
    checkSame(Exposure(verbose=FALSE)$groupName(), 'AllSystemsQ')
}
# Child-group weights: defaults to 1, accepts partial updates, and rejects
# unknown child group names.
test.weights <- function(){
    ep <- Exposure('TestCurveGroup')
    checkSame(ep$groupName(), 'TestCurveGroup')
    expected <- c(1,1)
    names(expected) <- c('TestCurveGroup1', 'TestCurveGroup2')
    checkSame(ep$weights(), expected)
    ep$weights(list(TestCurveGroup2=4, TestCurveGroup1=5))
    expected[1:2] <- c(5,4)
    checkSame(ep$weights(), expected)
    shouldBombMatching(ep$weights(list(BadName=1, TestCurveGroup2=1)), 'Not a child group name: BadName')
    ep$weights(list(TestCurveGroup1=99))
    expected['TestCurveGroup1'] <- 99
    checkSame(ep$weights(), expected)
}
# smash() flattens the group/market/system/pv hierarchy with base, user and
# combined weights.
test.smashFrame <- function(){
    ep <- Exposure('TestExposure')
    expected <- data.frame( group=c('TestExposureSub1','TestExposureSub1','TestExposureSub1','TestExposureSub2','TestExposureSub2'),
                            market=c('TEST.SP.1C','TEST.SP.1C','TEST.US.1C','RE.TEST.TU.1C','RE.TEST.TY.1C'),
                            system=c('TestSystem1','TestSystem1','TestSystem1','TestSystem2','TestSystem2'),
                            pv=c('Fast','Slow','Slow','TestPV3','TestPV3'),
                            weight.base=c(1,0.5,0.5,1,1), weight.user=c(1,1,1,1,1),
                            weight = c(1,0.5,0.5,1,1),stringsAsFactors=FALSE)
    smashCheck(ep, expected)
}
# Sizing parameters: raw values, weighting by user weights, and the rolled-up
# sizing() frame per system/pv.
test.sizingParameter <- function(){
    ep <- Exposure('TestExposure')
    ep$addSizingParameter()
    expected <- data.frame( group=c('TestExposureSub1','TestExposureSub1','TestExposureSub1','TestExposureSub2','TestExposureSub2'),
                            market=c('TEST.SP.1C','TEST.SP.1C','TEST.US.1C','RE.TEST.TU.1C','RE.TEST.TY.1C'),
                            system=c('TestSystem1','TestSystem1','TestSystem1','TestSystem2','TestSystem2'),
                            pv=c('Fast','Slow','Slow','TestPV3','TestPV3'),
                            weight.base=c(1,0.5,0.5,1,1), weight.user=c(1,1,1,1,1),
                            weight = c(1,0.5,0.5,1,1),stringsAsFactors=FALSE)
    expected$sizingParameter <- c(rep('TestParameter1',3),rep('TestStrategy3Param1',2))
    expected$sizingParameter.orig <- c(10,15,15,1,1)
    expected$sizingParameter.weighted <- c(10,15*0.5,15*0.5,1,1)
    smashCheck(ep,expected)
    # user weights scale both the combined weight and the weighted parameter
    weights <- list(TestExposureSub1=5, TestExposureSub2=10)
    expected$weight <- c(5,5*0.5,5*0.5,10,10)
    expected$weight.user <- c(5,5,5,10,10)
    expected$sizingParameter.weighted <- c(50,75*0.5,75*0.5,10,10)
    ep$weights(weights)
    smashCheck(ep,expected)
    expected <- data.frame( system=c('TestSystem1','TestSystem1','TestSystem2'),
                            pv = c('Fast','Slow','TestPV3'),
                            parameter = c('TestParameter1','TestParameter1','TestStrategy3Param1'),
                            original = c(10,15,1),
                            weighted = c(50,75*0.5,10),
                            weight.final = c(5,5*0.5,10),
                            weight.user = c(5,5,10),
                            weight.base = c(1,0.5,1))
    checkSameLooking(ep$sizing(), expected)
}
# riskDollars(): default system aggregation, two-level aggregations checked
# against stored .dput fixtures, and a bomb on three aggregation levels.
test.riskDollars <- function(){
    ep <- Exposure('TestExposure')
    ep$addRiskDollars()
    expected <- data.frame(system=c('TestSystem1','TestSystem2'), riskDollars=c(2500,0.02))
    checkSameLooking(ep$riskDollars(),expected)
    checkSame(ep$riskDollars('system+pv',margins=TRUE), dget(squish(testdata,'riskDollars_system_pv.dput')))
    checkSame(ep$riskDollars('group+system',margins=TRUE), dget(squish(testdata,'riskDollars_group_system.dput')))
    checkSame(ep$riskDollars('market+system',margins=TRUE), dget(squish(testdata,'riskDollars_market_system.dput')))
    shouldBombMatching(ep$riskDollars('market+system+pv'), 'Only works for 2 or less aggregationLevels.')
}
# writeSizingCsv() output matches the stored golden CSV byte-for-byte.
test.sizingWriteCsv <- function(){
    ep <- Exposure('TestExposure')
    ep$weights(list(TestExposureSub1=5, TestExposureSub2=10))
    filename <- ep$writeSizingCsv(filename=squish(testdata,'sizing.csv'),asOfDate='19520105')
    fileMatches(filename, squish(testdata,'testSizingCsv.csv'))
}
# Curve aggregation: constant vs column-driven weights, and NetProfit of the
# collapsed WeightedCurves per system.
test.aggregateCurves <- function(){
    ep <- Exposure('TestExposure')
    ep$loadCurves(squish(testdata,'curves/'))
    ep$weights(list(TestExposureSub1=5, TestExposureSub2=10))
    ep$aggregateCurvesWeights(2)
    checkSame(as.numeric(ep$aggregateCurvesObject()$smash()$weight), rep(2,5))
    ep$aggregateCurvesWeights('weight')
    checkSame(as.numeric(ep$aggregateCurvesObject()$smash()$weight), c(5,2.5,2.5,10,10))
    collapse <- ep$aggregateCurves(aggregationLevels=c('system','pv'), weights=NULL)
    checkInherits(collapse[[1]], 'WeightedCurves')
    collapse <- ep$aggregateCurves(aggregationLevels='system', weights='weight.user')
    checkSame(round(collapse$TestSystem1$curve()$metric(NetProfit),2), 8916642.45)
    checkSame(collapse$TestSystem2$curve()$metric(NetProfit), 19484925)
}
# metricFrame(): full-history frame with ALL/DiversityBenefit rows and
# percentages, then a date-restricted frame without them.
test.metricFrame <- function(){
    ep <- Exposure('TestExposure')
    ep$loadCurves(squish(testdata,'curves/'))
    expected <- data.frame( id=c('TestSystem2_TestPV3', 'TestSystem1_Fast', 'TestSystem1_Slow','ALL','DiversityBenefit'),
                            NetProfit=c(1948492, 0, 1783328, 3731821, 0),
                            NetProfit.percent=c(0.52213, 0, 0.47787, 1, 0))
    mf <- ep$metricFrame(aggregationLevels=c('system','pv'),metrics=list(NetProfit,DailyStandardDeviation),
                         weights=NULL,percentages=TRUE,allRow=TRUE)
    checkSameLooking(expected$id, mf$id)
    checkSameLooking(expected$NetProfit, round(mf$NetProfit,0))
    checkSameLooking(expected$NetProfit.percent, round(mf$NetProfit.percent,5))
    # restricting the range changes the metric values
    ep$range(Range('2009-04-13','2009-05-29'))
    expected <- data.frame( id=c('TestSystem2_TestPV3', 'TestSystem1_Fast', 'TestSystem1_Slow'), NetProfit=c(27800, 0, 71176))
    mf <- ep$metricFrame(aggregationLevels=c('system','pv'),metrics=list(NetProfit,DailyStandardDeviation),
                         weights=NULL,percentages=FALSE,allRow=FALSE)
    checkSameLooking(expected$id, mf$id)
    checkSameLooking(expected$NetProfit, round(mf$NetProfit,0))
}
# correlations(): spot-check one off-diagonal entry, full history and a
# restricted date range.
test.correlations <- function(){
    ep <- Exposure('TestExposure')
    ep$loadCurves(squish(testdata,'curves/'))
    corrs <- ep$correlations(aggregationLevels=c('system','pv'))
    checkSame(round(corrs[3,2],5), -0.06094)
    ep$range(Range('2009-04-13','2009-05-29'))
    corrs <- ep$correlations(aggregationLevels=c('system','pv'))
    checkSame(round(corrs[3,2],5), -0.28919)
}
|
d0114fd2f16d68674f7e93ff9cbaa267467eb8a2
|
7869559a54d096edc4fcc10769f3470cf6c1527f
|
/plot3.R
|
d68f8889ba3211621d73748c51b0cc40ab23334f
|
[] |
no_license
|
scleeton/ExData_Plotting1
|
50174e9708d4aebb24a4e2060484f327d50d8b87
|
9e154d7f48b8376b3f21bf21fa8f5531856f5fb9
|
refs/heads/master
| 2021-01-15T11:42:53.900826
| 2016-04-28T13:46:06
| 2016-04-28T13:46:06
| 57,170,850
| 0
| 0
| null | 2016-04-27T00:20:49
| 2016-04-27T00:20:49
| null |
UTF-8
|
R
| false
| false
| 958
|
r
|
plot3.R
|
# Plot the three energy sub-metering series for 2007-02-01/02 of the UCI
# household power consumption data and save to plot3.png (480x480).
# NOTE(review): setwd() in a script is brittle; kept for assignment parity.
setwd("~/Coursera/Exploratory Data Analysis")
library(data.table)
# "?" encodes missing values in the raw file
dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
vars <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
# keep only the two target days, then rebuild a POSIX timestamp per reading
chartDat <- subset(dat, Date == "2007-02-01" | Date == "2007-02-02", select = c("Date", "Time", vars))
chartDat$Time <- strptime(paste(chartDat$Date, chartDat$Time), "%Y-%m-%d %H:%M:%S")
png("plot3.png", width = 480, height = 480)
# sub-metering 1 in black, y-axis sized to the max across all three series
plot(chartDat$Time
     , chartDat$Sub_metering_1
     , col = "black"
     , type = "l"
     , xlab = ""
     , ylab = "Energy sub metering"
     , ylim = c(0, max(chartDat[vars])))
lines(chartDat$Time
      , chartDat$Sub_metering_2
      , col = "red")
lines(chartDat$Time
      , chartDat$Sub_metering_3
      , col = "blue")
legend("topright"
       , vars
       , col = c("black", "red", "blue")
       , lty = c(1, 1, 1))
dev.off()
|
3ff6baeb4e947f1cf6528c52cbd82e148ced1453
|
257b39265a6b796d54e0e861825984e7e205bbd8
|
/man/getStandingKatzData.Rd
|
0b2a2dbcf127a1d917903c14f061d64a6c0819bf
|
[] |
no_license
|
yaoguodong/zFactor-1
|
230c8576f004efb6bde669c60e249fd36134ca4f
|
66d6f0732e35c8e84bcd98d28251a0badc7fe423
|
refs/heads/master
| 2020-04-20T04:26:18.046950
| 2017-10-23T06:22:46
| 2017-10-23T06:22:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 821
|
rd
|
getStandingKatzData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Standing-Katz.R
\name{getStandingKatzData}
\alias{getStandingKatzData}
\title{Read a file with readings from Standing-Katz chart. Similar to
`getStandingKatzCurve` function but this gets only the data.}
\usage{
getStandingKatzData(tpr = 1.3, pprRange = "lp")
}
\arguments{
\item{tpr}{Pseudo-reduced temperature curve in SK chart. Default Tpr=1.30}
\item{pprRange}{Takes one of two values: "lp": low pressure, or "hp" for
high pressure. Default is "lp".}
}
\description{
Read a .txt file that was created from readings of the Standing-Katz chart
and retrieve the points
}
\examples{
getStandingKatzData(tpr = 1.5, pprRange = 'lp')
# with a vector
#tpr <- c(1.05, 1.1, 1.2)
#getStandingKatzData(tpr, pprRange = 'lp')
}
|
177f50c136380dbe75ff74ca266dff8c71fe430f
|
cc7b0df200fa7561c89a023d6a4ee98ad787da90
|
/R/stat_map.R
|
f3af62046b25ecfc327ec8c6c6f4f86291afa53b
|
[
"MIT"
] |
permissive
|
uribo/easyestat
|
cf8de14c4a389b69034cb22bedf042640d6d4931
|
f91204a7d1f89bad3a720a3bf11d85105c93f9c1
|
refs/heads/main
| 2022-07-20T02:16:14.188138
| 2022-06-21T07:39:51
| 2022-06-21T07:39:51
| 232,941,767
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,577
|
r
|
stat_map.R
|
#' Download Japan prefecture map from e-Stat
#' @param prefcode The JIS-code for prefecture and city identical number.
#' If prefecture, must be from 1 to 47.
#' @param dest If *TRUE*, to unzip downloaded files.
#' @param .survey_id survey id (A00200521YYYY is small area,
#' D00200521YYYY is did area)
#' @inheritParams utils::unzip
#' @seealso [https://www.e-stat.go.jp/gis](https://www.e-stat.go.jp/gis)
#' @export
download_stat_map <- function(prefcode, exdir = ".", dest = TRUE, .survey_id = c("A002005212015")) {
  # zero-pad to the two-digit JIS prefecture code and validate (01..47)
  prefcode <-
    stringr::str_pad(prefcode, width = 2, pad = "0")
  rlang::arg_match(prefcode, values = stringr::str_pad(seq.int(47), width = 2, pad = "0"))
  # allowed survey ids: small-area (A...) and DID (D...) boundaries by year
  .survey_id <-
    rlang::arg_match(
      .survey_id,
      c(paste0(rep(c("A00200521", "D00200521"), each = 2),
               rep(c(2015, 2010), 2)),
        "A002005212005",
        "A002005212000"))
  # NOTE: "dlserveyId" is the spelling the e-Stat API expects; do not "fix" it
  x <-
    glue::glue(
      "https://www.e-stat.go.jp/gis/statmap-search/data?dlserveyId={.survey_id}&code={prefcode}&coordSys=1&format=shape&downloadType=5&datum=2000"
    )
  qry <-
    purrr::pluck(httr::parse_url(x), "query")
  # destination file name encodes survey id, coordinate system and prefecture
  destfile <-
    glue::glue(
      "{exdir}/{serveyId}{coordSys}{prefcode}.zip",
      serveyId = qry$dlserveyId,
      coordSys = dplyr::case_when(qry$coordSys == "1" ~ "DDSWC")
    )
  utils::download.file(url = x,
                       destfile = destfile)
  if (dest == TRUE) {
    # unzip next to the archive, into a directory named after it
    utils::unzip(
      zipfile = destfile,
      exdir = glue::glue("{tools::file_path_sans_ext(destfile)}")
    )
  }
}
#' Read e-Stat aggregation unit boundary data
#' @description
#' The GIS data downloaded from e-Stat is read and converted into an easy to process [sf::sf] format.
#' You can use [download_stat_map()] to download the data.
#' @param file Path to downloaded e-Stat shape file
#' @param type Currently, only "aggregate_unit" is used.
#' @param remove_cols Whether or not to remove redundant columns.
#' When *TRUE* (the default), the following columns are removed (See details).
#' These columns can be substituted or sought with values from other columns.
#' * S_AREA
#' * KAxx_, KAxx_id
#' * KEN, KEN_NAME
#' * DUMMY1
#' * X_CODE, Y_CODE
#' @export
read_estat_map <- function(file, type = "aggregate_unit", remove_cols = TRUE) {
  # declare NSE column names to satisfy R CMD check
  x_code <- y_code <- s_area <- ken <- ken_name <- dummy1 <- NULL
  area <- perimeter <- menseki <- km2 <- m2 <- m <- NULL
  d <-
    sf::st_read(file,
                as_tibble = TRUE,
                stringsAsFactors = FALSE)
  # lower-case all column names for consistent downstream selection
  d <-
    d %>%
    purrr::set_names(d %>%
                       names() %>%
                       tolower())
  # attach physical units where the e-Stat layout provides them
  if (utils::hasName(d, "area")) {
    d <-
      d %>%
      dplyr::mutate(area = units::set_units(area, m2),
                    perimeter = units::set_units(perimeter, m))
  }
  if (utils::hasName(d, "menseki")) {
    d <-
      d %>%
      dplyr::mutate(menseki = units::set_units(menseki, km2))
  }
  if (remove_cols == TRUE) {
    ncols <-
      ncol(d)
    # only the 36-column layout carries the redundant columns listed above
    if (ncols == 36L) {
      d <-
        d %>%
        dplyr::select(
          -x_code, -y_code,
          -s_area,
          -tidyselect::contains("kaxx_"),
          -ken, -ken_name, -dummy1)
    } else if (ncols == 26L) {
      # NOTE(review): 26-column layout intentionally left unchanged (no-op)
      d <-
        d
    }
  }
  d
}
# prefcode = "08"
# .survey_id = "A002005212010"
# https://www.e-stat.go.jp/gis/statmap-search?page=1&type=2&aggregateUnitForBoundary=A&toukeiCode=00200521&toukeiYear=2005&serveyId=A002005212005&prefCode=08&coordsys=1&format=shape
# x
# https://www.e-stat.go.jp/gis/statmap-search/data?dlserveyId=A002005212010&code=08&coordSys=1&format=shape&downloadType=5
|
21005212589f7f08d03dbb96822c0c2613613656
|
39064ef5f61acaecd5691f794c4021835a394b0e
|
/05 visnetwork/05_04 igraph/igraph-and-visNetwork.R
|
5700f3e9022cf0ec198f20ab746b66a80d2c6dc8
|
[] |
no_license
|
barathevergreen/interactive_vis_html_widgets_R
|
4c6f199649119579cb0db727977a60c7611f0070
|
72b6d67cdc4853421d951b951861074c3378e616
|
refs/heads/master
| 2023-06-19T06:25:21.157247
| 2021-07-10T04:51:26
| 2021-07-10T04:51:26
| 384,606,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,137
|
r
|
igraph-and-visNetwork.R
|
# Build a letters-correspondence network from node/edge CSVs, render it with
# visNetwork, then convert to igraph to extract and plot connected components.
library("tidyverse")
library("visNetwork")
library("igraph")
# ======= Basic network
map_nodes <- read_csv("data/nodes.csv")
map_edges <- read_csv("data/edges.csv")
# visNetwork needs an integer `id` plus `label`/`title` (hover text) columns
map_nodes <- map_nodes %>%
  mutate(id = row_number()) %>%
  mutate(title = location,
         label = city) %>%
  select(id, everything())
# translate location strings in the edge list into the node ids created above
map_edges <- map_edges %>%
  mutate(
    from = plyr::mapvalues(send.location, from = map_nodes$location, to = map_nodes$id),
    to = plyr::mapvalues(receive.location, from = map_nodes$location, to = map_nodes$id)
  ) %>%
  select(from, to, everything())
visNetwork(map_nodes,
           map_edges)
#use graph.data.frame to convert to igraph object
letters_igraph <- graph.data.frame(map_edges,
                                   vertices = map_nodes)
#verify igraph object
class(letters_igraph)
#Arrange and get all details - from largest to smallest
decompose(letters_igraph)
#Biggest component
decompose(letters_igraph)[[1]]
#Plot a vis
#by default idToLabel is TRUE
decompose(letters_igraph)[[1]] %>%
  visIgraph()
|
2855ec9d4183065e30e524d24886151d8544c565
|
f250a44c25c77ef54eb6c2ff10644fd0621a0e51
|
/STAT 575 Final Project.R
|
e157ab61ee6d916af1d38b70430fb12dba712470
|
[] |
no_license
|
BenRohlfing/STAT-575-Final-Project
|
fe72a14936c8d312843d339c36e069a7ab232406
|
a67655836466d10ca03d813973cd1cf7f4a419de
|
refs/heads/master
| 2020-12-08T03:35:56.845718
| 2020-01-09T19:18:27
| 2020-01-09T19:18:27
| 232,873,020
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,433
|
r
|
STAT 575 Final Project.R
|
# Simulation study of multivariate normality tests (Mardia skewness/kurtosis,
# Shapiro-Wilk, energy test) on a contaminated bivariate normal.
library(energy)
library(mlbench)
library(mvtnorm)
# d: dimension, n: sample size, m: Monte Carlo replicates, alpha: test level
d <- 2
n <- 30
m <- 1000
alpha <- .1
# critical values: normal approximation for kurtosis, chi-square for skewness
(cvk <- qnorm(1-(alpha/2), d*(d+2), sqrt(8*d*(d+2)/n)))
(cvs <- qchisq(1-alpha, (d*(d+1)*(d+2))/6))
sigma <- matrix(c(1,0,0,1), ncol=2)
x <- rmvnorm(n, mean=c(0,0), sigma=sigma, method="chol")
# NOTE(review): these calls run before skew()/kur() are defined below, and the
# sample-size argument (20 / 5) disagrees with nrow(x) == 30 — confirm intent.
as.integer(abs(skew(x,2,20)) >= cvs)
as.integer(abs(kur(x,2,20)) >= cvk)
abs(skew(x,2,5))
abs(kur(x,2,5))
#Algorithm for sample multivariate normal skewness
# script-level scratch buffers for the skewness computation (also reused in
# the scratch-work section at the bottom of the file)
zbar <- numeric(d)
bz <- matrix(NA, nrow=n, ncol=n)
skew <- function(z, d, n) {
  # Mardia's multivariate skewness test statistic n * b_{1,d} / 6.
  #
  # z: n x d data matrix; d: dimension (kept for interface compatibility —
  # the computation infers it from z); n: number of rows of z.
  # Returns the scalar statistic, asymptotically chi-square with
  # d(d+1)(d+2)/6 degrees of freedom under multivariate normality.
  #
  # Rewritten from the original double loop: vectorized, and no longer reads
  # the script-level `zbar`/`bz` buffers (whose fixed 30x30 size silently
  # corrupted the sum whenever n != 30).
  ic <- solve(cov(z))
  centered <- sweep(z, 2, colMeans(z))
  # q[i, j] = (z_i - zbar)' S^{-1} (z_j - zbar)
  q <- centered %*% ic %*% t(centered)
  b1 <- sum(q^3) / n^2
  n * b1 / 6
}
(s <- skew(x,d,n))
#Algorithm for sample multivariate normal kurtosis
xbar <- numeric(d)
bx <- numeric(n)
kur <- function(x, d, n) {
  # Mardia's multivariate kurtosis b_{2,d}: the mean of squared quadratic
  # forms (x_i - xbar)' S^{-1} (x_i - xbar) over the rows of x.
  #
  # d and n are kept for interface compatibility; the computation derives
  # everything from x itself. This fixes the original's reliance on the
  # script-level `xbar`/`bx` buffers, whose fixed length-30 `bx` polluted
  # the mean with stale entries whenever n != 30.
  ic <- solve(cov(x))
  centered <- sweep(x, 2, colMeans(x))
  # row-wise quadratic forms via rowSums((C %*% S^-1) * C), squared, averaged
  mean(rowSums((centered %*% ic) * centered)^2)
}
(k <- kur(x,d,n))
#Multivariate Normality Tests
# Power study: contaminate a bivariate normal with variance-10 components at
# mixing rate epsilon and estimate each test's rejection rate over m replicates.
epsilon <- seq(0,1,.05)
N <- length(epsilon)
y <- numeric(2*n)
Y <- matrix(y, ncol=2)
skewness <- kurtosis <- shapwilk <- energy <- numeric(m)
m.skewness <- m.kurtosis <- m.shapwilk <- m.energy <- numeric(N)
for(i in 1:N){ #for each epsilon
  e <- epsilon[i]
  for(j in 1:m){ #for each replicate
    # each observation draws sigma = 1 w.p. 1-e, sigma = 10 w.p. e
    isigma <- sample(c(1,10), replace=TRUE,
                     size=n, prob=c(1-e,e))
    for(k in 1:n){ #creating the multivariate distribution
      sigma <- matrix(c(isigma[k],0,0,isigma[k]),ncol=2)
      Y[k,1:2] <- rmvnorm(1,mean=c(0,0),
                          sigma=sigma)
    }
    # record 1 when each test rejects normality at level alpha
    skewness[j] <- as.integer(abs(skew(Y,d,n)) >= cvs)
    kurtosis[j] <- as.integer(abs(kur(Y,d,n)) >= cvk)
    shapwilk[j] <- as.integer(
      shapiro.test(Y)$p.value <= alpha)
    energy[j] <- as.integer(
      mvnorm.etest(Y, R=200)$p.value <= alpha)
  }
  m.skewness[i] <- mean(skewness)
  m.kurtosis[i] <- mean(kurtosis)
  m.shapwilk[i] <- mean(shapwilk)
  m.energy[i] <- mean(energy)
  print(c(epsilon[i], m.skewness[i], m.kurtosis[i],
          m.shapwilk[i], m.energy[i]))
}
#Results
data.frame(epsilon = epsilon, skewness = m.skewness, kurtosis = m.kurtosis,
           shapiro.wilk = m.shapwilk, energy = m.energy)
#Plot the empirical estimates of power
plot(epsilon, m.skewness, ylim=c(0,1), type="l",
     xlab = "epsilon", ylab = "power")
lines(epsilon, m.kurtosis, lty=2, col="red")
lines(epsilon, m.shapwilk, lty=3, col="blue")
lines(epsilon, m.energy, lty=4, col = "green")
legend("topright", 1, col = c("black", "red", "blue", "green"),
       c("skewness", "kurtosis", "S-W", "energy"),
       lty = c(1,2,3,4), inset = .02)
##Scratch Work##
# NOTE(review): exploratory scratch kept from development. Several lines
# shadow base functions (`mean`, `c`) or would error if run top-to-bottom;
# nothing below is required by the study above.
for(j in 1:m){
  isigma <- sample(c(1,10), replace=TRUE,
                   size=n, prob=c(1-e,e))
  for(k in 1:n){
    sigma <- matrix(c(isigma[k],0,0,isigma[k]),ncol=2)
    Y[k,1:2] <- rmvnorm(1,mean=c(0,0),
                        sigma=sigma)
  }
}
#Checking means between two multivariate normal dist random generations
z1 <- rmvnorm(20, mean=c(0,0), sigma=matrix(c(1,0,0,1), ncol=2))
data.frame(mean(z1[,1]), mean(z1[,2]))
z2 <- numeric(20)
Z2 <- matrix(z2, ncol=2)
for(k in 1:10){
  Z2[k,1:2] <- rmvnorm(1,mean=c(0,0),
                       sigma=matrix(c(1,0,0,1), ncol=2))
}
data.frame(mean(Z2[,1]), mean(Z2[,2]))
#Testing sample function
# NOTE(review): this assignment shadows base::mean for the rest of the session
mean <- sample(c(1,10), replace=TRUE,
               size=10, prob=c(.5,.5))
sample(1:ncol(mean), 1)
mean2 <- seq(0,10,1)
rnorm(10, mean2, 1)
(isigma <- sample(c(1,10), replace=TRUE,
                  size=1, prob=c(.5,.5)))
sigma <- matrix(c(isigma,0,0,isigma), ncol=2)
(samp <- replicate(5, rmvnorm(1, c(0,0), sigma)))
xbar[1] <- mean(x[,1])
xbar[2] <- mean(x[,2])
c <- cov(x)
ic <- solve(c)
X <- x[2,]-xbar
tX <- t(x[2,]-xbar)
(bx[2] <- (tX %*% ic %*% X))
for(j in 1:n){
  X <- x[j,]-xbar
  tX <- t(x[j,]-xbar)
  bx[j] <- (tX %*% ic %*% X)^2
}
# NOTE(review): `xbar` is a numeric vector, so `xbar(x,2)` calls it as a
# function and errors — presumably leftover experimentation.
(xbar(x,2))
mean(x[,1])
xbar[1] <- mean(x[,1])
sigma <- matrix(c(1,0,0,1), ncol=2)
z <- rmvnorm(n, mean=c(0,0), sigma=sigma, method="chol")
bZ <- matrix(NA, nrow=n, ncol=n)
c <- cov(z)
ic <- solve(c)
for(k in 1:d){
  zbar[k] <- mean(z[,k])
}
for(i in 1:n){
  tZ <- t(z[i,]-zbar)
  cZ <- tZ %*% ic
  for(j in 1:n){
    Z <- z[j,]-zbar
    bz[i,j] <- cZ %*% Z
    bZ[i,j] <- (bz[i,j])^3
  }
}
(b1 <- sum(bZ)/n^2)
n*b1/6
cvs
|
a590b1cd5062c063fcfb19e4237d05c6c3ffc1de
|
b3f764c178ef442926a23652c4848088ccd40dca
|
/man/pca.scoreplot.Rd
|
153634bb6440366c0b97e5dc609c8e019c1254f2
|
[] |
no_license
|
armstrtw/rrcov
|
23e7642ff2fd2f23b676d4ad8d5c451e89949252
|
684fd97cdf00750e6d6fd9f9fc4b9d3d7a751c20
|
refs/heads/master
| 2021-01-01T19:51:52.146269
| 2013-07-24T18:18:24
| 2013-07-24T18:18:24
| 11,597,037
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,153
|
rd
|
pca.scoreplot.Rd
|
\name{pca.scoreplot}
\alias{pca.scoreplot}
\title{
Score plot for Principal Components (objects of class 'Pca')
}
\description{
Produces a score plot from an object (derived from) \code{\link{Pca-class}}.
}
\usage{
pca.scoreplot(obj, i=1, j=2, main, id.n=0, \dots)
}
\arguments{
\item{obj}{an object of class (derived from) \code{"Pca"}.}
\item{i}{First score coordinate, defaults to \code{i=1}.}
\item{j}{Second score coordinate, defaults to \code{j=2}.}
\item{main}{The main title of the plot.}
\item{id.n}{ Number of observations to identify by a label. Defaults to \code{id.n=0}.}
\item{\dots}{optional arguments to be passed to the internal graphical functions.}
}
%\details{}
%\value{}
%\references{}
%\note{}
\author{Valentin Todorov \email{valentin.todorov@chello.at}}
\seealso{
\code{\link{Pca-class}},
\code{\link{PcaClassic}},
\code{\link{PcaRobust-class}}.
}
\examples{
require(graphics)
## PCA of the Hawkins Bradu Kass's Artificial Data
## using all 4 variables
data(hbk)
pca <- PcaHubert(hbk)
pca
pca.scoreplot(pca)
}
\keyword{robust}
\keyword{multivariate}
|
1105b83381ae845905a6092092ba7c245cac0ebe
|
3ed24cf1d44b746f91b04e79547975a8d52ceeae
|
/R/class-endpoint.R
|
3ef379bb4031da413c9da16746f358e412ff8037
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
52North/sensorweb4R
|
2a420c6a86554da44ef0917543dac862475b968b
|
b4f9d19df2421284ce83f975232b1b0c133ec924
|
refs/heads/master
| 2021-07-20T12:54:07.561001
| 2020-03-24T18:48:36
| 2020-03-24T18:48:36
| 24,712,323
| 8
| 7
|
Apache-2.0
| 2021-02-02T16:13:50
| 2014-10-02T08:43:07
|
R
|
UTF-8
|
R
| false
| false
| 6,248
|
r
|
class-endpoint.R
|
#' @include generic-methods.R
#' @include helper-methods.R
#' @include virtual-class-http-resource.R
NULL
#' @title Endpoint class
#' @description A class representing a Timeseries API endpoint.
#' @slot url The URL.
#' @slot label A human readable name.
#' @author Christian Autermann \email{c.autermann@@52north.org}
#' @exportClass Endpoint
#' @rdname Endpoint-class
#' @name Endpoint-class
setClass("Endpoint",
         contains = "HttpResource",
         slots = list(label = "character",
                      url = "character"),
         validity = function(object) {
             # url and label are parallel vectors: one endpoint per element
             errors <- assert.same.length(url = object@url,
                                          label = object@label)
             if (length(errors) == 0) TRUE else errors
         })
#' @export
#' @describeIn Endpoint-class Checks whether \code{x} is an \code{Endpoint}.
is.Endpoint <- function(x) is(x, "Endpoint")
#' @export
#' @describeIn Endpoint-class Coerces \code{x} into an \code{Endpoint}.
as.Endpoint <- function(x) as(x, "Endpoint")
#' @export
#' @rdname length-methods
setMethod("length",
          signature("Endpoint"),
          # number of endpoints held by the (vectorized) object
          function(x) length(resourceURL(x)))
# Canonicalize endpoint URLs: drop any query string, then any fragment, then
# trailing slashes, so equivalent endpoint URLs compare equal.
normalize.URL <- function(x) {
    without.query <- stringi::stri_replace_last_regex(x, "\\?.*", "")
    without.fragment <- stringi::stri_replace_last_regex(without.query, "#.*", "")
    stringi::stri_trim_right(without.fragment, pattern = "[^/]")
}
#' @export
#' @describeIn Endpoint-class Constructs a new \code{Endpoint}.
Endpoint <- function(url = character(), label = NULL, ...) {
    # normalize URLs, then recycle/convert labels to match their length
    url <- normalize.URL(as.character(url))
    label <- stretch(length(url), label, NA, as.character)
    new("Endpoint", url = url, label = label)
}
# convenience unions so methods can accept character URLs or a missing endpoint
setClassUnion("Endpoint_or_characters", c("Endpoint", "character"))
setClassUnion("Endpoint_or_NULL", c("Endpoint", "NULL"))
# Accessors for the endpoint's base URL and its standard API sub-resources
#' @rdname url-methods
setMethod("resourceURL",
          signature(x = "Endpoint"),
          function(x) x@url)
#' @rdname url-methods
setMethod("stationsURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "stations"))
#' @rdname url-methods
setMethod("servicesURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "services"))
#' @rdname url-methods
setMethod("timeseriesURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "timeseries"))
#' @rdname url-methods
setMethod("categoriesURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "categories"))
#' @rdname url-methods
setMethod("offeringsURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "offerings"))
#' @rdname url-methods
setMethod("featuresURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "features"))
#' @rdname url-methods
setMethod("proceduresURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "procedures"))
#' @rdname url-methods
setMethod("phenomenaURL",
          signature(x = "Endpoint"),
          function(x) subresourceURL(x, "phenomena"))
#' @rdname accessor-methods
setMethod("label",
          signature(x = "Endpoint"),
          function(x) x@label)
#' @rdname accessor-methods
setMethod("label<-",
          signature(x = "Endpoint",
                    value = "character_or_NULL"),
          function(x, value) {
              # recycle/convert the new labels to the endpoint's length
              x@label <- stretch(length(x), value, as.character(NA), as.character)
              invisible(x)
          })
# names() is an alias for label()
#' @rdname accessor-methods
setMethod("names",
          signature(x = "Endpoint"),
          function(x) sensorweb4R::label(x))
#' @rdname accessor-methods
setMethod("names<-",
          signature(x = "Endpoint",
                    value = "character_or_NULL"),
          function(x, value) {
              sensorweb4R::label(x) <- value
              invisible(x)
          })
# coercions: a character vector of URLs, or a list of endpoints, to Endpoint
setAs("character", "Endpoint",
      function(from) Endpoint(url = from))
setAs("list", "Endpoint",
      function(from) concat.list(from))
# Concatenate two endpoint vectors; either argument may be anything coercible
# to an Endpoint. URLs and labels are combined element-wise.
rbind2.Endpoint <- function(x, y) {
    lhs <- as.Endpoint(x)
    rhs <- as.Endpoint(y)
    combined.urls <- c(resourceURL(lhs), resourceURL(rhs))
    combined.labels <- c(label(lhs), label(rhs))
    Endpoint(url = combined.urls, label = combined.labels)
}
#' @rdname rbind2-methods
setMethod("rbind2",
          signature("Endpoint", "Endpoint"),
          function(x, y) rbind2.Endpoint(x, y))
#' @rdname rbind2-methods
setMethod("rbind2",
          signature("Endpoint", "ANY"),
          function(x, y) rbind2.Endpoint(x, as.Endpoint(y)))
#' @rdname rbind2-methods
setMethod("rbind2",
          signature("ANY", "Endpoint"),
          function(x, y) rbind2.Endpoint(as.Endpoint(x), y))
# rep() replicates URLs and labels in parallel
#' @rdname rep-methods
setMethod("rep", signature(x = "Endpoint"),
          function(x, ...) Endpoint(url = rep(resourceURL(x), ...),
                                    label = rep(label(x), ...)))
#' @export
# Draw one random fetched Timeseries from an endpoint: pick a random service,
# then a random station of that service, then a random timeseries there.
random.Timeseries <- function(e) {
    pick <- function(pool, n = 1) pool[sample(seq_len(length(pool)), n)]
    service <- pick(services(e))
    station <- pick(stations(e, service = service))
    series <- pick(timeseries(e, station = station))
    fetch(series)
}
#' Example API endpoints.
#'
#' \code{example.endpoints} returns an instance of \linkS4class{Endpoint}
#' that can be used for testing.
#' @param name Optional label of a single endpoint to select; when missing,
#' all example endpoints are returned.
#' @return R object with the further endpoints offered by the service or the
#' endpoint with the specified name
#' @author Daniel Nuest \email{d.nuest@@52north.org}
#' @author Christian Autermann \email{c.autermann@@52north.org}
#'
#' @export
#'
#' @examples
#' example.endpoints()
#' services(example.endpoints()[1])
#' example.endpoints("UoL")
example.endpoints <- function(name) {
    e <- Endpoint(url = c("http://sensorweb.demo.52north.org/sensorwebclient-webapp-stable/api/v1/",
                          "http://sosrest.irceline.be/api/v1/",
                          "http://www.fluggs.de/sos2/api/v1/",
                          "http://sensors.geonovum.nl/sos/api/v1/",
                          "http://www57.lamp.le.ac.uk/52n-sos-webapp/api/v1/"),
                  label = c("52N Demo",
                            "IRCEL-CELINE",
                            "WV",
                            "Geonovum",
                            "UoL"))
    if (missing(name)) e
    else e[label(e) == name]
}
|
6df2b5efa696441183450c3a61fb9d508e4668c1
|
bca194c55442436b19599cb90989c3b9d02083d4
|
/R/clean.adhb.revision.one.recalculated.op.dt.R
|
c5247036be09306c6b3e89be7cca6190a1ec6da9
|
[] |
no_license
|
mattmoo/checkwho_analysis
|
7a9d50ab202e7ac96b417c11227f9db429007c48
|
3474382423aad80b8061bfdf0a32c7632215640a
|
refs/heads/main
| 2023-07-14T22:33:54.320577
| 2021-08-24T23:27:37
| 2021-08-24T23:27:37
| 311,141,531
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,213
|
r
|
clean.adhb.revision.one.recalculated.op.dt.R
|
##' Reshape recalculated ADHB operation data from wide to long format.
##'
##' Selects the event identifiers plus the ten "Proc <i> Code"/"Proc <i> Desc"
##' column pairs, normalises the column names, then melts to one row per
##' (event, procedure), dropping rows with a missing procedure code and
##' ordering by event and procedure number.
##'
##' @title Clean recalculated operation data
##' @param adhb.revision.one.recalculated.event.op.patient.dt data.table with
##'   one row per theatre event and wide procedure code/description columns.
##' @return Unique data.table with columns `Theatre Event ID`, `Event ID`,
##'   `op.number`, `op.code`, `op.desc`, ordered by `Event ID` and op.number.
clean.adhb.revision.one.recalculated.op.dt <- function(adhb.revision.one.recalculated.event.op.patient.dt) {

  # Source column names are "Proc <i> Code" / "Proc <i> Desc" for i in 1..10,
  # EXCEPT procedure 8, whose description column is spelled "Proc 8 desc"
  # (lower-case d) in the source data — preserve that anomaly.
  old.code.cols <- paste("Proc", 1:10, "Code")
  old.desc.cols <- paste("Proc", 1:10, "Desc")
  old.desc.cols[8] <- "Proc 8 desc"

  # interleave code/desc pairs: code 1, desc 1, code 2, desc 2, ...
  old.proc.cols <- c(rbind(old.code.cols, old.desc.cols))

  adhb.revision.one.recalculated.op.dt = adhb.revision.one.recalculated.event.op.patient.dt[,
                                                                                            c("Theatre Event ID",
                                                                                              "Event ID",
                                                                                              old.proc.cols),
                                                                                            with = FALSE]

  # normalise to "Proc Code <i>" / "Proc Desc <i>" so melt() patterns match
  new.proc.cols <- c(rbind(paste("Proc Code", 1:10),
                           paste("Proc Desc", 1:10)))
  setnames(
    adhb.revision.one.recalculated.op.dt,
    old = old.proc.cols,
    new = new.proc.cols
  )

  # wide -> long: one row per (event, procedure number); drop empty slots
  adhb.revision.one.recalculated.op.dt = melt(adhb.revision.one.recalculated.op.dt,
                                              measure = patterns("^Proc Code ", "^Proc Desc "),
                                              value.name = c("op.code", "op.desc"),
                                              variable.name = 'op.number')[!is.na(op.code)]

  adhb.revision.one.recalculated.op.dt = unique(adhb.revision.one.recalculated.op.dt)

  setorder(adhb.revision.one.recalculated.op.dt, `Event ID`, op.number)

  return(adhb.revision.one.recalculated.op.dt)
}
|
3a35669b73444da484bd89b65bc0ab697fd7bd29
|
518b2416600395af0058186dba8fd1e15651d3c2
|
/server.R
|
a209315ddef6f0f5acc4dda4b02dfbf04a51d2ff
|
[] |
no_license
|
cmdr/ShinyProject
|
dc2ce0410e0cdc1bfd8eb063e788090c63408ae5
|
1c82a042769d324713b4e3eb9f79d7504e1efd21
|
refs/heads/master
| 2021-01-01T19:35:16.817842
| 2015-06-21T21:51:23
| 2015-06-21T21:51:23
| 37,827,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,144
|
r
|
server.R
|
# This is the server part for a Shiny application
# It's a part of the student project for Developing Data Products course at Coursera
library(shiny)
library(klaR)
library(glmnet)
library(pROC)
data(GermanCredit)
# Server logic: fit a cross-validated logistic glmnet credit-risk model on
# WoE-transformed GermanCredit data and report Gini statistics.
shinyServer(function(input, output) {
    ### Preprocess "GermanCredit" data set
    # Ids of categorical predictors (column 21 is the response, credit_risk)
    cat_data_id <- setdiff(which(unname(unlist(lapply(GermanCredit, function(x) "factor" %in% class(x))))), 21)
    # Evaluate WoE (weight of evidence) of the categorical predictors
    woe_data <- woe(GermanCredit[, cat_data_id], GermanCredit$credit_risk, appont = 0.5)
    # Create the final data frame: WoE columns plus the numeric predictors
    GermanCreditWOE <- cbind(woe_data$xnew, GermanCredit[, setdiff(1:21, cat_data_id)])
    # Prepare data for glmnet model fitting
    xx <- as.matrix(GermanCreditWOE[, 1:20])
    yy <- GermanCreditWOE$credit_risk
    # refit the cross-validated model whenever the nfolds input changes
    reactive_data = reactive({
        # Settings and user input
        nfolds = input$nfolds # min(nfolds) = 3
        # Fit the glm
        model <- cv.glmnet(xx, yy, family = "binomial", type.measure = "auc", nfolds = nfolds)
    })
    output$plot <- renderPlot({
        # Load the model
        model = reactive_data()
        # Evaluate the Gini parameters on both data sets
        test_probs <- predict(model, newx = xx, s = "lambda.min", type = "response")
        test_ROC <- model$cvm[which(model$glmnet.fit["lambda"][[1]] == model$lambda.min)]
        all_ROC <- roc(yy, test_probs)
        # Plot
        plot(model)
    })
    output$text1 <- renderText({
        # Load the model
        model = reactive_data()
        # Evaluate AUC on test and out-of-sample set
        # NOTE(review): test_probs is computed but unused in this output
        test_probs <- predict(model, newx = xx, s = "lambda.min", type = "response")
        test_ROC <- model$cvm[which(model$glmnet.fit["lambda"][[1]] == model$lambda.min)]
        paste0("Gini on a test set = ", round(2*test_ROC-1, 4))
    })
    output$text2 <- renderText({
        # Load the model
        model = reactive_data()
        # Evaluate AUC on test and out-of-sample set
        test_probs <- predict(model, newx = xx, s = "lambda.min", type = "response")
        all_ROC <- roc(yy, test_probs)
        result <- 2*all_ROC$auc-1
        paste0("Gini on the whole set = ", round(result, 4))
    })
    output$text3 <- renderText({
        # Load the model
        model = reactive_data()
        # Evaluate AUC on test and out-of-sample set
        test_probs <- predict(model, newx = xx, s = "lambda.min", type = "response")
        test_ROC <- model$cvm[which(model$glmnet.fit["lambda"][[1]] == model$lambda.min)]
        all_ROC <- roc(yy, test_probs)
        result <- (2*test_ROC-1)/(2*all_ROC$auc-1)
        paste0("Ratio: Gini(test set)/Gini(the whole set) = ", round(result, 4))
    })
})
|
556ea5a03eca4af55e140bb3ae5de059ea6ba8f2
|
87f9a43a17ea28fc9b012dfbe3e7f36bcab05261
|
/man/fred_get_series.Rd
|
4ae5f8d3b00a577221bed00269687d1afa7d4e07
|
[] |
no_license
|
XiuqiZheng/zxq
|
6c1b5c66eedffbc61914b712b3425ea0c514907f
|
b8e52e93857f3097e55c1a5bed22c83291bbfe73
|
refs/heads/main
| 2023-01-24T09:33:24.404426
| 2020-12-12T04:40:55
| 2020-12-12T04:40:55
| 320,743,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 371
|
rd
|
fred_get_series.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fred_get_series.R
\name{fred_get_series}
\alias{fred_get_series}
\title{Unemployment rate data from FRED}
\usage{
fred_get_series()
}
\value{
return unemployment rate data from 2010-01-01 to current
}
\description{
Unemployment rate data retrieved from FRED (Federal Reserve Economic Data).
}
\examples{
fred_get_series()
}
|
676f2f1cd6a4b33337293abb92e58f97149fc3e7
|
262a9376b6f0cafd767ab41d8625b010bed61bf4
|
/src/task-1/Icd-swor.R
|
a817955f5fb3b7b8ff1451b4fca0bc7556762305
|
[] |
no_license
|
tofikhidayatxyz/UTS-Statistics
|
dc753e3be61b9948417a06b57c6a5a597173c381
|
5ad96c3551b6268a837769751c87af5d3da87b39
|
refs/heads/master
| 2023-04-10T05:51:53.804530
| 2021-05-02T03:13:52
| 2021-05-02T03:13:52
| 363,085,589
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
Icd-swor.R
|
# Systematic sampling WITHOUT replacement: scan 3-digit windows of the id
# strings in tar.json, collect up to maxSample distinct values in 1..maxDataNum,
# and write the resulting sample to swor.json.
library(rjson)

jsonData <- fromJSON(file = "./data/tar.json")

maxSample = 154
maxDataNum = 250

sampleResultSWOR = c() # Cannot duplicate (sampling without replacement)
currentLoop = 1

while (length(sampleResultSWOR) < maxSample) {
  for (itm in jsonData) {
    if (length(sampleResultSWOR) < maxSample) {
      # read a 3-character window starting at the current offset
      currentNum = strtoi(substr(itm, currentLoop, currentLoop + 2))
      if (!is.na(currentNum)) {
        if (currentNum <= maxDataNum && currentNum > 0) {
          if (!is.element(currentNum, sampleResultSWOR)) {
            sampleResultSWOR <- append(sampleResultSWOR, currentNum)
          }
        } else if (currentNum <= 0) {
          # values <= 0 map to maxDataNum
          # BUG FIX: was `append(sampleResultSWR, maxDataNum)` — a typo
          # referencing an undefined object, which errored at runtime.
          if (!is.element(maxDataNum, sampleResultSWOR)) {
            sampleResultSWOR <- append(sampleResultSWOR, maxDataNum)
          }
        }
      }
    }
  }
  # advance the window and rescan the data until the sample is full
  currentLoop = currentLoop + 1
}

print(sampleResultSWOR)

jsonResult <- toJSON(sampleResultSWOR)
write(jsonResult, "./data/swor.json")
|
650b3aa71db5cc0802fdbcc51d6c4f2b30412f76
|
1859302e5260023ef5e8efb975f82672a1c44c47
|
/Session 11 - Unsupervised Learning III.R
|
b522c99a11dffc7e0ee49a6590cc95f486060e4e
|
[] |
no_license
|
anhnguyendepocen/Text_as_Data-1
|
b9b4942afbd0ff3e733336aecbace474b5c94f06
|
4983b04f74268b02dc230aa7a19e129675c6f95c
|
refs/heads/master
| 2023-03-17T08:02:52.404303
| 2017-04-25T23:18:26
| 2017-04-25T23:18:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,490
|
r
|
Session 11 - Unsupervised Learning III.R
|
# TA: Patrick Chester
# Course: Text as Data
# Date: 4/3/2017
# Recitation 11: Unsupervised Learning III
# NOTE(review): rm(list = ls()) and setwd() are course-script conveniences;
# avoid both in production code.
rm(list = ls())
## 1 Running LDA
# Make sure you have the appropriate packages installed
install.packages("tidytext")
install.packages("topicmodels")
install.packages("ldatuning")
install.packages("stringi")
libs <- c("ldatuning","topicmodels","ggplot2","dplyr","rjson","quanteda","lubridate","parallel","doParallel","tidytext")
lapply(libs, library, character.only = T)
rm(libs)
# First, you need to go to my github and download the data
# Save the two folders to your desktop
setwd("E:/Documents/Data/HPC")
# Setting seed
set.seed(2017)
#blm_tweets <- read.csv("eg_samples.csv", stringsAsFactors = F) %>% sample_n(10000)
# NOTE(review): the write.csv below uses blm_tweets, which is only defined if
# the commented line above has been run in this session — ordering hazard.
write.csv(blm_tweets, "blm_samp.csv")
blm_tweets <- read.csv("blm_samp.csv", stringsAsFactors = F)
## 1 Preprocessing
# Creates a more managable date vector
blm_tweets$date <- as.POSIXct(strptime(blm_tweets$created_at, "%a %b %d %T %z %Y",tz = "GMT"))
blm_tweets$date2 <- mdy(paste(month(blm_tweets$date), day(blm_tweets$date), year(blm_tweets$date), sep = "-"))
# Collapse tweets so we are looking at the total tweets at the day level
blm_tweets_sum <- blm_tweets %>% group_by(date2) %>% summarise(text = paste(text, collapse = " "))
# Remove non ASCII characters
blm_tweets_sum$text2 <- stringi::stri_trans_general(blm_tweets_sum$text, "latin-ascii")
# Removes solitary letters
# NOTE(review): the class [A-z] also matches the six symbols between 'Z' and
# 'a' ([ \ ] ^ _ `), not just letters — confirm this is intended.
blm_tweets_sum$text3 <- gsub(" [A-z] ", " ", blm_tweets_sum$text2)
# Create DFM
mat <-dfm(blm_tweets_sum$text3, stem=F, removePunct = T, tolower=T,removeTwitter = T, removeNumbers = T,
          remove = c(stopwords(kind="english"), "http","https","rt", "t.co"))
## 2 Selecting K
# Identify an appropriate number of topics (FYI, this function takes a while)
result <- FindTopicsNumber(
  mat,
  topics = seq(from = 2, to = 30, by = 1),
  metrics = c("Griffiths2004", "CaoJuan2009", "Arun2010", "Deveaud2014"),
  method = "Gibbs",
  control = list(seed = 2017),
  mc.cores = 3L,
  verbose = TRUE
)
FindTopicsNumber_plot(result)
# What should you consider when choosing the number of topics you use in a topic model?
# What does robustness mean here?
# About 16-19 topics
## 3 Visualizing Word weights
# Set number of topics
k <-19
# Run the topic model (this may also take a while)
TM<-LDA(mat, k = k, method = "Gibbs", control = list(seed = 2017)) # Keep in mind that in "control" you can set the LDA parameters
# Quickly extracts the word weights and transforms them into a data frame
blm_topics <- tidy(TM, matrix = "beta")
# Generates a df of top terms
blm_top_terms <- blm_topics %>%
  group_by(topic) %>%
  top_n(10, beta) %>%
  ungroup() %>%
  arrange(topic, -beta)
# Creates a plot of the weights and terms by topic
blm_top_terms %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
## Relevant topics:
# 1 - Sandra Bland
# 10 - Eric Garner
# 16 - Freddie Gray
## 4 Visualizing topic trends over time
# Store the results of the distribution of topics over documents
doc_topics<-TM@gamma
# Store the results of words over topics
words_topics<-TM@beta
# Transpose the data so that the days are columns
doc_topics <- t(doc_topics)
# Arrange topics
max<-apply(doc_topics, 2, which.max)
# Write a function that finds the second max
which.max2<-function(x){
which(x == sort(x,partial=(k-1))[k-1])
}
max2<- apply(doc_topics, 2, which.max2)
max2<-sapply(max2, max)
# Coding police shooting events
victim <- c("Freddie Gray", "Sandra Bland")
shootings <- mdy(c("04/12/2015","7/13/2015"))
# Combine data
index<-seq(1:nrow())
top2<-data.frame(max = max, max2 = max2,
index = index, date = ymd(blm_tweets_sum$date2))
# Plot
z<-ggplot(top2, aes(x=date, y=max, pch="First"))
z + geom_point(aes(x=date, y=max2, pch="Second") ) +theme_bw() +
ylab("Topic Number") + ggtitle("BLM-Related Tweets from 2014 to 2016 over Topics") + geom_point() + xlab(NULL) +
geom_vline(xintercept=as.numeric(shootings[1]), color = "blue", linetype=4) + # Freddie Gray (Topic)
geom_vline(xintercept=as.numeric(shootings[2]), color = "black", linetype=4) + # Sandra Bland
scale_shape_manual(values=c(18, 1), name = "Topic Rank")
# Thanks guys!
|
d0a68809c7087df6693af529cb1ff353d3c7a88d
|
dd0ea6ad4e9b2c0325974509fe28ea18deb7a24f
|
/man/get_rmdl.Rd
|
d7ffb9fbb90745150dd2b07204dd160985f9d9f9
|
[
"MIT"
] |
permissive
|
kasperdanielhansen/recountmethylation
|
b4f14784cab50c71c82111b920bacfd143e2c250
|
65e794c618c89398d19cd9014ef898e78bd60306
|
refs/heads/master
| 2022-04-21T11:11:18.539683
| 2020-04-23T00:09:58
| 2020-04-23T00:09:58
| 258,048,493
| 0
| 0
| null | 2020-04-23T00:00:36
| 2020-04-23T00:00:35
| null |
UTF-8
|
R
| false
| true
| 752
|
rd
|
get_rmdl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accessors.R
\name{get_rmdl}
\alias{get_rmdl}
\title{Get DNAm assay data.}
\usage{
get_rmdl(
which.dn = c("h5se-test_gr", "h5se_gr", "h5se_gm", "h5se_rg", "\\\\.h5"),
url = "https://recount.bio/data/",
dfp = "data",
verbose = TRUE
)
}
\arguments{
\item{which.dn}{Type of data dir to be downloaded.}
\item{url}{Server URL containing assay data.}
\item{dfp}{Target local directory for downloaded files.}
\item{verbose}{Whether to return verbose messages.}
}
\value{
New filepath to dir with downloaded data.
}
\description{
Uses RCurl to recursively download latest H5SE and HDF5 data objects the from server.
}
\examples{
get_rmdl("h5se-test_gr", verbose = TRUE)
}
|
6a5b293c326d187ba6915a249c372b2d21b2b526
|
125ad3e88f7720f301ce916752c08cc5434cd89a
|
/man/merge_pathway.Rd
|
671af9b15abb2a434a0dc69137683081abbfe0c0
|
[] |
no_license
|
github-gs/QPA
|
5f02ddb8e6aa9e853cc3a755dec3a41ece94a1ae
|
1ff4d7124544e71c15bb103ae1c187dc331576e2
|
refs/heads/master
| 2020-04-23T19:25:06.018793
| 2019-09-10T03:12:44
| 2019-09-10T03:12:44
| 171,402,784
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 695
|
rd
|
merge_pathway.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge_pathway.R
\name{merge_pathway}
\alias{merge_pathway}
\title{Merge enriched pathways between different groups together.}
\usage{
merge_pathway(pathway_list)
}
\arguments{
\item{pathway_list}{A list containing KEGG IDs of enriched pathways in different groups.}
}
\value{
Return a list containing three elements KEGG IDs,ENTREZ ID of genes which are pathway members,pathway and gene interaction.
}
\description{
This function combines the enriched pathways together,preparing for the next step comparing pathways between different groups.
}
\examples{
data(example)
pathway_info=merge_pathway(pathway_vector)
}
|
0cb98aa7910deb5522c49f9b26bd04d20e464949
|
5d0ad197f94a53680dc4172ed3b8f1e8384a7d27
|
/code/mongodb_functions.R
|
27356beb12b89959a8f2c3babd68b83adae46137
|
[
"MIT"
] |
permissive
|
markrobinsonuzh/os_monitor
|
3356cbc8fb2a826572a8f4d64d1a454a180ffe2b
|
a6acd4740c657b9ebae0a09945862666bf1345f0
|
refs/heads/master
| 2022-02-28T20:44:27.516655
| 2022-02-17T12:43:52
| 2022-02-17T12:43:52
| 243,106,445
| 2
| 1
|
MIT
| 2020-10-07T05:55:18
| 2020-02-25T21:29:29
|
R
|
UTF-8
|
R
| false
| false
| 454
|
r
|
mongodb_functions.R
|
# for use in docker, with second docker container containing the mongodb
library(mongolite)
oadoi_fetch_local <- function(dois,collection="unpaywall", db="oa", url="mongodb://192.168.16.3/20"){
con <- mongo(collection=collection, db=db, url=url)
con$find(paste0('{"doi": { "$in": ["',paste0(doi,collapse = '","'),'"]}}'), fields = '{"doi":1, "oa_status":1}')
}
doi <- c("10.2217/14750708.2.5.745","10.1192/bjp.89.375.270")
oadoi_fetch_local(doi)
|
4081c7f468cc2694ad5d81270a298fe00d5c911f
|
7b122933da2451a501a6f6a930653d8c52f55bdc
|
/scripts/importSEQ.R
|
648ff23710326abad7bfd7d7a41713f76bfc9539
|
[] |
no_license
|
rtraborn/Promoter_PopGen
|
54e5c31a6ca66fc93c8307a11cb2ed4ae5c2bfb5
|
384120a928451a73533e4067e701547c99609a70
|
refs/heads/master
| 2022-03-17T17:48:48.288667
| 2019-12-06T17:38:19
| 2019-12-06T17:38:19
| 67,723,999
| 0
| 0
| null | 2018-04-07T21:49:53
| 2016-09-08T17:12:11
|
R
|
UTF-8
|
R
| false
| false
| 600
|
r
|
importSEQ.R
|
#' Imports fasta files from Drosophila Genome Nexus datasets and creates a DNAStringSet object.
#' @param fileName a fasta file containing multiple SEQ strings
#' @import biostrings readDNAstringSet
#' @return an object of class DNAStringSet containing the sequences from the SEQ files
#' @export
importSEQ <- function(fileName) {
library("Biostrings")
if (is.character(fileName)==FALSE) {
stop("\nfileName must be of class 'character'\n")
}
readDNAStringSet(fileName, format="fasta", use.names=TRUE) -> this.string
return(this.string)
}
|
9fadb7d405662a53987fc792c7b1e76061dadde6
|
d746fef241f9a0e06ae48cc3b1fe72693c43d808
|
/ark_87287/d7pp4q/d7pp4q-012/rotated.r
|
345f33c339212852a8f1742ff3b22819ca2c368b
|
[
"MIT"
] |
permissive
|
ucd-library/wine-price-extraction
|
5abed5054a6e7704dcb401d728c1be2f53e05d78
|
c346e48b5cda8377335b66e4a1f57c013aa06f1f
|
refs/heads/master
| 2021-07-06T18:24:48.311848
| 2020-10-07T01:58:32
| 2020-10-07T01:58:32
| 144,317,559
| 5
| 0
| null | 2019-10-11T18:34:32
| 2018-08-10T18:00:02
|
JavaScript
|
UTF-8
|
R
| false
| false
| 199
|
r
|
rotated.r
|
r=359.78
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7pp4q/media/images/d7pp4q-012/svc:tesseract/full/full/359.78/default.jpg Accept:application/hocr+xml
|
b78be778d7a21e3bc1c4e0a579c7ee00a92fcea5
|
8c29e32ce7fceb71e6bd5356e0461f2143a94994
|
/ui.R
|
9c69d843f7195af97e77fb3eaf6bcef67ef2c5f5
|
[] |
no_license
|
alpreyes/GENAVi
|
47539c5901bc71e59574b532b24a460866cabe02
|
ad6abbce7da819bf052acfddbc49d6b146408090
|
refs/heads/master
| 2021-05-05T15:18:03.274175
| 2020-04-29T22:39:45
| 2020-04-29T22:39:45
| 117,302,113
| 13
| 15
| null | 2020-04-29T22:39:47
| 2018-01-13T00:52:28
|
HTML
|
UTF-8
|
R
| false
| false
| 35,213
|
r
|
ui.R
|
source("aux_functions.R")$value
ui <- fluidPage(title = "GENAVi",
theme = shinytheme("spacelab"),
tags$head(tags$style(
HTML('
#sidebar {
background-color: #ffffff;
}
body, label, input, button, select {
font-family: "Arial";
}
.btn-file {
background-color:#5B81AE;
border-color: #5B81AE;
background: #5B81AE;
}
.bttn-bordered.bttn-sm {
width: 200px;
text-align: left;
margin-bottom : 0px;
margin-top : 20px;
}
'
)
)),
titlePanel("GENAVi"),
useShinyjs(),
tabsetPanel( #type = "pills",
tabPanel("Gene Expression", ##changing from tab 1, but still using tab1 in the other parts of code
icon = icon("table"),
column(2,
#sidebarPanel(id="sidebar",width = 3,
dropdown(label = "Data upload",
icon = icon("file-excel"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
fileInput("rawcounts", "Choose CSV File",
multiple = TRUE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
tags$div(
HTML(paste(help_text))
)
),
dropdown(label = "Table selection",
icon = icon("table"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
selectInput("select_tab1", "Select Transform", transforms, multiple = FALSE)
), ##need individual selectInputs for each tab
dropdown(label = "Download data",
icon = icon("download"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
downloadButton("downloadNormalizedData", "Download normalized files",class = "btn-primary")
),
dropdown(label = "Generate report",
icon = icon("file-code"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
downloadButton("reportNorm", "Download report",class = "btn-primary")
),
dropdown(label = "Gene selection",
icon = icon("mouse-pointer"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
fileInput("input_gene_list_tab1", "Input Gene Symbol List (Optional)",
multiple = FALSE,
accept = NULL,
width = NULL,
buttonLabel = "Browse",
placeholder = "No file selected"), ##how to increase max upload size
textAreaInput(inputId = "input_gene_list_area",
label = "Gene list filter: separate gene names by , or ; or newline",
value = "",
width = "100%"),
actionButton("input_gene_list_but",
"Select Rows",
width = "100%",
class = "btn-primary"), ##do this to put selected rows at top of data table, trying it out
actionButton("select_most_variable",
"Select 1000 genes of highest variance",
width = "100%",
class = "btn-primary"), ##do this to put selected rows at top of data table, trying it out
actionButton("unselect_all",
"Deselect all genes",
width = "100%",
class = "btn-primary")
) ##do this to put selected rows at top of data table, trying it out
#selectInput("select_sort_tab1", "Sort Table By", sortby, multiple = FALSE),
),
column(10,
DT::dataTableOutput('tbl.tab1')
)
),
tabPanel("Visualization", ##changing from tab 2, but still usibg tab2 in other parts of code
#icon = icon("object-group"),
icon = icon("image"),
tabsetPanel(type = "pills",
tabPanel("Expression plots",
icon = icon("bar-chart-o"),
bsAlert("genemessage"),
hidden(
div(id = "expression_plots",
h3('Expression Barplot'),
plotlyOutput("barplot", width = "auto") %>% withSpinner(type = 6)
)),
hidden(
div(id = "expression_heatmap",
h3('Expression Heatmap'),
#selectInput("select_z_score",
# label = "Standardized scores?",
# choices = c("No","Rows z-score", "Columns z-score"),
# multiple = FALSE),
iheatmaprOutput("heatmap_expr",height = "auto") %>% withSpinner(type = 6)
)
)
),
tabPanel("Clustering plots",
icon = icon("object-group"),
div(id = "cluster_plots",
column(2,
h3('Correlation Heatmap'),
selectInput("select_clus_type",
label = "Cluster correlation",
choices = c("Sample","Genes"),
multiple = FALSE),
selectInput("select_clus", "Cluster by what genes",
c("All genes", "Selected genes"),
multiple = FALSE)
),
column(9,
iheatmaprOutput("heatmap_clus",height = "800px") %>% withSpinner(type = 6)
)
)
), tabPanel("PCA plots",
icon = icon("object-group"),
div(id = "pca_plots",
bsAlert("genemessage3"),
column(2,
selectInput("select_pca_type",
label = "PCA genes",
choices = c("Top 1000 variable genes", "All genes", "Selected genes"),
multiple = FALSE),
selectInput("pca_dimensions",
label = "Number of dimensions",
choices = c("2D", "3D"),
multiple = FALSE),
selectInput("pcacolor", "Color samples by", NULL, multiple = FALSE),
downloadButton("reportPCA", "Generate report",class = "btn-primary")),
column(6,
plotlyOutput("pca_plot",height = "600",width = "600") %>% withSpinner(type = 6)
)
)
)
)
),
tabPanel("Differential Expression Analysis",
icon = icon("flask"),
column(2,
#sidebarPanel(id="sidebar",width = 3,
dropdown(label = "Metadata upload",
icon = icon("file-excel"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeft,
exit = animations$fading_exits$fadeOutLeft
),
# Input: Select a file ----
downloadButton('downloadData', 'Download example metadata file',class = "btn-primary"),
fileInput("metadata", "Choose CSV File",
multiple = TRUE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
tags$div(
HTML(paste(help_text2))
)
),
dropdown(label = "DE analysis",
icon = icon("flask"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
selectInput("condition", "Select condition column for DEA", NULL, multiple = FALSE), ##need individual selectInputs for each tab
selectInput("covariates",
label = "Select covariates for DEA",
choices = NULL,
multiple = TRUE), ##need individual selectInputs for each tab
verbatimTextOutput("formulatext"),
selectInput("reference", "Select reference level for DEA", NULL, multiple = FALSE), ##need individual selectInputs for each tab
actionButton("dea", "Perform DEA", class = "btn-primary")
),
dropdown(label = "Select Results",
icon = icon("table"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
selectInput("deaSelect", "Select results", NULL, multiple = FALSE), ##need individual selectInputs for each tab
checkboxInput(inputId="lfc", label = "Perform Log fold change shrinkage", value = FALSE, width = NULL),
downloadButton("downloadDEAFiles", "Download DEA Results",class = "btn-primary")
),
dropdown(label = "Volcano plot",
icon = icon("chart-bar"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
numericInput("log2FoldChange", "log2FoldChange cut-off:", value = 0, min = 0, max = 10, step = 0.1),
numericInput("padj", "P adjusted cut-off:", 0.01, min = 0, max = 1,step = 0.1)
),
dropdown(label = "Generate report",
icon = icon("file-code"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
downloadButton("reportDEA", "Download Report",class = "btn-primary")
)
),
column(10,
tabsetPanel(type = "pills",
id = "DEA",
tabPanel("Metadata",
tags$hr(),
DT::dataTableOutput('metadata.tbl')
),
tabPanel("DEA results",
tags$hr(),
DT::dataTableOutput('dea.results')
),
tabPanel("Volcano plot",
tags$hr(),
plotlyOutput('volcanoplot') %>% withSpinner(type = 6)
)
)
)
),
tabPanel("Enrichment analysis",
icon = icon("flask"),
column(2,
#sidebarPanel(id="sidebar",width = 3,
dropdown(label = "DEA results upload ",
icon = icon("file-excel"),
style = "bordered",
status = "primary",
width = "300px",
size = "sm",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
downloadButton('downloadExampleDEAData', 'Download example DEA file',class = "btn-primary"),
fileInput("deafile", "Choose CSV File",
multiple = TRUE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv"))
),
dropdown(label = "Enrichment Analysis",
icon = icon("flask"),
style = "bordered",
size = "sm",
status = "primary",
width = "300px",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
selectInput("deaanalysistype",
"Select the type of analysis",
c("ORA (over representation analysis)" = "ORA",
"GSEA (gene set enrichment analysis)" = "GSEA"),
multiple = FALSE),
selectInput("deaanalysisselect",
"Select the analysis",
c("WikiPathways analysis",
"MSigDb analysis",
"Gene Ontology Analysis",
"KEGG Analysis",
"Disease Ontology Analysis"),
multiple = FALSE),
selectInput("msigdbtype",
"Select collection for Molecular Signatures Database",
c("All human gene sets" = "All",
"H: hallmark gene sets" = "H",
"C1: positional gene sets" = "C1",
"C2: curated gene sets" = "C2",
"C3: motif gene sets" = "C3",
"C4: computational gene sets" = "C4",
"C5: GO gene sets" = "C5",
"C6: oncogenic signatures" = "C6",
"C7: immunologic signatures" = "C7"),
multiple = FALSE),
selectInput("gotype",
"Select collection for Molecular Signatures Database",
c("Molecular Function"="MF",
"Cellular Component"="CC",
"Biological Process" = "BP"),
multiple = FALSE),
numericInput("enrichmentfdr",
"P-value cut-off:",
value = 0.05,
min = 0,
max = 1,
step = 0.05),
div(id = "eaorasectui",
tags$hr(),
h3('ORA - selecting genes'),
numericInput("ea_subsetfdr", "P-adj cut-off", value = 0.05, min = 0, max = 1, step = 0.05),
numericInput("ea_subsetlc", "LogFC cut-off", value = 1, min = 0, max = 3, step = 1),
selectInput("ea_subsettype",
"Gene status",
c("Upregulated",
"Downregulated"),
multiple = FALSE)
),
div(id = "eagsearankingui",
tags$hr(),
h3('GSEA - ranking method'),
selectInput("earankingmethod",
"Select the ranking method",
c("log Fold Change",
"-log10(P-value) * sig(log2FC)",
"-log10(P-value) * log2FC"),
multiple = FALSE)
),
actionButton("enrichementbt", "Perform analysis", class = "btn-primary")
),
dropdown(label = "Plot options",
icon = icon("image"),
size = "sm",
style = "bordered",
status = "primary",
width = "300px",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
selectInput("ea_plottype",
"Plot type",
c("Dot plot",
"Ridgeline",
"Running score and preranked list",
"Ranked list of genes"),
multiple = FALSE),
numericInput("ea_nb_categories", "Number of categories", value = 10, min = 2, max = 30, step = 1),
selectInput("gsea_gene_sets", "Plot gene sets", NULL, multiple = TRUE)
),
dropdown(
label = "Export image",
icon = icon("save"),
size = "sm",
style = "bordered",
status = "primary",
width = "300px",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
tooltip = tooltipOptions(title = "Export image"),
textInput("enrichementPlot.filename", label = "Filename", value = "enrichement_plot.pdf"),
bsTooltip("enrichementPlot.filename", "Filename (pdf, png, svg)", "left"),
numericInput("ea_width", "Figure width (in)", value = 10, min = 5, max = 30, step = 1),
numericInput("ea_height", "Figure height (in)", value = 10, min = 5, max = 30, step = 1),
downloadButton('saveenrichementpicture', 'Export figure',class = "btn-primary")
),
dropdown(
label = "Generate report",
size = "sm",
icon = icon("file-code"),
style = "bordered",
status = "primary",
width = "300px",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
downloadButton('reportEA', 'Download HTML report',class = "btn-primary"))
,
dropdown(
label = "Help material",
icon = icon("book"),
style = "bordered",
size = "sm",
status = "primary",
width = "300px",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutLeft
),
shiny::actionButton(inputId='ab1',
label = "Learn More",
icon = icon("th"),
onclick = "window.open('https://guangchuangyu.github.io/pathway-analysis-workshop/', '_blank')",
class = "btn-primary"),
shiny::actionButton(inputId = 'ab1',
label="MSigDB Collections",
icon = icon("th"),
onclick = "window.open('http://software.broadinstitute.org/gsea/msigdb/collection_details.jsp', '_blank')",
class = "btn-primary"))
),
column(8,
tabsetPanel(type = "pills",
tabPanel("Plots",
jqui_resizable(
plotOutput("plotenrichment", height = "600")
) #%>% withSpinner(type = 6)
),
tabPanel("Table",
DT::dataTableOutput('tbl.analysis') # %>% withSpinner(type = 6)
)
)
)
),
tabPanel("Help", ##changing from tab 2, but still usibg tab2 in other parts of code
#icon = icon("object-group"),
icon = icon("book"),
tabsetPanel(type = "pills",
tabPanel("Vignette",
icon = icon("book"),
includeMarkdown("Genavi.Rmd")
),
tabPanel("Tutorial",
icon = icon("book"),
includeHTML("GENAVi_Tutorial.html")
),
tabPanel("References",
icon = icon("book"),
includeMarkdown("References.Rmd")
)
)
)
)
)
|
bbac96492d73606aab6c782f2c583de06d2cc450
|
b9b96aee722f984edf62eeabe0fb32ebbdc2598f
|
/R/mcmc.2pnoh.R
|
f33f874fc76c2cdd0a6cb39cbd9c4e331da2e91d
|
[] |
no_license
|
daniloap/sirt
|
46690dca1382385d0fdfc39c12f15d7dd2adf291
|
8c4cb12ffafd70c14b28c9ca34bfd28c58734e83
|
refs/heads/master
| 2021-01-17T10:49:37.611683
| 2015-03-03T00:00:00
| 2015-03-03T00:00:00
| 35,530,314
| 1
| 0
| null | 2015-05-13T05:38:46
| 2015-05-13T05:38:46
| null |
UTF-8
|
R
| false
| false
| 6,343
|
r
|
mcmc.2pnoh.R
|
##############################################
# MCMC estimation 2PNO model
mcmc.2pnoh <- function(dat , itemgroups , prob.mastery = c(.5 , .8 ) ,
weights=NULL , burnin=500 , iter=1000 , N.sampvalues = 1000 ,
progress.iter=50 , prior.variance=c(1,1) , save.theta=FALSE ){
s1 <- Sys.time()
# data preparation
dat0 <- dat
dat <- as.matrix(dat)
dat[ is.na(dat0) ] <- 0
dat.resp <- 1-is.na(dat0)
N <- nrow(dat)
I <- ncol(dat)
eps <- 10^(-10)
# itemgroups
itemgroups.unique <- sort( unique( itemgroups ) )
K <- length(itemgroups.unique)
itemgroup <- match( itemgroups , itemgroups.unique )
Ik <- aggregate( 1 + 0*1:I , list(itemgroup) , sum )[,2]
# redefine weights
if (! is.null(weights) ){
weights <- N * weights / sum(weights )
}
# set initial values
a <- rep(1,I)
b <- - qnorm( (colMeans(dat0 , na.rm=TRUE) + .01 )/1.02 )
xi <- rep(0,K)
omega <- rep(1,K)
sig <- 1.5 # SD of item difficulties
nu <- .30 # SD of item discriminations
# item parameters in matrix form
aM <- matrix( a , nrow=N , ncol=I , byrow=TRUE)
bM <- matrix( b , nrow=N , ncol=I , byrow=TRUE)
theta <- qnorm( ( rowMeans( dat0,na.rm=TRUE ) + .01 ) / 1.02 )
# define lower and upper thresholds
ZZ <- 1000
threshlow <- -ZZ + ZZ*dat
threshlow[ is.na(dat0) ] <- -ZZ
threshupp <- ZZ*dat
threshupp[ is.na(dat0) ] <- ZZ
# saved values
SV <- min( N.sampvalues , iter - burnin )
svindex <- round( seq( burnin , iter , len=SV ) )
a.chain <- matrix( NA , SV , I )
b.chain <- matrix( NA , SV , I )
theta.chain <- matrix( NA , SV , N )
nu.chain <- sig.chain <- deviance.chain <- rep(NA, SV)
transition.chain <- nonmastery.chain <- mastery.chain <-
xi.chain <- omega.chain <- matrix(NA , SV , K )
M.beta.chain <- M.alpha.chain <- matrix(NA,SV,K)
zz <- 0
#**********************
# begin iterations
for (ii in 1:iter){
#****
# draw latent data Z
Z <- .draw.Z.2pnoh( aM , bM, theta , N , I , threshlow , threshupp )
#***
# draw latent traits theta
res <- .draw.theta.2pnoh( aM , bM , N , I , Z )
theta <- res$theta
#***
# draw item parameters alpha, beta, xi and omega
res <- .draw.itempars.2pnoh( theta , Z , I , N , a , b ,
xi , omega , sig , nu , itemgroup , K , Ik , weights)
a <- res$a
b <- res$b
xi <- res$xi
omega <- res$omega
# draw item variance
res <- .draw.itemvariances.2pnoh(a,b,I , itemgroup , xi , omega , prior.variance)
sig <- res$sig
nu <- res$nu
# item parameters in matrix form
aM <- matrix( a , nrow=N , ncol=I , byrow=TRUE)
bM <- matrix( b , nrow=N , ncol=I , byrow=TRUE)
# save parameters
if ( ii %in% svindex ){
zz <- zz+1
a.chain[ zz , ] <- a
b.chain[ zz , ] <- b
xi.chain[zz,] <- xi
omega.chain[zz,] <- omega
theta.chain[ zz , ] <- theta
# mean alpha and beta
# M.alpha.chain[zz,] <- aggregate( a , list(itemgroup) , mean)[,2]
M.alpha.chain[zz,] <- rowsum( a , itemgroup )[,1] / Ik
M.beta.chain[zz,] <- rowsum( b , itemgroup )[,1] / Ik
# calculate deviance
deviance.chain[zz] <- .mcmc.deviance.2pl( aM , bM , theta , dat ,
dat.resp , weights , eps )
# calculate mastery probabilities
res <- .mcmc.mastery.2pnoh( xi , omega , N , K , weights , theta , prob.mastery)
nonmastery <- res$nonmastery
transition <- res$transition
mastery <- res$mastery
nonmastery.chain[zz,] <- res$nonmastery
transition.chain[zz,] <- res$transition
mastery.chain[zz,] <- res$mastery
nu.chain[zz] <- nu
sig.chain[zz] <- sig
}
# print progress
if ( ( ii %% progress.iter ) == 0 ){
cat( "Iteration" , ii , " | " , paste(Sys.time()) , "\n")
flush.console() }
}
##############################
# output
# Information criteria
ic <- .mcmc.ic.2pl( a.chain , b.chain , theta.chain , N , I ,
dat , dat.resp , weights , eps ,
deviance.chain )
# EAP reliability and person parameter estimates
res <- .mcmc.person.2pno( theta.chain, weights )
EAP.rel <- res$EAP.rel
person <- res$person
#-----
# MCMC object
a <- a.chain
b <- b.chain
d <- a*b
theta <- theta.chain
colnames(a) <- paste0("alpha[", 1:I , "]")
colnames(b) <- paste0("beta[", 1:I , "]")
colnames(d) <- paste0("d[", 1:I , "]")
colnames(theta) <- paste0("theta[", 1:N , "]")
mcmcobj <- cbind( deviance.chain , a , b , d )
colnames(mcmcobj)[1] <- "deviance"
colnames(xi.chain) <- paste0("xi[", 1:K , "]")
colnames(omega.chain) <- paste0("omega[", 1:K , "]")
colnames(M.alpha.chain) <- paste0("M.alpha[", 1:K , "]")
colnames(M.beta.chain) <- paste0("M.beta[", 1:K , "]")
tau.chain <- omega.chain * xi.chain
colnames(tau.chain) <- paste0("tau[", 1:K , "]")
mcmcobj <- cbind( mcmcobj , xi.chain , omega.chain ,
M.beta.chain , M.alpha.chain , tau.chain )
dfr <- cbind( "sig" = sig.chain , "nu"=nu.chain )
mcmcobj <- cbind( mcmcobj , dfr )
colnames(nonmastery.chain) <- paste0("nonmastery[", 1:K , "]")
colnames(transition.chain) <- paste0("transition[", 1:K , "]")
colnames(mastery.chain) <- paste0("mastery[", 1:K , "]")
mcmcobj <- cbind( mcmcobj , nonmastery.chain , transition.chain , mastery.chain )
if (save.theta){ mcmcobj <- cbind( mcmcobj , theta ) }
class(mcmcobj) <- "mcmc"
attr(mcmcobj, "mcpar") <- c( burnin+1 , burnin+SV , 1 )
mcmcobj <- as.mcmc.list( mcmcobj )
#----
# summary of the MCMC output
summary.mcmcobj <- mcmc.list.descriptives( mcmcobj )
# number of estimated parameters
# np <- 2*I + 2*K + 2
s2 <- Sys.time()
time <- list( "start"=s1 , "end"=s2 , "timediff"= s2-s1 )
#----
# result list
res <- list( "mcmcobj" = mcmcobj , "summary.mcmcobj" = summary.mcmcobj ,
"ic"=ic ,
"burnin"=burnin , "iter"=iter ,
"alpha.chain"=a.chain , "beta.chain" = b.chain ,
"xi.chain" = xi.chain , "omega.chain" = omega.chain ,
"sig.chain" = sig.chain , "nu.chain" = nu.chain ,
"theta.chain" = theta.chain ,
"deviance.chain"=deviance.chain ,
"EAP.rel" = EAP.rel , "person" = person ,
"dat" = dat0 , "weights" = weights ,
"time" = time , "model" = "2pnoh" ,
"description"="2PNO Hierarchical IRT Model for Criterion-Referenced Measurement")
class(res) <- "mcmc.sirt"
return(res)
}
|
24832648cbdce8977d656693482147c6bde2753f
|
b0dd8d08240e8041e4f8fbffa235d4d233aeefa9
|
/R/day04.R
|
d87b512bf20e00a1ab82e9ddcf146ac80a8027d8
|
[
"MIT"
] |
permissive
|
EvgenyPetrovsky/aoc2019
|
67293b92b3b67e4a341a0304729f4364d29907f8
|
0caa9fef500486ff6ea5563998c2a51b2d159d54
|
refs/heads/master
| 2020-09-28T00:17:32.559598
| 2020-01-07T23:06:13
| 2020-01-07T23:06:13
| 226,643,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,071
|
r
|
day04.R
|
#' Generate sequence of numbers based on value of low and high bound
#'
#' Convert numbers into string format after validation
#'
#' @param low low bound of range
#' @param high high bound of range
day04_makesequence <- function(low, high) {
numbers <- seq(from = low, to = high, by = 1)
as.character(numbers)
}
#' Function that generalizes 2 types of checks using binary check operator and
#'
#' function that folds outcomes to say whether check passes or not
#' @param number value to analyze
#' @param check_fun function to apply to check values
#' @param fold_fun function to apply to fold results of check
day04_docheck <- function(number, check_fun, fold_fun) {
n <- nchar(number)
v <- sapply(
FUN = function(x) substr(number,x,x),
X = 1:n, USE.NAMES = F, simplify = T)
if (n == 1) {
TRUE
} else if (n > 1) {
b <- sapply(
FUN = function(pos) check_fun(v[pos-1], v[pos]),
X = 2:n, USE.NAMES = F, simplify = T
)
fold_fun(b)
} else {
stop(paste("invalid number", number))
}
}
#' Check whether digits of number all go in asscending order
#'
#' Where 00111 and 12345 are valid and 12321 is not. Function works with atomic
#' values only
#' @param number number to be checked
day04_filterasc <- function(number) {
check_fun <- function(x, y) x <= y
fold_fun <- all
day04_docheck(number, check_fun, fold_fun)
}
#' Check whether number has at least to adjacent digits
#'
#' where 00111 and 11322 are valid and 12321 is not. Function works with atomic
#' values only
#' @export
#' @param number number to be checked
day04_filteradj <- function(number) {
check_fun <- function(x, y) x == y
fold_fun <- any
day04_docheck(number, check_fun, fold_fun)
}
#' Day 04 part 1 solution
#' @export
day04_part1_solution <- function() {
ns <- strsplit(aoc19::DATASET$day04, split = "-",fixed = T)[[1]]
rs <- day04_makesequence(ns[1], ns[2]) %>%
Filter(f = function(x) all(day04_filterasc(x), day04_filteradj(x)))
length(rs)
}
#' Check whether some adjacent digit pair is not part of a bigger group
#'
#' Returns TRUE when the number contains a run of exactly two equal digits
#' (e.g. 112233 and 111122 pass; 123444 fails because the 4s form a run of
#' three). Works with atomic values only; numbers shorter than four digits
#' are rejected, matching the original implementation.
#'
#' @param number number to be checked
#' @return a single logical value
day04_filteradj2 <- function(number) {
  digits <- strsplit(as.character(number), "", fixed = TRUE)[[1]]
  if (length(digits) < 4) {
    stop(paste("invalid number", number))
  }
  # A pair that is "not part of a bigger group" is exactly a run of
  # length 2; rle() replaces the original sentinel-padded window scan.
  any(rle(digits)$lengths == 2)
}
#' Day 04 part 2 solution
#'
#' Like part 1, but additionally requires at least one adjacent pair that
#' is not part of a larger group of equal digits.
#' @export
day04_part2_solution <- function() {
  bounds <- strsplit(aoc19::DATASET$day04, split = "-", fixed = TRUE)[[1]]
  candidates <- day04_makesequence(bounds[1], bounds[2])
  candidates <- Filter(day04_filterasc, candidates)
  candidates <- Filter(day04_filteradj, candidates)
  candidates <- Filter(day04_filteradj2, candidates)
  length(candidates)
}
|
61f554422230a4f50ea54fffc3453885cbbe6bd3
|
a2f9f6c19adbf6bc915b5a5a202b7038d2ad3d49
|
/man/nonzero.glmnetcr.Rd
|
556aa3bc53ac8da78779e4dc009be880fdc8819c
|
[] |
no_license
|
cran/glmnetcr
|
4108d6c5e5eb8f0dcda122c6fc9f81c76a80fb7a
|
beef1284f457a5536b07281b6e1586fd68ef0520
|
refs/heads/master
| 2021-01-01T06:32:17.510212
| 2020-07-03T16:10:06
| 2020-07-03T16:10:06
| 17,696,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 883
|
rd
|
nonzero.glmnetcr.Rd
|
\name{nonzero.glmnetcr}
\alias{nonzero.glmnetcr}
\title{
Extract Non-Zero Model Coefficients}
\description{
The \code{nonzero.glmnetcr} function returns only those non-zero coefficient estimates for a selected model}
\usage{
nonzero.glmnetcr(fit, s)
}
\arguments{
\item{fit}{a \code{glmnetcr} object}
\item{s}{the step at which the non-zero coefficient estimates are desired}
}
\value{
\item{a0}{intercept estimate}
\item{beta}{non-zero estimates for variables and ordinal thresholds}
}
\author{
Kellie J. Archer, \email{archer.43@osu.edu}
}
\seealso{
See Also as \code{\link{glmnetcr}}, \code{\link{coef.glmnetcr}}, \code{\link{select.glmnetcr}}
}
\examples{
data(diabetes)
x <- diabetes[, 2:dim(diabetes)[2]]
y <- diabetes$y
glmnet.fit <- glmnetcr(x, y)
AIC.step <- select.glmnetcr(glmnet.fit, which = "AIC")
nonzero.glmnetcr(glmnet.fit, s = AIC.step)
}
\keyword{ misc }
|
eb097233369bc0d218c75f310fe6550557095712
|
4cb5426e8432d4af8f6997c420520ffb29cefd3e
|
/F1.R
|
af1a788e8504439b5c47ca23eefd9b8eeaa15f5d
|
[
"CC0-1.0"
] |
permissive
|
boyland-pf/MorpheusData
|
8e00e43573fc6a05ef37f4bfe82eee03bef8bc6f
|
10dfe4cd91ace1b26e93235bf9644b931233c497
|
refs/heads/master
| 2021-10-23T03:47:35.315995
| 2019-03-14T21:30:03
| 2019-03-14T21:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,158
|
r
|
F1.R
|
# making table data sets
# Builds the CSV and .rdata fixtures for benchmark F1 of the MorpheusData
# package: a small idx_key -> upedonid table plus the expected query
# result (pedon ids that occur more than once).
library(dplyr)
library(tidyr)
library(MorpheusData)
#############benchmark 1
# Inline input table: sample keys mapped to pedon ids; the literal string
# "null" marks a missing id.
dat <- read.table(text=
"
idx_key upedonid
k1 id2
k2 id2
k3 id2
k4 null
k5 id3
k6 id3
k7 id4
k8 id5
k9 id5
k10 null
k11 id6
", header=T)
write.csv(dat, "data-raw/f1_input1.csv", row.names=FALSE)
# Expected output: pedon ids appearing more than once, excluding the
# "null" placeholder, together with their occurrence counts.
df_out = dat %>%
  group_by(upedonid) %>% summarise(cnt=n()) %>% filter(upedonid != 'null' & cnt > 1)
write.csv(df_out, "data-raw/f1_output1.csv", row.names=FALSE)
# Re-read the CSV and normalize column types: factor -> character,
# integer -> numeric.
# NOTE(review): since R 4.0 read.csv no longer creates factors by
# default, so fctr.cols may select no columns here -- presumably written
# for R < 4.0; verify.
f1_output1 <- read.csv("data-raw/f1_output1.csv", check.names = FALSE)
fctr.cols <- sapply(f1_output1, is.factor)
int.cols <- sapply(f1_output1, is.integer)
f1_output1[, fctr.cols] <- sapply(f1_output1[, fctr.cols], as.character)
f1_output1[, int.cols] <- sapply(f1_output1[, int.cols], as.numeric)
save(f1_output1, file = "data/f1_output1.rdata")
# Same normalization for the input table.
f1_input1 <- read.csv("data-raw/f1_input1.csv", check.names = FALSE)
fctr.cols <- sapply(f1_input1, is.factor)
int.cols <- sapply(f1_input1, is.integer)
f1_input1[, fctr.cols] <- sapply(f1_input1[, fctr.cols], as.character)
f1_input1[, int.cols] <- sapply(f1_input1[, int.cols], as.numeric)
save(f1_input1, file = "data/f1_input1.rdata")
|
ad4aff2c10f95ec03915240d5067f8982cc03fcf
|
9f8b3fee0598ded6727ffd4d1fbe3b648b254e11
|
/Install Most used Packages.R
|
1bf48551aa2ee2b6acb51d27cc0c9c52b5db6dbd
|
[] |
no_license
|
kireru/Most-important-Packages-for-my-daily-use
|
91352e8b71d8ff042a94fd3789a4fd336a034a6f
|
e1a2733bc31959cf93a89127fbcc5599fc2d88fa
|
refs/heads/master
| 2021-01-10T01:01:49.514951
| 2015-04-22T07:01:23
| 2015-04-22T07:01:23
| 34,110,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,179
|
r
|
Install Most used Packages.R
|
# My most used packages.
#
# The original script called install.packages() once per package and
# contained several duplicates (sqldf x3, reshape2 x3, lubridate x2,
# RODBC x2) plus the mis-capitalised name "RMYSQL" (a typo duplicate of
# "RMySQL", which was also listed). Installing from a de-duplicated
# vector does the same work in a single call and avoids re-downloading.
my_packages <- unique(c(
  "reshape2", "grid", "scales", "lubridate", "sqldf", "forecast",
  "ggplot2", "plyr", "stringr", "RPostgreSQL", "RMySQL", "RMongo",
  "RODBC", "RSQLite", "qcc", "randomForest", "Sim.DiffProcGUI",
  "foreach", "cacheSweave", "Matrix", "survey", "xlsReadWrite", "rgdal",
  "lme4", "xtable", "MASS", "xts", "twitteR", "survival", "tikzDevice",
  "boot", "data.table", "Hmisc", "RColorBrewer", "foreign", "zoo",
  "latticeExtra", "vegan", "PerformanceAnalytics", "Rcmdr", "reshape",
  "RTextTools", "lattice", "XML", "rgl", "ape", "rJava", "rpart",
  "Rcpp", "Sim.DiffProc", "sp", "quantmod", "car", "maxent", "maptools",
  "nlme", "MCMCglmm", "fortunes", "SmarterPoland", "caret", "NMF"
))
install.packages(my_packages)
# shinyapps is only available from GitHub, not CRAN.
devtools::install_github("rstudio/shinyapps")
## Borrowed
# Scrape crantastic.org's "popcon" pages to find the most used packages.
library(plyr)
library(XML)
# URLs of the first ten result pages
popcon_pages <- paste0("http://crantastic.org/popcon?page=", 1:10)
# read the HTML table on every page and stack them into one data.frame
packages.df <- ldply(popcon_pages, function(page) readHTMLTable(page)[[1]])
# "Users" comes back as a factor; convert via character to numeric
packages.df$Users <- as.numeric(as.character(packages.df$Users))
# most popular first
packages.df <- arrange(packages.df, desc(Users))
# print the 50 most used packages
head(packages.df$`Package Name`, 50)
## How to install multiple packages, skipping those already installed.
# NOTE: "odesolve??" in the original list was not a valid package name;
# the package was called "odesolve" (since superseded by deSolve on CRAN).
libs <- c("CircStats", "coda", "deldir", "gplots", "igraph", "ks",
          "odesolve", "RandomFields")
# Install every package in `packages` that is not already installed.
#
# Args:
#   packages: character vector of package names
#   repos:    CRAN mirror to install from
#   depend:   dependency types passed to install.packages()
#   ...:      further arguments forwarded to install.packages()
CheckInstallPackage <- function(packages, repos="http://cran.r-project.org",
              depend=c("Depends", "Imports", "LinkingTo", "Suggests", "Enhances"), ...) {
  # rownames(installed.packages()) are the exact installed package names.
  # The original used charmatch(), whose partial matching could wrongly
  # treat a package as installed (and which returns 0, not NA, on an
  # ambiguous match); %in% performs the intended exact comparison.
  installed <- rownames(installed.packages())
  for (p in packages) {
    if (!(p %in% installed)) {
      install.packages(p, repos=repos, dependencies=depend, ...)
    }
  }
}
CheckInstallPackage(packages=libs)
|
dd5b8eca20994acf15a8af4272b3a880fe26b656
|
2a31693d31bc1115d817e7bbaacfa286a76696ee
|
/R/abtree-package.R
|
8b8297f7de8330038b875dfd1c4121e92717162d
|
[] |
no_license
|
dfeng/abtree
|
83ae7dbf0bef02009b62678206de5c4b2c5df9b0
|
ea4851fa89c94d39f77f8b0b4e8ca794dd7c84fc
|
refs/heads/master
| 2021-09-23T21:03:46.662223
| 2018-09-27T17:26:23
| 2018-09-27T17:26:23
| 63,723,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 150
|
r
|
abtree-package.R
|
#' abtree
#'
#' Package-level roxygen2 documentation block; the `NULL` below is the
#' conventional anchor object for package-wide docs and tags.
#'
#' @docType package
#' @name abtree-package
#' @author Xiaofei Wang and Derek Feng
#' @importFrom Rcpp evalCpp
#' @useDynLib abtree
NULL
|
4df82a0ee354b9967f7749d4ea0b8a5730a85e6a
|
cfc4926222a19c12c7caa86efe632b92d1914da6
|
/R/augment.R
|
ca89900614159bbea326dbbf70ead5beb09671d4
|
[] |
no_license
|
aditharun/CNPBayes
|
9347a54ec3e902b4ff65712d7f3b9c01cc6a8392
|
f2237ed850db93bb974766b17fd80048aa78ab81
|
refs/heads/master
| 2023-03-24T05:13:32.757582
| 2020-08-07T17:57:16
| 2020-08-07T17:57:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,690
|
r
|
augment.R
|
##
## problem: batch not clustered with other batches and does not have hom del comp, while other batches clearly do
##
## Solution 1: augment data for batches without homdel by simulating 5 observations
## - simulate(rnorm(median(min), 0.3))
## - fit model
## - provide posterior probs only for non-augmented data
## potential problems : cutoff of 1 is very arbitrary
## - would need to filter augmented data in ggMixture; predictive distribution may look funny
##
##
##augmentData <- function(full.data){
## full.data$augmented <- FALSE
## dat <- group_by(full.data, batch) %>%
## summarize(nhom=sum(medians < -1, na.rm=TRUE),
## min_homdel=min(medians, na.rm=TRUE),
## nambiguous=sum(medians < -0.9, na.rm=TRUE))
## nzero <- sum(dat$nhom == 0, na.rm=TRUE)
## if(nzero == 0 || nzero == nrow(dat))
## return(full.data)
## dat2 <- dat %>%
## filter(nhom == 0)
## if(!"id" %in% colnames(full.data)){
## full.data$id <- seq_len(nrow(full.data)) %>%
## as.character
## }
## current <- full.data
## for(i in seq_len(nrow(dat2))){
## augment <- filter(full.data, batch == dat2$batch[i]) %>%
## "["(1:5, ) %>%
## mutate(medians = rnorm(5, mean(dat$min_homdel), 0.3),
## augmented=TRUE,
## id=paste0(id, "*"))
## latest <- bind_rows(current, augment)
## current <- latest %>%
## arrange(batch_index)
## }
## current
##}
## Copy the single-batch parameter estimates into every batch of a
## multi-batch model.
##
## Each row of theta/sigma2/p in the multi-batch current values is
## replaced by the corresponding single-batch estimate, and the modes are
## set to the same values.
useSingleBatchValues <- function(mb, sb){
  cv.mb <- current_values(mb)
  cv.sb <- current_values(sb)
  nb <- numBatch(mb)
  ## Stack the 1 x k single-batch parameter nb times, then reshape to the
  ## nb x k layout the multi-batch model expects.
  expand_rows <- function(param) {
    stacked <- replicate(nb, param)
    t(stacked[1, , ])
  }
  cv.mb$theta <- expand_rows(cv.sb$theta)
  cv.mb$sigma2 <- expand_rows(cv.sb$sigma2)
  cv.mb$p <- expand_rows(cv.sb$p)
  current_values(mb) <- cv.mb
  modes(mb) <- cv.mb
  mb
}
## Augment a MultiBatchList's data with simulated homozygous-deletion
## observations when such deletions appear in only a subset of batches.
##
## A 3-component single-batch model is fit first; its first component is
## used as the homozygous-deletion component. If some batches have <= 2
## observations within 2 SDs of that component's mean while others have
## more, draws from the posterior predictive distribution of component 0
## are appended (ids prefixed "augment_", flagged is_simulated = TRUE) so
## the deletion component can be estimated in every batch.
##
## NOTE(review): depends on CNPBayes generics (specs, assays,
## toSingleBatch, posteriorSimulation, predictiveTibble, ...) and dplyr;
## the behavior described here is inferred from the visible calls --
## confirm against the package documentation.
setMethod("augmentData2", "MultiBatchList", function(object){
  ## dummies to silence R CMD check NOTEs about dplyr's non-standard
  ## evaluation of column names
  model <- NULL
  . <- NULL
  ##
  ## - only makes sense to do this if multibatch models with 3 or 4 components are included in the list
  ##
  sp <- specs(object) %>%
    filter(k == 3 & substr(model, 1, 2) == "MB")
  if(nrow(sp) == 0) return(object)
  sp <- sp[1, ]
  object2 <- object[ specs(object)$model %in% sp$model ]
  ix <- order(specs(object2)$k, decreasing=FALSE)
  ## order models by number of components.
  object2 <- object2[ix]
  SB <- toSingleBatch(object2[[1]])
  ## run at least 150 MCMC iterations regardless of the list's setting
  iter(SB) <- max(iter(object), 150L)
  SB <- posteriorSimulation(SB)
  ##
  ## Running MCMC for SingleBatch model to find posterior predictive distribution
  ##
  message("Checking whether possible homozygous deletions occur in only a subset of batches...")
  modes(SB) <- computeModes(SB)
  SB <- setModes(SB)
  ## location and scale of the first (deletion) component
  mn.sd <- c(theta(SB)[1], sigma(SB)[1])
  limits <- mn.sd[1] + c(-1, 1)*2*mn.sd[2]
  ## record number of observations in each batch that are within 2sds of the mean
  freq.del <- assays(object) %>%
    group_by(batch) %>%
    summarize(n = sum(oned < limits[[2]]))
  fewobs <- freq.del$n <= 2
  iszero <- freq.del$n == 0
  if( all(iszero) ){
    ## no homozygous deletions in any batch -- nothing to augment
    assays(object)$is_simulated <- FALSE
    return(object)
  }
  if( !any(fewobs) ){
    ## many homozygous deletions in each batch -- no augmentation needed
    assays(object)$is_simulated <- FALSE
    return(object)
  }
  ## Some of the batches have 2 or fewer homozygous deletions
  ## Augment data with 10 observations to allow fitting this component
  ##
  ## - sample a minimum of 10 observations (with replacement) from the posterior predictive distribution of the other batches
  ##
  batches <- freq.del$batch [ fewobs ]
  ##zerobatch <- freq.del$batch[ freq.del$n == 0 ]
  dat <- assays(object2[[1]])
  ## expected deletion count per batch from the mixture proportion of
  ## component 1
  expected_homdel <- modes(SB)[["p"]][1] * table(dat$batch)
  expected_homdel <- ceiling(expected_homdel [ unique(dat$batch) %in% batches ])
  nsample <- pmax(10L, expected_homdel)
  ## draw from the predictive distribution of component 0 and jitter by
  ## the component's SD
  pred <- predictiveTibble(SB) %>%
    ##filter(!(batch %in% batches) & component == 0) %>%
    filter(component == 0) %>%
    "["(sample(seq_len(nrow(.)), sum(nsample), replace=TRUE), ) %>%
    select(oned) %>%
    mutate(batch=rep(batches, nsample),
           id=paste0("augment_", seq_len(nrow(.))),
           oned=oned+rnorm(nrow(.), 0, mn.sd[2])) %>%
    select(c(id, oned, batch))
  ## append simulated rows and flag them via the "augment_" id prefix
  newdat <- bind_rows(assays(object), pred) %>%
    arrange(batch) %>%
    mutate(is_simulated = seq_len(nrow(.)) %in% grep("augment_", id))
  mbl <- MultiBatchList(data=newdat,
                        parameters=parameters(object))
  mbl
})
##augmentTest <- function(object){
## ##
## ## - only makes sense to do this if multibatch models
## ## with 3 or 4 components are included in the list
## ##
## ##
## ## Idea:
## ## 1. Run SingleBatch independently for each batch
## ## 2. Assess if there is a potential problem
## ## - components with high standard deviation, or models with small standard deviation of thetas
## ## - components with very few observations
## ## 3. If no problems detected, return object
## ##
## ## Unusually high standard deviations
## ## - is homozygous deletion component missing?
## ## assign well estimated components to theta1 theta2 of theta matrix
## ## set theta0 to NA for these batches
## ##
## ## Small standard deviations
## ## - set components with most observations to theta 1 theta2 of theta matrix
## ## - set theta0 to NA
## ## 4. Impute missing theta 10 times assuming MVN
## ## 5. Augment data with the imputed thetas
## ##
## mb <- object[["MB3"]]
## iter(mb) <- max(iter(object), 150L)
## burnin(mb) <- 0L
## mbm <- as(mb, "MultiBatchModel")
## zfreq <- tableBatchZ(mbm)
## if(any(zfreq == 0)){
##
## }
## mb <- posteriorSimulation(mb)
## ub <- unique(batch(mb))
## mb.list <- vector("list", length(ub))
## for(i in seq_along(ub)){
## B <- ub[i]
## mb2 <- mb[ batch(mb) == B ]
## mb.list[[i]] <- posteriorSimulation(mb2)
## }
## th <- lapply(mb.list, theta) %>%
## do.call(rbind, .) %>%
## round(2)
## sds <- lapply(mb.list, sigma) %>%
## do.call(rbind, .) %>%
## round(2)
## zz <- lapply(mb.list, function(x) table(z(x))) %>%
## do.call(rbind, .)
##
## r1 <- order(rowSds(th), decreasing=TRUE)
##
## batchfreq <- table(batch(object))
## B <- which(batchfreq==max(batchfreq))[[1]]
## mb <- object[["MB3"]]
## sb <- mb[ batch(mb) == B ]
## iter(mb) <- iter(sb) <- max(iter(object), 150L)
## sb <- posteriorSimulation(sb)
## modes(sb) <- computeModes(sb)
## sb <- setModes(sb)
## MB <- useSingleBatchValues(mb=mb, sb=sb)
## mbm <- as(MB, "MultiBatchModel")
## z(mbm) <- update_z(mbm)
## zfreq <- tableBatchZ(mbm)
## if(any(zfreq)==0){
##
## }
## MB <- posteriorSimulation(MB)
## modes(MB) <- computeModes(MB)
## MB <- setModes(MB)
## sds <- sigma(MB)
## th <- theta(MB)
## fc <- sds[, 1]/min(sds[, 1])
## if(!any(fc > 2.5 )) {
## assays(object)$is_simulated <- FALSE
## return(object)
## }
## if(any(fc > 2.5)){
## th1 <- th[, 1]
## th1[ fc > 2.5 ] <- NA
## th[, 1] <- NA
##
## th.imputed <- replicate(10, Impute, simplify=FALSE)
## th.imputed2 <- lapply(th.imputed, function(x) x$yimp[, 1]) %>%
## do.call(cbind, .)
## th.imputed <- th.imputed2[which(is.na(th1)), drop=FALSE]
## }
## dat <- assays(object[[1]])
## newdat <- bind_rows(assays(object), pred) %>%
## arrange(batch) %>%
## mutate(is_simulated = seq_len(nrow(.)) %in% grep("augment_", id))
## mbl <- MultiBatchList(data=newdat,
## parameters=parameters(object))
## mbl
##}
## Simulate ("impute") deletion observations for each batch.
##
## For every batch in `loc.scale` with a non-missing theta, draws
## `expected` values from N(theta, sqrt(sigma2)) -- where `expected` is
## twice the count implied by the largest phat, floored at 5 -- and
## returns them as a tibble of simulated rows (is_simulated = TRUE) whose
## "augment_<i>" ids continue the numbering at `start.index`.
##
## @param model object whose assays() provide batch and batch_labels
##   columns
## @param loc.scale table keyed by batch with columns theta, sigma2 and
##   phat -- NOTE(review): schema inferred from usage here; confirm with
##   callers.
## @param start.index integer; first index used for the "augment_<i>" ids
## @return tibble of simulated observations joined with the
##   batch -> batch_labels mapping
impute <- function(model, loc.scale, start.index){
  ## dummies to appease R CMD check for dplyr's NSE column names
  N <- NULL
  phat <- NULL
  expected <- NULL
  batch_labels <- NULL
  ## per-batch sample sizes and imputation parameters
  x <- assays(model) %>%
    group_by(batch) %>%
    summarize(N=n()) %>%
    ##left_join(tab, by="batch") %>%
    left_join(loc.scale, by="batch") %>%
    mutate(##n = ifelse(is.na(n), 0, n),
           ##phat=n/N,
           expected=2*ceiling(N*max(phat, na.rm=TRUE))) %>%
    filter(!is.na(theta)) %>%
    mutate(expected=pmax(expected, 5))
    ##filter(n < 3)
  ## draw `expected[i]` normal deviates per batch
  imp.list <- vector("list", nrow(x))
  for(i in seq_along(imp.list)){
    imp.list[[i]] <- rnorm(x$expected[i], mean=x$theta[i],
                           sd=sqrt(x$sigma2[i]))
  }
  imp <- unlist(imp.list)
  ## ids continue the existing "augment_" numbering
  index <- seq_along(imp) + start.index - 1
  impdat <- tibble(id=paste0("augment_", index),
                   oned=imp,
                   provisional_batch=NA,
                   ##likely_hd=TRUE,
                   likely_deletion=TRUE,
                   batch=rep(x$batch, x$expected),
                   is_simulated=TRUE)
  ## attach the human-readable batch labels
  batch_mapping <- assays(model) %>%
    group_by(batch) %>%
    summarize(batch_labels=unique(batch_labels))
  impdat2 <- left_join(impdat, batch_mapping, by="batch")
  impdat2
}
|
992cfac7de3aaea8eab29b5b85be256aaea21091
|
f2a0a8fda06fc7c1a7602472aab8569df5101d48
|
/R/front41WriteInput.R
|
718cc205e251e351f6c47fb4b021ee01a50bef1e
|
[] |
no_license
|
cran/frontier
|
833b64b32ae93e7f5c8333ccbbd3670f0fa12182
|
91725b1e6bb2df9b47c3d9eda2d545996a0f0c54
|
refs/heads/master
| 2021-01-01T19:07:23.783127
| 2020-04-17T15:10:03
| 2020-04-17T15:10:03
| 17,696,150
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,407
|
r
|
front41WriteInput.R
|
## Write input files for Tim Coelli's FRONTIER 4.1 program.
##
## Arguments:
##   data             data frame with the estimation data
##   crossSectionName name of the column identifying the cross-section unit
##   timePeriodName   name of the column identifying the time period
##                    (NULL for cross-sectional data)
##   yName            name of the dependent variable
##   xNames           names of regressors entering linearly
##   qxNames          names of regressors entering in quadratic/interaction
##                    form; the single string "all" means: use all xNames
##   zNames           names of variables explaining efficiency (model type 2)
##   quadHalf         multiply quadratic/interaction terms by 0.5?
##   remaining args   settings written to the instruction (.ins) and
##                    start-up (front41.000) files -- see the FRONTIER 4.1
##                    documentation (Coelli 1996, CEPA WP 96/07)
## Side effects: writes insFile, dtaFile and (unless NULL) startUpFile
##   into 'path'.
## Returns (invisibly) a list of class "front41WriteInput" with all
##   settings and the data table that was written.
front41WriteInput <- function( data, crossSectionName, timePeriodName = NULL,
      yName, xNames = NULL, qxNames = NULL, zNames = NULL, quadHalf = TRUE,
      modelType = ifelse( is.null( zNames ), 1, 2 ), functionType = 1,
      logDepVar = TRUE, mu = FALSE, eta = FALSE,
      path = ".",
      insFile = "front41.ins", dtaFile = sub( "\\.ins$", ".dta", insFile ),
      outFile = sub( "\\.ins$", ".out", insFile ), startUpFile = "front41.000",
      iprint = 5, indic = 1, tol = 0.00001, tol2 = 0.001, bignum = 1.0E+16,
      step1 = 0.00001, igrid2 = 1, gridno = 0.1, maxit = 100, ite = 1 ) {

   ## BUG FIX: the NULL/length guards must come before the comparison with
   ## "all" -- the original tested 'qxNames == "all"' first, which yields a
   ## length-0 result for NULL and a length>1 result for a vector of names,
   ## and '&&' requires a length-one condition (an error in R >= 4.3).
   if( !is.null( qxNames ) && length( qxNames ) == 1 && qxNames == "all" ) {
      qxNames <- xNames
   }
   if( !is.character( qxNames ) && !is.null( qxNames ) ) {
      stop( "argument 'qxNames' must be either logical or a vector of strings" )
   }
   checkNames( c( crossSectionName, timePeriodName, yName, xNames, zNames,
      qxNames ), names( data ) )

   ## ---- validate the model settings ----
   if( !modelType %in% c( 1, 2 ) ) {
      stop( "argument 'modelType' must be either 1 or 2" )
   }
   if( !functionType %in% c( 1, 2 ) ) {
      stop( "argument 'functionType' must be either 1 or 2" )
   }
   if( !is.logical( logDepVar ) ) {
      stop( "argument 'logDepVar' must be logical" )
   }
   if( !is.logical( mu ) ) {
      stop( "argument 'mu' must be logical" )
   }
   if( modelType == 1 ) {
      if( !is.logical( eta ) ) {
         stop( "argument 'eta' must be logical" )
      }
   }
   # iprint
   if( !is.numeric( iprint ) ) {
      stop( "argument 'iprint' must be numeric" )
   } else if( iprint != round( iprint ) ) {
      stop( "argument 'iprint' must be an iteger" )
   } else if( iprint < 0 ) {
      stop( "argument 'iprint' must be non-negative" )
   }
   iprint <- as.integer( iprint )
   # indic
   if( !is.numeric( indic ) ) {
      stop( "argument 'indic' must be numeric" )
   } else if( indic != round( indic ) ) {
      stop( "argument 'indic' must be an integer" )
   }
   indic <- as.integer( indic )
   # tol
   if( !is.numeric( tol ) ) {
      stop( "argument 'tol' must be numeric" )
   } else if( tol < 0 ) {
      stop( "argument 'tol' must be non-negative" )
   }
   # tol2
   if( !is.numeric( tol2 ) ) {
      stop( "argument 'tol2' must be numeric" )
   } else if( tol2 < 0 ) {
      stop( "argument 'tol2' must be non-negative" )
   }
   # bignum
   if( !is.numeric( bignum ) ) {
      stop( "argument 'bignum' must be numeric" )
   } else if( bignum <= 0 ) {
      stop( "argument 'bignum' must be positive" )
   }
   # step1
   if( !is.numeric( step1 ) ) {
      stop( "argument 'step1' must be numeric" )
   } else if( step1 <= 0 ) {
      stop( "argument 'step1' must be positive" )
   }
   # igrid2
   if( ! igrid2 %in% c( 1, 2 ) ) {
      stop( "argument 'igrid2' must be either '1' or '2'" )
   }
   # gridno
   if( !is.numeric( gridno ) ) {
      stop( "argument 'gridno' must be numeric" )
   } else if( gridno <= 0 ) {
      stop( "argument 'gridno' must be positive" )
   }
   # maxit
   if( !is.numeric( maxit ) ) {
      stop( "argument 'maxit' must be numeric" )
   } else if( maxit != round( maxit ) ) {
      stop( "argument 'maxit' must be an integer" )
   } else if( maxit <= 0 ) {
      stop( "argument 'maxit' must be positive" )
   }
   maxit <- as.integer( maxit )
   # ite
   if( ! ite %in% c( 0, 1 ) ) {
      stop( "argument 'ite' must be either '0' or '1'" )
   }

   ## ---- derived dimensions ----
   nCrossSection <- length( unique( data[[ crossSectionName ]] ) )
   nTimePeriods <- ifelse( is.null( timePeriodName ), 1,
      length( unique( data[[ timePeriodName ]] ) ) )
   nTotalObs <- nrow( data )
   nXvars <- length( xNames )
   nTLvars <- length( qxNames )
   # linear terms plus all quadratic/interaction terms of the qx regressors
   nXtotal <- nXvars + nTLvars * ( nTLvars + 1 ) / 2
   nZvars <- length( zNames )
   # in the TE effects model, the "eta" slot holds the number of Z regressors
   if( modelType == 2 ) {
      eta <- nZvars
   } else {
      eta <- ifelse( eta, "y", "n" )
   }
   # column at which the explanatory comment starts in the .ins file
   commentRow <- max( 16, nchar( dtaFile ) + 1 )

   ## ---- write the instruction file (.ins) ----
   cat( modelType, rep( " ", commentRow - 1 ),
      "1=ERROR COMPONENTS MODEL, 2=TE EFFECTS MODEL\n",
      file = file.path( path, insFile ), sep = "" )
   cat( dtaFile, rep( " ", commentRow - nchar( dtaFile ) ),
      "DATA FILE NAME\n", file = file.path( path, insFile ),
      append = TRUE, sep = "" )
   cat( outFile, rep( " ", commentRow - nchar( outFile ) ),
      "OUTPUT FILE NAME\n", file = file.path( path, insFile ),
      append = TRUE, sep = "" )
   cat( functionType, rep( " ", commentRow - 1 ),
      "1=PRODUCTION FUNCTION, 2=COST FUNCTION\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( ifelse( logDepVar, "y", "n" ), rep( " ", commentRow - 1 ),
      "LOGGED DEPENDENT VARIABLE (Y/N)\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( nCrossSection,
      rep( " ", commentRow - nchar( as.character( nCrossSection ) ) ),
      "NUMBER OF CROSS-SECTIONS\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( nTimePeriods,
      rep( " ", commentRow - nchar( as.character( nTimePeriods ) ) ),
      "NUMBER OF TIME PERIODS\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( nTotalObs,
      rep( " ", commentRow - nchar( as.character( nTotalObs ) ) ),
      "NUMBER OF OBSERVATIONS IN TOTAL\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( nXtotal,
      rep( " ", commentRow - nchar( as.character( nXtotal ) ) ),
      "NUMBER OF REGRESSOR VARIABLES (Xs)\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( ifelse( mu, "y", "n" ), rep( " ", commentRow - 1 ),
      "MU (Y/N) [OR DELTA0 (Y/N) IF USING TE EFFECTS MODEL]\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( eta, rep( " ", commentRow - nchar( as.character( eta ) ) ),
      "ETA (Y/N) [OR NUMBER OF TE EFFECTS REGRESSORS (Zs)]\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )
   cat( "n", rep( " ", commentRow - 1 ),
      "STARTING VALUES (Y/N)\n",
      file = file.path( path, insFile ), append = TRUE, sep = "" )

   ## ---- create table for the data file ----
   # cross section identifier
   dataTable <- matrix( data[[ crossSectionName ]], ncol = 1 )
   # time period identifier
   if( is.null( timePeriodName ) ) {
      dataTable <- cbind( dataTable, rep( 1, nrow( dataTable ) ) )
   } else {
      dataTable <- cbind( dataTable, data[[ timePeriodName ]] )
   }
   # endogenous variable
   dataTable <- cbind( dataTable, data[[ yName ]] )
   # exogenous variables
   if( nXvars > 0 ) {
      for( i in 1:nXvars ) {
         dataTable <- cbind( dataTable, data[[ xNames[ i ] ]] )
      }
   }
   # exogenous variables: quadratic and interaction terms
   if( nTLvars > 0 ) {
      for( i in 1:nTLvars ) {
         for( j in i:nTLvars ) {
            # off-diagonal products appear twice in a symmetric quadratic
            # form, hence the factor 2 for i != j
            dataTable <- cbind( dataTable,
               ifelse( i == j, 1 , 2 ) * ifelse( quadHalf, 0.5, 1 ) *
               data[[ qxNames[ i ] ]] * data[[ qxNames[ j ] ]] )
         }
      }
   }
   # variables explaining the efficiency level
   if( nZvars > 0 ) {
      for( i in 1:nZvars ) {
         dataTable <- cbind( dataTable, data[[ zNames[ i ] ]] )
      }
   }
   # write data file to disk
   write.table( dataTable, file = file.path( path, dtaFile ), row.names = FALSE,
      col.names = FALSE, sep = "\t" )

   ## ---- create the start-up file (front41.000) ----
   if( !is.null( startUpFile ) ) {
      cat( "KEY VALUES USED IN FRONTIER PROGRAM (VERSION 4.1)\n",
         file = file.path( path, startUpFile ) )
      cat( "NUMBER: DESCRIPTION:\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( iprint,
         rep( " ", 16 - nchar( as.character( iprint ) ) ),
         "IPRINT - PRINT INFO EVERY \"N\" ITERATIONS, 0=DO NOT PRINT\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      cat( indic,
         rep( " ", 16 - nchar( as.character( indic ) ) ),
         "INDIC - USED IN UNIDIMENSIONAL SEARCH PROCEDURE - SEE BELOW\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      # FRONTIER is a Fortran program: floating-point values must use the
      # Fortran "D" exponent marker instead of R's "e"
      tolString <- sub( "e", "D", format( tol, scientific = 2 ) )
      cat( tolString,
         rep( " ", 16 - nchar( tolString ) ),
         "TOL - CONVERGENCE TOLERANCE (PROPORTIONAL)\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      tol2String <- sub( "e", "D", format( tol2, scientific = 2 ) )
      cat( tol2String,
         rep( " ", 16 - nchar( tol2String ) ),
         "TOL2 - TOLERANCE USED IN UNI-DIMENSIONAL SEARCH PROCEDURE\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      bignumString <- sub( "e", "D", format( bignum, scientific = 2 ) )
      cat( bignumString,
         rep( " ", 16 - nchar( bignumString ) ),
         "BIGNUM - USED TO SET BOUNDS ON DEN & DIST\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      step1String <- sub( "e", "D", format( step1, scientific = 2 ) )
      cat( step1String,
         rep( " ", 16 - nchar( step1String ) ),
         "STEP1 - SIZE OF 1ST STEP IN SEARCH PROCEDURE\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      cat( igrid2,
         rep( " ", 16 - nchar( as.character( igrid2 ) ) ),
         "IGRID2 - 1=DOUBLE ACCURACY GRID SEARCH, 0=SINGLE\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      cat( gridno,
         rep( " ", 16 - nchar( as.character( gridno ) ) ),
         "GRIDNO - STEPS TAKEN IN SINGLE ACCURACY GRID SEARCH ON GAMMA\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      cat( maxit,
         rep( " ", 16 - nchar( as.character( maxit ) ) ),
         "MAXIT - MAXIMUM NUMBER OF ITERATIONS PERMITTED\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      cat( ite,
         rep( " ", 16 - nchar( as.character( ite ) ) ),
         "ITE - 1=PRINT ALL TE ESTIMATES, 0=PRINT ONLY MEAN TE\n",
         file = file.path( path, startUpFile ), append = TRUE, sep = "" )
      cat( "\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "THE NUMBERS IN THIS FILE ARE READ BY THE FRONTIER PROGRAM WHEN IT BEGINS\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "EXECUTION. YOU MAY CHANGE THE NUMBERS IN THIS FILE IF YOU WISH. IT IS\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "ADVISED THAT A BACKUP OF THIS FILE IS MADE PRIOR TO ALTERATION.\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "FOR MORE INFORMATION ON THESE VARIABLES SEE: COELLI (1996), CEPA WORKING\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "PAPER 96/07, UNIVERSITY OF NEW ENGLAND, ARMIDALE, NSW, 2351, AUSTRALIA.\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "INDIC VALUES:\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "indic=2 says do not scale step length in unidimensional search\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "indic=1 says scale (to length of last step) only if last step was smaller\n",
         file = file.path( path, startUpFile ), append = TRUE )
      cat( "indic= any other number says scale (to length of last step) \n",
         file = file.path( path, startUpFile ), append = TRUE )
   }

   ## ---- return all settings invisibly ----
   returnList <- list( data = dataTable,
      crossSectionName = crossSectionName,
      timePeriodName = timePeriodName,
      yName = yName,
      xNames = xNames,
      qxNames = qxNames,
      zNames = zNames,
      quadHalf = quadHalf,
      functionType = functionType,
      logDepVar = logDepVar,
      mu = mu,
      eta = eta,
      path = path,
      insFile = insFile,
      dtaFile = dtaFile,
      outFile = outFile,
      startUpFile = startUpFile,
      iprint = iprint,
      indic = indic,
      tol = tol,
      tol2 = tol2,
      bignum = bignum,
      step1 = step1,
      igrid2 = igrid2,
      gridno = gridno,
      maxit = maxit,
      ite = ite,
      modelType = modelType,
      nCrossSection = nCrossSection,
      nTimePeriods = nTimePeriods,
      nTotalObs = nTotalObs,
      nXtotal = nXtotal,
      nZvars = nZvars )

   class( returnList ) <- "front41WriteInput"
   invisible( returnList )
}
|
a0c0bd2ab4ef66e1345351894346c01340d29772
|
d8b48e3684cec7f383bb2e18563dd407747fa886
|
/results/2012-12-07/HaibTfbsA549Atf3V0422111Etoh02AlnRep0/HaibTfbsA549Atf3V0422111Etoh02AlnRep0.r
|
c45a1731251052ca8254d6baedd2be725ee37efe
|
[] |
no_license
|
yfu/rotation1
|
1fdc47eee512fda71a8f21608191233d1a4c0698
|
695cfaf0b0f7dc9344eef0f3db4d28752115ccdc
|
refs/heads/master
| 2021-01-23T00:14:36.512703
| 2013-01-19T20:50:56
| 2013-01-19T20:50:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,462
|
r
|
HaibTfbsA549Atf3V0422111Etoh02AlnRep0.r
|
# Plot p-values of the hypergeometric and binomial tests for one ChIP-seq
# experiment (HaibTfbsA549Atf3V0422111Etoh02AlnRep0, GO:0005160).
#
# NOTE(review): setwd() to an absolute user path makes the script
# non-portable; kept for fidelity, but consider running the script from
# the data directory instead.
setwd("/Users/yfu/Dropbox/Courses/Rotation/results/2012-12-07/HaibTfbsA549Atf3V0422111Etoh02AlnRep0")
# How to plot p-values of hyper and binom:
dev.new()
binom <- read.table("binom_p_value.txt", header = FALSE)
hyper <- read.table("hyper_p_value.txt", header = FALSE)
binom.x <- binom$V1
binom.y <- binom$V2
hyper.x <- hyper$V1
hyper.y <- hyper$V2
# First plot: hyper p-values (black, pch 16) against the left axis
plot(hyper.x, log(hyper.y), ylim = c(-12, -5), pch = 16, axes = FALSE,
     xlab = "", ylab = "", type = "b", col = "black",
     main = "HaibTfbsA549Atf3V0422111Etoh02AlnRep0 GO:0005160
log p-values of binom and hyper")
axis(2, ylim = c(-12, -5), col = "black")
mtext("log p-value of hyper", side = 2, line = 2.5)
box()
par(new = TRUE)
# (a stray no-argument log() call was removed here -- log() without
# arguments is a runtime error in R)
# Second plot: binom p-values (red, pch 15) against the right axis
plot(binom.x, log(binom.y), pch = 15, xlab = "", ylab = "",
     ylim = c(-40, -20), axes = FALSE, type = "b", col = "red")
mtext("log p-value of binom", side = 4, col = "red", line = 2.5)
axis(4, ylim = c(-40, -20), col = "red", col.axis = "red")
axis(1, binom.x)
mtext("strength", side = 1, col = "black", line = 2.5)
# legend labels fixed to match the plotted symbols:
# hyper = black pch 16, binom = red pch 15 (they were swapped before)
legend(0.05, -12, legend = c("p-value of hyper", "p-value of binom"),
       text.col = c("black", "red"), pch = c(16, 15), col = c("black", "red"))
dev.new()
# Ratio plot: log of hyper over binom p-values versus strength
plot(binom.x, log(hyper.y / binom.y), pch = 16, ylim = c(10, 35),
     axes = FALSE, type = "b", col = "black",
     main = "HaibTfbsA549Atf3V0422111Etoh02AlnRep0 GO:0005160
log (p-value of hyper over p-value of binom)")
# x axis
axis(1, binom.x)
axis(2, 10:35)
mtext("Strength", side = 1, col = "black", line = 2.5)
# mtext("log(hyper.y/binom.y)", side = 2, col = "black", line = 2.5)
|
f215f54e0cfadadac1e38a80e79ca965fe4771a2
|
6a5c22469f612720f7cbed40864c84345885598d
|
/man/ftdoi-package.Rd
|
8223cb083d09fc5e35e8893d1792eb3a178a3b6d
|
[
"MIT"
] |
permissive
|
sckott/ftdoi
|
7dbdf53337453aa0b84b9a196e82043b961a8556
|
d713797475d355266dea704d8286f7d0129dfa5c
|
refs/heads/master
| 2022-12-27T01:18:24.916132
| 2020-10-16T20:59:24
| 2020-10-16T20:59:24
| 116,639,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 276
|
rd
|
ftdoi-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ftdoi-package.R
\docType{package}
\name{ftdoi-package}
\alias{ftdoi-package}
\alias{ftdoi}
\title{ftdoi}
\description{
Interface to the ftdoi.org API for publisher url patterns.
}
\keyword{package}
|
0513138b35bb7b76e28f6b602892f0b55c34d580
|
3399fe1fd1a9fd40c2a2f25810fe114818156f0d
|
/gen-plots.r
|
71fa299c634f79e0dd106126dc4836d568c532b7
|
[
"MIT"
] |
permissive
|
yousefamar/trace-analysis-scripts
|
694b3617f5aa61cd1dae47642e7bf37ba6051bb0
|
6f15612386dd2b995f91ba82061b424e188abed1
|
refs/heads/master
| 2021-01-24T00:09:13.834262
| 2018-02-24T16:06:07
| 2018-02-24T16:06:07
| 122,754,813
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,591
|
r
|
gen-plots.r
|
library(ggplot2)
library(sitools)
# Map known MAC addresses to human-readable device names and drop the
# router's own rows ("TP Link MR6400") from the data.
#
# Unrecognised MAC addresses are left unchanged. Several addresses map to
# the same label ("James' iPad") because that device changed its MAC.
mac2name <- function(data) {
  device_names <- c(
    "01:00:5e:7f:ff:fa" = "Multicast",
    "00:17:88:29:55:4d" = "Hue Bridge",
    "88:4a:ea:dd:b4:7c" = "Neato Robot Vac",
    "48:74:6e:6a:fb:47" = "iPhone 5s",
    "d0:03:4b:50:75:44" = "Apple TV",
    # NOTE(review): this upper-case address only differs in case from the
    # "James' iPad" entry below; exact string matching treats them as
    # distinct MACs, as the original code did -- verify that is intended.
    "78:4F:43:6E:F4:b1" = "MacBook Pro",
    "44:65:0d:b4:b2:f5" = "Amazon Echo",
    "f8:f0:05:f7:70:21" = "WINC-70-21",
    "78:4f:43:6e:f4:b1" = "James' iPad",  # (1)
    "18:65:90:43:0c:58" = "James' iPad",  # (2)
    "b0:70:2d:e4:1e:82" = "James' iPad",  # (3)
    "88:30:8a:77:5f:cc" = "James' iPad",  # (4)
    "78:7e:61:90:e4:c0" = "James' iPad",  # (5)
    "70:ee:50:12:84:aa" = "James' iPad",  # (6)
    "84:89:ad:3e:33:66" = "James' iPad",  # (7)
    "6c:19:c0:81:a9:cb" = "James' iPad",  # (8)
    "f0:9f:c2:30:7f:59" = "Ubiquiti Access Point",
    "ff:ff:ff:ff:ff:ff" = "Broadcast",
    "3c:46:d8:bc:9b:f4" = "TP Link MR6400",
    "08:02:8e:97:da:3d" = "DLink Switch",
    "50:c7:bf:0b:0a:71" = "2x TP Link HS110",  # (1)
    "50:c7:bf:0b:08:2e" = "2x TP Link HS110",  # (2)
    "d0:52:a8:45:41:f6" = "SmartThings Hub",
    "f0:fe:6b:28:ca:e2" = "Foobot"
  )
  mac <- as.character(data$mac)
  known <- mac %in% names(device_names)
  mac[known] <- device_names[mac[known]]
  data$mac <- mac
  data[data$mac != "TP Link MR6400", ]
}
# ---- Plot 1: bytes transmitted per device, stacked by service ----
data = mac2name(read.csv('mac-service-bytes.csv'))
data$service = as.character(data$service)
data$service[data$service == '-'] = 'other'   # unlabelled services get a catch-all bucket
ggplot(data, aes(mac)) +
  labs(x = "Originating Device", y = "Transmitted (bytes)") +
  geom_bar(aes(weight = bytes, fill = service), las = 2) +
  #geom_bar(aes(reorder(mac, -bytes), bytes, fill = service), las = 2, stat='identity') +
  scale_y_continuous(labels=f2si) +             # f2si (sitools): SI-prefixed axis labels (k, M, G)
  theme_bw(base_size=14) +
  #scale_fill_brewer(palette = 'Set2') +
  theme(
    #panel.grid.major = element_line(colour = "white"),
    #panel.grid.minor = element_line(colour = "white"),
    axis.text = element_text(size = 16),
    axis.text.x = element_text(angle=90,hjust=1,vjust=0.5),  # vertical labels: MACs/names are long
    #axis.title = element_text(size = 20, face="bold")
    axis.title = element_text(size = 18),
    legend.text=element_text(size=14)
  )
ggsave('mac-service-bytes.pdf')

# ---- Plot 2: bytes transmitted per device, stacked by transport protocol ----
data = mac2name(read.csv('mac-protocol-bytes.csv'))
ggplot(data, aes(mac)) +
  labs(x = "Originating Device", y = "Transmitted (bytes)") +
  geom_bar(aes(weight = bytes, fill = protocol), las = 2) +
  scale_y_continuous(labels=f2si) +
  theme_bw(base_size=14) +
  #scale_fill_brewer(palette = 'Set2') +
  theme(
    #panel.grid.major = element_line(colour = "white"),
    #panel.grid.minor = element_line(colour = "white"),
    axis.text = element_text(size = 16),
    axis.text.x = element_text(angle=90,hjust=1,vjust=0.5),
    #axis.title = element_text(size = 20, face="bold")
    axis.title = element_text(size = 18),
    legend.text=element_text(size=14)
  )
ggsave('mac-protocol-bytes.pdf')

# Responder-side variant of plot 1, kept for reference but currently disabled.
#data = mac2name(read.csv('mac-service-bytes-resp.csv'))
#ggplot(data, aes(mac)) +
#  labs(x = "Responding Device", y = "Transmitted (bytes)") +
#  geom_bar(aes(weight = bytes, fill = service), las = 2) +
#  scale_y_continuous(labels=f2si) +
#  theme_bw(base_size=14) +
#  theme(
#    #panel.grid.major = element_line(colour = "white"),
#    #panel.grid.minor = element_line(colour = "white"),
#    axis.text = element_text(size = 16),
#    axis.text.x = element_text(angle=90,hjust=1,vjust=0.5),
#    #axis.title = element_text(size = 20, face="bold")
#  )
#ggsave('mac-service-bytes-resp.pdf')

# ---- Plot 3: internal vs external traffic per device ----
# Two CSVs (external / internal) are tagged with a `type` column and stacked.
data0 = mac2name(read.csv('orig-resp-bytes-extern.csv'))
data0$type = rep('external', nrow(data0))
data1 = mac2name(read.csv('orig-resp-bytes-intern.csv'))
data1$type = rep('internal', nrow(data1))
data = rbind(data0, data1)
ggplot(data, aes(mac)) +
  labs(x = "Device", y = "Traffic (bytes)") +
  geom_bar(aes(weight = bytes, fill = type), las = 2) +
  scale_y_continuous(labels=f2si) +
  theme_bw(base_size=14) +
  #scale_fill_brewer(palette = 'Set2') +
  theme(
    #panel.grid.major = element_line(colour = "white"),
    #panel.grid.minor = element_line(colour = "white"),
    axis.text = element_text(size = 16),
    axis.text.x = element_text(angle=90,hjust=1,vjust=0.5),
    #axis.title = element_text(size = 20, face="bold")
    axis.title = element_text(size = 18),
    legend.text=element_text(size=14)
  )
ggsave('mac-type-bytes.pdf')
|
c8bc706512a702a9284a1c68b0cef651e916f233
|
38c7e29bb938577f2160b90cb10776880330b12d
|
/automl_079855_20210528.R
|
285c8a356e7eec3c738a91cce2212333286ee7d8
|
[] |
no_license
|
aka7h/AV-Credit-Card-Lead-Prediction
|
17d3f5207c854b92968cbb484a67407c65f44072
|
86beba254951c043d7044b7cd316b9f20c520910
|
refs/heads/main
| 2023-05-07T19:40:27.395123
| 2021-06-01T08:40:33
| 2021-06-01T08:40:33
| 372,311,508
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,538
|
r
|
automl_079855_20210528.R
|
library(tidyverse)
library(tidymodels)
library(h2o)
library(themis)
# Credit Card Lead Prediction (AV JobAthon May 2021): preprocess with recipes,
# train an H2O AutoML leaderboard, and write a submission CSV.
train <- read_csv("../input/jobathon-may-2021-credit-card-lead-prediction/train.csv")
test <- read_csv("../input/jobathon-may-2021-credit-card-lead-prediction/test.csv")

# Keep the test IDs before they are dropped: they are needed for the submission.
test_ids <- test$ID

train <- train %>% mutate_if(is.character, as.factor) %>% select(-ID) %>%
  mutate(Is_Lead = factor(Is_Lead))
test <- test %>% mutate_if(is.character, as.factor) %>% select(-ID)

head(train)
sapply(train, function(x) sum(is.na(x)))  # per-column NA counts

splits <- initial_split(train, prop = 0.8, strata = Is_Lead)

# Shared preprocessing steps: log-transform the account balance, bag-impute
# Credit_Product from demographic predictors, one-hot encode nominal columns
# and drop near-zero-variance predictors.
add_common_steps <- function(rec) {
  rec %>%
    step_log(Avg_Account_Balance) %>%
    step_impute_bag(Credit_Product,
                    impute_with = imp_vars(Age, Vintage, Occupation,
                                           Avg_Account_Balance),
                    options = list(nbagg = 5, keepX = FALSE)) %>%
    step_dummy(all_nominal()) %>%
    step_nzv(all_predictors()) %>%
    prep()
}

# Recipe with the outcome (train/validation data).
pre_proc_1 <- function(x, ...) {
  add_common_steps(x %>% recipe(Is_Lead ~ .))
}
# Recipe without an outcome (test data has no Is_Lead column).
pre_proc_2 <- function(x, ...) {
  add_common_steps(x %>% recipe())
}

tr_bag_rec_d <- pre_proc_1(training(splits)) %>% juice(.)
te_bag_rec_d <- pre_proc_1(testing(splits)) %>% juice(.)
test_bag_rec_d <- pre_proc_2(test) %>% juice(.)

# step_dummy renames the outcome to Is_Lead_X1; H2O needs a factor for classification.
tr_bag_rec_d <- tr_bag_rec_d %>% mutate(Is_Lead_X1 = factor(Is_Lead_X1))
te_bag_rec_d <- te_bag_rec_d %>% mutate(Is_Lead_X1 = factor(Is_Lead_X1))

h2o.init()
tr_dd <- as.h2o(tr_bag_rec_d)
va_dd <- as.h2o(te_bag_rec_d)
te_dd <- as.h2o(test_bag_rec_d)

X <- colnames(te_dd)
Y <- "Is_Lead_X1"

almname <- paste('ak_h2o_automl', format(Sys.time(), "%d%H%M%S"), sep = '_')
autoML <- h2o.automl(X, Y, training_frame = tr_dd,
                     validation_frame = va_dd, seed = 223,
                     max_models = 20, stopping_metric = c("AUC"), balance_classes = TRUE)
autoML@leader

# as_tibble replaces the deprecated as.tibble; take the top-ranked model.
leader_name <- as_tibble(autoML@leaderboard) %>% slice(1) %>% pull(model_id)
leader_model <- h2o.getModel(leader_name)
save(autoML, file = "automlv1.rda")

yhat <- h2o.predict(leader_model, te_dd) %>% as_tibble()

# BUG FIX: the original referenced `sample_sub`, which was never defined and
# would have stopped the script here; use the IDs captured from test.csv
# (predictions are in the same row order as the test frame).
submission <- data.frame('ID' = test_ids, 'Is_Lead' = yhat$p1)
# %S (seconds) instead of the non-portable lowercase %s in the timestamp.
filename <- paste('ak_h20_automl_bag_imp_nvz_dummy', format(Sys.time(), "%Y%m%d%H%M%S"), sep = '_')
write.csv(submission, paste0(filename, '.csv', collapse = ''), row.names = FALSE)
|
495898c8985596aee4e911c52faf6ee23c3a4692
|
d84053e1e3f314c3a8789c56c37dc7e38a3c4cbd
|
/R/ttestcalc.R
|
9d30fe8c1b7d70c9a9e766a947a05942da622eb7
|
[] |
no_license
|
w142236/math4753
|
aafcbec99661648d9c57c2ddab912fefaf4525a0
|
e7ef457afc1bf604bce262f2958098720b218355
|
refs/heads/master
| 2020-12-14T19:33:05.932769
| 2020-04-16T23:56:13
| 2020-04-16T23:56:13
| 234,847,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
ttestcalc.R
|
#' ttestcalc()
#'
#' @description Computes the one-sample t statistic for a sample and the
#'   rejection-region boundaries of the two-sided t test at significance
#'   level \code{alpha}.
#'
#' @param x a numeric vector containing the sample (length >= 2)
#' @param alpha the probability of a type I error (significance level)
#' @param mu the population mean under the null hypothesis
#'
#' @return A list with components \code{t} (the observed t statistic),
#'   \code{RLeft} (\code{qt(alpha/2, n-1)}, the lower critical value) and
#'   \code{RRight} (\code{qt(1-alpha/2, n-1)}, the upper critical value).
#'   The null hypothesis is retained when \code{RLeft <= t <= RRight}.
#' @export
#'
#' @examples x = rnorm(n = 30, mean = 5, sd =10);ttestcalc(x, alpha = .05, mu = 5)
ttestcalc = function(x, alpha, mu){
  # sd() and the n-1 degrees of freedom require at least two observations.
  stopifnot(is.numeric(x), length(x) >= 2)
  n = length(x)
  # Observed t statistic: (xbar - mu) / (s / sqrt(n)).
  t = (mean(x) - mu)/(sd(x)/sqrt(n))
  t_negAlphaOver2 = qt(alpha/2, n-1)
  t_AlphaOver2 = qt(1-alpha/2, n-1)
  return(list(t = t, RLeft = t_negAlphaOver2, RRight = t_AlphaOver2))
}
|
18dd09f27fd516af15ab752163555e7444f8aee1
|
6a58629e00231acbf4fc114cc853111b40ab460e
|
/inst/unitTests/test_qCount.R
|
6cc5501ba6a3025756dbd054705df10f32bda031
|
[] |
no_license
|
zzygyx9119/QuasR
|
96d34bff3d1a9e88a1dad4bb5581801cb2e7c499
|
c8c97dac9c26751395cfc278e7a82f9d5bce15a0
|
refs/heads/master
| 2020-05-29T11:01:41.849901
| 2015-10-13T19:59:53
| 2015-10-13T19:59:53
| 46,695,475
| 1
| 0
| null | 2015-11-23T03:55:43
| 2015-11-23T03:55:43
| null |
UTF-8
|
R
| false
| false
| 33,736
|
r
|
test_qCount.R
|
# initialization of QuasR test environment
# allows: runTestFile("test_file.R", rngKind="default", rngNormalKind="default", verbose=1L)

# Load the package's unit-test helper functions once per session.
if(!existsFunction("createFastaReads"))
    source(system.file(package="QuasR", "unitTests", "help_function.R"))
# Copy the packaged example data into the working directory once.
if(!file.exists("./extdata"))
    file.copy(system.file(package="QuasR", "extdata"), ".", recursive=TRUE)

# Shared fixtures used by the tests below (created by the sourced helpers).
projectSingle <- createProjectSingleMode()
projectPaired <- createProjectPairedMode()
projectAllelic <- createProjectAllelic()
projectSingleAllelic <- createProjectSingleMode(allelic=TRUE)
tilingRegion <- createTilingRegion()
gtfRegion <- createGtfRegion()

td <- tempdir()  # scratch directory for alignments created by qAlign below
genomeFile <- file.path("extdata", "hg19sub.fa")             # genome FASTA passed to qAlign
sampleFile <- file.path("extdata", "samples_rna_single.txt")
snpFile <- file.path("extdata", "hg19sub_snp.txt")
# RUnit fixture: runs before each test_...() and (re)creates the parallel
# cluster object `clObj` if it is missing, of the wrong class, or dead.
.setUp <- function() { # runs before each test_...()
    # make sure clObj exists and is a working cluster object;
    # the clusterEvalQ(TRUE) probe fails (try-error) when workers have died
    if(!exists("clObj", envir=.GlobalEnv) ||
       !inherits(clObj, "cluster") ||
       inherits(try(all(unlist(clusterEvalQ(clObj, TRUE))), silent=TRUE), "try-error")) {
        # `<<-` deliberately publishes the cluster to the global environment
        # so it survives across individual tests
        clObj <<- makeCluster(getOption("QuasR_nb_cluster_nodes",2))
        clusterEvalQ(clObj, library("QuasR"))
    }
}
## Test alignment selection based on mapping quality
test_mapq <- function() {
    project <- projectSingle
    query <- GRanges(c("chrV"), IRanges(start=1, width=800))
    # mapqMin/mapqMax outside the valid 0..255 range must be rejected
    checkException(qCount(project, query, mapqMin=-1))
    checkException(qCount(project, query, mapqMax=256))
    # reference mapq values read straight from the BAM file; NA is mapped to 255
    mapq <- scanBam(alignments(project)$genome$FileName[1])[[1]]$mapq
    mapq[is.na(mapq)] <- 255
    # counts restricted by mapqMin/mapqMax must equal manual filtering of the mapq vector
    for(mymin in seq(10,250,by=40))
        checkTrue(qCount(project[1], query, mapqMin=mymin)[1,2] == sum(mapq >= mymin))
    for(mymax in seq(10,250,by=40))
        checkTrue(qCount(project[1], query, mapqMax=mymax)[1,2] == sum(mapq <= mymax))
    # the <=cut and >=cut+1 selections must partition the alignments exactly
    for(mycut in seq(10,250,by=40))
        checkIdentical(length(mapq) - qCount(project[1], query, mapqMax=mycut)[1,2],
                       qCount(project[1], query, mapqMin=mycut+1)[1,2])
}
## Test parameter shift, selectReadPosition
# Checks qCount's shift="halfInsert" (count fragment midpoints of read pairs)
# and integer shift values, combined with selectReadPosition="start"/"end".
# The query windows 1:20, 21:40, ... cover blocks of reads in the fixture BAM
# with different pair configurations (fr/ff/rr/rf, per the inline comments);
# resSoll holds the hand-computed expected counts per 1bp window.
test_shift <- function() {
    project <- projectPaired
    ## qCount with SmartShift
    #fr R1->left R2->right
    query <- GRanges(c("chrV"), IRanges(start=1:20, width=1), "+")
    resSoll <- rep(0,20)
    resSoll[10] <- 4
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 1: qCount with smartShift")
    resSoll <- rep(0,20)
    resSoll[c(6,14)] <- 2
    res <- qCount(project, query, selectReadPosition="end", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 2: qCount with smartShift")
    #fr R2->left R1->right
    query <- GRanges("chrV", IRanges(start=21:40, width=1), "+")
    resSoll <- rep(0,20)
    resSoll[10] <- 4
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 3: qCount with smartShift")
    resSoll <- rep(0,20)
    resSoll[c(6,14)] <- 2
    res <- qCount(project, query, selectReadPosition="end", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 4: qCount with smartShift")
    #ff R1->left R2->right
    query <- GRanges("chrV", IRanges(start=41:60, width=1), "+")
    resSoll <- rep(0,20)
    resSoll[c(6,10)] <- 2
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 5: qCount with smartShift")
    resSoll <- rep(0,20)
    resSoll[c(10,14)] <- 2
    res <- qCount(project, query, selectReadPosition="end", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 6: qCount with smartShift")
    #rr R2->left R1->right
    query <- GRanges("chrV", IRanges(start=61:80, width=1), "+")
    resSoll <- rep(0,20)
    resSoll[c(10,14)] <- 2
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 7: qCount with smartShift")
    resSoll <- rep(0,20)
    resSoll[c(6,10)] <- 2
    res <- qCount(project, query, selectReadPosition="end", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 8: qCount with smartShift")
    #rf R1->left R2->right
    query <- GRanges("chrV", IRanges(start=81:99, width=1), "+")
    resSoll <- rep(0,19)
    resSoll[c(6,14)] <- 2
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 9: qCount with smartShift")
    resSoll <- rep(0,19)
    resSoll[10] <- 4
    res <- qCount(project, query, selectReadPosition="end", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 10: qCount with smartShift")
    ## qCount with integer as shift
    # expected counts are rebuilt from the raw alignments: the counted position
    # is start/end of each read, shifted towards 3' (strand-aware) by `shift`
    aln <- GenomicAlignments::readGAlignments(project@alignments$FileName)
    query <- GRanges(c("chrV"), IRanges(start=1:99, width=1), "+")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", start(aln), end(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 1: qCount with shift and selectReadPosition")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", start(aln)+1, end(aln)-1))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=1, orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 2: qCount with shift and selectReadPosition")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", start(aln)-1, end(aln)+1))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=-1, orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 3: qCount with shift and selectReadPosition")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", end(aln), start(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 4: qCount with shift and selectReadPosition")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", end(aln)+1, start(aln)-1))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=1, orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 5: qCount with shift and selectReadPosition")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", end(aln)-1, start(aln)+1))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=-1, orientation="any")[,-1]
    checkTrue(all(resSoll == res), "Test 6: qCount with shift and selectReadPosition")
}
# Allele-specific counting: qCount must return three columns per sample
# (R = reference, U = unknown, A = alternative) and honour shift settings.
test_shift_allelic <- function(){
    project <- projectAllelic
    query <- GRanges(c("chrV"), IRanges(start=1:20, width=1), "+")
    # no shift
    resSoll <- rep(0,20)
    resSoll[c(4,16)] <- 1
    res <- qCount(project, query, selectReadPosition="start", orientation="any")[,-1]
    # same expected counts in all three allele columns
    checkTrue(all(resSoll == res[,1]), "Test 1: qCount allele specific")
    checkTrue(all(resSoll == res[,2]), "Test 2: qCount allele specific")
    checkTrue(all(resSoll == res[,3]), "Test 3: qCount allele specific")
    # column names must follow the <sample>_R/_U/_A convention
    colname <- paste(rep(project@alignments$SampleName, each=3), c("R","U","A"), sep="_")
    checkTrue(all(colname == colnames(res)), "Test 4: qCount allele specific")
    # smart shift
    resSoll <- rep(0,20)
    resSoll[10] <- 2
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", orientation="any")[,-1]
    checkTrue(all(resSoll == res[,1]), "Test 5: qCount allele specific")
    checkTrue(all(resSoll == res[,2]), "Test 6: qCount allele specific")
    checkTrue(all(resSoll == res[,3]), "Test 7: qCount allele specific")
    # shift
    resSoll <- rep(0,20)
    resSoll[c(6,14)] <- 1
    res <- qCount(project, query, selectReadPosition="start", shift=2, orientation="any")[,-1]
    checkTrue(all(resSoll == res[,1]), "Test 8: qCount allele specific")
    checkTrue(all(resSoll == res[,2]), "Test 9: qCount allele specific")
    checkTrue(all(resSoll == res[,3]), "Test 10: qCount allele specific")
}
# Tests the `orientation` argument ("same"/"opposite") against expected counts
# rebuilt from the raw alignments, for query strands "+", "-" and "*".
test_orientation <- function() {
    project <- projectPaired
    aln <- GenomicAlignments::readGAlignments(project@alignments$FileName)
    # query on "+": "same" selects plus-strand reads, "opposite" minus-strand reads
    query <- GRanges(c("chrV"), IRanges(start=1:99, width=1), "+")
    resSoll <- rep(0,99)
    pos <- Rle(start(aln[strand(aln)=="+"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="same")[,-1]
    checkTrue(all(resSoll == res), "Test 1: qCount with orientation and query strand")
    resSoll <- rep(0,99)
    pos <- Rle(end(aln[strand(aln)=="-"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="opposite")[,-1]
    checkTrue(all(resSoll == res), "Test 2: qCount with orientation and query strand")
    resSoll <- rep(0,99)
    pos <- Rle(end(aln[strand(aln)=="+"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="same")[,-1]
    checkTrue(all(resSoll == res), "Test 3: qCount with orientation and query strand")
    resSoll <- rep(0,99)
    pos <- Rle(start(aln[strand(aln)=="-"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="opposite")[,-1]
    checkTrue(all(resSoll == res), "Test 4: qCount with orientation and query strand")
    # query on "-": the roles of "same" and "opposite" are swapped
    query <- GRanges(c("chrV"), IRanges(start=1:99, width=1), "-")
    resSoll <- rep(0,99)
    pos <- Rle(start(aln[strand(aln)=="+"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="opposite")[,-1]
    checkTrue(all(resSoll == res), "Test 5: qCount with orientation and query strand")
    resSoll <- rep(0,99)
    pos <- Rle(end(aln[strand(aln)=="-"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="same")[,-1]
    checkTrue(all(resSoll == res), "Test 6: qCount with orientation and query strand")
    resSoll <- rep(0,99)
    pos <- Rle(end(aln[strand(aln)=="+"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="opposite")[,-1]
    checkTrue(all(resSoll == res), "Test 7: qCount with orientation and query strand")
    resSoll <- rep(0,99)
    pos <- Rle(start(aln[strand(aln)=="-"]))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="same")[,-1]
    checkTrue(all(resSoll == res), "Test 8: qCount with orientation and query strand")
    # query on "*": both orientations count reads from both strands identically
    query <- GRanges(c("chrV"), IRanges(start=1:99, width=1), "*")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", start(aln), end(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="same")[,-1]
    checkTrue(all(resSoll == res), "Test 9: qCount with orientation and query strand")
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="opposite")[,-1]
    checkTrue(all(resSoll == res), "Test 10: qCount with orientation and query strand")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", end(aln), start(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="same")[,-1]
    checkTrue(all(resSoll == res), "Test 11: qCount with orientation and query strand")
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="opposite")[,-1]
    checkTrue(all(resSoll == res), "Test 12: qCount with orientation and query strand")
}
# Tests useRead="first"/"last": counts must match references rebuilt from
# only the first/second mates of the paired-end BAM.
# Fix: spell out TRUE/FALSE instead of the reassignable shortcuts T/F.
test_useRead <- function() {
    project <- projectPaired
    # reference: first mates only, read directly from the BAM file
    aln <- GenomicAlignments::readGAlignments(project@alignments$FileName,
                param=ScanBamParam(flag=scanBamFlag(isFirstMateRead=TRUE, isSecondMateRead=FALSE)))
    query <- GRanges(c("chrV"), IRanges(start=1:99, width=1), "+")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", start(aln), end(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="any", useRead="first")[,-1]
    checkTrue(all(resSoll == res), "Test 1: qCount with useRead")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", end(aln), start(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="any", useRead="first")[,-1]
    checkTrue(all(resSoll == res), "Test 2: qCount with useRead")
    # reference: second mates only
    aln <- GenomicAlignments::readGAlignments(project@alignments$FileName,
                param=ScanBamParam(flag=scanBamFlag(isFirstMateRead=FALSE, isSecondMateRead=TRUE)))
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", start(aln), end(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="start", shift=0, orientation="any", useRead="last")[,-1]
    checkTrue(all(resSoll == res), "Test 3: qCount with useRead")
    resSoll <- rep(0,99)
    pos <- Rle(ifelse(strand(aln)=="+", end(aln), start(aln)))
    resSoll[runValue(pos)] <- runLength(pos)
    res <- qCount(project, query, selectReadPosition="end", shift=0, orientation="any", useRead="last")[,-1]
    checkTrue(all(resSoll == res), "Test 4: qCount with useRead")
}
# Tests maxInsertSize: with shift="halfInsert" only pairs whose insert size
# does not exceed the limit contribute fragment-midpoint counts.
test_maxInsertSize <- function() {
    project <- projectPaired
    query <- GRanges(c("chrV"), IRanges(start=1:20, width=1), "*")
    # limit 0: no pair qualifies, all windows must count zero
    resSoll <- rep(0,20)
    #resSoll[c(10, 30, 46, 50, 70, 74, 86, 94)] <- c(4,4,2,2,2,2,2,2)
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", maxInsertSize=0)[,-1]
    checkTrue(all(resSoll == res), "Test 1: qCount with maxInsertSize")
    # limit 14: the short pairs qualify; their midpoints fall at position 10
    resSoll[10] <- 4
    res <- qCount(project, query, selectReadPosition="start", shift="halfInsert", maxInsertSize=14)[,-1]
    checkTrue(all(resSoll == res), "Test 2: qCount with maxInsertSize")
}
# Tests qCount with a GRanges query: named regions are collapsed by row name,
# unnamed regions are counted individually; combined with strand selection
# and masking. Fix: spell out TRUE/FALSE instead of T/F.
test_query_GRanges <- function() {
    project <- projectSingle
    ## NO masking
    ## reduce region by query rownames
    region <- tilingRegion
    strand(region) <- "*"
    resSoll <- matrix(0, nrow=4, ncol=3, byrow=TRUE)
    resSoll[,1] = c(300,300,300,250)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 1: qCount orientation=same")
    strand(region) <- "+"
    resSoll[,3] = 0
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 2: qCount orientation=same")
    strand(region) <- "-"
    resSoll[,2] = 0
    resSoll[,3] = 3*resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 3: qCount orientation=same")
    ## NO reduce region by query rownames
    names(region) <- NULL
    strand(region) <- "*"
    resSoll <- matrix(0, nrow=12, ncol=3, byrow=TRUE)
    resSoll[,1] = c(rep(100,11),50)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 4: qCount orientation=same")
    strand(region) <- "+"
    resSoll[,3] = 0
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 5: qCount orientation=same")
    strand(region) <- "-"
    resSoll[,2] = 0
    resSoll[,3] = 3*resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 6: qCount orientation=same")
    ## Masking Test 1
    # mask out the H4 tiling windows; alignments there must not be counted
    mask <- tilingRegion[names(tilingRegion) == "H4"]
    ## reduce region by query rownames
    region <- tilingRegion
    strand(region) <- "+"
    resSoll <- matrix(0, nrow=4, ncol=3, byrow=TRUE)
    resSoll[,1] = c(200,300,150,0)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRanges Test 7: qCount with masking and orientation=any")
    strand(region) <- "+"
    resSoll[,3] = 0
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 8: qCount with masking and orientation=same")
    strand(region) <- "-"
    resSoll[,2] = 0
    resSoll[,3] = 3*resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 9: qCount with masking and orientation=same")
    ## NO reduce region by query rownames
    names(region) <- NULL
    strand(region) <- "+"
    resSoll <- matrix(0, nrow=12, ncol=3, byrow=TRUE)
    resSoll[,1] = c(100,100,50,0,50,100,50,0,50,100,50,0)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRanges Test 10: qCount with masking and orientation=any")
    strand(region) <- "+"
    resSoll[,3] = 0
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 11: qCount with masking and orientation=same")
    strand(region) <- "-"
    resSoll[,2] = 0
    resSoll[,3] = 3*resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 12: qCount with masking and orientation=same")
    ## Masking Test 2
    # explicit coordinate mask overlapping parts of the tiling windows
    mask <- GRanges(seqnames="chrV", IRanges(c(361,401), c(390,700)))
    ## reduce region by query rownames
    region <- tilingRegion
    strand(region) <- "+"
    resSoll <- matrix(0, nrow=4, ncol=3, byrow=TRUE)
    resSoll[,1] = c(170,120,100,100)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRanges Test 13: qCount with masking and orientation=any")
    resSoll[,3] = 0
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 14: qCount with masking and orientation=same")
    ## NO reduce region by query rownames
    names(region) <- NULL
    strand(region) <- "+"
    resSoll <- matrix(0, nrow=12, ncol=3, byrow=TRUE)
    resSoll[,1] = c(100,100,100,100,70,20,0,0,0,0,0,0)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRanges Test 15: qCount with masking and orientation=any")
    resSoll[,3] = 0
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 16: qCount with masking and orientation=same")
}
# Tests qCount with a GRangesList query (hit counted once per list element),
# with strand selection and masking. Fix: spell out TRUE/FALSE instead of T/F.
test_query_GRangesList <- function() {
    project <- projectSingle
    region <- tilingRegion
    strand(region) <- "*"
    regionList <- split(region, names(region))
    resSoll <- matrix(0, nrow=4, ncol=3, byrow=TRUE)
    resSoll[,1] = c(300,150,150,0)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, regionList, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 1: qCount orientation=same")
    strand(region) <- "+"
    regionList <- split(region, names(region))
    resSoll[,3] = 0
    res <- qCount(project, regionList, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 2: qCount orientation=same")
    strand(region) <- "-"
    regionList <- split(region, names(region))
    resSoll[,2] = 0
    resSoll[,3] = 3*resSoll[,1]
    res <- qCount(project, regionList, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 3: qCount orientation=same")
    ## Masking Test 1
    # mask out the H4 windows
    mask <- tilingRegion[names(tilingRegion) == "H4"]
    region <- tilingRegion
    strand(region) <- "+"
    regionList <- split(region, names(region))
    resSoll <- matrix(0, nrow=4, ncol=3, byrow=TRUE)
    resSoll[,1] = c(200,150,0,0)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRangesList Test 4: qCount with masking and orientation=any")
    strand(region) <- "+"
    regionList <- split(region, names(region))
    resSoll[,3] = 0
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 5: qCount with masking and orientation=same")
    strand(region) <- "-"
    regionList <- split(region, names(region))
    resSoll[,2] = 0
    resSoll[,3] = 3*resSoll[,1]
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 6: qCount with masking and orientation=same")
    ## Masking Test 2
    # explicit coordinate mask
    mask <- GRanges(seqnames="chrV", IRanges(c(361,401), c(390,700)))
    region <- tilingRegion
    strand(region) <- "+"
    regionList <- split(region, names(region))
    resSoll <- matrix(0, nrow=4, ncol=3, byrow=TRUE)
    resSoll[,1] = c(170,50,50,0)
    resSoll[,c(2,3)] = 3*resSoll[,1]
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRangesList Test 7: qCount with masking and orientation=any")
    regionList <- split(region, names(region))
    resSoll[,3] = 0
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 8: qCount with masking and orientation=same")
}
# Allele-specific variant of test_query_GRanges: result has 7 columns
# (width + 3 allele columns for each of 2 samples... as laid out in resSoll).
# Fix: spell out TRUE/FALSE instead of T/F.
test_query_GRanges_allelic <- function() {
    project <- projectSingleAllelic
    ## NO masking
    ## reduce region by query rownames
    region <- tilingRegion
    strand(region) <- "*"
    resSoll <- matrix(0, nrow=4, ncol=7, byrow=TRUE)
    resSoll[,1] = c(300,300,300,250)
    resSoll[,c(2,3,4,5,6,7)] = resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 1: qCount allele specific orientation=same")
    strand(region) <- "+"
    resSoll[,c(5,6,7)] = 0
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 2: qCount allele specific orientation=same")
    strand(region) <- "-"
    resSoll[,c(2,3,4)] = 0
    resSoll[,c(5,6,7)] = resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 3: qCount allele specific orientation=same")
    ## NO reduce region by query rownames
    names(region) <- NULL
    strand(region) <- "*"
    resSoll <- matrix(0, nrow=12, ncol=7, byrow=TRUE)
    resSoll[,1] = c(rep(100,11),50)
    resSoll[,c(2,3,4,5,6,7)] = resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 4: qCount allele specific orientation=same")
    strand(region) <- "+"
    resSoll[,c(5,6,7)] = 0
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 5: qCount allele specific orientation=same")
    strand(region) <- "-"
    resSoll[,c(2,3,4)] = 0
    resSoll[,c(5,6,7)] = resSoll[,1]
    res <- qCount(project, region, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 6: qCount allele specific orientation=same")
    ## Masking Test 1
    # mask out the H4 windows
    mask <- tilingRegion[names(tilingRegion) == "H4"]
    ## reduce region by query rownames
    region <- tilingRegion
    strand(region) <- "+"
    resSoll <- matrix(0, nrow=4, ncol=7, byrow=TRUE)
    resSoll[,1] = c(200,300,150,0)
    resSoll[,c(2,3,4,5,6,7)] = resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRanges Test 7: qCount allele specific with masking and orientation=same")
    strand(region) <- "+"
    resSoll[,c(5,6,7)] = 0
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 8: qCount allele specific with masking and orientation=same")
    strand(region) <- "-"
    resSoll[,c(2,3,4)] = 0
    resSoll[,c(5,6,7)] = resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 9: qCount allele specific with masking and orientation=same")
    ## NO reduce region by query rownames
    names(region) <- NULL
    strand(region) <- "+"
    resSoll <- matrix(0, nrow=12, ncol=7, byrow=TRUE)
    resSoll[,1] = c(100,100,50,0,50,100,50,0,50,100,50,0)
    resSoll[,c(2,3,4,5,6,7)] = resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRanges Test 10: qCount allele specific with masking and orientation=any")
    strand(region) <- "+"
    resSoll[,c(5,6,7)] = 0
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 11: qCount allele specific with masking and orientation=same")
    strand(region) <- "-"
    resSoll[,c(2,3,4)] = 0
    resSoll[,c(5,6,7)] = resSoll[,1]
    res <- qCount(project, region, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRanges Test 12: qCount allele specific with masking and orientation=same")
}
# Allele-specific variant of test_query_GRangesList (7 result columns).
# Fix: spell out TRUE/FALSE instead of T/F.
test_query_GRangesList_allelic <- function() {
    project <- projectSingleAllelic
    region <- tilingRegion
    strand(region) <- "*"
    regionList <- split(region, names(region))
    resSoll <- matrix(0, nrow=4, ncol=7, byrow=TRUE)
    resSoll[,1] = c(300,150,150,0)
    resSoll[,c(2,3,4,5,6,7)] = resSoll[,1]
    res <- qCount(project, regionList, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 1: qCount allele specific orientation=same")
    strand(region) <- "+"
    regionList <- split(region, names(region))
    resSoll[,c(5,6,7)] = 0
    res <- qCount(project, regionList, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 2: qCount allele specific orientation=same")
    strand(region) <- "-"
    regionList <- split(region, names(region))
    resSoll[,c(2,3,4)] = 0
    resSoll[,c(5,6,7)] = resSoll[,1]
    res <- qCount(project, regionList, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 3: qCount allele specific orientation=same")
    ## Masking Test 1
    # mask out the H4 windows
    mask <- tilingRegion[names(tilingRegion) == "H4"]
    region <- tilingRegion
    strand(region) <- "+"
    regionList <- split(region, names(region))
    resSoll <- matrix(0, nrow=4, ncol=7, byrow=TRUE)
    resSoll[,1] = c(200,150,0,0)
    resSoll[,c(2,3,4,5,6,7)] = resSoll[,1]
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="any")
    checkTrue(all(resSoll == res), "GRangesList Test 4: qCount allele specific with masking and orientation=any")
    strand(region) <- "+"
    regionList <- split(region, names(region))
    resSoll[,c(5,6,7)] = 0
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 5: qCount allele specific with masking and orientation=same")
    strand(region) <- "-"
    regionList <- split(region, names(region))
    resSoll[,c(2,3,4)] = 0
    resSoll[,c(5,6,7)] = resSoll[,1]
    res <- qCount(project, regionList, mask=mask, collapseBySample=FALSE, orientation="same")
    checkTrue(all(resSoll == res), "GRangesList Test 6: qCount allele specific with masking and orientation=same")
}
test_query_TxDb <- function() {
  # qCount with a TxDb query must agree with the equivalent GRanges query at
  # the gene, exon and promoter report levels, with and without masking, and
  # junction counts must equal the spliced/unspliced exon-count difference.
  # Note: T/F replaced with TRUE/FALSE (T and F are reassignable in R).
  project <- qAlign(sampleFile, genomeFile, splicedAlignment = TRUE,
                    alignmentsDir = td, clObj = clObj)
  txdb <- createTxDb()
  mask <- gtfRegion[mcols(gtfRegion)$gene_name == "TNFRSF18"]

  ## TxDb vs GRanges
  # Gene level: reportLevel=NULL must behave like reportLevel="gene"
  res <- qCount(project, txdb, collapseBySample = FALSE, reportLevel = NULL)
  resTxdb <- qCount(project, txdb, collapseBySample = FALSE, reportLevel = "gene")
  checkTrue(all(resTxdb == res), "TxDb vs GRanges Test 1")
  region <- gtfRegion
  names(region) <- mcols(gtfRegion)$gene_id
  resGr <- qCount(project, region, collapseBySample = FALSE)
  resGr <- resGr[sort(rownames(resGr)), ]
  checkTrue(all(resTxdb == resGr), "TxDb vs GRanges Test 2")

  # Exon level
  resTxdb <- qCount(project, txdb, collapseBySample = FALSE, reportLevel = "exon")
  region <- exons(txdb)
  resGr <- qCount(project, region, collapseBySample = FALSE)
  resGr <- resGr[sort(rownames(resGr)), ]
  checkTrue(all(resTxdb == resGr), "TxDb vs GRanges Test 3")

  # Promoter level (row names are "tx_id;tx_name" as produced by qCount)
  resTxdb <- qCount(project, txdb, collapseBySample = FALSE, reportLevel = "promoter")
  region <- promoters(txdb, columns = c("tx_id", "tx_name"))
  names(region) <- paste(mcols(region)$tx_id, mcols(region)$tx_name, sep = ";")
  resGr <- qCount(project, region, collapseBySample = FALSE)
  resGr <- resGr[sort(rownames(resGr)), ]
  checkTrue(all(resTxdb == resGr), "TxDb vs GRanges Test 4")

  # Junctions / includeSpliced: the difference between spliced-inclusive and
  # spliced-exclusive exon counts must match the intron junction counts
  exGr <- GRanges(c("chr1", "chr1", "chr1", "chr1"),
                  IRanges(start = c(11720, 12322, 14043, 14363),
                          end = c(12212, 12518, 14165, 14512)))
  resE <- qCount(project, exGr, collapseBySample = FALSE)
  resEU <- qCount(project, exGr, collapseBySample = FALSE, includeSpliced = FALSE)
  inGr <- GRanges(c("chr1", "chr1"),
                  IRanges(start = c(12213, 14166), end = c(12321, 14362)),
                  strand = c("+", "+"))
  resJ <- qCount(project, NULL, reportLevel = "junction", collapseBySample = FALSE)
  checkTrue(all((resE - resEU)[c(1, 3), -1] ==
                  as.matrix(mcols(resJ[match(inGr, resJ)]))),
            "junction/includeSpliced Test 1")

  ## TxDb vs GRanges with masked region
  resTxdb <- qCount(project, txdb, collapseBySample = FALSE, mask = mask, reportLevel = "gene")
  region <- gtfRegion
  names(region) <- mcols(gtfRegion)$gene_id
  resGr <- qCount(project, region, collapseBySample = FALSE, mask = mask)
  resGr <- resGr[sort(rownames(resGr)), ]
  checkTrue(all(resTxdb == resGr), "TxDb vs GRanges Test 5")

  ## Collapse by sample: collapsed columns must equal per-file row sums
  resTxdbCS <- qCount(project, txdb, collapseBySample = TRUE, mask = mask, reportLevel = "gene")
  res <- cbind(rowSums(resTxdb[, c(2, 3)]), rowSums(resTxdb[, c(4, 5)]))
  checkTrue(all(res == resTxdbCS[, c(2, 3)]), "TxDb collapse by Sample Test")
}
test_collapseBySample_GRanges <- function() {
  # collapseBySample=TRUE must produce, for each sample, the same counts as
  # counting the corresponding project subset on its own.
  # Note: T replaced with TRUE (T is reassignable in R).
  ## Non allelic: one count column per collapsed sample
  project <- qAlign(sampleFile, genomeFile, alignmentsDir = td, clObj = clObj)
  res <- qCount(project, gtfRegion, collapseBySample = TRUE)
  projectS1 <- project[1:2]
  resS1 <- qCount(projectS1, gtfRegion, collapseBySample = TRUE)
  projectS2 <- project[3:4]
  resS2 <- qCount(projectS2, gtfRegion, collapseBySample = TRUE)
  checkTrue(all(res[, 2:3] == cbind(resS1[, 2], resS2[, 2])),
            "Test collapseBySample; Collapse counts are not equal.")

  ## Allelic: three count columns per collapsed sample
  project <- qAlign(sampleFile, genomeFile, snpFile = snpFile, alignmentsDir = td, clObj = clObj)
  res <- qCount(project, gtfRegion, collapseBySample = TRUE)
  projectS1 <- project[1:2]
  resS1 <- qCount(projectS1, gtfRegion, collapseBySample = TRUE)
  projectS2 <- project[3:4]
  resS2 <- qCount(projectS2, gtfRegion, collapseBySample = TRUE)
  checkTrue(all(res[, 2:7] == cbind(resS1[, 2:4], resS2[, 2:4])),
            "Test collapseBySample; Collapse counts are not equal for allelic.")
}
test_auxiliaryName <- function() {
  # Counting against an auxiliary target genome selected via auxiliaryName
  # ("phiX174") must yield the known per-sample counts.
  # Note: F replaced with FALSE (F is reassignable in R).
  project <- qAlign(file.path("extdata", "samples_chip_single.txt"), genomeFile,
                    auxiliaryFile = file.path("extdata", "auxiliaries.txt"),
                    alignmentsDir = td, clObj = clObj)
  ## Test aux counts
  auxRegion <- createAuxRegion()
  res <- qCount(project, auxRegion, collapseBySample = FALSE, auxiliaryName = "phiX174")
  resSoll <- c(251, 493)
  checkTrue(all(resSoll == res[, 2:3]))
}
test_includeSecondary <- function() {
  ## Secondary alignments are counted by default and must be excluded when
  ## includeSecondary=FALSE, for every useRead mode of a paired-end project.
  proj <- qAlign(file.path("extdata", "phiX_paired_withSecondary_sampleFile.txt"),
                 file.path("extdata", "NC_001422.1.fa"), paired = "fr")
  gr <- GRanges("phiX174", IRanges(start = 1, end = 5386))

  # Helper: count alignments over the full phiX genome for one read
  # selection, forwarding any extra qCount() arguments unchanged.
  cnt <- function(read, ...) {
    qCount(proj, gr, useRead = read, ...)[1, "test"]
  }

  # include secondary alignments (qCount default)
  checkTrue(cnt("any") == 696)
  checkTrue(cnt("first") == 348)
  checkTrue(cnt("last") == 348)

  # exclude secondary alignments
  checkTrue(cnt("any", includeSecondary = FALSE) == 384)
  checkTrue(cnt("first", includeSecondary = FALSE) == 192)
  checkTrue(cnt("last", includeSecondary = FALSE) == 192)
}
|
d41f953c35306add3d7b196b5e9adfa2205af672
|
a66e9e10a8d2d8919615009054cbe40c560930cc
|
/man/eq_diagram_xgroup.Rd
|
199accea0038421b36d9aa1b0a178c358a646ae3
|
[] |
no_license
|
skranz/RelationalContracts
|
91035b5ae6ce0d5804688da1b98655c84fdb15a2
|
e29d080e8bfff5d9033f9b0f8b86ff5e002c6369
|
refs/heads/master
| 2021-06-14T17:48:23.383867
| 2021-03-05T13:27:25
| 2021-03-05T13:27:25
| 161,347,415
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,012
|
rd
|
eq_diagram_xgroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagram.R
\name{eq_diagram_xgroup}
\alias{eq_diagram_xgroup}
\title{Draws a diagram of equilibrium state transitions}
\usage{
eq_diagram_xgroup(
g,
show.own.loop = FALSE,
show.terminal.loop = FALSE,
use.x = NULL,
just.eq.chain = FALSE,
x0 = g$sdf$x[1],
hide.passive.edge = TRUE,
add.passive.edge = TRUE,
label.fun = NULL,
tooltip.fun = NULL,
active.edge.color = "#000077",
passive.edge.color = "#dddddd",
passive.edge.width = 1,
return.dfs = FALSE,
eq = g[["eq"]],
ap.col = if (has.col(eq, "ap")) "ap" else NA,
font.size = 24,
font = paste0(font.size, "px Arial black")
)
}
\arguments{
\item{g}{The solved game object}
\item{show.own.loop}{Shall a loop from a state to itself be drawn if there is a positive probability to stay in the state? (Default=FALSE)}
\item{show.terminal.loop}{Only relevant if \code{show.own.loop = TRUE}. If still \code{show.terminal.loop = FALSE}, omit loops in terminal states that don't transition to any other state.}
\item{use.x}{optionally a vector of state ids that shall only be shown.}
\item{just.eq.chain}{If TRUE only show states that can be reached with positive probability on the equilibrium path when starting from state x0.}
\item{x0}{only relevant if \code{just.eq.chain=TRUE}. The ID of the x0 state. By default the first defined state.}
\item{label.fun}{An optional function that takes the equilibrium object and game and returns a character vector that contains a label for each state.}
\item{tooltip.fun}{Similar to \code{label.fun} but for the tooltip shown on a state.}
\item{return.dfs}{if TRUE don't show diagram but only return the relevant edge and node data frames that can be used to call \code{DiagrammeR::create_graph}. Useful if you want to manually customize graphs further.}
}
\description{
Draws an arrow from state x to state y if and
only if on the equilibrium path there is a positive
probability to directly transition from x to y.
}
|
cde29911ccdccbbe5ac8731d4e0e22344cc020b7
|
a4b6795dda7fba0795566d2034cdabf47cd5deae
|
/R/batchImport.R
|
575d3e63e4d6bc25a659154f5f3fac6c4555e28f
|
[] |
no_license
|
alphonse/CavSpec
|
94b7a88590bdc8927b5f61dd4cf926d71d714df9
|
ea654b367e94c2fc64ba8ab7d9bed6af38fc49f8
|
refs/heads/master
| 2021-01-19T13:53:03.083455
| 2015-05-14T17:37:50
| 2015-05-14T17:37:50
| 30,997,047
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 362
|
r
|
batchImport.R
|
batchImport <- function(sample.path, ...) {
  # Read every .txt file in `sample.path` into one long data frame with
  # columns Gas (file name without extension), lambda and I.
  #
  # Args:
  #   sample.path: directory containing the .txt sample files.
  #   ...: further arguments forwarded to read.table().
  #
  # Returns: a data frame built from columns 1, 4 and 5 of each file,
  # stacked by ldply() with the file-derived name in the first column.
  #
  # Fix: list.files(pattern=) takes a regular expression, so the previous
  # glob-style '*.txt' did not anchor on the extension; '\\.txt$' does.
  # The name is now derived with sub() instead of the fragile
  # as.character(strsplit(...)) round-trip.
  files <- list.files(path = sample.path, pattern = '\\.txt$', full.names = TRUE)
  files %>%
    lapply(FUN = read.table, ...) %>%
    setNames(sub('\\.txt$', '', basename(files))) %>%
    ldply() %>%
    subset(select = c(1, 4, 5)) %>%
    setNames(c('Gas', 'lambda', 'I'))
}
|
4b8aaebf639ec19e42610e5604321de1a822c712
|
54d035cc403340cd4d4aed2ef90908ad3ce53873
|
/code/shinyApps/buildTeams2p0/global.R
|
9715135882f2c14ee6a3b019872ecb10b222c468
|
[] |
no_license
|
brian-bot/penguinLeague
|
3c2e201966f5be152d97ad5bbcb855a7afeb6a33
|
fd408ba6f13d73fe51f21854b96774d393ab64f2
|
refs/heads/master
| 2020-12-29T02:22:07.084036
| 2019-09-02T21:20:39
| 2019-09-02T21:20:39
| 33,828,738
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,144
|
r
|
global.R
|
# Shiny global.R: shared setup run once per app start.

# Paths to the local repository checkout and the 2019 MLB data directory.
baseRepoDir <- file.path(path.expand("~"), "workspace/repos/penguinLeague")
baseDataDir <- file.path(baseRepoDir, "data/2019/mlb")

# League bootstrap script; defines (among others) the `periods` list used
# below -- TODO confirm its full set of side effects.
source(file.path(baseRepoDir, "code/generalScripts/leagueBootstrap2019.R"))

# Loads `rangeData`; usage below shows it holds `batters` and `pitchers`
# data frames with a fullName column and player ids as row names.
load(file.path(baseDataDir, "rangeData.RData"))

# Build a deduplicated lookup table mapping player ids to full names,
# ordered and keyed (row names) by id.
allNames <- data.frame(fullName = c(rangeData$batters$fullName, rangeData$pitchers$fullName),
withId = c(rownames(rangeData$batters), rownames(rangeData$pitchers)),
stringsAsFactors = FALSE)
rownames(allNames) <- NULL
allNames <- allNames[ !duplicated(allNames), ]
rownames(allNames) <- allNames$withId
allNames <- allNames[ order(allNames$withId), ]

# Locate today's scoring period and every period that has already started.
today <- Sys.Date()
currentPeriod <- which(sapply(periods, function(x){ today >= x$startDate & today <= x$endDate}))
seasonPeriods <- which(sapply(periods, function(x){ today >= x$startDate }))
# Before the season opener, fall back to the 2019-03-20 opening period.
if(length(seasonPeriods) == 0){
seasonPeriods <- which(sapply(periods, function(x){ x$startDate == as.Date("2019-03-20")}))
currentPeriod <- which(sapply(periods, function(x){ x$startDate == as.Date("2019-03-20")}))
}

# User-specific configuration read from a dotfile in the home directory.
penguinConfig <- readLines(file.path(path.expand("~"), ".penguinConfig"))
|
53250d91e259e75969ab7d0a0c337d6dc6cdb90e
|
621b4f6601a37b688cc509e776b5d5d167999a64
|
/server.R
|
bab36749ad90b05beed2ecfd475e3df98ad2fb58
|
[] |
no_license
|
jrnold/shiny_diamonds
|
46bc2b3fd4948b3d67707d5f87abb6b3611f2093
|
454ad7f8ccab6d4f34ea4c17191ca5c5aa6027d5
|
refs/heads/master
| 2016-09-11T05:59:31.152629
| 2013-04-06T20:30:06
| 2013-04-06T20:30:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
r
|
server.R
|
library(shiny)
library(ggplot2)

data("diamonds")

# Shiny server for browsing the ggplot2 `diamonds` data set: renders a
# summary plus a table of selected columns, optionally randomly sampled
# and sorted according to the UI inputs.
shinyServer(function(input, output) {
  # Reactive accessor for the data set (kept reactive so a different
  # source could be plugged in without touching the outputs).
  datasetInput <- reactive(diamonds)

  # Full summary of every column.
  output$summary <- renderPrint({
    summary(datasetInput())
  })

  # Table of the user-selected columns, limited to `obs` rows.
  output$table <- renderTable({
    n <- nrow(diamonds)
    x <- datasetInput()
    x <- x[, input$variables, drop = FALSE]
    if (input$random) {
      # Random sample of `obs` rows without replacement.
      # (Removed leftover debug output: cat("foo"))
      x <- x[sample(seq_len(n), input$obs, replace = FALSE), , drop = FALSE]
    } else {
      x <- head(x, input$obs)
    }
    # " " is the UI's sentinel for "no sorting".
    if (input$sort != " ") {
      sorder <- order(x[[input$sort]])
      x <- x[sorder, , drop = FALSE]
    }
    x
  })

  # Plot outputs are defined in a separate file sharing this environment.
  source("plot.R", local = TRUE)
})
|
d22575f64ae981bc7a07e1bd5121d14fd77fa512
|
ae92ad692c4b405c2ac227b11724e63527ed23c3
|
/results/19-07-15-OptimalityStrongestPredictorOfMrnaStability/mdl_comparison_weights.R
|
a5c3bb6d9ec213535c38d4f8c4479e554c0afcd3
|
[
"MIT"
] |
permissive
|
santiago1234/MZT-rna-stability
|
86f9ebed7ad91596c7fc6a68a9b88298513f4d5e
|
8b886d9b05cd1f304439e4735268bedbaf2d007a
|
refs/heads/master
| 2023-04-12T13:59:52.324713
| 2021-12-19T04:48:17
| 2021-12-19T04:48:17
| 164,946,878
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,143
|
r
|
mdl_comparison_weights.R
|
library(tidyverse)
library(brms)

# Per-pathway mRNA stability data with predictor columns (PLS components,
# m6A, microRNAsites). Keep only complete rows: complete.cases() replaces
# the previous apply(dset, 1, ...) idiom, which coerces the data frame to
# a matrix row by row.
dset <- read_csv("results_data/pathways_mRNAstability.csv")
dset <- dset[complete.cases(dset), ]
mdl_weights <- function(dset) {
  # Bayesian model comparison: fit one gaussian regression of `stability`
  # per candidate predictor set (codon optimality PLS components, m6A,
  # microRNA sites), compute approximate leave-one-out ELPDs, and return
  # the resulting stacking weights as a tibble (model, weights).
  formulas <- list(
    optimality = bf(stability ~ PLS_1 + PLS_2 + PLS_3 + PLS_4 + PLS_5 +
                      PLS_6 + PLS_7 + PLS_8 + PLS_9),
    m6A       = bf(stability ~ m6A),
    microRNA  = bf(stability ~ microRNAsites)
  )

  # Fit each model and immediately derive its loo object; names of the
  # resulting list carry over from `formulas`.
  loo_list <- lapply(formulas, function(fml) {
    fit <- brm(fml, family = gaussian(), data = dset, chains = 2, cores = 2)
    loo(fit)
  })

  wts <- loo_model_weights(loo_list)
  tibble(
    model = names(wts),
    weights = as.vector(wts)
  )
}
# Run the model-weight comparison separately for each specie / cell_type
# combination: nest() yields one data frame per group, and mdl_weights()
# is mapped over those nested frames.
mdlwts <- dset %>%
group_by(specie, cell_type) %>%
nest() %>%
mutate(
model_weights = map(data, mdl_weights)
)

# Flatten the per-group weight tibbles and persist the result to CSV.
unnest(mdlwts, model_weights) %>%
write_csv("results_data/mdl_weights.csv")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.